Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/cn_proc.h     13
-rw-r--r--   include/linux/ptrace.h     104
-rw-r--r--   include/linux/sched.h       52
-rw-r--r--   include/linux/tracehook.h  385
4 files changed, 121 insertions, 433 deletions
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 47dac5ea8d3a..12c517b51ca2 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -53,6 +53,7 @@ struct proc_event {
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
+ PROC_EVENT_PTRACE = 0x00000100,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit */
PROC_EVENT_EXIT = 0x80000000
@@ -95,6 +96,13 @@ struct proc_event {
__kernel_pid_t process_tgid;
} sid;
+ struct ptrace_proc_event {
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
+ __kernel_pid_t tracer_pid;
+ __kernel_pid_t tracer_tgid;
+ } ptrace;
+
struct exit_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
@@ -109,6 +117,7 @@ void proc_fork_connector(struct task_struct *task);
void proc_exec_connector(struct task_struct *task);
void proc_id_connector(struct task_struct *task, int which_id);
void proc_sid_connector(struct task_struct *task);
+void proc_ptrace_connector(struct task_struct *task, int which_id);
void proc_exit_connector(struct task_struct *task);
#else
static inline void proc_fork_connector(struct task_struct *task)
@@ -124,6 +133,10 @@ static inline void proc_id_connector(struct task_struct *task,
static inline void proc_sid_connector(struct task_struct *task)
{}
+static inline void proc_ptrace_connector(struct task_struct *task,
+ int ptrace_id)
+{}
+
static inline void proc_exit_connector(struct task_struct *task)
{}
#endif /* CONFIG_PROC_EVENTS */
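As an aside on the new message type: a userspace proc-connector listener that already receives struct proc_event messages could decode PROC_EVENT_PTRACE roughly as below. The netlink/connector receive loop is not shown, and handle_proc_event() is a hypothetical helper name, not something defined by this patch.

#include <stdio.h>
#include <linux/cn_proc.h>

/* Hypothetical sketch: decode the new ptrace event once a struct proc_event
 * has already been read from the connector socket (delivery code not shown). */
static void handle_proc_event(const struct proc_event *ev)
{
	if (ev->what != PROC_EVENT_PTRACE)
		return;

	printf("pid %d (tgid %d): ptrace state changed, tracer pid %d (tgid %d)\n",
	       ev->event_data.ptrace.process_pid,
	       ev->event_data.ptrace.process_tgid,
	       ev->event_data.ptrace.tracer_pid,
	       ev->event_data.ptrace.tracer_tgid);
}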
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 9178d5cc0b01..800f113bea66 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -47,6 +47,13 @@
#define PTRACE_GETREGSET 0x4204
#define PTRACE_SETREGSET 0x4205
+#define PTRACE_SEIZE 0x4206
+#define PTRACE_INTERRUPT 0x4207
+#define PTRACE_LISTEN 0x4208
+
+/* flags in @data for PTRACE_SEIZE */
+#define PTRACE_SEIZE_DEVEL 0x80000000 /* temp flag for development */
+
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
@@ -65,6 +72,7 @@
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6
+#define PTRACE_EVENT_STOP 7
#include <asm/ptrace.h>
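A rough userspace sketch, not part of the patch, of how a tracer might exercise the new requests: attach with PTRACE_SEIZE, then ask for a trap with PTRACE_INTERRUPT and wait for the tracee to report a stop. The constants are defined locally in case the installed libc headers predate them, and PTRACE_SEIZE_DEVEL is passed in the data argument as the comment above requires; the exact stop and reporting semantics come from the implementation rather than this header, so treat the sequence as illustrative only.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SEIZE
#define PTRACE_SEIZE		0x4206
#endif
#ifndef PTRACE_INTERRUPT
#define PTRACE_INTERRUPT	0x4207
#endif
#ifndef PTRACE_SEIZE_DEVEL
#define PTRACE_SEIZE_DEVEL	0x80000000
#endif

/* Hypothetical helper: seize @pid, then ask it to trap and wait for it. */
static int seize_and_interrupt(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SEIZE, pid, NULL,
		   (void *)(unsigned long)PTRACE_SEIZE_DEVEL) < 0)
		return -1;
	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, &status, 0) < 0)
		return -1;
	printf("tracee stopped, wait status %#x\n", status);
	return 0;
}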
@@ -77,16 +85,22 @@
* flags. When a task is stopped the ptracer owns task->ptrace.
*/
+#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000004
#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
-#define PT_TRACE_FORK 0x00000010
-#define PT_TRACE_VFORK 0x00000020
-#define PT_TRACE_CLONE 0x00000040
-#define PT_TRACE_EXEC 0x00000080
-#define PT_TRACE_VFORK_DONE 0x00000100
-#define PT_TRACE_EXIT 0x00000200
+
+/* PT_TRACE_* event enable flags */
+#define PT_EVENT_FLAG_SHIFT 4
+#define PT_EVENT_FLAG(event) (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))
+
+#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
+#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
+#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
+#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
+#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
+#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_MASK 0x000003f4
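The re-encoded PT_TRACE_* values are meant to reproduce the old literal constants: event numbers 1..6 map to bits 4..9. A standalone sanity check of that arithmetic, mirroring the definitions above rather than including kernel headers (illustrative only, not part of the patch):

/* Illustrative only: PT_EVENT_FLAG(event) == 1 << (4 + event - 1). */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_EXIT	6
#define PT_EVENT_FLAG_SHIFT	4
#define PT_EVENT_FLAG(event)	(1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))

_Static_assert(PT_EVENT_FLAG(PTRACE_EVENT_FORK) == 0x00000010, "old PT_TRACE_FORK");
_Static_assert(PT_EVENT_FLAG(PTRACE_EVENT_EXIT) == 0x00000200, "old PT_TRACE_EXIT");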
@@ -105,7 +119,7 @@ extern long arch_ptrace(struct task_struct *child, long request,
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, int kill);
+extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
@@ -122,7 +136,7 @@ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
static inline int ptrace_reparented(struct task_struct *child)
{
- return child->real_parent != child->parent;
+ return !same_thread_group(child->real_parent, child->parent);
}
static inline void ptrace_unlink(struct task_struct *child)
@@ -137,36 +151,56 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
unsigned long data);
/**
- * task_ptrace - return %PT_* flags that apply to a task
- * @task: pointer to &task_struct in question
+ * ptrace_parent - return the task that is tracing the given task
+ * @task: task to consider
*
- * Returns the %PT_* flags that apply to @task.
+ * Returns %NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock(). The pointer returned might be kept
+ * live only by RCU. During exec, this may be called with task_lock() held
+ * on @task, still held from when check_unsafe_exec() was called.
*/
-static inline int task_ptrace(struct task_struct *task)
+static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
- return task->ptrace;
+ if (unlikely(task->ptrace))
+ return rcu_dereference(task->parent);
+ return NULL;
+}
+
+/**
+ * ptrace_event_enabled - test whether a ptrace event is enabled
+ * @task: ptracee of interest
+ * @event: %PTRACE_EVENT_* to test
+ *
+ * Test whether @event is enabled for ptracee @task.
+ *
+ * Returns %true if @event is enabled, %false otherwise.
+ */
+static inline bool ptrace_event_enabled(struct task_struct *task, int event)
+{
+ return task->ptrace & PT_EVENT_FLAG(event);
}
/**
* ptrace_event - possibly stop for a ptrace event notification
- * @mask: %PT_* bit to check in @current->ptrace
- * @event: %PTRACE_EVENT_* value to report if @mask is set
+ * @event: %PTRACE_EVENT_* value to report
* @message: value for %PTRACE_GETEVENTMSG to return
*
- * This checks the @mask bit to see if ptrace wants stops for this event.
- * If so we stop, reporting @event and @message to the ptrace parent.
- *
- * Returns nonzero if we did a ptrace notification, zero if not.
+ * Check whether @event is enabled and, if so, report @event and @message
+ * to the ptrace parent.
*
* Called without locks.
*/
-static inline int ptrace_event(int mask, int event, unsigned long message)
+static inline void ptrace_event(int event, unsigned long message)
{
- if (mask && likely(!(current->ptrace & mask)))
- return 0;
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
- return 1;
+ if (unlikely(ptrace_event_enabled(current, event))) {
+ current->ptrace_message = message;
+ ptrace_notify((event << 8) | SIGTRAP);
+ } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
+ /* legacy EXEC report via SIGTRAP */
+ send_sig(SIGTRAP, current, 0);
+ }
}
/**
@@ -183,16 +217,24 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
INIT_LIST_HEAD(&child->ptrace_entry);
INIT_LIST_HEAD(&child->ptraced);
- child->parent = child->real_parent;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_set(&child->ptrace_bp_refcnt, 1);
+#endif
+ child->jobctl = 0;
child->ptrace = 0;
- if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
+ child->parent = child->real_parent;
+
+ if (unlikely(ptrace) && current->ptrace) {
child->ptrace = current->ptrace;
__ptrace_link(child, current->parent);
- }
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- atomic_set(&child->ptrace_bp_refcnt, 1);
-#endif
+ if (child->ptrace & PT_SEIZED)
+ task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
+ else
+ sigaddset(&child->pending.signal, SIGSTOP);
+
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+ }
}
/**
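None of the callers appear in this diff, but the shape of a report site under the new interface follows from the change above: the mask argument is gone because ptrace_event() now checks the event enable bit itself. A kernel-style sketch (not buildable outside the tree, function name made up) of what, for instance, the tracehook_report_exit() wrapper removed from tracehook.h below collapses to at its call site:

#include <linux/ptrace.h>

/* Hypothetical caller: stops and reports only if the ptracer enabled
 * PTRACE_EVENT_EXIT for current; otherwise a no-op. */
static void example_report_exit(long code)
{
	ptrace_event(PTRACE_EVENT_EXIT, code);
}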
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 14a6c7b545de..f6ef727ee4fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1292,7 +1292,7 @@ struct task_struct {
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
- unsigned int group_stop; /* GROUP_STOP_*, siglock protected */
+ unsigned int jobctl; /* JOBCTL_*, siglock protected */
/* ??? */
unsigned int personality;
unsigned did_exec:1;
@@ -1813,15 +1813,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define used_math() tsk_used_math(current)
/*
- * task->group_stop flags
+ * task->jobctl flags
*/
-#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */
-#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */
-#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */
-#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */
-#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */
-
-extern void task_clear_group_stop_pending(struct task_struct *task);
+#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
+
+#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
+#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task,
+ unsigned int mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task,
+ unsigned int mask);
#ifdef CONFIG_PREEMPT_RCU
@@ -2136,7 +2155,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
return ret;
-}
+}
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
sigset_t *mask);
@@ -2151,7 +2170,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int do_notify_parent(struct task_struct *, int);
+extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
@@ -2275,8 +2294,10 @@ static inline int get_nr_threads(struct task_struct *tsk)
return tsk->signal->nr_threads;
}
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p) (p == p->group_leader)
+static inline bool thread_group_leader(struct task_struct *p)
+{
+ return p->exit_signal >= 0;
+}
/* Do to the insanities of de_thread it is possible for a process
* to have the pid of the thread group leader without actually being
@@ -2309,11 +2330,6 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-static inline int task_detached(struct task_struct *p)
-{
- return p->exit_signal == -1;
-}
-
/*
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
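The new layout keeps every JOBCTL_* flag bit above the 16-bit stop-signal field, so the pending stop signal number and the control bits can share task->jobctl without clashing. A standalone sanity check of that arithmetic, mirroring the definitions above rather than including kernel headers (illustrative only, not part of the patch):

#define JOBCTL_STOP_SIGMASK	0xffff
#define JOBCTL_STOP_PENDING	(1 << 17)
#define JOBCTL_TRAP_STOP	(1 << 19)
#define JOBCTL_TRAP_NOTIFY	(1 << 20)
#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

_Static_assert((JOBCTL_PENDING_MASK & JOBCTL_STOP_SIGMASK) == 0,
	       "jobctl flag bits must not overlap the stop signal number");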
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index e95f5236611f..a71a2927a6a0 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,27 +51,12 @@
#include <linux/security.h>
struct linux_binprm;
-/**
- * tracehook_expect_breakpoints - guess if task memory might be touched
- * @task: current task, making a new mapping
- *
- * Return nonzero if @task is expected to want breakpoint insertion in
- * its memory at some point. A zero return is no guarantee it won't
- * be done, but this is a hint that it's known to be likely.
- *
- * May be called with @task->mm->mmap_sem held for writing.
- */
-static inline int tracehook_expect_breakpoints(struct task_struct *task)
-{
- return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
/*
* ptrace report for syscall entry and exit looks identical.
*/
static inline void ptrace_report_syscall(struct pt_regs *regs)
{
- int ptrace = task_ptrace(current);
+ int ptrace = current->ptrace;
if (!(ptrace & PT_PTRACED))
return;
@@ -145,229 +130,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
}
/**
- * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
- * @task: current task doing exec
- *
- * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
- *
- * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
- */
-static inline int tracehook_unsafe_exec(struct task_struct *task)
-{
- int unsafe = 0;
- int ptrace = task_ptrace(task);
- if (ptrace & PT_PTRACED) {
- if (ptrace & PT_PTRACE_CAP)
- unsafe |= LSM_UNSAFE_PTRACE_CAP;
- else
- unsafe |= LSM_UNSAFE_PTRACE;
- }
- return unsafe;
-}
-
-/**
- * tracehook_tracer_task - return the task that is tracing the given task
- * @tsk: task to consider
- *
- * Returns NULL if no one is tracing @task, or the &struct task_struct
- * pointer to its tracer.
- *
- * Must called under rcu_read_lock(). The pointer returned might be kept
- * live only by RCU. During exec, this may be called with task_lock()
- * held on @task, still held from when tracehook_unsafe_exec() was called.
- */
-static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
-{
- if (task_ptrace(tsk) & PT_PTRACED)
- return rcu_dereference(tsk->parent);
- return NULL;
-}
-
-/**
- * tracehook_report_exec - a successful exec was completed
- * @fmt: &struct linux_binfmt that performed the exec
- * @bprm: &struct linux_binprm containing exec details
- * @regs: user-mode register state
- *
- * An exec just completed, we are shortly going to return to user mode.
- * The freshly initialized register state can be seen and changed in @regs.
- * The name, file and other pointers in @bprm are still on hand to be
- * inspected, but will be freed as soon as this returns.
- *
- * Called with no locks, but with some kernel resources held live
- * and a reference on @fmt->module.
- */
-static inline void tracehook_report_exec(struct linux_binfmt *fmt,
- struct linux_binprm *bprm,
- struct pt_regs *regs)
-{
- if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
- unlikely(task_ptrace(current) & PT_PTRACED))
- send_sig(SIGTRAP, current, 0);
-}
-
-/**
- * tracehook_report_exit - task has begun to exit
- * @exit_code: pointer to value destined for @current->exit_code
- *
- * @exit_code points to the value passed to do_exit(), which tracing
- * might change here. This is almost the first thing in do_exit(),
- * before freeing any resources or setting the %PF_EXITING flag.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_exit(long *exit_code)
-{
- ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
-}
-
-/**
- * tracehook_prepare_clone - prepare for new child to be cloned
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- *
- * This is called before a new user task is to be cloned.
- * Its return value will be passed to tracehook_finish_clone().
- *
- * Called with no locks held.
- */
-static inline int tracehook_prepare_clone(unsigned clone_flags)
-{
- if (clone_flags & CLONE_UNTRACED)
- return 0;
-
- if (clone_flags & CLONE_VFORK) {
- if (current->ptrace & PT_TRACE_VFORK)
- return PTRACE_EVENT_VFORK;
- } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
- if (current->ptrace & PT_TRACE_CLONE)
- return PTRACE_EVENT_CLONE;
- } else if (current->ptrace & PT_TRACE_FORK)
- return PTRACE_EVENT_FORK;
-
- return 0;
-}
-
-/**
- * tracehook_finish_clone - new child created and being attached
- * @child: new child task
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- * @trace: return value from tracehook_prepare_clone()
- *
- * This is called immediately after adding @child to its parent's children list.
- * The @trace value is that returned by tracehook_prepare_clone().
- *
- * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_clone(struct task_struct *child,
- unsigned long clone_flags, int trace)
-{
- ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
-}
-
-/**
- * tracehook_report_clone - in parent, new child is about to start running
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: new child task
- *
- * Called after a child is set up, but before it has been started running.
- * This is not a good place to block, because the child has not started
- * yet. Suspend the child here if desired, and then block in
- * tracehook_report_clone_complete(). This must prevent the child from
- * self-reaping if tracehook_report_clone_complete() uses the @child
- * pointer; otherwise it might have died and been released by the time
- * tracehook_report_clone_complete() is called.
- *
- * Called with no locks held, but the child cannot run until this returns.
- */
-static inline void tracehook_report_clone(struct pt_regs *regs,
- unsigned long clone_flags,
- pid_t pid, struct task_struct *child)
-{
- if (unlikely(task_ptrace(child))) {
- /*
- * It doesn't matter who attached/attaching to this
- * task, the pending SIGSTOP is right in any case.
- */
- sigaddset(&child->pending.signal, SIGSTOP);
- set_tsk_thread_flag(child, TIF_SIGPENDING);
- }
-}
-
-/**
- * tracehook_report_clone_complete - new child is running
- * @trace: return value from tracehook_prepare_clone()
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: child task, already running
- *
- * This is called just after the child has started running. This is
- * just before the clone/fork syscall returns, or blocks for vfork
- * child completion if @clone_flags has the %CLONE_VFORK bit set.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_clone_complete(int trace,
- struct pt_regs *regs,
- unsigned long clone_flags,
- pid_t pid,
- struct task_struct *child)
-{
- if (unlikely(trace))
- ptrace_event(0, trace, pid);
-}
-
-/**
- * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
- * @child: child task, already running
- * @pid: new child's PID in the parent's namespace
- *
- * Called after a %CLONE_VFORK parent has waited for the child to complete.
- * The clone/vfork system call will return immediately after this.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_vfork_done(struct task_struct *child,
- pid_t pid)
-{
- ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
-}
-
-/**
- * tracehook_prepare_release_task - task is being reaped, clean up tracing
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() just before @task gets finally reaped
- * and freed. This would be the ideal place to remove and clean up any
- * tracing-related state for @task.
- *
- * Called with no locks held.
- */
-static inline void tracehook_prepare_release_task(struct task_struct *task)
-{
-}
-
-/**
- * tracehook_finish_release_task - final tracing clean-up
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() when @task is being in the middle of
- * being reaped. After this, there must be no tracing entanglements.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_release_task(struct task_struct *task)
-{
- ptrace_release_task(task);
-}
-
-/**
* tracehook_signal_handler - signal handler setup is complete
* @sig: number of signal being delivered
* @info: siginfo_t of signal being delivered
@@ -390,151 +152,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
ptrace_notify(SIGTRAP);
}
-/**
- * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return zero iff tracing doesn't care to examine this ignored signal,
- * so it can short-circuit normal delivery and never even get queued.
- *
- * Called with @task->sighand->siglock held.
- */
-static inline int tracehook_consider_ignored_signal(struct task_struct *task,
- int sig)
-{
- return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_consider_fatal_signal - suppress special handling of fatal signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return nonzero to prevent special handling of this termination signal.
- * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is
- * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
- * When this returns zero, this signal might cause a quick termination
- * that does not give the debugger a chance to intercept the signal.
- *
- * Called with or without @task->sighand->siglock held.
- */
-static inline int tracehook_consider_fatal_signal(struct task_struct *task,
- int sig)
-{
- return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_force_sigpending - let tracing force signal_pending(current) on
- *
- * Called when recomputing our signal_pending() flag. Return nonzero
- * to force the signal_pending() flag on, so that tracehook_get_signal()
- * will be called before the next return to user mode.
- *
- * Called with @current->sighand->siglock held.
- */
-static inline int tracehook_force_sigpending(void)
-{
- return 0;
-}
-
-/**
- * tracehook_get_signal - deliver synthetic signal to traced task
- * @task: @current
- * @regs: task_pt_regs(@current)
- * @info: details of synthetic signal
- * @return_ka: sigaction for synthetic signal
- *
- * Return zero to check for a real pending signal normally.
- * Return -1 after releasing the siglock to repeat the check.
- * Return a signal number to induce an artificial signal delivery,
- * setting *@info and *@return_ka to specify its details and behavior.
- *
- * The @return_ka->sa_handler value controls the disposition of the
- * signal, no matter the signal number. For %SIG_DFL, the return value
- * is a representative signal to indicate the behavior (e.g. %SIGTERM
- * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
- * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
- * reported will be @info->si_signo instead.
- *
- * Called with @task->sighand->siglock held, before dequeuing pending signals.
- */
-static inline int tracehook_get_signal(struct task_struct *task,
- struct pt_regs *regs,
- siginfo_t *info,
- struct k_sigaction *return_ka)
-{
- return 0;
-}
-
-/**
- * tracehook_finish_jctl - report about return from job control stop
- *
- * This is called by do_signal_stop() after wakeup.
- */
-static inline void tracehook_finish_jctl(void)
-{
-}
-
-#define DEATH_REAP -1
-#define DEATH_DELAYED_GROUP_LEADER -2
-
-/**
- * tracehook_notify_death - task is dead, ready to notify parent
- * @task: @current task now exiting
- * @death_cookie: value to pass to tracehook_report_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * A return value >= 0 means call do_notify_parent() with that signal
- * number. Negative return value can be %DEATH_REAP to self-reap right
- * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our
- * parent. Note that a return value of 0 means a do_notify_parent() call
- * that sends no signal, but still wakes up a parent blocked in wait*().
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline int tracehook_notify_death(struct task_struct *task,
- void **death_cookie, int group_dead)
-{
- if (task_detached(task))
- return task->ptrace ? SIGCHLD : DEATH_REAP;
-
- /*
- * If something other than our normal parent is ptracing us, then
- * send it a SIGCHLD instead of honoring exit_signal. exit_signal
- * only has special meaning to our real parent.
- */
- if (thread_group_empty(task) && !ptrace_reparented(task))
- return task->exit_signal;
-
- return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
-}
-
-/**
- * tracehook_report_death - task is dead and ready to be reaped
- * @task: @current task now exiting
- * @signal: return value from tracheook_notify_death()
- * @death_cookie: value passed back from tracehook_notify_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * Thread has just become a zombie or is about to self-reap. If positive,
- * @signal is the signal number just sent to the parent (usually %SIGCHLD).
- * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is
- * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
- * The @death_cookie was passed back by tracehook_notify_death().
- *
- * If normal reaping is not inhibited, @task->exit_state might be changing
- * in parallel.
- *
- * Called without locks.
- */
-static inline void tracehook_report_death(struct task_struct *task,
- int signal, void *death_cookie,
- int group_dead)
-{
-}
-
#ifdef TIF_NOTIFY_RESUME
/**
* set_notify_resume - cause tracehook_notify_resume() to be called