zap_threads() iterates over all threads to find the ones which
share current->mm. All threads in a thread group share the same
->mm, so we can skip an entire thread group if it has a different ->mm.
This patch moves the killing of a thread group into the newly added
zap_process() function. This looks like an unnecessary complication,
but it is used by further patches.
Signed-off-by: Oleg Nesterov <[email protected]>
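
For illustration only, not part of the patch: a minimal user-space model of
the new traversal. "struct task" and "mm_id" below are made-up stand-ins for
task_struct and ->mm; dead (->mm == NULL) threads, locking and the ptrace
handling are ignored. It only shows why checking one thread per group is
enough to either zap or skip the whole group.

#include <stdio.h>

/* Hypothetical stand-in for task_struct: the threads of one group form a
 * circular list and all share the same mm_id (like ->mm). */
struct task {
	int mm_id;
	struct task *next_thread;
};

/* Like zap_process(): walk one thread group and "kill" (here: count)
 * every thread that has the mm we are dumping. */
static int zap_group(struct task *start, int mm)
{
	struct task *t = start;
	int killed = 0;

	do {
		if (t->mm_id == mm)
			killed++;	/* kernel: force_sig_specific(SIGKILL, t) */
	} while ((t = t->next_thread) != start);

	return killed;
}

int main(void)
{
	/* Group A: three threads sharing mm 1.  Group B: two threads, mm 2. */
	struct task a3 = { 1, NULL }, a2 = { 1, &a3 }, a1 = { 1, &a2 };
	struct task b2 = { 2, NULL }, b1 = { 2, &b2 };
	struct task *groups[2];
	int dumping_mm = 1, killed = 0;
	unsigned i;

	a3.next_thread = &a1;
	b2.next_thread = &b1;
	groups[0] = &a1;
	groups[1] = &b1;

	/* Like the new zap_threads(): look at one thread per group.  All
	 * threads of a group share the mm, so a mismatch skips the group
	 * without walking its other threads. */
	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		struct task *p = groups[i];
		if (p->mm_id == dumping_mm)
			killed += zap_group(p, dumping_mm);
	}

	printf("threads signalled: %d\n", killed);	/* prints 3 */
	return 0;
}
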
--- rc1/fs/exec.c~1_ZAPT	2006-04-03 16:21:25.000000000 +0400
+++ rc1/fs/exec.c	2006-04-06 15:10:28.000000000 +0400
@@ -1384,6 +1384,22 @@ static void format_corename(char *corena
 	*out_ptr = 0;
 }
 
+static void zap_process(struct task_struct *start, int *ptraced)
+{
+	struct task_struct *t;
+
+	t = start;
+	do {
+		if (t != current && t->mm) {
+			t->mm->core_waiters++;
+			force_sig_specific(SIGKILL, t);
+			if (unlikely(t->ptrace) &&
+			    unlikely(t->parent->mm == t->mm))
+				*ptraced = 1;
+		}
+	} while ((t = next_thread(t)) != start);
+}
+
 static void zap_threads (struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
@@ -1401,16 +1417,16 @@ static void zap_threads (struct mm_struc
 	}
 
 	read_lock(&tasklist_lock);
-	do_each_thread(g,p)
-		if (mm == p->mm && p != tsk) {
-			force_sig_specific(SIGKILL, p);
-			mm->core_waiters++;
-			if (unlikely(p->ptrace) &&
-			    unlikely(p->parent->mm == mm))
-				traced = 1;
-		}
-	while_each_thread(g,p);
-
+	for_each_process(g) {
+		p = g;
+		do {
+			if (p->mm) {
+				if (p->mm == mm)
+					zap_process(p, &traced);
+				break;
+			}
+		} while ((p = next_thread(p)) != g);
+	}
 	read_unlock(&tasklist_lock);
 
 	if (unlikely(traced)) {