On high-end systems (1024 or so CPUs), the struct ccupdate_struct that
do_tune_cpucache() keeps on the stack, which contains an NR_CPUS-sized array of
array_cache pointers, can potentially cause a stack overflow. Fix the stack usage
by allocating the structure with kmalloc() instead.
Signed-off-by: Suresh Siddha <[email protected]>
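
For scale, the stack consumer here is the NR_CPUS-sized pointer array embedded in
struct ccupdate_struct. The userspace sketch below is only illustrative: the struct
mirrors the definition in 2.6.18 mm/slab.c, while the 1024-CPU count and 8-byte
pointer size are assumptions.

#include <stdio.h>

#define NR_CPUS 1024	/* assumed: a 1024-CPU configuration */

struct kmem_cache;	/* opaque stand-ins; only the pointer sizes matter here */
struct array_cache;

/* Mirrors the structure do_tune_cpucache() used to declare on the stack. */
struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[NR_CPUS];
};

int main(void)
{
	/* With 8-byte pointers: 8 + 1024 * 8 = 8200 bytes. */
	printf("sizeof(struct ccupdate_struct) = %zu bytes\n",
	       sizeof(struct ccupdate_struct));
	return 0;
}

That works out to a bit over 8 KB, roughly the size of an entire kernel stack on
many 64-bit configurations. With the patch below, only a single pointer lives on
the stack and the structure itself comes from kmalloc().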
--- linux-2.6.18-rc3/mm/slab.c~	2006-08-03 15:00:40.634997600 -0700
+++ linux-2.6.18-rc3/mm/slab.c	2006-08-03 16:22:41.799866824 -0700
@@ -3603,22 +3603,27 @@ static void do_ccupdate_local(void *info
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared)
 {
-	struct ccupdate_struct new;
+	struct ccupdate_struct *new;
 	int i, err;
 
-	memset(&new.new, 0, sizeof(new.new));
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
+
+	memset(&new->new, 0, sizeof(new->new));
 	for_each_online_cpu(i) {
-		new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
+		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
 						batchcount);
-		if (!new.new[i]) {
+		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
-				kfree(new.new[i]);
+				kfree(new->new[i]);
+			kfree(new);
 			return -ENOMEM;
 		}
 	}
-	new.cachep = cachep;
+	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
+	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
@@ -3626,7 +3631,7 @@ static int do_tune_cpucache(struct kmem_
 	cachep->shared = shared;
 
 	for_each_online_cpu(i) {
-		struct array_cache *ccold = new.new[i];
+		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
@@ -3641,6 +3646,8 @@ static int do_tune_cpucache(struct kmem_
 		       cachep->name, -err);
 		BUG();
 	}
+
+	kfree(new);
 	return 0;
 }
 