Avoid pushing cpumask variables onto the stack when calling set_cpus_allowed()
when NR_CPUS > BITS_PER_LONG, by passing the cpumask_t argument as a pointer.
Signed-off-by: Mike Travis <[email protected]>
---
include/linux/sched.h | 11 +++++++++--
kernel/sched.c | 22 ++++++++++++++++------
2 files changed, 25 insertions(+), 8 deletions(-)
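
(Not part of the patch: a small standalone sketch of the same
pass-a-pointer-behind-a-macro pattern, for illustration only. The type name,
macro names, and sizes below are made up and not taken from the kernel. When
the mask is wider than one machine word, the wrapper macro takes the address
of the caller's variable so no large copy lands on the stack; when it fits in
a single long, passing by value costs no more than passing a pointer.)

/* illustrative sketch only -- "examplemask_t" and its size are hypothetical */
#include <string.h>

#define EXAMPLEMASK_LONGS 64		/* pretend: a 4096-bit mask */

typedef struct { unsigned long bits[EXAMPLEMASK_LONGS]; } examplemask_t;

#if EXAMPLEMASK_LONGS > 1
/* wide mask: pass by pointer; the callee only reads through it */
# define set_example_mask(dst, m) __set_example_mask((dst), &(m))
static int __set_example_mask(examplemask_t *dst, const examplemask_t *m)
{
	memcpy(dst, m, sizeof(*dst));
	return 0;
}
#else
/* mask fits in one long: pass by value; the copy is a single word */
# define set_example_mask(dst, m) __set_example_mask((dst), (m))
static int __set_example_mask(examplemask_t *dst, examplemask_t m)
{
	*dst = m;
	return 0;
}
#endif

(As in the patch below, this only works because the callee never modifies the
mask it is handed, so taking the caller's variable by reference is safe.)
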
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1435,8 +1435,15 @@ static inline void put_task_struct(struc
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-#ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+#if defined(CONFIG_SMP)
+# if NR_CPUS > BITS_PER_LONG
+ /* avoid pushing cpumask variable onto the stack */
+# define set_cpus_allowed(p, new_mask) __set_cpus_allowed((p), &(new_mask))
+ extern int __set_cpus_allowed(struct task_struct *p, cpumask_t *new_mask);
+# else
+# define set_cpus_allowed(p, new_mask) __set_cpus_allowed((p), (new_mask))
+ extern int __set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+# endif
#else
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5036,7 +5036,16 @@ static inline void sched_init_granularit
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+#if NR_CPUS > BITS_PER_LONG
+/*
+ * avoid pushing a large CPU count cpumask_t variable onto stack
+ * (relies on the fact that new_mask is a const arg to subfunctions)
+ */
+#define CPU_MASK_VAR(v) *v
+#else
+#define CPU_MASK_VAR(v) v
+#endif
+int __set_cpus_allowed(struct task_struct *p, cpumask_t CPU_MASK_VAR(new_mask))
{
struct migration_req req;
unsigned long flags;
@@ -5044,17 +5053,17 @@ int set_cpus_allowed(struct task_struct
int ret = 0;
rq = task_rq_lock(p, &flags);
- if (!cpus_intersects(new_mask, cpu_online_map)) {
+ if (!cpus_intersects(CPU_MASK_VAR(new_mask), cpu_online_map)) {
ret = -EINVAL;
goto out;
}
- p->cpus_allowed = new_mask;
+ p->cpus_allowed = CPU_MASK_VAR(new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpu_isset(task_cpu(p), new_mask))
+ if (cpu_isset(task_cpu(p), CPU_MASK_VAR(new_mask)))
goto out;
- if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+ if (migrate_task(p, any_online_cpu(CPU_MASK_VAR(new_mask)), &req)) {
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, &flags);
wake_up_process(rq->migration_thread);
@@ -5067,7 +5076,8 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+/* keep CPU_MASK_VAR local to __set_cpus_allowed */
+EXPORT_SYMBOL_GPL(__set_cpus_allowed);
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing