In phys flat mode, when set_xxx_irq_affinity() is used to balance an irq from
one cpu to another, __assign_irq_vector() bumps the last used vector and
allocates a new one. With enough set_xxx_irq_affinity() calls this uses up the
vector space, and we end up with the same vector being used on different cpus
for different irqs. That is not what we want: the same vector should only be
shared across cpus for different irqs when more than 0x240 irqs are needed.
To keep it simple, reuse the old vector when moving an irq from one cpu to
another instead of allocating a new one.
Signed-off-by: Yinghai Lu <[email protected]>
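
For reference only (not part of the patch): a self-contained sketch of the
reuse idea in plain C. Each cpu has a vector_irq[] table mapping vectors to
irqs (-1 means free); on migration, keep the old vector if it is still free on
the target cpu, and only fall back to a fresh vector otherwise. All constants,
array sizes and helpers below are made up for illustration and are not taken
from the kernel sources.

#include <stdio.h>

#define NR_CPUS      4
#define NR_VECTORS   256
#define FIRST_VECTOR 0x31
#define NR_IRQS      64

static int vector_irq[NR_CPUS][NR_VECTORS];   /* -1 means vector is free   */
static int irq_vector[NR_IRQS];               /* vector assigned to an irq */
static int irq_cpu[NR_IRQS];                  /* cpu handling an irq       */
static int next_vector = FIRST_VECTOR;

/* Move @irq to @new_cpu, reusing its old vector when possible. */
static int assign_irq_vector(int irq, int new_cpu)
{
	int old_vector = irq_vector[irq];
	int old_cpu = irq_cpu[irq];
	int vector;

	if (old_vector > 0 && vector_irq[new_cpu][old_vector] == -1) {
		/* Old vector is still free on the target cpu: reuse it. */
		vector_irq[old_cpu][old_vector] = -1;
		vector_irq[new_cpu][old_vector] = irq;
		irq_cpu[irq] = new_cpu;
		return old_vector;
	}

	/* Fall back to a fresh vector (wrap/collision handling omitted). */
	vector = next_vector++;
	if (old_vector > 0)
		vector_irq[old_cpu][old_vector] = -1;
	vector_irq[new_cpu][vector] = irq;
	irq_vector[irq] = vector;
	irq_cpu[irq] = new_cpu;
	return vector;
}

int main(void)
{
	int cpu, vec, v1, v2;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (vec = 0; vec < NR_VECTORS; vec++)
			vector_irq[cpu][vec] = -1;

	v1 = assign_irq_vector(5, 0);	/* first assignment: new vector */
	v2 = assign_irq_vector(5, 2);	/* migration: old vector reused */
	printf("irq 5: vector 0x%x on cpu 0, then 0x%x on cpu 2\n", v1, v2);
	return 0;
}

Without the reuse branch, every migration would allocate a fresh vector, which
is exactly the exhaustion the patch avoids.
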
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index b000017..3989fa5 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -624,11 +624,32 @@ static int __assign_irq_vector(int irq,
 	if (irq_vector[irq] > 0)
 		old_vector = irq_vector[irq];
 	if (old_vector > 0) {
+		cpumask_t domain, new_mask, old_mask;
+		int new_cpu, old_cpu;
 		cpus_and(*result, irq_domain[irq], mask);
 		if (!cpus_empty(*result))
 			return old_vector;
+
+		/* try to reuse vector for phys flat */
+		domain = vector_allocation_domain(cpu);
+		cpus_and(new_mask, domain, cpu_online_map);
+		for_each_cpu_mask(new_cpu, new_mask)
+			if (per_cpu(vector_irq, new_cpu)[old_vector] != -1)
+				goto new_vector;
+		/* We can reuse it */
+		cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+		for_each_cpu_mask(old_cpu, old_mask)
+			per_cpu(vector_irq, old_cpu)[old_vector] = -1;
+		for_each_cpu_mask(new_cpu, new_mask)
+			per_cpu(vector_irq, new_cpu)[old_vector] = irq;
+		irq_domain[irq] = domain;
+		cpus_and(*result, domain, mask);
+		return old_vector;
+
 	}
+new_vector:
+
 	for_each_cpu_mask(cpu, mask) {
 		cpumask_t domain;
 		int first, new_cpu;