This patch replaces for_each_cpu() with for_each_possible_cpu() under arch/i386.
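
For context, the rename is mechanical: the old for_each_cpu() already walked cpu_possible_map, and the new name just makes that explicit, in contrast to for_each_online_cpu(), which walks cpu_online_map. A minimal sketch of the intended usage (illustrative only; example_count_online() is not part of this patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/*
 * Walk every possible CPU, including CPUs that are offline now but may
 * be hot-plugged later.  Per-CPU allocation and teardown (such as the
 * kfree() loops touched by this patch) must cover the whole possible
 * map, not just the CPUs that happen to be online.
 */
static int example_count_online(void)
{
	int cpu, online = 0;

	for_each_possible_cpu(cpu) {
		if (cpu_online(cpu))	/* online map is a subset of the possible map */
			online++;
	}
	return online;
}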
 kernel/acpi/boot.c                      |    2 +-
 kernel/cpu/cpufreq/acpi-cpufreq.c       |    6 +++---
 kernel/cpu/cpufreq/powernow-k8.c        |    2 +-
 kernel/cpu/cpufreq/speedstep-centrino.c |    6 +++---
 kernel/io_apic.c                        |    4 ++--
 kernel/nmi.c                            |    6 +++---
 oprofile/nmi_int.c                      |    8 +++++---
 7 files changed, 18 insertions(+), 16 deletions(-)
Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>
Index: linux-2.6.16-rc6-mm1/arch/i386/kernel/io_apic.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/kernel/io_apic.c
+++ linux-2.6.16-rc6-mm1/arch/i386/kernel/io_apic.c
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
unsigned long imbalance = 0;
cpumask_t allowed_mask, target_cpu_mask, tmp;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
int package_index;
CPU_IRQ(i) = 0;
if (!cpu_online(i))
@@ -632,7 +632,7 @@ static int __init balanced_irq_init(void
else
printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
failed:
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
kfree(irq_cpu_data[i].irq_delta);
irq_cpu_data[i].irq_delta = NULL;
kfree(irq_cpu_data[i].last_irq);
Index: linux-2.6.16-rc6-mm1/arch/i386/kernel/acpi/boot.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/kernel/acpi/boot.c
+++ linux-2.6.16-rc6-mm1/arch/i386/kernel/acpi/boot.c
@@ -571,7 +571,7 @@ int acpi_unmap_lsapic(int cpu)
{
#ifndef CONFIG_LIMIT_CPUS
int i;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
x86_acpiid_to_apicid[i] = -1;
break;
Index: linux-2.6.16-rc6-mm1/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ linux-2.6.16-rc6-mm1/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -370,11 +370,11 @@ static int acpi_cpufreq_early_init_acpi(
dprintk("acpi_cpufreq_early_init\n");
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
- for_each_cpu(j) {
+ for_each_possible_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
@@ -582,7 +582,7 @@ acpi_cpufreq_exit (void)
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
kfree(acpi_perf_data[i]);
acpi_perf_data[i] = NULL;
}
Index: linux-2.6.16-rc6-mm1/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ linux-2.6.16-rc6-mm1/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1136,7 +1136,7 @@ static int __cpuinit powernowk8_init(voi
{
unsigned int i, supported_cpus = 0;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
if (check_supported_cpu(i))
supported_cpus++;
}
Index: linux-2.6.16-rc6-mm1/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ linux-2.6.16-rc6-mm1/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -365,11 +365,11 @@ static int centrino_cpu_early_init_acpi(
unsigned int i, j;
struct acpi_processor_performance *data;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
- for_each_cpu(j) {
+ for_each_possible_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
@@ -807,7 +807,7 @@ static void __exit centrino_exit(void)
cpufreq_unregister_driver(&centrino_driver);
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
- for_each_cpu(j) {
+ for_each_possible_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
Index: linux-2.6.16-rc6-mm1/arch/i386/kernel/nmi.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/kernel/nmi.c
+++ linux-2.6.16-rc6-mm1/arch/i386/kernel/nmi.c
@@ -140,12 +140,12 @@ static int __init check_nmi_watchdog(voi
if (nmi_watchdog == NMI_LOCAL_APIC)
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
- for_each_cpu(cpu)
+ for_each_possible_cpu(cpu)
prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
/* Check cpu_callin_map here because that is set
after the timer is started. */
@@ -512,7 +512,7 @@ void touch_nmi_watchdog (void)
* Just reset the alert counters, (other CPUs might be
* spinning on locks we hold):
*/
- for_each_cpu(i)
+ for_each_possible_cpu(i)
alert_counter[i] = 0;
/*
Index: linux-2.6.16-rc6-mm1/arch/i386/oprofile/nmi_int.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/arch/i386/oprofile/nmi_int.c
+++ linux-2.6.16-rc6-mm1/arch/i386/oprofile/nmi_int.c
@@ -122,10 +122,12 @@ static void nmi_save_registers(void * du
static void free_msrs(void)
{
int i;
- for_each_cpu(i) {
- kfree(cpu_msrs[i].counters);
+ for_each_possible_cpu(i) {
+ if (cpu_msrs[i].counters)
+ kfree(cpu_msrs[i].counters);
cpu_msrs[i].counters = NULL;
- kfree(cpu_msrs[i].controls);
+ if (cpu_msrs[i].controls)
+ kfree(cpu_msrs[i].controls);
cpu_msrs[i].controls = NULL;
}
}