Hmmmm... Got this to run on an ia64 big iron. One problem is the sizing of
the pool: it is fixed at compile time, and somehow it needs to become dynamic.
Apply this fix on top of the others.
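
To make the sizing problem concrete, here is a minimal userspace sketch of
the fixed-size pool (a hypothetical stand-in, not the kernel code;
MAX_NUMNODES is hardwired here, while the kernel derives it from the config):

#include <stdio.h>

#define MAX_NUMNODES 64	/* assumed stand-in for the config value */

#ifdef CONFIG_NUMA
#define PER_CPU_ALLOC_SIZE (32768 + MAX_NUMNODES * 512)
#else
#define PER_CPU_ALLOC_SIZE 32768
#endif

#define UNIT_SIZE sizeof(unsigned long long)
#define UNITS_PER_CPU (PER_CPU_ALLOC_SIZE / UNIT_SIZE)

int main(void)
{
	/* The pool size is settled here, at compile time; a machine
	 * with more nodes or heavier per-cpu demand cannot grow it. */
	printf("pool: %d bytes, %zu units per cpu\n",
	       PER_CPU_ALLOC_SIZE, (size_t)UNITS_PER_CPU);
	return 0;
}

Once every unit in cpu_alloc_map is USED, cpu_alloc() has nowhere to go but
NULL; a dynamic size would track the boot-time node count instead of
guessing at it with a constant.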
---
include/asm-ia64/page.h | 2 +-
include/asm-ia64/percpu.h | 9 ++++++---
mm/allocpercpu.c | 12 ++++++++++--
3 files changed, 17 insertions(+), 6 deletions(-)
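
A few notes on the hunks below. The page.h change raises PERCPU_PAGE_SHIFT
from 16 to 20, growing the maximum per-CPU area from 64KB to 1MB. In
allocpercpu.c, besides the NUMA-aware pool size, the exhaustion path now
drops cpu_alloc_map_lock before returning NULL, so a failed allocation no
longer leaves the spinlock held, and cpu_area's element type becomes
unsigned long long to match UNIT_SIZE. The percpu.h change defines
this_cpu_offset() as 0 for both SMP and UP: ia64 maps each CPU's per-cpu
area at the same fixed virtual address, so local access needs no per-CPU
offset. A rough sketch of that idea (names are hypothetical, not the
kernel's):

/* Generic scheme: reach this CPU's copy by adding a per-CPU offset
 * to the canonical address of the variable. */
static void *local_ptr_generic(void *canonical, unsigned long offset)
{
	return (char *)canonical + offset;
}

/* ia64 with virtually mapped per-CPU areas: the mapping already
 * resolves the canonical address to this CPU's copy, so the offset
 * collapses to 0, which is what this_cpu_offset() now returns. */
static void *local_ptr_ia64(void *canonical)
{
	return local_ptr_generic(canonical, 0);
}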
Index: linux-2.6/mm/allocpercpu.c
===================================================================
--- linux-2.6.orig/mm/allocpercpu.c 2007-10-31 20:53:16.565486654 -0700
+++ linux-2.6/mm/allocpercpu.c 2007-10-31 21:00:27.553486484 -0700
@@ -28,7 +28,12 @@
/*
* Maximum allowed per cpu data per cpu
*/
+#ifdef CONFIG_NUMA
+#define PER_CPU_ALLOC_SIZE (32768 + MAX_NUMNODES * 512)
+#else
#define PER_CPU_ALLOC_SIZE 32768
+#endif
+
#define UNIT_SIZE sizeof(unsigned long long)
#define UNITS_PER_CPU (PER_CPU_ALLOC_SIZE / UNIT_SIZE)
@@ -37,7 +42,7 @@ enum unit_type { FREE, END, USED };
static u8 cpu_alloc_map[UNITS_PER_CPU] = { 1, };
static DEFINE_SPINLOCK(cpu_alloc_map_lock);
-static DEFINE_PER_CPU(int, cpu_area)[UNITS_PER_CPU];
+static DEFINE_PER_CPU(unsigned long long, cpu_area)[UNITS_PER_CPU];
#define CPU_DATA_OFFSET ((unsigned long)&per_cpu__cpu_area)
@@ -97,8 +102,11 @@ static void *cpu_alloc(unsigned long siz
while (start < UNITS_PER_CPU &&
cpu_alloc_map[start] != FREE)
start++;
- if (start == UNITS_PER_CPU)
+ if (start == UNITS_PER_CPU) {
+ spin_unlock(&cpu_alloc_map_lock);
+ printk(KERN_CRIT "Dynamic per cpu memory exhausted\n");
return NULL;
+ }
end = start + 1;
while (end < UNITS_PER_CPU && end - start < units &&
Index: linux-2.6/include/asm-ia64/page.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/page.h 2007-10-31 20:53:16.573486483 -0700
+++ linux-2.6/include/asm-ia64/page.h 2007-10-31 20:56:19.372870091 -0700
@@ -44,7 +44,7 @@
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
-#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SHIFT 20 /* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
Index: linux-2.6/include/asm-ia64/percpu.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/percpu.h 2007-10-31 20:53:30.424553062 -0700
+++ linux-2.6/include/asm-ia64/percpu.h 2007-10-31 20:53:36.248486656 -0700
@@ -40,6 +40,12 @@
#endif
/*
+ * This makes per-cpu access to the local area go through the
+ * virtually mapped per-CPU area.
+ */
+#define this_cpu_offset() 0
+
+/*
* Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
* external routine, to avoid include-hell.
*/
@@ -51,8 +57,6 @@ extern unsigned long __per_cpu_offset[NR
/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
-#define this_cpu_offset() __ia64_per_cpu_var(local_per_cpu_offset)
-
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
@@ -67,7 +71,6 @@ extern void *per_cpu_init(void);
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
#define per_cpu_init() (__phys_per_cpu_start)
-#define this_cpu_offset() 0
#endif /* SMP */
-