[Patch:004/005] wait_table and zonelist initialization for memory hotadd (wait_table initialization)

The wait_table is initialized according to the zone size at boot time. But when memory hotplug is enabled, the maximum zone size cannot be known in advance: it may change at any time, and resizing the wait_table afterwards is hard.

So the kernel allocates and initializes the wait_table at its maximum size from the start.
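
For illustration only, here is a minimal standalone userspace sketch (not part of the patch; traditional_nr_entries() and hotplug_nr_entries() are hypothetical names that mirror the logic of the kernel's wait_table_hash_nr_entries()) comparing the two sizing strategies:

#include <stdio.h>

#define PAGES_PER_WAITQUEUE	256

/*
 * Traditional boot-time sizing: one wait queue per 256 pages, rounded
 * up to a power of two, clamped to [4, 4096] (mirrors mm/page_alloc.c).
 */
static unsigned long traditional_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;
	while (size < pages)
		size <<= 1;
	if (size > 4096)
		size = 4096;
	return size < 4 ? 4 : size;
}

/* Hotplug sizing from this patch: always the maximum, the zone may grow. */
static unsigned long hotplug_nr_entries(unsigned long pages)
{
	(void)pages;		/* zone size is irrelevant here */
	return 4096UL;
}

int main(void)
{
	unsigned long zone_pages = 131072;	/* a 512MB zone, 4K pages */

	printf("traditional: %lu entries\n", traditional_nr_entries(zone_pages));
	printf("hotplug    : %lu entries\n", hotplug_nr_entries(zone_pages));
	return 0;
}

With 4K pages, the traditional sizing only reaches the 4096-entry ceiling once a zone holds (512K + 256) pages, roughly 2GB of memory; below that, the fixed hotplug table is deliberately oversized, which is the trade-off the comment in the patch spells out.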

Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>
Signed-off-by: Yasunori Goto <[email protected]>

 mm/page_alloc.c |   47 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 41 insertions(+), 6 deletions(-)

Index: pgdat10/mm/page_alloc.c
===================================================================
--- pgdat10.orig/mm/page_alloc.c	2006-04-11 15:15:36.000000000 +0900
+++ pgdat10/mm/page_alloc.c	2006-04-11 19:03:23.000000000 +0900
@@ -1786,6 +1786,7 @@ void __init build_all_zonelists(void)
  */
 #define PAGES_PER_WAITQUEUE	256
 
+#ifndef CONFIG_MEMORY_HOTPLUG
 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
 {
 	unsigned long size = 1;
@@ -1804,6 +1805,33 @@ static inline unsigned long wait_table_h
 
 	return max(size, 4UL);
 }
+#else
+/*
+ * XXX: Because the zone size may change with memory hot-add, it is
+ *	hard to determine a suitable wait_table size in the traditional
+ *	way, so we always use the maximum number of entries for now.
+ *
+ *	  The max wait table size = 4096 x sizeof(wait_queue_head_t) bytes.
+ *	    e.g.:
+ *	      i386 (preemption config)    : 4096 x 16 = 64KB.
+ *	      ia64, x86-64 (no preemption): 4096 x 20 = 80KB.
+ *	      ia64, x86-64 (preemption)   : 4096 x 24 = 96KB.
+ *
+ *	  With the traditional sizing (see above), the maximum number
+ *	  of entries is reached once a zone holds (512K + 256) pages
+ *	  or more, which corresponds to:
+ *	    i386, x86-64, powerpc (4K page size): (2G + 1M) bytes.
+ *	    ia64 (16K page size)                : (8G + 4M) bytes.
+ *	    powerpc (64K page size)             : (32G + 16M) bytes.
+ *
+ *	  On systems with less memory than this, the wait_table is
+ *	  larger than it needs to be.
+ */
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
+{
+	return 4096UL;
+}
+#endif
 
 /*
  * This is an integer logarithm so that shifts can be used later
@@ -2072,10 +2100,11 @@ void __init setup_per_cpu_pageset(void)
 #endif
 
 static __meminit
-void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
 	struct pglist_data *pgdat = zone->zone_pgdat;
+	size_t alloc_size;
 
 	/*
 	 * The per-page waitqueue mechanism uses hashed waitqueues
@@ -2085,12 +2114,32 @@ void zone_wait_table_init(struct zone *z
 		 wait_table_hash_nr_entries(zone_size_pages);
 	zone->wait_table_bits =
 		wait_table_bits(zone->wait_table_hash_nr_entries);
-	zone->wait_table = (wait_queue_head_t *)
-		alloc_bootmem_node(pgdat, zone->wait_table_hash_nr_entries
-					* sizeof(wait_queue_head_t));
+	alloc_size = zone->wait_table_hash_nr_entries
+					* sizeof(wait_queue_head_t);
+
+	if (system_state == SYSTEM_BOOTING) {
+		zone->wait_table = (wait_queue_head_t *)
+			alloc_bootmem_node(pgdat, alloc_size);
+	} else {
+		/*
+		 * XXX: This path is taken when a zone whose size was 0
+		 *      gains memory through memory hot-add.  It may also
+		 *      mean that a whole new node was hot-added, and in
+		 *      that case vmalloc() cannot use the new node's
+		 *      memory, because the wait_table must be initialized
+		 *      before the new node itself can be used.  Allocating
+		 *      the wait_table from the new node's own memory will
+		 *      require further work.
+		 */
+		zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
+	}
+	if (!zone->wait_table)
+		return -ENOMEM;
 
 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 		init_waitqueue_head(zone->wait_table + i);
+
+	return 0;
 }
 
 static __meminit void zone_pcp_init(struct zone *zone)
@@ -2117,8 +2166,10 @@ __meminit int init_currently_empty_zone(
 					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
-
-	zone_wait_table_init(zone, size);
+	int ret;
+	ret = zone_wait_table_init(zone, size);
+	if (ret)
+		return ret;
 	pgdat->nr_zones = zone_idx(zone) + 1;
 
 	zone->zone_start_pfn = zone_start_pfn;
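
For reference, a minimal userspace sketch of the allocation pattern the patch introduces (demo_bootmem_alloc() and demo_vmalloc() are hypothetical stand-ins for alloc_bootmem_node()/vmalloc(); only the control flow is meant to match the patched zone_wait_table_init()):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

enum system_state { SYSTEM_BOOTING, SYSTEM_RUNNING };
static enum system_state system_state = SYSTEM_BOOTING;

/* Demo stand-ins for alloc_bootmem_node() and vmalloc(). */
static void *demo_bootmem_alloc(size_t size) { return malloc(size); }
static void *demo_vmalloc(size_t size) { return malloc(size); }

/*
 * Same shape as the patched zone_wait_table_init(): pick the allocator
 * by how far boot has progressed, and report failure to the caller
 * instead of ignoring it.
 */
static int wait_table_init(void **table, unsigned long nr_entries,
			   size_t entry_size)
{
	size_t alloc_size = nr_entries * entry_size;

	if (system_state == SYSTEM_BOOTING)
		*table = demo_bootmem_alloc(alloc_size);	/* boot path */
	else
		*table = demo_vmalloc(alloc_size);		/* hot-add path */

	if (!*table)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	void *table;

	if (wait_table_init(&table, 4096, 24))	/* 4096 entries, e.g. 24B each */
		return 1;
	printf("allocated %lu bytes for wait_table\n", 4096UL * 24);
	free(table);
	return 0;
}

The behavioural change to note is the return type: zone_wait_table_init() used to return void, so an allocation failure during hot-add could not be reported; with this patch, init_currently_empty_zone() can propagate -ENOMEM back to the hot-add path.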

-- 
Yasunori Goto 

