(Note: the patch is also attached, since the inline version is certain to get line-wrapped.)

This is an alternative to the (i386) boot-time ioremap approaches. It is more architecture-independent (although architecture-dependent code needs adjustments to make use of it, which with this patch happens only for i386 and x86_64) and doesn't require a separate boot-time interface.

Signed-off-by: Jan Beulich <[email protected]>

diff -Npru 2.6.13/arch/i386/kernel/setup.c 2.6.13-early-ioremap/arch/i386/kernel/setup.c
--- 2.6.13/arch/i386/kernel/setup.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/i386/kernel/setup.c	2005-09-01 13:58:18.000000000 +0200
@@ -1119,6 +1119,9 @@ static unsigned long __init setup_memory
 	}
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 		pages_to_mb(highend_pfn - highstart_pfn));
+	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
+#else
+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
 			pages_to_mb(max_low_pfn));
diff -Npru 2.6.13/arch/i386/mm/init.c 2.6.13-early-ioremap/arch/i386/mm/init.c
--- 2.6.13/arch/i386/mm/init.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/i386/mm/init.c	2005-09-01 13:58:34.000000000 +0200
@@ -559,12 +559,6 @@ void __init mem_init(void)
 
 	set_max_mapnr_init();
 
-#ifdef CONFIG_HIGHMEM
-	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-#endif
-
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
diff -Npru 2.6.13/arch/i386/mm/ioremap.c 2.6.13-early-ioremap/arch/i386/mm/ioremap.c
--- 2.6.13/arch/i386/mm/ioremap.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/i386/mm/ioremap.c	2005-09-01 14:00:53.000000000 +0200
@@ -257,7 +257,8 @@ void iounmap(volatile void __iomem *addr
 	}
 out_unlock:
 	write_unlock(&vmlist_lock);
-	kfree(p);
+	if (p)
+		free_vm_area(p);
 }
 EXPORT_SYMBOL(iounmap);
 
diff -Npru 2.6.13/arch/i386/mm/pgtable.c 2.6.13-early-ioremap/arch/i386/mm/pgtable.c
--- 2.6.13/arch/i386/mm/pgtable.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/i386/mm/pgtable.c	2005-09-01 11:32:11.000000000 +0200
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/bootmem.h>
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 
@@ -148,7 +149,9 @@ void __set_fixmap (enum fixed_addresses
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	if (likely(totalram_pages))
+		return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	return alloc_bootmem_pages(PAGE_SIZE);
 }
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
diff -Npru 2.6.13/arch/x86_64/kernel/setup.c 2.6.13-early-ioremap/arch/x86_64/kernel/setup.c
--- 2.6.13/arch/x86_64/kernel/setup.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/x86_64/kernel/setup.c	2005-09-01 15:34:02.000000000 +0200
@@ -561,6 +561,7 @@ void __init setup_arch(char **cmdline_p)
 	 * we are rounding upwards:
 	 */
 	end_pfn = e820_end_of_ram();
+	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
 
 	check_efer();
 
diff -Npru 2.6.13/arch/x86_64/mm/init.c 2.6.13-early-ioremap/arch/x86_64/mm/init.c
--- 2.6.13/arch/x86_64/mm/init.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/x86_64/mm/init.c	2005-09-01 11:32:11.000000000 +0200
@@ -427,7 +427,6 @@ void __init mem_init(void)
 	max_low_pfn = end_pfn;
 	max_pfn = end_pfn;
 	num_physpages = end_pfn;
-	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
 
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
diff -Npru 2.6.13/arch/x86_64/mm/ioremap.c 2.6.13-early-ioremap/arch/x86_64/mm/ioremap.c
--- 2.6.13/arch/x86_64/mm/ioremap.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/arch/x86_64/mm/ioremap.c	2005-09-01 16:47:08.000000000 +0200
@@ -266,5 +266,6 @@ void iounmap(volatile void __iomem *addr
 	else if (p->flags >> 20)
 		ioremap_change_attr(p->phys_addr, p->size, 0);
 	write_unlock(&vmlist_lock);
-	kfree(p);
+	if (p)
+		free_vm_area(p);
 }
diff -Npru 2.6.13/include/linux/vmalloc.h 2.6.13-early-ioremap/include/linux/vmalloc.h
--- 2.6.13/include/linux/vmalloc.h	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/include/linux/vmalloc.h	2005-09-01 17:47:16.000000000 +0200
@@ -8,6 +8,7 @@
 #define VM_IOREMAP	0x00000001	/* ioremap() and friends */
 #define VM_ALLOC	0x00000002	/* vmalloc() */
 #define VM_MAP		0x00000004	/* vmap()ed pages */
+#define VM_BOOTMEM	0x00000008	/* allocated from bootmem */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 struct vm_struct {
@@ -42,6 +43,7 @@ extern struct vm_struct *__get_vm_area(u
 					unsigned long start, unsigned long end);
 extern struct vm_struct *remove_vm_area(void *addr);
 extern struct vm_struct *__remove_vm_area(void *addr);
+extern void free_vm_area(struct vm_struct *area);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
 extern void unmap_vm_area(struct vm_struct *area);
diff -Npru 2.6.13/mm/page_alloc.c 2.6.13-early-ioremap/mm/page_alloc.c
--- 2.6.13/mm/page_alloc.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/mm/page_alloc.c	2005-09-02 10:02:41.000000000 +0200
@@ -987,6 +987,15 @@ fastcall unsigned long get_zeroed_page(u
 {
 	struct page * page;
 
+	if (unlikely(!totalram_pages)) {
+		/* This exists solely for page table allocation; pages
+		   allocated this way must never be freed! */
+		void *address = alloc_bootmem_pages(PAGE_SIZE);
+
+		clear_page(address);
+		return (unsigned long) address;
+	}
+
 	/*
 	 * get_zeroed_page() returns a 32-bit address, which cannot represent
 	 * a highmem page
diff -Npru 2.6.13/mm/vmalloc.c 2.6.13-early-ioremap/mm/vmalloc.c
--- 2.6.13/mm/vmalloc.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-early-ioremap/mm/vmalloc.c	2005-09-02 10:09:12.000000000 +0200
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/bootmem.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 
@@ -179,20 +180,24 @@ struct vm_struct *__get_vm_area(unsigned
 	}
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
-
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
-	if (unlikely(!area))
-		return NULL;
-
-	if (unlikely(!size)) {
-		kfree (area);
+	if (unlikely(!size))
 		return NULL;
-	}
-
 	/*
 	 * We always allocate a guard page.
 	 */
 	size += PAGE_SIZE;
+	if (unlikely(!size))
+		return NULL;
+
+	BUG_ON(flags & VM_BOOTMEM);
+	if (likely(malloc_sizes->cs_cachep))
+		area = kmalloc(sizeof(*area), GFP_KERNEL);
+	else {
+		area = alloc_bootmem(sizeof(*area));
+		flags |= VM_BOOTMEM;
+	}
+	if (unlikely(!area))
+		return NULL;
 
 	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
@@ -227,7 +232,7 @@ found:
 
 out:
 	write_unlock(&vmlist_lock);
-	kfree(area);
+	free_vm_area(area);
 	if (printk_ratelimit())
 		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
 	return NULL;
@@ -288,6 +293,14 @@ struct vm_struct *remove_vm_area(void *a
 	return v;
 }
 
+void free_vm_area(struct vm_struct *area)
+{
+	if (likely(!(area->flags & VM_BOOTMEM)))
+		kfree(area);
+	else if (!malloc_sizes->cs_cachep)
+		free_bootmem(virt_to_phys(area), sizeof(*area));
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
@@ -324,7 +337,7 @@ void __vunmap(void *addr, int deallocate
 		kfree(area->pages);
 	}
 
-	kfree(area);
+	free_vm_area(area);
 	return;
 }
 
@@ -413,7 +426,7 @@ void *__vmalloc_area(struct vm_struct *a
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
-		kfree(area);
+		free_vm_area(area);
 		return NULL;
 	}
 	memset(area->pages, 0, array_size);
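
For illustration only (this sketch is mine, not part of the patch; the function name and parameters are made up): with the fallback paths above, an arch can call plain ioremap() from its setup_arch() path, i.e. before mem_init() runs. While totalram_pages is still zero, __get_vm_area() takes its vm_struct from bootmem (marking it VM_BOOTMEM), any page-table pages come from bootmem via get_zeroed_page()/pte_alloc_one_kernel(), and a later iounmap() goes through free_vm_area(), which picks the deallocator matching the original allocation. (This is also why the high_memory assignments move earlier: ioremap() compares the physical address against high_memory, so it must be valid at that point.)

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/io.h>

static int __init early_map_firmware_table(unsigned long phys,
					   unsigned long size)
{
	void __iomem *base;

	/* Safe even before mem_init(): the vm_struct and any page
	   tables fall back to bootmem while totalram_pages == 0. */
	base = ioremap(phys, size);
	if (!base)
		return -ENOMEM;

	/* ... access the table through readl(base + offset) ... */

	/* iounmap() ends up in free_vm_area(), which uses kfree() or
	   free_bootmem() depending on how the vm_struct was obtained. */
	iounmap(base);
	return 0;
}
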
Attachment: linux-2.6.13-early-ioremap.patch