[patch 1/14]
--
SUSE Labs, Novell Inc.
Slightly optimise some page allocation and freeing functions by
taking advantage of knowing whether or not interrupts are disabled.
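
The idea: interrupt disabling moves out to the callers, which either already run with interrupts off or can disable them once around a batch of work, so the callees can take the plain spinlock. A minimal sketch of the pattern (illustrative only, with made-up caller()/callee() names; not part of the patch):

/*
 * Sketch: callee()'s callers guarantee interrupts are disabled,
 * so the cheaper spin_lock() replaces spin_lock_irqsave().
 */
static void callee(struct zone *zone)
{
        spin_lock(&zone->lock);         /* irqs already off */
        /* ... manipulate zone freelists ... */
        spin_unlock(&zone->lock);
}

static void caller(struct zone *zone)
{
        unsigned long flags;

        local_irq_save(flags);          /* disable irqs once, at the outer level */
        callee(zone);
        local_irq_restore(flags);
}

This saves a redundant flags save/restore on paths such as buffered_rmqueue(), which already runs under local_irq_save() when it calls rmqueue_bulk().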
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -373,11 +373,10 @@ static int
 free_pages_bulk(struct zone *zone, int count,
                 struct list_head *list, unsigned int order)
 {
-        unsigned long flags;
         struct page *page = NULL;
         int ret = 0;
 
-        spin_lock_irqsave(&zone->lock, flags);
+        spin_lock(&zone->lock);
         zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
         while (!list_empty(list) && count--) {
@@ -387,12 +386,13 @@ free_pages_bulk(struct zone *zone, int c
                 __free_pages_bulk(page, zone, order);
                 ret++;
         }
-        spin_unlock_irqrestore(&zone->lock, flags);
+        spin_unlock(&zone->lock);
         return ret;
 }
 
 void __free_pages_ok(struct page *page, unsigned int order)
 {
+        unsigned long flags;
         LIST_HEAD(list);
         int i;
 
@@ -410,7 +410,9 @@ void __free_pages_ok(struct page *page,
                 free_pages_check(__FUNCTION__, page + i);
         list_add(&page->lru, &list);
         kernel_map_pages(page, 1<<order, 0);
+        local_irq_save(flags);
         free_pages_bulk(page_zone(page), 1, &list, order);
+        local_irq_restore(flags);
 }
@@ -526,12 +528,11 @@ static struct page *__rmqueue(struct zon
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                         unsigned long count, struct list_head *list)
 {
-        unsigned long flags;
         int i;
         int allocated = 0;
         struct page *page;
 
-        spin_lock_irqsave(&zone->lock, flags);
+        spin_lock(&zone->lock);
         for (i = 0; i < count; ++i) {
                 page = __rmqueue(zone, order);
                 if (page == NULL)
@@ -539,7 +540,7 @@ static int rmqueue_bulk(struct zone *zon
                 allocated++;
                 list_add_tail(&page->lru, list);
         }
-        spin_unlock_irqrestore(&zone->lock, flags);
+        spin_unlock(&zone->lock);
         return allocated;
 }
 
@@ -576,6 +577,7 @@ void drain_remote_pages(void)
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
+        unsigned long flags;
         struct zone *zone;
         int i;
 
@@ -587,8 +589,10 @@ static void __drain_pages(unsigned int c
                         struct per_cpu_pages *pcp;
 
                         pcp = &pset->pcp[i];
+                        local_irq_save(flags);
                         pcp->count -= free_pages_bulk(zone, pcp->count,
                                         &pcp->list, 0);
+                        local_irq_restore(flags);
                 }
         }
 }
@@ -726,16 +730,14 @@ buffered_rmqueue(struct zone *zone, int
                 if (pcp->count <= pcp->low)
                         pcp->count += rmqueue_bulk(zone, 0,
                                         pcp->batch, &pcp->list);
-                if (pcp->count) {
+                if (likely(pcp->count)) {
                         page = list_entry(pcp->list.next, struct page, lru);
                         list_del(&page->lru);
                         pcp->count--;
                 }
                 local_irq_restore(flags);
                 put_cpu();
-        }
-
-        if (page == NULL) {
+        } else {
                 spin_lock_irqsave(&zone->lock, flags);
                 page = __rmqueue(zone, order);
                 spin_unlock_irqrestore(&zone->lock, flags);
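
For reference, after the final hunk buffered_rmqueue() ends up shaped roughly like this (a simplified sketch: cold-page handling, statistics and page preparation are elided, and the per-cpu lookup is shown for the !NUMA case):

static struct page *
buffered_rmqueue(struct zone *zone, int order, unsigned int gfp_flags)
{
        unsigned long flags;
        struct page *page = NULL;

        if (order == 0) {
                struct per_cpu_pages *pcp;

                pcp = &zone->pageset[get_cpu()].pcp[0];  /* hot list */
                local_irq_save(flags);
                if (pcp->count <= pcp->low)
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, &pcp->list);
                if (likely(pcp->count)) {
                        page = list_entry(pcp->list.next, struct page, lru);
                        list_del(&page->lru);
                        pcp->count--;
                }
                local_irq_restore(flags);
                put_cpu();
        } else {
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order);
                spin_unlock_irqrestore(&zone->lock, flags);
        }

        return page;
}

One behavioural note: with the else, an order-0 request whose per-cpu list stays empty even after rmqueue_bulk() now returns NULL rather than retrying under zone->lock; if the bulk refill found nothing, __rmqueue() would presumably fail too, hence the likely() annotation.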