Larger (compound) pages can now be reclaimed. Adjust the VM reclaim counters so that each page is accounted for by the number of base pages it represents rather than always as a single page.
Signed-off-by: Christoph Lameter <[email protected]>
---
mm/vmscan.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
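[Note: the hunks below rely on a compound_pages() helper that is introduced by an earlier patch in this series, not here. As an illustration only, a minimal sketch of the assumed semantics (an order-0 page counts as one page, the head of a compound page as 2^order base pages) could look like the following; the helper name is taken from the diff, but the body, in particular the use of compound_order(), is an assumption and not the series' actual code:

#include <linux/mm.h>

/*
 * Illustrative sketch only: return the number of base pages that a
 * (possibly compound) page stands for.  An ordinary order-0 page
 * counts as 1; the head of a compound page counts as 2^order pages.
 * How the order is read (compound_order() here) depends on the
 * kernel version and on the earlier patches in this series.
 */
static inline unsigned long compound_pages(struct page *page)
{
	if (!PageCompound(page))
		return 1;
	return 1UL << compound_order(page);
}

Keeping nr_scanned, nr_reclaimed, nr_taken and pgmoved in units of base pages keeps the reclaim statistics and the per-zone NR_ACTIVE/NR_INACTIVE updates consistent whether an LRU entry is a single page or a higher-order page.]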
Index: linux-2.6.21-rc7/mm/vmscan.c
===================================================================
--- linux-2.6.21-rc7.orig/mm/vmscan.c 2007-04-23 22:15:30.000000000 -0700
+++ linux-2.6.21-rc7/mm/vmscan.c 2007-04-23 22:19:21.000000000 -0700
@@ -471,14 +471,14 @@ static unsigned long shrink_page_list(st
 		VM_BUG_ON(PageActive(page));
-		sc->nr_scanned++;
+		sc->nr_scanned += compound_pages(page);
 		if (!sc->may_swap && page_mapped(page))
 			goto keep_locked;
 		/* Double the slab pressure for mapped and swapcache pages */
 		if (page_mapped(page) || PageSwapCache(page))
-			sc->nr_scanned++;
+			sc->nr_scanned += compound_pages(page);
 		if (PageWriteback(page))
 			goto keep_locked;
@@ -581,7 +581,7 @@ static unsigned long shrink_page_list(st
 free_it:
 		unlock_page(page);
-		nr_reclaimed++;
+		nr_reclaimed += compound_pages(page);
 		if (!pagevec_add(&freed_pvec, page))
 			__pagevec_release_nonlru(&freed_pvec);
 		continue;
@@ -627,7 +627,7 @@ static unsigned long isolate_lru_pages(u
 	struct page *page;
 	unsigned long scan;
-	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
+	for (scan = 0; scan < nr_to_scan && !list_empty(src); ) {
 		struct list_head *target;
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
@@ -644,10 +644,11 @@ static unsigned long isolate_lru_pages(u
 			 */
 			ClearPageLRU(page);
 			target = dst;
-			nr_taken++;
+			nr_taken += compound_pages(page);
 		} /* else it is being freed elsewhere */
 		list_add(&page->lru, target);
+		scan += compound_pages(page);
 	}
 	*scanned = scan;
@@ -856,7 +857,7 @@ force_reclaim_mapped:
 		ClearPageActive(page);
 		list_move(&page->lru, &zone->inactive_list);
-		pgmoved++;
+		pgmoved += compound_pages(page);
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
@@ -884,7 +885,7 @@ force_reclaim_mapped:
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
 		list_move(&page->lru, &zone->active_list);
-		pgmoved++;
+		pgmoved += compound_pages(page);
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
--