[RFC PATCH 7/10] clean up the LRU array arithmetic

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Make the LRU array arithmetic more explicit by introducing named
offset constants (LRU_BASE, LRU_ACTIVE, LRU_FILE) and defining the
lru_list enum values in terms of them, instead of open-coding
differences like (LRU_INACTIVE_FILE - LRU_INACTIVE_ANON).  Hopefully
this will make the code a little easier to read and less prone to
future errors.

Signed-off-by: Rik van Riel <[email protected]>

Index: linux-2.6.23-mm1/include/linux/mm_inline.h
===================================================================
--- linux-2.6.23-mm1.orig/include/linux/mm_inline.h
+++ linux-2.6.23-mm1/include/linux/mm_inline.h
@@ -28,7 +28,7 @@ static inline int page_file_cache(struct
 		return 0;
 
 	/* The page is page cache backed by a normal filesystem. */
-	return (LRU_INACTIVE_FILE - LRU_INACTIVE_ANON);
+	return LRU_FILE;
 }
 
 static inline void
Index: linux-2.6.23-mm1/mm/swap.c
===================================================================
--- linux-2.6.23-mm1.orig/mm/swap.c
+++ linux-2.6.23-mm1/mm/swap.c
@@ -180,12 +180,12 @@ void fastcall activate_page(struct page 
 
 	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page)) {
-		int l = LRU_INACTIVE_ANON;
+		int l = LRU_BASE;
 		l += page_file_cache(page);
 		del_page_from_lru_list(zone, page, l);
 
 		SetPageActive(page);
-		l += LRU_ACTIVE_ANON - LRU_INACTIVE_ANON;
+		l += LRU_ACTIVE;
 		add_page_to_lru_list(zone, page, l);
 		__count_vm_event(PGACTIVATE);
 		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
Index: linux-2.6.23-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.23-mm1.orig/mm/vmscan.c
+++ linux-2.6.23-mm1/mm/vmscan.c
@@ -786,11 +786,11 @@ static unsigned long isolate_pages_globa
 					struct mem_cgroup *mem_cont,
 					int active, int file)
 {
-	int l = LRU_INACTIVE_ANON;
+	int l = LRU_BASE;
 	if (active)
-		l += LRU_ACTIVE_ANON - LRU_INACTIVE_ANON;
+		l += LRU_ACTIVE;
 	if (file)
-		l += LRU_INACTIVE_FILE - LRU_INACTIVE_ANON;
+		l += LRU_FILE;
 	return isolate_lru_pages(nr, &z->list[l], dst, scanned, order,
 								mode, !!file);
 }
@@ -842,7 +842,7 @@ int isolate_lru_page(struct page *page)
 
 		spin_lock_irq(&zone->lru_lock);
 		if (PageLRU(page) && get_page_unless_zero(page)) {
-			int l = LRU_INACTIVE_ANON;
+			int l = LRU_BASE;
 			ret = 0;
 			ClearPageLRU(page);
 
@@ -938,19 +938,19 @@ static unsigned long shrink_inactive_lis
 		 * Put back any unfreeable pages.
 		 */
 		while (!list_empty(&page_list)) {
-			int l = LRU_INACTIVE_ANON;
+			int l = LRU_BASE;
 			page = lru_to_page(&page_list);
 			VM_BUG_ON(PageLRU(page));
 			SetPageLRU(page);
 			list_del(&page->lru);
 			if (file) {
-				l += LRU_INACTIVE_FILE - LRU_INACTIVE_ANON;
+				l += LRU_FILE;
 				zone->recent_rotated_file += sc->activated;
 			} else {
 				zone->recent_rotated_anon += sc->activated;
 			}
 			if (PageActive(page))
-				l += LRU_ACTIVE_ANON - LRU_INACTIVE_ANON;
+				l += LRU_ACTIVE;
 			add_page_to_lru_list(zone, page, l);
 			if (!pagevec_add(&pvec, page)) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1051,7 +1051,7 @@ static void shrink_active_list(unsigned 
 	 */
 	pagevec_init(&pvec, 1);
 	pgmoved = 0;
-	l = LRU_INACTIVE_ANON + file * (LRU_INACTIVE_FILE - LRU_INACTIVE_ANON);
+	l = LRU_BASE + file * LRU_FILE;
 	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&list[LRU_INACTIVE_ANON])) {
 		page = lru_to_page(&list[LRU_INACTIVE_ANON]);
@@ -1083,7 +1083,7 @@ static void shrink_active_list(unsigned 
 	if (buffer_heads_over_limit)
 		pagevec_strip(&pvec);
 	pgmoved = 0;
-	l = LRU_ACTIVE_ANON + file * (LRU_ACTIVE_FILE - LRU_ACTIVE_ANON);
+	l = LRU_ACTIVE + file * LRU_FILE;
 	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&list[LRU_ACTIVE_ANON])) {
 		page = lru_to_page(&list[LRU_ACTIVE_ANON]);
Index: linux-2.6.23-mm1/include/linux/mmzone.h
===================================================================
--- linux-2.6.23-mm1.orig/include/linux/mmzone.h
+++ linux-2.6.23-mm1/include/linux/mmzone.h
@@ -107,11 +107,22 @@ enum zone_stat_item {
 #endif
 	NR_VM_ZONE_STAT_ITEMS };
 
+/*
+ * We do arithmetic on the LRU lists in various places in the code,
+ * so it is important to keep the active lists LRU_ACTIVE higher in
+ * the array than the corresponding inactive lists, and to keep
+ * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
+ */
+#define LRU_BASE 0
+#define LRU_ANON LRU_BASE
+#define LRU_ACTIVE 1
+#define LRU_FILE 2
+
 enum lru_list {
-	LRU_INACTIVE_ANON,	/* must be first enum  */
-	LRU_ACTIVE_ANON,	/* must match order of NR_[IN]ACTIVE_* */
-	LRU_INACTIVE_FILE,	/*  "     "     "   "       "          */
-	LRU_ACTIVE_FILE,	/*  "     "     "   "       "          */
+	LRU_INACTIVE_ANON = LRU_BASE,
+	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
+	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
+	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
 	NR_LRU_LISTS };
 
 #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

[Index of Archives]     [Kernel Newbies]     [Netfilter]     [Bugtraq]     [Photo]     [Stuff]     [Gimp]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Video 4 Linux]     [Linux for the blind]     [Linux Resources]
  Powered by Linux