4/14
--
SUSE Labs, Novell Inc.
Slightly optimise rmap functions by minimising atomic operations when
we know there will be no concurrent modifications.
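
The core of the change is easiest to see outside the kernel. The following is an editorial sketch in userspace C11 atomics (invented struct and function names, not the kernel's atomic_t/struct page APIs): while an object can already be reached by other threads, bumping its mapcount needs an atomic read-modify-write and the one-time setup must be guarded by its return value; for an object that was just allocated and is not yet visible anywhere, a plain store of the known final value plus unconditional setup are enough.

/*
 * Userspace analogy: obj->mapcount plays the role of page->_mapcount and
 * starts at -1, meaning "not mapped by anyone".
 */
#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int mapcount;	/* -1 == unmapped, like page->_mapcount */
	const char *owner;	/* stands in for page->mapping */
};

/*
 * Shared path: other threads may add mappings concurrently, so the
 * increment is an atomic read-modify-write and only the thread that
 * performs the -1 -> 0 transition does the one-time setup (this mirrors
 * atomic_inc_and_test() in page_add_anon_rmap()).
 */
static void add_mapping(struct obj *o, const char *owner)
{
	if (atomic_fetch_add(&o->mapcount, 1) == -1)
		o->owner = owner;
}

/*
 * New-object path: the caller has just allocated o and nobody else can
 * see it yet, so a plain store of the final value and unconditional
 * setup suffice (this mirrors page_add_new_anon_rmap()).
 */
static void add_first_mapping(struct obj *o, const char *owner)
{
	atomic_store_explicit(&o->mapcount, 0, memory_order_relaxed);
	o->owner = owner;
}

int main(void)
{
	struct obj o = { .mapcount = -1, .owner = NULL };

	add_first_mapping(&o, "anon_vma");	/* brand new, unshared */
	add_mapping(&o, "anon_vma");		/* later, possibly racy */
	printf("mapcount=%d\n", atomic_load(&o.mapcount));
	return 0;
}

On most processors the atomic read-modify-write implies a locked cycle or an LL/SC loop, while the plain store in the new-page path does not; that is the saving the changelog above refers to.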
Index: linux-2.6/include/linux/rmap.h
===================================================================
--- linux-2.6.orig/include/linux/rmap.h
+++ linux-2.6/include/linux/rmap.h
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_stru
* rmap interfaces called when adding or removing pte of page
*/
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1337,14 +1337,15 @@ static int do_wp_page(struct mm_struct *
inc_mm_counter(mm, anon_rss);
dec_mm_counter(mm, file_rss);
}
+
flush_cache_page(vma, address, pfn);
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
ptep_establish(vma, address, page_table, entry);
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
+ page_add_new_anon_rmap(new_page, vma, address);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
/* Free the old page.. */
new_page = old_page;
@@ -1796,9 +1797,8 @@ static int do_anonymous_page(struct mm_s
if (!pte_none(*page_table))
goto release;
inc_mm_counter(mm, anon_rss);
+ page_add_new_anon_rmap(page, vma, address);
lru_cache_add_active(page);
- SetPageReferenced(page);
- page_add_anon_rmap(page, vma, address);
} else {
/* Map the ZERO_PAGE - vm_page_prot is readonly */
page = ZERO_PAGE(address);
@@ -1924,11 +1924,10 @@ retry:
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- set_pte_at(mm, address, page_table, entry);
if (anon) {
inc_mm_counter(mm, anon_rss);
+ page_add_new_anon_rmap(new_page, vma, address);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
} else if (!(vma->vm_flags & VM_RESERVED)) {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
@@ -1939,6 +1938,7 @@ retry:
goto unlock;
}
+ set_pte_at(mm, address, page_table, entry);
/* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
Index: linux-2.6/mm/rmap.c
===================================================================
--- linux-2.6.orig/mm/rmap.c
+++ linux-2.6/mm/rmap.c
@@ -440,6 +440,26 @@ int page_referenced(struct page *page, i
}
/**
+ * __page_set_anon_rmap - setup new anonymous rmap
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_set_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct anon_vma *anon_vma = vma->anon_vma;
+
+ BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ page->mapping = (struct address_space *) anon_vma;
+
+ page->index = linear_page_index(vma, address);
+
+ inc_page_state(nr_mapped);
+}
+
+/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
@@ -450,21 +470,28 @@ int page_referenced(struct page *page, i
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- struct anon_vma *anon_vma = vma->anon_vma;
-
- BUG_ON(!anon_vma);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
-
- page->index = linear_page_index(vma, address);
-
- inc_page_state(nr_mapped);
- }
+ if (atomic_inc_and_test(&page->_mapcount))
+ __page_set_anon_rmap(page, vma, address);
/* else checking page index and mapping is racy */
}
/**
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ */
+void page_add_new_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+ __page_set_anon_rmap(page, vma, address);
+}
+
+
+/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
*
@@ -487,21 +514,28 @@ void page_add_file_rmap(struct page *pag
*/
void page_remove_rmap(struct page *page)
{
- if (atomic_add_negative(-1, &page->_mapcount)) {
+ int fast = (page_mapcount(page) == 1) &
+ PageAnon(page) & (!PageSwapCache(page));
+
+ /* fast page may become SwapCache here, but nothing new will map it. */
+ if (fast)
+ reset_page_mapcount(page);
+ else if (atomic_add_negative(-1, &page->_mapcount))
BUG_ON(page_mapcount(page) < 0);
- /*
- * It would be tidy to reset the PageAnon mapping here,
- * but that might overwrite a racing page_add_anon_rmap
- * which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
- * and remember that it's only reliable while mapped.
- * Leaving it set also helps swapoff to reinstate ptes
- * faster for those pages still in swapcache.
- */
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- dec_page_state(nr_mapped);
- }
+ else
+ return; /* non zero mapcount */
+ /*
+ * It would be tidy to reset the PageAnon mapping here,
+ * but that might overwrite a racing page_add_anon_rmap
+ * which increments mapcount after us but sets mapping
+ * before us: so leave the reset to free_hot_cold_page,
+ * and remember that it's only reliable while mapped.
+ * Leaving it set also helps swapoff to reinstate ptes
+ * faster for those pages still in swapcache.
+ */
+ dec_page_state(nr_mapped);
}
/*
Index: linux-2.6/include/linux/page-flags.h
===================================================================
--- linux-2.6.orig/include/linux/page-flags.h
+++ linux-2.6/include/linux/page-flags.h
@@ -182,6 +182,7 @@ extern void __mod_page_state(unsigned lo
#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define __SetPageReferenced(page) __set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
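
The __SetPageReferenced() macro added above follows the kernel convention that double-underscore bitops (__set_bit() and friends) are plain, non-atomic read-modify-writes: cheaper than the atomic versions, but only safe while no other CPU can touch the same flags word, e.g. on a page that has just been allocated and is not yet visible to anyone else. It is not used by the hunks quoted here, presumably by a later patch in this series. A rough userspace sketch of the distinction (C11 atomics, invented names, and a made-up bit value):

#include <stdatomic.h>
#include <stdio.h>

#define PG_REFERENCED_MASK	0x2UL	/* hypothetical mask, not the kernel's PG_referenced */

/* Like set_bit()/SetPageReferenced(): atomic, safe against concurrent
 * updates of other bits in the same word. */
static void set_flag(atomic_ulong *flags, unsigned long mask)
{
	atomic_fetch_or(flags, mask);
}

/* Like __set_bit()/__SetPageReferenced(): a plain read-modify-write,
 * correct only while the caller is the sole owner of the flags word. */
static void __set_flag(unsigned long *flags, unsigned long mask)
{
	*flags |= mask;
}

int main(void)
{
	atomic_ulong shared_flags = 0;		/* may be updated from other threads */
	unsigned long private_flags = 0;	/* not yet published anywhere */

	set_flag(&shared_flags, PG_REFERENCED_MASK);
	__set_flag(&private_flags, PG_REFERENCED_MASK);
	printf("%lx %lx\n", atomic_load(&shared_flags), private_flags);
	return 0;
}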