[PATCH] Problem with smaps in 2.6.13-rc4-mm1

On Saturday 06 August 2005 09:15, Torsten Foertsch wrote:
> when trying out smaps I have encountered the following problem:
> > cat /proc/$P/smaps | diff - /proc/$P/smaps
>
> 239,241c239,241
> < bfbaf000-bfbc4000 rw-p bfbaf000 00:00 0          [stack]
> < Size:                84 kB
> < Rss:                 24 kB
> ---
>
> > b7fc4000-b7fc6000 rwxp 00015000 08:02 12558      /lib/ld-2.3.4.so
> > Size:                 8 kB
> > Rss:                  8 kB
>
> 245c245
> < Private_Dirty:       24 kB
> ---
>
> > Private_Dirty:        8 kB

The problem occurs because show_smap first calls show_map and then prints its 
additional information to the seq_file. show_map checks whether everything it 
has to print fits into the buffer and, if so, marks the current vma as written. 
While that is correct for show_map, it is not for show_smap: there the vma 
should be marked as written only after the additional information has also been 
written.
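
In condensed form (a sketch of the flow, not the literal source), the pre-patch 
code does roughly this:

	static int show_map(struct seq_file *m, void *v)
	{
		struct vm_area_struct *vma = v;

		/* ... print the one-line maps entry for this vma ... */

		if (m->count < m->size)          /* the maps line fit */
			m->version = vma->vm_start; /* vma marked as written */
		return 0;
	}

	static int show_smap(struct seq_file *m, void *v)
	{
		show_map(m, v);  /* may already mark the vma as written */

		/* ... walk the page tables and seq_printf() the extra
		 * Size:/Rss:/... lines; if these overflow the buffer the
		 * entry is retried, but m->version already points past
		 * this vma, so two consecutive reads can disagree ... */
		return 0;
	}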

The attached patch cures the problem. It moves the functionality of show_map 
into a new function, show_map_internal, which takes an additional struct 
mem_size_stats* argument. show_map then calls show_map_internal with a NULL 
struct mem_size_stats*, whereas show_smap calls it with a real pointer. Now the 
final

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;

is done only if the whole entry fits into the buffer.
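
With this change applied, repeating the test quoted above 
(cat /proc/$P/smaps | diff - /proc/$P/smaps) should no longer show any 
differences, because a vma whose smaps entry did not fit completely is 
re-emitted in full on the next read.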

Torsten
--- linux-2.6.13-rc4-mm1/fs/proc/task_mmu.c~	2005-08-03 12:48:35.000000000 +0200
+++ linux-2.6.13-rc4-mm1/fs/proc/task_mmu.c	2005-08-06 18:43:11.160005664 +0200
@@ -92,7 +92,16 @@
 	seq_printf(m, "%*c", len, ' ');
 }
 
-static int show_map(struct seq_file *m, void *v)
+struct mem_size_stats
+{
+	unsigned long resident;
+	unsigned long shared_clean;
+	unsigned long shared_dirty;
+	unsigned long private_clean;
+	unsigned long private_dirty;
+};
+
+static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct task_struct *task = m->private;
 	struct vm_area_struct *vma = v;
@@ -146,19 +155,31 @@
 		}
 	}
 	seq_putc(m, '\n');
+
+	if (mss)
+		seq_printf(m,
+			   "Size:          %8lu kB\n"
+			   "Rss:           %8lu kB\n"
+			   "Shared_Clean:  %8lu kB\n"
+			   "Shared_Dirty:  %8lu kB\n"
+			   "Private_Clean: %8lu kB\n"
+			   "Private_Dirty: %8lu kB\n",
+			   (vma->vm_end - vma->vm_start) >> 10,
+			   mss->resident >> 10,
+			   mss->shared_clean  >> 10,
+			   mss->shared_dirty  >> 10,
+			   mss->private_clean >> 10,
+			   mss->private_dirty >> 10);
+
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
 	return 0;
 }
 
-struct mem_size_stats
+static int show_map(struct seq_file *m, void *v)
 {
-	unsigned long resident;
-	unsigned long shared_clean;
-	unsigned long shared_dirty;
-	unsigned long private_clean;
-	unsigned long private_dirty;
-};
+	return show_map_internal(m, v, 0);
+}
 
 static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
@@ -248,33 +269,17 @@
 {
 	struct vm_area_struct *vma = v;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long vma_len = (vma->vm_end - vma->vm_start);
 	struct mem_size_stats mss;
 
 	memset(&mss, 0, sizeof mss);
 
-	show_map(m, v);
-
 	if (mm) {
 		spin_lock(&mm->page_table_lock);
 		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	seq_printf(m,
-		   "Size:          %8lu kB\n"
-		   "Rss:           %8lu kB\n"
-		   "Shared_Clean:  %8lu kB\n"
-		   "Shared_Dirty:  %8lu kB\n"
-		   "Private_Clean: %8lu kB\n"
-		   "Private_Dirty: %8lu kB\n",
-		   vma_len >> 10,
-		   mss.resident >> 10,
-		   mss.shared_clean  >> 10,
-		   mss.shared_dirty  >> 10,
-		   mss.private_clean >> 10,
-		   mss.private_dirty >> 10);
-	return 0;
+	return show_map_internal(m, v, &mss);
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
@@ -288,7 +293,7 @@
 	/*
 	 * We remember last_addr rather than next_addr to hit with
 	 * mmap_cache most of the time. We have zero last_addr at
-	 * the begining and also after lseek. We will have -1 last_addr
+	 * the beginning and also after lseek. We will have -1 last_addr
 	 * after the end of the vmas.
 	 */
 
