Use 'unsigned int' instead of 'unsigned long' for the readahead indexes/sizes.
This helps reduce memory consumption on 64-bit CPUs when a lot of files
are opened.
Note that the (smaller) 32-bit page index can still address files of up
to 16TB with 4KB pages, which should be sufficiently large, at least for now.
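
For illustration only (not part of the patch): a minimal user-space sketch,
assuming a typical LP64 target, using stand-in structs whose fields mirror
file_ra_state before and after this change, to show where the per-open-file
saving comes from:

	/* illustrative only: not the kernel definitions */
	#include <stdio.h>

	struct ra_before {			/* readahead fields as unsigned long */
		unsigned long start;
		unsigned long size;
		unsigned long async_size;
		unsigned long ra_pages;
		unsigned long mmap_hit;
		unsigned long mmap_miss;
		unsigned long prev_index;
		unsigned int prev_offset;
	};

	struct ra_after {			/* readahead fields as unsigned int */
		unsigned int start;
		unsigned int size;
		unsigned int async_size;
		unsigned int ra_pages;
		unsigned long mmap_hit;
		unsigned long mmap_miss;
		unsigned int prev_offset;
		unsigned int prev_index;
	};

	int main(void)
	{
		/* prints 64 vs 40 bytes on a typical LP64 build */
		printf("before: %zu  after: %zu\n",
		       sizeof(struct ra_before), sizeof(struct ra_after));
		return 0;
	}

On such a target the structure shrinks from 64 to 40 bytes, saved once per
open file that carries a readahead state.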
Signed-off-by: Fengguang Wu <[email protected]>
---
include/linux/fs.h | 10 +++++-----
mm/filemap.c | 2 +-
mm/readahead.c | 5 +++--
3 files changed, 9 insertions(+), 8 deletions(-)
--- linux-2.6.22-rc6-mm1.orig/include/linux/fs.h
+++ linux-2.6.22-rc6-mm1/include/linux/fs.h
@@ -771,16 +771,16 @@ struct fown_struct {
* Track a single file's readahead state
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned long size; /* # of readahead pages */
- unsigned long async_size; /* do asynchronous readahead when
+ unsigned int start; /* where readahead started */
+ unsigned int size; /* # of readahead pages */
+ unsigned int async_size; /* do asynchronous readahead when
there are only # of pages ahead */
- unsigned long ra_pages; /* Maximum readahead window */
+ unsigned int ra_pages; /* Maximum readahead window */
unsigned long mmap_hit; /* Cache hit stat for mmap accesses */
unsigned long mmap_miss; /* Cache miss stat for mmap accesses */
- unsigned long prev_index; /* Cache last read() position */
unsigned int prev_offset; /* Offset where last read() ended in a page */
+ unsigned int prev_index; /* Cache last read() position */
};
/*
--- linux-2.6.22-rc6-mm1.orig/mm/filemap.c
+++ linux-2.6.22-rc6-mm1/mm/filemap.c
@@ -840,7 +840,7 @@ static void shrink_readahead_size_eio(st
if (count > 5)
return;
count++;
- printk(KERN_WARNING "Reducing readahead size to %luK\n",
+ printk(KERN_WARNING "Reducing readahead size to %dK\n",
ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
}
--- linux-2.6.22-rc6-mm1.orig/mm/readahead.c
+++ linux-2.6.22-rc6-mm1/mm/readahead.c
@@ -342,11 +342,12 @@ ondemand_readahead(struct address_space
bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
- unsigned long max; /* max readahead pages */
+ int max; /* max readahead pages */
int sequential;
max = ra->ra_pages;
- sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+ sequential = ((unsigned int)offset - ra->prev_index <= 1UL) ||
+ (req_size > max);
/*
* It's the expected callback offset, assume sequential access.
--