Eric Dumazet wrote:
Andrew Morton wrote:
Eric Dumazet <[email protected]> wrote:
Considering:
[root@dada1 linux-2.6.13-rc6]# find .|xargs grep f_maxcount
./fs/file_table.c: f->f_maxcount = INT_MAX;
./fs/read_write.c: if (unlikely(count > file->f_maxcount))
./include/linux/fs.h: size_t f_maxcount;
I was wondering if f_maxcount has a real use these days...
No, I guess we can just stick a hard-wired INT_MAX in there.
OK here is a patch doing the hard wiring then :)
* struct file cleanup: f_maxcount has a unique value (INT_MAX). Just
use the hard-wired value.
Signed-off-by: Eric Dumazet <[email protected]>
------------------------------------------------------------------------
diff -Nru linux-2.6.13-rc6/fs/read_write.c linux-2.6.13-rc6-ed/fs/read_write.c
--- linux-2.6.13-rc6/fs/read_write.c 2005-08-07 20:18:56.000000000 +0200
+++ linux-2.6.13-rc6-ed/fs/read_write.c 2005-08-19 23:51:20.000000000 +0200
@@ -188,7 +188,7 @@
struct inode *inode;
loff_t pos;
- if (unlikely(count > file->f_maxcount))
+ if (unlikely(count > INT_MAX))
goto Einval;
pos = *ppos;
if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
And depending upon how you feel about read(2) and write(2) returning more
than can be represented by a ssize_t, you can get rid of this test too and
apply the attached patch to prevent failures occurring in the direct-io
subsystem.
Limiting i/o requests to INT_MAX is starting to seem a little small.
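To make the ssize_t point concrete, here is a minimal user-space sketch
(illustration only, not kernel code; it only assumes a POSIX <sys/types.h>
and <limits.h>): a byte count above SSIZE_MAX cannot come back through
read(2)'s ssize_t return value without an implementation-defined, typically
negative, result.

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	/* A request one byte larger than what ssize_t can represent. */
	size_t count = (size_t)SSIZE_MAX + 1;

	/*
	 * Converting it back to ssize_t is implementation-defined and
	 * typically wraps to a negative value, which callers would read
	 * as an error.  This is why the VFS clamps request sizes
	 * (currently to INT_MAX) instead of passing arbitrary size_t
	 * counts straight down.
	 */
	ssize_t ret = (ssize_t)count;

	printf("count      = %zu\n", count);
	printf("as ssize_t = %zd\n", ret);
	return 0;
}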
Thanx...
ps
Signed-off-by: Peter Staubach <[email protected]>
--- linux-2.6.12/fs/direct-io.c.org 2005-08-22 08:56:40.000000000 -0400
+++ linux-2.6.12/fs/direct-io.c 2005-08-22 08:59:49.000000000 -0400
@@ -1139,6 +1139,12 @@ direct_io_worker(int rw, struct kiocb *i
}
/*
+ * The maximum size of an i/o request which can be handled by block
+ * devices.
+ */
+#define MAX_DIO_SIZE ((u32)INT_MAX + 1)
+
+/*
* This is a library function for use by filesystem drivers.
* The locking rules are governed by the dio_lock_type parameter.
*
@@ -1174,6 +1180,12 @@ __blockdev_direct_IO(int rw, struct kioc
loff_t end = offset;
struct dio *dio;
int reader_with_isem = (rw == READ && dio_lock_type == DIO_OWN_LOCKING);
+ int nseg;
+ unsigned long segs_reqd;
+ struct iovec *niov = NULL;
+ int asegs;
+ caddr_t nbase;
+ size_t nlen;
if (rw & WRITE)
current->flags |= PF_SYNCWRITE;
@@ -1189,6 +1201,44 @@ __blockdev_direct_IO(int rw, struct kioc
goto out;
}
+ /*
+ * Check to see if any individual segment is larger than
+ * 2G. If so, then break it up into 2G sized chunks.
+ */
+ segs_reqd = 0;
+ for (seg = 0; seg < nr_segs; seg++)
+ segs_reqd += ((iov[seg].iov_len - 1) / MAX_DIO_SIZE) + 1;
+ if (segs_reqd != nr_segs) {
+ if (segs_reqd > UIO_MAXIOV) {
+ retval = -EINVAL;
+ goto out;
+ }
+ niov = kmalloc(sizeof(*niov) * segs_reqd, GFP_KERNEL);
+ if (!niov) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ nseg = 0;
+ for (seg = 0; seg < nr_segs; seg++) {
+ nbase = iov[seg].iov_base;
+ nlen = iov[seg].iov_len;
+ asegs = (iov[seg].iov_len - 1) / MAX_DIO_SIZE;
+ while (asegs > 0) {
+ niov[nseg].iov_base = nbase;
+ niov[nseg].iov_len = MAX_DIO_SIZE;
+ nbase += MAX_DIO_SIZE;
+ nlen -= MAX_DIO_SIZE;
+ nseg++;
+ asegs--;
+ }
+ niov[nseg].iov_base = nbase;
+ niov[nseg].iov_len = nlen;
+ nseg++;
+ }
+ iov = niov;
+ nr_segs = segs_reqd;
+ }
+
/* Check the memory alignment. Blocks cannot straddle pages */
for (seg = 0; seg < nr_segs; seg++) {
addr = (unsigned long)iov[seg].iov_base;
@@ -1265,6 +1315,8 @@ __blockdev_direct_IO(int rw, struct kioc
out:
if (reader_with_isem)
up(&inode->i_sem);
+ if (niov)
+ kfree(niov);
if (rw & WRITE)
current->flags &= ~PF_SYNCWRITE;
return retval;
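As a footnote to the patch above, the segment-splitting arithmetic can be
exercised on its own. Here is a minimal user-space sketch, assuming a 2G
chunk size that mirrors MAX_DIO_SIZE; the names (CHUNK_SIZE, pieces_needed)
are illustrative and not taken from the patch.

#include <stdio.h>
#include <stddef.h>

/* 2G, mirroring MAX_DIO_SIZE ((u32)INT_MAX + 1) from the patch. */
#define CHUNK_SIZE ((size_t)2147483647 + 1)

/*
 * Ceil division: the number of CHUNK_SIZE pieces one segment of 'len'
 * bytes is split into.  Same form as ((iov_len - 1) / MAX_DIO_SIZE) + 1
 * used to compute segs_reqd; assumes len > 0, as the patch does.
 */
static size_t pieces_needed(size_t len)
{
	return (len - 1) / CHUNK_SIZE + 1;
}

int main(void)
{
	size_t lens[] = { 1, CHUNK_SIZE - 1, CHUNK_SIZE, CHUNK_SIZE + 1 };
	size_t i;

	/* Expect: 1, 1, 1, 2 -- only the last length needs a second piece. */
	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("%zu byte(s) -> %zu piece(s)\n",
		       lens[i], pieces_needed(lens[i]));
	return 0;
}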