Eric Dumazet wrote:
> Andi Kleen wrote:
>> This is an RFC for now. I would be interested in testing
>> feedback. The patch is for 2.6.15.
>>
>> Optimize select and poll by using stack space for small fd sets.
>>
>> This brings back an old optimization from Linux 2.0. Using
>> the stack is faster than kmalloc. On an Intel P4 system
>> it speeds up a select of a single pty fd by about 13%
>> (~4000 cycles -> ~3500).
> Was this result on a UP or an SMP kernel? Preempt or not?
>
> I think we might also play in do_pollfd() and use fget_light()/fput_light()
> instead of fget()/fput(), which are somewhat expensive because of the
> atomic inc/dec on SMP.
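
The stack-versus-kmalloc idea quoted above boils down to the pattern in the following minimal userspace sketch. It is not the kernel code; the threshold and the names (SMALL_FDSET_BYTES, process_fd_bits) are made up purely for illustration:

#include <stdlib.h>
#include <string.h>

/* Illustrative only: a small fixed buffer lives on the stack and the
 * allocator is used only when the fd set does not fit, mirroring the
 * stack-vs-kmalloc trick described above (malloc here, kmalloc there). */
#define SMALL_FDSET_BYTES 256		/* made-up threshold for this sketch */

static int process_fd_bits(const unsigned char *bits, size_t nbytes)
{
	unsigned char stack_buf[SMALL_FDSET_BYTES];
	unsigned char *buf = stack_buf;
	int ret = 0;

	if (nbytes > sizeof(stack_buf)) {
		buf = malloc(nbytes);		/* fall back to the heap */
		if (!buf)
			return -1;
	}

	memcpy(buf, bits, nbytes);		/* work on a private copy */
	/* ... scan the copied bits and count ready descriptors here ... */

	if (buf != stack_buf)
		free(buf);			/* free only what we allocated */
	return ret;
}

The point of the quoted numbers is that the common case (a handful of descriptors) never touches the allocator at all; only oversized fd sets pay for the allocation and free.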
Just for completeness, I include this patch against 2.6.15:
--- linux-2.6.15/fs/select.c 2006-01-03 04:21:10.000000000 +0100
+++ linux-2.6.15-ed/fs/select.c 2006-01-04 00:04:10.000000000 +0100
@@ -221,17 +221,18 @@
 			}
 			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
+				int fput_needed;
 				if (i >= n)
 					break;
 				if (!(bit & all_bits))
 					continue;
-				file = fget(i);
+				file = fget_light(i, &fput_needed);
 				if (file) {
 					f_op = file->f_op;
 					mask = DEFAULT_POLLMASK;
 					if (f_op && f_op->poll)
 						mask = (*f_op->poll)(file, retval ? NULL : wait);
-					fput(file);
+					fput_light(file, fput_needed);
 					if ((mask & POLLIN_SET) && (in & bit)) {
 						res_in |= bit;
 						retval++;
@@ -417,14 +418,15 @@
 		fdp = fdpage+i;
 		fd = fdp->fd;
 		if (fd >= 0) {
-			struct file * file = fget(fd);
+			int fput_needed;
+			struct file * file = fget_light(fd, &fput_needed);
 			mask = POLLNVAL;
 			if (file != NULL) {
 				mask = DEFAULT_POLLMASK;
 				if (file->f_op && file->f_op->poll)
 					mask = file->f_op->poll(file, *pwait);
 				mask &= fdp->events | POLLERR | POLLHUP;
-				fput(file);
+				fput_light(file, fput_needed);
 			}
 			if (mask) {
 				*pwait = NULL;
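
For reference, the fget_light()/fput_light() contract that both hunks rely on looks like this in isolation. This is only a sketch; the helper name poll_one_fd is invented for illustration, while fget_light(), fput_light() and DEFAULT_POLLMASK are the real 2.6 interfaces:

#include <linux/file.h>
#include <linux/poll.h>

/*
 * Illustrative helper (not part of the patch): poll a single descriptor
 * with the lightweight get/put pair.  When current->files is not shared,
 * fget_light() returns the file without touching its reference count
 * and reports fput_needed == 0, so fput_light() is a no-op.
 */
static unsigned int poll_one_fd(unsigned int fd, poll_table *wait)
{
	int fput_needed;
	unsigned int mask = POLLNVAL;
	struct file *file = fget_light(fd, &fput_needed);

	if (file) {
		mask = DEFAULT_POLLMASK;
		if (file->f_op && file->f_op->poll)
			mask = file->f_op->poll(file, wait);
		fput_light(file, fput_needed);	/* drop the ref only if one was taken */
	}
	return mask;
}

Only when the file table is shared with another thread does fget_light() fall back to the ordinary reference counting, so the atomic inc/dec cost mentioned above disappears for the common single-threaded caller.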