The current code unconditionally copies the first block for every call to
sha1_update(). This can be avoided if there is no pending partial block.
That is always the case on the first call to sha1_update() (if the length
is >= 64, of course).
In the case where sha1_update() is called successively with len=64, an
8.5% performance increase can be observed on i386, and 8.2% on ARM.
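
To illustrate the measurement scenario, here is a minimal user-space
sketch (not kernel code; the stripped-down sha1_ctx stub below is
hypothetical) showing why successive len=64 calls always take the new
fast path: the partial-block offset stays zero, so the patched
sha1_update() never copies into sctx->buffer and feeds the caller's
data straight to sha_transform().

#include <stdio.h>

/* Hypothetical stub: only the bit count from struct sha1_ctx in
   crypto/sha1.c matters for the fast-path test. */
struct sha1_ctx { unsigned long long count; };

int main(void)
{
        struct sha1_ctx sctx = { 0 };
        unsigned int i, len = 64;

        for (i = 0; i < 4; i++) {
                unsigned int partial = (sctx.count >> 3) & 0x3f;
                /* partial == 0 on every call, so the patched
                   sha1_update() skips the memcpy into sctx->buffer. */
                printf("call %u: partial = %u\n", i, partial);
                sctx.count += len << 3;
        }
        return 0;
}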
Signed-off-by: Nicolas Pitre <[email protected]>
Index: linux-2.6/crypto/sha1.c
===================================================================
--- linux-2.6.orig/crypto/sha1.c
+++ linux-2.6/crypto/sha1.c
@@ -48,23 +48,26 @@
static void sha1_update(void *ctx, const u8 *data, unsigned int len)
{
struct sha1_ctx *sctx = ctx;
- unsigned int i, j;
+ unsigned int partial, done;
u32 temp[SHA_WORKSPACE_WORDS];
- j = (sctx->count >> 3) & 0x3f;
+ partial = (sctx->count >> 3) & 0x3f;
sctx->count += len << 3;
+ done = 0;
- if ((j + len) > 63) {
- memcpy(&sctx->buffer[j], data, (i = 64-j));
- sha_transform(sctx->state, sctx->buffer, temp);
- for ( ; i + 63 < len; i += 64) {
- sha_transform(sctx->state, &data[i], temp);
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = 64 - partial;
+ memcpy(sctx->buffer + partial, data, done);
+ sha_transform(sctx->state, sctx->buffer, temp);
+ partial = 0;
}
- j = 0;
+ for ( ; done + 63 < len; done += 64)
+ sha_transform(sctx->state, data + done, temp);
}
- else i = 0;
+ if (len - done)
+ memcpy(sctx->buffer + partial, data + done, len - done);
memset(temp, 0, sizeof(temp));
- memcpy(&sctx->buffer[j], &data[i], len - i);
}
-
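
For reference, here is how sha1_update() reads with the patch applied
(assembled directly from the hunk above, comments added):

static void sha1_update(void *ctx, const u8 *data, unsigned int len)
{
        struct sha1_ctx *sctx = ctx;
        unsigned int partial, done;
        u32 temp[SHA_WORKSPACE_WORDS];

        partial = (sctx->count >> 3) & 0x3f;
        sctx->count += len << 3;
        done = 0;

        if ((partial + len) > 63) {
                /* Complete any pending partial block first. */
                if (partial) {
                        done = 64 - partial;
                        memcpy(sctx->buffer + partial, data, done);
                        sha_transform(sctx->state, sctx->buffer, temp);
                        partial = 0;
                }
                /* Hash full blocks straight from the caller's buffer,
                   with no intermediate copy. */
                for ( ; done + 63 < len; done += 64)
                        sha_transform(sctx->state, data + done, temp);
        }
        /* Stash any remaining bytes as the new partial block. */
        if (len - done)
                memcpy(sctx->buffer + partial, data + done, len - done);
        memset(temp, 0, sizeof(temp));
}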