> > It looks like it panics during a mem_cpy but I know it's
> > difficult to tell just by the output.
> >
> > I get a code: f3 a4 c3 66 66 66 90 66 66 66 90 66 66 66 90 66
> >
> > The problem appears very reproducible so I can provide more
> > information upon request.
>
> What does the rest of the panic say? There should be text above this
> that tells where the panic occurred and why. Can you please send that
> here?
Ok, could you please try this patch, I'll attach it as well:
From: Andreas Steinmetz <[email protected]>
from include/linux/kernel.h:
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
from crypto/cipher.c:
unsigned int alignmask = ...
u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
...
unsigned int alignmask = ...
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
...
unsigned int align;
addr = ALIGN(addr, align);
addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
The compiler first does ~((a)-1) and then expands the unsigned int to
unsigned long for the & operation. So we end up with only the lower 32
bits of the address. Who did smoke what to do this? Patch attached.
--
Andreas Steinmetz SPAMmers use [email protected]
--- linux.orig/crypto/cipher.c 2005-07-17 13:35:15.000000000 +0200
+++ linux/crypto/cipher.c 2005-07-17 14:04:00.000000000 +0200
@@ -41,7 +41,7 @@
struct scatter_walk *in,
struct scatter_walk *out, unsigned int bsize)
{
- unsigned int alignmask = crypto_tfm_alg_alignmask(desc->tfm);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
u8 buffer[bsize * 2 + alignmask];
u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
u8 *dst = src + bsize;
@@ -160,7 +160,7 @@
unsigned int nbytes)
{
struct crypto_tfm *tfm = desc->tfm;
- unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
u8 *iv = desc->info;
if (unlikely(((unsigned long)iv & alignmask))) {
@@ -424,7 +424,7 @@
}
if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
- unsigned int align;
+ unsigned long align;
unsigned long addr;
switch (crypto_tfm_alg_blocksize(tfm)) {
--------------080406080505060706090703--
-
--- Begin Message ---
- Subject: No Subject
- From: Andreas Steinmetz <[email protected]>
- Date: Tue, 19 Jul 2005 11:32:26 +0200
from include/linux/kernel.h:
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
from crypto/cipher.c:
unsigned int alignmask = ...
u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
...
unsigned int alignmask = ...
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
...
unsigned int align;
addr = ALIGN(addr, align);
addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
The compiler first does ~((a)-1) and then expands the unsigned int to
unsigned long for the & operation. So we end up with only the lower 32
bits of the address. Who did smoke what to do this? Patch attached.
--
Andreas Steinmetz SPAMmers use [email protected]
--- linux.orig/crypto/cipher.c 2005-07-17 13:35:15.000000000 +0200
+++ linux/crypto/cipher.c 2005-07-17 14:04:00.000000000 +0200
@@ -41,7 +41,7 @@
struct scatter_walk *in,
struct scatter_walk *out, unsigned int bsize)
{
- unsigned int alignmask = crypto_tfm_alg_alignmask(desc->tfm);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
u8 buffer[bsize * 2 + alignmask];
u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
u8 *dst = src + bsize;
@@ -160,7 +160,7 @@
unsigned int nbytes)
{
struct crypto_tfm *tfm = desc->tfm;
- unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
u8 *iv = desc->info;
if (unlikely(((unsigned long)iv & alignmask))) {
@@ -424,7 +424,7 @@
}
if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
- unsigned int align;
+ unsigned long align;
unsigned long addr;
switch (crypto_tfm_alg_blocksize(tfm)) {
--------------080406080505060706090703--
-
--- End Message ---
[Index of Archives]
[Kernel Newbies]
[Netfilter]
[Bugtraq]
[Photo]
[Gimp]
[Yosemite News]
[MIPS Linux]
[ARM Linux]
[Linux Security]
[Linux RAID]
[Video 4 Linux]
[Linux for the blind]
|
|