Hello,
The attached patch fixes another bug in dm-raid1.c, where a dirty
region may stay on, or be moved to, the clean list and be freed
while still in use.
It happens as follows:
CPU0                                    CPU1
------------------------------------------------------------------------------
rh_dec()
  if (atomic_dec_and_test(pending))
    <the region is still marked dirty>
                                        rh_inc()
                                          if the region is clean
                                            mark the region dirty
                                            and remove from clean list
    mark the region clean
    and move to clean list
                                        atomic_inc(pending)
At this stage, the region is on the clean list with I/O still
pending, and will later be mistakenly reclaimed by rh_update_states().
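
To make the interleaving concrete, below is a small single-threaded
user-space C program that replays the steps above by hand. It is only
a model, not the kernel code: pending, st and on_clean_list stand in
for reg->pending, reg->state and clean-list membership, and the "CPU"
comments mark whose step runs next.

#include <stdio.h>

enum state { RH_CLEAN, RH_DIRTY };

static int pending = 1;		/* one write in flight on the region */
static enum state st = RH_DIRTY;
static int on_clean_list = 0;

int main(void)
{
	int will_clean;

	/* CPU0: rh_dec() -- atomic_dec_and_test() sees pending reach 0,
	 * but the region is still marked dirty at this point. */
	pending--;
	will_clean = (pending == 0);

	/* CPU1: rh_inc() -- the region still looks dirty, so the
	 * "mark dirty and take off the clean list" branch is skipped. */
	if (st == RH_CLEAN) {
		st = RH_DIRTY;
		on_clean_list = 0;
	}

	/* CPU0 resumes: marks the region clean and moves it to the
	 * clean list, unaware that CPU1 is about to reuse it. */
	if (will_clean) {
		st = RH_CLEAN;
		on_clean_list = 1;
	}

	/* CPU1 resumes: pending goes back up -- the region is in use. */
	pending++;

	/* Bad end state: pending == 1 (I/O outstanding) yet the region
	 * sits on the clean list, where rh_update_states() can free it. */
	printf("pending=%d, on_clean_list=%d\n", pending, on_clean_list);
	return 0;
}

Running it ends with pending=1 and on_clean_list=1, which is exactly
the state rh_update_states() would wrongly reclaim.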
Signed-off-by: Jun'ichi Nomura <[email protected]>
--- kernel/drivers/md/dm-raid1.c.orig 2005-06-16 07:13:50.610325768 -0400
+++ kernel/drivers/md/dm-raid1.c 2005-06-16 10:34:12.510719112 -0400
@@ -375,16 +380,18 @@ static void rh_inc(struct region_hash *r
 	read_lock(&rh->hash_lock);
 	reg = __rh_find(rh, region);
+
+	atomic_inc(&reg->pending);
+
+	spin_lock_irq(&rh->region_lock);
 	if (reg->state == RH_CLEAN) {
 		rh->log->type->mark_region(rh->log, reg->key);
-		spin_lock_irq(&rh->region_lock);
 		reg->state = RH_DIRTY;
 		list_del_init(&reg->list);	/* take off the clean list */
-		spin_unlock_irq(&rh->region_lock);
 	}
+	spin_unlock_irq(&rh->region_lock);
-	atomic_inc(&reg->pending);
 	read_unlock(&rh->hash_lock);
 }
@@ -408,6 +414,10 @@ static void rh_dec(struct region_hash *r
 	if (atomic_dec_and_test(&reg->pending)) {
 		spin_lock_irqsave(&rh->region_lock, flags);
+		if (atomic_read(&reg->pending)) { /* check race */
+			spin_unlock_irqrestore(&rh->region_lock, flags);
+			return;
+		}
 		if (reg->state == RH_RECOVERING) {
 			list_add_tail(&reg->list, &rh->quiesced_regions);
 		} else {
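
For comparison, here is the same single-threaded replay with the two
changes of the patch applied: rh_inc() now raises pending before it
inspects the state, and rh_dec() re-reads pending under region_lock
before declaring the region clean. Again a model only, with the same
illustrative names as above.

#include <stdio.h>

enum state { RH_CLEAN, RH_DIRTY };

static int pending = 1;		/* one write in flight on the region */
static enum state st = RH_DIRTY;
static int on_clean_list = 0;

int main(void)
{
	int will_clean;

	/* CPU0: rh_dec() -- dec-and-test still fires here. */
	pending--;
	will_clean = (pending == 0);

	/* CPU1: rh_inc() -- with the patch, pending is raised BEFORE
	 * the state check, so the increment is visible to rh_dec(). */
	pending++;
	if (st == RH_CLEAN) {		/* still dirty: branch skipped */
		st = RH_DIRTY;
		on_clean_list = 0;
	}

	/* CPU0 resumes: the new "check race" test re-reads pending
	 * under region_lock and backs out instead of cleaning. */
	if (will_clean) {
		if (pending > 0) {
			will_clean = 0;	/* raced: leave the region dirty */
		} else {
			st = RH_CLEAN;
			on_clean_list = 1;
		}
	}

	/* Good end state: pending == 1 and the region is NOT on the
	 * clean list, so rh_update_states() cannot free it. */
	printf("pending=%d, on_clean_list=%d\n", pending, on_clean_list);
	return 0;
}

With the racing increment visible at the re-check, rh_dec() backs out
and the region stays off the clean list while I/O is outstanding.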