In test_path, instead of driving the test I/O, queue it on a list.  When
all test I/Os have been queued, they can then be driven down without
having to hold the path_lock. [Kevin Corry]

--- diff/drivers/md/dm-mpath.c	2004-01-16 14:38:03.000000000 +0000
+++ source/drivers/md/dm-mpath.c	2004-01-16 14:39:33.000000000 +0000
@@ -61,6 +61,7 @@
 
 	spinlock_t failed_lock;
 	struct bio_list failed_ios;
+	struct bio_list test_ios;
 
 	unsigned test_interval;
 	atomic_t trigger_event;
@@ -222,7 +223,7 @@
 	p->test_bio->bi_size = bdev_hardsect_size(p->dev->bdev);
 	p->test_bio->bi_idx = 0;
 
-	generic_make_request(p->test_bio);
+	bio_list_add(&p->pg->m->test_ios, p->test_bio);
 }
 
 /*-----------------------------------------------------------------
@@ -233,21 +234,27 @@
 static LIST_HEAD(_mpaths);
 static spinlock_t _mpath_lock = SPIN_LOCK_UNLOCKED;
 
+static void submit_ios(struct bio *bio)
+{
+	struct bio *next;
+
+	while (bio) {
+		next = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = next;
+	}
+}
+
 static void dispatch_failed_ios(struct multipath *m)
 {
 	unsigned long flags;
-	struct bio *bio, *next_bio;
+	struct bio *bio;
 
 	spin_lock_irqsave(&m->failed_lock, flags);
 	bio = bio_list_get(&m->failed_ios);
 	spin_unlock_irqrestore(&m->failed_lock, flags);
-
-	while (bio) {
-		next_bio = bio->bi_next;
-		bio->bi_next = NULL;
-		generic_make_request(bio);
-		bio = next_bio;
-	}
+	submit_ios(bio);
 }
 
 static void iterate_paths(struct multipath *m, void (*fn)(struct path *p))
@@ -276,6 +283,7 @@
 	list_for_each_entry (m, &_mpaths, list) {
 		dispatch_failed_ios(m);
 		iterate_paths(m, test_path);
+		submit_ios(bio_list_get(&m->test_ios));
 
 		if (atomic_dec_and_test(&m->trigger_event))
 			dm_table_event(m->ti->table);
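The win here is the usual queue-then-drain idiom: the list append and the
whole-chain detach are O(1) pointer updates, so the lock is held only briefly,
while the potentially slow generic_make_request() calls run with no lock held.
Below is a minimal userspace sketch of the same pattern; req and req_list are
illustrative stand-ins for bio and bio_list, not the kernel API.

/* Userspace sketch of the queue-then-drain pattern above.
 * "req"/"req_list" are hypothetical stand-ins for bio/bio_list. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;	/* plays the role of bio->bi_next */
};

struct req_list {
	struct req *head, *tail;
};

static struct req_list pending = { NULL, NULL };
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like bio_list_add(): append to the tail without submitting. */
static void req_list_add(struct req_list *l, struct req *r)
{
	r->next = NULL;
	if (l->tail)
		l->tail->next = r;
	else
		l->head = r;
	l->tail = r;
}

/* Like bio_list_get(): detach the whole chain, leaving the list empty. */
static struct req *req_list_get(struct req_list *l)
{
	struct req *r = l->head;

	l->head = l->tail = NULL;
	return r;
}

/* Mirrors submit_ios(): walk the detached chain with no lock held. */
static void submit_reqs(struct req *r)
{
	struct req *next;

	while (r) {
		next = r->next;
		r->next = NULL;
		printf("submitting req %d\n", r->id);	/* generic_make_request() here */
		free(r);	/* stands in for handing the request off */
		r = next;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		pthread_mutex_lock(&pending_lock);	/* cheap: just a list append */
		req_list_add(&pending, r);
		pthread_mutex_unlock(&pending_lock);
	}

	pthread_mutex_lock(&pending_lock);
	struct req *batch = req_list_get(&pending);	/* O(1) under the lock */
	pthread_mutex_unlock(&pending_lock);

	submit_reqs(batch);	/* slow part runs with the lock dropped */
	return 0;
}

bio_list itself is essentially such a head/tail pair chained through bi_next,
which is why bio_list_get() can hand the entire queue to submit_ios() in a
single constant-time step regardless of how many test I/Os were queued.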