Add initialising_pg flag and remap ios when processing queue.

--- diff/drivers/md/dm-mpath.c	2004-10-29 15:38:23.000000000 +0100
+++ source/drivers/md/dm-mpath.c	2004-10-29 15:38:29.000000000 +0100
@@ -53,6 +53,7 @@
 
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
+	int initialising_pg;
 
 	spinlock_t lock;
 	unsigned nr_valid_paths;
@@ -208,40 +209,52 @@
 	return 0;
 }
 
-static struct path *__get_current_path(struct multipath *m)
+static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio)
 {
-	struct path *path;
+	unsigned long flags;
+	struct path *path = NULL;
+	int must_queue = 0;
+
+	spin_lock_irqsave(&m->lock, flags);
 
 	/* Do we need to select a new path? */
-	if (!m->current_path || (m->current_count && --m->current_count == 0))
+	if (!m->initialising_pg &&
+	    (!m->current_path || (m->current_count && --m->current_count == 0)))
 		__choose_path(m);
 
-	path = m->current_path;
+	if (m->initialising_pg)
+		must_queue = 1;
+	else if (m->current_path)
+		path = m->current_path;
 
-	return path;
-}
-
-static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	mpio->path = __get_current_path(m);
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (!mpio->path)
+	if (!must_queue && !path)
 		return -EIO;
 
+	mpio->path = path;
 	bio->bi_bdev = mpio->path->dev->bdev;
 
-	return 1;
+
+	if (!must_queue)
+		return 1;	/* Mapped successfully */
+
+	/* queue for the daemon to resubmit */
+	spin_lock_irqsave(&m->lock, flags);
+	bio_list_add(&m->queued_ios, bio);
+	spin_unlock_irqrestore(&m->lock, flags);
+
+	return 0;	/* Queued */
 }
 
 static void dispatch_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
+	int r;
 	unsigned long flags;
 	struct bio *bio = NULL, *next;
+	struct mpath_io *mpio;
+	union map_info *info;
 
 	spin_lock_irqsave(&m->lock, flags);
 	bio = bio_list_get(&m->queued_ios);
@@ -250,7 +263,16 @@
 	while (bio) {
 		next = bio->bi_next;
 		bio->bi_next = NULL;
-		generic_make_request(bio);
+
+		info = dm_get_mapinfo(bio);
+		mpio = info->ptr;
+
+		r = map_io(m, bio, mpio);
+		if (r < 0)
+			bio_endio(bio, bio->bi_size, r);
+		else if (r == 1)
+			generic_make_request(bio);
+
 		bio = next;
 	}
 }
@@ -536,12 +558,11 @@
 	mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
 	dm_bio_record(&mpio->details, bio);
+	map_context->ptr = mpio;
 
 	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
 	r = map_io(m, bio, mpio);
 	if (r < 0)
 		mempool_free(mpio, m->mpio_pool);
-	else
-		map_context->ptr = mpio;
 
 	return r;
 }
@@ -673,7 +694,6 @@
 static int do_end_io(struct multipath *m, struct bio *bio,
 		     int error, struct mpath_io *mpio)
 {
-	int r;
 	struct hw_handler *hwh = &m->hw_handler;
 	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
 
@@ -697,19 +717,15 @@
 
 	if (!(err_flags & MP_RETRY_IO))
 		return -EIO;
 
-	/* remap */
 	dm_bio_restore(&mpio->details, bio);
-	r = map_io(m, bio, mpio);
-	if (r < 0)
-		/* no paths left */
-		return r;
 
-	/* queue for the daemon to resubmit */
+	/* queue for the daemon to resubmit or fail */
 	spin_lock(&m->lock);
 	bio_list_add(&m->queued_ios, bio);
+	if (!m->initialising_pg)
+		schedule_work(&m->dispatch_queued);
 	spin_unlock(&m->lock);
-	schedule_work(&m->dispatch_queued);
 
 	return 1;	/* io not complete */
 }
--- diff/drivers/md/dm.c	2004-10-29 15:37:22.000000000 +0100
+++ source/drivers/md/dm.c	2004-10-29 15:38:29.000000000 +0100
@@ -43,6 +43,13 @@
 	union map_info info;
 };
 
+union map_info *dm_get_mapinfo(struct bio *bio)
+{
+	if (bio && bio->bi_private)
+		return &((struct target_io *)bio->bi_private)->info;
+	return NULL;
+}
+
 /*
  * Bits for the md->flags field.
  */
@@ -1162,6 +1169,8 @@
 	.owner = THIS_MODULE
 };
 
+EXPORT_SYMBOL(dm_get_mapinfo);
+
 /*
  * module hooks
  */
--- diff/drivers/md/dm.h	2004-10-29 15:37:22.000000000 +0100
+++ source/drivers/md/dm.h	2004-10-29 15:38:29.000000000 +0100
@@ -187,5 +187,6 @@
 void dm_stripe_exit(void);
 
 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+union map_info *dm_get_mapinfo(struct bio *bio);
 
 #endif
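
Note on the control flow this patch introduces, for readers skimming the diff: map_io() now has three outcomes instead of two. It can map the bio to the current path (return 1), fail it when no path is available (return -EIO), or, while a priority group is still initialising, park it on m->queued_ios (return 0). dispatch_queued_ios() then re-runs map_io() on every parked bio before resubmitting or failing it, which is why it now needs dm_get_mapinfo() to recover the per-bio mpath_io context that multipath_map() stores in map_context->ptr up front. The standalone C sketch below models that queue-then-remap pattern outside the kernel; every name in it (request_stub, dispatch_queued(), have_valid_path and so on) is invented for illustration and is not part of the driver.

/*
 * Illustrative userspace sketch (not kernel code) of the queue-then-remap
 * pattern: while the priority group is initialising, map_io() defers the
 * request instead of failing it, and dispatch_queued() later re-runs the
 * mapping on each deferred request before resubmitting or failing it.
 * The kernel keeps a FIFO bio list; a LIFO push is used here for brevity.
 */
#include <stdio.h>

struct request_stub {
	int id;
	struct request_stub *next;
};

static struct request_stub *queued;	/* stand-in for m->queued_ios */
static int initialising_pg;		/* stand-in for m->initialising_pg */
static int have_valid_path;		/* stand-in for m->current_path */

/* Returns 1 = mapped, 0 = queued for later, -1 = no path, fail the I/O. */
static int map_io(struct request_stub *req)
{
	if (initialising_pg) {
		req->next = queued;	/* defer until the PG is ready */
		queued = req;
		return 0;
	}
	return have_valid_path ? 1 : -1;
}

/* Mirrors dispatch_queued_ios(): re-map every deferred request. */
static void dispatch_queued(void)
{
	struct request_stub *req = queued, *next;

	queued = NULL;
	while (req) {
		next = req->next;
		req->next = NULL;

		switch (map_io(req)) {
		case 1:
			printf("request %d resubmitted\n", req->id);
			break;
		case 0:
			printf("request %d re-queued\n", req->id);
			break;
		default:
			printf("request %d failed, no paths\n", req->id);
			break;
		}
		req = next;
	}
}

int main(void)
{
	struct request_stub a = { .id = 1 }, b = { .id = 2 };

	initialising_pg = 1;	/* PG still coming up: both requests get queued */
	map_io(&a);
	map_io(&b);

	initialising_pg = 0;	/* PG ready: the daemon drains the queue */
	have_valid_path = 1;
	dispatch_queued();
	return 0;
}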