Store deferred ios in a queue so that we preserve ordering and hence
don't break write barriers. dm_request is now blocked while deferred
ios are resubmitted.

[Christophe Saout]

--- diff/drivers/md/dm.c	2004-01-02 11:36:02.000000000 +0000
+++ source/drivers/md/dm.c	2004-01-02 12:04:52.000000000 +0000
@@ -63,7 +63,8 @@
 	 */
 	atomic_t pending;
 	wait_queue_head_t wait;
-	struct bio *deferred;
+	struct bio *deferred_head;
+	struct bio *deferred_tail;
 
 	/*
 	 * The current mapping.
@@ -230,8 +231,13 @@
 		return 1;
 	}
 
-	bio->bi_next = md->deferred;
-	md->deferred = bio;
+	bio->bi_next = NULL;
+
+	if (md->deferred_tail)
+		md->deferred_tail->bi_next = bio;
+	else
+		md->deferred_head = bio;
+	md->deferred_tail = bio;
 
 	up_write(&md->lock);
 	return 0;		/* deferred successfully */
@@ -511,6 +517,16 @@
  *---------------------------------------------------------------*/
 
 
+static inline void __dm_request(struct mapped_device *md, struct bio *bio)
+{
+	if (!md->map) {
+		bio_io_error(bio, bio->bi_size);
+		return;
+	}
+
+	__split_bio(md, bio);
+}
+
 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
@@ -549,12 +565,7 @@
 		down_read(&md->lock);
 	}
 
-	if (!md->map) {
-		bio_io_error(bio, bio->bi_size);
-		return 0;
-	}
-
-	__split_bio(md, bio);
+	__dm_request(md, bio);
 	up_read(&md->lock);
 	return 0;
 }
@@ -787,16 +798,16 @@
 }
 
 /*
- * Requeue the deferred bios by calling generic_make_request.
+ * Process the deferred bios
  */
-static void flush_deferred_io(struct bio *c)
+static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
 {
 	struct bio *n;
 
 	while (c) {
 		n = c->bi_next;
 		c->bi_next = NULL;
-		generic_make_request(c);
+		__dm_request(md, c);
 		c = n;
 	}
 }
@@ -891,11 +902,12 @@
 	dm_table_resume_targets(md->map);
 	clear_bit(DMF_SUSPENDED, &md->flags);
 	clear_bit(DMF_BLOCK_IO, &md->flags);
-	def = md->deferred;
-	md->deferred = NULL;
+
+	def = md->deferred_head;
+	md->deferred_head = md->deferred_tail = NULL;
+	__flush_deferred_io(md, def);
 	up_write(&md->lock);
 
-	flush_deferred_io(def);
 	blk_run_queues();
 
 	return 0;
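
Not part of the patch, but for readers following the queue change, here is a
minimal userspace C sketch of the ordering argument. The names (fake_bio,
fake_md, defer_bio, flush_deferred) are invented for illustration; only the
head/tail append and the in-order flush mirror the patched queue_io() and
__flush_deferred_io() above.

#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	int sector;			/* stand-in for the real bio payload */
	struct fake_bio *bi_next;
};

struct fake_md {
	struct fake_bio *deferred_head;	/* oldest deferred bio */
	struct fake_bio *deferred_tail;	/* newest deferred bio */
};

/* Append at the tail, as the patched queue_io() does: FIFO order. */
static void defer_bio(struct fake_md *md, struct fake_bio *bio)
{
	bio->bi_next = NULL;

	if (md->deferred_tail)
		md->deferred_tail->bi_next = bio;
	else
		md->deferred_head = bio;
	md->deferred_tail = bio;
}

/* Resubmit in arrival order, like __flush_deferred_io() feeding __dm_request(). */
static void flush_deferred(struct fake_md *md)
{
	struct fake_bio *c = md->deferred_head, *n;

	md->deferred_head = md->deferred_tail = NULL;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		printf("resubmitting bio for sector %d\n", c->sector);
		free(c);
		c = n;
	}
}

int main(void)
{
	struct fake_md md = { NULL, NULL };
	int i;

	/*
	 * The old code did "bio->bi_next = md->deferred; md->deferred = bio;",
	 * a LIFO push, so a flush replayed these as 2, 1, 0.  The tail append
	 * above preserves 0, 1, 2.
	 */
	for (i = 0; i < 3; i++) {
		struct fake_bio *bio = malloc(sizeof(*bio));

		bio->sector = i;
		defer_bio(&md, bio);
	}

	flush_deferred(&md);
	return 0;
}

A write barrier only works if everything queued before it is reissued before
it, and with the old head insertion a resume replayed the backlog newest-first.
Routing the flush through __dm_request() while md->lock is still held for
write (instead of calling generic_make_request() after dropping the lock) is
what keeps dm_request blocked until the backlog has been resubmitted, so newly
arriving bios cannot jump ahead of deferred ones.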