Coding style changes using suggestions from LKML, no functionality changes.
[Christophe Saout]

--- diff/drivers/md/dm-crypt.c	2004-01-02 13:27:19.000000000 +0000
+++ source/drivers/md/dm-crypt.c	2004-01-03 12:14:13.000000000 +0000
@@ -4,9 +4,6 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
-#include "dm-daemon.h"
-
 #include
 #include
 #include
@@ -16,6 +13,9 @@
 #include
 #include
 
+#include "dm.h"
+#include "dm-daemon.h"
+
 /*
  * per bio private data
  */
@@ -45,7 +45,7 @@
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
-struct crypt_c {
+struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
 
@@ -70,10 +70,10 @@
 #define MIN_POOL_PAGES 16
 #define MIN_BIO_PAGES 8
 
-static kmem_cache_t *_io_cache;
+static kmem_cache_t *_crypt_io_pool;
 
 /*
- * Mempool alloc and free functions for the page and io pool
+ * Mempool alloc and free functions for the page
  */
 static void *mempool_alloc_page(int gfp_mask, void *data)
 {
@@ -85,26 +85,6 @@
 	__free_page(page);
 }
 
-static inline struct page *crypt_alloc_page(struct crypt_c *cc, int gfp_mask)
-{
-	return mempool_alloc(cc->page_pool, gfp_mask);
-}
-
-static inline void crypt_free_page(struct crypt_c *cc, struct page *page)
-{
-	mempool_free(page, cc->page_pool);
-}
-
-static inline struct crypt_io *crypt_alloc_io(struct crypt_c *cc)
-{
-	return mempool_alloc(cc->io_pool, GFP_NOIO);
-}
-
-static inline void crypt_free_io(struct crypt_c *cc, struct crypt_io *io)
-{
-	return mempool_free(io, cc->io_pool);
-}
-
 /*
  * Encrypt / decrypt a single sector, source and destination buffers
  * are stored in scatterlists. In CBC mode initialise the "previous
@@ -112,7 +92,7 @@
  * it would not allow to seek on the device...)
  */
 static inline int
-crypt_convert_scatterlist(struct crypt_c *cc, struct scatterlist *out,
+crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
                           int write, sector_t sector)
 {
@@ -140,7 +120,7 @@
 }
 
 static void
-crypt_convert_init(struct crypt_c *cc, struct convert_context *ctx,
+crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                    struct bio *bio_out, struct bio *bio_in,
                    sector_t sector, int write)
 {
@@ -157,7 +137,7 @@
 /*
  * Encrypt / decrypt data from one bio to another one (may be the same)
  */
-static int crypt_convert(struct crypt_c *cc,
+static int crypt_convert(struct crypt_config *cc,
                          struct convert_context *ctx)
 {
 	int r = 0;
@@ -206,7 +186,7 @@
  * May return a smaller bio when running out of pages
  */
 static struct bio *
-crypt_alloc_buffer(struct crypt_c *cc, unsigned int size,
+crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
                    struct bio *base_bio, int *bio_vec_idx)
 {
 	struct bio *bio;
@@ -232,7 +212,7 @@
 	for(i = bio->bi_idx; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(bio, i);
 
-		bv->bv_page = crypt_alloc_page(cc, gfp_mask);
+		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
 		if (!bv->bv_page)
 			break;
 
@@ -269,14 +249,14 @@
 	return bio;
 }
 
-static void crypt_free_buffer_pages(struct crypt_c *cc, struct bio *bio,
-				    unsigned int bytes)
+static void crypt_free_buffer_pages(struct crypt_config *cc,
+				    struct bio *bio, unsigned int bytes)
 {
 	int i = bio->bi_idx;
 
 	while(bytes) {
 		struct bio_vec *bv = bio_iovec_idx(bio, i++);
-		crypt_free_page(cc, bv->bv_page);
+		mempool_free(bv->bv_page, cc->page_pool);
 		bytes -= bv->bv_len;
 	}
 }
@@ -287,7 +267,7 @@
  */
 static void dec_pending(struct crypt_io *io, int error)
 {
-	struct crypt_c *cc = (struct crypt_c *) io->target->private;
+	struct crypt_config *cc = (struct crypt_config *) io->target->private;
 
 	if (!atomic_dec_and_test(&io->pending))
 		return;
@@ -301,7 +281,7 @@
 	if (io->bio)
 		bio_endio(io->bio, io->bio->bi_size, io->error);
 
-	crypt_free_io(cc, io);
+	mempool_free(io, cc->io_pool);
 }
 
 /*
@@ -312,8 +292,8 @@
  * queued here.
  */
 static spinlock_t _kcryptd_lock = SPIN_LOCK_UNLOCKED;
-static struct bio *_bio_head;
-static struct bio *_bio_tail;
+static struct bio *_kcryptd_bio_head;
+static struct bio *_kcryptd_bio_tail;
 
 static struct dm_daemon _kcryptd;
 
@@ -325,9 +305,9 @@
 	struct bio *bio;
 
 	spin_lock_irq(&_kcryptd_lock);
-	bio = _bio_head;
+	bio = _kcryptd_bio_head;
 	if (bio)
-		_bio_head = _bio_tail = NULL;
+		_kcryptd_bio_head = _kcryptd_bio_tail = NULL;
 	spin_unlock_irq(&_kcryptd_lock);
 
 	return bio;
@@ -341,36 +321,36 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&_kcryptd_lock, flags);
-	if (_bio_tail)
-		_bio_tail->bi_next = bio;
+	if (_kcryptd_bio_tail)
+		_kcryptd_bio_tail->bi_next = bio;
 	else
-		_bio_head = bio;
-	_bio_tail = bio;
+		_kcryptd_bio_head = bio;
+	_kcryptd_bio_tail = bio;
 	spin_unlock_irqrestore(&_kcryptd_lock, flags);
 }
 
-static jiffy_t kcryptd(void)
+static jiffy_t kcryptd_do_work(void)
 {
 	int r;
 	struct bio *bio;
 	struct bio *next_bio;
 	struct crypt_io *io;
-	struct crypt_c *cc;
+	struct crypt_config *cc;
 	struct convert_context ctx;
 
 	bio = kcryptd_get_bios();
 
 	while (bio) {
 		io = (struct crypt_io *) bio->bi_private;
-		cc = (struct crypt_c *) io->target->private;
+		cc = (struct crypt_config *) io->target->private;
 
 		crypt_convert_init(cc, &ctx, io->bio, io->bio,
 		                   io->bio->bi_sector - io->target->begin, 0);
 		r = crypt_convert(cc, &ctx);
 
 		next_bio = bio->bi_next;
-		bio->bi_next = NULL;
+
 		bio_put(bio);
 
 		dec_pending(io, r);
@@ -442,7 +422,7 @@
  */
 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
-	struct crypt_c *cc;
+	struct crypt_config *cc;
 	struct crypto_tfm *tfm;
 	char *tmp;
 	char *cipher;
@@ -493,7 +473,7 @@
 	}
 
 	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-				     mempool_free_slab, _io_cache);
+				     mempool_free_slab, _crypt_io_pool);
 	if (!cc->io_pool) {
 		ti->error = "dm-crypt: Cannot allocate crypt io mempool";
 		goto bad1;
@@ -551,7 +531,7 @@
 
 static void crypt_dtr(struct dm_target *ti)
 {
-	struct crypt_c *cc = (struct crypt_c *) ti->private;
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
 
 	mempool_destroy(cc->page_pool);
 	mempool_destroy(cc->io_pool);
@@ -564,7 +544,7 @@
 static int crypt_endio(struct bio *bio, unsigned int done, int error)
 {
 	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
-	struct crypt_c *cc = (struct crypt_c *) io->target->private;
+	struct crypt_config *cc = (struct crypt_config *) io->target->private;
 
 	if (bio_rw(bio) == WRITE) {
 		/*
@@ -595,10 +575,10 @@
 }
 
 static int crypt_map(struct dm_target *ti, struct bio *bio,
-		     union map_info *map_context)
+                     union map_info *map_context)
 {
-	struct crypt_c *cc = (struct crypt_c *) ti->private;
-	struct crypt_io *io = crypt_alloc_io(cc);
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
+	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	struct bio *clone = NULL;
 	struct convert_context ctx;
 	unsigned int remaining = bio->bi_size;
@@ -622,14 +602,14 @@
 	while (remaining) {
 		if (bio_rw(bio) == WRITE) {
 			clone = crypt_alloc_buffer(cc, bio->bi_size,
-						   io->first_clone,
+			                           io->first_clone,
 			                           &bio_vec_idx);
 			if (clone) {
 				ctx.bio_out = clone;
 				r = crypt_convert(cc, &ctx);
 				if (r < 0) {
 					crypt_free_buffer_pages(cc, clone,
-								clone->bi_size);
+					                        clone->bi_size);
 					bio_put(clone);
 					goto cleanup;
 				}
@@ -676,14 +656,14 @@
 	}
 
 	/* if no bio has been dispatched yet, we can directly return the error */
-	crypt_free_io(cc, io);
+	mempool_free(io, cc->io_pool);
 	return r;
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
 {
-	struct crypt_c *cc = (struct crypt_c *) ti->private;
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
 	char buffer[32];
 	const char *cipher;
 	const char *mode = NULL;
@@ -741,19 +721,20 @@
 	.status = crypt_status,
 };
 
-int __init dm_crypt_init(void)
+static int __init dm_crypt_init(void)
 {
 	int r;
 
-	_io_cache = kmem_cache_create("dm-crypt_io", sizeof(struct crypt_io),
-				      0, 0, NULL, NULL);
-	if (!_io_cache)
+	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
+					   sizeof(struct crypt_io),
+					   0, 0, NULL, NULL);
+	if (!_crypt_io_pool)
 		return -ENOMEM;
 
-	r = dm_daemon_start(&_kcryptd, "kcryptd", kcryptd);
+	r = dm_daemon_start(&_kcryptd, "kcryptd", kcryptd_do_work);
 	if (r) {
 		DMERR("couldn't create kcryptd: %d", r);
-		kmem_cache_destroy(_io_cache);
+		kmem_cache_destroy(_crypt_io_pool);
 		return r;
 	}
 
@@ -761,13 +742,13 @@
 	if (r < 0) {
 		DMERR("crypt: register failed %d", r);
 		dm_daemon_stop(&_kcryptd);
-		kmem_cache_destroy(_io_cache);
+		kmem_cache_destroy(_crypt_io_pool);
 	}
 
 	return r;
 }
 
-void __exit dm_crypt_exit(void)
+static void __exit dm_crypt_exit(void)
 {
 	int r = dm_unregister_target(&crypt_target);
 
@@ -775,14 +756,11 @@
 		DMERR("crypt: unregister failed %d", r);
 
 	dm_daemon_stop(&_kcryptd);
-	kmem_cache_destroy(_io_cache);
+	kmem_cache_destroy(_crypt_io_pool);
 }
 
-/*
- * module hooks
- */
-module_init(dm_crypt_init)
-module_exit(dm_crypt_exit)
+module_init(dm_crypt_init);
+module_exit(dm_crypt_exit);
 
 MODULE_AUTHOR("Christophe Saout ");
 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
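
For reference, the hunks above drop the one-line crypt_alloc_io()/crypt_free_io() and
crypt_alloc_page()/crypt_free_page() wrappers in favour of calling the mempool API
directly at each site.  The sketch below shows that pattern in isolation, written
against the 2.6-era slab/mempool interfaces the diff itself uses; the example_io
structure, the pool size of 16 and all example_* names are illustrative only and are
not part of dm-crypt.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mempool.h>

/* hypothetical per-request structure, standing in for struct crypt_io */
struct example_io {
	int dummy;
};

static kmem_cache_t *example_io_cache;	/* backing slab, cf. _crypt_io_pool */
static mempool_t *example_io_pool;	/* cf. cc->io_pool */

static int example_pools_init(void)
{
	example_io_cache = kmem_cache_create("example_io",
					     sizeof(struct example_io),
					     0, 0, NULL, NULL);
	if (!example_io_cache)
		return -ENOMEM;

	/* a mempool that draws its elements from the slab cache */
	example_io_pool = mempool_create(16, mempool_alloc_slab,
					 mempool_free_slab, example_io_cache);
	if (!example_io_pool) {
		kmem_cache_destroy(example_io_cache);
		return -ENOMEM;
	}

	return 0;
}

static void example_pools_exit(void)
{
	mempool_destroy(example_io_pool);
	kmem_cache_destroy(example_io_cache);
}

/* allocate and release directly, no wrapper functions in between */
static void example_io_roundtrip(void)
{
	struct example_io *io = mempool_alloc(example_io_pool, GFP_NOIO);

	mempool_free(io, example_io_pool);
}

Calling mempool_alloc()/mempool_free() at the call sites, as the patch now does in
crypt_map(), dec_pending(), crypt_alloc_buffer() and crypt_free_buffer_pages(), keeps
it explicit which pool backs each allocation without hiding it behind trivial inlines.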