Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig                17
-rw-r--r--  drivers/md/Makefile                1
-rw-r--r--  drivers/md/bcache/request.c       10
-rw-r--r--  drivers/md/bcache/super.c          8
-rw-r--r--  drivers/md/dm-android-verity.c     6
-rw-r--r--  drivers/md/dm-cache-target.c       2
-rw-r--r--  drivers/md/dm-era-target.c         2
-rw-r--r--  drivers/md/dm-req-crypt.c       1365
-rw-r--r--  drivers/md/dm-table.c              2
-rw-r--r--  drivers/md/dm-thin.c               2
-rw-r--r--  drivers/md/dm.c                   22
-rw-r--r--  drivers/md/linear.c                2
-rw-r--r--  drivers/md/md.c                    6
-rw-r--r--  drivers/md/multipath.c             2
-rw-r--r--  drivers/md/raid0.c                 6
-rw-r--r--  drivers/md/raid1.c                 4
-rw-r--r--  drivers/md/raid10.c               10
-rw-r--r--  drivers/md/raid5.c                12
18 files changed, 1436 insertions, 43 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 5649d1ab0083..27713412c881 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -267,6 +267,23 @@ config DM_CRYPT If unsure, say N. +config DM_REQ_CRYPT + tristate "Req Crypt target support" + depends on BLK_DEV_DM + select XTS + select CRYPTO_XTS + ---help--- + This request based device-mapper target allows you to create a device that + transparently encrypts the data on it. You'll need to activate + the ciphers you're going to use in the cryptoapi configuration. + The DM REQ CRYPT operates on requests (bigger payloads) to utilize + crypto hardware better. + + To compile this code as a module, choose M here: the module will + be called dm-req-crypt. + + If unsure, say N. + config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index c22cc74c9fa8..41ba86576d04 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -60,6 +60,7 @@ obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o obj-$(CONFIG_DM_ERA) += dm-era.o obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o +obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o ifeq ($(CONFIG_DM_UEVENT),y) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index e73aeb0e892c..53c0fa005821 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1025,7 +1025,7 @@ static int cached_dev_congested(void *data, int bits) struct request_queue *q = bdev_get_queue(dc->bdev); int ret = 0; - if (bdi_congested(&q->backing_dev_info, bits)) + if (bdi_congested(q->backing_dev_info, bits)) return 1; if (cached_dev_get(dc)) { @@ -1034,7 +1034,7 @@ static int cached_dev_congested(void *data, int bits) for_each_cache(ca, d->c, i) { q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } cached_dev_put(dc); @@ -1048,7 +1048,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc) struct gendisk *g = dc->disk.disk; g->queue->make_request_fn = cached_dev_make_request; - g->queue->backing_dev_info.congested_fn = cached_dev_congested; + g->queue->backing_dev_info->congested_fn = cached_dev_congested; dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.ioctl = cached_dev_ioctl; } @@ -1141,7 +1141,7 @@ static int flash_dev_congested(void *data, int bits) for_each_cache(ca, d->c, i) { q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; @@ -1152,7 +1152,7 @@ void bch_flash_dev_request_init(struct bcache_device *d) struct gendisk *g = d->disk; g->queue->make_request_fn = flash_dev_make_request; - g->queue->backing_dev_info.congested_fn = flash_dev_congested; + g->queue->backing_dev_info->congested_fn = flash_dev_congested; d->cache_miss = flash_dev_cache_miss; d->ioctl = flash_dev_ioctl; } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f636af441da6..b9a526271f02 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -802,7 +802,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; - q->backing_dev_info.congested_data = d; + q->backing_dev_info->congested_data = d; q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; @@ -1146,9 +1146,9 @@ 
static int cached_dev_init(struct cached_dev *dc, unsigned block_size) set_capacity(dc->disk.disk, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); - dc->disk.disk->queue->backing_dev_info.ra_pages = - max(dc->disk.disk->queue->backing_dev_info.ra_pages, - q->backing_dev_info.ra_pages); + dc->disk.disk->queue->backing_dev_info->ra_pages = + max(dc->disk.disk->queue->backing_dev_info->ra_pages, + q->backing_dev_info->ra_pages); bch_cached_dev_request_init(dc); bch_cached_dev_writeback_init(dc); diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c index c521df010ee3..43d566fd38ae 100644 --- a/drivers/md/dm-android-verity.c +++ b/drivers/md/dm-android-verity.c @@ -48,7 +48,7 @@ static char buildvariant[BUILD_VARIANT]; static bool target_added; static bool verity_enabled = true; -struct dentry *debug_dir; +static struct dentry *debug_dir; static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv); static struct target_type android_verity_target = { @@ -538,7 +538,7 @@ blkdev_release: } /* helper functions to extract properties from dts */ -const char *find_dt_value(const char *name) +static const char *find_dt_value(const char *name) { struct device_node *firmware; const char *value; @@ -693,7 +693,7 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) dev_t uninitialized_var(dev); struct android_metadata *metadata = NULL; int err = 0, i, mode; - char *key_id, *table_ptr, dummy, *target_device, + char *key_id = NULL, *table_ptr, dummy, *target_device, *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; /* One for specifying number of opt args and one for mode */ sector_t data_sectors; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0da5efaad85c..54e50fc908e9 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2288,7 +2288,7 @@ static void do_waker(struct work_struct *ws) static int is_congested(struct dm_dev *dev, int bdi_bits) { struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 32e76c5ee741..11c52567304f 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era) static int dev_is_congested(struct dm_dev *dev, int bdi_bits) { struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c new file mode 100644 index 000000000000..0ec61ee586b7 --- /dev/null +++ b/drivers/md/dm-req-crypt.c @@ -0,0 +1,1365 @@ +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/completion.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/mempool.h> +#include <linux/slab.h> +#include <linux/crypto.h> +#include <linux/qcrypto.h> +#include <linux/workqueue.h> +#include <linux/backing-dev.h> +#include <linux/atomic.h> +#include <linux/scatterlist.h> +#include <linux/device-mapper.h> +#include <linux/printk.h> + +#include <asm/page.h> +#include <asm/unaligned.h> +#include <crypto/scatterwalk.h> +#include <crypto/hash.h> +#include <crypto/md5.h> +#include <crypto/algapi.h> +#include <crypto/ice.h> + +#define DM_MSG_PREFIX "req-crypt" + +#define MAX_SG_LIST 1024 +#define REQ_DM_512_KB (512*1024) +#define MAX_ENCRYPTION_BUFFERS 1 +#define MIN_IOS 256 +#define MIN_POOL_PAGES 32 +#define KEY_SIZE_XTS 32 +#define AES_XTS_IV_LEN 16 +#define MAX_MSM_ICE_KEY_LUT_SIZE 32 +#define SECTOR_SIZE 512 +#define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024) + +#define DM_REQ_CRYPT_ERROR -1 +#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2 + +/* + * ENCRYPTION_MODE_CRYPTO means dm-req-crypt would invoke crypto operations + * for all of the requests. Crypto operations are performed by crypto engine + * plugged with Linux Kernel Crypto APIs + */ +#define DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO 0 +/* + * ENCRYPTION_MODE_TRANSPARENT means dm-req-crypt would not invoke crypto + * operations for any of the requests. Data would be encrypted or decrypted + * using Inline Crypto Engine(ICE) embedded in storage hardware + */ +#define DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT 1 + +#define DM_REQ_CRYPT_QUEUE_SIZE 256 + +struct req_crypt_result { + struct completion completion; + int err; +}; + +#define FDE_KEY_ID 0 +#define PFE_KEY_ID 1 + +static struct dm_dev *dev; +static struct kmem_cache *_req_crypt_io_pool; +static struct kmem_cache *_req_dm_scatterlist_pool; +static sector_t start_sector_orig; +static struct workqueue_struct *req_crypt_queue; +static struct workqueue_struct *req_crypt_split_io_queue; +static mempool_t *req_io_pool; +static mempool_t *req_page_pool; +static mempool_t *req_scatterlist_pool; +static bool is_fde_enabled; +static struct crypto_ablkcipher *tfm; +static unsigned int encryption_mode; +static struct ice_crypto_setting *ice_settings; + +unsigned int num_engines; +unsigned int num_engines_fde, fde_cursor; +unsigned int num_engines_pfe, pfe_cursor; +struct crypto_engine_entry *fde_eng, *pfe_eng; +DEFINE_MUTEX(engine_list_mutex); + +struct req_dm_crypt_io { + struct ice_crypto_setting ice_settings; + struct work_struct work; + struct request *cloned_request; + int error; + atomic_t pending; + struct timespec start_time; + bool should_encrypt; + bool should_decrypt; + u32 key_id; +}; + +struct req_dm_split_req_io { + struct work_struct work; + struct scatterlist *req_split_sg_read; + struct req_crypt_result result; + struct crypto_engine_entry *engine; + u8 IV[AES_XTS_IV_LEN]; + int size; + struct request *clone; +}; + +#ifdef CONFIG_FIPS_ENABLE +static struct qcrypto_func_set dm_qcrypto_func; +#else +static struct qcrypto_func_set dm_qcrypto_func = { + qcrypto_cipher_set_device_hw, + qcrypto_cipher_set_flag, + qcrypto_get_num_engines, + qcrypto_get_engine_list +}; +#endif +static void req_crypt_cipher_complete + (struct crypto_async_request *req, int err); +static void req_cryptd_split_req_queue_cb + (struct work_struct *work); +static void req_cryptd_split_req_queue + (struct req_dm_split_req_io *io); +static void req_crypt_split_io_complete 
+ (struct req_crypt_result *res, int err); + +static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req) +{ + int ret = 0; + bool should_encrypt = false; + struct bio *bio = NULL; + bool is_encrypted = false; + bool is_inplace = false; + + if (!req || !req->cloned_request || !req->cloned_request->bio) + return false; + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) + return false; + bio = req->cloned_request->bio; + + /* req->key_id = key_id; @todo support more than 1 pfe key */ + if ((ret == 0) && (is_encrypted || is_inplace)) { + should_encrypt = true; + req->key_id = PFE_KEY_ID; + } else if (is_fde_enabled) { + should_encrypt = true; + req->key_id = FDE_KEY_ID; + } + + return should_encrypt; +} + +static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req) +{ + int ret = 0; + bool should_deccrypt = false; + struct bio *bio = NULL; + bool is_encrypted = false; + bool is_inplace = false; + + if (!req || !req->cloned_request || !req->cloned_request->bio) + return false; + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) + return false; + + bio = req->cloned_request->bio; + + /* req->key_id = key_id; @todo support more than 1 pfe key */ + if ((ret == 0) && (is_encrypted && !is_inplace)) { + should_deccrypt = true; + req->key_id = PFE_KEY_ID; + } else if (is_fde_enabled) { + should_deccrypt = true; + req->key_id = FDE_KEY_ID; + } + + return should_deccrypt; +} + +static void req_crypt_inc_pending(struct req_dm_crypt_io *io) +{ + atomic_inc(&io->pending); +} + +static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io) +{ + int error = 0; + struct request *clone = NULL; + + if (io) { + error = io->error; + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + BUG(); + } + } else { + DMERR("%s io is NULL\n", __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + BUG(); + } + + atomic_dec(&io->pending); + + if (error < 0) { + dm_kill_unmapped_request(clone, error); + mempool_free(io, req_io_pool); + } else + dm_dispatch_request(clone); +} + +static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io) +{ + int error = 0; + struct request *clone = NULL; + + if (io) { + error = io->error; + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + BUG(); + } + } else { + DMERR("%s io is NULL\n", + __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + BUG(); + } + + /* Should never get here if io or Clone is NULL */ + dm_end_request(clone, error); + atomic_dec(&io->pending); + mempool_free(io, req_io_pool); +} + +/* + * The callback that will be called by the worker queue to perform Decryption + * for reads and use the dm function to complete the bios and requests. 
+ */ +static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + int error = DM_REQ_CRYPT_ERROR; + int total_sg_len = 0, total_bytes_in_req = 0, temp_size = 0, i = 0; + struct scatterlist *sg = NULL; + struct scatterlist *req_sg_read = NULL; + + unsigned int engine_list_total = 0; + struct crypto_engine_entry *curr_engine_list = NULL; + bool split_transfers = 0; + sector_t tempiv; + struct req_dm_split_req_io *split_io = NULL; + + if (io) { + error = io->error; + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + } else { + DMERR("%s io is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + + req_crypt_inc_pending(io); + + mutex_lock(&engine_list_mutex); + + engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde : + (io->key_id == PFE_KEY_ID ? + num_engines_pfe : 0)); + + curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng : + (io->key_id == PFE_KEY_ID ? + pfe_eng : NULL)); + + mutex_unlock(&engine_list_mutex); + + req_sg_read = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, + GFP_KERNEL); + if (!req_sg_read) { + DMERR("%s req_sg_read allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST); + + total_sg_len = blk_rq_map_sg_no_cluster(clone->q, clone, req_sg_read); + if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) { + DMERR("%s Request Error%d", __func__, total_sg_len); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + total_bytes_in_req = clone->__data_len; + if (total_bytes_in_req > REQ_DM_512_KB) { + DMERR("%s total_bytes_in_req > 512 MB %d", + __func__, total_bytes_in_req); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + + if ((clone->__data_len >= (MIN_CRYPTO_TRANSFER_SIZE * + engine_list_total)) + && (engine_list_total > 1)) + split_transfers = 1; + + if (split_transfers) { + split_io = kzalloc(sizeof(struct req_dm_split_req_io) + * engine_list_total, GFP_KERNEL); + if (!split_io) { + DMERR("%s split_io allocation failed\n", __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + split_io[0].req_split_sg_read = sg = req_sg_read; + split_io[engine_list_total - 1].size = total_bytes_in_req; + for (i = 0; i < (engine_list_total); i++) { + while ((sg) && i < (engine_list_total - 1)) { + split_io[i].size += sg->length; + split_io[engine_list_total - 1].size -= + sg->length; + if (split_io[i].size >= + (total_bytes_in_req / + engine_list_total)) { + split_io[i + 1].req_split_sg_read = + sg_next(sg); + sg_mark_end(sg); + break; + } + sg = sg_next(sg); + } + split_io[i].engine = &curr_engine_list[i]; + init_completion(&split_io[i].result.completion); + memset(&split_io[i].IV, 0, AES_XTS_IV_LEN); + tempiv = clone->__sector + (temp_size / SECTOR_SIZE); + memcpy(&split_io[i].IV, &tempiv, sizeof(sector_t)); + temp_size += split_io[i].size; + split_io[i].clone = clone; + req_cryptd_split_req_queue(&split_io[i]); + } + } else { + split_io = kzalloc(sizeof(struct req_dm_split_req_io), + GFP_KERNEL); + if (!split_io) { + DMERR("%s split_io allocation failed\n", __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + split_io->engine = &curr_engine_list[0]; + init_completion(&split_io->result.completion); + memcpy(split_io->IV, 
&clone->__sector, sizeof(sector_t)); + split_io->req_split_sg_read = req_sg_read; + split_io->size = total_bytes_in_req; + split_io->clone = clone; + req_cryptd_split_req_queue(split_io); + } + + if (!split_transfers) { + wait_for_completion_interruptible(&split_io->result.completion); + if (split_io->result.err) { + DMERR("%s error = %d for request\n", + __func__, split_io->result.err); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + } else { + for (i = 0; i < (engine_list_total); i++) { + wait_for_completion_interruptible( + &split_io[i].result.completion); + if (split_io[i].result.err) { + DMERR("%s error = %d for %dst request\n", + __func__, split_io[i].result.err, i); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + } + } + error = 0; +ablkcipher_req_alloc_failure: + + mempool_free(req_sg_read, req_scatterlist_pool); + kfree(split_io); +submit_request: + if (io) + io->error = error; + req_crypt_dec_pending_decrypt(io); +} + +/* + * This callback is called by the worker queue to perform non-decrypt reads + * and use the dm function to complete the bios and requests. + */ +static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + int error = 0; + + if (!io || !io->cloned_request) { + DMERR("%s io is invalid\n", __func__); + BUG(); /* should not happen */ + } + + clone = io->cloned_request; + + dm_end_request(clone, error); + mempool_free(io, req_io_pool); +} + +/* + * The callback that will be called by the worker queue to perform Encryption + * for writes and submit the request using the elevelator. + */ +static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + struct bio *bio_src = NULL; + unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0, + total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0; + struct req_iterator iter; + struct req_iterator iter1; + struct ablkcipher_request *req = NULL; + struct req_crypt_result result; + struct bio_vec bvec; + struct scatterlist *req_sg_in = NULL; + struct scatterlist *req_sg_out = NULL; + int copy_bio_sector_to_req = 0; + gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; + struct page *page = NULL; + u8 IV[AES_XTS_IV_LEN]; + int remaining_size = 0, err = 0; + struct crypto_engine_entry engine; + unsigned int engine_list_total = 0; + struct crypto_engine_entry *curr_engine_list = NULL; + unsigned int *engine_cursor = NULL; + + + if (io) { + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + } else { + DMERR("%s io is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + + req_crypt_inc_pending(io); + + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + DMERR("%s ablkcipher request allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + req_crypt_cipher_complete, &result); + + mutex_lock(&engine_list_mutex); + engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde : + (io->key_id == PFE_KEY_ID ? + num_engines_pfe : 0)); + + curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng : + (io->key_id == PFE_KEY_ID ? + pfe_eng : NULL)); + + engine_cursor = (io->key_id == FDE_KEY_ID ? &fde_cursor : + (io->key_id == PFE_KEY_ID ? 
&pfe_cursor + : NULL)); + if ((engine_list_total < 1) || (NULL == curr_engine_list) + || (NULL == engine_cursor)) { + DMERR("%s Unknown Key ID!\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + mutex_unlock(&engine_list_mutex); + goto ablkcipher_req_alloc_failure; + } + + engine = curr_engine_list[*engine_cursor]; + (*engine_cursor)++; + (*engine_cursor) %= engine_list_total; + + err = (dm_qcrypto_func.cipher_set)(req, engine.ce_device, + engine.hw_instance); + if (err) { + DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n", + __func__, err); + mutex_unlock(&engine_list_mutex); + goto ablkcipher_req_alloc_failure; + } + mutex_unlock(&engine_list_mutex); + + init_completion(&result.completion); + + (dm_qcrypto_func.cipher_flag)(req, + QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); + crypto_ablkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS); + + req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, + GFP_KERNEL); + if (!req_sg_in) { + DMERR("%s req_sg_in allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST); + + req_sg_out = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, + GFP_KERNEL); + if (!req_sg_out) { + DMERR("%s req_sg_out allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST); + + total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in); + if ((total_sg_len_req_in <= 0) || + (total_sg_len_req_in > MAX_SG_LIST)) { + DMERR("%s Request Error%d", __func__, total_sg_len_req_in); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + total_bytes_in_req = clone->__data_len; + if (total_bytes_in_req > REQ_DM_512_KB) { + DMERR("%s total_bytes_in_req > 512 MB %d", + __func__, total_bytes_in_req); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + rq_for_each_segment(bvec, clone, iter) { + if (bvec.bv_len > remaining_size) { + page = NULL; + while (page == NULL) { + page = mempool_alloc(req_page_pool, gfp_mask); + if (!page) { + DMERR("%s Crypt page alloc failed", + __func__); + congestion_wait(BLK_RW_ASYNC, HZ/100); + } + } + + bvec.bv_page = page; + bvec.bv_offset = 0; + remaining_size = PAGE_SIZE - bvec.bv_len; + if (remaining_size < 0) + BUG(); + } else { + bvec.bv_page = page; + bvec.bv_offset = PAGE_SIZE - remaining_size; + remaining_size = remaining_size - bvec.bv_len; + } + } + + total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out); + if ((total_sg_len_req_out <= 0) || + (total_sg_len_req_out > MAX_SG_LIST)) { + DMERR("%s Request Error %d", __func__, total_sg_len_req_out); + error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; + goto ablkcipher_req_alloc_failure; + } + + memset(IV, 0, AES_XTS_IV_LEN); + memcpy(IV, &clone->__sector, sizeof(sector_t)); + + ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out, + total_bytes_in_req, (void *) IV); + + rc = crypto_ablkcipher_encrypt(req); + + switch (rc) { + case 0: + break; + + case -EBUSY: + /* + * Lets make this synchronous request by waiting on + * in progress as well + */ + case -EINPROGRESS: + wait_for_completion_interruptible(&result.completion); + if (result.err) { + DMERR("%s error = %d encrypting the request\n", + __func__, result.err); + error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; + goto ablkcipher_req_alloc_failure; + } + break; + + default: + error = 
DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; + goto ablkcipher_req_alloc_failure; + } + + __rq_for_each_bio(bio_src, clone) { + if (copy_bio_sector_to_req == 0) { + copy_bio_sector_to_req++; + } + blk_queue_bounce(clone->q, &bio_src); + } + + /* + * Recalculate the phy_segments as we allocate new pages + * This is used by storage driver to fill the sg list. + */ + blk_recalc_rq_segments(clone); + +ablkcipher_req_alloc_failure: + if (req) + ablkcipher_request_free(req); + + if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) { + rq_for_each_segment(bvec, clone, iter1) { + if (bvec.bv_offset == 0) { + mempool_free(bvec.bv_page, req_page_pool); + bvec.bv_page = NULL; + } else + bvec.bv_page = NULL; + } + } + + mempool_free(req_sg_in, req_scatterlist_pool); + mempool_free(req_sg_out, req_scatterlist_pool); +submit_request: + if (io) + io->error = error; + req_crypt_dec_pending_encrypt(io); +} + +/* + * This callback is called by the worker queue to perform non-encrypted writes + * and submit the request using the elevelator. + */ +static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + + if (!io || !io->cloned_request) { + DMERR("%s io is invalid\n", __func__); + BUG(); /* should not happen */ + } + + clone = io->cloned_request; + io->error = 0; + dm_dispatch_request(clone); +} + +/* Queue callback function that will get triggered */ +static void req_cryptd_crypt(struct work_struct *work) +{ + struct req_dm_crypt_io *io = + container_of(work, struct req_dm_crypt_io, work); + + if (rq_data_dir(io->cloned_request) == WRITE) { + if (io->should_encrypt) + req_cryptd_crypt_write_convert(io); + else + req_cryptd_crypt_write_plain(io); + } else if (rq_data_dir(io->cloned_request) == READ) { + if (io->should_decrypt) + req_cryptd_crypt_read_convert(io); + else + req_cryptd_crypt_read_plain(io); + } else { + DMERR("%s received non-write request for Clone 0x%p\n", + __func__, io->cloned_request); + } +} + +static void req_cryptd_split_req_queue_cb(struct work_struct *work) +{ + struct req_dm_split_req_io *io = + container_of(work, struct req_dm_split_req_io, work); + struct ablkcipher_request *req = NULL; + struct req_crypt_result result; + int err = 0; + struct crypto_engine_entry *engine = NULL; + + if ((!io) || (!io->req_split_sg_read) || (!io->engine)) { + DMERR("%s Input invalid\n", + __func__); + err = DM_REQ_CRYPT_ERROR; + /* If io is not populated this should not be called */ + BUG(); + } + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + DMERR("%s ablkcipher request allocation failed\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + req_crypt_cipher_complete, &result); + + engine = io->engine; + + err = (dm_qcrypto_func.cipher_set)(req, engine->ce_device, + engine->hw_instance); + if (err) { + DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n", + __func__, err); + goto ablkcipher_req_alloc_failure; + } + init_completion(&result.completion); + (dm_qcrypto_func.cipher_flag)(req, + QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); + + crypto_ablkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS); + + ablkcipher_request_set_crypt(req, io->req_split_sg_read, + io->req_split_sg_read, io->size, (void *) io->IV); + + err = crypto_ablkcipher_decrypt(req); + switch (err) { + case 0: + break; + + case -EBUSY: + /* + * Lets make this synchronous request by waiting on + * in progress as well + */ + 
case -EINPROGRESS: + wait_for_completion_io(&result.completion); + if (result.err) { + DMERR("%s error = %d encrypting the request\n", + __func__, result.err); + err = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + break; + + default: + err = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + err = 0; +ablkcipher_req_alloc_failure: + if (req) + ablkcipher_request_free(req); + + req_crypt_split_io_complete(&io->result, err); +} + +static void req_cryptd_split_req_queue(struct req_dm_split_req_io *io) +{ + INIT_WORK(&io->work, req_cryptd_split_req_queue_cb); + queue_work(req_crypt_split_io_queue, &io->work); +} + +static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io) +{ + INIT_WORK(&io->work, req_cryptd_crypt); + queue_work(req_crypt_queue, &io->work); +} + +/* + * Cipher complete callback, this is triggered by the Linux crypto api once + * the operation is done. This signals the waiting thread that the crypto + * operation is complete. + */ +static void req_crypt_cipher_complete(struct crypto_async_request *req, int err) +{ + struct req_crypt_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + +static void req_crypt_split_io_complete(struct req_crypt_result *res, int err) +{ + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} +/* + * If bio->bi_dev is a partition, remap the location + */ +static inline void req_crypt_blk_partition_remap(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + + if (bio_sectors(bio) && bdev != bdev->bd_contains) { + struct hd_struct *p = bdev->bd_part; + /* + * Check for integer overflow, should never happen. + */ + if (p->start_sect > (UINT_MAX - bio->bi_iter.bi_sector)) + BUG(); + + bio->bi_iter.bi_sector += p->start_sect; + bio->bi_bdev = bdev->bd_contains; + } +} + +/* + * The endio function is called from ksoftirqd context (atomic). + * For write operations the new pages created form the mempool + * is freed and returned. * For read operations, decryption is + * required, since this is called in a atomic * context, the + * request is sent to a worker queue to complete decryptiona and + * free the request once done. + */ +static int req_crypt_endio(struct dm_target *ti, struct request *clone, + int error, union map_info *map_context) +{ + int err = 0; + struct req_iterator iter1; + struct bio_vec bvec; + struct req_dm_crypt_io *req_io = map_context->ptr; + + /* If it is for ICE, free up req_io and return */ + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + mempool_free(req_io, req_io_pool); + err = error; + goto submit_request; + } + + if (rq_data_dir(clone) == WRITE) { + rq_for_each_segment(bvec, clone, iter1) { + if (req_io->should_encrypt && bvec.bv_offset == 0) { + mempool_free(bvec.bv_page, req_page_pool); + bvec.bv_page = NULL; + } else + bvec.bv_page = NULL; + } + mempool_free(req_io, req_io_pool); + goto submit_request; + } else if (rq_data_dir(clone) == READ) { + req_io->error = error; + req_cryptd_queue_crypt(req_io); + err = DM_ENDIO_INCOMPLETE; + goto submit_request; + } + +submit_request: + return err; +} + +/* + * This function is called with interrupts disabled + * The function remaps the clone for the underlying device. 
+ * If it is a write request, it calls into the worker queue to + * encrypt the data + * and submit the request directly using the elevator + * For a read request no pre-processing is required the request + * is returned to dm once mapping is done + */ +static int req_crypt_map(struct dm_target *ti, struct request *clone, + union map_info *map_context) +{ + struct req_dm_crypt_io *req_io = NULL; + int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0; + struct bio *bio_src = NULL; + gfp_t gfp_flag = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled()) + gfp_flag = GFP_NOWAIT; + + req_io = mempool_alloc(req_io_pool, gfp_flag); + if (!req_io) { + WARN_ON(1); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + + /* Save the clone in the req_io, the callback to the worker + * queue will get the req_io + */ + req_io->cloned_request = clone; + map_context->ptr = req_io; + atomic_set(&req_io->pending, 0); + + if (rq_data_dir(clone) == WRITE) + req_io->should_encrypt = req_crypt_should_encrypt(req_io); + if (rq_data_dir(clone) == READ) + req_io->should_decrypt = req_crypt_should_deccrypt(req_io); + + /* Get the queue of the underlying original device */ + clone->q = bdev_get_queue(dev->bdev); + clone->rq_disk = dev->bdev->bd_disk; + + __rq_for_each_bio(bio_src, clone) { + bio_src->bi_bdev = dev->bdev; + /* Currently the way req-dm works is that once the underlying + * device driver completes the request by calling into the + * block layer. The block layer completes the bios (clones) and + * then the cloned request. This is undesirable for req-dm-crypt + * hence added a flag BIO_DONTFREE, this flag will ensure that + * blk layer does not complete the cloned bios before completing + * the request. When the crypt endio is called, post-processing + * is done and then the dm layer will complete the bios (clones) + * and free them. + */ + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) + bio_src->bi_flags |= 1 << BIO_INLINECRYPT; + else + bio_src->bi_flags |= 1 << BIO_DONTFREE; + + /* + * If this device has partitions, remap block n + * of partition p to block n+start(p) of the disk. + */ + req_crypt_blk_partition_remap(bio_src); + if (copy_bio_sector_to_req == 0) { + clone->__sector = bio_src->bi_iter.bi_sector; + copy_bio_sector_to_req++; + } + blk_queue_bounce(clone->q, &bio_src); + } + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + /* Set all crypto parameters for inline crypto engine */ + memcpy(&req_io->ice_settings, ice_settings, + sizeof(struct ice_crypto_setting)); + } else { + /* ICE checks for key_index which could be >= 0. If a chip has + * both ICE and GPCE and wanted to use GPCE, there could be + * issue. Storage driver send all requests to ICE driver. If + * it sees key_index as 0, it would assume it is for ICE while + * it is not. Hence set invalid key index by default. 
+ */ + req_io->ice_settings.key_index = -1; + + } + + if (rq_data_dir(clone) == READ || + encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + error = DM_MAPIO_REMAPPED; + goto submit_request; + } else if (rq_data_dir(clone) == WRITE) { + req_cryptd_queue_crypt(req_io); + error = DM_MAPIO_SUBMITTED; + goto submit_request; + } + +submit_request: + return error; + +} + +static void deconfigure_qcrypto(void) +{ + if (req_page_pool) { + mempool_destroy(req_page_pool); + req_page_pool = NULL; + } + + if (req_scatterlist_pool) { + mempool_destroy(req_scatterlist_pool); + req_scatterlist_pool = NULL; + } + + if (req_crypt_split_io_queue) { + destroy_workqueue(req_crypt_split_io_queue); + req_crypt_split_io_queue = NULL; + } + if (req_crypt_queue) { + destroy_workqueue(req_crypt_queue); + req_crypt_queue = NULL; + } + + kmem_cache_destroy(_req_dm_scatterlist_pool); + + mutex_lock(&engine_list_mutex); + kfree(pfe_eng); + pfe_eng = NULL; + kfree(fde_eng); + fde_eng = NULL; + mutex_unlock(&engine_list_mutex); + + if (tfm) { + crypto_free_ablkcipher(tfm); + tfm = NULL; + } +} + +static void req_crypt_dtr(struct dm_target *ti) +{ + DMDEBUG("dm-req-crypt Destructor.\n"); + + mempool_destroy(req_io_pool); + req_io_pool = NULL; + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + kfree(ice_settings); + ice_settings = NULL; + } else { + deconfigure_qcrypto(); + } + + if (_req_crypt_io_pool) + kmem_cache_destroy(_req_crypt_io_pool); + + if (dev) { + dm_put_device(ti, dev); + dev = NULL; + } +} + +static int configure_qcrypto(void) +{ + struct crypto_engine_entry *eng_list = NULL; + struct block_device *bdev = NULL; + int err = DM_REQ_CRYPT_ERROR, i; + struct request_queue *q = NULL; + + bdev = dev->bdev; + q = bdev_get_queue(bdev); + blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE); + + /* Allocate the crypto alloc blk cipher and keep the handle */ + tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0); + if (IS_ERR(tfm)) { + DMERR("%s ablkcipher tfm allocation failed : error\n", + __func__); + tfm = NULL; + goto exit_err; + } + + num_engines_fde = num_engines_pfe = 0; + + mutex_lock(&engine_list_mutex); + num_engines = (dm_qcrypto_func.get_num_engines)(); + if (!num_engines) { + DMERR(KERN_INFO "%s qcrypto_get_num_engines failed\n", + __func__); + err = DM_REQ_CRYPT_ERROR; + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + eng_list = kcalloc(num_engines, sizeof(*eng_list), GFP_KERNEL); + if (eng_list == NULL) { + DMERR("%s engine list allocation failed\n", __func__); + err = DM_REQ_CRYPT_ERROR; + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + (dm_qcrypto_func.get_engine_list)(num_engines, eng_list); + + for (i = 0; i < num_engines; i++) { + if (eng_list[i].ce_device == FDE_KEY_ID) + num_engines_fde++; + if (eng_list[i].ce_device == PFE_KEY_ID) + num_engines_pfe++; + } + + fde_eng = kcalloc(num_engines_fde, sizeof(*fde_eng), GFP_KERNEL); + if (fde_eng == NULL) { + DMERR("%s fde engine list allocation failed\n", __func__); + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + pfe_eng = kcalloc(num_engines_pfe, sizeof(*pfe_eng), GFP_KERNEL); + if (pfe_eng == NULL) { + DMERR("%s pfe engine list allocation failed\n", __func__); + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + fde_cursor = 0; + pfe_cursor = 0; + + for (i = 0; i < num_engines; i++) { + if (eng_list[i].ce_device == FDE_KEY_ID) + fde_eng[fde_cursor++] = eng_list[i]; + if (eng_list[i].ce_device == PFE_KEY_ID) + pfe_eng[pfe_cursor++] = eng_list[i]; + } + + fde_cursor = 0; + 
pfe_cursor = 0; + mutex_unlock(&engine_list_mutex); + + _req_dm_scatterlist_pool = kmem_cache_create("req_dm_scatterlist", + sizeof(struct scatterlist) * MAX_SG_LIST, + __alignof__(struct scatterlist), 0, NULL); + if (!_req_dm_scatterlist_pool) + goto exit_err; + + req_crypt_queue = alloc_workqueue("req_cryptd", + WQ_UNBOUND | + WQ_CPU_INTENSIVE | + WQ_MEM_RECLAIM, + 0); + if (!req_crypt_queue) { + DMERR("%s req_crypt_queue not allocated\n", __func__); + goto exit_err; + } + + req_crypt_split_io_queue = alloc_workqueue("req_crypt_split", + WQ_UNBOUND | + WQ_CPU_INTENSIVE | + WQ_MEM_RECLAIM, + 0); + if (!req_crypt_split_io_queue) { + DMERR("%s req_crypt_split_io_queue not allocated\n", __func__); + goto exit_err; + } + req_scatterlist_pool = mempool_create_slab_pool(MIN_IOS, + _req_dm_scatterlist_pool); + if (!req_scatterlist_pool) { + DMERR("%s req_scatterlist_pool is not allocated\n", __func__); + err = -ENOMEM; + goto exit_err; + } + + req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); + if (!req_page_pool) { + DMERR("%s req_page_pool not allocated\n", __func__); + goto exit_err; + } + + err = 0; + +exit_err: + kfree(eng_list); + return err; +} + +/* + * Construct an encryption mapping: + * <cipher> <key> <iv_offset> <dev_path> <start> + */ +static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + int err = DM_REQ_CRYPT_ERROR; + unsigned long long tmpll; + char dummy; + int ret; + + DMDEBUG("dm-req-crypt Constructor.\n"); + + if (argc < 5) { + DMERR(" %s Not enough args\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + + if (argv[3]) { + if (dm_get_device(ti, argv[3], + dm_table_get_mode(ti->table), &dev)) { + DMERR(" %s Device Lookup failed\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + DMERR(" %s Arg[3] invalid\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + + if (argv[4]) { + if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { + DMERR("%s Invalid device sector\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + DMERR(" %s Arg[4] invalid\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + start_sector_orig = tmpll; + + /* Allow backward compatible */ + if (argc >= 6) { + if (argv[5]) { + if (!strcmp(argv[5], "fde_enabled")) + is_fde_enabled = true; + else + is_fde_enabled = false; + } else { + DMERR(" %s Arg[5] invalid\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__); + is_fde_enabled = true; /* backward compatible */ + } + + _req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0); + if (!_req_crypt_io_pool) { + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + + encryption_mode = DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO; + if (argc >= 7 && argv[6]) { + if (!strcmp(argv[6], "ice")) + encryption_mode = + DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT; + } + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + /* configure ICE settings */ + ice_settings = + kzalloc(sizeof(struct ice_crypto_setting), GFP_KERNEL); + if (!ice_settings) { + err = -ENOMEM; + goto ctr_exit; + } + ice_settings->key_size = ICE_CRYPTO_KEY_SIZE_128; + ice_settings->algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; + ice_settings->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY; + if (kstrtou16(argv[1], 0, &ice_settings->key_index) || + ice_settings->key_index < 0 || + ice_settings->key_index > MAX_MSM_ICE_KEY_LUT_SIZE) { + DMERR("%s Err: key index %d received for ICE\n", + __func__, 
ice_settings->key_index); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + ret = configure_qcrypto(); + if (ret) { + DMERR("%s failed to configure qcrypto\n", __func__); + err = ret; + goto ctr_exit; + } + } + + req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool); + if (!req_io_pool) { + DMERR("%s req_io_pool not allocated\n", __func__); + err = -ENOMEM; + goto ctr_exit; + } + + /* + * If underlying device supports flush/discard, mapped target + * should also allow it + */ + ti->num_flush_bios = 1; + ti->num_discard_bios = 1; + + err = 0; + DMINFO("%s: Mapping block_device %s to dm-req-crypt ok!\n", + __func__, argv[3]); +ctr_exit: + if (err) + req_crypt_dtr(ti); + + return err; +} + +static int req_crypt_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + return fn(ti, dev, start_sector_orig, ti->len, data); +} +void set_qcrypto_func_dm(void *dev, + void *flag, + void *engines, + void *engine_list) +{ + dm_qcrypto_func.cipher_set = dev; + dm_qcrypto_func.cipher_flag = flag; + dm_qcrypto_func.get_num_engines = engines; + dm_qcrypto_func.get_engine_list = engine_list; +} +EXPORT_SYMBOL(set_qcrypto_func_dm); + +static struct target_type req_crypt_target = { + .name = "req-crypt", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = req_crypt_ctr, + .dtr = req_crypt_dtr, + .map_rq = req_crypt_map, + .rq_end_io = req_crypt_endio, + .iterate_devices = req_crypt_iterate_devices, +}; + +static int __init req_dm_crypt_init(void) +{ + int r; + + + r = dm_register_target(&req_crypt_target); + if (r < 0) { + DMERR("register failed %d", r); + return r; + } + + DMINFO("dm-req-crypt successfully initalized.\n"); + + return r; +} + +static void __exit req_dm_crypt_exit(void) +{ + dm_unregister_target(&req_crypt_target); +} + +module_init(req_dm_crypt_init); +module_exit(req_dm_crypt_exit); + +MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index b3d78bba3a79..9411deaaddf9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1660,7 +1660,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) char b[BDEVNAME_SIZE]; if (likely(q)) - r |= bdi_congested(&q->backing_dev_info, bdi_bits); + r |= bdi_congested(q->backing_dev_info, bdi_bits); else DMWARN_LIMIT("%s: any_congested: nonexistent device %s", dm_device_name(t->md), diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index a1cc797fe88f..5f1a943d9e81 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2634,7 +2634,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) return 1; q = bdev_get_queue(pt->data_dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static void requeue_bios(struct pool *pool) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3d9a80759d95..f002d2ce9c9f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1147,7 +1147,7 @@ static void free_rq_clone(struct request *clone) * Must be called without clone's queue lock held, * see end_clone_request() for more details. */ -static void dm_end_request(struct request *clone, int error) +void dm_end_request(struct request *clone, int error) { int rw = rq_data_dir(clone); struct dm_rq_target_io *tio = clone->end_io_data; @@ -1345,7 +1345,7 @@ static void dm_complete_request(struct request *rq, int error) * Target's rq_end_io() function isn't called. 
* This may be used when the target's map_rq() or clone_and_map_rq() functions fail. */ -static void dm_kill_unmapped_request(struct request *rq, int error) +void dm_kill_unmapped_request(struct request *rq, int error) { rq->cmd_flags |= REQ_FAILED; dm_complete_request(rq, error); @@ -1862,6 +1862,13 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq) dm_complete_request(rq, r); } +void dm_dispatch_request(struct request *rq) +{ + struct dm_rq_target_io *tio = tio_from_request(rq); + + dm_dispatch_clone_request(tio->clone, rq); +} + static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, void *data) { @@ -2185,8 +2192,11 @@ static void dm_request_fn(struct request_queue *q) tio = tio_from_request(rq); /* Establish tio->ti before queuing work (map_tio_request) */ tio->ti = ti; - queue_kthread_work(&md->kworker, &tio->work); + spin_unlock(q->queue_lock); + if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) + dm_requeue_original_request(md, rq); BUG_ON(!irqs_disabled()); + spin_lock(q->queue_lock); } goto out; @@ -2211,7 +2221,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) * the query about congestion status of request_queue */ if (dm_request_based(md)) - r = md->queue->backing_dev_info.wb.state & + r = md->queue->backing_dev_info->wb.state & bdi_bits; else r = dm_table_any_congested(map, bdi_bits); @@ -2293,7 +2303,7 @@ static void dm_init_md_queue(struct mapped_device *md) * - must do so here (in alloc_dev callchain) before queue is used */ md->queue->queuedata = md; - md->queue->backing_dev_info.congested_data = md; + md->queue->backing_dev_info->congested_data = md; } static void dm_init_old_md_queue(struct mapped_device *md) @@ -2304,7 +2314,7 @@ static void dm_init_old_md_queue(struct mapped_device *md) /* * Initialize aspects of queue that aren't relevant for blk-mq */ - md->queue->backing_dev_info.congested_fn = dm_any_congested; + md->queue->backing_dev_info->congested_fn = dm_any_congested; blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); } diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 7ffb20ec1a46..b19205ea1a10 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -68,7 +68,7 @@ static int linear_congested(struct mddev *mddev, int bits) for (i = 0; i < conf->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } rcu_read_unlock(); diff --git a/drivers/md/md.c b/drivers/md/md.c index 62c3328e2a1d..3a9685fe115c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5286,8 +5286,8 @@ int md_run(struct mddev *mddev) return err; } if (mddev->queue) { - mddev->queue->backing_dev_info.congested_data = mddev; - mddev->queue->backing_dev_info.congested_fn = md_congested; + mddev->queue->backing_dev_info->congested_data = mddev; + mddev->queue->backing_dev_info->congested_fn = md_congested; } if (pers->sync_request) { if (mddev->kobj.sd && @@ -5644,7 +5644,7 @@ static int do_md_stop(struct mddev *mddev, int mode, __md_stop_writes(mddev); __md_stop(mddev); - mddev->queue->backing_dev_info.congested_fn = NULL; + mddev->queue->backing_dev_info->congested_fn = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index dd483bb2e111..fb03ed86d57a 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -166,7 +166,7 @@ static int 
multipath_congested(struct mddev *mddev, int bits) if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); /* Just like multipath_map, we just check the * first available device */ diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f8e5db0cb5aa..7a67e7dcf546 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -35,7 +35,7 @@ static int raid0_congested(struct mddev *mddev, int bits) for (i = 0; i < raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(devlist[i]->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; } @@ -415,8 +415,8 @@ static int raid0_run(struct mddev *mddev) */ int stripe = mddev->raid_disks * (mddev->chunk_sectors << 9) / PAGE_SIZE; - if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) - mddev->queue->backing_dev_info.ra_pages = 2* stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2* stripe) + mddev->queue->backing_dev_info->ra_pages = 2* stripe; } dump_zones(mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f24a9e14021d..a3ec3c5a8ee9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -730,9 +730,9 @@ static int raid1_congested(struct mddev *mddev, int bits) * non-congested targets, it can be removed */ if ((bits & (1 << WB_async_congested)) || 1) - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); else - ret &= bdi_congested(&q->backing_dev_info, bits); + ret &= bdi_congested(q->backing_dev_info, bits); } } rcu_read_unlock(); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index bf0410403a6f..45e7a47e5f7b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -838,7 +838,7 @@ static int raid10_congested(struct mddev *mddev, int bits) if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } } rcu_read_unlock(); @@ -3717,8 +3717,8 @@ static int run(struct mddev *mddev) * maybe... 
*/ stripe /= conf->geo.near_copies; - if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } if (md_integrity_register(mddev)) @@ -4513,8 +4513,8 @@ static void end_reshape(struct r10conf *conf) int stripe = conf->geo.raid_disks * ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); stripe /= conf->geo.near_copies; - if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } conf->fullsync = 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e2130fb4597d..9284acea4f7b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6141,10 +6141,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) mddev_suspend(mddev); conf->skip_copy = new; if (new) - mddev->queue->backing_dev_info.capabilities |= + mddev->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; else - mddev->queue->backing_dev_info.capabilities &= + mddev->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; mddev_resume(mddev); } @@ -6988,8 +6988,8 @@ static int run(struct mddev *mddev) int data_disks = conf->previous_raid_disks - conf->max_degraded; int stripe = data_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); - if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + mddev->queue->backing_dev_info->ra_pages = 2 * stripe; chunk_size = mddev->chunk_sectors << 9; blk_queue_io_min(mddev->queue, chunk_size); @@ -7570,8 +7570,8 @@ static void end_reshape(struct r5conf *conf) int data_disks = conf->raid_disks - conf->max_degraded; int stripe = data_disks * ((conf->chunk_sectors << 9) / PAGE_SIZE); - if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } } } |
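The new target registers with device-mapper under the name "req-crypt", and its constructor (req_crypt_ctr() above) parses a table of the form <cipher> <key> <iv_offset> <dev_path> <start>, with optional "fde_enabled"/"fde_disabled" and "ice" arguments. The sketch below shows how such a mapping might be set up from userspace; it is an illustration only, and the device path, sector count, cipher string and key index are placeholders, not values taken from this patch.

    # Build the target as a module (Kconfig option added by this patch):
    #   CONFIG_DM_REQ_CRYPT=m
    #
    # Hypothetical mapping; table layout per req_crypt_ctr():
    #   <start> <length> req-crypt <cipher> <key> <iv_offset> <dev_path> <start_sector> [fde_enabled|fde_disabled] [ice]
    # /dev/mmcblk0p25, the 8388608-sector length, "aes-xts" and key index 0
    # are placeholders.
    dmsetup create encdev --table \
        "0 8388608 req-crypt aes-xts 0 0 /dev/mmcblk0p25 0 fde_enabled ice"

Note that in ICE mode the <key> argument is interpreted as the hardware key-LUT index (kstrtou16(argv[1], ...) in the constructor), while in the qcrypto path the key comes from the engine's pipe key (QCRYPTO_CTX_USE_PIPE_KEY with a NULL setkey) rather than from the table.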
