Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig     |   27
-rw-r--r--  crypto/Makefile    |    2
-rw-r--r--  crypto/algapi.c    |   15
-rw-r--r--  crypto/api.c       |   13
-rw-r--r--  crypto/blkcipher.c |   21
-rw-r--r--  crypto/gf128mul.c  |  171
-rw-r--r--  crypto/heh.c       | 1033
-rw-r--r--  crypto/internal.h  |    3
-rw-r--r--  crypto/shash.c     |    8
-rw-r--r--  crypto/skcipher.c  |  200
-rw-r--r--  crypto/testmgr.c   |   31
-rw-r--r--  crypto/testmgr.h   |  268
-rw-r--r--  crypto/zstd.c      |  209
13 files changed, 1957 insertions, 44 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 7240821137fd..7df8c42a3e01 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -289,6 +289,24 @@ config CRYPTO_CBC CBC: Cipher Block Chaining mode This block cipher algorithm is required for IPSec. +config CRYPTO_HEH + tristate "HEH support" + select CRYPTO_CMAC + select CRYPTO_ECB + select CRYPTO_GF128MUL + select CRYPTO_MANAGER + select CRYPTO_POLY_HASH_ARM64_CE if ARM64 && KERNEL_MODE_NEON + help + HEH: Hash-Encrypt-Hash mode + HEH is a proposed block cipher mode of operation which extends the + strong pseudo-random permutation (SPRP) property of block ciphers to + arbitrary-length input strings. This provides a stronger notion of + security than existing block cipher modes of operation (e.g. CBC, CTR, + XTS), though it is usually less performant. Applications include disk + encryption and encryption of file names and contents. Currently, this + implementation only provides a symmetric cipher interface, so it can't + yet be used as an AEAD. + config CRYPTO_CTR tristate "CTR support" select CRYPTO_BLKCIPHER @@ -1545,6 +1563,15 @@ config CRYPTO_LZ4HC help This is the LZ4 high compression mode algorithm. +config CRYPTO_ZSTD + tristate "Zstd compression algorithm" + select CRYPTO_ALGAPI + select CRYPTO_ACOMP2 + select ZSTD_COMPRESS + select ZSTD_DECOMPRESS + help + This is the zstd algorithm. + comment "Random Number Generation" config CRYPTO_ANSI_CPRNG diff --git a/crypto/Makefile b/crypto/Makefile index 03e66097eb0c..34a6b0bbccef 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o +obj-$(CONFIG_CRYPTO_HEH) += heh.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_CTS) += cts.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o @@ -124,6 +125,7 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o +obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o # # generic algorithms and the async_tx api diff --git a/crypto/algapi.c b/crypto/algapi.c index eb58b73ca925..ac70fd5cd404 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1001,6 +1001,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_alg_extsize); +int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, + u32 type, u32 mask) +{ + int ret = 0; + struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask); + + if (!IS_ERR(alg)) { + crypto_mod_put(alg); + ret = 1; + } + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_type_has_alg); + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/crypto/api.c b/crypto/api.c index bbc147cb5dec..e5c1abfd451f 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/completion.h> #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -611,5 +612,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request *req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git 
a/crypto/blkcipher.c b/crypto/blkcipher.c index d524f838eb10..a1150dce0f02 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -371,6 +371,27 @@ int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, } EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block); +/* + * This function allows ablkcipher algorithms to use the blkcipher_walk API to + * walk over their data. The specified crypto_ablkcipher tfm is used to + * initialize the struct blkcipher_walk, and the crypto_blkcipher specified in + * desc->tfm is never used so it can be left NULL. (Yes, this design is ugly, + * but it parallels blkcipher_aead_walk_virt_block() above. In the 4.10 kernel + * this is starting to be cleaned up...) + */ +int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_ablkcipher *tfm) +{ + walk->flags &= ~BLKCIPHER_WALK_PHYS; + walk->walk_blocksize = crypto_ablkcipher_blocksize(tfm); + walk->cipher_blocksize = walk->walk_blocksize; + walk->ivsize = crypto_ablkcipher_ivsize(tfm); + walk->alignmask = crypto_ablkcipher_alignmask(tfm); + return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_ablkcipher_walk_virt); + static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 5276607c72d0..f3d9f6da0767 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -44,7 +44,7 @@ --------------------------------------------------------------------------- Issue 31/01/2006 - This file provides fast multiplication in GF(128) as required by several + This file provides fast multiplication in GF(2^128) as required by several cryptographic authentication modes */ @@ -88,37 +88,52 @@ q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \ } -/* Given the value i in 0..255 as the byte overflow when a field element - in GHASH is multiplied by x^8, this function will return the values that - are generated in the lo 16-bit word of the field value by applying the - modular polynomial. The values lo_byte and hi_byte are returned via the - macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into - memory as required by a suitable definition of this macro operating on - the table above -*/ - -#define xx(p, q) 0x##p##q +/* + * Given a value i in 0..255 as the byte overflow when a field element + * in GF(2^128) is multiplied by x^8, the following macro returns the + * 16-bit value that must be XOR-ed into the low-degree end of the + * product to reduce it modulo the irreducible polynomial x^128 + x^7 + + * x^2 + x + 1. + * + * There are two versions of the macro, and hence two tables: one for + * the "be" convention where the highest-order bit is the coefficient of + * the highest-degree polynomial term, and one for the "le" convention + * where the highest-order bit is the coefficient of the lowest-degree + * polynomial term. In both cases the values are stored in CPU byte + * endianness such that the coefficients are ordered consistently across + * bytes, i.e. in the "be" table bits 15..0 of the stored value + * correspond to the coefficients of x^15..x^0, and in the "le" table + * bits 15..0 correspond to the coefficients of x^0..x^15. 
+ * + * Therefore, provided that the appropriate byte endianness conversions + * are done by the multiplication functions (and these must be in place + * anyway to support both little endian and big endian CPUs), the "be" + * table can be used for multiplications of both "bbe" and "ble" + * elements, and the "le" table can be used for multiplications of both + * "lle" and "lbe" elements. + */ -#define xda_bbe(i) ( \ - (i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \ - (i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \ - (i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \ - (i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \ +#define xda_be(i) ( \ + (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \ + (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \ + (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \ + (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \ ) -#define xda_lle(i) ( \ - (i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \ - (i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \ - (i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \ - (i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \ +#define xda_le(i) ( \ + (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \ + (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \ + (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \ + (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \ ) -static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle); -static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe); +static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le); +static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be); -/* These functions multiply a field element by x, by x^4 and by x^8 - * in the polynomial field representation. It uses 32-bit word operations - * to gain speed but compensates for machine endianess and hence works +/* + * The following functions multiply a field element by x or by x^8 in + * the polynomial field representation. They use 64-bit word operations + * to gain speed but compensate for machine endianness and hence work * correctly on both styles of machine. 
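As a quick sanity check of the "be" constants above: each overflowed coefficient x^(128+k) folds back modulo x^128 + x^7 + x^2 + x + 1 as x^k * (x^7 + x^2 + x + 1), i.e. 0x87 shifted left by k. A minimal user-space sketch (illustrative only, not part of the patch; the function name is made up):

#include <stdint.h>
#include <stdio.h>

/* Reference for xda_be(i): bit k of the overflow byte i is the
 * coefficient of x^(128+k), which reduces to 0x87 << k. */
static uint16_t xda_be_ref(unsigned int i)
{
        uint16_t r = 0;
        unsigned int k;

        for (k = 0; k < 8; k++)
                if (i & (1U << k))
                        r ^= (uint16_t)(0x87 << k);
        return r;
}

int main(void)
{
        /* Expect 0087 and 4380, matching xda_be(0x01) and xda_be(0x80). */
        printf("%04x %04x\n", xda_be_ref(0x01), xda_be_ref(0x80));
        return 0;
}

The same folding with the bit order mirrored reproduces the "le" table entries.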
*/ @@ -126,7 +141,7 @@ static void gf128mul_x_lle(be128 *r, const be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_lle[(b << 7) & 0xff]; + u64 _tt = gf128mul_table_le[(b << 7) & 0xff]; r->b = cpu_to_be64((b >> 1) | (a << 63)); r->a = cpu_to_be64((a >> 1) ^ (_tt << 48)); @@ -136,7 +151,7 @@ static void gf128mul_x_bbe(be128 *r, const be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_bbe[a >> 63]; + u64 _tt = gf128mul_table_be[a >> 63]; r->a = cpu_to_be64((a << 1) | (b >> 63)); r->b = cpu_to_be64((b << 1) ^ _tt); @@ -146,7 +161,7 @@ void gf128mul_x_ble(be128 *r, const be128 *x) { u64 a = le64_to_cpu(x->a); u64 b = le64_to_cpu(x->b); - u64 _tt = gf128mul_table_bbe[b >> 63]; + u64 _tt = gf128mul_table_be[b >> 63]; r->a = cpu_to_le64((a << 1) ^ _tt); r->b = cpu_to_le64((b << 1) | (a >> 63)); @@ -157,7 +172,7 @@ static void gf128mul_x8_lle(be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_lle[b & 0xff]; + u64 _tt = gf128mul_table_le[b & 0xff]; x->b = cpu_to_be64((b >> 8) | (a << 56)); x->a = cpu_to_be64((a >> 8) ^ (_tt << 48)); @@ -167,12 +182,22 @@ static void gf128mul_x8_bbe(be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_bbe[a >> 56]; + u64 _tt = gf128mul_table_be[a >> 56]; x->a = cpu_to_be64((a << 8) | (b >> 56)); x->b = cpu_to_be64((b << 8) ^ _tt); } +static void gf128mul_x8_ble(be128 *x) +{ + u64 a = le64_to_cpu(x->b); + u64 b = le64_to_cpu(x->a); + u64 _tt = gf128mul_table_be[a >> 56]; + + x->b = cpu_to_le64((a << 8) | (b >> 56)); + x->a = cpu_to_le64((b << 8) ^ _tt); +} + void gf128mul_lle(be128 *r, const be128 *b) { be128 p[8]; @@ -249,9 +274,48 @@ void gf128mul_bbe(be128 *r, const be128 *b) } EXPORT_SYMBOL(gf128mul_bbe); +void gf128mul_ble(be128 *r, const be128 *b) +{ + be128 p[8]; + int i; + + p[0] = *r; + for (i = 0; i < 7; ++i) + gf128mul_x_ble((be128 *)&p[i + 1], (be128 *)&p[i]); + + memset(r, 0, sizeof(*r)); + for (i = 0;;) { + u8 ch = ((u8 *)b)[15 - i]; + + if (ch & 0x80) + be128_xor(r, r, &p[7]); + if (ch & 0x40) + be128_xor(r, r, &p[6]); + if (ch & 0x20) + be128_xor(r, r, &p[5]); + if (ch & 0x10) + be128_xor(r, r, &p[4]); + if (ch & 0x08) + be128_xor(r, r, &p[3]); + if (ch & 0x04) + be128_xor(r, r, &p[2]); + if (ch & 0x02) + be128_xor(r, r, &p[1]); + if (ch & 0x01) + be128_xor(r, r, &p[0]); + + if (++i >= 16) + break; + + gf128mul_x8_ble(r); + } +} +EXPORT_SYMBOL(gf128mul_ble); + + /* This version uses 64k bytes of table space. A 16 byte buffer has to be multiplied by a 16 byte key - value in GF(128). If we consider a GF(128) value in + value in GF(2^128). If we consider a GF(2^128) value in the buffer's lowest byte, we can construct a table of the 256 16 byte values that result from the 256 values of this byte. This requires 4096 bytes. But we also @@ -352,8 +416,8 @@ void gf128mul_free_64k(struct gf128mul_64k *t) int i; for (i = 0; i < 16; i++) - kfree(t->t[i]); - kfree(t); + kzfree(t->t[i]); + kzfree(t); } EXPORT_SYMBOL(gf128mul_free_64k); @@ -385,7 +449,7 @@ EXPORT_SYMBOL(gf128mul_64k_bbe); /* This version uses 4k bytes of table space. A 16 byte buffer has to be multiplied by a 16 byte key - value in GF(128). If we consider a GF(128) value in a + value in GF(2^128). If we consider a GF(2^128) value in a single byte, we can construct a table of the 256 16 byte values that result from the 256 values of this byte. This requires 4096 bytes. 
If we take the highest byte in @@ -443,6 +507,28 @@ out: } EXPORT_SYMBOL(gf128mul_init_4k_bbe); +struct gf128mul_4k *gf128mul_init_4k_ble(const be128 *g) +{ + struct gf128mul_4k *t; + int j, k; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + goto out; + + t->t[1] = *g; + for (j = 1; j <= 64; j <<= 1) + gf128mul_x_ble(&t->t[j + j], &t->t[j]); + + for (j = 2; j < 256; j += j) + for (k = 1; k < j; ++k) + be128_xor(&t->t[j + k], &t->t[j], &t->t[k]); + +out: + return t; +} +EXPORT_SYMBOL(gf128mul_init_4k_ble); + void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t) { u8 *ap = (u8 *)a; @@ -473,5 +559,20 @@ void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t) } EXPORT_SYMBOL(gf128mul_4k_bbe); +void gf128mul_4k_ble(be128 *a, struct gf128mul_4k *t) +{ + u8 *ap = (u8 *)a; + be128 r[1]; + int i = 15; + + *r = t->t[ap[15]]; + while (i--) { + gf128mul_x8_ble(r); + be128_xor(r, r, &t->t[ap[i]]); + } + *a = *r; +} +EXPORT_SYMBOL(gf128mul_4k_ble); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)"); diff --git a/crypto/heh.c b/crypto/heh.c new file mode 100644 index 000000000000..10c00aaf797e --- /dev/null +++ b/crypto/heh.c @@ -0,0 +1,1033 @@ +/* + * HEH: Hash-Encrypt-Hash mode + * + * Copyright (c) 2016 Google Inc. + * + * Authors: + * Alex Cope <alexcope@google.com> + * Eric Biggers <ebiggers@google.com> + */ + +/* + * Hash-Encrypt-Hash (HEH) is a proposed block cipher mode of operation which + * extends the strong pseudo-random permutation (SPRP) property of block ciphers + * (e.g. AES) to arbitrary length input strings. It uses two keyed invertible + * hash functions with a layer of ECB encryption applied in-between. The + * algorithm is specified by the following Internet Draft: + * + * https://tools.ietf.org/html/draft-cope-heh-01 + * + * Although HEH can be used as either a regular symmetric cipher or as an AEAD, + * currently this module only provides it as a symmetric cipher. Additionally, + * only 16-byte nonces are supported. + */ + +#include <crypto/gf128mul.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <crypto/skcipher.h> +#include "internal.h" + +/* + * The block size is the size of GF(2^128) elements and also the required block + * size of the underlying block cipher. 
+ */ +#define HEH_BLOCK_SIZE 16 + +struct heh_instance_ctx { + struct crypto_shash_spawn cmac; + struct crypto_shash_spawn poly_hash; + struct crypto_skcipher_spawn ecb; +}; + +struct heh_tfm_ctx { + struct crypto_shash *cmac; + struct crypto_shash *poly_hash; /* keyed with tau_key */ + struct crypto_ablkcipher *ecb; +}; + +struct heh_cmac_data { + u8 nonce[HEH_BLOCK_SIZE]; + __le32 nonce_length; + __le32 aad_length; + __le32 message_length; + __le32 padding; +}; + +struct heh_req_ctx { /* aligned to alignmask */ + be128 beta1_key; + be128 beta2_key; + union { + struct { + struct heh_cmac_data data; + struct shash_desc desc; + /* + crypto_shash_descsize(cmac) */ + } cmac; + struct { + struct shash_desc desc; + /* + crypto_shash_descsize(poly_hash) */ + } poly_hash; + struct { + u8 keystream[HEH_BLOCK_SIZE]; + u8 tmp[HEH_BLOCK_SIZE]; + struct scatterlist tmp_sgl[2]; + struct ablkcipher_request req; + /* + crypto_ablkcipher_reqsize(ecb) */ + } ecb; + } u; +}; + +/* + * Get the offset in bytes to the last full block, or equivalently the length of + * all full blocks excluding the last + */ +static inline unsigned int get_tail_offset(unsigned int len) +{ + len -= len % HEH_BLOCK_SIZE; + return len - HEH_BLOCK_SIZE; +} + +static inline struct heh_req_ctx *heh_req_ctx(struct ablkcipher_request *req) +{ + unsigned int alignmask = crypto_ablkcipher_alignmask( + crypto_ablkcipher_reqtfm(req)); + + return (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), + alignmask + 1); +} + +static inline void async_done(struct crypto_async_request *areq, int err, + int (*next_step)(struct ablkcipher_request *, + u32)) +{ + struct ablkcipher_request *req = areq->data; + + if (err) + goto out; + + err = next_step(req, req->base.flags & ~CRYPTO_TFM_REQ_MAY_SLEEP); + if (err == -EINPROGRESS || + (err == -EBUSY && (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return; +out: + ablkcipher_request_complete(req, err); +} + +/* + * Generate the per-message "beta" keys used by the hashing layers of HEH. The + * first beta key is the CMAC of the nonce, the additional authenticated data + * (AAD), and the lengths in bytes of the nonce, AAD, and message. The nonce + * and AAD are each zero-padded to the next 16-byte block boundary, and the + * lengths are serialized as 4-byte little endian integers and zero-padded to + * the next 16-byte block boundary. + * The second beta key is the first one interpreted as an element in GF(2^128) + * and multiplied by x. + * + * Note that because the nonce and AAD may, in general, be variable-length, the + * key generation must be done by a pseudo-random function (PRF) on + * variable-length inputs. CBC-MAC does not satisfy this, as it is only a PRF + * on fixed-length inputs. CMAC remedies this flaw. Including the lengths of + * the nonce, AAD, and message is also critical to avoid collisions. + * + * That being said, this implementation does not yet operate as an AEAD and + * therefore there is never any AAD, nor are variable-length nonces supported. 
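For intuition, the beta2 derivation described above (beta2 = beta1 * x) is a one-bit left shift plus a conditional reduction in the "ble" convention. A rough user-space equivalent of gf128mul_x_ble(), a sketch that assumes a little-endian host so the two u64 halves can be loaded with memcpy (not part of the patch):

#include <stdint.h>
#include <string.h>

/* Multiply a 16-byte "ble" field element by x: shift the 128-bit value
 * left one bit and fold any x^128 overflow back as x^7 + x^2 + x + 1. */
static void mul_x_ble(uint8_t out[16], const uint8_t in[16])
{
        uint64_t lo, hi, tt;

        memcpy(&lo, in, 8);     /* coefficients of x^0..x^63 */
        memcpy(&hi, in + 8, 8); /* coefficients of x^64..x^127 */
        tt = (hi >> 63) ? 0x87 : 0;
        hi = (hi << 1) | (lo >> 63);
        lo = (lo << 1) ^ tt;
        memcpy(out, &lo, 8);
        memcpy(out + 8, &hi, 8);
}

In the kernel this is the gf128mul_x_ble(beta2_key, beta1_key) call applied to the CMAC output.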
+ */ +static int generate_betas(struct ablkcipher_request *req, + be128 *beta1_key, be128 *beta2_key) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct heh_req_ctx *rctx = heh_req_ctx(req); + struct heh_cmac_data *data = &rctx->u.cmac.data; + struct shash_desc *desc = &rctx->u.cmac.desc; + int err; + + BUILD_BUG_ON(sizeof(*data) != 2 * HEH_BLOCK_SIZE); + memcpy(data->nonce, req->info, HEH_BLOCK_SIZE); + data->nonce_length = cpu_to_le32(HEH_BLOCK_SIZE); + data->aad_length = cpu_to_le32(0); + data->message_length = cpu_to_le32(req->nbytes); + data->padding = cpu_to_le32(0); + + desc->tfm = ctx->cmac; + desc->flags = req->base.flags; + + err = crypto_shash_digest(desc, (const u8 *)data, sizeof(*data), + (u8 *)beta1_key); + if (err) + return err; + + gf128mul_x_ble(beta2_key, beta1_key); + return 0; +} + +/*****************************************************************************/ + +/* + * This is the generic version of poly_hash. It does the GF(2^128) + * multiplication by 'tau_key' using a precomputed table, without using any + * special CPU instructions. On some platforms, an accelerated version (with + * higher cra_priority) may be used instead. + */ + +struct poly_hash_tfm_ctx { + struct gf128mul_4k *tau_key; +}; + +struct poly_hash_desc_ctx { + be128 digest; + unsigned int count; +}; + +static int poly_hash_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(tfm); + be128 key128; + + if (keylen != HEH_BLOCK_SIZE) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (tctx->tau_key) + gf128mul_free_4k(tctx->tau_key); + memcpy(&key128, key, HEH_BLOCK_SIZE); + tctx->tau_key = gf128mul_init_4k_ble(&key128); + if (!tctx->tau_key) + return -ENOMEM; + return 0; +} + +static int poly_hash_init(struct shash_desc *desc) +{ + struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->digest = (be128) { 0 }; + ctx->count = 0; + return 0; +} + +static int poly_hash_update(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); + unsigned int partial = ctx->count % HEH_BLOCK_SIZE; + u8 *dst = (u8 *)&ctx->digest + partial; + + ctx->count += len; + + /* Finishing at least one block? */ + if (partial + len >= HEH_BLOCK_SIZE) { + + if (partial) { + /* Finish the pending block. */ + unsigned int n = HEH_BLOCK_SIZE - partial; + + len -= n; + do { + *dst++ ^= *src++; + } while (--n); + + gf128mul_4k_ble(&ctx->digest, tctx->tau_key); + } + + /* Process zero or more full blocks. */ + while (len >= HEH_BLOCK_SIZE) { + be128 coeff; + + memcpy(&coeff, src, HEH_BLOCK_SIZE); + be128_xor(&ctx->digest, &ctx->digest, &coeff); + src += HEH_BLOCK_SIZE; + len -= HEH_BLOCK_SIZE; + gf128mul_4k_ble(&ctx->digest, tctx->tau_key); + } + dst = (u8 *)&ctx->digest; + } + + /* Continue adding the next block to 'digest'. */ + while (len--) + *dst++ ^= *src++; + return 0; +} + +static int poly_hash_final(struct shash_desc *desc, u8 *out) +{ + struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); + + /* Finish the last block if needed. 
*/ + if (ctx->count % HEH_BLOCK_SIZE) { + struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + + gf128mul_4k_ble(&ctx->digest, tctx->tau_key); + } + + memcpy(out, &ctx->digest, HEH_BLOCK_SIZE); + return 0; +} + +static void poly_hash_exit(struct crypto_tfm *tfm) +{ + struct poly_hash_tfm_ctx *tctx = crypto_tfm_ctx(tfm); + + gf128mul_free_4k(tctx->tau_key); +} + +static struct shash_alg poly_hash_alg = { + .digestsize = HEH_BLOCK_SIZE, + .init = poly_hash_init, + .update = poly_hash_update, + .final = poly_hash_final, + .setkey = poly_hash_setkey, + .descsize = sizeof(struct poly_hash_desc_ctx), + .base = { + .cra_name = "poly_hash", + .cra_driver_name = "poly_hash-generic", + .cra_priority = 100, + .cra_ctxsize = sizeof(struct poly_hash_tfm_ctx), + .cra_exit = poly_hash_exit, + .cra_module = THIS_MODULE, + }, +}; + +/*****************************************************************************/ + +/* + * Split the message into 16-byte blocks, padding out the last block, and use + * the blocks as coefficients in the evaluation of a polynomial over GF(2^128) + * at the secret point 'tau_key'. For ease of implementing the higher-level + * heh_hash_inv() function, the constant and degree-1 coefficients are swapped + * if there is a partial block. + * + * Mathematically, compute: + * if (no partial block) + * k^{N-1} * m_0 + ... + k * m_{N-2} + m_{N-1} + * else if (partial block) + * k^N * m_0 + ... + k^2 * m_{N-2} + k * m_N + m_{N-1} + * + * where: + * k is tau_key + * N is the number of full blocks in the message + * m_i is the i-th full block in the message for i = 0 to N-1 inclusive + * m_N is the partial block of the message zero-padded up to 16 bytes + * + * Note that most of this is now separated out into its own keyed hash + * algorithm, to allow optimized implementations. However, we still handle the + * swapping of the last two coefficients here in the HEH template because this + * simplifies the poly_hash algorithms: they don't have to buffer an extra + * block, don't have to duplicate as much code, and are more similar to GHASH.
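The comment above describes a Horner evaluation. A self-contained user-space model (illustrative only: a naive bit-serial GF(2^128) multiply in the "ble" convention, ignoring the template's swap of the last two coefficients) might look like:

#include <stdint.h>

typedef struct { uint64_t lo, hi; } ble128;     /* bit k = coeff of x^k */

/* Multiply by x: shift left, fold x^128 overflow back as x^7+x^2+x+1. */
static ble128 ble_xtime(ble128 v)
{
        ble128 r = {
                (v.lo << 1) ^ ((v.hi >> 63) ? 0x87 : 0),
                (v.hi << 1) | (v.lo >> 63),
        };

        return r;
}

/* Naive double-and-add multiply, scanning b from x^127 down to x^0. */
static ble128 ble_mul(ble128 a, ble128 b)
{
        ble128 r = { 0, 0 };
        int i;

        for (i = 127; i >= 0; i--) {
                r = ble_xtime(r);
                if ((i < 64 ? b.lo >> i : b.hi >> (i - 64)) & 1) {
                        r.lo ^= a.lo;
                        r.hi ^= a.hi;
                }
        }
        return r;
}

/* Horner evaluation as in poly_hash_update(): absorb each 16-byte
 * block, then multiply the running digest by the key. */
static ble128 poly_hash_ref(ble128 key, const ble128 *m, int nblocks)
{
        ble128 d = { 0, 0 };

        while (nblocks--) {
                d.lo ^= m->lo;
                d.hi ^= m->hi;
                d = ble_mul(d, key);
                m++;
        }
        return d;
}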
+ */ +static int poly_hash(struct ablkcipher_request *req, struct scatterlist *sgl, + be128 *hash) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct shash_desc *desc = &rctx->u.poly_hash.desc; + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int tail_len = req->nbytes - tail_offset; + be128 tail[2]; + unsigned int i, n; + struct sg_mapping_iter miter; + int err; + + desc->tfm = ctx->poly_hash; + desc->flags = req->base.flags; + + /* Handle all full blocks except the last */ + err = crypto_shash_init(desc); + sg_miter_start(&miter, sgl, sg_nents(sgl), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + for (i = 0; i < tail_offset && !err; i += n) { + sg_miter_next(&miter); + n = min_t(unsigned int, miter.length, tail_offset - i); + err = crypto_shash_update(desc, miter.addr, n); + } + sg_miter_stop(&miter); + if (err) + return err; + + /* Handle the last full block and the partial block */ + scatterwalk_map_and_copy(tail, sgl, tail_offset, tail_len, 0); + + if (tail_len != HEH_BLOCK_SIZE) { + /* handle the partial block */ + memset((u8 *)tail + tail_len, 0, sizeof(tail) - tail_len); + err = crypto_shash_update(desc, (u8 *)&tail[1], HEH_BLOCK_SIZE); + if (err) + return err; + } + err = crypto_shash_final(desc, (u8 *)hash); + if (err) + return err; + be128_xor(hash, hash, &tail[0]); + return 0; +} + +/* + * Transform all full blocks except the last. + * This is used by both the hash and inverse hash phases. + */ +static int heh_tfm_blocks(struct ablkcipher_request *req, + struct scatterlist *src_sgl, + struct scatterlist *dst_sgl, unsigned int len, + const be128 *hash, const be128 *beta_key) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct blkcipher_desc desc = { .flags = req->base.flags }; + struct blkcipher_walk walk; + be128 e = *beta_key; + int err; + unsigned int nbytes; + + blkcipher_walk_init(&walk, dst_sgl, src_sgl, len); + + err = blkcipher_ablkcipher_walk_virt(&desc, &walk, tfm); + + while ((nbytes = walk.nbytes)) { + const be128 *src = (be128 *)walk.src.virt.addr; + be128 *dst = (be128 *)walk.dst.virt.addr; + + do { + gf128mul_x_ble(&e, &e); + be128_xor(dst, src, hash); + be128_xor(dst, dst, &e); + src++; + dst++; + } while ((nbytes -= HEH_BLOCK_SIZE) >= HEH_BLOCK_SIZE); + err = blkcipher_walk_done(&desc, &walk, nbytes); + } + return err; +} + +/* + * The hash phase of HEH. Given a message, compute: + * + * (m_0 + H, ..., m_{N-2} + H, H, m_N) + (xb, x^2b, ..., x^{N-1}b, b, 0) + * + * where: + * N is the number of full blocks in the message + * m_i is the i-th full block in the message for i = 0 to N-1 inclusive + * m_N is the unpadded partial block, possibly empty + * H is the poly_hash() of the message, keyed by tau_key + * b is beta_key + * x is the element x in our representation of GF(2^128) + * + * Note that the partial block remains unchanged, but it does affect the result + * of poly_hash() and therefore the transformation of all the full blocks. 
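Concretely, the masking of the first N-1 full blocks reduces to the following loop (a sketch reusing the ble128/ble_xtime helpers from the previous example; cf. heh_tfm_blocks() below):

/* dst_i = m_i ^ H ^ x^(i+1) * b for blocks 0..N-2; the caller then
 * overwrites block N-1 with H ^ b and leaves the partial block alone. */
static void heh_hash_layer_ref(ble128 *blk, int n_full_minus_1,
                               ble128 hash, ble128 beta)
{
        ble128 e = beta;
        int i;

        for (i = 0; i < n_full_minus_1; i++) {
                e = ble_xtime(e);       /* e = x^(i+1) * beta */
                blk[i].lo ^= hash.lo ^ e.lo;
                blk[i].hi ^= hash.hi ^ e.hi;
        }
}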
+ */ +static int heh_hash(struct ablkcipher_request *req, const be128 *beta_key) +{ + be128 hash; + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int partial_len = req->nbytes % HEH_BLOCK_SIZE; + int err; + + /* poly_hash() the full message including the partial block */ + err = poly_hash(req, req->src, &hash); + if (err) + return err; + + /* Transform all full blocks except the last */ + err = heh_tfm_blocks(req, req->src, req->dst, tail_offset, &hash, + beta_key); + if (err) + return err; + + /* Set the last full block to hash XOR beta_key */ + be128_xor(&hash, &hash, beta_key); + scatterwalk_map_and_copy(&hash, req->dst, tail_offset, HEH_BLOCK_SIZE, + 1); + + /* Copy the partial block if needed */ + if (partial_len != 0 && req->src != req->dst) { + unsigned int offs = tail_offset + HEH_BLOCK_SIZE; + + scatterwalk_map_and_copy(&hash, req->src, offs, partial_len, 0); + scatterwalk_map_and_copy(&hash, req->dst, offs, partial_len, 1); + } + return 0; +} + +/* + * The inverse hash phase of HEH. This undoes the result of heh_hash(). + */ +static int heh_hash_inv(struct ablkcipher_request *req, const be128 *beta_key) +{ + be128 hash; + be128 tmp; + struct scatterlist tmp_sgl[2]; + struct scatterlist *tail_sgl; + unsigned int tail_offset = get_tail_offset(req->nbytes); + struct scatterlist *sgl = req->dst; + int err; + + /* + * The last full block was computed as hash XOR beta_key, so XOR it with + * beta_key to recover hash. + */ + tail_sgl = scatterwalk_ffwd(tmp_sgl, sgl, tail_offset); + scatterwalk_map_and_copy(&hash, tail_sgl, 0, HEH_BLOCK_SIZE, 0); + be128_xor(&hash, &hash, beta_key); + + /* Transform all full blocks except the last */ + err = heh_tfm_blocks(req, sgl, sgl, tail_offset, &hash, beta_key); + if (err) + return err; + + /* + * Recover the last full block. We know 'hash', i.e. the poly_hash() of + * the original message. The last full block was the constant term + * of the polynomial. To recover the last full block, temporarily zero + * it, compute the poly_hash(), and take the difference from 'hash'.
+ */ + memset(&tmp, 0, sizeof(tmp)); + scatterwalk_map_and_copy(&tmp, tail_sgl, 0, HEH_BLOCK_SIZE, 1); + err = poly_hash(req, sgl, &tmp); + if (err) + return err; + be128_xor(&tmp, &tmp, &hash); + scatterwalk_map_and_copy(&tmp, tail_sgl, 0, HEH_BLOCK_SIZE, 1); + return 0; +} + +static int heh_hash_inv_step(struct ablkcipher_request *req, u32 flags) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + + return heh_hash_inv(req, &rctx->beta2_key); +} + +static int heh_ecb_step_3(struct ablkcipher_request *req, u32 flags) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + u8 partial_block[HEH_BLOCK_SIZE] __aligned(__alignof__(u32)); + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int partial_offset = tail_offset + HEH_BLOCK_SIZE; + unsigned int partial_len = req->nbytes - partial_offset; + + /* + * Extract the pad in req->dst at tail_offset, and XOR the partial block + * with it to create the encrypted partial block + */ + scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, + HEH_BLOCK_SIZE, 0); + scatterwalk_map_and_copy(partial_block, req->dst, partial_offset, + partial_len, 0); + crypto_xor(partial_block, rctx->u.ecb.keystream, partial_len); + + /* + * Store the encrypted final block and partial block back in dst_sg + */ + scatterwalk_map_and_copy(&rctx->u.ecb.tmp, req->dst, tail_offset, + HEH_BLOCK_SIZE, 1); + scatterwalk_map_and_copy(partial_block, req->dst, partial_offset, + partial_len, 1); + + return heh_hash_inv_step(req, flags); +} + +static void heh_ecb_step_2_done(struct crypto_async_request *areq, int err) +{ + return async_done(areq, err, heh_ecb_step_3); +} + +static int heh_ecb_step_2(struct ablkcipher_request *req, u32 flags) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + unsigned int partial_len = req->nbytes % HEH_BLOCK_SIZE; + struct scatterlist *tmp_sgl; + int err; + unsigned int tail_offset = get_tail_offset(req->nbytes); + + if (partial_len == 0) + return heh_hash_inv_step(req, flags); + + /* + * Extract the final full block, store it in tmp, and then xor that with + * the value saved in u.ecb.keystream + */ + scatterwalk_map_and_copy(rctx->u.ecb.tmp, req->dst, tail_offset, + HEH_BLOCK_SIZE, 0); + crypto_xor(rctx->u.ecb.keystream, rctx->u.ecb.tmp, HEH_BLOCK_SIZE); + + /* + * Encrypt the value in rctx->u.ecb.keystream to create the pad for the + * partial block. + * We cannot encrypt stack buffers, so re-use the dst_sg to do this + * encryption to avoid a malloc. The value at tail_offset is stored in + * tmp, and will be restored later. + */ + scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, + HEH_BLOCK_SIZE, 1); + tmp_sgl = scatterwalk_ffwd(rctx->u.ecb.tmp_sgl, req->dst, tail_offset); + ablkcipher_request_set_callback(&rctx->u.ecb.req, flags, + heh_ecb_step_2_done, req); + ablkcipher_request_set_crypt(&rctx->u.ecb.req, tmp_sgl, tmp_sgl, + HEH_BLOCK_SIZE, NULL); + err = crypto_ablkcipher_encrypt(&rctx->u.ecb.req); + if (err) + return err; + return heh_ecb_step_3(req, flags); +} + +static void heh_ecb_full_done(struct crypto_async_request *areq, int err) +{ + return async_done(areq, err, heh_ecb_step_2); +} + +/* + * The encrypt phase of HEH. This uses ECB encryption, with special handling + * for the partial block at the end if any. The source data is already in + * req->dst, so the encryption happens in-place. + * + * After the encrypt phase we continue on to the inverse hash phase. The + * function calls are chained to support asynchronous ECB algorithms.
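Stripped of the scatterlist plumbing, the partial-block handling in heh_ecb_step_2()/heh_ecb_step_3() is a pad construction: pad = E(last full block before ECB XOR last full block after ECB), and the partial block is XORed against that pad. A schematic user-space model (blkenc_stub() is a placeholder standing in for the raw block cipher under ecb_key, not a real cipher):

#include <stddef.h>
#include <stdint.h>

/* Placeholder; a real check would run AES-ECB on the single block. */
static void blkenc_stub(uint8_t block[16])
{
        (void)block;
}

/* Encrypt a trailing partial block of plen < 16 bytes:
 * pad = E(pre_ecb ^ post_ecb), ciphertext = partial ^ pad. */
static void encrypt_partial_ref(uint8_t *partial, size_t plen,
                                const uint8_t pre_ecb[16],
                                const uint8_t post_ecb[16])
{
        uint8_t pad[16];
        size_t i;

        for (i = 0; i < 16; i++)
                pad[i] = pre_ecb[i] ^ post_ecb[i];
        blkenc_stub(pad);
        for (i = 0; i < plen; i++)
                partial[i] ^= pad[i];
}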
+ */ +static int heh_ecb(struct ablkcipher_request *req, bool decrypt) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct heh_req_ctx *rctx = heh_req_ctx(req); + struct ablkcipher_request *ecb_req = &rctx->u.ecb.req; + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int full_len = tail_offset + HEH_BLOCK_SIZE; + int err; + + /* + * Save the last full block before it is encrypted/decrypted. This will + * be used later to encrypt/decrypt the partial block + */ + scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, + HEH_BLOCK_SIZE, 0); + + /* Encrypt/decrypt all full blocks */ + ablkcipher_request_set_tfm(ecb_req, ctx->ecb); + ablkcipher_request_set_callback(ecb_req, req->base.flags, + heh_ecb_full_done, req); + ablkcipher_request_set_crypt(ecb_req, req->dst, req->dst, full_len, + NULL); + if (decrypt) + err = crypto_ablkcipher_decrypt(ecb_req); + else + err = crypto_ablkcipher_encrypt(ecb_req); + if (err) + return err; + + return heh_ecb_step_2(req, req->base.flags); +} + +static int heh_crypt(struct ablkcipher_request *req, bool decrypt) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + int err; + + /* Inputs must be at least one full block */ + if (req->nbytes < HEH_BLOCK_SIZE) + return -EINVAL; + + err = generate_betas(req, &rctx->beta1_key, &rctx->beta2_key); + if (err) + return err; + + if (decrypt) + swap(rctx->beta1_key, rctx->beta2_key); + + err = heh_hash(req, &rctx->beta1_key); + if (err) + return err; + + return heh_ecb(req, decrypt); +} + +static int heh_encrypt(struct ablkcipher_request *req) +{ + return heh_crypt(req, false); +} + +static int heh_decrypt(struct ablkcipher_request *req) +{ + return heh_crypt(req, true); +} + +static int heh_setkey(struct crypto_ablkcipher *parent, const u8 *key, + unsigned int keylen) +{ + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(parent); + struct crypto_shash *cmac = ctx->cmac; + struct crypto_ablkcipher *ecb = ctx->ecb; + SHASH_DESC_ON_STACK(desc, cmac); + u8 *derived_keys; + u8 digest[HEH_BLOCK_SIZE]; + unsigned int i; + int err; + + /* set prf_key = key */ + crypto_shash_clear_flags(cmac, CRYPTO_TFM_REQ_MASK); + crypto_shash_set_flags(cmac, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_shash_setkey(cmac, key, keylen); + crypto_ablkcipher_set_flags(parent, crypto_shash_get_flags(cmac) & + CRYPTO_TFM_RES_MASK); + if (err) + return err; + + /* + * Generate tau_key and ecb_key as follows: + * tau_key = cmac(prf_key, 0x00...01) + * ecb_key = cmac(prf_key, 0x00...02) || cmac(prf_key, 0x00...03) || ... 
+ * truncated to keylen bytes + */ + derived_keys = kzalloc(round_up(HEH_BLOCK_SIZE + keylen, + HEH_BLOCK_SIZE), GFP_KERNEL); + if (!derived_keys) + return -ENOMEM; + desc->tfm = cmac; + desc->flags = (crypto_shash_get_flags(cmac) & CRYPTO_TFM_REQ_MASK); + for (i = 0; i < keylen + HEH_BLOCK_SIZE; i += HEH_BLOCK_SIZE) { + derived_keys[i + HEH_BLOCK_SIZE - 1] = + 0x01 + i / HEH_BLOCK_SIZE; + err = crypto_shash_digest(desc, derived_keys + i, + HEH_BLOCK_SIZE, digest); + if (err) + goto out; + memcpy(derived_keys + i, digest, HEH_BLOCK_SIZE); + } + + err = crypto_shash_setkey(ctx->poly_hash, derived_keys, HEH_BLOCK_SIZE); + if (err) + goto out; + + crypto_ablkcipher_clear_flags(ecb, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(ecb, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(ecb, derived_keys + HEH_BLOCK_SIZE, + keylen); + crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(ecb) & + CRYPTO_TFM_RES_MASK); +out: + kzfree(derived_keys); + return err; +} + +static int heh_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct heh_instance_ctx *ictx = crypto_instance_ctx(inst); + struct heh_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *cmac; + struct crypto_shash *poly_hash; + struct crypto_ablkcipher *ecb; + unsigned int reqsize; + int err; + + cmac = crypto_spawn_shash(&ictx->cmac); + if (IS_ERR(cmac)) + return PTR_ERR(cmac); + + poly_hash = crypto_spawn_shash(&ictx->poly_hash); + err = PTR_ERR(poly_hash); + if (IS_ERR(poly_hash)) + goto err_free_cmac; + + ecb = crypto_spawn_skcipher(&ictx->ecb); + err = PTR_ERR(ecb); + if (IS_ERR(ecb)) + goto err_free_poly_hash; + + ctx->cmac = cmac; + ctx->poly_hash = poly_hash; + ctx->ecb = ecb; + + reqsize = crypto_tfm_alg_alignmask(tfm) & + ~(crypto_tfm_ctx_alignment() - 1); + reqsize += max3(offsetof(struct heh_req_ctx, u.cmac.desc) + + sizeof(struct shash_desc) + + crypto_shash_descsize(cmac), + offsetof(struct heh_req_ctx, u.poly_hash.desc) + + sizeof(struct shash_desc) + + crypto_shash_descsize(poly_hash), + offsetof(struct heh_req_ctx, u.ecb.req) + + sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(ecb)); + tfm->crt_ablkcipher.reqsize = reqsize; + + return 0; + +err_free_poly_hash: + crypto_free_shash(poly_hash); +err_free_cmac: + crypto_free_shash(cmac); + return err; +} + +static void heh_exit_tfm(struct crypto_tfm *tfm) +{ + struct heh_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(ctx->cmac); + crypto_free_shash(ctx->poly_hash); + crypto_free_ablkcipher(ctx->ecb); +} + +static void heh_free_instance(struct crypto_instance *inst) +{ + struct heh_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_shash(&ctx->cmac); + crypto_drop_shash(&ctx->poly_hash); + crypto_drop_skcipher(&ctx->ecb); + kfree(inst); +} + +/* + * Create an instance of HEH as an ablkcipher. + * + * This relies on underlying CMAC and ECB algorithms, usually cmac(aes) and + * ecb(aes). For performance reasons we support asynchronous ECB algorithms. + * However, we do not yet support asynchronous CMAC algorithms because CMAC is + * only used on a small fixed amount of data per request, independent of the + * request length. This would change if AEAD or variable-length nonce support + * were to be exposed.
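Stepping back to the key schedule in heh_setkey() above: tau_key is the CMAC of a counter block 0x00...01, and ecb_key is the concatenation of the CMACs of counter blocks 0x00...02, 0x00...03, ..., truncated to keylen bytes. A hedged user-space model (cmac_stub() is a placeholder for the real CMAC PRF keyed with the user key):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Placeholder PRF; the kernel uses cmac(cipher) keyed with prf_key. */
static void cmac_stub(uint8_t out[16], const uint8_t in[16])
{
        memcpy(out, in, 16);    /* illustration only, not a real CMAC */
}

static void derive_keys_ref(uint8_t tau_key[16], uint8_t *ecb_key,
                            size_t keylen)
{
        uint8_t block[16] = { 0 };
        size_t off;

        block[15] = 0x01;               /* tau_key = PRF(0x00...01) */
        cmac_stub(tau_key, block);

        for (off = 0; off < keylen; off += 16) {
                uint8_t out[16];
                size_t n = keylen - off < 16 ? keylen - off : 16;

                block[15] = 0x02 + off / 16;    /* PRF(0x00...02), ... */
                cmac_stub(out, block);
                memcpy(ecb_key + off, out, n);  /* truncate to keylen */
        }
}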
+ */ +static int heh_create_common(struct crypto_template *tmpl, struct rtattr **tb, + const char *full_name, const char *cmac_name, + const char *poly_hash_name, const char *ecb_name) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct heh_instance_ctx *ctx; + struct shash_alg *cmac; + struct shash_alg *poly_hash; + struct crypto_alg *ecb; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + /* User must be asking for something compatible with ablkcipher */ + if ((algt->type ^ CRYPTO_ALG_TYPE_ABLKCIPHER) & algt->mask) + return -EINVAL; + + /* Allocate the ablkcipher instance */ + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = crypto_instance_ctx(inst); + + /* Set up the cmac spawn */ + ctx->cmac.base.inst = inst; + err = crypto_grab_shash(&ctx->cmac, cmac_name, 0, 0); + if (err) + goto err_free_inst; + cmac = crypto_spawn_shash_alg(&ctx->cmac); + err = -EINVAL; + if (cmac->digestsize != HEH_BLOCK_SIZE) + goto err_drop_cmac; + + /* Set up the poly_hash spawn */ + ctx->poly_hash.base.inst = inst; + err = crypto_grab_shash(&ctx->poly_hash, poly_hash_name, 0, 0); + if (err) + goto err_drop_cmac; + poly_hash = crypto_spawn_shash_alg(&ctx->poly_hash); + err = -EINVAL; + if (poly_hash->digestsize != HEH_BLOCK_SIZE) + goto err_drop_poly_hash; + + /* Set up the ecb spawn */ + ctx->ecb.base.inst = inst; + err = crypto_grab_skcipher(&ctx->ecb, ecb_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_drop_poly_hash; + ecb = crypto_skcipher_spawn_alg(&ctx->ecb); + + /* HEH only supports block ciphers with 16 byte block size */ + err = -EINVAL; + if (ecb->cra_blocksize != HEH_BLOCK_SIZE) + goto err_drop_ecb; + + /* The underlying "ECB" algorithm must not require an IV */ + err = -EINVAL; + if ((ecb->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) { + if (ecb->cra_blkcipher.ivsize != 0) + goto err_drop_ecb; + } else { + if (ecb->cra_ablkcipher.ivsize != 0) + goto err_drop_ecb; + } + + /* Set the instance names */ + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "heh_base(%s,%s,%s)", cmac->base.cra_driver_name, + poly_hash->base.cra_driver_name, + ecb->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ecb; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "%s", full_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ecb; + + /* Finish initializing the instance */ + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + (ecb->cra_flags & CRYPTO_ALG_ASYNC); + inst->alg.cra_blocksize = HEH_BLOCK_SIZE; + inst->alg.cra_ctxsize = sizeof(struct heh_tfm_ctx); + inst->alg.cra_alignmask = ecb->cra_alignmask | (__alignof__(be128) - 1); + inst->alg.cra_priority = ecb->cra_priority; + inst->alg.cra_type = &crypto_ablkcipher_type; + inst->alg.cra_init = heh_init_tfm; + inst->alg.cra_exit = heh_exit_tfm; + + inst->alg.cra_ablkcipher.setkey = heh_setkey; + inst->alg.cra_ablkcipher.encrypt = heh_encrypt; + inst->alg.cra_ablkcipher.decrypt = heh_decrypt; + if ((ecb->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) { + inst->alg.cra_ablkcipher.min_keysize = ecb->cra_blkcipher.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = ecb->cra_blkcipher.max_keysize; + } else { + inst->alg.cra_ablkcipher.min_keysize = ecb->cra_ablkcipher.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = ecb->cra_ablkcipher.max_keysize; + } + inst->alg.cra_ablkcipher.ivsize = HEH_BLOCK_SIZE; + 
+ /* Register the instance */ + err = crypto_register_instance(tmpl, inst); + if (err) + goto err_drop_ecb; + return 0; + +err_drop_ecb: + crypto_drop_skcipher(&ctx->ecb); +err_drop_poly_hash: + crypto_drop_shash(&ctx->poly_hash); +err_drop_cmac: + crypto_drop_shash(&ctx->cmac); +err_free_inst: + kfree(inst); + return err; +} + +static int heh_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *cipher_name; + char full_name[CRYPTO_MAX_ALG_NAME]; + char cmac_name[CRYPTO_MAX_ALG_NAME]; + char ecb_name[CRYPTO_MAX_ALG_NAME]; + + /* Get the name of the requested block cipher (e.g. aes) */ + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "heh(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(cmac_name, CRYPTO_MAX_ALG_NAME, "cmac(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return heh_create_common(tmpl, tb, full_name, cmac_name, "poly_hash", + ecb_name); +} + +static struct crypto_template heh_tmpl = { + .name = "heh", + .create = heh_create, + .free = heh_free_instance, + .module = THIS_MODULE, +}; + +static int heh_base_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + char full_name[CRYPTO_MAX_ALG_NAME]; + const char *cmac_name; + const char *poly_hash_name; + const char *ecb_name; + + cmac_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cmac_name)) + return PTR_ERR(cmac_name); + + poly_hash_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(poly_hash_name)) + return PTR_ERR(poly_hash_name); + + ecb_name = crypto_attr_alg_name(tb[3]); + if (IS_ERR(ecb_name)) + return PTR_ERR(ecb_name); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "heh_base(%s,%s,%s)", + cmac_name, poly_hash_name, ecb_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return heh_create_common(tmpl, tb, full_name, cmac_name, poly_hash_name, + ecb_name); +} + +/* + * If HEH is instantiated as "heh_base" instead of "heh", then specific + * implementations of cmac, poly_hash, and ecb can be specified instead of just + * the cipher. 
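Because the template registers "heh" with an ablkcipher interface, it can be exercised from user space through AF_ALG. A hypothetical smoke test, assuming a kernel built with CONFIG_CRYPTO_HEH and CONFIG_CRYPTO_USER_API_SKCIPHER; it uses the first heh(aes) test vector from testmgr.h below, so the first output byte should be 0xd8:

#include <linux/if_alg.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type = "skcipher",
                .salg_name = "heh(aes)",
        };
        unsigned char key[16] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        };
        unsigned char buf[16];  /* plaintext in, ciphertext out */
        char cbuf[CMSG_SPACE(sizeof(__u32)) +
                  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = {
                .msg_control = cbuf,
                .msg_controllen = sizeof(cbuf),
                .msg_iov = &iov,
                .msg_iovlen = 1,
        };
        struct cmsghdr *cmsg;
        struct af_alg_iv *ivmsg;
        int tfmfd, opfd;

        memcpy(buf, key, 16);   /* this vector's plaintext equals the key */

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0 ||
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0) {
                perror("heh(aes) unavailable");
                return 1;
        }
        opfd = accept(tfmfd, NULL, NULL);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_OP;
        cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
        *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_IV;
        cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
        ivmsg = (struct af_alg_iv *)CMSG_DATA(cmsg);
        ivmsg->ivlen = 16;
        memset(ivmsg->iv, 0, 16);       /* all-zero IV, as in the vector */

        if (sendmsg(opfd, &msg, 0) != (ssize_t)sizeof(buf) ||
            read(opfd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
                perror("heh(aes) encrypt");
                return 1;
        }
        printf("first ciphertext byte: 0x%02x (expect 0xd8)\n", buf[0]);
        return 0;
}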
+ */ +static struct crypto_template heh_base_tmpl = { + .name = "heh_base", + .create = heh_base_create, + .free = heh_free_instance, + .module = THIS_MODULE, +}; + +static int __init heh_module_init(void) +{ + int err; + + err = crypto_register_template(&heh_tmpl); + if (err) + return err; + + err = crypto_register_template(&heh_base_tmpl); + if (err) + goto out_undo_heh; + + err = crypto_register_shash(&poly_hash_alg); + if (err) + goto out_undo_heh_base; + + return 0; + +out_undo_heh_base: + crypto_unregister_template(&heh_base_tmpl); +out_undo_heh: + crypto_unregister_template(&heh_tmpl); + return err; +} + +static void __exit heh_module_exit(void) +{ + crypto_unregister_template(&heh_tmpl); + crypto_unregister_template(&heh_base_tmpl); + crypto_unregister_shash(&poly_hash_alg); +} + +module_init(heh_module_init); +module_exit(heh_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Hash-Encrypt-Hash block cipher mode"); +MODULE_ALIAS_CRYPTO("heh"); +MODULE_ALIAS_CRYPTO("heh_base"); diff --git a/crypto/internal.h b/crypto/internal.h index 00e42a3ed814..7eefcdb00227 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v); unsigned int crypto_alg_extsize(struct crypto_alg *alg); +int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, + u32 type, u32 mask); + static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) { atomic_inc(&alg->cra_refcnt); diff --git a/crypto/shash.c b/crypto/shash.c index 4f89f78031e2..b086b0174b82 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -686,6 +686,14 @@ void shash_free_instance(struct crypto_instance *inst) } EXPORT_SYMBOL_GPL(shash_free_instance); +int crypto_grab_shash(struct crypto_shash_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_shash_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_shash); + int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, struct shash_alg *alg, struct crypto_instance *inst) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d199c0b1751c..d248008e7f7b 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -16,7 +16,11 @@ #include <crypto/internal/skcipher.h> #include <linux/bug.h> +#include <linux/cryptouser.h> #include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/seq_file.h> +#include <net/netlink.h> #include "internal.h" @@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); - BUG_ON(alg->cra_type != &crypto_ablkcipher_type && - alg->cra_type != &crypto_givcipher_type); + if (alg->cra_type == &crypto_ablkcipher_type || + alg->cra_type == &crypto_givcipher_type) + return sizeof(struct crypto_ablkcipher *); - return sizeof(struct crypto_ablkcipher *); + return crypto_alg_extsize(alg); } static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, @@ -118,7 +123,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->decrypt = skcipher_decrypt_blkcipher; skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); - skcipher->has_setkey = calg->cra_blkcipher.max_keysize; + skcipher->keysize = calg->cra_blkcipher.max_keysize; return 0; } @@ -211,31 +216,123 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); skcipher->reqsize = 
crypto_ablkcipher_reqsize(ablkcipher) + sizeof(struct ablkcipher_request); - skcipher->has_setkey = calg->cra_ablkcipher.max_keysize; + skcipher->keysize = calg->cra_ablkcipher.max_keysize; return 0; } +static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + + alg->exit(skcipher); +} + static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); - BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type && - tfm->__crt_alg->cra_type != &crypto_givcipher_type); + if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || + tfm->__crt_alg->cra_type == &crypto_givcipher_type) + return crypto_init_skcipher_ops_ablkcipher(tfm); + + skcipher->setkey = alg->setkey; + skcipher->encrypt = alg->encrypt; + skcipher->decrypt = alg->decrypt; + skcipher->ivsize = alg->ivsize; + skcipher->keysize = alg->max_keysize; + + if (alg->exit) + skcipher->base.exit = crypto_skcipher_exit_tfm; - return crypto_init_skcipher_ops_ablkcipher(tfm); + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_skcipher_free_instance(struct crypto_instance *inst) +{ + struct skcipher_instance *skcipher = + container_of(inst, struct skcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + seq_printf(m, "type : skcipher\n"); + seq_printf(m, "async : %s\n", + alg->cra_flags & CRYPTO_ALG_ASYNC ? 
"yes" : "no"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->chunksize); } +#ifdef CONFIG_NET +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_blkcipher rblkcipher; + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->min_keysize; + rblkcipher.max_keysize = skcipher->max_keysize; + rblkcipher.ivsize = skcipher->ivsize; + + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + static const struct crypto_type crypto_skcipher_type2 = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, + .free = crypto_skcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_skcipher_show, +#endif + .report = crypto_skcipher_report, .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, - .type = CRYPTO_ALG_TYPE_BLKCIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; +int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_skcipher_type2; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_skcipher2); + struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask) { @@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, } EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, + type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_skcipher2); + +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8) + return -EINVAL; + + if (!alg->chunksize) + alg->chunksize = base->cra_blocksize; + + base->cra_type = &crypto_skcipher_type2; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; + + return 0; +} + +int crypto_register_skcipher(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_skcipher); + +void crypto_unregister_skcipher(struct skcipher_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skcipher); + +int crypto_register_skciphers(struct skcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_skcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_skciphers); + +void crypto_unregister_skciphers(struct skcipher_alg *algs, 
int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skciphers); + +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst) +{ + int err; + + err = skcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, skcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(skcipher_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5f15f45fcc9f..88f1b6cb4dec 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3215,6 +3215,21 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "heh(aes)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = { + .vecs = aes_heh_enc_tv_template, + .count = AES_HEH_ENC_TEST_VECTORS + }, + .dec = { + .vecs = aes_heh_dec_tv_template, + .count = AES_HEH_DEC_TEST_VECTORS + } + } + } + }, { .alg = "hmac(crc32)", .test = alg_test_hash, .suite = { @@ -3874,6 +3889,22 @@ static const struct alg_test_desc alg_test_descs[] = { } } } + }, { + .alg = "zstd", + .test = alg_test_comp, + .fips_allowed = 1, + .suite = { + .comp = { + .comp = { + .vecs = zstd_comp_tv_template, + .count = ZSTD_COMP_TEST_VECTORS + }, + .decomp = { + .vecs = zstd_decomp_tv_template, + .count = ZSTD_DECOMP_TEST_VECTORS + } + } + } } }; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 743d6cb7f8cd..98ebaf79680a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -14181,6 +14181,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { #define AES_DEC_TEST_VECTORS 4 #define AES_CBC_ENC_TEST_VECTORS 5 #define AES_CBC_DEC_TEST_VECTORS 5 +#define AES_HEH_ENC_TEST_VECTORS 4 +#define AES_HEH_DEC_TEST_VECTORS 4 #define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 #define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 #define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2 @@ -14553,6 +14555,198 @@ static struct cipher_testvec aes_dec_tv_template[] = { }, }; +static struct cipher_testvec aes_heh_enc_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .ilen = 16, + .result = "\xd8\xbd\x40\xbf\xca\xe5\xee\x81" + "\x0f\x3d\x1f\x1f\xae\x89\x07\x55", + .rlen = 16, + .also_non_np = 1, + .np = 2, + .tap = { 8, 8 }, + }, { + .key = "\xa8\xda\x24\x9b\x5e\xfa\x13\xc2" + "\xc1\x94\xbf\x32\xba\x38\xa3\x77", + .klen = 16, + .iv = "\x4d\x47\x61\x37\x2b\x47\x86\xf0" + "\xd6\x47\xb5\xc2\xe8\xcf\x85\x27", + .input = "\xb8\xee\x29\xe4\xa5\xd1\xe7\x55" + "\xd0\xfd\xe7\x22\x63\x76\x36\xe2" + "\xf8\x0c\xf8\xfe\x65\x76\xe7\xca" + "\xc1\x42\xf5\xca\x5a\xa8\xac\x2a", + .ilen = 32, + .result = "\x59\xf2\x78\x4e\x10\x94\xf9\x5c" + "\x22\x23\x78\x2a\x30\x48\x11\x97" + "\xb1\xfe\x70\xc4\xef\xdf\x04\xef" + "\x16\x39\x04\xcf\xc0\x95\x9a\x98", + .rlen = 32, + .also_non_np = 1, + .np = 3, + .tap = { 16, 13, 3 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + 
"\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .ilen = 63, + .result = "\xe0\x40\xeb\xe9\x52\xbe\x65\x60" + "\xe4\x68\x68\xa3\x73\x75\xb8\x52" + "\xef\x38\x6a\x87\x25\x25\xf6\x04" + "\xe5\x8e\xbe\x14\x8b\x02\x14\x1f" + "\xa9\x73\xb7\xad\x15\xbe\x9c\xa0" + "\xd2\x8a\x2c\xdc\xd4\xe3\x05\x55" + "\x0a\xf5\xf8\x51\xee\xe5\x62\xa5" + "\x71\xa7\x7c\x15\x5d\x7a\x9e", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .ilen = 63, + .result = "\x4b\x1a\x15\xa0\xaf\x08\x6d\x70" + "\xf0\xa7\x97\xb5\x31\x4b\x8c\xc3" + "\x4d\xf2\x7a\x9d\xdd\xd4\x15\x99" + "\x57\xad\xc6\xb1\x35\x69\xf5\x6a" + "\x2d\x70\xe4\x97\x49\xb2\x9f\x71" + "\xde\x22\xb5\x70\x8c\x69\x24\xd3" + "\xad\x80\x58\x48\x90\xe4\xed\xba" + "\x76\x3d\x71\x7c\x57\x25\x87", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + } +}; + +static struct cipher_testvec aes_heh_dec_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xd8\xbd\x40\xbf\xca\xe5\xee\x81" + "\x0f\x3d\x1f\x1f\xae\x89\x07\x55", + .ilen = 16, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .rlen = 16, + .also_non_np = 1, + .np = 2, + .tap = { 8, 8 }, + }, { + .key = "\xa8\xda\x24\x9b\x5e\xfa\x13\xc2" + "\xc1\x94\xbf\x32\xba\x38\xa3\x77", + .klen = 16, + .iv = "\x4d\x47\x61\x37\x2b\x47\x86\xf0" + "\xd6\x47\xb5\xc2\xe8\xcf\x85\x27", + .input = "\x59\xf2\x78\x4e\x10\x94\xf9\x5c" + "\x22\x23\x78\x2a\x30\x48\x11\x97" + "\xb1\xfe\x70\xc4\xef\xdf\x04\xef" + "\x16\x39\x04\xcf\xc0\x95\x9a\x98", + .ilen = 32, + .result = "\xb8\xee\x29\xe4\xa5\xd1\xe7\x55" + "\xd0\xfd\xe7\x22\x63\x76\x36\xe2" + "\xf8\x0c\xf8\xfe\x65\x76\xe7\xca" + "\xc1\x42\xf5\xca\x5a\xa8\xac\x2a", + .rlen = 32, + .also_non_np = 1, + .np = 3, + .tap = { 16, 13, 3 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xe0\x40\xeb\xe9\x52\xbe\x65\x60" + "\xe4\x68\x68\xa3\x73\x75\xb8\x52" + "\xef\x38\x6a\x87\x25\x25\xf6\x04" + "\xe5\x8e\xbe\x14\x8b\x02\x14\x1f" + "\xa9\x73\xb7\xad\x15\xbe\x9c\xa0" + "\xd2\x8a\x2c\xdc\xd4\xe3\x05\x55" + "\x0a\xf5\xf8\x51\xee\xe5\x62\xa5" + "\x71\xa7\x7c\x15\x5d\x7a\x9e", + .ilen = 63, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + }, { + .key = 
"\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x4b\x1a\x15\xa0\xaf\x08\x6d\x70" + "\xf0\xa7\x97\xb5\x31\x4b\x8c\xc3" + "\x4d\xf2\x7a\x9d\xdd\xd4\x15\x99" + "\x57\xad\xc6\xb1\x35\x69\xf5\x6a" + "\x2d\x70\xe4\x97\x49\xb2\x9f\x71" + "\xde\x22\xb5\x70\x8c\x69\x24\xd3" + "\xad\x80\x58\x48\x90\xe4\xed\xba" + "\x76\x3d\x71\x7c\x57\x25\x87", + .ilen = 63, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + } +}; + static struct cipher_testvec aes_cbc_enc_tv_template[] = { { /* From RFC 3602 */ .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" @@ -33693,4 +33887,78 @@ static struct comp_testvec lz4hc_decomp_tv_template[] = { }, }; +#define ZSTD_COMP_TEST_VECTORS 2 +#define ZSTD_DECOMP_TEST_VECTORS 2 + +static struct comp_testvec zstd_comp_tv_template[] = { + { + .inlen = 68, + .outlen = 39, + .input = "The algorithm is zstd. " + "The algorithm is zstd. " + "The algorithm is zstd.", + .output = "\x28\xb5\x2f\xfd\x00\x50\xf5\x00\x00\xb8\x54\x68\x65" + "\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x20\x69\x73" + "\x20\x7a\x73\x74\x64\x2e\x20\x01\x00\x55\x73\x36\x01" + , + }, + { + .inlen = 244, + .outlen = 151, + .input = "zstd, short for Zstandard, is a fast lossless " + "compression algorithm, targeting real-time " + "compression scenarios at zlib-level and better " + "compression ratios. The zstd compression library " + "provides in-memory compression and decompression " + "functions.", + .output = "\x28\xb5\x2f\xfd\x00\x50\x75\x04\x00\x42\x4b\x1e\x17" + "\x90\x81\x31\x00\xf2\x2f\xe4\x36\xc9\xef\x92\x88\x32" + "\xc9\xf2\x24\x94\xd8\x68\x9a\x0f\x00\x0c\xc4\x31\x6f" + "\x0d\x0c\x38\xac\x5c\x48\x03\xcd\x63\x67\xc0\xf3\xad" + "\x4e\x90\xaa\x78\xa0\xa4\xc5\x99\xda\x2f\xb6\x24\x60" + "\xe2\x79\x4b\xaa\xb6\x6b\x85\x0b\xc9\xc6\x04\x66\x86" + "\xe2\xcc\xe2\x25\x3f\x4f\x09\xcd\xb8\x9d\xdb\xc1\x90" + "\xa9\x11\xbc\x35\x44\x69\x2d\x9c\x64\x4f\x13\x31\x64" + "\xcc\xfb\x4d\x95\x93\x86\x7f\x33\x7f\x1a\xef\xe9\x30" + "\xf9\x67\xa1\x94\x0a\x69\x0f\x60\xcd\xc3\xab\x99\xdc" + "\x42\xed\x97\x05\x00\x33\xc3\x15\x95\x3a\x06\xa0\x0e" + "\x20\xa9\x0e\x82\xb9\x43\x45\x01", + }, +}; + +static struct comp_testvec zstd_decomp_tv_template[] = { + { + .inlen = 43, + .outlen = 68, + .input = "\x28\xb5\x2f\xfd\x04\x50\xf5\x00\x00\xb8\x54\x68\x65" + "\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x20\x69\x73" + "\x20\x7a\x73\x74\x64\x2e\x20\x01\x00\x55\x73\x36\x01" + "\x6b\xf4\x13\x35", + .output = "The algorithm is zstd. " + "The algorithm is zstd. 
" + "The algorithm is zstd.", + }, + { + .inlen = 155, + .outlen = 244, + .input = "\x28\xb5\x2f\xfd\x04\x50\x75\x04\x00\x42\x4b\x1e\x17" + "\x90\x81\x31\x00\xf2\x2f\xe4\x36\xc9\xef\x92\x88\x32" + "\xc9\xf2\x24\x94\xd8\x68\x9a\x0f\x00\x0c\xc4\x31\x6f" + "\x0d\x0c\x38\xac\x5c\x48\x03\xcd\x63\x67\xc0\xf3\xad" + "\x4e\x90\xaa\x78\xa0\xa4\xc5\x99\xda\x2f\xb6\x24\x60" + "\xe2\x79\x4b\xaa\xb6\x6b\x85\x0b\xc9\xc6\x04\x66\x86" + "\xe2\xcc\xe2\x25\x3f\x4f\x09\xcd\xb8\x9d\xdb\xc1\x90" + "\xa9\x11\xbc\x35\x44\x69\x2d\x9c\x64\x4f\x13\x31\x64" + "\xcc\xfb\x4d\x95\x93\x86\x7f\x33\x7f\x1a\xef\xe9\x30" + "\xf9\x67\xa1\x94\x0a\x69\x0f\x60\xcd\xc3\xab\x99\xdc" + "\x42\xed\x97\x05\x00\x33\xc3\x15\x95\x3a\x06\xa0\x0e" + "\x20\xa9\x0e\x82\xb9\x43\x45\x01\xaa\x6d\xda\x0d", + .output = "zstd, short for Zstandard, is a fast lossless " + "compression algorithm, targeting real-time " + "compression scenarios at zlib-level and better " + "compression ratios. The zstd compression library " + "provides in-memory compression and decompression " + "functions.", + }, +}; #endif /* _CRYPTO_TESTMGR_H */ diff --git a/crypto/zstd.c b/crypto/zstd.c new file mode 100644 index 000000000000..9bfd28f8cc77 --- /dev/null +++ b/crypto/zstd.c @@ -0,0 +1,209 @@ +/* + * Cryptographic API. + * + * Copyright (c) 2017-present, Facebook, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include <linux/crypto.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/net.h> +#include <linux/vmalloc.h> +#include <linux/zstd.h> + + +#define ZSTD_DEF_LEVEL 3 + +struct zstd_ctx { + ZSTD_CCtx *cctx; + ZSTD_DCtx *dctx; + void *cwksp; + void *dwksp; +}; + +static ZSTD_parameters zstd_params(void) +{ + return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0); +} + +static int zstd_comp_init(struct zstd_ctx *ctx) +{ + int ret = 0; + const ZSTD_parameters params = zstd_params(); + const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams); + + ctx->cwksp = vzalloc(wksp_size); + if (!ctx->cwksp) { + ret = -ENOMEM; + goto out; + } + + ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size); + if (!ctx->cctx) { + ret = -EINVAL; + goto out_free; + } +out: + return ret; +out_free: + vfree(ctx->cwksp); + goto out; +} + +static int zstd_decomp_init(struct zstd_ctx *ctx) +{ + int ret = 0; + const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); + + ctx->dwksp = vzalloc(wksp_size); + if (!ctx->dwksp) { + ret = -ENOMEM; + goto out; + } + + ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size); + if (!ctx->dctx) { + ret = -EINVAL; + goto out_free; + } +out: + return ret; +out_free: + vfree(ctx->dwksp); + goto out; +} + +static void zstd_comp_exit(struct zstd_ctx *ctx) +{ + vfree(ctx->cwksp); + ctx->cwksp = NULL; + ctx->cctx = NULL; +} + +static void zstd_decomp_exit(struct zstd_ctx *ctx) +{ + vfree(ctx->dwksp); + ctx->dwksp = NULL; + ctx->dctx = NULL; +} + +static int __zstd_init(void *ctx) +{ + int ret; + + ret = zstd_comp_init(ctx); + if (ret) + return ret; + ret = zstd_decomp_init(ctx); + if (ret) + zstd_comp_exit(ctx); + return ret; +} + +static int zstd_init(struct crypto_tfm *tfm) +{ + struct zstd_ctx *ctx = 
crypto_tfm_ctx(tfm); + + return __zstd_init(ctx); +} + +static void __zstd_exit(void *ctx) +{ + zstd_comp_exit(ctx); + zstd_decomp_exit(ctx); +} + +static void zstd_exit(struct crypto_tfm *tfm) +{ + struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); + + __zstd_exit(ctx); +} + +static int __zstd_compress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + size_t out_len; + struct zstd_ctx *zctx = ctx; + const ZSTD_parameters params = zstd_params(); + + out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params); + if (ZSTD_isError(out_len)) + return -EINVAL; + *dlen = out_len; + return 0; +} + +static int zstd_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); + + return __zstd_compress(src, slen, dst, dlen, ctx); +} + +static int __zstd_decompress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + size_t out_len; + struct zstd_ctx *zctx = ctx; + + out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen); + if (ZSTD_isError(out_len)) + return -EINVAL; + *dlen = out_len; + return 0; +} + +static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); + + return __zstd_decompress(src, slen, dst, dlen, ctx); +} + +static struct crypto_alg alg = { + .cra_name = "zstd", + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, + .cra_ctxsize = sizeof(struct zstd_ctx), + .cra_module = THIS_MODULE, + .cra_init = zstd_init, + .cra_exit = zstd_exit, + .cra_u = { .compress = { + .coa_compress = zstd_compress, + .coa_decompress = zstd_decompress } } +}; + +static int __init zstd_mod_init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit zstd_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(zstd_mod_init); +module_exit(zstd_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Zstd Compression Algorithm"); +MODULE_ALIAS_CRYPTO("zstd");
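
Usage sketch (editorial addition, not part of the commit): kernel code consumes this module through the generic synchronous compression interface, not by calling zstd_compress()/zstd_decompress() directly. The minimal round trip below is a hedged illustration: zstd_roundtrip_demo() and the buffer sizes are assumptions, while crypto_alloc_comp(), crypto_comp_compress() and crypto_comp_decompress() are the existing crypto_comp API that CRYPTO_ALG_TYPE_COMPRESS algorithms plug into.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

static int zstd_roundtrip_demo(void)
{
	struct crypto_comp *tfm;
	static const char sample[] = "The algorithm is zstd. "
				     "The algorithm is zstd. "
				     "The algorithm is zstd.";
	u8 comp[128], decomp[128];	/* assumed ample for this short input */
	unsigned int clen = sizeof(comp), dlen = sizeof(decomp);
	int err;

	/* looked up by the cra_name/MODULE_ALIAS_CRYPTO string "zstd" */
	tfm = crypto_alloc_comp("zstd", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* clen/dlen are in-out: buffer capacity in, produced length out */
	err = crypto_comp_compress(tfm, (const u8 *)sample,
				   sizeof(sample) - 1, comp, &clen);
	if (!err)
		err = crypto_comp_decompress(tfm, comp, clen, decomp, &dlen);
	if (!err && (dlen != sizeof(sample) - 1 ||
		     memcmp(decomp, sample, dlen)))
		err = -EINVAL;	/* the round trip must reproduce the input */

	crypto_free_comp(tfm);
	return err;
}

The 68-byte sample string matches the first zstd compression test vector above, so a successful run exercises the same path testmgr does.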

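Similarly (editorial addition, not part of the commit), the new heh(aes) template instance is reached through the symmetric-cipher front end in this tree. The sketch below reproduces the first HEH encryption vector above under stated assumptions: heh_demo() is a hypothetical name, error handling is abbreviated, and masking out CRYPTO_ALG_ASYNC requests a synchronous implementation so no completion callback is needed.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int heh_demo(void)
{
	/* first vector above: the plaintext bytes happen to equal the key */
	static const u8 key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	u8 iv[16] = { 0 };		/* all-zero IV, as in the vector */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("heh(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* scatterlist memory should not live on the stack */
	buf = kmemdup(key, sizeof(key), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, sizeof(key));
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(key), iv);
	err = crypto_skcipher_encrypt(req);	/* synchronous: !ASYNC tfm */
	/* on success, buf now begins d8 bd 40 bf ca e5 ee 81 ... */

	skcipher_request_free(req);
out_buf:
	kfree(buf);
out:
	crypto_free_skcipher(tfm);
	return err;
}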