Diffstat (limited to 'crypto')

 -rw-r--r--  crypto/Kconfig     |   32
 -rw-r--r--  crypto/Makefile    |    2
 -rw-r--r--  crypto/algapi.c    |   15
 -rw-r--r--  crypto/api.c       |   13
 -rw-r--r--  crypto/blkcipher.c |   21
 -rw-r--r--  crypto/gf128mul.c  |  171
 -rw-r--r--  crypto/heh.c       | 1033
 -rw-r--r--  crypto/internal.h  |    3
 -rw-r--r--  crypto/shash.c     |    8
 -rw-r--r--  crypto/skcipher.c  |  200
 -rw-r--r--  crypto/speck.c     |  307
 -rw-r--r--  crypto/testmgr.c   |   75
 -rw-r--r--  crypto/testmgr.h   | 1680

13 files changed, 3516 insertions(+), 44 deletions(-)
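Before the patch body, one orienting example: the core of the new crypto/speck.c below is a two-word ARX round (rotate, add, XOR). The following userspace sketch is an illustration only, not part of the patch; it restates the Speck128 round on arbitrary test words and checks that the unround step inverts it, which is the property decryption relies on. The local ror64()/rol64() helpers are stand-ins for the kernel's <linux/bitops.h> versions, which are unavailable in userspace.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's rotate helpers (valid for 0 < n < 64). */
static uint64_t ror64(uint64_t v, unsigned int n) { return (v >> n) | (v << (64 - n)); }
static uint64_t rol64(uint64_t v, unsigned int n) { return (v << n) | (v >> (64 - n)); }

/* One Speck128 round, as in crypto/speck.c below. */
static void speck128_round(uint64_t *x, uint64_t *y, uint64_t k)
{
	*x = ror64(*x, 8);
	*x += *y;
	*x ^= k;
	*y = rol64(*y, 3);
	*y ^= *x;
}

/* The inverse round: undo each step of speck128_round() in reverse order. */
static void speck128_unround(uint64_t *x, uint64_t *y, uint64_t k)
{
	*y ^= *x;
	*y = ror64(*y, 3);
	*x ^= k;
	*x -= *y;
	*x = rol64(*x, 8);
}

int main(void)
{
	/* Arbitrary test words and round key, not official test vectors. */
	uint64_t x = 0x0123456789abcdefULL, y = 0xfedcba9876543210ULL;
	uint64_t k = 0x1122334455667788ULL;
	uint64_t x0 = x, y0 = y;

	speck128_round(&x, &y, k);
	speck128_unround(&x, &y, k);
	assert(x == x0 && y == y0);
	printf("speck128_unround() inverts speck128_round()\n");
	return 0;
}

In crypto/speck.c, decryption applies this inverse round with the round keys in reverse order.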
diff --git a/crypto/Kconfig b/crypto/Kconfig index 7240821137fd..7a9f4d3d089b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -289,6 +289,24 @@ config CRYPTO_CBC CBC: Cipher Block Chaining mode This block cipher algorithm is required for IPSec. +config CRYPTO_HEH + tristate "HEH support" + select CRYPTO_CMAC + select CRYPTO_ECB + select CRYPTO_GF128MUL + select CRYPTO_MANAGER + select CRYPTO_POLY_HASH_ARM64_CE if ARM64 && KERNEL_MODE_NEON + help + HEH: Hash-Encrypt-Hash mode + HEH is a proposed block cipher mode of operation which extends the + strong pseudo-random permutation (SPRP) property of block ciphers to + arbitrary-length input strings. This provides a stronger notion of + security than existing block cipher modes of operation (e.g. CBC, CTR, + XTS), though it is usually less performant. Applications include disk + encryption and encryption of file names and contents. Currently, this + implementation only provides a symmetric cipher interface, so it can't + yet be used as an AEAD. + config CRYPTO_CTR tristate "CTR support" select CRYPTO_BLKCIPHER @@ -1372,6 +1390,20 @@ config CRYPTO_SERPENT_AVX2_X86_64 See also: <http://www.cl.cam.ac.uk/~rja14/serpent.html> +config CRYPTO_SPECK + tristate "Speck cipher algorithm" + select CRYPTO_ALGAPI + help + Speck is a lightweight block cipher that is tuned for optimal + performance in software (rather than hardware). + + Speck may not be as secure as AES, and should only be used on systems + where AES is not fast enough. + + See also: <https://eprint.iacr.org/2013/404.pdf> + + If unsure, say N. + config CRYPTO_TEA tristate "TEA, XTEA and XETA cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 03e66097eb0c..7ae15c47f684 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o +obj-$(CONFIG_CRYPTO_HEH) += heh.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_CTS) += cts.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o @@ -97,6 +98,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o +obj-$(CONFIG_CRYPTO_SPECK) += speck.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o diff --git a/crypto/algapi.c b/crypto/algapi.c index eb58b73ca925..ac70fd5cd404 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1001,6 +1001,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_alg_extsize); +int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, + u32 type, u32 mask) +{ + int ret = 0; + struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask); + + if (!IS_ERR(alg)) { + crypto_mod_put(alg); + ret = 1; + } + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_type_has_alg); + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/crypto/api.c b/crypto/api.c index bbc147cb5dec..e5c1abfd451f 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/completion.h> #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -611,5 +612,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request 
*req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index dca7bc87dad9..7bbfadc195a6 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -373,6 +373,27 @@ int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, } EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block); +/* + * This function allows ablkcipher algorithms to use the blkcipher_walk API to + * walk over their data. The specified crypto_ablkcipher tfm is used to + * initialize the struct blkcipher_walk, and the crypto_blkcipher specified in + * desc->tfm is never used so it can be left NULL. (Yes, this design is ugly, + * but it parallels blkcipher_aead_walk_virt_block() above. In the 4.10 kernel + * this is starting to be cleaned up...) + */ +int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_ablkcipher *tfm) +{ + walk->flags &= ~BLKCIPHER_WALK_PHYS; + walk->walk_blocksize = crypto_ablkcipher_blocksize(tfm); + walk->cipher_blocksize = walk->walk_blocksize; + walk->ivsize = crypto_ablkcipher_ivsize(tfm); + walk->alignmask = crypto_ablkcipher_alignmask(tfm); + return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_ablkcipher_walk_virt); + static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 5276607c72d0..f3d9f6da0767 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -44,7 +44,7 @@ --------------------------------------------------------------------------- Issue 31/01/2006 - This file provides fast multiplication in GF(128) as required by several + This file provides fast multiplication in GF(2^128) as required by several cryptographic authentication modes */ @@ -88,37 +88,52 @@ q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \ } -/* Given the value i in 0..255 as the byte overflow when a field element - in GHASH is multiplied by x^8, this function will return the values that - are generated in the lo 16-bit word of the field value by applying the - modular polynomial. The values lo_byte and hi_byte are returned via the - macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into - memory as required by a suitable definition of this macro operating on - the table above -*/ - -#define xx(p, q) 0x##p##q +/* + * Given a value i in 0..255 as the byte overflow when a field element + * in GF(2^128) is multiplied by x^8, the following macro returns the + * 16-bit value that must be XOR-ed into the low-degree end of the + * product to reduce it modulo the irreducible polynomial x^128 + x^7 + + * x^2 + x + 1. + * + * There are two versions of the macro, and hence two tables: one for + * the "be" convention where the highest-order bit is the coefficient of + * the highest-degree polynomial term, and one for the "le" convention + * where the highest-order bit is the coefficient of the lowest-degree + * polynomial term. In both cases the values are stored in CPU byte + * endianness such that the coefficients are ordered consistently across + * bytes, i.e. in the "be" table bits 15..0 of the stored value + * correspond to the coefficients of x^15..x^0, and in the "le" table + * bits 15..0 correspond to the coefficients of x^0..x^15. 
+ * + * Therefore, provided that the appropriate byte endianness conversions + * are done by the multiplication functions (and these must be in place + * anyway to support both little endian and big endian CPUs), the "be" + * table can be used for multiplications of both "bbe" and "ble" + * elements, and the "le" table can be used for multiplications of both + * "lle" and "lbe" elements. + */ -#define xda_bbe(i) ( \ - (i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \ - (i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \ - (i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \ - (i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \ +#define xda_be(i) ( \ + (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \ + (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \ + (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \ + (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \ ) -#define xda_lle(i) ( \ - (i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \ - (i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \ - (i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \ - (i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \ +#define xda_le(i) ( \ + (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \ + (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \ + (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \ + (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \ ) -static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle); -static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe); +static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le); +static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be); -/* These functions multiply a field element by x, by x^4 and by x^8 - * in the polynomial field representation. It uses 32-bit word operations - * to gain speed but compensates for machine endianess and hence works +/* + * The following functions multiply a field element by x or by x^8 in + * the polynomial field representation. They use 64-bit word operations + * to gain speed but compensate for machine endianness and hence work * correctly on both styles of machine. 
*/ @@ -126,7 +141,7 @@ static void gf128mul_x_lle(be128 *r, const be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_lle[(b << 7) & 0xff]; + u64 _tt = gf128mul_table_le[(b << 7) & 0xff]; r->b = cpu_to_be64((b >> 1) | (a << 63)); r->a = cpu_to_be64((a >> 1) ^ (_tt << 48)); @@ -136,7 +151,7 @@ static void gf128mul_x_bbe(be128 *r, const be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_bbe[a >> 63]; + u64 _tt = gf128mul_table_be[a >> 63]; r->a = cpu_to_be64((a << 1) | (b >> 63)); r->b = cpu_to_be64((b << 1) ^ _tt); @@ -146,7 +161,7 @@ void gf128mul_x_ble(be128 *r, const be128 *x) { u64 a = le64_to_cpu(x->a); u64 b = le64_to_cpu(x->b); - u64 _tt = gf128mul_table_bbe[b >> 63]; + u64 _tt = gf128mul_table_be[b >> 63]; r->a = cpu_to_le64((a << 1) ^ _tt); r->b = cpu_to_le64((b << 1) | (a >> 63)); @@ -157,7 +172,7 @@ static void gf128mul_x8_lle(be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_lle[b & 0xff]; + u64 _tt = gf128mul_table_le[b & 0xff]; x->b = cpu_to_be64((b >> 8) | (a << 56)); x->a = cpu_to_be64((a >> 8) ^ (_tt << 48)); @@ -167,12 +182,22 @@ static void gf128mul_x8_bbe(be128 *x) { u64 a = be64_to_cpu(x->a); u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_bbe[a >> 56]; + u64 _tt = gf128mul_table_be[a >> 56]; x->a = cpu_to_be64((a << 8) | (b >> 56)); x->b = cpu_to_be64((b << 8) ^ _tt); } +static void gf128mul_x8_ble(be128 *x) +{ + u64 a = le64_to_cpu(x->b); + u64 b = le64_to_cpu(x->a); + u64 _tt = gf128mul_table_be[a >> 56]; + + x->b = cpu_to_le64((a << 8) | (b >> 56)); + x->a = cpu_to_le64((b << 8) ^ _tt); +} + void gf128mul_lle(be128 *r, const be128 *b) { be128 p[8]; @@ -249,9 +274,48 @@ void gf128mul_bbe(be128 *r, const be128 *b) } EXPORT_SYMBOL(gf128mul_bbe); +void gf128mul_ble(be128 *r, const be128 *b) +{ + be128 p[8]; + int i; + + p[0] = *r; + for (i = 0; i < 7; ++i) + gf128mul_x_ble((be128 *)&p[i + 1], (be128 *)&p[i]); + + memset(r, 0, sizeof(*r)); + for (i = 0;;) { + u8 ch = ((u8 *)b)[15 - i]; + + if (ch & 0x80) + be128_xor(r, r, &p[7]); + if (ch & 0x40) + be128_xor(r, r, &p[6]); + if (ch & 0x20) + be128_xor(r, r, &p[5]); + if (ch & 0x10) + be128_xor(r, r, &p[4]); + if (ch & 0x08) + be128_xor(r, r, &p[3]); + if (ch & 0x04) + be128_xor(r, r, &p[2]); + if (ch & 0x02) + be128_xor(r, r, &p[1]); + if (ch & 0x01) + be128_xor(r, r, &p[0]); + + if (++i >= 16) + break; + + gf128mul_x8_ble(r); + } +} +EXPORT_SYMBOL(gf128mul_ble); + + /* This version uses 64k bytes of table space. A 16 byte buffer has to be multiplied by a 16 byte key - value in GF(128). If we consider a GF(128) value in + value in GF(2^128). If we consider a GF(2^128) value in the buffer's lowest byte, we can construct a table of the 256 16 byte values that result from the 256 values of this byte. This requires 4096 bytes. But we also @@ -352,8 +416,8 @@ void gf128mul_free_64k(struct gf128mul_64k *t) int i; for (i = 0; i < 16; i++) - kfree(t->t[i]); - kfree(t); + kzfree(t->t[i]); + kzfree(t); } EXPORT_SYMBOL(gf128mul_free_64k); @@ -385,7 +449,7 @@ EXPORT_SYMBOL(gf128mul_64k_bbe); /* This version uses 4k bytes of table space. A 16 byte buffer has to be multiplied by a 16 byte key - value in GF(128). If we consider a GF(128) value in a + value in GF(2^128). If we consider a GF(2^128) value in a single byte, we can construct a table of the 256 16 byte values that result from the 256 values of this byte. This requires 4096 bytes. 
If we take the highest byte in @@ -443,6 +507,28 @@ out: } EXPORT_SYMBOL(gf128mul_init_4k_bbe); +struct gf128mul_4k *gf128mul_init_4k_ble(const be128 *g) +{ + struct gf128mul_4k *t; + int j, k; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + goto out; + + t->t[1] = *g; + for (j = 1; j <= 64; j <<= 1) + gf128mul_x_ble(&t->t[j + j], &t->t[j]); + + for (j = 2; j < 256; j += j) + for (k = 1; k < j; ++k) + be128_xor(&t->t[j + k], &t->t[j], &t->t[k]); + +out: + return t; +} +EXPORT_SYMBOL(gf128mul_init_4k_ble); + void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t) { u8 *ap = (u8 *)a; @@ -473,5 +559,20 @@ void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t) } EXPORT_SYMBOL(gf128mul_4k_bbe); +void gf128mul_4k_ble(be128 *a, struct gf128mul_4k *t) +{ + u8 *ap = (u8 *)a; + be128 r[1]; + int i = 15; + + *r = t->t[ap[15]]; + while (i--) { + gf128mul_x8_ble(r); + be128_xor(r, r, &t->t[ap[i]]); + } + *a = *r; +} +EXPORT_SYMBOL(gf128mul_4k_ble); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)"); diff --git a/crypto/heh.c b/crypto/heh.c new file mode 100644 index 000000000000..10c00aaf797e --- /dev/null +++ b/crypto/heh.c @@ -0,0 +1,1033 @@ +/* + * HEH: Hash-Encrypt-Hash mode + * + * Copyright (c) 2016 Google Inc. + * + * Authors: + * Alex Cope <alexcope@google.com> + * Eric Biggers <ebiggers@google.com> + */ + +/* + * Hash-Encrypt-Hash (HEH) is a proposed block cipher mode of operation which + * extends the strong pseudo-random permutation (SPRP) property of block ciphers + * (e.g. AES) to arbitrary length input strings. It uses two keyed invertible + * hash functions with a layer of ECB encryption applied in-between. The + * algorithm is specified by the following Internet Draft: + * + * https://tools.ietf.org/html/draft-cope-heh-01 + * + * Although HEH can be used as either a regular symmetric cipher or as an AEAD, + * currently this module only provides it as a symmetric cipher. Additionally, + * only 16-byte nonces are supported. + */ + +#include <crypto/gf128mul.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <crypto/skcipher.h> +#include "internal.h" + +/* + * The block size is the size of GF(2^128) elements and also the required block + * size of the underlying block cipher. 
+ */ +#define HEH_BLOCK_SIZE 16 + +struct heh_instance_ctx { + struct crypto_shash_spawn cmac; + struct crypto_shash_spawn poly_hash; + struct crypto_skcipher_spawn ecb; +}; + +struct heh_tfm_ctx { + struct crypto_shash *cmac; + struct crypto_shash *poly_hash; /* keyed with tau_key */ + struct crypto_ablkcipher *ecb; +}; + +struct heh_cmac_data { + u8 nonce[HEH_BLOCK_SIZE]; + __le32 nonce_length; + __le32 aad_length; + __le32 message_length; + __le32 padding; +}; + +struct heh_req_ctx { /* aligned to alignmask */ + be128 beta1_key; + be128 beta2_key; + union { + struct { + struct heh_cmac_data data; + struct shash_desc desc; + /* + crypto_shash_descsize(cmac) */ + } cmac; + struct { + struct shash_desc desc; + /* + crypto_shash_descsize(poly_hash) */ + } poly_hash; + struct { + u8 keystream[HEH_BLOCK_SIZE]; + u8 tmp[HEH_BLOCK_SIZE]; + struct scatterlist tmp_sgl[2]; + struct ablkcipher_request req; + /* + crypto_ablkcipher_reqsize(ecb) */ + } ecb; + } u; +}; + +/* + * Get the offset in bytes to the last full block, or equivalently the length of + * all full blocks excluding the last + */ +static inline unsigned int get_tail_offset(unsigned int len) +{ + len -= len % HEH_BLOCK_SIZE; + return len - HEH_BLOCK_SIZE; +} + +static inline struct heh_req_ctx *heh_req_ctx(struct ablkcipher_request *req) +{ + unsigned int alignmask = crypto_ablkcipher_alignmask( + crypto_ablkcipher_reqtfm(req)); + + return (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), + alignmask + 1); +} + +static inline void async_done(struct crypto_async_request *areq, int err, + int (*next_step)(struct ablkcipher_request *, + u32)) +{ + struct ablkcipher_request *req = areq->data; + + if (err) + goto out; + + err = next_step(req, req->base.flags & ~CRYPTO_TFM_REQ_MAY_SLEEP); + if (err == -EINPROGRESS || + (err == -EBUSY && (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return; +out: + ablkcipher_request_complete(req, err); +} + +/* + * Generate the per-message "beta" keys used by the hashing layers of HEH. The + * first beta key is the CMAC of the nonce, the additional authenticated data + * (AAD), and the lengths in bytes of the nonce, AAD, and message. The nonce + * and AAD are each zero-padded to the next 16-byte block boundary, and the + * lengths are serialized as 4-byte little endian integers and zero-padded to + * the next 16-byte block boundary. + * The second beta key is the first one interpreted as an element in GF(2^128) + * and multiplied by x. + * + * Note that because the nonce and AAD may, in general, be variable-length, the + * key generation must be done by a pseudo-random function (PRF) on + * variable-length inputs. CBC-MAC does not satisfy this, as it is only a PRF + * on fixed-length inputs. CMAC remedies this flaw. Including the lengths of + * the nonce, AAD, and message is also critical to avoid collisions. + * + * That being said, this implementation does not yet operate as an AEAD and + * therefore there is never any AAD, nor are variable-length nonces supported. 
+ */ +static int generate_betas(struct ablkcipher_request *req, + be128 *beta1_key, be128 *beta2_key) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct heh_req_ctx *rctx = heh_req_ctx(req); + struct heh_cmac_data *data = &rctx->u.cmac.data; + struct shash_desc *desc = &rctx->u.cmac.desc; + int err; + + BUILD_BUG_ON(sizeof(*data) != 2 * HEH_BLOCK_SIZE); + memcpy(data->nonce, req->info, HEH_BLOCK_SIZE); + data->nonce_length = cpu_to_le32(HEH_BLOCK_SIZE); + data->aad_length = cpu_to_le32(0); + data->message_length = cpu_to_le32(req->nbytes); + data->padding = cpu_to_le32(0); + + desc->tfm = ctx->cmac; + desc->flags = req->base.flags; + + err = crypto_shash_digest(desc, (const u8 *)data, sizeof(*data), + (u8 *)beta1_key); + if (err) + return err; + + gf128mul_x_ble(beta2_key, beta1_key); + return 0; +} + +/*****************************************************************************/ + +/* + * This is the generic version of poly_hash. It does the GF(2^128) + * multiplication by 'tau_key' using a precomputed table, without using any + * special CPU instructions. On some platforms, an accelerated version (with + * higher cra_priority) may be used instead. + */ + +struct poly_hash_tfm_ctx { + struct gf128mul_4k *tau_key; +}; + +struct poly_hash_desc_ctx { + be128 digest; + unsigned int count; +}; + +static int poly_hash_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(tfm); + be128 key128; + + if (keylen != HEH_BLOCK_SIZE) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (tctx->tau_key) + gf128mul_free_4k(tctx->tau_key); + memcpy(&key128, key, HEH_BLOCK_SIZE); + tctx->tau_key = gf128mul_init_4k_ble(&key128); + if (!tctx->tau_key) + return -ENOMEM; + return 0; +} + +static int poly_hash_init(struct shash_desc *desc) +{ + struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->digest = (be128) { 0 }; + ctx->count = 0; + return 0; +} + +static int poly_hash_update(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); + unsigned int partial = ctx->count % HEH_BLOCK_SIZE; + u8 *dst = (u8 *)&ctx->digest + partial; + + ctx->count += len; + + /* Finishing at least one block? */ + if (partial + len >= HEH_BLOCK_SIZE) { + + if (partial) { + /* Finish the pending block. */ + unsigned int n = HEH_BLOCK_SIZE - partial; + + len -= n; + do { + *dst++ ^= *src++; + } while (--n); + + gf128mul_4k_ble(&ctx->digest, tctx->tau_key); + } + + /* Process zero or more full blocks. */ + while (len >= HEH_BLOCK_SIZE) { + be128 coeff; + + memcpy(&coeff, src, HEH_BLOCK_SIZE); + be128_xor(&ctx->digest, &ctx->digest, &coeff); + src += HEH_BLOCK_SIZE; + len -= HEH_BLOCK_SIZE; + gf128mul_4k_ble(&ctx->digest, tctx->tau_key); + } + dst = (u8 *)&ctx->digest; + } + + /* Continue adding the next block to 'digest'. */ + while (len--) + *dst++ ^= *src++; + return 0; +} + +static int poly_hash_final(struct shash_desc *desc, u8 *out) +{ + struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); + + /* Finish the last block if needed. 
*/ + if (ctx->count % HEH_BLOCK_SIZE) { + struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + + gf128mul_4k_ble(&ctx->digest, tctx->tau_key); + } + + memcpy(out, &ctx->digest, HEH_BLOCK_SIZE); + return 0; +} + +static void poly_hash_exit(struct crypto_tfm *tfm) +{ + struct poly_hash_tfm_ctx *tctx = crypto_tfm_ctx(tfm); + + gf128mul_free_4k(tctx->tau_key); +} + +static struct shash_alg poly_hash_alg = { + .digestsize = HEH_BLOCK_SIZE, + .init = poly_hash_init, + .update = poly_hash_update, + .final = poly_hash_final, + .setkey = poly_hash_setkey, + .descsize = sizeof(struct poly_hash_desc_ctx), + .base = { + .cra_name = "poly_hash", + .cra_driver_name = "poly_hash-generic", + .cra_priority = 100, + .cra_ctxsize = sizeof(struct poly_hash_tfm_ctx), + .cra_exit = poly_hash_exit, + .cra_module = THIS_MODULE, + }, +}; + +/*****************************************************************************/ + +/* + * Split the message into 16 byte blocks, padding out the last block, and use + * the blocks as coefficients in the evaluation of a polynomial over GF(2^128) + * at the secret point 'tau_key'. For ease of implementing the higher-level + * heh_hash_inv() function, the constant and degree-1 coefficients are swapped + * if there is a partial block. + * + * Mathematically, compute: + * if (no partial block) + * k^{N-1} * m_0 + ... + k * m_{N-2} + m_{N-1} + * else if (partial block) + * k^N * m_0 + ... + k^2 * m_{N-2} + k * m_N + m_{N-1} + * + * where: + * k is tau_key + * N is the number of full blocks in the message + * m_i is the i-th full block in the message for i = 0 to N-1 inclusive + * m_N is the partial block of the message zero-padded up to 16 bytes + * + * Note that most of this is now separated out into its own keyed hash + * algorithm, to allow optimized implementations. However, we still handle the + * swapping of the last two coefficients here in the HEH template because this + * simplifies the poly_hash algorithms: they don't have to buffer an extra + * block, don't have to duplicate as much code, and are more similar to GHASH.
+ */ +static int poly_hash(struct ablkcipher_request *req, struct scatterlist *sgl, + be128 *hash) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct shash_desc *desc = &rctx->u.poly_hash.desc; + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int tail_len = req->nbytes - tail_offset; + be128 tail[2]; + unsigned int i, n; + struct sg_mapping_iter miter; + int err; + + desc->tfm = ctx->poly_hash; + desc->flags = req->base.flags; + + /* Handle all full blocks except the last */ + err = crypto_shash_init(desc); + sg_miter_start(&miter, sgl, sg_nents(sgl), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + for (i = 0; i < tail_offset && !err; i += n) { + sg_miter_next(&miter); + n = min_t(unsigned int, miter.length, tail_offset - i); + err = crypto_shash_update(desc, miter.addr, n); + } + sg_miter_stop(&miter); + if (err) + return err; + + /* Handle the last full block and the partial block */ + scatterwalk_map_and_copy(tail, sgl, tail_offset, tail_len, 0); + + if (tail_len != HEH_BLOCK_SIZE) { + /* handle the partial block */ + memset((u8 *)tail + tail_len, 0, sizeof(tail) - tail_len); + err = crypto_shash_update(desc, (u8 *)&tail[1], HEH_BLOCK_SIZE); + if (err) + return err; + } + err = crypto_shash_final(desc, (u8 *)hash); + if (err) + return err; + be128_xor(hash, hash, &tail[0]); + return 0; +} + +/* + * Transform all full blocks except the last. + * This is used by both the hash and inverse hash phases. + */ +static int heh_tfm_blocks(struct ablkcipher_request *req, + struct scatterlist *src_sgl, + struct scatterlist *dst_sgl, unsigned int len, + const be128 *hash, const be128 *beta_key) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct blkcipher_desc desc = { .flags = req->base.flags }; + struct blkcipher_walk walk; + be128 e = *beta_key; + int err; + unsigned int nbytes; + + blkcipher_walk_init(&walk, dst_sgl, src_sgl, len); + + err = blkcipher_ablkcipher_walk_virt(&desc, &walk, tfm); + + while ((nbytes = walk.nbytes)) { + const be128 *src = (be128 *)walk.src.virt.addr; + be128 *dst = (be128 *)walk.dst.virt.addr; + + do { + gf128mul_x_ble(&e, &e); + be128_xor(dst, src, hash); + be128_xor(dst, dst, &e); + src++; + dst++; + } while ((nbytes -= HEH_BLOCK_SIZE) >= HEH_BLOCK_SIZE); + err = blkcipher_walk_done(&desc, &walk, nbytes); + } + return err; +} + +/* + * The hash phase of HEH. Given a message, compute: + * + * (m_0 + H, ..., m_{N-2} + H, H, m_N) + (xb, x^2b, ..., x^{N-1}b, b, 0) + * + * where: + * N is the number of full blocks in the message + * m_i is the i-th full block in the message for i = 0 to N-1 inclusive + * m_N is the unpadded partial block, possibly empty + * H is the poly_hash() of the message, keyed by tau_key + * b is beta_key + * x is the element x in our representation of GF(2^128) + * + * Note that the partial block remains unchanged, but it does affect the result + * of poly_hash() and therefore the transformation of all the full blocks. 
+ */ +static int heh_hash(struct ablkcipher_request *req, const be128 *beta_key) +{ + be128 hash; + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int partial_len = req->nbytes % HEH_BLOCK_SIZE; + int err; + + /* poly_hash() the full message including the partial block */ + err = poly_hash(req, req->src, &hash); + if (err) + return err; + + /* Transform all full blocks except the last */ + err = heh_tfm_blocks(req, req->src, req->dst, tail_offset, &hash, + beta_key); + if (err) + return err; + + /* Set the last full block to hash XOR beta_key */ + be128_xor(&hash, &hash, beta_key); + scatterwalk_map_and_copy(&hash, req->dst, tail_offset, HEH_BLOCK_SIZE, + 1); + + /* Copy the partial block if needed */ + if (partial_len != 0 && req->src != req->dst) { + unsigned int offs = tail_offset + HEH_BLOCK_SIZE; + + scatterwalk_map_and_copy(&hash, req->src, offs, partial_len, 0); + scatterwalk_map_and_copy(&hash, req->dst, offs, partial_len, 1); + } + return 0; +} + +/* + * The inverse hash phase of HEH. This undoes the result of heh_hash(). + */ +static int heh_hash_inv(struct ablkcipher_request *req, const be128 *beta_key) +{ + be128 hash; + be128 tmp; + struct scatterlist tmp_sgl[2]; + struct scatterlist *tail_sgl; + unsigned int tail_offset = get_tail_offset(req->nbytes); + struct scatterlist *sgl = req->dst; + int err; + + /* + * The last full block was computed as hash XOR beta_key, so XOR it with + * beta_key to recover hash. + */ + tail_sgl = scatterwalk_ffwd(tmp_sgl, sgl, tail_offset); + scatterwalk_map_and_copy(&hash, tail_sgl, 0, HEH_BLOCK_SIZE, 0); + be128_xor(&hash, &hash, beta_key); + + /* Transform all full blocks except the last */ + err = heh_tfm_blocks(req, sgl, sgl, tail_offset, &hash, beta_key); + if (err) + return err; + + /* + * Recover the last full block. We know 'hash', i.e. the poly_hash() of + * the original message. The last full block was the constant term + * of the polynomial. To recover the last full block, temporarily zero + * it, compute the poly_hash(), and take the difference from 'hash'.
+ */ + memset(&tmp, 0, sizeof(tmp)); + scatterwalk_map_and_copy(&tmp, tail_sgl, 0, HEH_BLOCK_SIZE, 1); + err = poly_hash(req, sgl, &tmp); + if (err) + return err; + be128_xor(&tmp, &tmp, &hash); + scatterwalk_map_and_copy(&tmp, tail_sgl, 0, HEH_BLOCK_SIZE, 1); + return 0; +} + +static int heh_hash_inv_step(struct ablkcipher_request *req, u32 flags) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + + return heh_hash_inv(req, &rctx->beta2_key); +} + +static int heh_ecb_step_3(struct ablkcipher_request *req, u32 flags) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + u8 partial_block[HEH_BLOCK_SIZE] __aligned(__alignof__(u32)); + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int partial_offset = tail_offset + HEH_BLOCK_SIZE; + unsigned int partial_len = req->nbytes - partial_offset; + + /* + * Extract the pad in req->dst at tail_offset, and xor the partial block + * with it to create the encrypted partial block + */ + scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, + HEH_BLOCK_SIZE, 0); + scatterwalk_map_and_copy(partial_block, req->dst, partial_offset, + partial_len, 0); + crypto_xor(partial_block, rctx->u.ecb.keystream, partial_len); + + /* + * Store the encrypted final block and partial block back in dst_sg + */ + scatterwalk_map_and_copy(&rctx->u.ecb.tmp, req->dst, tail_offset, + HEH_BLOCK_SIZE, 1); + scatterwalk_map_and_copy(partial_block, req->dst, partial_offset, + partial_len, 1); + + return heh_hash_inv_step(req, flags); +} + +static void heh_ecb_step_2_done(struct crypto_async_request *areq, int err) +{ + return async_done(areq, err, heh_ecb_step_3); +} + +static int heh_ecb_step_2(struct ablkcipher_request *req, u32 flags) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + unsigned int partial_len = req->nbytes % HEH_BLOCK_SIZE; + struct scatterlist *tmp_sgl; + int err; + unsigned int tail_offset = get_tail_offset(req->nbytes); + + if (partial_len == 0) + return heh_hash_inv_step(req, flags); + + /* + * Extract the final full block, store it in tmp, and then xor that with + * the value saved in u.ecb.keystream + */ + scatterwalk_map_and_copy(rctx->u.ecb.tmp, req->dst, tail_offset, + HEH_BLOCK_SIZE, 0); + crypto_xor(rctx->u.ecb.keystream, rctx->u.ecb.tmp, HEH_BLOCK_SIZE); + + /* + * Encrypt the value in rctx->u.ecb.keystream to create the pad for the + * partial block. + * We cannot encrypt stack buffers, so re-use the dst_sg to do this + * encryption to avoid a malloc. The value at tail_offset is stored in + * tmp, and will be restored later. + */ + scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, + HEH_BLOCK_SIZE, 1); + tmp_sgl = scatterwalk_ffwd(rctx->u.ecb.tmp_sgl, req->dst, tail_offset); + ablkcipher_request_set_callback(&rctx->u.ecb.req, flags, + heh_ecb_step_2_done, req); + ablkcipher_request_set_crypt(&rctx->u.ecb.req, tmp_sgl, tmp_sgl, + HEH_BLOCK_SIZE, NULL); + err = crypto_ablkcipher_encrypt(&rctx->u.ecb.req); + if (err) + return err; + return heh_ecb_step_3(req, flags); +} + +static void heh_ecb_full_done(struct crypto_async_request *areq, int err) +{ + return async_done(areq, err, heh_ecb_step_2); +} + +/* + * The encrypt phase of HEH. This uses ECB encryption, with special handling + * for the partial block at the end if any. The source data is already in + * req->dst, so the encryption happens in-place. + * + * After the encrypt phase we continue on to the inverse hash phase. The + * function calls are chained to support asynchronous ECB algorithms.
+ */ +static int heh_ecb(struct ablkcipher_request *req, bool decrypt) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct heh_req_ctx *rctx = heh_req_ctx(req); + struct ablkcipher_request *ecb_req = &rctx->u.ecb.req; + unsigned int tail_offset = get_tail_offset(req->nbytes); + unsigned int full_len = tail_offset + HEH_BLOCK_SIZE; + int err; + + /* + * Save the last full block before it is encrypted/decrypted. This will + * be used later to encrypt/decrypt the partial block + */ + scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, + HEH_BLOCK_SIZE, 0); + + /* Encrypt/decrypt all full blocks */ + ablkcipher_request_set_tfm(ecb_req, ctx->ecb); + ablkcipher_request_set_callback(ecb_req, req->base.flags, + heh_ecb_full_done, req); + ablkcipher_request_set_crypt(ecb_req, req->dst, req->dst, full_len, + NULL); + if (decrypt) + err = crypto_ablkcipher_decrypt(ecb_req); + else + err = crypto_ablkcipher_encrypt(ecb_req); + if (err) + return err; + + return heh_ecb_step_2(req, req->base.flags); +} + +static int heh_crypt(struct ablkcipher_request *req, bool decrypt) +{ + struct heh_req_ctx *rctx = heh_req_ctx(req); + int err; + + /* Inputs must be at least one full block */ + if (req->nbytes < HEH_BLOCK_SIZE) + return -EINVAL; + + err = generate_betas(req, &rctx->beta1_key, &rctx->beta2_key); + if (err) + return err; + + if (decrypt) + swap(rctx->beta1_key, rctx->beta2_key); + + err = heh_hash(req, &rctx->beta1_key); + if (err) + return err; + + return heh_ecb(req, decrypt); +} + +static int heh_encrypt(struct ablkcipher_request *req) +{ + return heh_crypt(req, false); +} + +static int heh_decrypt(struct ablkcipher_request *req) +{ + return heh_crypt(req, true); +} + +static int heh_setkey(struct crypto_ablkcipher *parent, const u8 *key, + unsigned int keylen) +{ + struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(parent); + struct crypto_shash *cmac = ctx->cmac; + struct crypto_ablkcipher *ecb = ctx->ecb; + SHASH_DESC_ON_STACK(desc, cmac); + u8 *derived_keys; + u8 digest[HEH_BLOCK_SIZE]; + unsigned int i; + int err; + + /* set prf_key = key */ + crypto_shash_clear_flags(cmac, CRYPTO_TFM_REQ_MASK); + crypto_shash_set_flags(cmac, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_shash_setkey(cmac, key, keylen); + crypto_ablkcipher_set_flags(parent, crypto_shash_get_flags(cmac) & + CRYPTO_TFM_RES_MASK); + if (err) + return err; + + /* + * Generate tau_key and ecb_key as follows: + * tau_key = cmac(prf_key, 0x00...01) + * ecb_key = cmac(prf_key, 0x00...02) || cmac(prf_key, 0x00...03) || ... 
+ * truncated to keylen bytes + */ + derived_keys = kzalloc(round_up(HEH_BLOCK_SIZE + keylen, + HEH_BLOCK_SIZE), GFP_KERNEL); + if (!derived_keys) + return -ENOMEM; + desc->tfm = cmac; + desc->flags = (crypto_shash_get_flags(cmac) & CRYPTO_TFM_REQ_MASK); + for (i = 0; i < keylen + HEH_BLOCK_SIZE; i += HEH_BLOCK_SIZE) { + derived_keys[i + HEH_BLOCK_SIZE - 1] = + 0x01 + i / HEH_BLOCK_SIZE; + err = crypto_shash_digest(desc, derived_keys + i, + HEH_BLOCK_SIZE, digest); + if (err) + goto out; + memcpy(derived_keys + i, digest, HEH_BLOCK_SIZE); + } + + err = crypto_shash_setkey(ctx->poly_hash, derived_keys, HEH_BLOCK_SIZE); + if (err) + goto out; + + crypto_ablkcipher_clear_flags(ecb, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(ecb, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(ecb, derived_keys + HEH_BLOCK_SIZE, + keylen); + crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(ecb) & + CRYPTO_TFM_RES_MASK); +out: + kzfree(derived_keys); + return err; +} + +static int heh_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct heh_instance_ctx *ictx = crypto_instance_ctx(inst); + struct heh_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *cmac; + struct crypto_shash *poly_hash; + struct crypto_ablkcipher *ecb; + unsigned int reqsize; + int err; + + cmac = crypto_spawn_shash(&ictx->cmac); + if (IS_ERR(cmac)) + return PTR_ERR(cmac); + + poly_hash = crypto_spawn_shash(&ictx->poly_hash); + err = PTR_ERR(poly_hash); + if (IS_ERR(poly_hash)) + goto err_free_cmac; + + ecb = crypto_spawn_skcipher(&ictx->ecb); + err = PTR_ERR(ecb); + if (IS_ERR(ecb)) + goto err_free_poly_hash; + + ctx->cmac = cmac; + ctx->poly_hash = poly_hash; + ctx->ecb = ecb; + + reqsize = crypto_tfm_alg_alignmask(tfm) & + ~(crypto_tfm_ctx_alignment() - 1); + reqsize += max3(offsetof(struct heh_req_ctx, u.cmac.desc) + + sizeof(struct shash_desc) + + crypto_shash_descsize(cmac), + offsetof(struct heh_req_ctx, u.poly_hash.desc) + + sizeof(struct shash_desc) + + crypto_shash_descsize(poly_hash), + offsetof(struct heh_req_ctx, u.ecb.req) + + sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(ecb)); + tfm->crt_ablkcipher.reqsize = reqsize; + + return 0; + +err_free_poly_hash: + crypto_free_shash(poly_hash); +err_free_cmac: + crypto_free_shash(cmac); + return err; +} + +static void heh_exit_tfm(struct crypto_tfm *tfm) +{ + struct heh_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(ctx->cmac); + crypto_free_shash(ctx->poly_hash); + crypto_free_ablkcipher(ctx->ecb); +} + +static void heh_free_instance(struct crypto_instance *inst) +{ + struct heh_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_shash(&ctx->cmac); + crypto_drop_shash(&ctx->poly_hash); + crypto_drop_skcipher(&ctx->ecb); + kfree(inst); +} + +/* + * Create an instance of HEH as an ablkcipher. + * + * This relies on underlying CMAC and ECB algorithms, usually cmac(aes) and + * ecb(aes). For performance reasons we support asynchronous ECB algorithms. + * However, we do not yet support asynchronous CMAC algorithms because CMAC is + * only used on a small fixed amount of data per request, independent of the + * request length. This would change if AEAD or variable-length nonce support + * were to be exposed.
+ */ +static int heh_create_common(struct crypto_template *tmpl, struct rtattr **tb, + const char *full_name, const char *cmac_name, + const char *poly_hash_name, const char *ecb_name) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct heh_instance_ctx *ctx; + struct shash_alg *cmac; + struct shash_alg *poly_hash; + struct crypto_alg *ecb; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + /* User must be asking for something compatible with ablkcipher */ + if ((algt->type ^ CRYPTO_ALG_TYPE_ABLKCIPHER) & algt->mask) + return -EINVAL; + + /* Allocate the ablkcipher instance */ + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = crypto_instance_ctx(inst); + + /* Set up the cmac spawn */ + ctx->cmac.base.inst = inst; + err = crypto_grab_shash(&ctx->cmac, cmac_name, 0, 0); + if (err) + goto err_free_inst; + cmac = crypto_spawn_shash_alg(&ctx->cmac); + err = -EINVAL; + if (cmac->digestsize != HEH_BLOCK_SIZE) + goto err_drop_cmac; + + /* Set up the poly_hash spawn */ + ctx->poly_hash.base.inst = inst; + err = crypto_grab_shash(&ctx->poly_hash, poly_hash_name, 0, 0); + if (err) + goto err_drop_cmac; + poly_hash = crypto_spawn_shash_alg(&ctx->poly_hash); + err = -EINVAL; + if (poly_hash->digestsize != HEH_BLOCK_SIZE) + goto err_drop_poly_hash; + + /* Set up the ecb spawn */ + ctx->ecb.base.inst = inst; + err = crypto_grab_skcipher(&ctx->ecb, ecb_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_drop_poly_hash; + ecb = crypto_skcipher_spawn_alg(&ctx->ecb); + + /* HEH only supports block ciphers with 16 byte block size */ + err = -EINVAL; + if (ecb->cra_blocksize != HEH_BLOCK_SIZE) + goto err_drop_ecb; + + /* The underlying "ECB" algorithm must not require an IV */ + err = -EINVAL; + if ((ecb->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) { + if (ecb->cra_blkcipher.ivsize != 0) + goto err_drop_ecb; + } else { + if (ecb->cra_ablkcipher.ivsize != 0) + goto err_drop_ecb; + } + + /* Set the instance names */ + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "heh_base(%s,%s,%s)", cmac->base.cra_driver_name, + poly_hash->base.cra_driver_name, + ecb->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ecb; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "%s", full_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ecb; + + /* Finish initializing the instance */ + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + (ecb->cra_flags & CRYPTO_ALG_ASYNC); + inst->alg.cra_blocksize = HEH_BLOCK_SIZE; + inst->alg.cra_ctxsize = sizeof(struct heh_tfm_ctx); + inst->alg.cra_alignmask = ecb->cra_alignmask | (__alignof__(be128) - 1); + inst->alg.cra_priority = ecb->cra_priority; + inst->alg.cra_type = &crypto_ablkcipher_type; + inst->alg.cra_init = heh_init_tfm; + inst->alg.cra_exit = heh_exit_tfm; + + inst->alg.cra_ablkcipher.setkey = heh_setkey; + inst->alg.cra_ablkcipher.encrypt = heh_encrypt; + inst->alg.cra_ablkcipher.decrypt = heh_decrypt; + if ((ecb->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) { + inst->alg.cra_ablkcipher.min_keysize = ecb->cra_blkcipher.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = ecb->cra_blkcipher.max_keysize; + } else { + inst->alg.cra_ablkcipher.min_keysize = ecb->cra_ablkcipher.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = ecb->cra_ablkcipher.max_keysize; + } + inst->alg.cra_ablkcipher.ivsize = HEH_BLOCK_SIZE; + 
+ /* Register the instance */ + err = crypto_register_instance(tmpl, inst); + if (err) + goto err_drop_ecb; + return 0; + +err_drop_ecb: + crypto_drop_skcipher(&ctx->ecb); +err_drop_poly_hash: + crypto_drop_shash(&ctx->poly_hash); +err_drop_cmac: + crypto_drop_shash(&ctx->cmac); +err_free_inst: + kfree(inst); + return err; +} + +static int heh_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *cipher_name; + char full_name[CRYPTO_MAX_ALG_NAME]; + char cmac_name[CRYPTO_MAX_ALG_NAME]; + char ecb_name[CRYPTO_MAX_ALG_NAME]; + + /* Get the name of the requested block cipher (e.g. aes) */ + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "heh(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(cmac_name, CRYPTO_MAX_ALG_NAME, "cmac(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return heh_create_common(tmpl, tb, full_name, cmac_name, "poly_hash", + ecb_name); +} + +static struct crypto_template heh_tmpl = { + .name = "heh", + .create = heh_create, + .free = heh_free_instance, + .module = THIS_MODULE, +}; + +static int heh_base_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + char full_name[CRYPTO_MAX_ALG_NAME]; + const char *cmac_name; + const char *poly_hash_name; + const char *ecb_name; + + cmac_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cmac_name)) + return PTR_ERR(cmac_name); + + poly_hash_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(poly_hash_name)) + return PTR_ERR(poly_hash_name); + + ecb_name = crypto_attr_alg_name(tb[3]); + if (IS_ERR(ecb_name)) + return PTR_ERR(ecb_name); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "heh_base(%s,%s,%s)", + cmac_name, poly_hash_name, ecb_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return heh_create_common(tmpl, tb, full_name, cmac_name, poly_hash_name, + ecb_name); +} + +/* + * If HEH is instantiated as "heh_base" instead of "heh", then specific + * implementations of cmac, poly_hash, and ecb can be specified instead of just + * the cipher. 
+ */ +static struct crypto_template heh_base_tmpl = { + .name = "heh_base", + .create = heh_base_create, + .free = heh_free_instance, + .module = THIS_MODULE, +}; + +static int __init heh_module_init(void) +{ + int err; + + err = crypto_register_template(&heh_tmpl); + if (err) + return err; + + err = crypto_register_template(&heh_base_tmpl); + if (err) + goto out_undo_heh; + + err = crypto_register_shash(&poly_hash_alg); + if (err) + goto out_undo_heh_base; + + return 0; + +out_undo_heh_base: + crypto_unregister_template(&heh_base_tmpl); +out_undo_heh: + crypto_unregister_template(&heh_tmpl); + return err; +} + +static void __exit heh_module_exit(void) +{ + crypto_unregister_template(&heh_tmpl); + crypto_unregister_template(&heh_base_tmpl); + crypto_unregister_shash(&poly_hash_alg); +} + +module_init(heh_module_init); +module_exit(heh_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Hash-Encrypt-Hash block cipher mode"); +MODULE_ALIAS_CRYPTO("heh"); +MODULE_ALIAS_CRYPTO("heh_base"); diff --git a/crypto/internal.h b/crypto/internal.h index 00e42a3ed814..7eefcdb00227 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v); unsigned int crypto_alg_extsize(struct crypto_alg *alg); +int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, + u32 type, u32 mask); + static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) { atomic_inc(&alg->cra_refcnt); diff --git a/crypto/shash.c b/crypto/shash.c index 5444b429e35d..641568d35599 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -686,6 +686,14 @@ void shash_free_instance(struct crypto_instance *inst) } EXPORT_SYMBOL_GPL(shash_free_instance); +int crypto_grab_shash(struct crypto_shash_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_shash_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_shash); + int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, struct shash_alg *alg, struct crypto_instance *inst) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d199c0b1751c..d248008e7f7b 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -16,7 +16,11 @@ #include <crypto/internal/skcipher.h> #include <linux/bug.h> +#include <linux/cryptouser.h> #include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/seq_file.h> +#include <net/netlink.h> #include "internal.h" @@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); - BUG_ON(alg->cra_type != &crypto_ablkcipher_type && - alg->cra_type != &crypto_givcipher_type); + if (alg->cra_type == &crypto_ablkcipher_type || + alg->cra_type == &crypto_givcipher_type) + return sizeof(struct crypto_ablkcipher *); - return sizeof(struct crypto_ablkcipher *); + return crypto_alg_extsize(alg); } static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, @@ -118,7 +123,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->decrypt = skcipher_decrypt_blkcipher; skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); - skcipher->has_setkey = calg->cra_blkcipher.max_keysize; + skcipher->keysize = calg->cra_blkcipher.max_keysize; return 0; } @@ -211,31 +216,123 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); skcipher->reqsize = 
crypto_ablkcipher_reqsize(ablkcipher) + sizeof(struct ablkcipher_request); - skcipher->has_setkey = calg->cra_ablkcipher.max_keysize; + skcipher->keysize = calg->cra_ablkcipher.max_keysize; return 0; } +static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + + alg->exit(skcipher); +} + static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); - BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type && - tfm->__crt_alg->cra_type != &crypto_givcipher_type); + if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || + tfm->__crt_alg->cra_type == &crypto_givcipher_type) + return crypto_init_skcipher_ops_ablkcipher(tfm); + + skcipher->setkey = alg->setkey; + skcipher->encrypt = alg->encrypt; + skcipher->decrypt = alg->decrypt; + skcipher->ivsize = alg->ivsize; + skcipher->keysize = alg->max_keysize; + + if (alg->exit) + skcipher->base.exit = crypto_skcipher_exit_tfm; - return crypto_init_skcipher_ops_ablkcipher(tfm); + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_skcipher_free_instance(struct crypto_instance *inst) +{ + struct skcipher_instance *skcipher = + container_of(inst, struct skcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + seq_printf(m, "type : skcipher\n"); + seq_printf(m, "async : %s\n", + alg->cra_flags & CRYPTO_ALG_ASYNC ? 
"yes" : "no"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->chunksize); } +#ifdef CONFIG_NET +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_blkcipher rblkcipher; + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->min_keysize; + rblkcipher.max_keysize = skcipher->max_keysize; + rblkcipher.ivsize = skcipher->ivsize; + + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + static const struct crypto_type crypto_skcipher_type2 = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, + .free = crypto_skcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_skcipher_show, +#endif + .report = crypto_skcipher_report, .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, - .type = CRYPTO_ALG_TYPE_BLKCIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; +int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_skcipher_type2; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_skcipher2); + struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask) { @@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, } EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, + type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_skcipher2); + +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8) + return -EINVAL; + + if (!alg->chunksize) + alg->chunksize = base->cra_blocksize; + + base->cra_type = &crypto_skcipher_type2; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; + + return 0; +} + +int crypto_register_skcipher(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_skcipher); + +void crypto_unregister_skcipher(struct skcipher_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skcipher); + +int crypto_register_skciphers(struct skcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_skcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_skciphers); + +void crypto_unregister_skciphers(struct skcipher_alg *algs, 
int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skciphers); + +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst) +{ + int err; + + err = skcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, skcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(skcipher_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); diff --git a/crypto/speck.c b/crypto/speck.c new file mode 100644 index 000000000000..58aa9f7f91f7 --- /dev/null +++ b/crypto/speck.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Speck: a lightweight block cipher + * + * Copyright (c) 2018 Google, Inc + * + * Speck has 10 variants, including 5 block sizes. For now we only implement + * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and + * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits + * and a key size of K bits. The Speck128 variants are believed to be the most + * secure variants, and they use the same block size and key sizes as AES. The + * Speck64 variants are less secure, but on 32-bit processors are usually + * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less + * secure and/or not as well suited for implementation on either 32-bit or + * 64-bit processors, so are omitted. + * + * Reference: "The Simon and Speck Families of Lightweight Block Ciphers" + * https://eprint.iacr.org/2013/404.pdf + * + * In a correspondence, the Speck designers have also clarified that the words + * should be interpreted in little-endian format, and the words should be + * ordered such that the first word of each block is 'y' rather than 'x', and + * the first key word (rather than the last) becomes the first round key. 
+ */ + +#include <asm/unaligned.h> +#include <crypto/speck.h> +#include <linux/bitops.h> +#include <linux/crypto.h> +#include <linux/init.h> +#include <linux/module.h> + +/* Speck128 */ + +static __always_inline void speck128_round(u64 *x, u64 *y, u64 k) +{ + *x = ror64(*x, 8); + *x += *y; + *x ^= k; + *y = rol64(*y, 3); + *y ^= *x; +} + +static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k) +{ + *y ^= *x; + *y = ror64(*y, 3); + *x ^= k; + *x -= *y; + *x = rol64(*x, 8); +} + +void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u64 y = get_unaligned_le64(in); + u64 x = get_unaligned_le64(in + 8); + int i; + + for (i = 0; i < ctx->nrounds; i++) + speck128_round(&x, &y, ctx->round_keys[i]); + + put_unaligned_le64(y, out); + put_unaligned_le64(x, out + 8); +} +EXPORT_SYMBOL_GPL(crypto_speck128_encrypt); + +static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in); +} + +void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u64 y = get_unaligned_le64(in); + u64 x = get_unaligned_le64(in + 8); + int i; + + for (i = ctx->nrounds - 1; i >= 0; i--) + speck128_unround(&x, &y, ctx->round_keys[i]); + + put_unaligned_le64(y, out); + put_unaligned_le64(x, out + 8); +} +EXPORT_SYMBOL_GPL(crypto_speck128_decrypt); + +static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in); +} + +int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u64 l[3]; + u64 k; + int i; + + switch (keylen) { + case SPECK128_128_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + ctx->nrounds = SPECK128_128_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[0], &k, i); + } + break; + case SPECK128_192_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + l[1] = get_unaligned_le64(key + 16); + ctx->nrounds = SPECK128_192_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[i % 2], &k, i); + } + break; + case SPECK128_256_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + l[1] = get_unaligned_le64(key + 16); + l[2] = get_unaligned_le64(key + 24); + ctx->nrounds = SPECK128_256_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[i % 3], &k, i); + } + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_speck128_setkey); + +static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen); +} + +/* Speck64 */ + +static __always_inline void speck64_round(u32 *x, u32 *y, u32 k) +{ + *x = ror32(*x, 8); + *x += *y; + *x ^= k; + *y = rol32(*y, 3); + *y ^= *x; +} + +static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k) +{ + *y ^= *x; + *y = ror32(*y, 3); + *x ^= k; + *x -= *y; + *x = rol32(*x, 8); +} + +void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u32 y = get_unaligned_le32(in); + u32 x = get_unaligned_le32(in + 4); + int i; + + for (i = 0; i < ctx->nrounds; i++) + speck64_round(&x, &y, ctx->round_keys[i]); + + put_unaligned_le32(y, out); + put_unaligned_le32(x, out + 4); +} +EXPORT_SYMBOL_GPL(crypto_speck64_encrypt); + +static void speck64_encrypt(struct 
crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in); +} + +void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u32 y = get_unaligned_le32(in); + u32 x = get_unaligned_le32(in + 4); + int i; + + for (i = ctx->nrounds - 1; i >= 0; i--) + speck64_unround(&x, &y, ctx->round_keys[i]); + + put_unaligned_le32(y, out); + put_unaligned_le32(x, out + 4); +} +EXPORT_SYMBOL_GPL(crypto_speck64_decrypt); + +static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in); +} + +int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u32 l[3]; + u32 k; + int i; + + switch (keylen) { + case SPECK64_96_KEY_SIZE: + k = get_unaligned_le32(key); + l[0] = get_unaligned_le32(key + 4); + l[1] = get_unaligned_le32(key + 8); + ctx->nrounds = SPECK64_96_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck64_round(&l[i % 2], &k, i); + } + break; + case SPECK64_128_KEY_SIZE: + k = get_unaligned_le32(key); + l[0] = get_unaligned_le32(key + 4); + l[1] = get_unaligned_le32(key + 8); + l[2] = get_unaligned_le32(key + 12); + ctx->nrounds = SPECK64_128_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck64_round(&l[i % 3], &k, i); + } + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_speck64_setkey); + +static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen); +} + +/* Algorithm definitions */ + +static struct crypto_alg speck_algs[] = { + { + .cra_name = "speck128", + .cra_driver_name = "speck128-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SPECK128_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct speck128_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = SPECK128_128_KEY_SIZE, + .cia_max_keysize = SPECK128_256_KEY_SIZE, + .cia_setkey = speck128_setkey, + .cia_encrypt = speck128_encrypt, + .cia_decrypt = speck128_decrypt + } + } + }, { + .cra_name = "speck64", + .cra_driver_name = "speck64-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SPECK64_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct speck64_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = SPECK64_96_KEY_SIZE, + .cia_max_keysize = SPECK64_128_KEY_SIZE, + .cia_setkey = speck64_setkey, + .cia_encrypt = speck64_encrypt, + .cia_decrypt = speck64_decrypt + } + } + } +}; + +static int __init speck_module_init(void) +{ + return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_module_exit(void) +{ + crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_module_init); +module_exit(speck_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (generic)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("speck128"); +MODULE_ALIAS_CRYPTO("speck128-generic"); +MODULE_ALIAS_CRYPTO("speck64"); +MODULE_ALIAS_CRYPTO("speck64-generic"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5f15f45fcc9f..a4aef61e40d8 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3114,6 +3114,36 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "ecb(speck128)", + .test = alg_test_skcipher, + .suite = { + .cipher = 
{ + .enc = { + .vecs = speck128_enc_tv_template, + .count = ARRAY_SIZE(speck128_enc_tv_template) + }, + .dec = { + .vecs = speck128_dec_tv_template, + .count = ARRAY_SIZE(speck128_dec_tv_template) + } + } + } + }, { + .alg = "ecb(speck64)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = { + .vecs = speck64_enc_tv_template, + .count = ARRAY_SIZE(speck64_enc_tv_template) + }, + .dec = { + .vecs = speck64_dec_tv_template, + .count = ARRAY_SIZE(speck64_dec_tv_template) + } + } + } + }, { .alg = "ecb(tea)", .test = alg_test_skcipher, .suite = { @@ -3215,6 +3245,21 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "heh(aes)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = { + .vecs = aes_heh_enc_tv_template, + .count = AES_HEH_ENC_TEST_VECTORS + }, + .dec = { + .vecs = aes_heh_dec_tv_template, + .count = AES_HEH_DEC_TEST_VECTORS + } + } + } + }, { .alg = "hmac(crc32)", .test = alg_test_hash, .suite = { @@ -3844,6 +3889,36 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "xts(speck128)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = { + .vecs = speck128_xts_enc_tv_template, + .count = ARRAY_SIZE(speck128_xts_enc_tv_template) + }, + .dec = { + .vecs = speck128_xts_dec_tv_template, + .count = ARRAY_SIZE(speck128_xts_dec_tv_template) + } + } + } + }, { + .alg = "xts(speck64)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = { + .vecs = speck64_xts_enc_tv_template, + .count = ARRAY_SIZE(speck64_xts_enc_tv_template) + }, + .dec = { + .vecs = speck64_xts_dec_tv_template, + .count = ARRAY_SIZE(speck64_xts_dec_tv_template) + } + } + } + }, { .alg = "xts(twofish)", .test = alg_test_skcipher, .suite = { diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 0e02c60a57b6..0bb950ea22ed 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -12589,6 +12589,1492 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = { }, }; +/* + * Speck test vectors taken from the original paper: + * "The Simon and Speck Families of Lightweight Block Ciphers" + * https://eprint.iacr.org/2013/404.pdf + * + * Note that the paper does not make byte and word order clear. But it was + * confirmed with the authors that the intended orders are little endian byte + * order and (y, x) word order. Equivalently, the printed test vectors, when + * looking at only the bytes (ignoring the whitespace that divides them into + * words), are backwards: the left-most byte is actually the one with the + * highest memory address, while the right-most byte is actually the one with + * the lowest memory address. 
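+ * + * For example, the paper prints the Speck64/96 key as "13121110 0b0a0908 + * 03020100"; reading those bytes backwards gives 00 01 02 03 08 09 0a 0b + * 10 11 12 13, which is the key byte sequence used in the Speck64 vectors + * below.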
+ */ + +static struct cipher_testvec speck128_enc_tv_template[] = { + { /* Speck128/128 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .input = "\x20\x6d\x61\x64\x65\x20\x69\x74" + "\x20\x65\x71\x75\x69\x76\x61\x6c", + .ilen = 16, + .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" + "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .rlen = 16, + }, { /* Speck128/192 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43" + "\x68\x69\x65\x66\x20\x48\x61\x72", + .ilen = 16, + .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" + "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .rlen = 16, + }, { /* Speck128/256 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" + "\x49\x6e\x20\x74\x68\x6f\x73\x65", + .ilen = 16, + .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" + "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .rlen = 16, + }, +}; + +static struct cipher_testvec speck128_dec_tv_template[] = { + { /* Speck128/128 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" + "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .ilen = 16, + .result = "\x20\x6d\x61\x64\x65\x20\x69\x74" + "\x20\x65\x71\x75\x69\x76\x61\x6c", + .rlen = 16, + }, { /* Speck128/192 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" + "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .ilen = 16, + .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43" + "\x68\x69\x65\x66\x20\x48\x61\x72", + .rlen = 16, + }, { /* Speck128/256 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" + "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .ilen = 16, + .result = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" + "\x49\x6e\x20\x74\x68\x6f\x73\x65", + .rlen = 16, + }, +}; + +/* + * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the + * result recomputed with Speck128 as the cipher + */ + +static struct cipher_testvec speck128_xts_enc_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ilen = 32, + .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" + "\x3b\x99\x4a\x64\x74\x77\xac\xed" + "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" + "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = 
"\xfb\x53\x81\x75\x6f\x9f\x34\xad" + "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" + "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" + "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55" + "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" + "\xda\x63\xb2\xf1\x82\xb0\x89\x59" + "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" + "\x53\xd0\xed\x2d\x30\xc1\x20\xef" + "\x70\x67\x5e\xff\x09\x70\xbb\xc1" + 
"\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" + "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" + "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" + "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" + "\x19\xc5\x58\x84\x63\xb9\x12\x68" + "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" + "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" + "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" + "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" + "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" + "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" + "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" + "\xa7\x49\xa0\x0e\x09\x33\x85\x50" + "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" + "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" + "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" + "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" + "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" + "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" + "\xa5\x20\xac\x24\x1c\x73\x59\x73" + "\x58\x61\x3a\x87\x58\xb3\x20\x56" + "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" + "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" + "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" + "\x09\x35\x71\x50\x65\xac\x92\xe3" + "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" + "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" + "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" + "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" + "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" + "\x2a\x26\xcc\x49\x14\x6d\x55\x01" + "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" + "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" + "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" + "\x24\xa9\x60\xa4\x97\x85\x86\x2a" + "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" + "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" + "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" + "\x87\x30\xac\xd5\xea\x73\x49\x10" + "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" + "\x66\x02\x35\x3d\x60\x06\x36\x4f" + "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" + "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" + "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" + "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" + "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" + "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" + "\xbd\x48\x50\xcd\x75\x70\xc4\x62" + "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" + "\x51\x66\x02\x69\x04\x97\x36\xd4" + "\x75\xae\x0b\xa3\x42\xf8\xca\x79" + "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" + "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" + "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" + "\x03\xe7\x05\x39\xf5\x05\x26\xee" + "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" + "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" + "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" + "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" + "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" + "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 64, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + 
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" + "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" + "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" + "\x92\x99\xde\xd3\x76\xed\xcd\x63" + "\x64\x3a\x22\x57\xc1\x43\x49\xd4" + "\x79\x36\x31\x19\x62\xae\x10\x7e" + "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" + "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" + "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" + "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" + "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" + "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" + "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" + "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" + "\x85\x3c\x4f\x26\x64\x85\xbc\x68" + "\xb0\xe0\x86\x5e\x26\x41\xce\x11" + "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" + "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" + "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" + "\x14\x4d\xf0\x74\x37\xfd\x07\x25" + "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" + "\x1b\x83\x4d\x15\x83\xac\x57\xa0" + "\xac\xa5\xd0\x38\xef\x19\x56\x53" + "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" + "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" + "\xed\x22\x34\x1c\x5d\xed\x17\x06" + "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" + "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" + "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" + "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" + "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" + "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" + "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" + "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" + "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" + "\x19\xf5\x94\xf9\xd2\x00\x33\x37" + "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" + "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" + "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" + "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" + "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" + "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" + "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" + "\xad\xb7\x73\xf8\x78\x12\xc8\x59" + "\x17\x80\x4c\x57\x39\xf1\x6d\x80" + "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" + "\xec\xce\xb7\xc8\x02\x8a\xed\x53" + "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" + "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" + "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" + "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" + "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" + "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" + "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" + 
"\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" + "\xcc\x1f\x48\x49\x65\x47\x75\xe9" + "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" + "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" + "\xa1\x51\x89\x3b\xeb\x96\x42\xac" + "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" + "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" + "\x66\x8d\x13\xca\xe0\x59\x2a\x00" + "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" + "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static struct cipher_testvec speck128_xts_dec_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" + "\x3b\x99\x4a\x64\x74\x77\xac\xed" + "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" + "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", + .ilen = 32, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" + "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" + "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" + "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55" + "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" + "\xda\x63\xb2\xf1\x82\xb0\x89\x59" + "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" + "\x53\xd0\xed\x2d\x30\xc1\x20\xef" + "\x70\x67\x5e\xff\x09\x70\xbb\xc1" + "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" + "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" + "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" + "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" + "\x19\xc5\x58\x84\x63\xb9\x12\x68" + "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" + "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" + "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" + "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" + "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" + "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" + "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" + "\xa7\x49\xa0\x0e\x09\x33\x85\x50" + "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" + "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" + "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" + "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" + "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" + "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" + "\xa5\x20\xac\x24\x1c\x73\x59\x73" + "\x58\x61\x3a\x87\x58\xb3\x20\x56" + "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" + "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" 
+ "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" + "\x09\x35\x71\x50\x65\xac\x92\xe3" + "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" + "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" + "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" + "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" + "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" + "\x2a\x26\xcc\x49\x14\x6d\x55\x01" + "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" + "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" + "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" + "\x24\xa9\x60\xa4\x97\x85\x86\x2a" + "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" + "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" + "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" + "\x87\x30\xac\xd5\xea\x73\x49\x10" + "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" + "\x66\x02\x35\x3d\x60\x06\x36\x4f" + "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" + "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" + "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" + "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" + "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" + "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" + "\xbd\x48\x50\xcd\x75\x70\xc4\x62" + "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" + "\x51\x66\x02\x69\x04\x97\x36\xd4" + "\x75\xae\x0b\xa3\x42\xf8\xca\x79" + "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" + "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" + "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" + "\x03\xe7\x05\x39\xf5\x05\x26\xee" + "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" + "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" + "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" + "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" + "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" + "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + 
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 64, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" + "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" + "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" + "\x92\x99\xde\xd3\x76\xed\xcd\x63" + "\x64\x3a\x22\x57\xc1\x43\x49\xd4" + "\x79\x36\x31\x19\x62\xae\x10\x7e" + "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" + "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" + "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" + "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" + "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" + "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" + "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" + "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" + "\x85\x3c\x4f\x26\x64\x85\xbc\x68" + "\xb0\xe0\x86\x5e\x26\x41\xce\x11" + "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" + "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" + "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" + "\x14\x4d\xf0\x74\x37\xfd\x07\x25" + "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" + "\x1b\x83\x4d\x15\x83\xac\x57\xa0" + "\xac\xa5\xd0\x38\xef\x19\x56\x53" + "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" + "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" + "\xed\x22\x34\x1c\x5d\xed\x17\x06" + "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" + "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" + "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" + "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" + "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" + "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" + "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" + "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" + "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" + "\x19\xf5\x94\xf9\xd2\x00\x33\x37" + "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" + "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" + "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" + "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" + "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" + "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" + "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" + "\xad\xb7\x73\xf8\x78\x12\xc8\x59" + "\x17\x80\x4c\x57\x39\xf1\x6d\x80" + "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" + "\xec\xce\xb7\xc8\x02\x8a\xed\x53" + "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" + "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" + "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" + "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" + "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" + "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" + "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" + "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" + "\xcc\x1f\x48\x49\x65\x47\x75\xe9" + "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" + "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" + "\xa1\x51\x89\x3b\xeb\x96\x42\xac" + "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" + "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" + "\x66\x8d\x13\xca\xe0\x59\x2a\x00" + "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" + "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + 
"\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static struct cipher_testvec speck64_enc_tv_template[] = { + { /* Speck64/96 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13", + .klen = 12, + .input = "\x65\x61\x6e\x73\x20\x46\x61\x74", + .ilen = 8, + .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f", + .rlen = 8, + }, { /* Speck64/128 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13\x18\x19\x1a\x1b", + .klen = 16, + .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b", + .ilen = 8, + .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", + .rlen = 8, + }, +}; + +static struct cipher_testvec speck64_dec_tv_template[] = { + { /* Speck64/96 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13", + .klen = 12, + .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f", + .ilen = 8, + .result = "\x65\x61\x6e\x73\x20\x46\x61\x74", + .rlen = 8, + }, { /* Speck64/128 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13\x18\x19\x1a\x1b", + .klen = 16, + .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", + .ilen = 8, + .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b", + .rlen = 8, + }, +}; + +/* + * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the result + * recomputed with Speck64 as the cipher, and key lengths adjusted + */ + +static struct cipher_testvec speck64_xts_enc_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = 
"\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ilen = 32, + .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" + "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" + "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" + "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59" + "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" + "\xb3\x12\x69\x7e\x36\xeb\x52\xff" + "\x62\xdd\xba\x90\xb3\xe1\xee\x99", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" + "\x27\x36\xc0\xbf\x5d\xea\x36\x37" + "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" + "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + 
"\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" + "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" + "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" + "\x11\xc7\x39\x96\xd0\x95\xf4\x56" + "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" + "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" + "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" + "\xd5\xd2\x13\x86\x94\x34\xe9\x62" + "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" + "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" + "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" + "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" + "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" + "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" + "\x29\xe5\xbe\x54\x30\xcb\x46\x95" + "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" + "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" + "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" + "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" + "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" + "\x82\xc0\x37\x27\xfc\x91\xa7\x05" + "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" + "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" + "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" + "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" + "\x07\xff\xf3\x72\x74\x48\xb5\x40" + "\x50\xb5\xdd\x90\x43\x31\x18\x15" + "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" + "\x29\x93\x90\x8b\xda\x07\xf0\x35" + "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" + "\x94\x12\xbb\x33\x27\x1d\x3f\x23" + "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" + "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" + "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" + "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" + "\xcf\x37\xef\xc9\x11\x01\x5c\x45" + "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" + "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" + "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" + "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" + "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" + "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" + "\x10\xff\xaf\xef\xca\xdd\x4f\x80" + "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" + "\x33\xca\x00\x8b\x8b\x3f\xea\xec" + "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" + "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" + "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" + "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" + "\xc7\x1b\x53\x17\x02\xea\xd1\xad" + "\x13\x51\x73\xc0\xa0\xb2\x05\x32" + "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" + "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" + "\x59\xda\xee\x1a\x22\x18\xda\x0d" + "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" + "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" + "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" + "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" + "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" + "\x7d\x69\x8d\x00\x62\x77\x0d\x14" + "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" + "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" + "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" + "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + 
"\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" + "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" + "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" + "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" + "\x78\x16\xea\x80\xdb\x33\x75\x94" + "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" + "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" + "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" + "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" + "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" + "\xf5\xec\x32\x74\xa3\xb8\x03\x88" + "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" + "\x84\x5e\x46\xed\x20\x89\xb6\x44" + "\x8d\xd0\xed\x54\x47\x16\xbe\x95" + "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" + "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" + "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" + "\x8f\x1e\x10\x07\x57\x25\x98\x7b" + "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" + "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" + "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" + "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" + "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" + "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" + "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" + "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" + "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" + "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" + "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" + "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" + "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" + "\x7b\x7a\x4a\xad\x35\x65\x27\xca" + "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" + "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" + "\xfc\x42\xc8\x31\x59\xbe\x16\x60" + "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" + "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" + "\x52\x7f\x29\x51\x94\x20\x67\x3c" + "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" + "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" + 
"\x98\x52\x6d\xf3\xac\x3e\xf2\x18" + "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" + "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" + "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" + "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" + "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" + "\x65\x53\x0f\x41\x11\xbd\x98\x99" + "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" + "\x84\x98\xf9\x34\xed\x33\x2a\x1f" + "\x82\xed\xc1\x73\x98\xd3\x02\xdc" + "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" + "\x63\x51\x34\x9d\x96\x12\xae\xce" + "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" + "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" + "\x54\x27\x74\xbb\x10\x86\x57\x46" + "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" + "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" + "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" + "\x7b\x4f\x38\x55\x36\x71\x64\xc1" + "\xfc\x5c\x75\x52\x33\x02\x18\xf8" + "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" + "\x9b\x63\x76\x32\x2f\x19\x72\x10" + "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" + "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static struct cipher_testvec speck64_xts_dec_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" + "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" + "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" + "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", + .ilen = 32, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59" + "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" + "\xb3\x12\x69\x7e\x36\xeb\x52\xff" + "\x62\xdd\xba\x90\xb3\xe1\xee\x99", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" + "\x27\x36\xc0\xbf\x5d\xea\x36\x37" + "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" + "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" + "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" + "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" + "\x11\xc7\x39\x96\xd0\x95\xf4\x56" + "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" + "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" + "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" + "\xd5\xd2\x13\x86\x94\x34\xe9\x62" + "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" + "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" + "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" + "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" + "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" + "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" + "\x29\xe5\xbe\x54\x30\xcb\x46\x95" + "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" 
+ "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" + "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" + "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" + "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" + "\x82\xc0\x37\x27\xfc\x91\xa7\x05" + "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" + "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" + "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" + "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" + "\x07\xff\xf3\x72\x74\x48\xb5\x40" + "\x50\xb5\xdd\x90\x43\x31\x18\x15" + "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" + "\x29\x93\x90\x8b\xda\x07\xf0\x35" + "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" + "\x94\x12\xbb\x33\x27\x1d\x3f\x23" + "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" + "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" + "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" + "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" + "\xcf\x37\xef\xc9\x11\x01\x5c\x45" + "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" + "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" + "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" + "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" + "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" + "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" + "\x10\xff\xaf\xef\xca\xdd\x4f\x80" + "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" + "\x33\xca\x00\x8b\x8b\x3f\xea\xec" + "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" + "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" + "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" + "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" + "\xc7\x1b\x53\x17\x02\xea\xd1\xad" + "\x13\x51\x73\xc0\xa0\xb2\x05\x32" + "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" + "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" + "\x59\xda\xee\x1a\x22\x18\xda\x0d" + "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" + "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" + "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" + "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" + "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" + "\x7d\x69\x8d\x00\x62\x77\x0d\x14" + "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" + "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" + "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" + "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + 
"\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" + "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" + "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" + "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" + "\x78\x16\xea\x80\xdb\x33\x75\x94" + "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" + "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" + "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" + "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" + "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" + "\xf5\xec\x32\x74\xa3\xb8\x03\x88" + "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" + "\x84\x5e\x46\xed\x20\x89\xb6\x44" + "\x8d\xd0\xed\x54\x47\x16\xbe\x95" + "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" + "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" + "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" + "\x8f\x1e\x10\x07\x57\x25\x98\x7b" + "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" + "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" + "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" + "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" + "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" + "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" + "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" + "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" + "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" + "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" + "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" + "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" + "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" + "\x7b\x7a\x4a\xad\x35\x65\x27\xca" + "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" + "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" + "\xfc\x42\xc8\x31\x59\xbe\x16\x60" + "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" + "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" + "\x52\x7f\x29\x51\x94\x20\x67\x3c" + "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" + "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" + "\x98\x52\x6d\xf3\xac\x3e\xf2\x18" + "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" + "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" + "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" + "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" + "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" + "\x65\x53\x0f\x41\x11\xbd\x98\x99" + "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" + "\x84\x98\xf9\x34\xed\x33\x2a\x1f" + "\x82\xed\xc1\x73\x98\xd3\x02\xdc" + "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" + "\x63\x51\x34\x9d\x96\x12\xae\xce" + "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" + "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" + "\x54\x27\x74\xbb\x10\x86\x57\x46" + "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" + "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" + "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" + "\x7b\x4f\x38\x55\x36\x71\x64\xc1" + "\xfc\x5c\x75\x52\x33\x02\x18\xf8" + "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" + "\x9b\x63\x76\x32\x2f\x19\x72\x10" + "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" + "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + 
"\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + /* Cast6 test vectors from RFC 2612 */ #define CAST6_ENC_TEST_VECTORS 4 #define CAST6_DEC_TEST_VECTORS 4 @@ -14139,6 +15625,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { #define AES_DEC_TEST_VECTORS 4 #define AES_CBC_ENC_TEST_VECTORS 5 #define AES_CBC_DEC_TEST_VECTORS 5 +#define AES_HEH_ENC_TEST_VECTORS 4 +#define AES_HEH_DEC_TEST_VECTORS 4 #define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 #define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 #define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2 @@ -14511,6 +15999,198 @@ static struct cipher_testvec aes_dec_tv_template[] = { }, }; +static struct cipher_testvec aes_heh_enc_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .ilen = 16, + .result = "\xd8\xbd\x40\xbf\xca\xe5\xee\x81" + "\x0f\x3d\x1f\x1f\xae\x89\x07\x55", + .rlen = 16, + .also_non_np = 1, + .np = 2, + .tap = { 8, 8 }, + }, { + .key = "\xa8\xda\x24\x9b\x5e\xfa\x13\xc2" + "\xc1\x94\xbf\x32\xba\x38\xa3\x77", + .klen = 16, + .iv = "\x4d\x47\x61\x37\x2b\x47\x86\xf0" + "\xd6\x47\xb5\xc2\xe8\xcf\x85\x27", + .input = "\xb8\xee\x29\xe4\xa5\xd1\xe7\x55" + 
"\xd0\xfd\xe7\x22\x63\x76\x36\xe2" + "\xf8\x0c\xf8\xfe\x65\x76\xe7\xca" + "\xc1\x42\xf5\xca\x5a\xa8\xac\x2a", + .ilen = 32, + .result = "\x59\xf2\x78\x4e\x10\x94\xf9\x5c" + "\x22\x23\x78\x2a\x30\x48\x11\x97" + "\xb1\xfe\x70\xc4\xef\xdf\x04\xef" + "\x16\x39\x04\xcf\xc0\x95\x9a\x98", + .rlen = 32, + .also_non_np = 1, + .np = 3, + .tap = { 16, 13, 3 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .ilen = 63, + .result = "\xe0\x40\xeb\xe9\x52\xbe\x65\x60" + "\xe4\x68\x68\xa3\x73\x75\xb8\x52" + "\xef\x38\x6a\x87\x25\x25\xf6\x04" + "\xe5\x8e\xbe\x14\x8b\x02\x14\x1f" + "\xa9\x73\xb7\xad\x15\xbe\x9c\xa0" + "\xd2\x8a\x2c\xdc\xd4\xe3\x05\x55" + "\x0a\xf5\xf8\x51\xee\xe5\x62\xa5" + "\x71\xa7\x7c\x15\x5d\x7a\x9e", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .ilen = 63, + .result = "\x4b\x1a\x15\xa0\xaf\x08\x6d\x70" + "\xf0\xa7\x97\xb5\x31\x4b\x8c\xc3" + "\x4d\xf2\x7a\x9d\xdd\xd4\x15\x99" + "\x57\xad\xc6\xb1\x35\x69\xf5\x6a" + "\x2d\x70\xe4\x97\x49\xb2\x9f\x71" + "\xde\x22\xb5\x70\x8c\x69\x24\xd3" + "\xad\x80\x58\x48\x90\xe4\xed\xba" + "\x76\x3d\x71\x7c\x57\x25\x87", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + } +}; + +static struct cipher_testvec aes_heh_dec_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xd8\xbd\x40\xbf\xca\xe5\xee\x81" + "\x0f\x3d\x1f\x1f\xae\x89\x07\x55", + .ilen = 16, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .rlen = 16, + .also_non_np = 1, + .np = 2, + .tap = { 8, 8 }, + }, { + .key = "\xa8\xda\x24\x9b\x5e\xfa\x13\xc2" + "\xc1\x94\xbf\x32\xba\x38\xa3\x77", + .klen = 16, + .iv = "\x4d\x47\x61\x37\x2b\x47\x86\xf0" + "\xd6\x47\xb5\xc2\xe8\xcf\x85\x27", + .input = "\x59\xf2\x78\x4e\x10\x94\xf9\x5c" + "\x22\x23\x78\x2a\x30\x48\x11\x97" + "\xb1\xfe\x70\xc4\xef\xdf\x04\xef" + "\x16\x39\x04\xcf\xc0\x95\x9a\x98", + .ilen = 32, + .result = "\xb8\xee\x29\xe4\xa5\xd1\xe7\x55" + "\xd0\xfd\xe7\x22\x63\x76\x36\xe2" + "\xf8\x0c\xf8\xfe\x65\x76\xe7\xca" + "\xc1\x42\xf5\xca\x5a\xa8\xac\x2a", + .rlen = 32, + .also_non_np = 1, + .np = 3, + .tap = { 16, 13, 3 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = 
"\xe0\x40\xeb\xe9\x52\xbe\x65\x60" + "\xe4\x68\x68\xa3\x73\x75\xb8\x52" + "\xef\x38\x6a\x87\x25\x25\xf6\x04" + "\xe5\x8e\xbe\x14\x8b\x02\x14\x1f" + "\xa9\x73\xb7\xad\x15\xbe\x9c\xa0" + "\xd2\x8a\x2c\xdc\xd4\xe3\x05\x55" + "\x0a\xf5\xf8\x51\xee\xe5\x62\xa5" + "\x71\xa7\x7c\x15\x5d\x7a\x9e", + .ilen = 63, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .klen = 16, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x4b\x1a\x15\xa0\xaf\x08\x6d\x70" + "\xf0\xa7\x97\xb5\x31\x4b\x8c\xc3" + "\x4d\xf2\x7a\x9d\xdd\xd4\x15\x99" + "\x57\xad\xc6\xb1\x35\x69\xf5\x6a" + "\x2d\x70\xe4\x97\x49\xb2\x9f\x71" + "\xde\x22\xb5\x70\x8c\x69\x24\xd3" + "\xad\x80\x58\x48\x90\xe4\xed\xba" + "\x76\x3d\x71\x7c\x57\x25\x87", + .ilen = 63, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00", + .rlen = 63, + .also_non_np = 1, + .np = 8, + .tap = { 20, 20, 10, 8, 2, 1, 1, 1 }, + } +}; + static struct cipher_testvec aes_cbc_enc_tv_template[] = { { /* From RFC 3602 */ .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" |
