summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorZhen Kong <zkong@codeaurora.org>2016-03-14 16:10:59 -0700
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 21:23:47 -0700
commitbe20302c2fcde2a8bdbc3dfef556721b9c97e20f (patch)
treec73b45f55dbd19fd1ceb4b4dde1f16cf5a438adb
parent2fbd2f64e42288e62387ebdf26bba4febef01836 (diff)
crypto: msm: make change to support new AEAD API on 4.4 kernel
Make changes to support the new AEAD API on the 4.4 kernel. Also, remove the code paths guarded by ce_support.aligned_only and ce_bam_info.minor_version == 0, since the 4.4 kernel supports only MSM targets that have crypto 5.2 or higher.

Signed-off-by: Zhen Kong <zkong@codeaurora.org>
-rw-r--r--drivers/crypto/msm/qce50.c210
-rw-r--r--drivers/crypto/msm/qce50.h1
-rw-r--r--drivers/crypto/msm/qcrypto.c710
3 files changed, 299 insertions, 622 deletions
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 95cc90c590ec..d85bac8c6a99 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -2035,7 +2035,7 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
bool is_complete);
-#if 0
+
static int _aead_complete(struct qce_device *pce_dev, int req_info)
{
struct aead_request *areq;
@@ -2058,8 +2058,10 @@ static int _aead_complete(struct qce_device *pce_dev, int req_info)
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
- qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents,
- DMA_TO_DEVICE);
+
+ if (preq_info->asg)
+ qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
/* check MAC */
memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
SHA256_DIGEST_SIZE);
@@ -2114,9 +2116,6 @@ static int _aead_complete(struct qce_device *pce_dev, int req_info)
unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
aead = crypto_aead_reqtfm(areq);
ivsize = crypto_aead_ivsize(aead);
- if (pce_dev->ce_bam_info.minor_version != 0)
- dma_unmap_single(pce_dev->pdev, preq_info->phy_iv_in,
- ivsize, DMA_TO_DEVICE);
memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
sizeof(iv));
qce_free_req_info(pce_dev, req_info, true);
@@ -2125,7 +2124,6 @@ static int _aead_complete(struct qce_device *pce_dev, int req_info)
}
return 0;
};
-#endif
static int _sha_complete(struct qce_device *pce_dev, int req_info)
{
@@ -2457,6 +2455,68 @@ static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
return 0;
}
+static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
+ struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
+ struct sps_transfer *sps_bam_pipe)
+{
+ uint32_t data_cnt, len;
+ dma_addr_t addr;
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ unsigned int res_within_sg;
+
+ if (!sg_src)
+ return -ENOENT;
+ res_within_sg = sg_dma_len(sg_src);
+
+ while (off > 0) {
+ if (!sg_src)
+ return -ENOENT;
+ len = sg_dma_len(sg_src);
+ if (off < len) {
+ res_within_sg = len - off;
+ break;
+ }
+ off -= len;
+ sg_src = sg_next(sg_src);
+ res_within_sg = sg_dma_len(sg_src);
+ }
+ while (nbytes > 0 && sg_src) {
+ len = min(nbytes, res_within_sg);
+ nbytes -= len;
+ addr = sg_dma_address(sg_src) + off;
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+ pr_err("Num of descrptor %d exceed max (%d)",
+ sps_bam_pipe->iovec_count,
+ (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE) {
+ data_cnt = SPS_MAX_PKT_SIZE;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ } else {
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ }
+ iovec++;
+ sps_bam_pipe->iovec_count++;
+ addr += data_cnt;
+ len -= data_cnt;
+ }
+ sg_src = sg_next(sg_src);
+ off = 0;
+ res_within_sg = sg_dma_len(sg_src);
+ }
+ return 0;
+}
+
static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
struct qce_cmdlist_info *cmdptr,
struct sps_transfer *sps_bam_pipe)
@@ -2938,11 +2998,9 @@ static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
case QCE_XFER_HASHING:
_sha_complete(pce_dev, req_info);
break;
-#if 0
case QCE_XFER_AEAD:
_aead_complete(pce_dev, req_info);
break;
-#endif
case QCE_XFER_F8:
_f8_complete(pce_dev, req_info);
break;
@@ -4573,15 +4631,15 @@ static int select_mode(struct qce_device *pce_dev,
return 0;
}
-#if 0
+
static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
{
+ int rc = 0;
struct qce_device *pce_dev = (struct qce_device *) handle;
struct aead_request *areq = (struct aead_request *) q_req->areq;
uint32_t authsize = q_req->authsize;
uint32_t totallen_in, out_len;
uint32_t hw_pad_out = 0;
- int rc = 0;
int ce_burst_size;
struct qce_cmdlist_info *cmdlistinfo = NULL;
int req_info = -1;
@@ -4595,7 +4653,7 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
pce_sps_data = &preq_info->ce_sps;
ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
- totallen_in = areq->cryptlen + areq->assoclen;
+ totallen_in = areq->cryptlen + q_req->assoclen;
if (q_req->dir == QCE_ENCRYPT) {
q_req->cryptlen = areq->cryptlen;
out_len = areq->cryptlen + authsize;
@@ -4618,13 +4676,20 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
if (pce_dev->ce_bam_info.minor_version == 0)
preq_info->src_nents = count_sg(areq->src, totallen_in);
else
- preq_info->src_nents = count_sg(areq->src, areq->cryptlen);
+ preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
+ areq->assoclen);
- preq_info->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+ if (q_req->assoclen) {
+ preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
- /* associated data input */
- qce_dma_map_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents,
- DMA_TO_DEVICE);
+ /* formatted associated data input */
+ qce_dma_map_sg(pce_dev->pdev, q_req->asg,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
+ preq_info->asg = q_req->asg;
+ } else {
+ preq_info->assoc_nents = 0;
+ preq_info->asg = NULL;
+ }
/* cipher input */
qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
@@ -4641,7 +4706,8 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
preq_info->dst_nents = count_sg(areq->dst,
out_len + areq->assoclen);
else
- preq_info->dst_nents = count_sg(areq->dst, out_len);
+ preq_info->dst_nents = count_sg(areq->dst, out_len +
+ areq->assoclen);
qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
DMA_FROM_DEVICE);
@@ -4660,11 +4726,11 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
}
/* set up crypto device */
rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
- areq->assoclen, cmdlistinfo);
+ q_req->assoclen, cmdlistinfo);
} else {
/* set up crypto device */
rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
- areq->assoclen);
+ q_req->assoclen);
}
if (rc < 0)
@@ -4687,40 +4753,14 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
&pce_sps_data->in_transfer);
if (pce_dev->ce_bam_info.minor_version == 0) {
- if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
- &pce_sps_data->in_transfer))
- goto bad;
-
- _qce_set_flag(&pce_sps_data->in_transfer,
- SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
-
- /*
- * The destination data should be big enough to
- * include CCM padding.
- */
- if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
- areq->assoclen + hw_pad_out,
- &pce_sps_data->out_transfer))
- goto bad;
- if (totallen_in > SPS_MAX_PKT_SIZE) {
- _qce_set_flag(&pce_sps_data->out_transfer,
- SPS_IOVEC_FLAG_INT);
- pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
- } else {
- if (_qce_sps_add_data(GET_PHYS_ADDR(
- pce_sps_data->result_dump),
- CRYPTO_RESULT_DUMP_SIZE,
- &pce_sps_data->out_transfer))
- goto bad;
- _qce_set_flag(&pce_sps_data->out_transfer,
- SPS_IOVEC_FLAG_INT);
- pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
- }
+ goto bad;
} else {
- if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
- &pce_sps_data->in_transfer))
+ if (q_req->assoclen && (_qce_sps_add_sg_data(
+ pce_dev, q_req->asg, q_req->assoclen,
+ &pce_sps_data->in_transfer)))
goto bad;
- if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+ if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
+ areq->assoclen,
&pce_sps_data->in_transfer))
goto bad;
_qce_set_flag(&pce_sps_data->in_transfer,
@@ -4736,10 +4776,11 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
/* Pass through to ignore associated data*/
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
- areq->assoclen,
+ q_req->assoclen,
&pce_sps_data->out_transfer))
goto bad;
- if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+ if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
+ areq->assoclen,
&pce_sps_data->out_transfer))
goto bad;
/* Pass through to ignore hw_pad (padding of the MAC data) */
@@ -4770,7 +4811,7 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
bad:
if (preq_info->assoc_nents) {
- qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
+ qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
preq_info->assoc_nents, DMA_TO_DEVICE);
}
if (preq_info->src_nents) {
@@ -4783,10 +4824,8 @@ bad:
DMA_FROM_DEVICE);
}
qce_free_req_info(pce_dev, req_info, false);
-
return rc;
}
-#endif
static int _qce_suspend(void *handle)
{
@@ -4849,7 +4888,6 @@ static int _qce_resume(void *handle)
struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
EXPORT_SYMBOL(qce_pm_table);
-#if 0
int qce_aead_req(void *handle, struct qce_req *q_req)
{
struct qce_device *pce_dev = (struct qce_device *)handle;
@@ -4882,7 +4920,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
else
q_req->cryptlen = areq->cryptlen - authsize;
- totallen = q_req->cryptlen + areq->assoclen + ivsize;
+ totallen = q_req->cryptlen + areq->assoclen;
if (pce_dev->support_cmd_dscr) {
cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
@@ -4896,15 +4934,13 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
}
/* set up crypto device */
rc = _ce_setup_aead(pce_dev, q_req, totallen,
- areq->assoclen + ivsize, cmdlistinfo);
+ areq->assoclen, cmdlistinfo);
if (rc < 0) {
qce_free_req_info(pce_dev, req_info, false);
return -EINVAL;
}
}
- preq_info->assoc_nents = count_sg(areq->assoc, areq->assoclen);
-
/*
* For crypto 5.0 that has burst size alignment requirement
* for data descritpor,
@@ -4912,46 +4948,27 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
* memory starting with associated data, followed by
* iv, and data stream to be ciphered.
*/
- if (pce_dev->ce_bam_info.minor_version == 0)
- preq_info->src_nents = count_sg(areq->src, totallen);
- else
- preq_info->src_nents = count_sg(areq->src, q_req->cryptlen);
+ preq_info->src_nents = count_sg(areq->src, totallen);
- preq_info->phy_iv_in = 0;
- /* associated data input */
- qce_dma_map_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents,
- DMA_TO_DEVICE);
/* cipher input */
qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
/* cipher output for encryption */
if (areq->src != areq->dst) {
- if (pce_dev->ce_bam_info.minor_version == 0)
- /*
- * The destination scatter list is pointing to the same
- * data area as source.
- */
- preq_info->dst_nents = count_sg(areq->dst, totallen);
- else
- preq_info->dst_nents = count_sg(areq->dst,
- q_req->cryptlen);
+ preq_info->dst_nents = count_sg(areq->dst, totallen);
qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
DMA_FROM_DEVICE);
}
- /* cipher iv for input */
- if (pce_dev->ce_bam_info.minor_version != 0)
- preq_info->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
- ivsize, DMA_TO_DEVICE);
-
/* setup for callback, and issue command to bam */
preq_info->areq = q_req->areq;
preq_info->qce_cb = q_req->qce_cb;
preq_info->dir = q_req->dir;
+ preq_info->asg = NULL;
/* setup xfer type for producer callback handling */
preq_info->xfer_type = QCE_XFER_AEAD;
@@ -4963,7 +4980,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
&pce_sps_data->in_transfer);
} else {
rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
- areq->assoclen + ivsize);
+ areq->assoclen);
if (rc)
goto bad;
}
@@ -4996,13 +5013,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
}
} else {
- if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
- &pce_sps_data->in_transfer))
- goto bad;
- if (_qce_sps_add_data((uint32_t)preq_info->phy_iv_in, ivsize,
- &pce_sps_data->in_transfer))
- goto bad;
- if (_qce_sps_add_sg_data(pce_dev, areq->src, q_req->cryptlen,
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
&pce_sps_data->in_transfer))
goto bad;
_qce_set_flag(&pce_sps_data->in_transfer,
@@ -5013,13 +5024,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
&pce_sps_data->cmdlistptr.unlock_all_pipes,
&pce_sps_data->in_transfer);
- /* Pass through to ignore associated + iv data*/
- if (_qce_sps_add_data(
- GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
- (ivsize + areq->assoclen),
- &pce_sps_data->out_transfer))
- goto bad;
- if (_qce_sps_add_sg_data(pce_dev, areq->dst, q_req->cryptlen,
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
&pce_sps_data->out_transfer))
goto bad;
@@ -5041,9 +5046,6 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
return 0;
bad:
- if (preq_info->assoc_nents)
- qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
- preq_info->assoc_nents, DMA_TO_DEVICE);
if (preq_info->src_nents)
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
@@ -5051,15 +5053,11 @@ bad:
if (areq->src != areq->dst)
qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
DMA_FROM_DEVICE);
- if (preq_info->phy_iv_in)
- dma_unmap_single(pce_dev->pdev, preq_info->phy_iv_in,
- ivsize, DMA_TO_DEVICE);
qce_free_req_info(pce_dev, req_info, false);
return rc;
}
EXPORT_SYMBOL(qce_aead_req);
-#endif
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index 19f6edf21878..45d65c28477b 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -219,6 +219,7 @@ struct ce_request_info {
void *user;
void *areq;
int assoc_nents;
+ struct scatterlist *asg; /* Formatted associated data sg */
int src_nents;
int dst_nents;
dma_addr_t phy_iv_in;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index d676f5a46650..f9468e1212ab 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -355,6 +355,7 @@ EXPORT_SYMBOL(qcrypto_get_engine_list);
enum qcrypto_alg_type {
QCRYPTO_ALG_CIPHER = 0,
QCRYPTO_ALG_SHA = 1,
+ QCRYPTO_ALG_AEAD = 2,
QCRYPTO_ALG_LAST
};
@@ -362,6 +363,7 @@ struct qcrypto_alg {
struct list_head entry;
struct crypto_alg cipher_alg;
struct ahash_alg sha_alg;
+ struct aead_alg aead_alg;
enum qcrypto_alg_type alg_type;
struct crypto_priv *cp;
};
@@ -410,9 +412,7 @@ struct qcrypto_cipher_req_ctx {
unsigned int ivsize;
int aead;
struct scatterlist asg; /* Formatted associated data sg */
- unsigned char *assoc; /* Pointer to formatted assoc data */
- unsigned int assoclen; /* Save Unformatted assoc data length */
- struct scatterlist *assoc_sg; /* Save Unformatted assoc data sg */
+ unsigned char *adata; /* Pointer to formatted assoc data */
enum qce_cipher_alg_enum alg;
enum qce_cipher_dir_enum dir;
enum qce_cipher_mode_enum mode;
@@ -777,6 +777,22 @@ static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
return q_alg;
};
+static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
+ struct aead_alg *template)
+{
+ struct qcrypto_alg *q_alg;
+
+ q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+ if (!q_alg)
+ return ERR_PTR(-ENOMEM);
+
+ q_alg->alg_type = QCRYPTO_ALG_AEAD;
+ q_alg->aead_alg = *template;
+ q_alg->cp = cp;
+
+ return q_alg;
+};
+
static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -897,37 +913,58 @@ static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
return _qcrypto_cra_ablkcipher_init(tfm);
};
-#if 0
-static int _qcrypto_cra_aead_sha1_init(struct crypto_tfm *tfm)
+static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
{
int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- rc = _qcrypto_cipher_cra_init(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_cipher_cra_init(&tfm->base);
ctx->auth_alg = QCE_HASH_SHA1_HMAC;
return rc;
}
-static int _qcrypto_cra_aead_sha256_init(struct crypto_tfm *tfm)
+static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
{
int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- rc = _qcrypto_cipher_cra_init(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_cipher_cra_init(&tfm->base);
ctx->auth_alg = QCE_HASH_SHA256_HMAC;
return rc;
}
-static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_tfm *tfm)
+static int _qcrypto_cra_aead_ccm_init(struct crypto_aead *tfm)
{
int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_cipher_cra_init(&tfm->base);
+ ctx->auth_alg = QCE_HASH_AES_CMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_cipher_cra_init(&tfm->base);
+ ctx->auth_alg = QCE_HASH_AES_CMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_priv *cp = &qcrypto_dev;
- tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- rc = _qcrypto_cipher_cra_init(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_cipher_cra_init(&tfm->base);
if (rc)
return rc;
ctx->cipher_aes192_fb = NULL;
@@ -951,14 +988,14 @@ static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_tfm *tfm)
return 0;
}
-static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_tfm *tfm)
+static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
{
int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_priv *cp = &qcrypto_dev;
- tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- rc = _qcrypto_cipher_cra_init(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_cipher_cra_init(&tfm->base);
if (rc)
return rc;
ctx->cipher_aes192_fb = NULL;
@@ -982,29 +1019,6 @@ static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_tfm *tfm)
return 0;
}
-static int _qcrypto_cra_aead_ccm_init(struct crypto_tfm *tfm)
-{
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- rc = _qcrypto_cipher_cra_init(tfm);
- ctx->auth_alg = QCE_HASH_AES_CMAC;
- return rc;
-}
-
-static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_tfm *tfm)
-{
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- rc = _qcrypto_cipher_cra_init(tfm);
- ctx->auth_alg = QCE_HASH_AES_CMAC;
- return rc;
-}
-#endif
-
static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1023,17 +1037,17 @@ static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
ctx->cipher_aes192_fb = NULL;
}
-static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
+static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
{
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
if (!list_empty(&ctx->rsp_queue))
pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
}
-static void _qcrypto_cra_aead_aes_exit(struct crypto_tfm *tfm)
+static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
{
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
if (!list_empty(&ctx->rsp_queue))
pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
@@ -1232,6 +1246,8 @@ static void _qcrypto_remove_engine(struct crypto_engine *pengine)
crypto_unregister_alg(&q_alg->cipher_alg);
if (q_alg->alg_type == QCRYPTO_ALG_SHA)
crypto_unregister_ahash(&q_alg->sha_alg);
+ if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
+ crypto_unregister_aead(&q_alg->aead_alg);
list_del(&q_alg->entry);
kzfree(q_alg);
}
@@ -1695,7 +1711,6 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
req_done(pqcrypto_req_control);
};
-#if 0
static void _qce_aead_complete(void *cookie, unsigned char *icv,
unsigned char *iv, int ret)
{
@@ -1703,7 +1718,6 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv,
struct crypto_async_request *async_req;
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
- struct crypto_priv *cp = ctx->cp;
struct qcrypto_cipher_req_ctx *rctx;
struct crypto_stat *pstat;
struct crypto_engine *pengine;
@@ -1721,65 +1735,10 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv,
}
if (rctx->mode == QCE_MODE_CCM) {
- if (cp->ce_support.aligned_only) {
- struct qcrypto_cipher_req_ctx *rctx;
- uint32_t bytes = 0;
- uint32_t nbytes = 0;
- uint32_t num_sg = 0;
-
- rctx = aead_request_ctx(areq);
- areq->src = rctx->orig_src;
- areq->dst = rctx->orig_dst;
- if (rctx->dir == QCE_ENCRYPT)
- nbytes = areq->cryptlen +
- crypto_aead_authsize(aead);
- else
- nbytes = areq->cryptlen -
- crypto_aead_authsize(aead);
- num_sg = qcrypto_count_sg(areq->dst, nbytes);
- bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
- ((char *)rctx->data + areq->assoclen),
- nbytes);
- if (bytes != nbytes)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
- bytes, nbytes);
- kzfree(rctx->data);
- }
- kzfree(rctx->assoc);
- areq->assoc = rctx->assoc_sg;
- areq->assoclen = rctx->assoclen;
+ kzfree(rctx->adata);
} else {
uint32_t ivsize = crypto_aead_ivsize(aead);
- /* for aead operations, other than aes(ccm) */
- if (cp->ce_support.aligned_only) {
- struct qcrypto_cipher_req_ctx *rctx;
- uint32_t bytes = 0;
- uint32_t nbytes = 0;
- uint32_t num_sg = 0;
- uint32_t offset = areq->assoclen + ivsize;
-
- rctx = aead_request_ctx(areq);
- areq->src = rctx->orig_src;
- areq->dst = rctx->orig_dst;
-
- if (rctx->dir == QCE_ENCRYPT)
- nbytes = areq->cryptlen;
- else
- nbytes = areq->cryptlen -
- crypto_aead_authsize(aead);
- num_sg = qcrypto_count_sg(areq->dst, nbytes);
- bytes = qcrypto_sg_copy_from_buffer(
- areq->dst,
- num_sg,
- (char *)rctx->data + offset,
- nbytes);
- if (bytes != nbytes)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
- bytes, nbytes);
- kzfree(rctx->data);
- }
-
if (ret == 0) {
if (rctx->dir == QCE_ENCRYPT) {
/* copy the icv to dst */
@@ -1817,7 +1776,6 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv,
pqcrypto_req_control->arsp->res = ret;
req_done(pqcrypto_req_control);
}
-#endif
static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
@@ -1858,26 +1816,12 @@ static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
}
static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
- struct scatterlist *sg)
+ struct scatterlist *sg, unsigned char *adata)
{
- unsigned char *adata;
uint32_t len;
uint32_t bytes = 0;
uint32_t num_sg = 0;
- if (alen == 0) {
- qreq->assoc = NULL;
- qreq->assoclen = 0;
- return 0;
- }
-
- qreq->assoc = kzalloc((alen + 0x64), GFP_ATOMIC);
- if (!qreq->assoc) {
- pr_err("qcrypto Memory allocation of adata FAIL, error %ld\n",
- PTR_ERR(qreq->assoc));
- return -ENOMEM;
- }
- adata = qreq->assoc;
/*
* Add control info for associated data
* RFC 3610 and NIST Special Publication 800-38C
@@ -2032,7 +1976,6 @@ static int _qcrypto_process_ahash(struct crypto_engine *pengine,
return ret;
}
-#if 0
static int _qcrypto_process_aead(struct crypto_engine *pengine,
struct qcrypto_req_control *pqcrypto_req_control)
{
@@ -2083,160 +2026,39 @@ static int _qcrypto_process_aead(struct crypto_engine *pengine,
if (ret)
return ret;
- /* Format Associated data */
- ret = qcrypto_aead_ccm_format_adata(&qreq,
+ if (req->assoclen) {
+ rctx->adata = kzalloc((req->assoclen + 0x64),
+ GFP_ATOMIC);
+ if (!rctx->adata)
+ return -ENOMEM;
+ /* Format Associated data */
+ ret = qcrypto_aead_ccm_format_adata(&qreq,
req->assoclen,
- req->assoc);
- if (ret)
+ req->src,
+ rctx->adata);
+ } else {
+ qreq.assoclen = 0;
+ rctx->adata = NULL;
+ }
+ if (ret) {
+ kzfree(rctx->adata);
return ret;
-
- if (pengine->pcp->ce_support.aligned_only) {
- uint32_t bytes = 0;
- uint32_t num_sg = 0;
-
- rctx->orig_src = req->src;
- rctx->orig_dst = req->dst;
-
- if ((MAX_ALIGN_SIZE*2 > UINT_MAX - qreq.assoclen) ||
- ((MAX_ALIGN_SIZE*2 + qreq.assoclen) >
- UINT_MAX - qreq.authsize) ||
- ((MAX_ALIGN_SIZE*2 + qreq.assoclen +
- qreq.authsize) >
- UINT_MAX - req->cryptlen)) {
- pr_err("Integer overflow on aead req length.\n");
- return -EINVAL;
- }
-
- rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
- qreq.authsize + MAX_ALIGN_SIZE*2),
- GFP_ATOMIC);
- if (rctx->data == NULL) {
- pr_err("Mem Alloc fail rctx->data, err %ld\n",
- PTR_ERR(rctx->data));
- kzfree(qreq.assoc);
- return -ENOMEM;
- }
- if (qreq.assoclen)
- memcpy((char *)rctx->data, qreq.assoc,
- qreq.assoclen);
-
- num_sg = qcrypto_count_sg(req->src, req->cryptlen);
- bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg,
- rctx->data + qreq.assoclen , req->cryptlen);
- if (bytes != req->cryptlen)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
- bytes, req->cryptlen);
- sg_set_buf(&rctx->ssg, rctx->data, req->cryptlen +
- qreq.assoclen);
- sg_mark_end(&rctx->ssg);
-
- if (qreq.dir == QCE_ENCRYPT)
- sg_set_buf(&rctx->dsg, rctx->data,
- qreq.assoclen + qreq.cryptlen +
- ALIGN(qreq.authsize, 64));
- else
- sg_set_buf(&rctx->dsg, rctx->data,
- qreq.assoclen + req->cryptlen +
- qreq.authsize);
- sg_mark_end(&rctx->dsg);
-
- req->src = &rctx->ssg;
- req->dst = &rctx->dsg;
}
- /*
- * Save the original associated data
- * length and sg
- */
- rctx->assoc_sg = req->assoc;
- rctx->assoclen = req->assoclen;
- rctx->assoc = qreq.assoc;
+
/*
* update req with new formatted associated
* data info
*/
- req->assoc = &rctx->asg;
- req->assoclen = qreq.assoclen;
- sg_set_buf(req->assoc, qreq.assoc,
- req->assoclen);
- sg_mark_end(req->assoc);
- } else {
- /* for aead operations, other than aes(ccm) */
- if (pengine->pcp->ce_support.aligned_only) {
- uint32_t bytes = 0;
- uint32_t num_sg = 0;
-
- rctx->orig_src = req->src;
- rctx->orig_dst = req->dst;
- /*
- * The data area should be big enough to
- * include assoicated data, ciphering data stream,
- * generated MAC, and CCM padding.
- */
- if ((MAX_ALIGN_SIZE * 2 > ULONG_MAX - req->assoclen) ||
- ((MAX_ALIGN_SIZE * 2 + req->assoclen) >
- ULONG_MAX - qreq.ivsize) ||
- ((MAX_ALIGN_SIZE * 2 + req->assoclen
- + qreq.ivsize)
- > ULONG_MAX - req->cryptlen)) {
- pr_err("Integer overflow on aead req length.\n");
- return -EINVAL;
- }
-
- rctx->data = kzalloc(
- (req->cryptlen +
- req->assoclen +
- qreq.ivsize +
- MAX_ALIGN_SIZE * 2),
- GFP_ATOMIC);
- if (rctx->data == NULL) {
- pr_err("Mem Alloc fail rctx->data, err %ld\n",
- PTR_ERR(rctx->data));
- return -ENOMEM;
- }
-
- /* copy associated data */
- num_sg = qcrypto_count_sg(req->assoc, req->assoclen);
- bytes = qcrypto_sg_copy_to_buffer(
- req->assoc, num_sg,
- rctx->data, req->assoclen);
-
- if (bytes != req->assoclen)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
- bytes, req->assoclen);
-
- /* copy iv */
- memcpy(rctx->data + req->assoclen, qreq.iv,
- qreq.ivsize);
-
- /* copy src */
- num_sg = qcrypto_count_sg(req->src, req->cryptlen);
- bytes = qcrypto_sg_copy_to_buffer(
- req->src,
- num_sg,
- rctx->data + req->assoclen +
- qreq.ivsize,
- req->cryptlen);
- if (bytes != req->cryptlen)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
- bytes, req->cryptlen);
- sg_set_buf(&rctx->ssg, rctx->data,
- req->cryptlen + req->assoclen
- + qreq.ivsize);
- sg_mark_end(&rctx->ssg);
-
- sg_set_buf(&rctx->dsg, rctx->data,
- req->cryptlen + req->assoclen
- + qreq.ivsize);
- sg_mark_end(&rctx->dsg);
- req->src = &rctx->ssg;
- req->dst = &rctx->dsg;
- }
+ qreq.asg = &rctx->asg;
+ if (rctx->adata)
+ sg_set_buf(qreq.asg, rctx->adata,
+ qreq.assoclen);
+ sg_mark_end(qreq.asg);
}
ret = qce_aead_req(pengine->qce, &qreq);
return ret;
}
-#endif
static struct crypto_engine *_qcrypto_static_assign_engine(
struct crypto_priv *cp)
@@ -2376,11 +2198,9 @@ again:
case CRYPTO_ALG_TYPE_AHASH:
ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
break;
-#if 0
case CRYPTO_ALG_TYPE_AEAD:
ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
break;
-#endif
default:
ret = -EINVAL;
};
@@ -2947,7 +2767,6 @@ static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
-#if 0
static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
{
struct qcrypto_cipher_req_ctx *rctx;
@@ -2995,6 +2814,7 @@ static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
pstat->aead_rfc4309_ccm_aes_dec++;
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
+
static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
@@ -3042,7 +2862,6 @@ static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
return 0;
}
-
static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -3271,12 +3090,12 @@ static void _aead_aes_fb_stage1_encrypt_complete(
static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
bool is_encrypt)
{
+ int rc = -EINVAL;
struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
struct ablkcipher_request *aes_req = NULL;
struct ahash_request *ahash_req = NULL;
- int rc = -EINVAL;
int nbytes;
int num_sg;
@@ -3291,19 +3110,18 @@ static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
rctx->fb_aes_req = aes_req;
rctx->fb_hash_req = ahash_req;
rctx->aead_req = req;
- num_sg = qcrypto_count_sg(req->assoc, req->assoclen);
- rctx->fb_ahash_assoc_iv = kzalloc(req->assoclen +
- crypto_aead_ivsize(aead_tfm), GFP_ATOMIC);
+ /* get assoc and iv. They are sitting in the beginning of src */
+ num_sg = qcrypto_count_sg(req->src, req->assoclen);
+ rctx->fb_ahash_assoc_iv = kzalloc(req->assoclen, GFP_ATOMIC);
if (!rctx->fb_ahash_assoc_iv)
goto ret;
+
if (req->assoclen)
- qcrypto_sg_copy_to_buffer(req->assoc, num_sg,
+ qcrypto_sg_copy_to_buffer(req->src, num_sg,
rctx->fb_ahash_assoc_iv, req->assoclen);
- memcpy(rctx->fb_ahash_assoc_iv + req->assoclen,
- req->iv, crypto_aead_ivsize(aead_tfm));
memset(rctx->fb_ahash_sg, 0, sizeof(rctx->fb_ahash_sg));
sg_set_buf(&rctx->fb_ahash_sg[0], rctx->fb_ahash_assoc_iv,
- req->assoclen + crypto_aead_ivsize(aead_tfm));
+ req->assoclen);
sg_mark_end(&rctx->fb_ahash_sg[1]);
nbytes = req->cryptlen;
@@ -3448,41 +3266,6 @@ static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
-static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
-{
- struct aead_request *areq = &req->areq;
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct qcrypto_cipher_req_ctx *rctx;
- struct crypto_stat *pstat;
-
- pstat = &_qcrypto_stat;
-
- rctx = aead_request_ctx(areq);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->giv; /* generated iv */
- rctx->aead_req = areq;
-
- memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
- /* avoid consecutive packets going out with same IV */
- *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
-
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_aes_enc++;
- else
- pstat->aead_sha256_aes_enc++;
- if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
- ctx->ahash_aead_aes192_fb) {
- areq->iv = req->giv;
- return _qcrypto_aead_aes_192_fallback(areq, true);
- }
- return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
-}
-
static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
{
struct qcrypto_cipher_req_ctx *rctx;
@@ -3498,7 +3281,6 @@ static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
- rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_des_enc++;
@@ -3522,7 +3304,6 @@ static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
rctx->dir = QCE_DECRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
- rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_des_dec++;
@@ -3531,35 +3312,6 @@ static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
-static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
-{
- struct aead_request *areq = &req->areq;
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct qcrypto_cipher_req_ctx *rctx;
- struct crypto_stat *pstat;
-
- pstat = &_qcrypto_stat;
-
- rctx = aead_request_ctx(areq);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->giv; /* generated iv */
- rctx->aead_req = areq;
-
- memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
- /* avoid consecutive packets going out with same IV */
- *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_des_enc++;
- else
- pstat->aead_sha256_des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
-}
-
static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
{
struct qcrypto_cipher_req_ctx *rctx;
@@ -3575,7 +3327,6 @@ static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
- rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_3des_enc++;
@@ -3599,7 +3350,6 @@ static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
rctx->dir = QCE_DECRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
- rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_3des_dec++;
@@ -3608,36 +3358,6 @@ static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
-static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
-{
- struct aead_request *areq = &req->areq;
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct qcrypto_cipher_req_ctx *rctx;
- struct crypto_stat *pstat;
-
- pstat = &_qcrypto_stat;
-
- rctx = aead_request_ctx(areq);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->giv; /* generated iv */
- rctx->aead_req = areq;
-
- memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
- /* avoid consecutive packets going out with same IV */
- *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_3des_enc++;
- else
- pstat->aead_sha256_3des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
-}
-#endif
-
static int _sha_init(struct ahash_request *req)
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
@@ -4841,214 +4561,175 @@ static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
},
},
};
-#if 0
-static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
+
+static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
{
+ .base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_aes_sha1_init,
- .cra_exit = _qcrypto_cra_aead_aes_exit,
- .cra_u = {
- .aead = {
+ },
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.setkey = _qcrypto_aead_setkey,
.setauthsize = _qcrypto_aead_setauthsize,
.encrypt = _qcrypto_aead_encrypt_aes_cbc,
.decrypt = _qcrypto_aead_decrypt_aes_cbc,
- .givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
- .geniv = "<built-in>",
- }
- }
+ .init = _qcrypto_cra_aead_aes_sha1_init,
+ .exit = _qcrypto_cra_aead_aes_exit,
},
{
+ .base = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_sha1_init,
- .cra_exit = _qcrypto_cra_aead_exit,
- .cra_u = {
- .aead = {
+ },
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.setkey = _qcrypto_aead_setkey,
.setauthsize = _qcrypto_aead_setauthsize,
.encrypt = _qcrypto_aead_encrypt_des_cbc,
.decrypt = _qcrypto_aead_decrypt_des_cbc,
- .givencrypt = _qcrypto_aead_givencrypt_des_cbc,
- .geniv = "<built-in>",
- }
- }
+ .init = _qcrypto_cra_aead_sha1_init,
+ .exit = _qcrypto_cra_aead_exit,
},
{
+ .base = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_sha1_init,
- .cra_exit = _qcrypto_cra_aead_exit,
- .cra_u = {
- .aead = {
+ },
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.setkey = _qcrypto_aead_setkey,
.setauthsize = _qcrypto_aead_setauthsize,
.encrypt = _qcrypto_aead_encrypt_3des_cbc,
.decrypt = _qcrypto_aead_decrypt_3des_cbc,
- .givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
- .geniv = "<built-in>",
- }
- }
+ .init = _qcrypto_cra_aead_sha1_init,
+ .exit = _qcrypto_cra_aead_exit,
},
};
-static struct crypto_alg _qcrypto_aead_sha256_hmac_algos[] = {
+static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
{
+ .base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_aes_sha256_init,
- .cra_exit = _qcrypto_cra_aead_aes_exit,
- .cra_u = {
- .aead = {
+ },
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.setkey = _qcrypto_aead_setkey,
.setauthsize = _qcrypto_aead_setauthsize,
.encrypt = _qcrypto_aead_encrypt_aes_cbc,
.decrypt = _qcrypto_aead_decrypt_aes_cbc,
- .givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
- .geniv = "<built-in>",
- }
- }
+ .init = _qcrypto_cra_aead_aes_sha256_init,
+ .exit = _qcrypto_cra_aead_aes_exit,
},
{
+ .base = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_sha256_init,
- .cra_exit = _qcrypto_cra_aead_exit,
- .cra_u = {
- .aead = {
+ },
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.setkey = _qcrypto_aead_setkey,
.setauthsize = _qcrypto_aead_setauthsize,
.encrypt = _qcrypto_aead_encrypt_des_cbc,
.decrypt = _qcrypto_aead_decrypt_des_cbc,
- .givencrypt = _qcrypto_aead_givencrypt_des_cbc,
- .geniv = "<built-in>",
- }
- }
+ .init = _qcrypto_cra_aead_sha256_init,
+ .exit = _qcrypto_cra_aead_exit,
},
{
+ .base = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_sha256_init,
- .cra_exit = _qcrypto_cra_aead_exit,
- .cra_u = {
- .aead = {
+ },
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.setkey = _qcrypto_aead_setkey,
.setauthsize = _qcrypto_aead_setauthsize,
.encrypt = _qcrypto_aead_encrypt_3des_cbc,
.decrypt = _qcrypto_aead_decrypt_3des_cbc,
- .givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
- .geniv = "<built-in>",
- }
- }
+ .init = _qcrypto_cra_aead_sha256_init,
+ .exit = _qcrypto_cra_aead_exit,
},
};
-static struct crypto_alg _qcrypto_aead_ccm_algo = {
- .cra_name = "ccm(aes)",
- .cra_driver_name = "qcrypto-aes-ccm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_ccm_init,
- .cra_exit = _qcrypto_cra_aead_exit,
- .cra_u = {
- .aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = _qcrypto_aead_ccm_setkey,
- .setauthsize = _qcrypto_aead_ccm_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_aes_ccm,
- .decrypt = _qcrypto_aead_decrypt_aes_ccm,
- .geniv = "<built-in>",
- }
- }
+static struct aead_alg _qcrypto_aead_ccm_algo = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "qcrypto-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = _qcrypto_aead_ccm_setkey,
+ .setauthsize = _qcrypto_aead_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_ccm,
+ .decrypt = _qcrypto_aead_decrypt_aes_ccm,
+ .init = _qcrypto_cra_aead_ccm_init,
+ .exit = _qcrypto_cra_aead_exit,
};
-static struct crypto_alg _qcrypto_aead_rfc4309_ccm_algo = {
- .cra_name = "rfc4309(ccm(aes))",
- .cra_driver_name = "qcrypto-rfc4309-aes-ccm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_nivaead_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aead_rfc4309_ccm_init,
- .cra_exit = _qcrypto_cra_aead_exit,
- .cra_u = {
- .aead = {
- .ivsize = 8,
- .maxauthsize = 16,
- .setkey = _qcrypto_aead_rfc4309_ccm_setkey,
- .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
- .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
- .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
- .geniv = "seqiv",
- }
- }
+static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+ .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+ .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+ .init = _qcrypto_cra_aead_rfc4309_ccm_init,
+ .exit = _qcrypto_cra_aead_exit,
};
-#endif
static int _qcrypto_probe(struct platform_device *pdev)
{
@@ -5269,14 +4950,13 @@ static int _qcrypto_probe(struct platform_device *pdev)
}
/* register crypto aead (hmac-sha1) algorithms the device supports */
-#if 0
if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
|| cp->ce_support.sha_hmac) {
for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
i++) {
struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_cipher_alg_alloc(cp,
+ q_alg = _qcrypto_aead_alg_alloc(cp,
&_qcrypto_aead_sha1_hmac_algos[i]);
if (IS_ERR(q_alg)) {
rc = PTR_ERR(q_alg);
@@ -5284,26 +4964,26 @@ static int _qcrypto_probe(struct platform_device *pdev)
}
if (cp->ce_support.use_sw_aead_algo) {
rc = _qcrypto_prefix_alg_cra_name(
- q_alg->cipher_alg.cra_name,
- strlen(q_alg->cipher_alg.cra_name));
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
if (rc) {
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
- q_alg->cipher_alg.cra_name);
+ q_alg->aead_alg.base.cra_name);
kfree(q_alg);
goto err;
}
}
- rc = crypto_register_alg(&q_alg->cipher_alg);
+ rc = crypto_register_aead(&q_alg->aead_alg);
if (rc) {
dev_err(&pdev->dev,
"%s alg registration failed\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
kfree(q_alg);
} else {
list_add_tail(&q_alg->entry, &cp->alg_list);
dev_info(&pdev->dev, "%s\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
}
}
}
@@ -5314,7 +4994,7 @@ static int _qcrypto_probe(struct platform_device *pdev)
i++) {
struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_cipher_alg_alloc(cp,
+ q_alg = _qcrypto_aead_alg_alloc(cp,
&_qcrypto_aead_sha256_hmac_algos[i]);
if (IS_ERR(q_alg)) {
rc = PTR_ERR(q_alg);
@@ -5322,30 +5002,30 @@ static int _qcrypto_probe(struct platform_device *pdev)
}
if (cp->ce_support.use_sw_aead_algo) {
rc = _qcrypto_prefix_alg_cra_name(
- q_alg->cipher_alg.cra_name,
- strlen(q_alg->cipher_alg.cra_name));
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
if (rc) {
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
- q_alg->cipher_alg.cra_name);
+ q_alg->aead_alg.base.cra_name);
kfree(q_alg);
goto err;
}
}
- rc = crypto_register_alg(&q_alg->cipher_alg);
+ rc = crypto_register_aead(&q_alg->aead_alg);
if (rc) {
dev_err(&pdev->dev,
"%s alg registration failed\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
kfree(q_alg);
} else {
list_add_tail(&q_alg->entry, &cp->alg_list);
dev_info(&pdev->dev, "%s\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
}
}
}
-#endif
+
if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
/* register crypto hmac algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
@@ -5388,39 +5068,38 @@ static int _qcrypto_probe(struct platform_device *pdev)
* Register crypto cipher (aes-ccm) algorithms the
* device supports
*/
-#if 0
if (cp->ce_support.aes_ccm) {
struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+ q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
if (IS_ERR(q_alg)) {
rc = PTR_ERR(q_alg);
goto err;
}
if (cp->ce_support.use_sw_aes_ccm_algo) {
rc = _qcrypto_prefix_alg_cra_name(
- q_alg->cipher_alg.cra_name,
- strlen(q_alg->cipher_alg.cra_name));
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
if (rc) {
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
- q_alg->cipher_alg.cra_name);
+ q_alg->aead_alg.base.cra_name);
kfree(q_alg);
goto err;
}
}
- rc = crypto_register_alg(&q_alg->cipher_alg);
+ rc = crypto_register_aead(&q_alg->aead_alg);
if (rc) {
dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
kzfree(q_alg);
} else {
list_add_tail(&q_alg->entry, &cp->alg_list);
dev_info(&pdev->dev, "%s\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
}
- q_alg = _qcrypto_cipher_alg_alloc(cp,
+ q_alg = _qcrypto_aead_alg_alloc(cp,
&_qcrypto_aead_rfc4309_ccm_algo);
if (IS_ERR(q_alg)) {
rc = PTR_ERR(q_alg);
@@ -5429,28 +5108,27 @@ static int _qcrypto_probe(struct platform_device *pdev)
if (cp->ce_support.use_sw_aes_ccm_algo) {
rc = _qcrypto_prefix_alg_cra_name(
- q_alg->cipher_alg.cra_name,
- strlen(q_alg->cipher_alg.cra_name));
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
if (rc) {
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
- q_alg->cipher_alg.cra_name);
+ q_alg->aead_alg.base.cra_name);
kfree(q_alg);
goto err;
}
}
- rc = crypto_register_alg(&q_alg->cipher_alg);
+ rc = crypto_register_aead(&q_alg->aead_alg);
if (rc) {
dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
kfree(q_alg);
} else {
list_add_tail(&q_alg->entry, &cp->alg_list);
dev_info(&pdev->dev, "%s\n",
- q_alg->cipher_alg.cra_driver_name);
+ q_alg->aead_alg.base.cra_driver_name);
}
}
-#endif
mutex_unlock(&cp->engine_lock);