From a38acd236cac914aafffd80af79b9556fc2c3934 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 28 Mar 2018 15:39:17 +0300 Subject: crypto: caam - fix DMA mapping dir for generated IV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In case of GIVCIPHER, IV is generated by the device. Fix the DMA mapping direction. Cc: # 3.19+ Fixes: 7222d1a34103 ("crypto: caam - add support for givencrypt cbc(aes) and rfc3686(ctr(aes))") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 7207a535942d..3e18b9266027 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -769,6 +769,7 @@ struct aead_edesc { * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist * @iv_dma: dma address of iv for checking continuity and link table + * @iv_dir: DMA mapping direction for IV * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table @@ -778,6 +779,7 @@ struct ablkcipher_edesc { int src_nents; int dst_nents; dma_addr_t iv_dma; + enum dma_data_direction iv_dir; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; @@ -787,7 +789,8 @@ struct ablkcipher_edesc { static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, - dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, + dma_addr_t iv_dma, int ivsize, + enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma, int sec4_sg_bytes) { if (dst != src) { @@ -799,7 +802,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, } if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); if (sec4_sg_bytes) dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, DMA_TO_DEVICE); @@ -810,7 +813,7 @@ static void aead_unmap(struct device *dev, struct aead_request *req) { caam_unmap(dev, req->src, req->dst, - edesc->src_nents, edesc->dst_nents, 0, 0, + edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } @@ -823,7 +826,7 @@ static void ablkcipher_unmap(struct device *dev, caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, - edesc->iv_dma, ivsize, + edesc->iv_dma, ivsize, edesc->iv_dir, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } @@ -1287,7 +1290,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, GFP_DMA | flags); if (!edesc) { caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1550,7 +1553,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request if (dma_mapping_error(jrdev, iv_dma)) { dev_err(jrdev, "unable to map IV\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1572,7 +1575,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1581,6 +1584,7 
@@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; + edesc->iv_dir = DMA_TO_DEVICE; if (!in_contig) { dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); @@ -1598,7 +1602,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } @@ -1756,11 +1760,11 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( * Check if iv can be contiguous with source and destination. * If so, include it. If not, create scatterlist. */ - iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, iv_dma)) { dev_err(jrdev, "unable to map IV\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1781,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1790,6 +1794,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; + edesc->iv_dir = DMA_FROM_DEVICE; if (mapped_src_nents > 1) sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg, @@ -1807,7 +1812,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } -- cgit v1.2.3 From 115957bb3e59fcb226ce76b97af14533f239e0ac Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 28 Mar 2018 15:39:18 +0300 Subject: crypto: caam - fix IV DMA mapping and updating MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are two IV-related issues: (1) crypto API does not guarantee to provide an IV buffer that is DMAable, thus it's incorrect to DMA map it (2) for in-place decryption, since ciphertext is overwritten with plaintext, updated req->info will contain the last block of plaintext (instead of the last block of ciphertext) While these two issues could be fixed separately, it's straightforward to fix both in the same time - by allocating extra space in the ablkcipher_edesc for the IV that will be fed to the crypto engine; this allows for fixing (2) by saving req->src[last_block] in req->info directly, i.e. without allocating another temporary buffer. A side effect of the fix is that it's no longer possible to have the IV and req->src contiguous. Code checking for this case is removed. 
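To make the resulting layout concrete, here is a minimal sketch of the pattern this fix adopts (it mirrors the hunks below, with names simplified): the IV is carved out of the tail of the kmalloc'ed extended descriptor, which is guaranteed DMAable, and only that private copy is ever DMA mapped.

	/*
	 * Sketch only: one allocation holds the h/w descriptor, the
	 * link table and the IV, so the IV is always in DMAable memory.
	 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	memcpy(iv, req->info, ivsize);	/* copy caller's IV into DMAable area */
	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		/* unmap src/dst and kfree(edesc) on the error path */
		return ERR_PTR(-ENOMEM);
	}

Because the engine writes the generated IV back through this same buffer on the GIVCIPHER path, the completion callback can simply memcpy() it from the edesc tail into greq->giv.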
Cc: # 4.13+ Fixes: 854b06f76879 ("crypto: caam - properly set IV after {en,de}crypt") Link: http://lkml.kernel.org/r/20170113084620.GF22022@gondor.apana.org.au Reported-by: Gilad Ben-Yossef Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 212 ++++++++++++++++++------------------------ 1 file changed, 91 insertions(+), 121 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3e18b9266027..d67667970f7e 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -774,6 +774,7 @@ struct aead_edesc { * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables + * and IV */ struct ablkcipher_edesc { int src_nents; @@ -915,6 +916,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, ivsize, 0); + /* In case initial IV was generated, copy it in GIVCIPHER request */ + if (edesc->iv_dir == DMA_FROM_DEVICE) { + u8 *iv; + struct skcipher_givcrypt_request *greq; + + greq = container_of(req, struct skcipher_givcrypt_request, + creq); + iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) + + edesc->sec4_sg_bytes; + memcpy(greq->giv, iv, ivsize); + } + kfree(edesc); ablkcipher_request_complete(req, err); @@ -925,10 +938,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; +#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); -#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -946,14 +959,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); - - /* - * The crypto API expects us to set the IV (req->info) to the last - * ciphertext block. 
- */ - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, - ivsize, 0); - kfree(edesc); ablkcipher_request_complete(req, err); @@ -1102,15 +1107,14 @@ static void init_authenc_job(struct aead_request *req, */ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req, - bool iv_contig) + struct ablkcipher_request *req) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); u32 *desc = edesc->hw_desc; - u32 out_options = 0, in_options; - dma_addr_t dst_dma, src_dma; - int len, sec4_sg_index = 0; + u32 out_options = 0; + dma_addr_t dst_dma; + int len; #ifdef DEBUG print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", @@ -1126,30 +1130,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - if (iv_contig) { - src_dma = edesc->iv_dma; - in_options = 0; - } else { - src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents + 1; - in_options = LDST_SGF; - } - append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize, + LDST_SGF); if (likely(req->src == req->dst)) { - if (edesc->src_nents == 1 && iv_contig) { - dst_dma = sg_dma_address(req->src); - } else { - dst_dma = edesc->sec4_sg_dma + - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } + dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; } else { if (edesc->dst_nents == 1) { dst_dma = sg_dma_address(req->dst); } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * sizeof(struct sec4_sg_entry); + dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) * + sizeof(struct sec4_sg_entry); out_options = LDST_SGF; } } @@ -1161,13 +1153,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, */ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req, - bool iv_contig) + struct ablkcipher_request *req) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); u32 *desc = edesc->hw_desc; - u32 out_options, in_options; + u32 in_options; dma_addr_t dst_dma, src_dma; int len, sec4_sg_index = 0; @@ -1193,15 +1184,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, } append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); - if (iv_contig) { - dst_dma = edesc->iv_dma; - out_options = 0; - } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } - append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); + dst_dma = edesc->sec4_sg_dma + sec4_sg_index * + sizeof(struct sec4_sg_entry); + append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF); } /* @@ -1494,8 +1479,7 @@ static int aead_decrypt(struct aead_request *req) * allocate and map the ablkcipher extended descriptor for ablkcipher */ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request - *req, int desc_bytes, - bool *iv_contig_out) + *req, int desc_bytes) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); @@ -1504,8 +1488,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request GFP_KERNEL : GFP_ATOMIC; int src_nents, 
mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool in_contig; + dma_addr_t iv_dma; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1549,33 +1533,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } } - iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, DMA_NONE, 0, 0); - return ERR_PTR(-ENOMEM); - } - - if (mapped_src_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->src)) { - in_contig = true; - sec4_sg_ents = 0; - } else { - in_contig = false; - sec4_sg_ents = 1 + mapped_src_nents; - } + sec4_sg_ents = 1 + mapped_src_nents; dst_sg_idx = sec4_sg_ents; sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); - /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1586,12 +1557,22 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request desc_bytes; edesc->iv_dir = DMA_TO_DEVICE; - if (!in_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->src, mapped_src_nents, - edesc->sec4_sg + 1, 0); + /* Make sure IV is located in a DMAable area */ + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + memcpy(iv, req->info, ivsize); + + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); } + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0); + if (mapped_dst_nents > 1) { sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + dst_sg_idx, 0); @@ -1615,7 +1596,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request sec4_sg_bytes, 1); #endif - *iv_contig_out = in_contig; return edesc; } @@ -1625,19 +1605,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_enc, - ctx->sh_desc_enc_dma, edesc, req, iv_contig); + init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": 
", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -1661,20 +1638,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) struct ablkcipher_edesc *edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, + ivsize, 0); + /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_dec, - ctx->sh_desc_dec_dma, edesc, req, iv_contig); + init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req); desc = edesc->hw_desc; #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", @@ -1699,8 +1681,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) */ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( struct skcipher_givcrypt_request *greq, - int desc_bytes, - bool *iv_contig_out) + int desc_bytes) { struct ablkcipher_request *req = &greq->creq; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); @@ -1710,8 +1691,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool out_contig; + dma_addr_t iv_dma; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1756,36 +1737,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( } } - /* - * Check if iv can be contiguous with source and destination. - * If so, include it. If not, create scatterlist. - */ - iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, DMA_NONE, 0, 0); - return ERR_PTR(-ENOMEM); - } - sec4_sg_ents = mapped_src_nents > 1 ? 
mapped_src_nents : 0; dst_sg_idx = sec4_sg_ents; - if (mapped_dst_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->dst)) { - out_contig = true; - } else { - out_contig = false; - sec4_sg_ents += 1 + mapped_dst_nents; - } + sec4_sg_ents += 1 + mapped_dst_nents; - /* allocate space for base edesc and hw desc commands, link tables */ + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1796,16 +1761,24 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( desc_bytes; edesc->iv_dir = DMA_FROM_DEVICE; + /* Make sure IV is located in a DMAable area */ + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + if (mapped_src_nents > 1) sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg, 0); - if (!out_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, - iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->dst, mapped_dst_nents, - edesc->sec4_sg + dst_sg_idx + 1, 0); - } + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + + dst_sg_idx + 1, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); @@ -1825,7 +1798,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( sec4_sg_bytes, 1); #endif - *iv_contig_out = out_contig; return edesc; } @@ -1836,19 +1808,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig = false; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, - edesc, req, iv_contig); + edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@" __stringify(__LINE__) ": ", -- cgit v1.2.3 From 3a488aaec6f343b5dc6d94529847a840bbeaf009 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 28 Mar 2018 15:39:19 +0300 Subject: crypto: caam/qi - fix IV DMA mapping and updating MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are two IV-related issues: (1) crypto API does not guarantee to provide an IV buffer that is DMAable, thus it's incorrect to DMA map it (2) for in-place decryption, since ciphertext is overwritten with plaintext, updated IV (req->info) will contain the last block of plaintext (instead of the last block of ciphertext) While these two issues 
could be fixed separately, it's straightforward to fix both in the same time - by using the {ablkcipher,aead}_edesc extended descriptor to store the IV that will be fed to the crypto engine; this allows for fixing (2) by saving req->src[last_block] in req->info directly, i.e. without allocating yet another temporary buffer. A side effect of the fix is that it's no longer possible to have the IV contiguous with req->src or req->dst. Code checking for this case is removed. Cc: # 4.14+ Fixes: a68a19380522 ("crypto: caam/qi - properly set IV after {en,de}crypt") Link: http://lkml.kernel.org/r/20170113084620.GF22022@gondor.apana.org.au Reported-by: Gilad Ben-Yossef Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi.c | 227 ++++++++++++++++++++------------------- 1 file changed, 116 insertions(+), 111 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index cacda0831390..6e61cc93c2b0 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -728,7 +728,7 @@ badkey: * @assoclen: associated data length, in CAAM endianness * @assoclen_dma: bus physical mapped address of req->assoclen * @drv_req: driver-specific request structure - * @sgt: the h/w link table + * @sgt: the h/w link table, followed by IV */ struct aead_edesc { int src_nents; @@ -739,9 +739,6 @@ struct aead_edesc { unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; -#define CAAM_QI_MAX_AEAD_SG \ - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ - sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -753,7 +750,7 @@ struct aead_edesc { * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table * @drv_req: driver-specific request structure - * @sgt: the h/w link table + * @sgt: the h/w link table, followed by IV */ struct ablkcipher_edesc { int src_nents; @@ -762,9 +759,6 @@ struct ablkcipher_edesc { int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; -#define CAAM_QI_MAX_ABLKCIPHER_SG \ - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ - sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, } } - if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) { + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) ivsize = crypto_aead_ivsize(aead); - iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, - dst_nents, 0, 0, op_type, 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } - } /* * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. @@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, */ qm_sg_ents = 1 + !!ivsize + mapped_src_nents + (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); - if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", - qm_sg_ents, CAAM_QI_MAX_AEAD_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > + CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } - sg_table = &edesc->sgt[0]; - qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + + if (ivsize) { + u8 *iv = (u8 *)(sg_table + qm_sg_ents); + + /* Make sure IV is located in a DMAable area */ + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, + dst_nents, 0, 0, 0, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; @@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) #endif ablkcipher_unmap(qidev, edesc, req); - qi_cache_free(edesc); + + /* In case initial IV was generated, copy it in GIVCIPHER request */ + if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) { + u8 *iv; + struct skcipher_givcrypt_request *greq; + + greq = container_of(req, struct skcipher_givcrypt_request, + creq); + iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes; + memcpy(greq->giv, iv, ivsize); + } /* * The crypto API expects us to set the IV (req->info) to the last * ciphertext block. This is used e.g. by the CTS mode. */ - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, - ivsize, 0); + if (edesc->drv_req.drv_ctx->op_type != DECRYPT) + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - + ivsize, ivsize, 0); + qi_cache_free(edesc); ablkcipher_request_complete(req, status); } @@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; dma_addr_t iv_dma; - bool in_contig; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - int dst_sg_idx, qm_sg_ents; + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; enum optype op_type = encrypt ? ENCRYPT : DECRYPT; @@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } } - iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - if (mapped_src_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->src)) { - in_contig = true; - qm_sg_ents = 0; - } else { - in_contig = false; - qm_sg_ents = 1 + mapped_src_nents; - } + qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; qm_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } - /* allocate space for base edesc and link tables */ + /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(GFP_DMA | flags); if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + memcpy(iv, req->info, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - sg_table = &edesc->sgt[0]; - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = ablkcipher_done; edesc->drv_req.drv_ctx = drv_ctx; - if (!in_contig) { - dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); - } + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); if (mapped_dst_nents > 1) sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + @@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request fd_sgt = &edesc->drv_req.fd_sgt[0]; - if (!in_contig) - dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, - ivsize + req->nbytes, 0); - else - dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes, - 0); + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, + ivsize + req->nbytes, 0); if (req->src == req->dst) { - if (!in_contig) - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + - sizeof(*sg_table), req->nbytes, 0); - else - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), - req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + + sizeof(*sg_table), req->nbytes, 0); } else if (mapped_dst_nents > 1) { dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * sizeof(*sg_table), req->nbytes, 0); @@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; struct ablkcipher_edesc *edesc; dma_addr_t iv_dma; - bool out_contig; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); struct qm_sg_entry *sg_table, *fd_sgt; - int dst_sg_idx, qm_sg_ents; + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, GIVENCRYPT); @@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( 
mapped_dst_nents = src_nents; } - iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0; dst_sg_idx = qm_sg_ents; - if (mapped_dst_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->dst)) { - out_contig = true; - } else { - out_contig = false; - qm_sg_ents += 1 + mapped_dst_nents; - } - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); + qm_sg_ents += 1 + mapped_dst_nents; + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } - /* allocate space for base edesc and link tables */ + /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(GFP_DMA | flags); if (!edesc) { dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - sg_table = &edesc->sgt[0]; - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = ablkcipher_done; edesc->drv_req.drv_ctx = drv_ctx; @@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( if (mapped_src_nents > 1) sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0); - if (!out_contig) { - dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + - dst_sg_idx + 1, 0); - } + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1, + 0); edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, DMA_TO_DEVICE); @@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src), req->nbytes, 0); - if (!out_contig) - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * - sizeof(*sg_table), ivsize + req->nbytes, - 0); - else - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), - ivsize + req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * + sizeof(*sg_table), ivsize + req->nbytes, 0); return edesc; } @@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) struct ablkcipher_edesc 
*edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int ret; if (unlikely(caam_congested)) @@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) if (IS_ERR(edesc)) return PTR_ERR(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + if (!encrypt) + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - + ivsize, ivsize, 0); + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); if (!ret) { ret = -EINPROGRESS; -- cgit v1.2.3 From 3d8ccf9f6ef6b569b0821d75e4ada9430d3be462 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 3 Apr 2018 15:09:12 -0500 Subject: crypto: chelsio - Fix potential NULL pointer dereferences Add null checks on lookup_tid() return value in order to prevent null pointer dereferences. Addresses-Coverity-ID: 1467422 ("Dereference null return value") Addresses-Coverity-ID: 1467443 ("Dereference null return value") Addresses-Coverity-ID: 1467445 ("Dereference null return value") Addresses-Coverity-ID: 1467449 ("Dereference null return value") Fixes: cc35c88ae4db ("crypto : chtls - CPL handler definition") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_cm.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 82a473a0cefa..23c43b8327db 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1537,6 +1537,10 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. for hwtid %u.\n", hwtid); + return -EINVAL; + } skb_dst_set(skb, NULL); process_cpl_msg(chtls_recv_data, sk, skb); return 0; @@ -1585,6 +1589,10 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. for hwtid %u.\n", hwtid); + return -EINVAL; + } skb_dst_set(skb, NULL); process_cpl_msg(chtls_recv_pdu, sk, skb); return 0; @@ -1646,6 +1654,10 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. for hwtid %u.\n", hwtid); + return -EINVAL; + } skb_dst_set(skb, NULL); process_cpl_msg(chtls_rx_hdr, sk, skb); @@ -2105,6 +2117,10 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. for hwtid %u.\n", hwtid); + return -EINVAL; + } process_cpl_msg(chtls_rx_ack, sk, skb); return 0; -- cgit v1.2.3 From 2d93913e22013cb941fa7e1cf3d5e6649bc2bfad Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Apr 2018 17:44:03 +0100 Subject: crypto: chelsio - don't leak information from the stack to userspace The structure crypto_info contains fields that are not initialized and only .version is set. The copy_to_user call is hence leaking information from the stack to userspace which must be avoided. Fix this by zero'ing all the unused fields. 
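The pattern, in sketch form (mirroring the one-line fix below): zero-initialize the whole on-stack struct before setting the fields actually used, so the uninitialized remainder can never reach userspace; the -EFAULT return is the typical error path, not quoted from this function.

	struct tls_crypto_info crypto_info = { 0 };	/* all members start zeroed */

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(crypto_info)))
		return -EFAULT;

Note that `= { 0 }` zeroes every member but compilers are not obliged to zero padding bytes; where a struct with padding is copied to userspace wholesale, an explicit memset() is the more conservative choice.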
Detected by CoverityScan, CID#1467421 ("Uninitialized scalar variable") Fixes: a08943947873 ("crypto: chtls - Register chtls with net tls") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 007c45c38fc7..69f3756eb980 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -441,7 +441,7 @@ nomem: static int do_chtls_getsockopt(struct sock *sk, char __user *optval, int __user *optlen) { - struct tls_crypto_info crypto_info; + struct tls_crypto_info crypto_info = { 0 }; crypto_info.version = TLS_1_2_VERSION; if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info))) -- cgit v1.2.3 From c4e848586cf11dd80633e4981108f36d4b414df1 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 6 Apr 2018 17:58:47 +0100 Subject: crypto: chelsio - remove redundant assignment to cdev->ports There is a double assignment to cdev->ports, the first is redundant as it is over-written so remove it. Detected by CoverityScan, CID#1467432 ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 69f3756eb980..5b9dd582aac0 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -216,7 +216,6 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info) cdev->lldi = lldi; cdev->pdev = lldi->pdev; cdev->tids = lldi->tids; - cdev->ports = (struct net_device **)(cdev + 1); cdev->ports = lldi->ports; cdev->mtus = lldi->mtus; cdev->tids = lldi->tids; -- cgit v1.2.3 From 37ff02acaa3d7be87ecb89f198a549ffd3ae2403 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 9 Apr 2018 17:45:50 +0200 Subject: crypto: cavium - Fix fallout from CONFIG_VMAP_STACK Enabling virtual mapped kernel stacks breaks the thunderx_zip driver. On compression or decompression the executing CPU hangs in an endless loop. The reason for this is the usage of __pa by the driver which does no longer work for an address that is not part of the 1:1 mapping. The zip driver allocates a result struct on the stack and needs to tell the hardware the physical address within this struct that is used to signal the completion of the request. As the hardware gets the wrong address after the broken __pa conversion it writes to an arbitrary address. The zip driver then waits forever for the completion byte to contain a non-zero value. Allocating the result struct from 1:1 mapped memory resolves this bug. 
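The underlying rule, shown as a sketch (the 'result' field name is illustrative, not the driver's actual layout): __pa()/virt_to_phys() are only meaningful for linearly mapped memory such as kmalloc'ed buffers. With CONFIG_VMAP_STACK the kernel stack lives in vmalloc space, so taking the "physical address" of an on-stack object yields a bogus address that the device then writes through.

	phys_addr_t hw_addr;

	struct zip_state s;			/* on stack: broken with VMAP_STACK */
	hw_addr = __pa(&s.result);		/* not a valid physical address */

	struct zip_state *zs = kzalloc(sizeof(*zs), GFP_ATOMIC); /* 1:1 mapped */
	if (!zs)
		return -ENOMEM;
	hw_addr = __pa(&zs->result);		/* valid: kmalloc'ed memory */

The point is simply that any address handed to the hardware must come from linear-mapped memory, which is exactly what the kzalloc() conversion below guarantees.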
Signed-off-by: Jan Glauber Reviewed-by: Robert Richter Cc: stable # 4.14 Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_crypto.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 8df4d26cf9d4..b92b6e7e100f 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; - struct zip_state zip_state; + struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; @@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen, if (!zip) return -ENODEV; - memset(&zip_state, 0, sizeof(struct zip_state)); + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); + if (!zip_state) + return -ENOMEM; + zip_ops = &zip_ctx->zip_comp; zip_ops->input_len = slen; zip_ops->output_len = *dlen; memcpy(zip_ops->input, src, slen); - ret = zip_deflate(zip_ops, &zip_state, zip); + ret = zip_deflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } - + kfree(zip_state); return ret; } @@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; - struct zip_state zip_state; + struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; @@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen, if (!zip) return -ENODEV; - memset(&zip_state, 0, sizeof(struct zip_state)); + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); + if (!zip_state) + return -ENOMEM; + zip_ops = &zip_ctx->zip_decomp; memcpy(zip_ops->input, src, slen); @@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen, zip_ops->input_len = slen; zip_ops->output_len = *dlen; - ret = zip_inflate(zip_ops, &zip_state, zip); + ret = zip_inflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } - + kfree(zip_state); return ret; } -- cgit v1.2.3 From c782a8c43e94ba6c09e9de2d69b5e3a5840ce61c Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 9 Apr 2018 17:45:51 +0200 Subject: crypto: cavium - Limit result reading attempts After issuing a request an endless loop was used to read the completion state from memory which is asynchronously updated by the ZIP coprocessor. Add an upper bound to the retry attempts to prevent a CPU getting stuck forever in case of an error. Additionally, add a read memory barrier and a small delay between the reading attempts. 
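The bounded-polling idiom the patch introduces (zip_poll_result(), shown in full in the diff below) follows a standard shape. A rough sketch, where completion_byte stands in for the result->s.compcode field the driver polls, and READ_ONCE() stands in for the patch's explicit rmb():

	/* completion_byte points at device-updated (DMA coherent) memory */
	int retries = 1000;

	while (!READ_ONCE(*completion_byte)) {
		if (!--retries)
			return -ETIMEDOUT;	/* give up instead of hanging the CPU */
		udelay(10);			/* brief back-off between reads */
	}
	return 0;

Both READ_ONCE() and the patch's rmb() force the value to be re-read from memory on every iteration rather than cached in a register, and the retry bound turns a hardware error into a recoverable -ETIMEDOUT instead of an endless spin.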
Signed-off-by: Jan Glauber Reviewed-by: Robert Richter Cc: stable # 4.14 Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/common.h | 21 +++++++++++++++++++++ drivers/crypto/cavium/zip/zip_deflate.c | 4 ++-- drivers/crypto/cavium/zip/zip_inflate.c | 4 ++-- 3 files changed, 25 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h index dc451e0a43c5..58fb3ed6e644 100644 --- a/drivers/crypto/cavium/zip/common.h +++ b/drivers/crypto/cavium/zip/common.h @@ -46,8 +46,10 @@ #ifndef __COMMON_H__ #define __COMMON_H__ +#include #include #include +#include #include #include #include @@ -149,6 +151,25 @@ struct zip_operation { u32 sizeofzops; }; +static inline int zip_poll_result(union zip_zres_s *result) +{ + int retries = 1000; + + while (!result->s.compcode) { + if (!--retries) { + pr_err("ZIP ERR: request timed out"); + return -ETIMEDOUT; + } + udelay(10); + /* + * Force re-reading of compcode which is updated + * by the ZIP coprocessor. + */ + rmb(); + } + return 0; +} + /* error messages */ #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \ fmt "\n", __func__, __LINE__, ## args) diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c index 9a944b8c1e29..d7133f857d67 100644 --- a/drivers/crypto/cavium/zip/zip_deflate.c +++ b/drivers/crypto/cavium/zip/zip_deflate.c @@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, /* Stats update for compression requests submitted */ atomic64_inc(&zip_dev->stats.comp_req_submit); - while (!result_ptr->s.compcode) - continue; + /* Wait for completion or error */ + zip_poll_result(result_ptr); /* Stats update for compression requests completed */ atomic64_inc(&zip_dev->stats.comp_req_complete); diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c index 50cbdd83dbf2..7e0d73e2f89e 100644 --- a/drivers/crypto/cavium/zip/zip_inflate.c +++ b/drivers/crypto/cavium/zip/zip_inflate.c @@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, /* Decompression requests submitted stats update */ atomic64_inc(&zip_dev->stats.decomp_req_submit); - while (!result_ptr->s.compcode) - continue; + /* Wait for completion or error */ + zip_poll_result(result_ptr); /* Decompression requests completed stats update */ atomic64_inc(&zip_dev->stats.decomp_req_complete); -- cgit v1.2.3 From a40c88045506ecba8e3ae75da19e8a2c53e23a41 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 9 Apr 2018 17:45:52 +0200 Subject: crypto: cavium - Prevent division by zero Avoid two potential divisions by zero when calculating average values for the zip statistics. Signed-off-by: Jan Glauber Reviewed-by: Robert Richter Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_main.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index 1cd8aa488185..79b449e0f955 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -482,10 +482,11 @@ static int zip_show_stats(struct seq_file *s, void *unused) atomic64_add(val, &st->pending_req); } - avg_chunk = (atomic64_read(&st->comp_in_bytes) / - atomic64_read(&st->comp_req_complete)); - avg_cr = (atomic64_read(&st->comp_in_bytes) / - atomic64_read(&st->comp_out_bytes)); + val = atomic64_read(&st->comp_req_complete); + avg_chunk = (val) ? 
atomic64_read(&st->comp_in_bytes) / val : 0; + + val = atomic64_read(&st->comp_out_bytes); + avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0; seq_printf(s, " ZIP Device %d Stats\n" "-----------------------------------\n" "Comp Req Submitted : \t%lld\n" -- cgit v1.2.3 From 1cc7e01ff977770ce0651f4d347a84e360835c3b Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 9 Apr 2018 17:45:53 +0200 Subject: crypto: cavium - Fix statistics pending request value The pending request counter was read from the wrong register. While at it, there is no need to use an atomic for it as it is only read localy in a loop. Signed-off-by: Jan Glauber Reviewed-by: Robert Richter Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_main.c | 13 +++++-------- drivers/crypto/cavium/zip/zip_main.h | 1 - 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index 79b449e0f955..ae5b20c695ca 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -469,6 +469,8 @@ static int zip_show_stats(struct seq_file *s, void *unused) struct zip_stats *st; for (index = 0; index < MAX_ZIP_DEVICES; index++) { + u64 pending = 0; + if (zip_dev[index]) { zip = zip_dev[index]; st = &zip->stats; @@ -476,10 +478,8 @@ static int zip_show_stats(struct seq_file *s, void *unused) /* Get all the pending requests */ for (q = 0; q < ZIP_NUM_QUEUES; q++) { val = zip_reg_read((zip->reg_base + - ZIP_DBG_COREX_STA(q))); - val = (val >> 32); - val = val & 0xffffff; - atomic64_add(val, &st->pending_req); + ZIP_DBG_QUEX_STA(q))); + pending += val >> 32 & 0xffffff; } val = atomic64_read(&st->comp_req_complete); @@ -514,10 +514,7 @@ static int zip_show_stats(struct seq_file *s, void *unused) (u64)atomic64_read(&st->decomp_in_bytes), (u64)atomic64_read(&st->decomp_out_bytes), (u64)atomic64_read(&st->decomp_bad_reqs), - (u64)atomic64_read(&st->pending_req)); - - /* Reset pending requests count */ - atomic64_set(&st->pending_req, 0); + pending); } } return 0; diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h index 64e051f60784..e1e4fa92ce80 100644 --- a/drivers/crypto/cavium/zip/zip_main.h +++ b/drivers/crypto/cavium/zip/zip_main.h @@ -74,7 +74,6 @@ struct zip_stats { atomic64_t comp_req_complete; atomic64_t decomp_req_submit; atomic64_t decomp_req_complete; - atomic64_t pending_req; atomic64_t comp_in_bytes; atomic64_t comp_out_bytes; atomic64_t decomp_in_bytes; -- cgit v1.2.3 From e7a9b05ca4c707ff4b46a77963db48d085d383e0 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 9 Apr 2018 17:45:54 +0200 Subject: crypto: cavium - Fix smp_processor_id() warnings Switch to raw_smp_processor_id() to prevent a number of warnings from kernel debugging. We do not care about preemption here, as the CPU number is only used as a poor mans load balancing or device selection. If preemption happens during a compress/decompress operation a small performance hit will occur but everything will continue to work, so just ignore it. 
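In sketch form, the trade being made here (safe only because the value is a hint, not a correctness requirement):

	/*
	 * smp_processor_id() warns when the caller can be migrated
	 * (preemption enabled); raw_smp_processor_id() skips that check.
	 * A stale CPU number here only skews queue selection slightly.
	 */
	int queue = raw_smp_processor_id() % 2;	/* load-balancing hint only */

If the code instead required the CPU number to stay accurate for the duration of its use, the right fix would be get_cpu()/put_cpu() to disable preemption around the critical region, not the raw variant.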
Signed-off-by: Jan Glauber Reviewed-by: Robert Richter Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_device.c | 4 ++-- drivers/crypto/cavium/zip/zip_main.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c index ccf21fb91513..f174ec29ed69 100644 --- a/drivers/crypto/cavium/zip/zip_device.c +++ b/drivers/crypto/cavium/zip/zip_device.c @@ -87,12 +87,12 @@ u32 zip_load_instr(union zip_inst_s *instr, * Distribute the instructions between the enabled queues based on * the CPU id. */ - if (smp_processor_id() % 2 == 0) + if (raw_smp_processor_id() % 2 == 0) queue = 0; else queue = 1; - zip_dbg("CPU Core: %d Queue number:%d", smp_processor_id(), queue); + zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue); /* Take cmd buffer lock */ spin_lock(&zip_dev->iq[queue].lock); diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index ae5b20c695ca..be055b9547f6 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -113,7 +113,7 @@ struct zip_device *zip_get_device(int node) */ int zip_get_node_id(void) { - return cpu_to_node(smp_processor_id()); + return cpu_to_node(raw_smp_processor_id()); } /* Initializes the ZIP h/w sub-system */ -- cgit v1.2.3 From b0039c00fbbbc3cb9c4b25852d81a2b4c193371d Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 11 Apr 2018 09:45:19 -0300 Subject: crypto: caam - staticize caam_get_era() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit caam_get_era() is only used locally, so do not export this function and make it static instead. Signed-off-by: Fabio Estevam Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 3 +-- drivers/crypto/caam/ctrl.h | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index e4cc636e1104..bee690ab8650 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -400,7 +400,7 @@ start_rng: * caam_get_era() - Return the ERA of the SEC on SoC, based * on "sec-era" propery in the DTS. This property is updated by u-boot. **/ -int caam_get_era(void) +static int caam_get_era(void) { struct device_node *caam_node; int ret; @@ -412,7 +412,6 @@ int caam_get_era(void) return ret ? -ENOTSUPP : prop; } -EXPORT_SYMBOL(caam_get_era); static const struct of_device_id caam_match[] = { { diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h index be693a2cc25e..f3ecd67922a7 100644 --- a/drivers/crypto/caam/ctrl.h +++ b/drivers/crypto/caam/ctrl.h @@ -9,8 +9,6 @@ #define CTRL_H /* Prototypes for backend-level services exposed to APIs */ -int caam_get_era(void); - extern bool caam_dpaa2; #endif /* CTRL_H */ -- cgit v1.2.3 From 654f2b937b389295581bcb4aa26011a63db7bc8f Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 11 Apr 2018 09:45:20 -0300 Subject: crypto: caam - allow retrieving 'era' from register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'era' information can be retrieved from CAAM registers, so introduce a caam_get_era_from_hw() function that gets it via register reads in case the 'fsl,sec-era' property is not passed in the device tree. 
This function is based on the U-Boot implementation from drivers/crypto/fsl/sec.c Signed-off-by: Fabio Estevam Reviewed-by: Horia Geantă Tested-by: Breno Lima Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 56 ++++++++++++++++++++++++++++++++++++++++++---- drivers/crypto/caam/regs.h | 6 +++++ 2 files changed, 58 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index bee690ab8650..a28868d5b2d0 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -396,11 +396,56 @@ start_rng: clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC); } +static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl) +{ + static const struct { + u16 ip_id; + u8 maj_rev; + u8 era; + } id[] = { + {0x0A10, 1, 1}, + {0x0A10, 2, 2}, + {0x0A12, 1, 3}, + {0x0A14, 1, 3}, + {0x0A14, 2, 4}, + {0x0A16, 1, 4}, + {0x0A10, 3, 4}, + {0x0A11, 1, 4}, + {0x0A18, 1, 4}, + {0x0A11, 2, 5}, + {0x0A12, 2, 5}, + {0x0A13, 1, 5}, + {0x0A1C, 1, 5} + }; + u32 ccbvid, id_ms; + u8 maj_rev, era; + u16 ip_id; + int i; + + ccbvid = rd_reg32(&ctrl->perfmon.ccb_id); + era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT; + if (era) /* This is '0' prior to CAAM ERA-6 */ + return era; + + id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms); + ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT; + maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT; + + for (i = 0; i < ARRAY_SIZE(id); i++) + if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev) + return id[i].era; + + return -ENOTSUPP; +} + /** * caam_get_era() - Return the ERA of the SEC on SoC, based - * on "sec-era" propery in the DTS. This property is updated by u-boot. + * on "sec-era" optional property in the DTS. This property is updated + * by u-boot. + * In case this property is not passed an attempt to retrieve the CAAM + * era via register reads will be made. **/ -static int caam_get_era(void) +static int caam_get_era(struct caam_ctrl __iomem *ctrl) { struct device_node *caam_node; int ret; @@ -410,7 +455,10 @@ static int caam_get_era(void) ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); of_node_put(caam_node); - return ret ? 
-ENOTSUPP : prop; + if (!ret) + return prop; + else + return caam_get_era_from_hw(ctrl); } static const struct of_device_id caam_match[] = { @@ -622,7 +670,7 @@ static int caam_probe(struct platform_device *pdev) goto iounmap_ctrl; } - ctrlpriv->era = caam_get_era(); + ctrlpriv->era = caam_get_era(ctrl); ret = of_platform_populate(nprop, caam_match, NULL, dev); if (ret) { diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index fee363865d88..4fb91ba39c36 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -312,11 +312,17 @@ struct caam_perfmon { /* Component Instantiation Parameters fe0-fff */ u32 rtic_id; /* RVID - RTIC Version ID */ +#define CCBVID_ERA_MASK 0xff000000 +#define CCBVID_ERA_SHIFT 24 u32 ccb_id; /* CCBVID - CCB Version ID */ u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/ u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/ u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */ u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/ +#define SECVID_MS_IPID_MASK 0xffff0000 +#define SECVID_MS_IPID_SHIFT 16 +#define SECVID_MS_MAJ_REV_MASK 0x0000ff00 +#define SECVID_MS_MAJ_REV_SHIFT 8 u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */ u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */ }; -- cgit v1.2.3 From 8a2a0dd35f2e54c023d9041a5428b6c5639af86c Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Mon, 16 Apr 2018 08:07:05 -0500 Subject: crypto: caam - strip input zeros from RSA input buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes the provided RSA input buffer provided is not stripped of leading zeros. This could cause its size to be bigger than that of the modulus, making the HW complain: caam_jr 2142000.jr1: 40000789: DECO: desc idx 7: Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used. Fix the problem by stripping off the leading zero from input data before feeding it to the CAAM accelerator. 
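For a flat buffer the stripping itself is trivial; a sketch:

	/* drop leading 0x00 bytes so src_len <= modulus size */
	while (len && !*buf) {
		buf++;
		len--;
	}

What the patch below actually implements is this same walk across a scatterlist (via the sg_miter API), plus scatterwalk_ffwd() to re-point req->src past the stripped bytes, since the RSA input arrives as s/g entries rather than one contiguous buffer.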
Fixes: 8c419778ab57e ("crypto: caam - add support for RSA algorithm") Cc: # 4.8+ Reported-by: Martin Townsend Link: https://lkml.kernel.org/r/CABatt_ytYORYKtApcB4izhNanEKkGFi9XAQMjHi_n-8YWoCRiw@mail.gmail.com Signed-off-by: Horia Geantă Tested-by: Fabio Estevam Reviewed-by: Tudor Ambarus Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 54 +++++++++++++++++++++++++++++++++++++++++++ drivers/crypto/caam/caampkc.h | 8 +++++++ 2 files changed, 62 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 7a897209f181..979072b25eaa 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, akcipher_request_complete(req, err); } +static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, + unsigned int nbytes, + unsigned int flags) +{ + struct sg_mapping_iter miter; + int lzeros, ents; + unsigned int len; + unsigned int tbytes = nbytes; + const u8 *buff; + + ents = sg_nents_for_len(sgl, nbytes); + if (ents < 0) + return ents; + + sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags); + + lzeros = 0; + len = 0; + while (nbytes > 0) { + while (len && !*buff) { + lzeros++; + len--; + buff++; + } + + if (len && *buff) + break; + + sg_miter_next(&miter); + buff = miter.addr; + len = miter.length; + + nbytes -= lzeros; + lzeros = 0; + } + + miter.consumed = lzeros; + sg_miter_stop(&miter); + nbytes -= lzeros; + + return tbytes - nbytes; +} + static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, size_t desclen) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = ctx->dev; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; + int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0; int sgc; int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; int src_nents, dst_nents; + int lzeros; + + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); + if (lzeros < 0) + return ERR_PTR(lzeros); + + req->src_len -= lzeros; + req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); src_nents = sg_nents_for_len(req->src, req->src_len); dst_nents = sg_nents_for_len(req->dst, req->dst_len); @@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = { .max_size = caam_rsa_max_size, .init = caam_rsa_init_tfm, .exit = caam_rsa_exit_tfm, + .reqsize = sizeof(struct caam_rsa_req_ctx), .base = { .cra_name = "rsa", .cra_driver_name = "rsa-caam", diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index fd145c46eae1..82645bcf8b27 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h @@ -95,6 +95,14 @@ struct caam_rsa_ctx { struct device *dev; }; +/** + * caam_rsa_req_ctx - per request context. 
+ * @src: input scatterlist (stripped of leading zeros) + */ +struct caam_rsa_req_ctx { + struct scatterlist src[2]; +}; + /** * rsa_edesc - s/w-extended rsa descriptor * @src_nents : number of segments in input scatterlist -- cgit v1.2.3 From b930f3a2292d29e53caac1c82d44b655d8d40b72 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 16 Apr 2018 13:05:01 -0300 Subject: crypto: caam: - Use kmemdup() function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use kmemdup() rather than duplicating its implementation. By using kmemdup() we can also get rid of the 'val' variable. Detected with Coccinelle script. Signed-off-by: Fabio Estevam Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 979072b25eaa..6f990139f324 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -783,19 +783,12 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) */ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes) { - u8 *val; caam_rsa_drop_leading_zeros(&buf, nbytes); if (!*nbytes) return NULL; - val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL); - if (!val) - return NULL; - - memcpy(val, buf, *nbytes); - - return val; + return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL); } static int caam_rsa_check_key_length(unsigned int len) -- cgit v1.2.3 From ee1b23d1dfab7f75a18a9de36ef8d1a150f94db0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 17 Apr 2018 19:49:03 +0200 Subject: crypto: drivers - Remove depends on HAS_DMA in case of platform dependency Remove dependencies on HAS_DMA where a Kconfig symbol depends on another symbol that implies HAS_DMA, and, optionally, on "|| COMPILE_TEST". In most cases this other symbol is an architecture- or platform-specific symbol, or PCI. Generic symbols and drivers without platform dependencies keep their dependencies on HAS_DMA, to prevent compiling subsystems or drivers that cannot work anyway. This simplifies the dependencies and makes it easier to improve compile-testing.
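The pattern can be sketched on a hypothetical CRYPTO_DEV_FOO entry (not one of the entries touched by this patch). When ARCH_FOO can only be selected on systems that have DMA, the explicit dependency is redundant:

# before: HAS_DMA is already implied by ARCH_FOO
config CRYPTO_DEV_FOO
	tristate "Foo crypto accelerator"
	depends on ARCH_FOO || COMPILE_TEST
	depends on HAS_DMA

# after: the platform dependency alone is enough, and COMPILE_TEST
# builds are no longer blocked on configurations without HAS_DMA
config CRYPTO_DEV_FOO
	tristate "Foo crypto accelerator"
	depends on ARCH_FOO || COMPILE_TEST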
Signed-off-by: Geert Uytterhoeven Reviewed-by: Mark Brown Acked-by: Robin Murphy Acked-by: Herbert Xu Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index d1ea1a07cecb..3dbc47528667 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -419,7 +419,7 @@ config CRYPTO_DEV_EXYNOS_RNG config CRYPTO_DEV_S5P tristate "Support for Samsung S5PV210/Exynos crypto accelerator" depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST - depends on HAS_IOMEM && HAS_DMA + depends on HAS_IOMEM select CRYPTO_AES select CRYPTO_BLKCIPHER help @@ -466,7 +466,6 @@ endif # if CRYPTO_DEV_UX500 config CRYPTO_DEV_ATMEL_AUTHENC tristate "Support for Atmel IPSEC/SSL hw accelerator" - depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_AUTHENC select CRYPTO_DEV_ATMEL_AES @@ -479,7 +478,6 @@ config CRYPTO_DEV_ATMEL_AUTHENC config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" - depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_AES select CRYPTO_AEAD @@ -494,7 +492,6 @@ config CRYPTO_DEV_ATMEL_AES config CRYPTO_DEV_ATMEL_TDES tristate "Support for Atmel DES/TDES hw accelerator" - depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_DES select CRYPTO_BLKCIPHER @@ -508,7 +505,6 @@ config CRYPTO_DEV_ATMEL_TDES config CRYPTO_DEV_ATMEL_SHA tristate "Support for Atmel SHA hw accelerator" - depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_HASH help @@ -574,7 +570,8 @@ config CRYPTO_DEV_CAVIUM_ZIP config CRYPTO_DEV_QCE tristate "Qualcomm crypto engine accelerator" - depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM select CRYPTO_AES select CRYPTO_DES select CRYPTO_ECB @@ -598,7 +595,6 @@ source "drivers/crypto/vmx/Kconfig" config CRYPTO_DEV_IMGTEC_HASH tristate "Imagination Technologies hardware hash accelerator" depends on MIPS || COMPILE_TEST - depends on HAS_DMA select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_SHA256 @@ -650,7 +646,6 @@ config CRYPTO_DEV_ROCKCHIP config CRYPTO_DEV_MEDIATEK tristate "MediaTek's EIP97 Cryptographic Engine driver" - depends on HAS_DMA depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST select CRYPTO_AES select CRYPTO_AEAD @@ -688,7 +683,7 @@ source "drivers/crypto/stm32/Kconfig" config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" - depends on HAS_DMA && OF + depends on OF depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT) select CRYPTO_AES select CRYPTO_BLKCIPHER @@ -706,7 +701,6 @@ config CRYPTO_DEV_SAFEXCEL config CRYPTO_DEV_ARTPEC6 tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration." depends on ARM && (ARCH_ARTPEC || COMPILE_TEST) - depends on HAS_DMA depends on OF select CRYPTO_AEAD select CRYPTO_AES -- cgit v1.2.3 From 9dbc8a0328efa485a6f5b68b867f9f523a3fbeff Mon Sep 17 00:00:00 2001 From: Bin Liu Date: Tue, 17 Apr 2018 14:53:13 -0500 Subject: crypto: omap-sham - fix memleak Fixes: 8043bb1ae03cb ("crypto: omap-sham - convert driver logic to use sgs for data xmit") The memory pages freed in omap_sham_finish_req() were less than those allocated in omap_sham_copy_sgs(). 
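The rule being violated can be sketched with hypothetical helpers (not the driver's code): free_pages() must be given the same order the buffer was allocated with, and here the allocation covers ctx->bufcnt extra bytes:

#include <linux/gfp.h>
#include <linux/mm.h>

/* hypothetical helpers illustrating the alloc/free pairing */
static void *scratch_alloc(size_t sg_len, size_t bufcnt)
{
	/* the allocation order covers the buffered bytes as well */
	return (void *)__get_free_pages(GFP_ATOMIC,
					get_order(sg_len + bufcnt));
}

static void scratch_free(void *buf, size_t sg_len, size_t bufcnt)
{
	/*
	 * Freeing with get_order(sg_len) alone under-counts whenever
	 * bufcnt pushes the total across a page-order boundary,
	 * leaking the tail pages -- hence the one-line fix below.
	 */
	free_pages((unsigned long)buf, get_order(sg_len + bufcnt));
}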
Cc: stable@vger.kernel.org Signed-off-by: Bin Liu Acked-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ad02aa63b519..d1a1c74fb56a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1087,7 +1087,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) free_pages((unsigned long)sg_virt(ctx->sg), - get_order(ctx->sg->length)); + get_order(ctx->sg->length + ctx->bufcnt)); if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) kfree(ctx->sg); -- cgit v1.2.3 From 8ce31dca75c40f076c125547491983a037112c21 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 19 Apr 2018 16:05:36 +0200 Subject: crypto: drivers - simplify getting .drvdata We should get drvdata from struct device directly. Going via platform_device is an unneeded step back and forth. Signed-off-by: Wolfram Sang Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/exynos-rng.c | 6 ++---- drivers/crypto/picoxcell_crypto.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c index 86f5f459762e..2cfabb99cb6e 100644 --- a/drivers/crypto/exynos-rng.c +++ b/drivers/crypto/exynos-rng.c @@ -319,8 +319,7 @@ static int exynos_rng_remove(struct platform_device *pdev) static int __maybe_unused exynos_rng_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos_rng_dev *rng = platform_get_drvdata(pdev); + struct exynos_rng_dev *rng = dev_get_drvdata(dev); int ret; /* If we were never seeded then after resume it will be the same */ @@ -350,8 +349,7 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev) static int __maybe_unused exynos_rng_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos_rng_dev *rng = platform_get_drvdata(pdev); + struct exynos_rng_dev *rng = dev_get_drvdata(dev); int ret; /* Never seeded so nothing to do */ diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index a4df966adbf6..321d5e2ac833 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1169,8 +1169,7 @@ static void spacc_spacc_complete(unsigned long data) #ifdef CONFIG_PM static int spacc_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct spacc_engine *engine = platform_get_drvdata(pdev); + struct spacc_engine *engine = dev_get_drvdata(dev); /* * We only support standby mode. All we have to do is gate the clock to @@ -1184,8 +1183,7 @@ static int spacc_suspend(struct device *dev) static int spacc_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct spacc_engine *engine = platform_get_drvdata(pdev); + struct spacc_engine *engine = dev_get_drvdata(dev); return clk_enable(engine->clk); } -- cgit v1.2.3 From 5b0aa2556ec9ea98f98e2a802818f75827896d25 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 19 Apr 2018 21:19:43 +0530 Subject: crypto: cavium - Remove unnecessary parentheses This patch fixes the clang warning of extraneous parentheses, with the following coccinelle script. 
@@ identifier i; constant c; expression e; @@ ( !((e)) | -(( \(i == c\|i != c\|i <= c\|i < c\|i >= c\|i > c\) -)) ) Signed-off-by: Varsha Rao Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_regs.h | 42 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h index d0be682305c1..874e0236c87e 100644 --- a/drivers/crypto/cavium/zip/zip_regs.h +++ b/drivers/crypto/cavium/zip/zip_regs.h @@ -443,7 +443,7 @@ union zip_corex_bist_status { static inline u64 ZIP_COREX_BIST_STATUS(u64 param1) { - if (((param1 <= 1))) + if (param1 <= 1) return 0x0520ull + (param1 & 1) * 0x8ull; pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1); return 0; @@ -537,7 +537,7 @@ union zip_dbg_corex_inst { static inline u64 ZIP_DBG_COREX_INST(u64 param1) { - if (((param1 <= 1))) + if (param1 <= 1) return 0x0640ull + (param1 & 1) * 0x8ull; pr_err("ZIP_DBG_COREX_INST: %llu\n", param1); return 0; @@ -568,7 +568,7 @@ union zip_dbg_corex_sta { static inline u64 ZIP_DBG_COREX_STA(u64 param1) { - if (((param1 <= 1))) + if (param1 <= 1) return 0x0680ull + (param1 & 1) * 0x8ull; pr_err("ZIP_DBG_COREX_STA: %llu\n", param1); return 0; @@ -599,7 +599,7 @@ union zip_dbg_quex_sta { static inline u64 ZIP_DBG_QUEX_STA(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x1800ull + (param1 & 7) * 0x8ull; pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1); return 0; @@ -817,7 +817,7 @@ union zip_msix_pbax { static inline u64 ZIP_MSIX_PBAX(u64 param1) { - if (((param1 == 0))) + if (param1 == 0) return 0x0000838000FF0000ull; pr_err("ZIP_MSIX_PBAX: %llu\n", param1); return 0; @@ -846,7 +846,7 @@ union zip_msix_vecx_addr { static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1) { - if (((param1 <= 17))) + if (param1 <= 17) return 0x0000838000F00000ull + (param1 & 31) * 0x10ull; pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1); return 0; @@ -875,7 +875,7 @@ union zip_msix_vecx_ctl { static inline u64 ZIP_MSIX_VECX_CTL(u64 param1) { - if (((param1 <= 17))) + if (param1 <= 17) return 0x0000838000F00008ull + (param1 & 31) * 0x10ull; pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1); return 0; @@ -900,7 +900,7 @@ union zip_quex_done { static inline u64 ZIP_QUEX_DONE(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x2000ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_DONE: %llu\n", param1); return 0; @@ -925,7 +925,7 @@ union zip_quex_done_ack { static inline u64 ZIP_QUEX_DONE_ACK(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x2200ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1); return 0; @@ -950,7 +950,7 @@ union zip_quex_done_ena_w1c { static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x2600ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1); return 0; @@ -975,7 +975,7 @@ union zip_quex_done_ena_w1s { static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x2400ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1); return 0; @@ -1004,7 +1004,7 @@ union zip_quex_done_wait { static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x2800ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1); return 0; @@ -1029,7 +1029,7 @@ union zip_quex_doorbell { static inline u64 ZIP_QUEX_DOORBELL(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 
0x4000ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1); return 0; @@ -1058,7 +1058,7 @@ union zip_quex_err_ena_w1c { static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x3600ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1); return 0; @@ -1087,7 +1087,7 @@ union zip_quex_err_ena_w1s { static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x3400ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1); return 0; @@ -1120,7 +1120,7 @@ union zip_quex_err_int { static inline u64 ZIP_QUEX_ERR_INT(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x3000ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1); return 0; @@ -1150,7 +1150,7 @@ union zip_quex_err_int_w1s { static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x3200ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1); return 0; @@ -1179,7 +1179,7 @@ union zip_quex_gcfg { static inline u64 ZIP_QUEX_GCFG(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x1A00ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_GCFG: %llu\n", param1); return 0; @@ -1204,7 +1204,7 @@ union zip_quex_map { static inline u64 ZIP_QUEX_MAP(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x1400ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_MAP: %llu\n", param1); return 0; @@ -1236,7 +1236,7 @@ union zip_quex_sbuf_addr { static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x1000ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1); return 0; @@ -1276,7 +1276,7 @@ union zip_quex_sbuf_ctl { static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1) { - if (((param1 <= 7))) + if (param1 <= 7) return 0x1200ull + (param1 & 7) * 0x8ull; pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1); return 0; -- cgit v1.2.3 From a8d79d7bfb14f471914017103ee2329a74e5e89d Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:51 +0200 Subject: crypto: crypto4xx - performance optimizations This patch provides a cheap 2MiB/s+ (~ 6%) performance improvement over the current code. This is because the compiler can now optimize several endian swap memcpy. 
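A simplified stand-in for the driver's crypto4xx_memcpy_from_le32() helper (a sketch, not the driver code) shows where the win comes from: with a compile-time-constant length the byte-swapping loop can be fully unrolled, while a runtime length keeps the loop and its bounds checks:

#include <stddef.h>
#include <stdint.h>

/* simplified model of a 32-bit endian-swapping copy */
static inline void memcpy_swab32(uint32_t *dst, const uint32_t *src,
				 size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes / 4; i++)
		dst[i] = __builtin_bswap32(src[i]);
}

void copy_fixed(uint32_t *dst, const uint32_t *src)
{
	memcpy_swab32(dst, src, 16);	/* constant: unrolls to four swaps */
}

void copy_var(uint32_t *dst, const uint32_t *src, size_t n)
{
	memcpy_swab32(dst, src, n);	/* runtime: the loop remains */
}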
Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 32 +++++++++++++++++++------------- drivers/crypto/amcc/crypto4xx_core.c | 22 +++++++++++----------- drivers/crypto/amcc/crypto4xx_core.h | 6 ++++-- 3 files changed, 34 insertions(+), 26 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index ea83d0bff0e9..51fffd6c8411 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -74,32 +74,38 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, sa->sa_command_1.bf.copy_hdr = cp_hdr; } -int crypto4xx_encrypt(struct ablkcipher_request *req) +static inline int crypto4xx_crypt(struct ablkcipher_request *req, + const unsigned int ivlen, bool decrypt) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - unsigned int ivlen = crypto_ablkcipher_ivsize( - crypto_ablkcipher_reqtfm(req)); __le32 iv[ivlen]; if (ivlen) crypto4xx_memcpy_to_le32(iv, req->info, ivlen); return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, - req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0); + req->nbytes, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out, + ctx->sa_len, 0); } -int crypto4xx_decrypt(struct ablkcipher_request *req) +int crypto4xx_encrypt_noiv(struct ablkcipher_request *req) { - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - unsigned int ivlen = crypto_ablkcipher_ivsize( - crypto_ablkcipher_reqtfm(req)); - __le32 iv[ivlen]; + return crypto4xx_crypt(req, 0, false); +} - if (ivlen) - crypto4xx_memcpy_to_le32(iv, req->info, ivlen); +int crypto4xx_encrypt_iv(struct ablkcipher_request *req) +{ + return crypto4xx_crypt(req, AES_IV_SIZE, false); +} - return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, - req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0); +int crypto4xx_decrypt_noiv(struct ablkcipher_request *req) +{ + return crypto4xx_crypt(req, 0, true); +} + +int crypto4xx_decrypt_iv(struct ablkcipher_request *req) +{ + return crypto4xx_crypt(req, AES_IV_SIZE, true); } /** diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 76f459ad2821..5a0a4c157700 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -580,7 +580,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev, struct scatterlist *dst = pd_uinfo->dest_va; size_t cp_len = crypto_aead_authsize( crypto_aead_reqtfm(aead_req)); - u32 icv[cp_len]; + u32 icv[AES_BLOCK_SIZE]; int err = 0; if (pd_uinfo->using_sd) { @@ -595,7 +595,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev, if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) { /* append icv at the end */ crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest, - cp_len); + sizeof(icv)); scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen, cp_len, 1); @@ -605,7 +605,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev, aead_req->assoclen + aead_req->cryptlen - cp_len, cp_len, 0); - crypto4xx_memcpy_from_le32(icv, icv, cp_len); + crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv)); if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len)) err = -EBADMSG; @@ -1122,8 +1122,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_cbc, - .encrypt = crypto4xx_encrypt, - .decrypt = crypto4xx_decrypt, + .encrypt = crypto4xx_encrypt_iv, + .decrypt = 
crypto4xx_decrypt_iv, } } }}, @@ -1146,8 +1146,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_cfb, - .encrypt = crypto4xx_encrypt, - .decrypt = crypto4xx_decrypt, + .encrypt = crypto4xx_encrypt_iv, + .decrypt = crypto4xx_decrypt_iv, } } } }, @@ -1195,8 +1195,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = crypto4xx_setkey_aes_ecb, - .encrypt = crypto4xx_encrypt, - .decrypt = crypto4xx_decrypt, + .encrypt = crypto4xx_encrypt_noiv, + .decrypt = crypto4xx_decrypt_noiv, } } } }, @@ -1219,8 +1219,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_ofb, - .encrypt = crypto4xx_encrypt, - .decrypt = crypto4xx_decrypt, + .encrypt = crypto4xx_encrypt_iv, + .decrypt = crypto4xx_decrypt_iv, } } } }, diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 23b726da6534..c240199472da 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -168,8 +168,10 @@ int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen); -int crypto4xx_encrypt(struct ablkcipher_request *req); -int crypto4xx_decrypt(struct ablkcipher_request *req); +int crypto4xx_encrypt_iv(struct ablkcipher_request *req); +int crypto4xx_decrypt_iv(struct ablkcipher_request *req); +int crypto4xx_encrypt_noiv(struct ablkcipher_request *req); +int crypto4xx_decrypt_noiv(struct ablkcipher_request *req); int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req); int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req); int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); -- cgit v1.2.3 From ce05ffe10457bda487fa049016a6ba79934bdece Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:52 +0200 Subject: crypto: crypto4xx - convert to skcipher The ablkcipher APIs have been effectively deprecated since [1]. This patch converts the crypto4xx driver to the new skcipher APIs. 
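Schematically, the conversion boils down to a handful of substitutions; a sketch with hypothetical foo_* names (not this driver's code): req->info becomes req->iv, req->nbytes becomes req->cryptlen, and the context is reached through the skcipher handle instead of the bare tfm:

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <crypto/internal/skcipher.h>

struct foo_ctx {			/* hypothetical per-tfm context */
	u32 key[8];
};

/* stand-in for a driver's hardware submission path */
static int foo_submit(struct foo_ctx *ctx, struct scatterlist *src,
		      struct scatterlist *dst, unsigned int len,
		      const u8 *iv)
{
	return -EINPROGRESS;
}

static int foo_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* was req->info and req->nbytes on the ablkcipher side */
	return foo_submit(ctx, req->src, req->dst, req->cryptlen, req->iv);
}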
[1] Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 60 +++++---- drivers/crypto/amcc/crypto4xx_core.c | 255 ++++++++++++++++------------------- drivers/crypto/amcc/crypto4xx_core.h | 25 ++-- 3 files changed, 163 insertions(+), 177 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 51fffd6c8411..2a8e4f0fe1e9 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "crypto4xx_reg_def.h" #include "crypto4xx_core.h" #include "crypto4xx_sa.h" @@ -74,36 +75,37 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, sa->sa_command_1.bf.copy_hdr = cp_hdr; } -static inline int crypto4xx_crypt(struct ablkcipher_request *req, +static inline int crypto4xx_crypt(struct skcipher_request *req, const unsigned int ivlen, bool decrypt) { - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); __le32 iv[ivlen]; if (ivlen) - crypto4xx_memcpy_to_le32(iv, req->info, ivlen); + crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, - req->nbytes, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out, + req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len, 0); } -int crypto4xx_encrypt_noiv(struct ablkcipher_request *req) +int crypto4xx_encrypt_noiv(struct skcipher_request *req) { return crypto4xx_crypt(req, 0, false); } -int crypto4xx_encrypt_iv(struct ablkcipher_request *req) +int crypto4xx_encrypt_iv(struct skcipher_request *req) { return crypto4xx_crypt(req, AES_IV_SIZE, false); } -int crypto4xx_decrypt_noiv(struct ablkcipher_request *req) +int crypto4xx_decrypt_noiv(struct skcipher_request *req) { return crypto4xx_crypt(req, 0, true); } -int crypto4xx_decrypt_iv(struct ablkcipher_request *req) +int crypto4xx_decrypt_iv(struct skcipher_request *req) { return crypto4xx_crypt(req, AES_IV_SIZE, true); } @@ -111,20 +113,19 @@ int crypto4xx_decrypt_iv(struct ablkcipher_request *req) /** * AES Functions */ -static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, +static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen, unsigned char cm, u8 fb) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); struct dynamic_sa_ctl *sa; int rc; if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) { - crypto_ablkcipher_set_flags(cipher, + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -164,39 +165,38 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, return 0; } -int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC, CRYPTO_FEEDBACK_MODE_NO_FB); } -int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB, CRYPTO_FEEDBACK_MODE_128BIT_CFB); } -int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher 
*cipher, +int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB, CRYPTO_FEEDBACK_MODE_NO_FB); } -int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB, CRYPTO_FEEDBACK_MODE_64BIT_OFB); } -int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); int rc; rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE, @@ -210,31 +210,33 @@ int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, return 0; } -int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req) +int crypto4xx_rfc3686_encrypt(struct skcipher_request *req) { - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); __le32 iv[AES_IV_SIZE / 4] = { ctx->iv_nonce, - cpu_to_le32p((u32 *) req->info), - cpu_to_le32p((u32 *) (req->info + 4)), + cpu_to_le32p((u32 *) req->iv), + cpu_to_le32p((u32 *) (req->iv + 4)), cpu_to_le32(1) }; return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, - req->nbytes, iv, AES_IV_SIZE, + req->cryptlen, iv, AES_IV_SIZE, ctx->sa_out, ctx->sa_len, 0); } -int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req) +int crypto4xx_rfc3686_decrypt(struct skcipher_request *req) { - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); __le32 iv[AES_IV_SIZE / 4] = { ctx->iv_nonce, - cpu_to_le32p((u32 *) req->info), - cpu_to_le32p((u32 *) (req->info + 4)), + cpu_to_le32p((u32 *) req->iv), + cpu_to_le32p((u32 *) (req->iv + 4)), cpu_to_le32(1) }; return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, - req->nbytes, iv, AES_IV_SIZE, + req->cryptlen, iv, AES_IV_SIZE, ctx->sa_out, ctx->sa_len, 0); } diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 5a0a4c157700..7d0629626d15 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include "crypto4xx_reg_def.h" @@ -526,21 +527,19 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, } } -static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, +static void crypto4xx_cipher_done(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo, struct ce_pd *pd) { - struct crypto4xx_ctx *ctx; - struct ablkcipher_request *ablk_req; + struct skcipher_request *req; struct scatterlist *dst; dma_addr_t addr; - ablk_req = ablkcipher_request_cast(pd_uinfo->async_req); - ctx = crypto_tfm_ctx(ablk_req->base.tfm); + req = skcipher_request_cast(pd_uinfo->async_req); if (pd_uinfo->using_sd) { - crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes, - ablk_req->dst); + crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, + req->cryptlen, req->dst); } else { dst = pd_uinfo->dest_va; addr = dma_map_page(dev->core_dev->device, sg_page(dst), @@ -549,8 +548,8 @@ static void 
crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, crypto4xx_ret_sg_desc(dev, pd_uinfo); if (pd_uinfo->state & PD_ENTRY_BUSY) - ablkcipher_request_complete(ablk_req, -EINPROGRESS); - ablkcipher_request_complete(ablk_req, 0); + skcipher_request_complete(req, -EINPROGRESS); + skcipher_request_complete(req, 0); } static void crypto4xx_ahash_done(struct crypto4xx_device *dev, @@ -641,8 +640,8 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx]; switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) { - case CRYPTO_ALG_TYPE_ABLKCIPHER: - crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); + case CRYPTO_ALG_TYPE_SKCIPHER: + crypto4xx_cipher_done(dev, pd_uinfo, pd); break; case CRYPTO_ALG_TYPE_AEAD: crypto4xx_aead_done(dev, pd_uinfo, pd); @@ -936,15 +935,14 @@ static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg, ctx->sa_len = 0; } -static int crypto4xx_ablk_init(struct crypto_tfm *tfm) +static int crypto4xx_sk_init(struct crypto_skcipher *sk) { - struct crypto_alg *alg = tfm->__crt_alg; + struct skcipher_alg *alg = crypto_skcipher_alg(sk); struct crypto4xx_alg *amcc_alg; - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk); amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher); crypto4xx_ctx_init(amcc_alg, ctx); - tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); return 0; } @@ -953,9 +951,11 @@ static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx) crypto4xx_free_sa(ctx); } -static void crypto4xx_ablk_exit(struct crypto_tfm *tfm) +static void crypto4xx_sk_exit(struct crypto_skcipher *sk) { - crypto4xx_common_exit(crypto_tfm_ctx(tfm)); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk); + + crypto4xx_common_exit(ctx); } static int crypto4xx_aead_init(struct crypto_aead *tfm) @@ -1012,7 +1012,7 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, break; default: - rc = crypto_register_alg(&alg->alg.u.cipher); + rc = crypto_register_skcipher(&alg->alg.u.cipher); break; } @@ -1041,7 +1041,7 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) break; default: - crypto_unregister_alg(&alg->alg.u.cipher); + crypto_unregister_skcipher(&alg->alg.u.cipher); } kfree(alg); } @@ -1103,126 +1103,109 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data) */ static struct crypto4xx_alg_common crypto4xx_alg[] = { /* Crypto AES modes */ - { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { - .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-ppc4xx", - .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto4xx_ctx), - .cra_type = &crypto_ablkcipher_type, - .cra_init = crypto4xx_ablk_init, - .cra_exit = crypto4xx_ablk_exit, - .cra_module = THIS_MODULE, - .cra_u = { - .ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_IV_SIZE, - .setkey = crypto4xx_setkey_aes_cbc, - .encrypt = crypto4xx_encrypt_iv, - .decrypt = crypto4xx_decrypt_iv, - } - } - }}, - { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { - .cra_name = "cfb(aes)", - .cra_driver_name = "cfb-aes-ppc4xx", - .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto4xx_ctx), - 
.cra_type = &crypto_ablkcipher_type, - .cra_init = crypto4xx_ablk_init, - .cra_exit = crypto4xx_ablk_exit, - .cra_module = THIS_MODULE, - .cra_u = { - .ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_IV_SIZE, - .setkey = crypto4xx_setkey_aes_cfb, - .encrypt = crypto4xx_encrypt_iv, - .decrypt = crypto4xx_decrypt_iv, - } - } + { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto4xx_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_IV_SIZE, + .setkey = crypto4xx_setkey_aes_cbc, + .encrypt = crypto4xx_encrypt_iv, + .decrypt = crypto4xx_decrypt_iv, + .init = crypto4xx_sk_init, + .exit = crypto4xx_sk_exit, } }, - { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { - .cra_name = "rfc3686(ctr(aes))", - .cra_driver_name = "rfc3686-ctr-aes-ppc4xx", - .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto4xx_ctx), - .cra_type = &crypto_ablkcipher_type, - .cra_init = crypto4xx_ablk_init, - .cra_exit = crypto4xx_ablk_exit, - .cra_module = THIS_MODULE, - .cra_u = { - .ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE + - CTR_RFC3686_NONCE_SIZE, - .max_keysize = AES_MAX_KEY_SIZE + - CTR_RFC3686_NONCE_SIZE, - .ivsize = CTR_RFC3686_IV_SIZE, - .setkey = crypto4xx_setkey_rfc3686, - .encrypt = crypto4xx_rfc3686_encrypt, - .decrypt = crypto4xx_rfc3686_decrypt, - } - } + { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { + .base = { + .cra_name = "cfb(aes)", + .cra_driver_name = "cfb-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto4xx_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_IV_SIZE, + .setkey = crypto4xx_setkey_aes_cfb, + .encrypt = crypto4xx_encrypt_iv, + .decrypt = crypto4xx_decrypt_iv, + .init = crypto4xx_sk_init, + .exit = crypto4xx_sk_exit, } }, - { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-ppc4xx", - .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto4xx_ctx), - .cra_type = &crypto_ablkcipher_type, - .cra_init = crypto4xx_ablk_init, - .cra_exit = crypto4xx_ablk_exit, - .cra_module = THIS_MODULE, - .cra_u = { - .ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = crypto4xx_setkey_aes_ecb, - .encrypt = crypto4xx_encrypt_noiv, - .decrypt = crypto4xx_decrypt_noiv, - } - } + { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct 
crypto4xx_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .setkey = crypto4xx_setkey_rfc3686, + .encrypt = crypto4xx_rfc3686_encrypt, + .decrypt = crypto4xx_rfc3686_decrypt, + .init = crypto4xx_sk_init, + .exit = crypto4xx_sk_exit, } }, - { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { - .cra_name = "ofb(aes)", - .cra_driver_name = "ofb-aes-ppc4xx", - .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto4xx_ctx), - .cra_type = &crypto_ablkcipher_type, - .cra_init = crypto4xx_ablk_init, - .cra_exit = crypto4xx_ablk_exit, - .cra_module = THIS_MODULE, - .cra_u = { - .ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_IV_SIZE, - .setkey = crypto4xx_setkey_aes_ofb, - .encrypt = crypto4xx_encrypt_iv, - .decrypt = crypto4xx_decrypt_iv, - } - } + { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto4xx_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = crypto4xx_setkey_aes_ecb, + .encrypt = crypto4xx_encrypt_noiv, + .decrypt = crypto4xx_decrypt_noiv, + .init = crypto4xx_sk_init, + .exit = crypto4xx_sk_exit, + } }, + { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { + .base = { + .cra_name = "ofb(aes)", + .cra_driver_name = "ofb-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto4xx_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_IV_SIZE, + .setkey = crypto4xx_setkey_aes_ofb, + .encrypt = crypto4xx_encrypt_iv, + .decrypt = crypto4xx_decrypt_iv, + .init = crypto4xx_sk_init, + .exit = crypto4xx_sk_exit, } }, /* AEAD */ diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index c240199472da..863cc558bc29 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -25,6 +25,7 @@ #include #include #include +#include #include "crypto4xx_reg_def.h" #include "crypto4xx_sa.h" @@ -134,7 +135,7 @@ struct crypto4xx_ctx { struct crypto4xx_alg_common { u32 type; union { - struct crypto_alg cipher; + struct skcipher_alg cipher; struct ahash_alg hash; struct aead_alg aead; } u; @@ -158,22 +159,22 @@ int crypto4xx_build_pd(struct crypto_async_request *req, const struct dynamic_sa_ctl *sa, const unsigned int sa_len, const unsigned int assoclen); -int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); -int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); -int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher, const u8 
*key, unsigned int keylen); -int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); -int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, +int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); -int crypto4xx_encrypt_iv(struct ablkcipher_request *req); -int crypto4xx_decrypt_iv(struct ablkcipher_request *req); -int crypto4xx_encrypt_noiv(struct ablkcipher_request *req); -int crypto4xx_decrypt_noiv(struct ablkcipher_request *req); -int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req); -int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req); +int crypto4xx_encrypt_iv(struct skcipher_request *req); +int crypto4xx_decrypt_iv(struct skcipher_request *req); +int crypto4xx_encrypt_noiv(struct skcipher_request *req); +int crypto4xx_decrypt_noiv(struct skcipher_request *req); +int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); +int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); int crypto4xx_hash_digest(struct ahash_request *req); int crypto4xx_hash_final(struct ahash_request *req); -- cgit v1.2.3 From c4e90650ff0cbf123ec9cfc32026fa0fb2931658 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:53 +0200 Subject: crypto: crypto4xx - avoid VLA use This patch fixes some of the -Wvla warnings. crypto4xx_alg.c:83:19: warning: Variable length array is used. crypto4xx_alg.c:273:56: warning: Variable length array is used. crypto4xx_alg.c:380:32: warning: Variable length array is used. Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 2a8e4f0fe1e9..2dfeb71deca9 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -80,7 +80,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req, { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); - __le32 iv[ivlen]; + __le32 iv[AES_IV_SIZE]; if (ivlen) crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); @@ -270,13 +270,7 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, static int crypto4xx_aead_fallback(struct aead_request *req, struct crypto4xx_ctx *ctx, bool do_decrypt) { - char aead_req_data[sizeof(struct aead_request) + - crypto_aead_reqsize(ctx->sw_cipher.aead)] - __aligned(__alignof__(struct aead_request)); - - struct aead_request *subreq = (void *) aead_req_data; - - memset(subreq, 0, sizeof(aead_req_data)); + struct aead_request *subreq = aead_request_ctx(req); aead_request_set_tfm(subreq, ctx->sw_cipher.aead); aead_request_set_callback(subreq, req->base.flags, @@ -377,7 +371,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) struct crypto_aead *aead = crypto_aead_reqtfm(req); unsigned int len = req->cryptlen; __le32 iv[16]; - u32 tmp_sa[ctx->sa_len * 4]; + u32 tmp_sa[SA_AES128_CCM_LEN + 4]; struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa; if (crypto4xx_aead_need_fallback(req, true, decrypt)) @@ -386,7 +380,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) if (decrypt) len -= crypto_aead_authsize(aead); - memcpy(tmp_sa, decrypt ? 
ctx->sa_in : ctx->sa_out, sizeof(tmp_sa)); + memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4); sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2; if (req->iv[0] == 1) { -- cgit v1.2.3 From 98e87e3d933b8e504ea41b8857c038d2cd06cddc Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:54 +0200 Subject: crypto: crypto4xx - add aes-ctr support This patch adds support for the aes-ctr skcipher. name : ctr(aes) driver : ctr-aes-ppc4xx module : crypto4xx priority : 300 refcnt : 1 selftest : passed internal : no type : skcipher async : yes blocksize : 16 min keysize : 16 max keysize : 32 ivsize : 16 chunksize : 16 walksize : 16 The hardware uses only the last 32-bits as the counter while the kernel tests (aes_ctr_enc_tv_template[4] for example) expect that the whole IV is a counter. To make this work, the driver will fall back if the counter is going to overflow. The aead's crypto4xx_setup_fallback() function is renamed to crypto4xx_aead_setup_fallback. Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + drivers/crypto/amcc/crypto4xx_alg.c | 91 +++++++++++++++++++++++++++++++++--- drivers/crypto/amcc/crypto4xx_core.c | 37 +++++++++++++++ drivers/crypto/amcc/crypto4xx_core.h | 5 ++ 4 files changed, 128 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 3dbc47528667..1fa263adbcc6 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -302,6 +302,7 @@ config CRYPTO_DEV_PPC4XX select CRYPTO_AEAD select CRYPTO_AES select CRYPTO_CCM + select CRYPTO_CTR select CRYPTO_GCM select CRYPTO_BLKCIPHER help diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 2dfeb71deca9..8a352ddefd52 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -240,6 +240,85 @@ int crypto4xx_rfc3686_decrypt(struct skcipher_request *req) ctx->sa_out, ctx->sa_len, 0); } +static int +crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); + size_t iv_len = crypto_skcipher_ivsize(cipher); + unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4)); + unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / + AES_BLOCK_SIZE; + + /* + * The hardware uses only the last 32-bits as the counter while the + * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that + * the whole IV is a counter. So fall back if the counter is going to + * overflow. + */ + if (counter + nblks < counter) { + struct skcipher_request *subreq = skcipher_request_ctx(req); + int ret; + + skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + ret = encrypt ? crypto_skcipher_encrypt(subreq) + : crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); + return ret; + } + + return encrypt ?
crypto4xx_encrypt_iv(req) + : crypto4xx_decrypt_iv(req); +} + +static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, + struct crypto_skcipher *cipher, + const u8 *key, + unsigned int keylen) +{ + int rc; + + crypto_skcipher_clear_flags(ctx->sw_cipher.cipher, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->sw_cipher.cipher, + crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); + rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen); + crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK); + crypto_skcipher_set_flags(cipher, + crypto_skcipher_get_flags(ctx->sw_cipher.cipher) & + CRYPTO_TFM_RES_MASK); + + return rc; +} + +int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); + int rc; + + rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen); + if (rc) + return rc; + + return crypto4xx_setkey_aes(cipher, key, keylen, + CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB); +} + +int crypto4xx_encrypt_ctr(struct skcipher_request *req) +{ + return crypto4xx_ctr_crypt(req, true); +} + +int crypto4xx_decrypt_ctr(struct skcipher_request *req) +{ + return crypto4xx_ctr_crypt(req, false); +} + static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, bool is_ccm, bool decrypt) { @@ -282,10 +361,10 @@ static int crypto4xx_aead_fallback(struct aead_request *req, crypto_aead_encrypt(subreq); } -static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx, - struct crypto_aead *cipher, - const u8 *key, - unsigned int keylen) +static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx, + struct crypto_aead *cipher, + const u8 *key, + unsigned int keylen) { int rc; @@ -313,7 +392,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key, struct dynamic_sa_ctl *sa; int rc = 0; - rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen); + rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen); if (rc) return rc; @@ -472,7 +551,7 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, return -EINVAL; } - rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen); + rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen); if (rc) return rc; diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 7d0629626d15..73963928d91b 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -941,6 +941,19 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk) struct crypto4xx_alg *amcc_alg; struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk); + if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) { + ctx->sw_cipher.cipher = + crypto_alloc_skcipher(alg->base.cra_name, 0, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->sw_cipher.cipher)) + return PTR_ERR(ctx->sw_cipher.cipher); + + crypto_skcipher_set_reqsize(sk, + sizeof(struct skcipher_request) + 32 + + crypto_skcipher_reqsize(ctx->sw_cipher.cipher)); + } + amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher); crypto4xx_ctx_init(amcc_alg, ctx); return 0; @@ -956,6 +969,8 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk) struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk); crypto4xx_common_exit(ctx); + if (ctx->sw_cipher.cipher) + crypto_free_skcipher(ctx->sw_cipher.cipher); } static int crypto4xx_aead_init(struct crypto_aead *tfm) @@ -1145,6 +1160,28 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .init = crypto4xx_sk_init, .exit = 
crypto4xx_sk_exit, } }, + { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-ppc4xx", + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto4xx_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_IV_SIZE, + .setkey = crypto4xx_setkey_aes_ctr, + .encrypt = crypto4xx_encrypt_ctr, + .decrypt = crypto4xx_decrypt_ctr, + .init = crypto4xx_sk_init, + .exit = crypto4xx_sk_exit, + } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = { .base = { .cra_name = "rfc3686(ctr(aes))", diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 863cc558bc29..279f650c4f1c 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -128,6 +128,7 @@ struct crypto4xx_ctx { __le32 iv_nonce; u32 sa_len; union { + struct crypto_skcipher *cipher; struct crypto_aead *aead; } sw_cipher; }; @@ -163,12 +164,16 @@ int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); +int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen); int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); +int crypto4xx_encrypt_ctr(struct skcipher_request *req); +int crypto4xx_decrypt_ctr(struct skcipher_request *req); int crypto4xx_encrypt_iv(struct skcipher_request *req); int crypto4xx_decrypt_iv(struct skcipher_request *req); int crypto4xx_encrypt_noiv(struct skcipher_request *req); -- cgit v1.2.3 From fc340115ffb8235c1bbd200c28855e6373d0dd1a Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:55 +0200 Subject: crypto: crypto4xx - properly set IV after de- and encrypt This patch fixes the cts(cbc(aes)) test when cbc-aes-ppc4xx is used. alg: skcipher: Test 1 failed (invalid result) on encryption for cts(cbc-aes-ppc4xx) 00000000: 4b 10 75 fc 2f 14 1b 6a 27 35 37 33 d1 b7 70 05 00000010: 97 alg: skcipher: Failed to load transform for cts(cbc(aes)): -2 The CTS cipher mode expects the IV (req->iv) of skcipher_request to contain the last ciphertext block after the {en,de}crypt operation is complete. Fix this issue for the AMCC Crypto4xx hardware engine. The tcrypt test case for cts(cbc(aes)) now passes correctly.
name : cts(cbc(aes)) driver : cts(cbc-aes-ppc4xx) module : cts priority : 300 refcnt : 1 selftest : passed internal : no type : skcipher async : yes blocksize : 16 min keysize : 16 max keysize : 32 ivsize : 16 chunksize : 16 walksize : 16 Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 3 ++- drivers/crypto/amcc/crypto4xx_core.c | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 8a352ddefd52..5abe86f0b4c6 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -141,7 +141,8 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, /* Setup SA */ sa = ctx->sa_in; - set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, + set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ? + SA_SAVE_IV : SA_NOT_SAVE_IV), SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, SA_NO_HEADER_PROC, SA_HASH_ALG_NULL, SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 73963928d91b..605398c89ccd 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -545,6 +545,15 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, addr = dma_map_page(dev->core_dev->device, sg_page(dst), dst->offset, dst->length, DMA_FROM_DEVICE); } + + if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) { + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + + crypto4xx_memcpy_from_le32((u32 *)req->iv, + pd_uinfo->sr_va->save_iv, + crypto_skcipher_ivsize(skcipher)); + } + crypto4xx_ret_sg_desc(dev, pd_uinfo); if (pd_uinfo->state & PD_ENTRY_BUSY) -- cgit v1.2.3 From 584201f1895d915c1aa523bc86afdc126e94beca Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:56 +0200 Subject: crypto: crypto4xx - extend aead fallback checks 1020 bytes is the limit for associated data. Any more and it will no longer fit into hash_crypto_offset. The hardware will not process aead requests whose plaintext has fewer than AES_BLOCK_SIZE bytes. When decrypting aead requests the authsize has to be taken into account as well, as it is part of the cryptlen. Otherwise the hardware will think it has been misconfigured and will return: aead return err status = 0x98 For rfc4543(gcm(aes)), the hardware has a dedicated GMAC mode as part of the hash function set. Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 5abe86f0b4c6..c952bcfdd6ae 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -321,6 +321,7 @@ int crypto4xx_decrypt_ctr(struct skcipher_request *req) } static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, + unsigned int len, bool is_ccm, bool decrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -330,14 +331,14 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, return true; /* - * hardware does not handle cases where cryptlen - * is less than a block + * hardware does not handle cases where plaintext + * is less than a block.
*/ - if (req->cryptlen < AES_BLOCK_SIZE) + if (len < AES_BLOCK_SIZE) return true; - /* assoc len needs to be a multiple of 4 */ - if (req->assoclen & 0x3) + /* assoc len needs to be a multiple of 4 and <= 1020 */ + if (req->assoclen & 0x3 || req->assoclen > 1020) return true; /* CCM supports only counter field length of 2 and 4 bytes */ @@ -449,17 +450,17 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_aead *aead = crypto_aead_reqtfm(req); - unsigned int len = req->cryptlen; __le32 iv[16]; u32 tmp_sa[SA_AES128_CCM_LEN + 4]; struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa; - - if (crypto4xx_aead_need_fallback(req, true, decrypt)) - return crypto4xx_aead_fallback(req, ctx, decrypt); + unsigned int len = req->cryptlen; if (decrypt) len -= crypto_aead_authsize(aead); + if (crypto4xx_aead_need_fallback(req, len, true, decrypt)) + return crypto4xx_aead_fallback(req, ctx, decrypt); + memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4); sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2; @@ -605,18 +606,19 @@ static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req, bool decrypt) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - unsigned int len = req->cryptlen; + struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req); __le32 iv[4]; + unsigned int len = req->cryptlen; + + if (decrypt) + len -= crypto_aead_authsize(crypto_aead_reqtfm(req)); - if (crypto4xx_aead_need_fallback(req, false, decrypt)) + if (crypto4xx_aead_need_fallback(req, len, false, decrypt)) return crypto4xx_aead_fallback(req, ctx, decrypt); crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE); iv[3] = cpu_to_le32(1); - if (decrypt) - len -= crypto_aead_authsize(crypto_aead_reqtfm(req)); - return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, len, iv, sizeof(iv), decrypt ? ctx->sa_in : ctx->sa_out, -- cgit v1.2.3 From 658c9d2b9f374c835d0348d852a3f002196628d0 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Thu, 19 Apr 2018 18:41:57 +0200 Subject: crypto: crypto4xx - put temporary dst sg into request ctx This patch fixes a crash that happens when testing rfc4543(gcm(aes)): Unable to handle kernel paging request for data at address 0xf59b3420 Faulting instruction address: 0xc0012994 Oops: Kernel access of bad area, sig: 11 [#1] BE PowerPC 44x Platform Modules linked in: tcrypt(+) crypto4xx [...] CPU: 0 PID: 0 Comm: swapper Tainted: G O 4.17.0-rc1+ #23 NIP: c0012994 LR: d3077934 CTR: 06026d49 REGS: cfff7e30 TRAP: 0300 Tainted: G O (4.17.0-rc1+) MSR: 00029000 CR: 44744822 XER: 00000000 DEAR: f59b3420 ESR: 00000000 NIP [c0012994] __dma_sync+0x58/0x10c LR [d3077934] crypto4xx_bh_tasklet_cb+0x188/0x3c8 [crypto4xx] __dma_sync was fed the temporary _dst that crypto4xx_build_pd() had in its function stack. This clearly never worked. This patch therefore overhauls the code from the original driver and puts the temporary dst sg list into the aead request context.
Fixes: a0aae821ba3d3 ("crypto: crypto4xx - prepare for AEAD support") Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 15 ++++++++------- drivers/crypto/amcc/crypto4xx_core.c | 10 +++++----- drivers/crypto/amcc/crypto4xx_core.h | 7 ++++++- 3 files changed, 19 insertions(+), 13 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index c952bcfdd6ae..f5c07498ea4f 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -87,7 +87,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req, return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out, - ctx->sa_len, 0); + ctx->sa_len, 0, NULL); } int crypto4xx_encrypt_noiv(struct skcipher_request *req) @@ -223,7 +223,7 @@ int crypto4xx_rfc3686_encrypt(struct skcipher_request *req) return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, req->cryptlen, iv, AES_IV_SIZE, - ctx->sa_out, ctx->sa_len, 0); + ctx->sa_out, ctx->sa_len, 0, NULL); } int crypto4xx_rfc3686_decrypt(struct skcipher_request *req) @@ -238,7 +238,7 @@ int crypto4xx_rfc3686_decrypt(struct skcipher_request *req) return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, req->cryptlen, iv, AES_IV_SIZE, - ctx->sa_out, ctx->sa_len, 0); + ctx->sa_out, ctx->sa_len, 0, NULL); } static int @@ -449,6 +449,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key, static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); __le32 iv[16]; u32 tmp_sa[SA_AES128_CCM_LEN + 4]; @@ -474,7 +475,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, len, iv, sizeof(iv), - sa, ctx->sa_len, req->assoclen); + sa, ctx->sa_len, req->assoclen, rctx->dst); } int crypto4xx_encrypt_aes_ccm(struct aead_request *req) @@ -622,7 +623,7 @@ static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req, return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, len, iv, sizeof(iv), decrypt ? 
ctx->sa_in : ctx->sa_out, - ctx->sa_len, req->assoclen); + ctx->sa_len, req->assoclen, rctx->dst); } int crypto4xx_encrypt_aes_gcm(struct aead_request *req) @@ -707,7 +708,7 @@ int crypto4xx_hash_update(struct ahash_request *req) return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, req->nbytes, NULL, 0, ctx->sa_in, - ctx->sa_len, 0); + ctx->sa_len, 0, NULL); } int crypto4xx_hash_final(struct ahash_request *req) @@ -726,7 +727,7 @@ int crypto4xx_hash_digest(struct ahash_request *req) return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, req->nbytes, NULL, 0, ctx->sa_in, - ctx->sa_len, 0); + ctx->sa_len, 0, NULL); } /** diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 605398c89ccd..9cb234c72549 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -695,9 +695,9 @@ int crypto4xx_build_pd(struct crypto_async_request *req, const __le32 *iv, const u32 iv_len, const struct dynamic_sa_ctl *req_sa, const unsigned int sa_len, - const unsigned int assoclen) + const unsigned int assoclen, + struct scatterlist *_dst) { - struct scatterlist _dst[2]; struct crypto4xx_device *dev = ctx->dev; struct dynamic_sa_ctl *sa; struct ce_gd *gd; @@ -996,9 +996,9 @@ static int crypto4xx_aead_init(struct crypto_aead *tfm) amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead); crypto4xx_ctx_init(amcc_alg, ctx); - crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + - max(sizeof(struct crypto4xx_ctx), 32 + - crypto_aead_reqsize(ctx->sw_cipher.aead))); + crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 + + crypto_aead_reqsize(ctx->sw_cipher.aead), + sizeof(struct crypto4xx_aead_reqctx))); return 0; } diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 279f650c4f1c..e2ca56722f07 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -133,6 +133,10 @@ struct crypto4xx_ctx { } sw_cipher; }; +struct crypto4xx_aead_reqctx { + struct scatterlist dst[2]; +}; + struct crypto4xx_alg_common { u32 type; union { @@ -159,7 +163,8 @@ int crypto4xx_build_pd(struct crypto_async_request *req, const __le32 *iv, const u32 iv_len, const struct dynamic_sa_ctl *sa, const unsigned int sa_len, - const unsigned int assoclen); + const unsigned int assoclen, + struct scatterlist *dst_tmp); int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher, -- cgit v1.2.3 From a794d8d876d21d165721b5460264ca811245f5bd Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 23 Apr 2018 08:25:14 +0100 Subject: crypto: ccree - enable support for hardware keys Enable CryptoCell support for hardware keys. Hardware keys are regular AES keys loaded into CryptoCell internal memory via firmware, often from secure boot ROM or hardware fuses at boot time. As such, they can be used for enc/dec purposes like any other key but cannot (read: it is extremely hard to) be extracted, since they are not available anywhere in RAM during runtime. The mechanism has some similarities to s390 secure keys although the keys are not wrapped or sealed, but simply loaded offline. The interface was therefore modeled on the s390 secure keys support.
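For orientation, a hedged caller-side sketch (the slot numbers below are invented examples; struct cc_hkey_info and CC_HW_KEY_SIZE come from the cc_cipher.h hunk at the end of this patch): a user of the new *(paes) algorithms passes a key token instead of key material:

	/* Hypothetical example: reference a key already loaded into a
	 * CryptoCell HW key slot rather than supplying key bytes. */
	struct cc_hkey_info hki = {
		.keylen  = AES_KEYSIZE_256,	/* size of the key in the slot */
		.hw_key1 = 0,			/* HW key slot index */
		.hw_key2 = 1,			/* second slot; only used by XTS/ESSIV/BITLOCKER */
	};
	int err = crypto_skcipher_setkey(tfm, (const u8 *)&hki, sizeof(hki));

The sethkey path below then checks that the blob is exactly sizeof(struct cc_hkey_info), resolves the slot numbers, and uses hki.keylen as the effective key length.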
Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/testmgr.c | 43 +++++ drivers/crypto/ccree/cc_cipher.c | 350 ++++++++++++++++++++++++++++++++++----- drivers/crypto/ccree/cc_cipher.h | 30 +--- 3 files changed, 361 insertions(+), 62 deletions(-) (limited to 'drivers/crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 397b117309f1..c31da0f3f680 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2581,6 +2581,13 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(des3_ede_cbc_dec_tv_template) } } + }, { + /* Same as cbc(aes) except the key is stored in + * hardware secure memory which we reference by index + */ + .alg = "cbc(paes)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "cbc(serpent)", .test = alg_test_skcipher, @@ -2727,6 +2734,13 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(des3_ede_ctr_dec_tv_template) } } + }, { + /* Same as ctr(aes) except the key is stored in + * hardware secure memory which we reference by index + */ + .alg = "ctr(paes)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "ctr(serpent)", .test = alg_test_skcipher, @@ -2997,6 +3011,13 @@ static const struct alg_test_desc alg_test_descs[] = { } } } + }, { + /* Same as ecb(aes) except the key is stored in + * hardware secure memory which we reference by index + */ + .alg = "ecb(paes)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "ecb(khazad)", .test = alg_test_skcipher, @@ -3324,6 +3345,13 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(aes_ofb_dec_tv_template) } } + }, { + /* Same as ofb(aes) except the key is stored in + * hardware secure memory which we reference by index + */ + .alg = "ofb(paes)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "pcbc(fcrypt)", .test = alg_test_skcipher, @@ -3581,6 +3609,21 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(aes_xts_dec_tv_template) } } + }, { + /* Same as xts(aes) except the key is stored in + * hardware secure memory which we reference by index + */ + .alg = "xts(paes)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "xts4096(paes)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "xts512(paes)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "xts(camellia)", .test = alg_test_skcipher, diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index df98f7afe645..d2810c183b73 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -42,6 +42,7 @@ struct cc_cipher_ctx { int cipher_mode; int flow_mode; unsigned int flags; + bool hw_key; struct cc_user_key_info user; struct cc_hw_key_info hw; struct crypto_shash *shash_tfm; @@ -49,6 +50,13 @@ struct cc_cipher_ctx { static void cc_cipher_complete(struct device *dev, void *cc_req, int err); +static inline bool cc_is_hw_key(struct crypto_tfm *tfm) +{ + struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); + + return ctx_p->hw_key; +} + static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) { switch (ctx_p->flow_mode) { @@ -211,7 +219,7 @@ struct tdes_keys { u8 key3[DES_KEY_SIZE]; }; -static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num) +static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num) { switch (slot_num) { case 0: @@ -226,69 +234,100 @@ static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num) return END_OF_KEYS; } -static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, - unsigned int keylen) +static 
int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, + unsigned int keylen) { struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm); struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); - u32 tmp[DES3_EDE_EXPKEY_WORDS]; - struct cc_crypto_alg *cc_alg = - container_of(tfm->__crt_alg, struct cc_crypto_alg, - skcipher_alg.base); - unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; + struct cc_hkey_info hki; - dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n", + dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n", ctx_p, crypto_tfm_alg_name(tfm), keylen); dump_byte_array("key", (u8 *)key, keylen); /* STAT_PHASE_0: Init and sanity checks */ + /* This check the size of the hardware key token */ + if (keylen != sizeof(hki)) { + dev_err(dev, "Unsupported HW key size %d.\n", keylen); + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (ctx_p->flow_mode != S_DIN_to_AES) { + dev_err(dev, "HW key not supported for non-AES flows\n"); + return -EINVAL; + } + + memcpy(&hki, key, keylen); + + /* The real key len for crypto op is the size of the HW key + * referenced by the HW key slot, not the hardware key token + */ + keylen = hki.keylen; + if (validate_keys_sizes(ctx_p, keylen)) { dev_err(dev, "Unsupported key size %d.\n", keylen); crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } - if (cc_is_hw_key(tfm)) { - /* setting HW key slots */ - struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key; + ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1); + if (ctx_p->hw.key1_slot == END_OF_KEYS) { + dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1); + return -EINVAL; + } - if (ctx_p->flow_mode != S_DIN_to_AES) { - dev_err(dev, "HW key not supported for non-AES flows\n"); + if (ctx_p->cipher_mode == DRV_CIPHER_XTS || + ctx_p->cipher_mode == DRV_CIPHER_ESSIV || + ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) { + if (hki.hw_key1 == hki.hw_key2) { + dev_err(dev, "Illegal hw key numbers (%d,%d)\n", + hki.hw_key1, hki.hw_key2); return -EINVAL; } - - ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1); - if (ctx_p->hw.key1_slot == END_OF_KEYS) { - dev_err(dev, "Unsupported hw key1 number (%d)\n", - hki->hw_key1); + ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2); + if (ctx_p->hw.key2_slot == END_OF_KEYS) { + dev_err(dev, "Unsupported hw key2 number (%d)\n", + hki.hw_key2); return -EINVAL; } + } - if (ctx_p->cipher_mode == DRV_CIPHER_XTS || - ctx_p->cipher_mode == DRV_CIPHER_ESSIV || - ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) { - if (hki->hw_key1 == hki->hw_key2) { - dev_err(dev, "Illegal hw key numbers (%d,%d)\n", - hki->hw_key1, hki->hw_key2); - return -EINVAL; - } - ctx_p->hw.key2_slot = - hw_key_to_cc_hw_key(hki->hw_key2); - if (ctx_p->hw.key2_slot == END_OF_KEYS) { - dev_err(dev, "Unsupported hw key2 number (%d)\n", - hki->hw_key2); - return -EINVAL; - } - } + ctx_p->keylen = keylen; + ctx_p->hw_key = true; + dev_dbg(dev, "cc_is_hw_key ret 0"); + + return 0; +} + +static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm); + struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); + struct device *dev = drvdata_to_dev(ctx_p->drvdata); + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + struct cc_crypto_alg *cc_alg = + container_of(tfm->__crt_alg, struct cc_crypto_alg, + skcipher_alg.base); + unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; 
+ + dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n", + ctx_p, crypto_tfm_alg_name(tfm), keylen); + dump_byte_array("key", (u8 *)key, keylen); - ctx_p->keylen = keylen; - dev_dbg(dev, "cc_is_hw_key ret 0"); + /* STAT_PHASE_0: Init and sanity checks */ - return 0; + if (validate_keys_sizes(ctx_p, keylen)) { + dev_err(dev, "Unsupported key size %d.\n", keylen); + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; } + ctx_p->hw_key = false; + /* * Verify DES weak keys * Note that we're dropping the expanded key since the @@ -734,6 +773,241 @@ static int cc_cipher_decrypt(struct skcipher_request *req) /* Block cipher alg */ static const struct cc_alg_template skcipher_algs[] = { + { + .name = "xts(paes)", + .driver_name = "xts-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_XTS, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_630, + }, + { + .name = "xts512(paes)", + .driver_name = "xts-paes-du512-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_XTS, + .flow_mode = S_DIN_to_AES, + .data_unit = 512, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "xts4096(paes)", + .driver_name = "xts-paes-du4096-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_XTS, + .flow_mode = S_DIN_to_AES, + .data_unit = 4096, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "essiv(paes)", + .driver_name = "essiv-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_ESSIV, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "essiv512(paes)", + .driver_name = "essiv-paes-du512-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_ESSIV, + .flow_mode = S_DIN_to_AES, + .data_unit = 512, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "essiv4096(paes)", + .driver_name = "essiv-paes-du4096-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_ESSIV, + .flow_mode = S_DIN_to_AES, + .data_unit = 4096, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "bitlocker(paes)", + .driver_name = "bitlocker-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + 
.min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_BITLOCKER, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "bitlocker512(paes)", + .driver_name = "bitlocker-paes-du512-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_BITLOCKER, + .flow_mode = S_DIN_to_AES, + .data_unit = 512, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "bitlocker4096(paes)", + .driver_name = "bitlocker-paes-du4096-ccree", + .blocksize = AES_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_BITLOCKER, + .flow_mode = S_DIN_to_AES, + .data_unit = 4096, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "ecb(paes)", + .driver_name = "ecb-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = 0, + }, + .cipher_mode = DRV_CIPHER_ECB, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "cbc(paes)", + .driver_name = "cbc-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CBC, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "ofb(paes)", + .driver_name = "ofb-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_OFB, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "cts1(cbc(paes))", + .driver_name = "cts1-cbc-paes-ccree", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CBC_CTS, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, + { + .name = "ctr(paes)", + .driver_name = "ctr-paes-ccree", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_skcipher = { + .setkey = cc_cipher_sethkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = CC_HW_KEY_SIZE, + .max_keysize = CC_HW_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CTR, + .flow_mode = S_DIN_to_AES, + .min_hw_rev = CC_HW_REV_712, + }, { .name = "xts(aes)", .driver_name = "xts-aes-ccree", diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h index 2a2a6f46c515..68444cfa936b 100644 --- 
a/drivers/crypto/ccree/cc_cipher.h +++ b/drivers/crypto/ccree/cc_cipher.h @@ -13,18 +13,6 @@ #include "cc_driver.h" #include "cc_buffer_mgr.h" -/* Crypto cipher flags */ -#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0) -#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1) -#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2) -#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3) -#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4) - -#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \ - CC_CRYPTO_CIPHER_KEY_KFDE1 | \ - CC_CRYPTO_CIPHER_KEY_KFDE2 | \ - CC_CRYPTO_CIPHER_KEY_KFDE3) - struct cipher_req_ctx { struct async_gen_req_ctx gen_ctx; enum cc_req_dma_buf_type dma_buf_type; @@ -42,18 +30,12 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata); int cc_cipher_free(struct cc_drvdata *drvdata); -struct arm_hw_key_info { - int hw_key1; - int hw_key2; -}; +struct cc_hkey_info { + u16 keylen; + u8 hw_key1; + u8 hw_key2; +} __packed; -/* - * This is a stub function that will replaced when we - * implement secure keys - */ -static inline bool cc_is_hw_key(struct crypto_tfm *tfm) -{ - return false; -} +#define CC_HW_KEY_SIZE sizeof(struct cc_hkey_info) #endif /*__CC_CIPHER_H__*/ -- cgit v1.2.3 From 5e7b516a8e6f840bcdfd5d6a8ede06bad62bc604 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 23 Apr 2018 08:25:15 +0100 Subject: crypto: ccree - use proper printk format Fix incorrect use of %pad as a printk format string for a non-dma_addr_t variable. Discovered via smatch. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_driver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 89ce013ae093..37f2e6ec0e88 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -265,7 +265,7 @@ static int init_cc_resources(struct platform_device *plat_dev) } if (rc) { - dev_err(dev, "Failed in dma_set_mask, mask=%pad\n", &dma_mask); + dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask); return rc; } -- cgit v1.2.3 From 4bffaab373d9afaf862f3924442c33340bd26736 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Fri, 27 Apr 2018 11:40:11 +0300 Subject: crypto: caam - fix size of RSA prime factor q MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a typo where the size of RSA prime factor q was using the size of prime factor p.
Cc: # 4.13+ Fixes: 52e26d77b8b3 ("crypto: caam - add support for RSA key form 2") Fixes: 4a651b122adb ("crypto: caam - add support for RSA key form 3") Reported-by: David Binderman Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 6f990139f324..578ea63a3109 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); @@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); @@ -397,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; int sec4_sg_index = 0; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->d_dma)) { @@ -472,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; int sec4_sg_index = 0; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->p_dma)) { -- cgit v1.2.3 From 1411b5218adbcf1d45ddb260db5553c52e8d917c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 3 May 2018 22:29:29 +1000 Subject: crypto: vmx - Remove overly verbose printk from AES init routines In the vmx AES init routines we do a printk(KERN_INFO ...) to report the fallback implementation we're using. However with a slow console this can significantly affect the speed of crypto operations. Using 'cryptsetup benchmark' the removal of the printk() leads to a ~5x speedup for aes-cbc decryption. So remove them. 
Fixes: 8676590a1593 ("crypto: vmx - Adding AES routines for VMX module") Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module") Fixes: 4f7f60d312b3 ("crypto: vmx - Adding CTR routines for VMX module") Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module") Cc: stable@vger.kernel.org # v4.1+ Signed-off-by: Michael Ellerman Signed-off-by: Herbert Xu --- drivers/crypto/vmx/aes.c | 2 -- drivers/crypto/vmx/aes_cbc.c | 3 --- drivers/crypto/vmx/aes_ctr.c | 2 -- drivers/crypto/vmx/ghash.c | 2 -- 4 files changed, 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index 96072b9b55c4..d7316f7a3a69 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); crypto_cipher_set_flags(fallback, crypto_cipher_get_flags((struct diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 7394d35d5936..5285ece4f33a 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); - crypto_skcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index fc60d00a2e84..cd777c75291d 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); crypto_skcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 27a94a119009..1c4b5b889fba 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); crypto_shash_set_flags(fallback, crypto_shash_get_flags((struct crypto_shash -- cgit v1.2.3 From 730f23b66095a700e2f0786abda6bca011b31558 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 3 May 2018 22:29:30 +1000 Subject: crypto: vmx - Remove overly verbose printk from AES XTS init In p8_aes_xts_init() we do a printk(KERN_INFO ...) to report the fallback implementation we're using. However with a slow console this can significantly affect the speed of crypto operations. So remove it. 
Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS") Cc: stable@vger.kernel.org # v4.8+ Signed-off-by: Michael Ellerman Signed-off-by: Herbert Xu --- drivers/crypto/vmx/aes_xts.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8cd6e62e4c90..8bd9aff0f55f 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); crypto_skcipher_set_flags( fallback, -- cgit v1.2.3 From 7024e0da72cf4d9a385ffd0d859d9582b763f376 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 9 May 2018 10:16:36 +0100 Subject: crypto: nx - fix spelling mistake: "seqeunce" -> "sequence" Trivial fix to a spelling mistake in the CSB_ERR error message text. Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-powernv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 1e87637c412d..36afd6d8753c 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -334,7 +334,7 @@ static int wait_for_csb(struct nx842_workmem *wmem, return -EPROTO; case CSB_CC_SEQUENCE: /* should not happen, we don't use chained CRBs */ - CSB_ERR(csb, "CRB seqeunce number error"); + CSB_ERR(csb, "CRB sequence number error"); return -EPROTO; case CSB_CC_UNKNOWN_CODE: CSB_ERR(csb, "Unknown subfunction code"); -- cgit v1.2.3 From 4c826fed675dfffd8485c5477b616d61d1ec9e9a Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Thu, 10 May 2018 10:14:42 +0530 Subject: crypto: chelsio - request to HW should wrap -Tx request and data are copied to the HW Q in 64B descriptors; check for the end of the queue and adjust the current position to start from the beginning before passing the additional request info.
-Key context copy should check the key length only -A few reverse christmas tree corrections Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_ipsec.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 8e0aa3f175c9..461b97e2f1fd 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb, struct net_device *dev, void *pos) { + struct cpl_tx_pkt_core *cpl; + struct sge_eth_txq *q; struct adapter *adap; struct port_info *pi; - struct sge_eth_txq *q; - struct cpl_tx_pkt_core *cpl; - u64 cntrl = 0; u32 ctrl0, qidx; + u64 cntrl = 0; + int left; pi = netdev_priv(dev); adap = pi->adapter; qidx = skb->queue_mapping; q = &adap->sge.ethtxq[qidx + pi->first_qset]; + left = (void *)q->q.stat - pos; + if (!left) + pos = q->q.desc; + cpl = (struct cpl_tx_pkt_core *)pos; cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; @@ -382,18 +387,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, void *pos, struct ipsec_sa_entry *sa_entry) { - struct adapter *adap; - struct port_info *pi; - struct sge_eth_txq *q; - unsigned int len, qidx; struct _key_ctx *key_ctx; int left, eoq, key_len; + struct sge_eth_txq *q; + struct adapter *adap; + struct port_info *pi; + unsigned int qidx; pi = netdev_priv(dev); adap = pi->adapter; qidx = skb->queue_mapping; q = &adap->sge.ethtxq[qidx + pi->first_qset]; - len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core); key_len = sa_entry->kctx_len; /* end of queue, reset pos to start of queue */ @@ -411,19 +415,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, pos += sizeof(struct _key_ctx); left -= sizeof(struct _key_ctx); - if (likely(len <= left)) { + if (likely(key_len <= left)) { memcpy(key_ctx->key, sa_entry->key, key_len); pos += key_len; } else { - if (key_len <= left) { - memcpy(pos, sa_entry->key, key_len); - pos += key_len; - } else { - memcpy(pos, sa_entry->key, left); - memcpy(q->q.desc, sa_entry->key + left, - key_len - left); - pos = (u8 *)q->q.desc + (key_len - left); - } + memcpy(pos, sa_entry->key, left); + memcpy(q->q.desc, sa_entry->key + left, + key_len - left); + pos = (u8 *)q->q.desc + (key_len - left); } /* Copy CPL TX PKT XT */ pos = copy_cpltx_pktxt(skb, dev, pos); -- cgit v1.2.3 From 17a7d24aa89d64c325dce97e1e314a8558b26fca Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Mon, 14 May 2018 16:41:38 +0530 Subject: crypto: chtls - generic handling of data and hdr Removed a redundant check and made TLS PDU and header receive handling common, as received from HW. Ensure that only the TLS header is read in the cpl_rx_tls_cmp read-ahead and that the skb is freed when the entire data has been processed.
Signed-off-by: Atul Gupta Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls.h | 10 ++---- drivers/crypto/chelsio/chtls/chtls_cm.c | 12 +++++--- drivers/crypto/chelsio/chtls/chtls_io.c | 54 ++++++++------------------------- 3 files changed, 23 insertions(+), 53 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index f4b8f1ec0061..1b2f43ccb11e 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -67,11 +67,6 @@ enum { CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */ }; -#define TLS_RCV_ST_READ_HEADER 0xF0 -#define TLS_RCV_ST_READ_BODY 0xF1 -#define TLS_RCV_ST_READ_DONE 0xF2 -#define TLS_RCV_ST_READ_NB 0xF3 - #define LISTEN_INFO_HASH_SIZE 32 #define RSPQ_HASH_BITS 5 struct listen_info { @@ -278,6 +273,7 @@ struct tlsrx_cmp_hdr { #define TLSRX_HDR_PKT_MAC_ERROR_F TLSRX_HDR_PKT_MAC_ERROR_V(1U) #define TLSRX_HDR_PKT_ERROR_M 0x1F +#define CONTENT_TYPE_ERROR 0x7F struct ulp_mem_rw { __be32 cmd; @@ -347,8 +343,8 @@ enum { ULPCB_FLAG_HOLD = 1 << 3, /* skb not ready for Tx yet */ ULPCB_FLAG_COMPL = 1 << 4, /* request WR completion */ ULPCB_FLAG_URG = 1 << 5, /* urgent data */ - ULPCB_FLAG_TLS_ND = 1 << 6, /* payload of zero length */ - ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */ + ULPCB_FLAG_TLS_HDR = 1 << 6, /* payload with tls hdr */ + ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */ }; /* The ULP mode/submode of an skbuff */ diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 23c43b8327db..2bb6f0380758 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1608,12 +1608,14 @@ static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen) static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) { - struct cpl_rx_tls_cmp *cmp_cpl = cplhdr(skb); + struct tlsrx_cmp_hdr *tls_hdr_pkt; + struct cpl_rx_tls_cmp *cmp_cpl; struct sk_buff *skb_rec; struct chtls_sock *csk; struct chtls_hws *tlsk; struct tcp_sock *tp; + cmp_cpl = cplhdr(skb); csk = rcu_dereference_sk_user_data(sk); tlsk = &csk->tlshws; tp = tcp_sk(sk); @@ -1623,16 +1625,18 @@ static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) skb_reset_transport_header(skb); __skb_pull(skb, sizeof(*cmp_cpl)); + tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data; + if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M) + tls_hdr_pkt->type = CONTENT_TYPE_ERROR; if (!skb->data_len) - __skb_trim(skb, CPL_RX_TLS_CMP_LENGTH_G - (ntohl(cmp_cpl->pdulength_length))); + __skb_trim(skb, TLS_HEADER_LENGTH); tp->rcv_nxt += CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length)); + ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR; skb_rec = __skb_dequeue(&tlsk->sk_recv_queue); if (!skb_rec) { - ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_ND; __skb_queue_tail(&sk->sk_receive_queue, skb); } else { chtls_set_hdrlen(skb, tlsk->pldlen); diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 5a75be43950f..840dd0100c2f 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1449,31 +1449,13 @@ found_ok_skb: } } } - if (hws->rstate == TLS_RCV_ST_READ_BODY) { - if (skb_copy_datagram_msg(skb, offset, - msg, avail)) { - if (!copied) { - copied = -EFAULT; - break; - } - } - } else { - struct tlsrx_cmp_hdr *tls_hdr_pkt = - (struct tlsrx_cmp_hdr *)skb->data; - - if ((tls_hdr_pkt->res_to_mac_error & - 
TLSRX_HDR_PKT_ERROR_M)) - tls_hdr_pkt->type = 0x7F; - - /* CMP pld len is for recv seq */ - hws->rcvpld = skb->hdr_len; - if (skb_copy_datagram_msg(skb, offset, msg, avail)) { - if (!copied) { - copied = -EFAULT; - break; - } + if (skb_copy_datagram_msg(skb, offset, msg, avail)) { + if (!copied) { + copied = -EFAULT; + break; } } + copied += avail; len -= avail; hws->copied_seq += avail; @@ -1481,32 +1463,20 @@ skip_copy: if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) tp->urg_data = 0; - if (hws->rstate == TLS_RCV_ST_READ_BODY && - (avail + offset) >= skb->len) { + if ((avail + offset) >= skb->len) { if (likely(skb)) chtls_free_skb(sk, skb); buffers_freed++; - hws->rstate = TLS_RCV_ST_READ_HEADER; - atomic_inc(&adap->chcr_stats.tls_pdu_rx); - tp->copied_seq += hws->rcvpld; + if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { + tp->copied_seq += skb->len; + hws->rcvpld = skb->hdr_len; + } else { + tp->copied_seq += hws->rcvpld; + } hws->copied_seq = 0; if (copied >= target && !skb_peek(&sk->sk_receive_queue)) break; - } else { - if (likely(skb)) { - if (ULP_SKB_CB(skb)->flags & - ULPCB_FLAG_TLS_ND) - hws->rstate = - TLS_RCV_ST_READ_HEADER; - else - hws->rstate = - TLS_RCV_ST_READ_BODY; - chtls_free_skb(sk, skb); - } - buffers_freed++; - tp->copied_seq += avail; - hws->copied_seq = 0; } } while (len > 0); -- cgit v1.2.3 From 6182480637d98100056883e8191b4f69c5313847 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:10:55 +0200 Subject: crypto: inside-secure - remove VLAs This patch removes the use of VLAs to allocate requests on the stack, by removing both SKCIPHER_REQUEST_ON_STACK and AHASH_REQUEST_ON_STACK. As we still need to allocate requests on the stack to ease the creation of invalidation requests a new, non-VLA, definition is used: EIP197_REQUEST_ON_STACK. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 9 +++++++++ drivers/crypto/inside-secure/safexcel_cipher.c | 2 +- drivers/crypto/inside-secure/safexcel_hash.c | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index b470a849721f..afdd099b2c1a 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -28,6 +28,15 @@ #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
\ GFP_KERNEL : GFP_ATOMIC) +/* Custom on-stack requests (for invalidation) */ +#define EIP197_SKCIPHER_REQ_SIZE sizeof(struct skcipher_request) + \ + sizeof(struct safexcel_cipher_req) +#define EIP197_AHASH_REQ_SIZE sizeof(struct ahash_request) + \ + sizeof(struct safexcel_ahash_req) +#define EIP197_REQUEST_ON_STACK(name, type, size) \ + char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ + struct type##_request *name = (void *)__##name##_desc + /* Register base offsets */ #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index bafb60505fab..9a51da28fb62 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -433,7 +433,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm)); + EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 317b9e480312..00d2cad0ff1c 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -480,7 +480,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm)); + EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE); struct safexcel_ahash_req *rctx = ahash_request_ctx(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; -- cgit v1.2.3 From 8ac1283e4aaa14ea7eeadb9afb7dcaa40145282f Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:10:56 +0200 Subject: crypto: inside-secure - rework cipher functions for future AEAD support This patch reworks the Inside Secure cipher functions, to remove all skcipher specific information and structure from all functions generic enough to be shared between skcipher and aead algorithms. This is a cosmetic only patch. 
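As a hedged orientation aid before the large diff (signatures abridged from the hunks below; this is not additional code from the patch), the shape of the rework for one helper is:

	/* before: bound to skcipher requests only */
	static int safexcel_aes_send(struct crypto_async_request *async,
				     int ring, struct safexcel_request *request,
				     int *commands, int *results);

	/* after: generic; skcipher (and later aead) wrappers pass the
	 * request pieces explicitly */
	static int safexcel_aes_send(struct crypto_async_request *base,
				     int ring, struct safexcel_request *request,
				     struct safexcel_cipher_req *sreq,
				     struct scatterlist *src,
				     struct scatterlist *dst,
				     unsigned int cryptlen, u8 *iv,
				     int *commands, int *results);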
Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 236 ++++++++++++++----------- 1 file changed, 129 insertions(+), 107 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 9a51da28fb62..51f88f93ed99 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -38,18 +38,16 @@ struct safexcel_cipher_req { bool needs_inv; }; -static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, - struct crypto_async_request *async, - struct safexcel_command_desc *cdesc, - u32 length) +static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc, + u32 length) { - struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_token *token; unsigned offset = 0; if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { offset = AES_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE); + memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; } @@ -65,8 +63,8 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, EIP197_TOKEN_INS_TYPE_OUTPUT; } -static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, - unsigned int len) +static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) { struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); @@ -100,11 +98,10 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, struct crypto_async_request *async, + struct safexcel_cipher_req *sreq, struct safexcel_command_desc *cdesc) { struct safexcel_crypto_priv *priv = ctx->priv; - struct skcipher_request *req = skcipher_request_cast(async); - struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); int ctrl_size; if (sreq->direction == SAFEXCEL_ENCRYPT) @@ -140,9 +137,12 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *async, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, + struct safexcel_cipher_req *sreq, bool *should_complete, int *ret) { - struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_result_desc *rdesc; int ndesc = 0; @@ -171,16 +171,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin safexcel_complete(priv, ring); spin_unlock_bh(&priv->ring[ring].egress_lock); - if (req->src == req->dst) { - dma_unmap_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + if (src == dst) { + dma_unmap_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + dma_unmap_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, req->dst, - sg_nents_for_len(req->dst, req->cryptlen), + dma_unmap_sg(priv->dev, dst, + sg_nents_for_len(dst, cryptlen), DMA_FROM_DEVICE); } @@ -189,39 +189,41 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin return ndesc; } -static int safexcel_aes_send(struct crypto_async_request *async, 
- int ring, struct safexcel_request *request, - int *commands, int *results) +static int safexcel_aes_send(struct crypto_async_request *base, int ring, + struct safexcel_request *request, + struct safexcel_cipher_req *sreq, + struct scatterlist *src, struct scatterlist *dst, + unsigned int cryptlen, u8 *iv, int *commands, + int *results) { - struct skcipher_request *req = skcipher_request_cast(async); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct safexcel_command_desc *cdesc; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen; + int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = cryptlen; int i, ret = 0; - if (req->src == req->dst) { - nr_src = dma_map_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + if (src == dst) { + nr_src = dma_map_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_BIDIRECTIONAL); nr_dst = nr_src; if (!nr_src) return -EINVAL; } else { - nr_src = dma_map_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + nr_src = dma_map_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_TO_DEVICE); if (!nr_src) return -EINVAL; - nr_dst = dma_map_sg(priv->dev, req->dst, - sg_nents_for_len(req->dst, req->cryptlen), + nr_dst = dma_map_sg(priv->dev, dst, + sg_nents_for_len(dst, cryptlen), DMA_FROM_DEVICE); if (!nr_dst) { - dma_unmap_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + dma_unmap_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_TO_DEVICE); return -EINVAL; } @@ -232,7 +234,7 @@ static int safexcel_aes_send(struct crypto_async_request *async, spin_lock_bh(&priv->ring[ring].egress_lock); /* command descriptors */ - for_each_sg(req->src, sg, nr_src, i) { + for_each_sg(src, sg, nr_src, i) { int len = sg_dma_len(sg); /* Do not overflow the request */ @@ -240,7 +242,7 @@ static int safexcel_aes_send(struct crypto_async_request *async, len = queued; cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), - sg_dma_address(sg), len, req->cryptlen, + sg_dma_address(sg), len, cryptlen, ctx->base.ctxr_dma); if (IS_ERR(cdesc)) { /* No space left in the command descriptor ring */ @@ -250,8 +252,8 @@ static int safexcel_aes_send(struct crypto_async_request *async, n_cdesc++; if (n_cdesc == 1) { - safexcel_context_control(ctx, async, cdesc); - safexcel_cipher_token(ctx, async, cdesc, req->cryptlen); + safexcel_context_control(ctx, base, sreq, cdesc); + safexcel_skcipher_token(ctx, iv, cdesc, cryptlen); } queued -= len; @@ -260,7 +262,7 @@ static int safexcel_aes_send(struct crypto_async_request *async, } /* result descriptors */ - for_each_sg(req->dst, sg, nr_dst, i) { + for_each_sg(dst, sg, nr_dst, i) { bool first = !i, last = (i == nr_dst - 1); u32 len = sg_dma_len(sg); @@ -276,7 +278,7 @@ static int safexcel_aes_send(struct crypto_async_request *async, spin_unlock_bh(&priv->ring[ring].egress_lock); - request->req = &req->base; + request->req = base; *commands = n_cdesc; *results = n_rdesc; @@ -291,16 +293,16 @@ cdesc_rollback: spin_unlock_bh(&priv->ring[ring].egress_lock); - if (req->src == req->dst) { - dma_unmap_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + if (src == dst) { + dma_unmap_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, req->src, - sg_nents_for_len(req->src, req->cryptlen), + 
dma_unmap_sg(priv->dev, src, + sg_nents_for_len(src, cryptlen), DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, req->dst, - sg_nents_for_len(req->dst, req->cryptlen), + dma_unmap_sg(priv->dev, dst, + sg_nents_for_len(dst, cryptlen), DMA_FROM_DEVICE); } @@ -309,11 +311,10 @@ cdesc_rollback: static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, + struct crypto_async_request *base, bool *should_complete, int *ret) { - struct skcipher_request *req = skcipher_request_cast(async); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_result_desc *rdesc; int ndesc = 0, enq_ret; @@ -354,7 +355,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ctx->base.ring = ring; spin_lock_bh(&priv->ring[ring].queue_lock); - enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); + enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (enq_ret != -EINPROGRESS) @@ -368,9 +369,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return ndesc; } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, + int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); @@ -381,24 +383,24 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, err = safexcel_handle_inv_result(priv, ring, async, should_complete, ret); } else { - err = safexcel_handle_req_result(priv, ring, async, + err = safexcel_handle_req_result(priv, ring, async, req->src, + req->dst, req->cryptlen, sreq, should_complete, ret); } return err; } -static int safexcel_cipher_send_inv(struct crypto_async_request *async, +static int safexcel_cipher_send_inv(struct crypto_async_request *base, int ring, struct safexcel_request *request, int *commands, int *results) { - struct skcipher_request *req = skcipher_request_cast(async); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; int ret; - ret = safexcel_invalidate_cache(async, priv, - ctx->base.ctxr_dma, ring, request); + ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring, + request); if (unlikely(ret)) return ret; @@ -408,9 +410,9 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, return 0; } -static int safexcel_send(struct crypto_async_request *async, - int ring, struct safexcel_request *request, - int *commands, int *results) +static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, + struct safexcel_request *request, + int *commands, int *results) { struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); @@ -421,59 +423,69 @@ static int safexcel_send(struct crypto_async_request *async, BUG_ON(priv->version == EIP97 && sreq->needs_inv); if (sreq->needs_inv) - ret = safexcel_cipher_send_inv(async, ring, request, - commands, results); + ret = safexcel_cipher_send_inv(async, ring, request, commands, + results); else - ret = 
safexcel_aes_send(async, ring, request, + ret = safexcel_aes_send(async, ring, request, sreq, req->src, + req->dst, req->cryptlen, req->iv, commands, results); return ret; } -static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) +static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm, + struct crypto_async_request *base, + struct safexcel_cipher_req *sreq, + struct safexcel_inv_result *result) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE); - struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); - struct safexcel_inv_result result = {}; int ring = ctx->base.ring; - memset(req, 0, sizeof(struct skcipher_request)); + init_completion(&result->completion); - /* create invalidation request */ - init_completion(&result.completion); - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); - - skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); - ctx = crypto_tfm_ctx(req->base.tfm); + ctx = crypto_tfm_ctx(base->tfm); ctx->base.exit_inv = true; sreq->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req->base); + crypto_enqueue_request(&priv->ring[ring].queue, base); spin_unlock_bh(&priv->ring[ring].queue_lock); queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); - wait_for_completion(&result.completion); + wait_for_completion(&result->completion); - if (result.error) { + if (result->error) { dev_warn(priv->dev, "cipher: sync: invalidate: completion error %d\n", - result.error); - return result.error; + result->error); + return result->error; } return 0; } -static int safexcel_aes(struct skcipher_request *req, - enum safexcel_cipher_direction dir, u32 mode) +static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm) { - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + struct safexcel_inv_result result = {}; + + memset(req, 0, sizeof(struct skcipher_request)); + + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + safexcel_inv_complete, &result); + skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); + + return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); +} + +static int safexcel_aes(struct crypto_async_request *base, + struct safexcel_cipher_req *sreq, + enum safexcel_cipher_direction dir, u32 mode) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; @@ -489,7 +501,7 @@ static int safexcel_aes(struct skcipher_request *req, } else { ctx->base.ring = safexcel_select_ring(priv); ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, - EIP197_GFP_FLAGS(req->base), + EIP197_GFP_FLAGS(*base), &ctx->base.ctxr_dma); if (!ctx->base.ctxr) return -ENOMEM; @@ -498,7 +510,7 @@ static int safexcel_aes(struct skcipher_request *req, ring = ctx->base.ring; spin_lock_bh(&priv->ring[ring].queue_lock); - ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); + ret = crypto_enqueue_request(&priv->ring[ring].queue, base); spin_unlock_bh(&priv->ring[ring].queue_lock); queue_work(priv->ring[ring].workqueue, @@ -509,14 +521,14 @@ static int safexcel_aes(struct skcipher_request *req, static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) { - return safexcel_aes(req, 
SAFEXCEL_ENCRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_ECB); + return safexcel_aes(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB); } static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) { - return safexcel_aes(req, SAFEXCEL_DECRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_ECB); + return safexcel_aes(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB); } static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) @@ -526,34 +538,44 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) container_of(tfm->__crt_alg, struct safexcel_alg_template, alg.skcipher.base); - ctx->priv = tmpl->priv; - ctx->base.send = safexcel_send; - ctx->base.handle_result = safexcel_handle_result; - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), sizeof(struct safexcel_cipher_req)); + ctx->priv = tmpl->priv; + + ctx->base.send = safexcel_skcipher_send; + ctx->base.handle_result = safexcel_skcipher_handle_result; return 0; } -static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) +static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; - int ret; memzero_explicit(ctx->key, 8 * sizeof(u32)); /* context not allocated, skip invalidation */ if (!ctx->base.ctxr) - return; + return -ENOMEM; memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); + return 0; +} + +static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->priv; + int ret; + + if (safexcel_cipher_cra_exit(tfm)) + return; if (priv->version == EIP197) { - ret = safexcel_cipher_exit_inv(tfm); + ret = safexcel_skcipher_exit_inv(tfm); if (ret) - dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); + dev_warn(priv->dev, "skcipher: invalidation error %d\n", + ret); } else { dma_pool_free(priv->context_pool, ctx->base.ctxr, ctx->base.ctxr_dma); @@ -563,7 +585,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { - .setkey = safexcel_aes_setkey, + .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_ecb_aes_encrypt, .decrypt = safexcel_ecb_aes_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -586,20 +608,20 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) { - return safexcel_aes(req, SAFEXCEL_ENCRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC); + return safexcel_aes(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC); } static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) { - return safexcel_aes(req, SAFEXCEL_DECRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC); + return safexcel_aes(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC); } struct safexcel_alg_template safexcel_alg_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { - .setkey = safexcel_aes_setkey, + .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_cbc_aes_encrypt, .decrypt = safexcel_cbc_aes_decrypt, .min_keysize = AES_MIN_KEY_SIZE, -- cgit v1.2.3 From 3a5ca230bbb64b5a6330559671bd67cfd8b4a170 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:10:57 +0200 Subject: crypto: inside-secure - rework the alg type settings in the context This patch reworks the 
way the algorithm type is set in the context, by using the fact that the decryption algorithms are just a combination of the algorithm encryption type and CONTEXT_CONTROL_TYPE_NULL_IN. This will help keep the code simpler when adding the AEAD support, avoiding an endless switch/case block. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 51f88f93ed99..9ed3f2641ef3 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -104,10 +104,13 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, struct safexcel_crypto_priv *priv = ctx->priv; int ctrl_size; - if (sreq->direction == SAFEXCEL_ENCRYPT) - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; - else - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN; + cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; + + /* The decryption control type is a combination of the encryption type + * and CONTEXT_CONTROL_TYPE_NULL_IN, for all types. + */ + if (sreq->direction == SAFEXCEL_DECRYPT) + cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN; cdesc->control_data.control1 |= ctx->mode; -- cgit v1.2.3 From fef0cfe577e9c8c75ba574d669eec55ac6ab0bad Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:10:58 +0200 Subject: crypto: inside-secure - make the context control size dynamic This patch makes the context control size computation dynamic, instead of relying on hardcoded values. This is better for the future, and will help when adding the AEAD support. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 9ed3f2641ef3..26f6e05726ea 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -118,21 +118,20 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, switch (ctx->key_len) { case AES_KEYSIZE_128: cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; - ctrl_size = 4; break; case AES_KEYSIZE_192: cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; - ctrl_size = 6; break; case AES_KEYSIZE_256: cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; - ctrl_size = 8; break; default: dev_err(priv->dev, "aes keysize not supported: %u\n", ctx->key_len); return -EINVAL; } + + ctrl_size = ctx->key_len / sizeof(u32); cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size); return 0; -- cgit v1.2.3 From ce6795593ba572ace9195b8e573e15f86fb411dd Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:10:59 +0200 Subject: crypto: inside-secure - make the key and context size computation dynamic This patch makes the key and context size computation dynamic when using memzero_explicit() on these two arrays. This is safer, cleaner and will help future modifications of the driver when the sizes of these two parameters change (the context size will be bigger when using AEAD algorithms). 
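As a rough sketch of the pattern (the struct layout below is hypothetical, not the driver's real context), compare the hardcoded and the dynamic call:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical context: key[] stands in for the arrays the driver clears. */
struct example_ctx {
	__le32 key[8];		/* may grow when AEAD support lands */
};

static void example_clear(struct example_ctx *ctx)
{
	/* Hardcoded element count: silently under-clears if key[] grows. */
	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* Dynamic size: always covers the whole array. */
	memzero_explicit(ctx->key, sizeof(ctx->key));
}

Both calls are equivalent today; only the sizeof() form stays correct once the arrays change size.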
Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 26f6e05726ea..1f4787ea2725 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -554,13 +554,13 @@ static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - memzero_explicit(ctx->key, 8 * sizeof(u32)); + memzero_explicit(ctx->key, sizeof(ctx->key)); /* context not allocated, skip invalidation */ if (!ctx->base.ctxr) return -ENOMEM; - memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); + memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data)); return 0; } -- cgit v1.2.3 From 8a21f067e0cff89eb7e32ff383d328d9e1de7697 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:11:00 +0200 Subject: crypto: inside-secure - fix the hash then encrypt/decrypt types This commit fixes the CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT and CONTEXT_CONTROL_TYPE_HASH_DECRYPT_OUT types by assigning the right values, and by renaming CONTEXT_CONTROL_TYPE_HASH_DECRYPT_OUT to CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN. This is not submitted as a fix for older kernel versions as these two defines weren't used back then. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index afdd099b2c1a..eec75dfcdad7 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -295,8 +295,8 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_TYPE_CRYPTO_IN 0x5 #define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT 0x6 #define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN 0x7 -#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT 0x14 -#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_OUT 0x15 +#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT 0xe +#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN 0xf #define CONTEXT_CONTROL_RESTART_HASH BIT(4) #define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5) #define CONTEXT_CONTROL_SIZE(n) ((n) << 8) -- cgit v1.2.3 From bdfd19095685825ba7bbaa6740c9fc7fe4a2ca39 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:11:01 +0200 Subject: crypto: inside-secure - improve error reporting This patch improves the error reporting from the Inside Secure driver to the upper layers and crypto consumers. Not all errors reported by the engine are fatal, and some may be genuine. 
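As a sketch of the consumer side (a hypothetical caller, not driver code; the error values match the ones the diff below introduces), the distinct codes can now be acted on individually:

#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical handler for the codes safexcel_rdesc_check_errors() returns. */
static void example_handle_result(struct device *dev, int err)
{
	switch (err) {
	case 0:		/* no error reported by the engine */
		return;
	case -EBADMSG:	/* authentication failed: bad data, healthy engine */
		break;
	case -EIO:	/* fatal result descriptor error */
		dev_err(dev, "fatal engine error\n");
		break;
	default:	/* all other non-fatal errors, reported as -EINVAL */
		break;
	}
}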
Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 21 +++++++++++++++++++++ drivers/crypto/inside-secure/safexcel.h | 2 ++ drivers/crypto/inside-secure/safexcel_cipher.c | 8 ++------ drivers/crypto/inside-secure/safexcel_hash.c | 7 ++----- 4 files changed, 27 insertions(+), 11 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index d4a81be0d7d2..87e6d1a63886 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -537,6 +537,27 @@ finalize: EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); } +inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, + struct safexcel_result_desc *rdesc) +{ + if (likely(!rdesc->result_data.error_code)) + return 0; + + if (rdesc->result_data.error_code & 0x407f) { + /* Fatal error (bits 0-7, 14) */ + dev_err(priv->dev, + "cipher: result: result descriptor error (%d)\n", + rdesc->result_data.error_code); + return -EIO; + } else if (rdesc->result_data.error_code == BIT(9)) { + /* Authentication failed */ + return -EBADMSG; + } + + /* All other non-fatal errors */ + return -EINVAL; +} + void safexcel_complete(struct safexcel_crypto_priv *priv, int ring) { struct safexcel_command_desc *cdesc; diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index eec75dfcdad7..dce02bf92dff 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -610,6 +610,8 @@ struct safexcel_inv_result { }; void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring); +int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, + struct safexcel_result_desc *rdesc); void safexcel_complete(struct safexcel_crypto_priv *priv, int ring); int safexcel_invalidate_cache(struct crypto_async_request *async, struct safexcel_crypto_priv *priv, diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 1f4787ea2725..2452fce64fb7 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -160,12 +160,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin break; } - if (rdesc->result_data.error_code) { - dev_err(priv->dev, - "cipher: result: result descriptor error (%d)\n", - rdesc->result_data.error_code); - *ret = -EIO; - } + if (likely(!*ret)) + *ret = safexcel_rdesc_check_errors(priv, rdesc); ndesc++; } while (!rdesc->last_seg); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 00d2cad0ff1c..6cbd879e8fb5 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -146,11 +146,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin dev_err(priv->dev, "hash: result: could not retrieve the result descriptor\n"); *ret = PTR_ERR(rdesc); - } else if (rdesc->result_data.error_code) { - dev_err(priv->dev, - "hash: result: result descriptor error (%d)\n", - rdesc->result_data.error_code); - *ret = -EINVAL; + } else { + *ret = safexcel_rdesc_check_errors(priv, rdesc); } safexcel_complete(priv, ring); -- cgit v1.2.3 From f6beaea304872bb1c76bf6c551386bf896cac8b9 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:11:02 +0200 Subject: crypto: inside-secure - authenc(hmac(sha256), cbc(aes)) support This patch 
adds support for the first AEAD algorithm in the Inside Secure SafeXcel driver, authenc(hmac(sha256),cbc(aes)). As this is the first AEAD algorithm added to this driver, common AEAD functions are added as well. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + drivers/crypto/inside-secure/safexcel.c | 9 + drivers/crypto/inside-secure/safexcel.h | 27 +- drivers/crypto/inside-secure/safexcel_cipher.c | 335 +++++++++++++++++++++++-- drivers/crypto/inside-secure/safexcel_hash.c | 14 +- 5 files changed, 353 insertions(+), 33 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1fa263adbcc6..43cccf6aff61 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -687,6 +687,7 @@ config CRYPTO_DEV_SAFEXCEL depends on OF depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT) select CRYPTO_AES + select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER select CRYPTO_HASH select CRYPTO_HMAC diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 87e6d1a63886..8c963ef0953a 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -352,6 +353,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* H/W capabilities selection */ val = EIP197_FUNCTION_RSVD; val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; + val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2; @@ -791,6 +793,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_hmac_sha1, &safexcel_alg_hmac_sha224, &safexcel_alg_hmac_sha256, + &safexcel_alg_authenc_hmac_sha256_cbc_aes, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) @@ -802,6 +805,8 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher); + else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) + ret = crypto_register_aead(&safexcel_algs[i]->alg.aead); else ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash); @@ -815,6 +820,8 @@ fail: for (j = 0; j < i; j++) { if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher); + else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD) + crypto_unregister_aead(&safexcel_algs[j]->alg.aead); else crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash); } @@ -829,6 +836,8 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher); + else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) + crypto_unregister_aead(&safexcel_algs[i]->alg.aead); else crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash); } diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index dce02bf92dff..ae113c14caea 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -11,8 +11,10 @@ #ifndef __SAFEXCEL_H__ #define __SAFEXCEL_H__ +#include #include #include +#include #include #define EIP197_HIA_VERSION_LE 0xca35 
@@ -20,7 +22,7 @@ /* Static configuration */ #define EIP197_DEFAULT_RING_SIZE 400 -#define EIP197_MAX_TOKENS 5 +#define EIP197_MAX_TOKENS 8 #define EIP197_MAX_RINGS 4 #define EIP197_FETCH_COUNT 1 #define EIP197_MAX_BATCH_SZ 64 @@ -33,6 +35,8 @@ sizeof(struct safexcel_cipher_req) #define EIP197_AHASH_REQ_SIZE sizeof(struct ahash_request) + \ sizeof(struct safexcel_ahash_req) +#define EIP197_AEAD_REQ_SIZE sizeof(struct aead_request) + \ + sizeof(struct safexcel_cipher_req) #define EIP197_REQUEST_ON_STACK(name, type, size) \ char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ struct type##_request *name = (void *)__##name##_desc @@ -283,7 +287,7 @@ struct safexcel_context_record { u32 control0; u32 control1; - __le32 data[12]; + __le32 data[24]; } __packed; /* control0 */ @@ -400,11 +404,15 @@ struct safexcel_token { u8 opcode:4; } __packed; +#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16) + #define EIP197_TOKEN_STAT_LAST_HASH BIT(0) #define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) #define EIP197_TOKEN_OPCODE_DIRECTION 0x0 #define EIP197_TOKEN_OPCODE_INSERT 0x2 #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT +#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 +#define EIP197_TOKEN_OPCODE_VERIFY 0xd #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) static inline void eip197_noop_token(struct safexcel_token *token) @@ -488,6 +496,7 @@ struct safexcel_ring { enum safexcel_alg_type { SAFEXCEL_ALG_TYPE_SKCIPHER, + SAFEXCEL_ALG_TYPE_AEAD, SAFEXCEL_ALG_TYPE_AHASH, }; @@ -590,6 +599,16 @@ struct safexcel_context { bool exit_inv; }; +struct safexcel_ahash_export_state { + u64 len; + u64 processed; + + u32 digest; + + u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; + u8 cache[SHA256_BLOCK_SIZE]; +}; + /* * Template structure to describe the algorithms in order to register them. 
* It also has the purpose to contain our private structure and is actually @@ -600,6 +619,7 @@ struct safexcel_alg_template { enum safexcel_alg_type type; union { struct skcipher_alg skcipher; + struct aead_alg aead; struct ahash_alg ahash; } alg; }; @@ -636,6 +656,8 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri bool first, bool last, dma_addr_t data, u32 len); void safexcel_inv_complete(struct crypto_async_request *req, int error); +int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, + void *istate, void *ostate); /* available algorithms */ extern struct safexcel_alg_template safexcel_alg_ecb_aes; @@ -646,5 +668,6 @@ extern struct safexcel_alg_template safexcel_alg_sha256; extern struct safexcel_alg_template safexcel_alg_hmac_sha1; extern struct safexcel_alg_template safexcel_alg_hmac_sha224; extern struct safexcel_alg_template safexcel_alg_hmac_sha256; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 2452fce64fb7..51ab448f664e 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -12,8 +12,12 @@ #include #include +#include #include +#include +#include #include +#include #include #include "safexcel.h" @@ -28,9 +32,16 @@ struct safexcel_cipher_ctx { struct safexcel_crypto_priv *priv; u32 mode; + bool aead; __le32 key[8]; unsigned int key_len; + + /* All the below is AEAD specific */ + u32 alg; + u32 state_sz; + u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)]; + u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)]; }; struct safexcel_cipher_req { @@ -63,6 +74,62 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, EIP197_TOKEN_INS_TYPE_OUTPUT; } +static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc, + enum safexcel_cipher_direction direction, + u32 cryptlen, u32 assoclen, u32 digestsize) +{ + struct safexcel_token *token; + unsigned offset = 0; + + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { + offset = AES_BLOCK_SIZE / sizeof(u32); + memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); + + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + } + + token = (struct safexcel_token *)(cdesc->control_data.token + offset); + + if (direction == SAFEXCEL_DECRYPT) + cryptlen -= digestsize; + + token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[0].packet_length = assoclen; + token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + + token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[1].packet_length = cryptlen; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYTO | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + + if (direction == SAFEXCEL_ENCRYPT) { + token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[2].packet_length = digestsize; + token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + } else { + token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; + token[2].packet_length = digestsize; + token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + + token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY; + 
token[3].packet_length = digestsize | + EIP197_TOKEN_HASH_RESULT_VERIFY; + token[3].stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; + } +} + static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { @@ -96,6 +163,55 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, return 0; } +static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_ahash_export_state istate, ostate; + struct safexcel_crypto_priv *priv = ctx->priv; + struct crypto_authenc_keys keys; + + if (crypto_authenc_extractkeys(&keys, key, len) != 0) + goto badkey; + + if (keys.enckeylen > sizeof(ctx->key)) + goto badkey; + + /* Encryption key */ + if (priv->version == EIP197 && ctx->base.ctxr_dma && + memcmp(ctx->key, keys.enckey, keys.enckeylen)) + ctx->base.needs_inv = true; + + /* Auth key */ + if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey, + keys.authkeylen, &istate, &ostate)) + goto badkey; + + crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & + CRYPTO_TFM_RES_MASK); + + if (priv->version == EIP197 && ctx->base.ctxr_dma && + (memcmp(ctx->ipad, istate.state, ctx->state_sz) || + memcmp(ctx->opad, ostate.state, ctx->state_sz))) + ctx->base.needs_inv = true; + + /* Now copy the keys into the context */ + memcpy(ctx->key, keys.enckey, keys.enckeylen); + ctx->key_len = keys.enckeylen; + + memcpy(ctx->ipad, &istate.state, ctx->state_sz); + memcpy(ctx->opad, &ostate.state, ctx->state_sz); + + memzero_explicit(&keys, sizeof(keys)); + return 0; + +badkey: + crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + memzero_explicit(&keys, sizeof(keys)); + return -EINVAL; +} + static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, struct crypto_async_request *async, struct safexcel_cipher_req *sreq, @@ -104,17 +220,29 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, struct safexcel_crypto_priv *priv = ctx->priv; int ctrl_size; - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; - - /* The decryption control type is a combination of the encryption type - * and CONTEXT_CONTROL_TYPE_NULL_IN, for all types. - */ - if (sreq->direction == SAFEXCEL_DECRYPT) - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; + if (ctx->aead) { + if (sreq->direction == SAFEXCEL_ENCRYPT) + cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + else + cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; + } else { + cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; + + /* The decryption control type is a combination of the + * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all + * types. 
+ */ + if (sreq->direction == SAFEXCEL_DECRYPT) + cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; + } cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN; cdesc->control_data.control1 |= ctx->mode; + if (ctx->aead) + cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC | + ctx->alg; + switch (ctx->key_len) { case AES_KEYSIZE_128: cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; @@ -132,6 +260,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, } ctrl_size = ctx->key_len / sizeof(u32); + if (ctx->aead) + /* Take in account the ipad+opad digests */ + ctrl_size += ctx->state_sz / sizeof(u32) * 2; cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size); return 0; @@ -191,7 +322,8 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, struct safexcel_request *request, struct safexcel_cipher_req *sreq, struct scatterlist *src, struct scatterlist *dst, - unsigned int cryptlen, u8 *iv, int *commands, + unsigned int cryptlen, unsigned int assoclen, + unsigned int digestsize, u8 *iv, int *commands, int *results) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); @@ -199,29 +331,30 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, struct safexcel_command_desc *cdesc; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = cryptlen; + unsigned int totlen = cryptlen + assoclen; + int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; int i, ret = 0; if (src == dst) { nr_src = dma_map_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), + sg_nents_for_len(src, totlen), DMA_BIDIRECTIONAL); nr_dst = nr_src; if (!nr_src) return -EINVAL; } else { nr_src = dma_map_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), + sg_nents_for_len(src, totlen), DMA_TO_DEVICE); if (!nr_src) return -EINVAL; nr_dst = dma_map_sg(priv->dev, dst, - sg_nents_for_len(dst, cryptlen), + sg_nents_for_len(dst, totlen), DMA_FROM_DEVICE); if (!nr_dst) { dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), + sg_nents_for_len(src, totlen), DMA_TO_DEVICE); return -EINVAL; } @@ -229,6 +362,13 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); + if (ctx->aead) { + memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), + ctx->ipad, ctx->state_sz); + memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32), + ctx->opad, ctx->state_sz); + } + spin_lock_bh(&priv->ring[ring].egress_lock); /* command descriptors */ @@ -240,7 +380,7 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, len = queued; cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), - sg_dma_address(sg), len, cryptlen, + sg_dma_address(sg), len, totlen, ctx->base.ctxr_dma); if (IS_ERR(cdesc)) { /* No space left in the command descriptor ring */ @@ -251,7 +391,13 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, if (n_cdesc == 1) { safexcel_context_control(ctx, base, sreq, cdesc); - safexcel_skcipher_token(ctx, iv, cdesc, cryptlen); + if (ctx->aead) + safexcel_aead_token(ctx, iv, cdesc, + sreq->direction, cryptlen, + assoclen, digestsize); + else + safexcel_skcipher_token(ctx, iv, cdesc, + cryptlen); } queued -= len; @@ -293,14 +439,14 @@ cdesc_rollback: if (src == dst) { dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), + sg_nents_for_len(src, totlen), DMA_BIDIRECTIONAL); } else { 
dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), + sg_nents_for_len(src, totlen), DMA_TO_DEVICE); dma_unmap_sg(priv->dev, dst, - sg_nents_for_len(dst, cryptlen), + sg_nents_for_len(dst, totlen), DMA_FROM_DEVICE); } @@ -389,6 +535,30 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, return err; } +static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, + int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct aead_request *req = aead_request_cast(async); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct safexcel_cipher_req *sreq = aead_request_ctx(req); + int err; + + if (sreq->needs_inv) { + sreq->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, req->src, + req->dst, + req->cryptlen + crypto_aead_authsize(tfm), + sreq, should_complete, ret); + } + + return err; +} + static int safexcel_cipher_send_inv(struct crypto_async_request *base, int ring, struct safexcel_request *request, int *commands, int *results) @@ -425,7 +595,31 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, results); else ret = safexcel_aes_send(async, ring, request, sreq, req->src, - req->dst, req->cryptlen, req->iv, + req->dst, req->cryptlen, 0, 0, req->iv, + commands, results); + return ret; +} + +static int safexcel_aead_send(struct crypto_async_request *async, int ring, + struct safexcel_request *request, int *commands, + int *results) +{ + struct aead_request *req = aead_request_cast(async); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_req *sreq = aead_request_ctx(req); + struct safexcel_crypto_priv *priv = ctx->priv; + int ret; + + BUG_ON(priv->version == EIP97 && sreq->needs_inv); + + if (sreq->needs_inv) + ret = safexcel_cipher_send_inv(async, ring, request, commands, + results); + else + ret = safexcel_aes_send(async, ring, request, sreq, req->src, + req->dst, req->cryptlen, req->assoclen, + crypto_aead_authsize(tfm), req->iv, commands, results); return ret; } @@ -479,6 +673,21 @@ static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm) return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); } +static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) +{ + EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE); + struct safexcel_cipher_req *sreq = aead_request_ctx(req); + struct safexcel_inv_result result = {}; + + memset(req, 0, sizeof(struct aead_request)); + + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + safexcel_inv_complete, &result); + aead_request_set_tfm(req, __crypto_aead_cast(tfm)); + + return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); +} + static int safexcel_aes(struct crypto_async_request *base, struct safexcel_cipher_req *sreq, enum safexcel_cipher_direction dir, u32 mode) @@ -580,6 +789,26 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) } } +static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->priv; + int ret; + + if (safexcel_cipher_cra_exit(tfm)) + return; + + if (priv->version == EIP197) { + ret = safexcel_aead_exit_inv(tfm); + if (ret) + dev_warn(priv->dev, "aead: invalidation error %d\n", + ret); + } else { + dma_pool_free(priv->context_pool, ctx->base.ctxr, + 
ctx->base.ctxr_dma); + } +} + struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { @@ -640,3 +869,71 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { }, }, }; + +static int safexcel_aead_encrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT, + CONTEXT_CONTROL_CRYPTO_MODE_CBC); +} + +static int safexcel_aead_decrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT, + CONTEXT_CONTROL_CRYPTO_MODE_CBC); +} + +static int safexcel_aead_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_alg_template *tmpl = + container_of(tfm->__crt_alg, struct safexcel_alg_template, + alg.aead.base); + + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct safexcel_cipher_req)); + + ctx->priv = tmpl->priv; + + ctx->aead = true; + ctx->base.send = safexcel_aead_send; + ctx->base.handle_result = safexcel_aead_handle_result; + return 0; +} + +static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; + ctx->state_sz = SHA256_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_aes_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha256_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 6cbd879e8fb5..d138d6b8fec5 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -50,16 +50,6 @@ struct safexcel_ahash_req { u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); }; -struct safexcel_ahash_export_state { - u64 len; - u64 processed; - - u32 digest; - - u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; - u8 cache[SHA256_BLOCK_SIZE]; -}; - static void safexcel_hash_token(struct safexcel_command_desc *cdesc, u32 input_length, u32 result_length) { @@ -909,8 +899,8 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq, return crypto_ahash_export(areq, state); } -static int safexcel_hmac_setkey(const char *alg, const u8 *key, - unsigned int keylen, void *istate, void *ostate) +int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, + void *istate, void *ostate) { struct ahash_request *areq; struct crypto_ahash *tfm; -- cgit v1.2.3 From 678b2878ac396ff1cceb870b72d1e95721f7a7f2 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:11:03 +0200 Subject: crypto: inside-secure - authenc(hmac(sha224), cbc(aes)) support This patch adds the authenc(hmac(sha224),cbc(aes)) AEAD algorithm support to the Inside Secure SafeXcel 
driver. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel.h | 1 + drivers/crypto/inside-secure/safexcel_cipher.c | 50 ++++++++++++++++++++++++-- 3 files changed, 50 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 8c963ef0953a..0f061c3757e9 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -793,6 +793,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_hmac_sha1, &safexcel_alg_hmac_sha224, &safexcel_alg_hmac_sha256, + &safexcel_alg_authenc_hmac_sha224_cbc_aes, &safexcel_alg_authenc_hmac_sha256_cbc_aes, }; diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index ae113c14caea..c2e953f60447 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -668,6 +668,7 @@ extern struct safexcel_alg_template safexcel_alg_sha256; extern struct safexcel_alg_template safexcel_alg_hmac_sha1; extern struct safexcel_alg_template safexcel_alg_hmac_sha224; extern struct safexcel_alg_template safexcel_alg_hmac_sha256; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 51ab448f664e..9250fb205cf2 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -184,9 +184,21 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, ctx->base.needs_inv = true; /* Auth key */ - if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey, - keys.authkeylen, &istate, &ostate)) + switch (ctx->alg) { + case CONTEXT_CONTROL_CRYPTO_ALG_SHA224: + if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey, + keys.authkeylen, &istate, &ostate)) + goto badkey; + break; + case CONTEXT_CONTROL_CRYPTO_ALG_SHA256: + if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey, + keys.authkeylen, &istate, &ostate)) + goto badkey; + break; + default: + dev_err(priv->dev, "aead: unsupported hash algorithm\n"); goto badkey; + } crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & CRYPTO_TFM_RES_MASK); @@ -937,3 +949,37 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { }, }, }; + +static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; + ctx->state_sz = SHA256_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_aes_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha224_cra_init, + .cra_exit = safexcel_aead_cra_exit, + 
.cra_module = THIS_MODULE, + }, + }, +}; -- cgit v1.2.3 From 01ba061d0fd769e4aa657561a2ff88e6c19e34e6 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 14 May 2018 15:11:04 +0200 Subject: crypto: inside-secure - authenc(hmac(sha1), cbc(aes)) support This patch adds the authenc(hmac(sha1),cbc(aes)) AEAD algorithm support to the Inside Secure SafeXcel driver. Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel.h | 1 + drivers/crypto/inside-secure/safexcel_cipher.c | 39 ++++++++++++++++++++++++++ 3 files changed, 41 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 0f061c3757e9..46ab2d0eb3fd 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -793,6 +793,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_hmac_sha1, &safexcel_alg_hmac_sha224, &safexcel_alg_hmac_sha256, + &safexcel_alg_authenc_hmac_sha1_cbc_aes, &safexcel_alg_authenc_hmac_sha224_cbc_aes, &safexcel_alg_authenc_hmac_sha256_cbc_aes, }; diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index c2e953f60447..8b3ee9b59f53 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -668,6 +668,7 @@ extern struct safexcel_alg_template safexcel_alg_sha256; extern struct safexcel_alg_template safexcel_alg_hmac_sha1; extern struct safexcel_alg_template safexcel_alg_hmac_sha224; extern struct safexcel_alg_template safexcel_alg_hmac_sha256; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 9250fb205cf2..6bb60fda2043 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -185,6 +185,11 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, /* Auth key */ switch (ctx->alg) { + case CONTEXT_CONTROL_CRYPTO_ALG_SHA1: + if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey, + keys.authkeylen, &istate, &ostate)) + goto badkey; + break; case CONTEXT_CONTROL_CRYPTO_ALG_SHA224: if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey, keys.authkeylen, &istate, &ostate)) @@ -916,6 +921,40 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) return 0; } +static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; + ctx->state_sz = SHA1_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_aes_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + 
.cra_alignmask = 0, + .cra_init = safexcel_aead_sha1_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -- cgit v1.2.3 From f16b613ca8b3e4960cdc5575e9b8e1dbdd7d54d5 Mon Sep 17 00:00:00 2001 From: Wenwen Wang Date: Fri, 18 May 2018 14:55:35 -0500 Subject: crypto: chtls - fix a missing-check bug In do_chtls_setsockopt(), the tls crypto info is first copied from the pointer 'optval' in userspace and saved to 'tmp_crypto_info'. Then the 'version' of the crypto info is checked. If the version is not as expected, i.e., TLS_1_2_VERSION, error code -ENOTSUPP is returned to indicate that the provided crypto info is not supported yet. Then, the 'cipher_type' field of the 'tmp_crypto_info' is also checked to see if it is TLS_CIPHER_AES_GCM_128. If it is, the whole struct of tls12_crypto_info_aes_gcm_128 is copied from the pointer 'optval' and then the function chtls_setkey() is invoked to set the key. Given that the 'optval' pointer resides in userspace, a malicious userspace process can race to change the data pointed to by 'optval' between the two copies. For example, a user can provide a crypto info with TLS_1_2_VERSION and TLS_CIPHER_AES_GCM_128. After the first copy, the user can modify the 'version' and the 'cipher_type' fields to any versions and/or cipher types that are not allowed. This way, the user can bypass the checks, inject bad data into the kernel, and cause chtls_setkey() to set a wrong key, among other issues. This patch reuses the data copied in the first try so as to ensure these checks will not be bypassed. Signed-off-by: Wenwen Wang Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_main.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 5b9dd582aac0..53ffb00d45bf 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -490,9 +490,13 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, switch (tmp_crypto_info.cipher_type) { case TLS_CIPHER_AES_GCM_128: { - rc = copy_from_user(crypto_info, optval, - sizeof(struct - tls12_crypto_info_aes_gcm_128)); + /* Obtain version and type from previous copy */ + crypto_info[0] = tmp_crypto_info; + /* Now copy the following data */ + rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), + optval + sizeof(*crypto_info), + sizeof(struct tls12_crypto_info_aes_gcm_128) + - sizeof(*crypto_info)); if (rc) { rc = -EFAULT; -- cgit v1.2.3 From c2e415fe75bbc83c1cd9299b12b81aa2f5ad7c6e Mon Sep 17 00:00:00 2001 From: Adam Langley Date: Tue, 22 May 2018 12:35:11 -0700 Subject: crypto: clarify licensing of OpenSSL asm code Several source files have been taken from OpenSSL. In some of them a comment that "permission to use under GPL terms is granted" was included below a contradictory license statement. In several cases, there was no indication that the license of the code was compatible with the GPLv2. This change clarifies the licensing for all of these files. I've confirmed with the author (Andy Polyakov) that a) he has licensed the files with the GPLv2 comment under that license and b) that he's also happy to license the other files under GPLv2 too. 
In one case, the file is already contained in his CRYPTOGAMS bundle, which has a GPLv2 option, and so no special measures are needed. In all cases, the license status of code has been clarified by making the GPLv2 license prominent. The .S files have been regenerated from the updated .pl files. This is a comment-only change. No code is changed. Signed-off-by: Adam Langley Signed-off-by: Herbert Xu --- arch/arm/crypto/sha1-armv4-large.S | 10 +++++++ arch/arm/crypto/sha256-armv4.pl | 11 ++++++-- arch/arm/crypto/sha256-core.S_shipped | 11 ++++++-- arch/arm/crypto/sha512-armv4.pl | 11 ++++++-- arch/arm/crypto/sha512-core.S_shipped | 11 ++++++-- arch/arm64/crypto/sha256-core.S_shipped | 12 ++++++-- arch/arm64/crypto/sha512-armv8.pl | 12 ++++++-- arch/arm64/crypto/sha512-core.S_shipped | 12 ++++++-- drivers/crypto/vmx/aesp8-ppc.pl | 49 +++++++++++++++++++++++++++++---- drivers/crypto/vmx/ghashp8-ppc.pl | 9 ++++++ 10 files changed, 129 insertions(+), 19 deletions(-) (limited to 'drivers/crypto') diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index 99207c45ec10..f82cd8cf5a09 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -1,4 +1,14 @@ #define __ARM_ARCH__ __LINUX_ARM_ARCH__ +@ SPDX-License-Identifier: GPL-2.0 + +@ This code is taken from the OpenSSL project but the author (Andy Polyakov) +@ has relicensed it under the GPLv2. Therefore this program is free software; +@ you can redistribute it and/or modify it under the terms of the GNU General +@ Public License version 2 as published by the Free Software Foundation. +@ +@ The original headers, including the original license headers, are +@ included below for completeness. + @ ==================================================================== @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index fac0533ea633..b9ec44060ed3 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -1,12 +1,19 @@ #!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. +# +# The original headers, including the original license headers, are +# included below for completeness. # ==================================================================== # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. -# -# Permission to use under GPL terms is granted. # ==================================================================== # SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 555a1a8eec90..3b58300d611c 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -1,11 +1,18 @@ +@ SPDX-License-Identifier: GPL-2.0 + +@ This code is taken from the OpenSSL project but the author (Andy Polyakov) +@ has relicensed it under the GPLv2. 
Therefore this program is free software; +@ you can redistribute it and/or modify it under the terms of the GNU General +@ Public License version 2 as published by the Free Software Foundation. +@ +@ The original headers, including the original license headers, are +@ included below for completeness. @ ==================================================================== @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index a2b11a844357..fb5d15048c0b 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -1,12 +1,19 @@ #!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. +# +# The original headers, including the original license headers, are +# included below for completeness. # ==================================================================== # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. -# -# Permission to use under GPL terms is granted. # ==================================================================== # SHA512 block procedure for ARMv4. September 2007. diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index 3694c4d4ca2b..b1c334a49cda 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -1,11 +1,18 @@ +@ SPDX-License-Identifier: GPL-2.0 + +@ This code is taken from the OpenSSL project but the author (Andy Polyakov) +@ has relicensed it under the GPLv2. Therefore this program is free software; +@ you can redistribute it and/or modify it under the terms of the GNU General +@ Public License version 2 as published by the Free Software Foundation. +@ +@ The original headers, including the original license headers, are +@ included below for completeness. @ ==================================================================== @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped index 3ce82cc860bc..7c7ce2e3bad6 100644 --- a/arch/arm64/crypto/sha256-core.S_shipped +++ b/arch/arm64/crypto/sha256-core.S_shipped @@ -1,3 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +// This code is taken from the OpenSSL project but the author (Andy Polyakov) +// has relicensed it under the GPLv2. 
Therefore this program is free software; +// you can redistribute it and/or modify it under the terms of the GNU General +// Public License version 2 as published by the Free Software Foundation. +// +// The original headers, including the original license headers, are +// included below for completeness. + // Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use @@ -10,8 +20,6 @@ // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/crypto/sha512-armv8.pl index c55efb308544..2d8655d5b1af 100644 --- a/arch/arm64/crypto/sha512-armv8.pl +++ b/arch/arm64/crypto/sha512-armv8.pl @@ -1,4 +1,14 @@ #! /usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. +# +# The original headers, including the original license headers, are +# included below for completeness. + # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. # # Licensed under the OpenSSL license (the "License"). You may not use @@ -11,8 +21,6 @@ # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. -# -# Permission to use under GPLv2 terms is granted. # ==================================================================== # # SHA256/512 for ARMv8. diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped index bd0f59f06c9d..e063a6106720 100644 --- a/arch/arm64/crypto/sha512-core.S_shipped +++ b/arch/arm64/crypto/sha512-core.S_shipped @@ -1,3 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +// This code is taken from the OpenSSL project but the author (Andy Polyakov) +// has relicensed it under the GPLv2. Therefore this program is free software; +// you can redistribute it and/or modify it under the terms of the GNU General +// Public License version 2 as published by the Free Software Foundation. +// +// The original headers, including the original license headers, are +// included below for completeness. + // Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use @@ -10,8 +20,6 @@ // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl index 0b4a293b8a1e..d6a9f63d65ba 100644 --- a/drivers/crypto/vmx/aesp8-ppc.pl +++ b/drivers/crypto/vmx/aesp8-ppc.pl @@ -1,12 +1,51 @@ #! /usr/bin/env perl -# Copyright 2014-2016 The OpenSSL Project Authors. 
All Rights Reserved. +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from CRYPTOGAMs[1] and is included here using the option +# in the license to distribute the code under the GPL. Therefore this program +# is free software; you can redistribute it and/or modify it under the terms of +# the GNU General Public License version 2 as published by the Free Software +# Foundation. # -# Licensed under the OpenSSL license (the "License"). You may not use -# this file except in compliance with the License. You can obtain a copy -# in the file LICENSE in the source distribution or at -# https://www.openssl.org/source/license.html +# [1] https://www.openssl.org/~appro/cryptogams/ +# Copyright (c) 2006-2017, CRYPTOGAMS by +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain copyright notices, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. # +# * Neither the name of the CRYPTOGAMS nor the names of its +# copyright holder and contributors may be used to endorse or +# promote products derived from this software without specific +# prior written permission. +# +# ALTERNATIVELY, provided that this notice is retained in full, this +# product may be distributed under the terms of the GNU General Public +# License (GPL), in which case the provisions of the GPL apply INSTEAD OF +# those given above. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + # ==================================================================== # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl index d8429cb71f02..f746af271460 100644 --- a/drivers/crypto/vmx/ghashp8-ppc.pl +++ b/drivers/crypto/vmx/ghashp8-ppc.pl @@ -1,5 +1,14 @@ #!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. # +# The original headers, including the original license headers, are +# included below for completeness. + # ==================================================================== # Written by Andy Polyakov for the OpenSSL # project. 
The module is, however, dual licensed under OpenSSL and -- cgit v1.2.3 From 06d44c918a689e41215f763285061b3a99c56b6c Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 23 May 2018 14:32:40 +0300 Subject: crypto: caam - fix MC firmware detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Management Complex (MC) f/w detection is based on CTPR_MS[DPAA2] bit. This is incorrect since: -the bit is set for all CAAM blocks integrated in SoCs with a certain Layerscape Chassis -some SoCs with LS Chassis don't have an MC block (thus no MC f/w) To fix this, MC f/w detection will be based on the presence of "fsl,qoriq-mc" compatible string in the device tree. Fixes: 297b9cebd2fc0 ("crypto: caam/jr - add support for DPAA2 parts") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 21 ++++++++++++--------- drivers/crypto/caam/intern.h | 1 + 2 files changed, 13 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index a28868d5b2d0..538c01f428c1 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -322,9 +322,9 @@ static int caam_remove(struct platform_device *pdev) /* * De-initialize RNG state handles initialized by this driver. - * In case of DPAA 2.x, RNG is managed by MC firmware. + * In case of SoCs with Management Complex, RNG is managed by MC f/w. */ - if (!caam_dpaa2 && ctrlpriv->rng4_sh_init) + if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init) deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); /* Shut down debug views */ @@ -618,11 +618,15 @@ static int caam_probe(struct platform_device *pdev) /* * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, * long pointers in master configuration register. - * In case of DPAA 2.x, Management Complex firmware performs + * In case of SoCs with Management Complex, MC f/w performs * the configuration. */ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); - if (!caam_dpaa2) + np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); + ctrlpriv->mc_en = !!np; + of_node_put(np); + + if (!ctrlpriv->mc_en) clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST | @@ -733,9 +737,9 @@ static int caam_probe(struct platform_device *pdev) /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation - * In case of DPAA 2.x, RNG is managed by MC firmware. + * In case of SoCs with Management Complex, RNG is managed by MC f/w. */ - if (!caam_dpaa2 && + if (!ctrlpriv->mc_en && (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); @@ -804,9 +808,8 @@ static int caam_probe(struct platform_device *pdev) /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, ctrlpriv->era); - dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", - ctrlpriv->total_jobrs, ctrlpriv->qi_present, - caam_dpaa2 ? 
"yes" : "no"); + dev_info(dev, "job rings = %d, qi = %d\n", + ctrlpriv->total_jobrs, ctrlpriv->qi_present); #ifdef CONFIG_DEBUG_FS debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH, diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 7696a774a362..babc78abd155 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -82,6 +82,7 @@ struct caam_drv_private { */ u8 total_jobrs; /* Total Job Rings in device */ u8 qi_present; /* Nonzero if QI present in device */ + u8 mc_en; /* Nonzero if MC f/w is active */ int secvio_irq; /* Security violation interrupt number */ int virt_en; /* Virtualization enabled in CAAM */ int era; /* CAAM Era (internal HW revision) */ -- cgit v1.2.3 From b2106476a8cf18318a9677ad669e63502900d907 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 23 May 2018 14:32:41 +0300 Subject: crypto: caam - fix rfc4543 descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some cases the CCB DMA-based internal transfer started by the MOVE command (src=M3 register, dst=descriptor buffer) does not finish in time and DECO executes the unpatched descriptor. This leads eventually to a DECO Watchdog Timer timeout error. To make sure the transfer ends, change the MOVE command to be blocking. Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_desc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 8ae7a1be7dfd..a408edd84f34 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -1093,7 +1093,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | (0x6 << MOVE_LEN_SHIFT)); write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | - (0x8 << MOVE_LEN_SHIFT)); + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP); /* Will read assoclen + cryptlen bytes */ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); @@ -1178,7 +1178,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | (0x6 << MOVE_LEN_SHIFT)); write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | - (0x8 << MOVE_LEN_SHIFT)); + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP); /* Will read assoclen + cryptlen bytes */ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); -- cgit v1.2.3 From d9c35771d884c6d8eb57057be4bc9a74161361bc Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 23 May 2018 14:32:42 +0300 Subject: crypto: caam/qi - fix warning in init_cgr() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Coverity warns about an "Unintentional integer overflow (OVERFLOW_BEFORE_WIDEN)" when computing the congestion threshold value. Even though it is highly unlikely for an overflow to happen, use this as an opportunity to simplify the code. 
Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/qi.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index b9480828da38..67f7f8c42c93 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -657,9 +657,8 @@ static int init_cgr(struct device *qidev) { int ret; struct qm_mcc_initcgr opts; - const u64 cpus = *(u64 *)qman_affine_cpus(); - const int num_cpus = hweight64(cpus); - const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU; + const u64 val = (u64)cpumask_weight(qman_affine_cpus()) * + MAX_RSP_FQ_BACKLOG_PER_CPU; ret = qman_alloc_cgrid(&qipriv.cgr.cgrid); if (ret) { -- cgit v1.2.3 From 6faa0f572518fd7fa1946ac477975e5d8e2f6ead Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Thu, 24 May 2018 17:26:37 +0530 Subject: crypto: chelsio - Return -ENOSPC for transient busy indication. Change the return type based on following patch https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg28552.html Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 56 ++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 30 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 59fe6631e73e..db15a9fafe0d 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -688,6 +688,7 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher, int err; SKCIPHER_REQUEST_ON_STACK(subreq, cipher); + skcipher_request_set_tfm(subreq, cipher); skcipher_request_set_callback(subreq, flags, NULL, NULL); skcipher_request_set_crypt(subreq, src, dst, @@ -1113,14 +1114,6 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, goto complete; } - if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], - c_ctx(tfm)->tx_qidx))) { - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { - err = -EBUSY; - goto unmap; - } - - } if (!reqctx->imm) { bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, CIP_SPACE_LEFT(ablkctx->enckey_len), @@ -1293,13 +1286,14 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct sk_buff *skb = NULL; - int err; + int err, isfull = 0; struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], c_ctx(tfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], @@ -1309,7 +1303,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? 
-EBUSY : -EINPROGRESS; } static int chcr_aes_decrypt(struct ablkcipher_request *req) @@ -1317,12 +1311,13 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); struct sk_buff *skb = NULL; - int err; + int err, isfull = 0; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], c_ctx(tfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], @@ -1332,7 +1327,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; } static int chcr_device_init(struct chcr_context *ctx) @@ -1574,14 +1569,15 @@ static int chcr_ahash_update(struct ahash_request *req) u8 remainder = 0, bs; unsigned int nbytes = req->nbytes; struct hash_wr_param params; - int error; + int error, isfull = 0; bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } if (nbytes + req_ctx->reqlen >= bs) { @@ -1633,7 +1629,7 @@ static int chcr_ahash_update(struct ahash_request *req) set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); return error; @@ -1710,15 +1706,16 @@ static int chcr_ahash_finup(struct ahash_request *req) struct sk_buff *skb; struct hash_wr_param params; u8 bs; - int error; + int error, isfull = 0; bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); @@ -1777,7 +1774,7 @@ static int chcr_ahash_finup(struct ahash_request *req) set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); return error; @@ -1791,7 +1788,7 @@ static int chcr_ahash_digest(struct ahash_request *req) struct sk_buff *skb; struct hash_wr_param params; u8 bs; - int error; + int error, isfull = 0; rtfm->init(req); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); @@ -1799,8 +1796,9 @@ static int chcr_ahash_digest(struct ahash_request *req) u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } chcr_init_hctx_per_wr(req_ctx); @@ -1856,7 +1854,7 @@ static int chcr_ahash_digest(struct ahash_request *req) skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? 
-EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); return error; @@ -1875,11 +1873,6 @@ static int chcr_ahash_continue(struct ahash_request *req) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); - if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], - h_ctx(rtfm)->tx_qidx))) { - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; - } get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); if (is_hmac(crypto_ahash_tfm(rtfm))) { @@ -3461,6 +3454,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } { SHASH_DESC_ON_STACK(shash, base_hash); + shash->tfm = base_hash; shash->flags = crypto_shash_get_flags(base_hash); bs = crypto_shash_blocksize(base_hash); @@ -3592,6 +3586,7 @@ static int chcr_aead_op(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct uld_ctx *u_ctx; struct sk_buff *skb; + int isfull = 0; if (!a_ctx(tfm)->dev) { pr_err("chcr : %s : No crypto device.\n", __func__); @@ -3600,8 +3595,9 @@ static int chcr_aead_op(struct aead_request *req, u_ctx = ULD_CTX(a_ctx(tfm)); if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], a_ctx(tfm)->tx_qidx)) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } /* Form a WR from req */ @@ -3614,7 +3610,7 @@ static int chcr_aead_op(struct aead_request *req, skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; } static int chcr_aead_encrypt(struct aead_request *req) -- cgit v1.2.3 From 335bcc4a2600f56ec3c28cf93dd9070df2576891 Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Thu, 24 May 2018 17:26:38 +0530 Subject: crypt: chelsio - Send IV as Immediate for cipher algo Send IV in WR as immediate instead of dma mapped entry for cipher. Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 49 +++++++++++------------------------- drivers/crypto/chelsio/chcr_algo.h | 3 +-- drivers/crypto/chelsio/chcr_core.h | 2 +- drivers/crypto/chelsio/chcr_crypto.h | 3 +-- 4 files changed, 17 insertions(+), 40 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index db15a9fafe0d..b2bfeb251e21 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -638,7 +638,6 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, src = sg_next(src); srcskip = 0; } - if (sg_dma_len(dst) == dstskip) { dst = sg_next(dst); dstskip = 0; @@ -761,13 +760,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, reqctx->dst_ofst); - dst_size = get_space_for_phys_dsgl(nents + 1); + dst_size = get_space_for_phys_dsgl(nents); kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, CHCR_SRC_SG_SIZE, reqctx->src_ofst); - temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) : - (sgl_len(nents + MIN_CIPHER_SG) * 8); + temp = reqctx->imm ? 
roundup(wrparam->bytes, 16) : + (sgl_len(nents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); skb = alloc_skb(SGE_MAX_WR_LEN, flags); @@ -789,7 +788,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) ablkctx->ciph_mode, 0, 0, IV >> 1); chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, - 0, 0, dst_size); + 0, 1, dst_size); chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; if ((reqctx->op == CHCR_DECRYPT_OP) && @@ -819,8 +818,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); atomic_inc(&adap->chcr_stats.cipher_rqst); - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len - +(reqctx->imm ? (IV + wrparam->bytes) : 0); + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV + + (reqctx->imm ? (wrparam->bytes) : 0); create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, transhdr_len, temp, ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); @@ -1023,7 +1022,7 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, ret = crypto_cipher_setkey(cipher, key, keylen); if (ret) goto out; - /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/ + crypto_cipher_encrypt_one(cipher, iv, iv); for (i = 0; i < round8; i++) gf128mul_x8_ble((le128 *)iv, (le128 *)iv); @@ -1115,7 +1114,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, } if (!reqctx->imm) { - bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, + bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0, CIP_SPACE_LEFT(ablkctx->enckey_len), reqctx->src_ofst, reqctx->dst_ofst); if ((bytes + reqctx->processed) >= req->nbytes) @@ -1126,11 +1125,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, /*CTR mode counter overfloa*/ bytes = req->nbytes - reqctx->processed; } - dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, - reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); - dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, - reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); if (err) goto unmap; @@ -1205,7 +1200,6 @@ static int process_cipher(struct ablkcipher_request *req, dnents = sg_nents_xlen(req->dst, req->nbytes, CHCR_DST_SG_SIZE, 0); - dnents += 1; // IV phys_dsgl = get_space_for_phys_dsgl(dnents); kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); @@ -1218,8 +1212,7 @@ static int process_cipher(struct ablkcipher_request *req, } if (!reqctx->imm) { - bytes = chcr_sg_ent_in_wr(req->src, req->dst, - MIN_CIPHER_SG, + bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0, CIP_SPACE_LEFT(ablkctx->enckey_len), 0, 0); if ((bytes + reqctx->processed) >= req->nbytes) @@ -2516,22 +2509,20 @@ void chcr_add_aead_dst_ent(struct aead_request *req, } void chcr_add_cipher_src_ent(struct ablkcipher_request *req, - struct ulptx_sgl *ulptx, + void *ulptx, struct cipher_wr_param *wrparam) { struct ulptx_walk ulp_walk; struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); + u8 *buf = ulptx; + memcpy(buf, reqctx->iv, IV); + buf += IV; if (reqctx->imm) { - u8 *buf = (u8 *)ulptx; - - memcpy(buf, reqctx->iv, IV); - buf += IV; sg_pcopy_to_buffer(req->src, sg_nents(req->src), buf, wrparam->bytes, reqctx->processed); } else { - ulptx_walk_init(&ulp_walk, ulptx); - ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); + ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf); 
ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, reqctx->src_ofst); reqctx->srcsg = ulp_walk.last_sg; @@ -2549,7 +2540,6 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, struct dsgl_walk dsgl_walk; dsgl_walk_init(&dsgl_walk, phys_cpl); - dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, reqctx->dst_ofst); reqctx->dstsg = dsgl_walk.last_sg; @@ -2623,12 +2613,6 @@ int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req) { int error; - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); - - reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, reqctx->iv_dma)) - return -ENOMEM; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, sg_nents(req->src), @@ -2651,17 +2635,12 @@ int chcr_cipher_dma_map(struct device *dev, return 0; err: - dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); return -ENOMEM; } void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req) { - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); - - dma_unmap_single(dev, reqctx->iv_dma, IV, - DMA_BIDIRECTIONAL); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index dba3dff1e209..1871500309e2 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -146,7 +146,7 @@ kctx_len) #define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\ - sizeof(struct cpl_rx_phys_dsgl)) + sizeof(struct cpl_rx_phys_dsgl) + AES_BLOCK_SIZE) #define HASH_TRANSHDR_SIZE(kctx_len)\ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES) @@ -259,7 +259,6 @@ ULP_TX_SC_MORE_V((immdatalen))) #define MAX_NK 8 #define MAX_DSGL_ENT 32 -#define MIN_CIPHER_SG 1 /* IV */ #define MIN_AUTH_SG 1 /* IV */ #define MIN_GCM_SG 1 /* IV */ #define MIN_DIGEST_SG 1 /*Partial Buffer*/ diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index 1a20424e18c6..de3a9c085daf 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -56,7 +56,7 @@ #define MAX_SALT 4 #define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \ sizeof(struct cpl_rx_phys_dsgl) + \ - sizeof(struct ulptx_sgl)) + sizeof(struct ulptx_sgl) + 16) //IV #define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \ DUMMY_BYTES + \ diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index c8e8972af283..97878d46c287 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -295,7 +295,6 @@ struct chcr_blkcipher_req_ctx { unsigned int src_ofst; unsigned int dst_ofst; unsigned int op; - dma_addr_t iv_dma; u16 imm; u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; }; @@ -327,7 +326,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req, void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, unsigned int assoclen, unsigned short op_type); void chcr_add_cipher_src_ent(struct ablkcipher_request *req, - struct ulptx_sgl *ulptx, + void *ulptx, struct cipher_wr_param *wrparam); int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req); void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req); -- cgit v1.2.3 From 4262c98aab95119ec0810b5ec4be521dda1b28b2 Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Thu, 24 May 2018 17:26:39 +0530 Subject: 
crypto: chelsio - Remove separate buffer used for DMA map B0 block in CCM Extends memory required for IV to include B0 Block and DMA map in single operation. Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 198 ++++++++++++++++------------------- drivers/crypto/chelsio/chcr_crypto.h | 12 +-- 2 files changed, 97 insertions(+), 113 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index b2bfeb251e21..b916c4eb608c 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -203,13 +203,8 @@ static inline void chcr_handle_aead_resp(struct aead_request *req, int err) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); - chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); - if (reqctx->b0_dma) - dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, - reqctx->b0_len, DMA_BIDIRECTIONAL); + chcr_aead_common_exit(req); if (reqctx->verify == VERIFY_SW) { chcr_verify_tag(req, input, &err); reqctx->verify = VERIFY_HW; @@ -2178,22 +2173,35 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) } } -static int chcr_aead_common_init(struct aead_request *req, - unsigned short op_type) +inline void chcr_aead_common_exit(struct aead_request *req) +{ + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); + + chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); +} + +static int chcr_aead_common_init(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); - int error = -EINVAL; unsigned int authsize = crypto_aead_authsize(tfm); + int error = -EINVAL; /* validate key size */ if (aeadctx->enckey_len == 0) goto err; - if (op_type && req->cryptlen < authsize) + if (reqctx->op && req->cryptlen < authsize) goto err; + if (reqctx->b0_len) + reqctx->scratch_pad = reqctx->iv + IV; + else + reqctx->scratch_pad = NULL; + error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); + reqctx->op); if (error) { error = -ENOMEM; goto err; @@ -2230,7 +2238,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) aead_request_set_tfm(subreq, aeadctx->sw_cipher); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(subreq, req->assoclen); return op_type ? 
crypto_aead_decrypt(subreq) : @@ -2239,8 +2247,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) static struct sk_buff *create_authenc_wr(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type) + int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); @@ -2264,18 +2271,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, if (req->cryptlen == 0) return NULL; - reqctx->b0_dma = 0; + reqctx->b0_len = 0; + error = chcr_aead_common_init(req); + if (error) + return ERR_PTR(error); + if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || - subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { null = 1; assoclen = 0; + reqctx->aad_nents = 0; } - error = chcr_aead_common_init(req, op_type); - if (error) - return ERR_PTR(error); dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); dnents += sg_nents_xlen(req->dst, req->cryptlen + - (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_AUTH_SG; // For IV @@ -2292,11 +2301,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, - transhdr_len, op_type)) { + transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); - return ERR_PTR(chcr_aead_fallback(req, op_type)); + chcr_aead_common_exit(req); + return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(SGE_MAX_WR_LEN, flags); if (!skb) { @@ -2306,7 +2314,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, chcr_req = __skb_put_zero(skb, transhdr_len); - temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; + temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; /* * Input order is AAD,IV and Payload. where IV should be included as @@ -2330,8 +2338,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; else temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; - chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, - (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, + chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, + (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, temp, actx->auth_mode, aeadctx->hmac_ctrl, IV >> 1); @@ -2339,7 +2347,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, 0, 0, dst_size); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; - if (op_type == CHCR_ENCRYPT_OP || + if (reqctx->op == CHCR_ENCRYPT_OP || subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) memcpy(chcr_req->key_ctx.key, aeadctx->key, @@ -2362,20 +2370,18 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, } phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); + chcr_add_aead_src_ent(req, ulptx, assoclen); atomic_inc(&adap->chcr_stats.cipher_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + (reqctx->imm ? 
(assoclen + IV + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, 0); reqctx->skb = skb; - reqctx->op = op_type; return skb; err: - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); + chcr_aead_common_exit(req); return ERR_PTR(error); } @@ -2394,11 +2400,14 @@ int chcr_aead_dma_map(struct device *dev, -authsize : authsize); if (!req->cryptlen || !dst_size) return 0; - reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, + reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, reqctx->iv_dma)) return -ENOMEM; - + if (reqctx->b0_len) + reqctx->b0_dma = reqctx->iv_dma + IV; + else + reqctx->b0_dma = 0; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); @@ -2438,7 +2447,7 @@ void chcr_aead_dma_unmap(struct device *dev, if (!req->cryptlen || !dst_size) return; - dma_unmap_single(dev, reqctx->iv_dma, IV, + dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, sg_nents(req->src), @@ -2453,8 +2462,7 @@ void chcr_aead_dma_unmap(struct device *dev, void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, - unsigned int assoclen, - unsigned short op_type) + unsigned int assoclen) { struct ulptx_walk ulp_walk; struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2462,7 +2470,7 @@ void chcr_add_aead_src_ent(struct aead_request *req, if (reqctx->imm) { u8 *buf = (u8 *)ulptx; - if (reqctx->b0_dma) { + if (reqctx->b0_len) { memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); buf += reqctx->b0_len; } @@ -2475,7 +2483,7 @@ void chcr_add_aead_src_ent(struct aead_request *req, buf, req->cryptlen, req->assoclen); } else { ulptx_walk_init(&ulp_walk, ulptx); - if (reqctx->b0_dma) + if (reqctx->b0_len) ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, &reqctx->b0_dma); ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); @@ -2489,7 +2497,6 @@ void chcr_add_aead_src_ent(struct aead_request *req, void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, unsigned int assoclen, - unsigned short op_type, unsigned short qid) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2499,11 +2506,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req, u32 temp; dsgl_walk_init(&dsgl_walk, phys_cpl); - if (reqctx->b0_dma) + if (reqctx->b0_len) dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); - temp = req->cryptlen + (op_type ? -authsize : authsize); + temp = req->cryptlen + (reqctx->op ? 
-authsize : authsize); dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); dsgl_walk_end(&dsgl_walk, qid); } @@ -2710,7 +2717,8 @@ static inline int crypto_ccm_check_iv(const u8 *iv) static int ccm_format_packet(struct aead_request *req, struct chcr_aead_ctx *aeadctx, unsigned int sub_type, - unsigned short op_type) + unsigned short op_type, + unsigned int assoclen) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); int rc = 0; @@ -2720,13 +2728,13 @@ static int ccm_format_packet(struct aead_request *req, memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); memcpy(reqctx->iv + 4, req->iv, 8); memset(reqctx->iv + 12, 0, 4); - *((unsigned short *)(reqctx->scratch_pad + 16)) = - htons(req->assoclen - 8); } else { memcpy(reqctx->iv, req->iv, 16); - *((unsigned short *)(reqctx->scratch_pad + 16)) = - htons(req->assoclen); } + if (assoclen) + *((unsigned short *)(reqctx->scratch_pad + 16)) = + htons(assoclen); + generate_b0(req, aeadctx, op_type); /* zero the ctr value */ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); @@ -2808,8 +2816,7 @@ static int aead_ccm_validate_input(unsigned short op_type, static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type) + int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); @@ -2827,22 +2834,20 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, GFP_ATOMIC; struct adapter *adap = padap(a_ctx(tfm)->dev); - reqctx->b0_dma = 0; sub_type = get_aead_subtype(tfm); if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) assoclen -= 8; - error = chcr_aead_common_init(req, op_type); + reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); + error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); - - reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); - error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); + error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type); if (error) goto err; dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); dnents += sg_nents_xlen(req->dst, req->cryptlen - + (op_type ? -authsize : authsize), + + (reqctx->op ? 
-authsize : authsize), CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_CCM_SG; // For IV and B0 dst_size = get_space_for_phys_dsgl(dnents); @@ -2858,11 +2863,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - - reqctx->b0_len, transhdr_len, op_type)) { + reqctx->b0_len, transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); - return ERR_PTR(chcr_aead_fallback(req, op_type)); + chcr_aead_common_exit(req); + return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(SGE_MAX_WR_LEN, flags); @@ -2873,7 +2877,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); - fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type); + fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); @@ -2882,21 +2886,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - error = ccm_format_packet(req, aeadctx, sub_type, op_type); + error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen); if (error) goto dstmap_fail; - - reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, - &reqctx->scratch_pad, reqctx->b0_len, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, - reqctx->b0_dma)) { - error = -ENOMEM; - goto dstmap_fail; - } - - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); + chcr_add_aead_src_ent(req, ulptx, assoclen); atomic_inc(&adap->chcr_stats.aead_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + @@ -2905,20 +2899,18 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, transhdr_len, temp, 0); reqctx->skb = skb; - reqctx->op = op_type; return skb; dstmap_fail: kfree_skb(skb); err: - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); + chcr_aead_common_exit(req); return ERR_PTR(error); } static struct sk_buff *create_gcm_wr(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type) + int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); @@ -2938,13 +2930,13 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) assoclen = req->assoclen - 8; - reqctx->b0_dma = 0; - error = chcr_aead_common_init(req, op_type); + reqctx->b0_len = 0; + error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); dnents += sg_nents_xlen(req->dst, req->cryptlen + - (op_type ? -authsize : authsize), + (reqctx->op ? 
-authsize : authsize), CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_GCM_SG; // For IV dst_size = get_space_for_phys_dsgl(dnents); @@ -2958,11 +2950,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, - transhdr_len, op_type)) { + transhdr_len, reqctx->op)) { + atomic_inc(&adap->chcr_stats.fallback); - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); - return ERR_PTR(chcr_aead_fallback(req, op_type)); + chcr_aead_common_exit(req); + return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(SGE_MAX_WR_LEN, flags); if (!skb) { @@ -2973,7 +2965,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, chcr_req = __skb_put_zero(skb, transhdr_len); //Offset of tag from end - temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; + temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( a_ctx(tfm)->dev->rx_channel_id, 2, (assoclen + 1)); @@ -2986,7 +2978,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, temp, temp); chcr_req->sec_cpl.seqno_numivs = - FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == + FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_GCM, CHCR_SCMD_AUTH_MODE_GHASH, @@ -3012,19 +3004,18 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); + chcr_add_aead_src_ent(req, ulptx, assoclen); atomic_inc(&adap->chcr_stats.aead_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + (reqctx->imm ? 
(assoclen + IV + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, reqctx->verify); reqctx->skb = skb; - reqctx->op = op_type; return skb; err: - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); + chcr_aead_common_exit(req); return ERR_PTR(error); } @@ -3558,7 +3549,6 @@ out: } static int chcr_aead_op(struct aead_request *req, - unsigned short op_type, int size, create_wr_t create_wr_fn) { @@ -3580,8 +3570,7 @@ static int chcr_aead_op(struct aead_request *req, } /* Form a WR from req */ - skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, - op_type); + skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size); if (IS_ERR(skb) || !skb) return PTR_ERR(skb); @@ -3598,21 +3587,19 @@ static int chcr_aead_encrypt(struct aead_request *req) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); reqctx->verify = VERIFY_HW; + reqctx->op = CHCR_ENCRYPT_OP; switch (get_aead_subtype(tfm)) { case CRYPTO_ALG_SUB_TYPE_CTR_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_NULL: case CRYPTO_ALG_SUB_TYPE_CTR_NULL: - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, - create_authenc_wr); + return chcr_aead_op(req, 0, create_authenc_wr); case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, - create_aead_ccm_wr); + return chcr_aead_op(req, 0, create_aead_ccm_wr); default: - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, - create_gcm_wr); + return chcr_aead_op(req, 0, create_gcm_wr); } } @@ -3630,21 +3617,18 @@ static int chcr_aead_decrypt(struct aead_request *req) size = 0; reqctx->verify = VERIFY_HW; } - + reqctx->op = CHCR_DECRYPT_OP; switch (get_aead_subtype(tfm)) { case CRYPTO_ALG_SUB_TYPE_CBC_SHA: case CRYPTO_ALG_SUB_TYPE_CTR_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_NULL: case CRYPTO_ALG_SUB_TYPE_CTR_NULL: - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, - create_authenc_wr); + return chcr_aead_op(req, size, create_authenc_wr); case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, - create_aead_ccm_wr); + return chcr_aead_op(req, size, create_aead_ccm_wr); default: - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, - create_gcm_wr); + return chcr_aead_op(req, size, create_gcm_wr); } } diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 97878d46c287..54835cb109e5 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -190,8 +190,8 @@ struct chcr_aead_reqctx { short int dst_nents; u16 imm; u16 verify; - u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; - unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; + u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE]; + u8 *scratch_pad; }; struct ulptx_walk { @@ -311,8 +311,7 @@ struct chcr_alg_template { typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type); + int size); void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); int chcr_aead_dma_map(struct device *dev, struct aead_request *req, @@ -321,10 +320,10 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req, unsigned short op_type); void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, - unsigned int assoclen, unsigned short op_type, + unsigned int assoclen, unsigned short qid); void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, - 
unsigned int assoclen, unsigned short op_type); + unsigned int assoclen); void chcr_add_cipher_src_ent(struct ablkcipher_request *req, void *ulptx, struct cipher_wr_param *wrparam); @@ -339,4 +338,5 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx, struct hash_wr_param *param); int chcr_hash_dma_map(struct device *dev, struct ahash_request *req); void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req); +void chcr_aead_common_exit(struct aead_request *req); #endif /* __CHCR_CRYPTO_H__ */ -- cgit v1.2.3 From 281a58c8326ca62ca6341f9d2cc2eb08044670e8 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Thu, 24 May 2018 15:19:06 +0100 Subject: crypto: ccree - correct host regs offset The product signature and HW revision register have different offset on the older HW revisions. This fixes the problem of the driver failing sanity check on silicon despite working on the FPGA emulation systems. Fixes: 27b3b22dd98c ("crypto: ccree - add support for older HW revs") Cc: stable@vger.kernel.org Signed-off-by: Gilad Ben-Yossef Reviewed-by: Simon Horman Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_debugfs.c | 7 +++++-- drivers/crypto/ccree/cc_driver.c | 8 ++++++-- drivers/crypto/ccree/cc_driver.h | 2 ++ drivers/crypto/ccree/cc_host_regs.h | 6 ++++-- 4 files changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 08f8db489cf0..5ca184e42483 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -26,7 +26,8 @@ struct cc_debugfs_ctx { static struct dentry *cc_debugfs_dir; static struct debugfs_reg32 debug_regs[] = { - CC_DEBUG_REG(HOST_SIGNATURE), + { .name = "SIGNATURE" }, /* Must be 0th */ + { .name = "VERSION" }, /* Must be 1st */ CC_DEBUG_REG(HOST_IRR), CC_DEBUG_REG(HOST_POWER_DOWN_EN), CC_DEBUG_REG(AXIM_MON_ERR), @@ -34,7 +35,6 @@ static struct debugfs_reg32 debug_regs[] = { CC_DEBUG_REG(HOST_IMR), CC_DEBUG_REG(AXIM_CFG), CC_DEBUG_REG(AXIM_CACHE_PARAMS), - CC_DEBUG_REG(HOST_VERSION), CC_DEBUG_REG(GPR_HOST), CC_DEBUG_REG(AXIM_MON_COMP), }; @@ -58,6 +58,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) struct debugfs_regset32 *regset; struct dentry *file; + debug_regs[0].offset = drvdata->sig_offset; + debug_regs[1].offset = drvdata->ver_offset; + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 37f2e6ec0e88..444fc5542598 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -207,9 +207,13 @@ static int init_cc_resources(struct platform_device *plat_dev) if (hw_rev->rev >= CC_HW_REV_712) { new_drvdata->hash_len_sz = HASH_LEN_SIZE_712; new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP); + new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712); + new_drvdata->ver_offset = CC_REG(HOST_VERSION_712); } else { new_drvdata->hash_len_sz = HASH_LEN_SIZE_630; new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8); + new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630); + new_drvdata->ver_offset = CC_REG(HOST_VERSION_630); } platform_set_drvdata(plat_dev, new_drvdata); @@ -276,7 +280,7 @@ static int init_cc_resources(struct platform_device *plat_dev) } /* Verify correct mapping */ - signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE)); + signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset); if (signature_val != hw_rev->sig) { dev_err(dev, "Invalid CC 
signature: SIGNATURE=0x%08X != expected=0x%08X\n", signature_val, hw_rev->sig); @@ -287,7 +291,7 @@ static int init_cc_resources(struct platform_device *plat_dev) /* Display HW versions */ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", - hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)), + hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset), DRV_MODULE_VERSION); rc = init_cc_regs(new_drvdata, true); diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index 2048fdeb9579..95f82b2d1e70 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -129,6 +129,8 @@ struct cc_drvdata { enum cc_hw_rev hw_rev; u32 hash_len_sz; u32 axim_mon_offset; + u32 sig_offset; + u32 ver_offset; }; struct cc_crypto_alg { diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h index f51001898ca1..616b2e1c41ba 100644 --- a/drivers/crypto/ccree/cc_host_regs.h +++ b/drivers/crypto/ccree/cc_host_regs.h @@ -45,7 +45,8 @@ #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL -#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL +#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL +#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL #define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL #define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL #define CC_HOST_BOOT_REG_OFFSET 0xA28UL @@ -105,7 +106,8 @@ #define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL #define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL #define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL -#define CC_HOST_VERSION_REG_OFFSET 0xA40UL +#define CC_HOST_VERSION_712_REG_OFFSET 0xA40UL +#define CC_HOST_VERSION_630_REG_OFFSET 0xAD8UL #define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL #define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL #define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL -- cgit v1.2.3 From 35f859fc936cade5bd60cf8ece27d48503084011 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Thu, 24 May 2018 15:19:07 +0100 Subject: crypto: ccree - better clock handling Use managed clock handling, differentiate between no clock (possibly OK) and clock init failure (never OK) and correctly handle clock detection being deferred. 
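In probe() terms the intended behaviour is roughly the following (a sketch of
the pattern, assuming a struct device *dev from the surrounding probe(); the
exact driver change is in the diff below):

	struct clk *clk;

	clk = devm_clk_get(dev, NULL);	/* managed lifetime: no explicit clk_put() */
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -ENOENT)
			clk = NULL;		/* no clock specified: optional, carry on */
		else if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet: retry probe later */
		else
			return PTR_ERR(clk);	/* any other failure is fatal */
	}

The sketch sets clk to NULL in the optional case, since the clk API treats a
NULL clock as a no-op; the driver below instead keeps the error pointer and
checks it at use time. Newer kernels also provide devm_clk_get_optional(),
which folds the -ENOENT case into a NULL return.
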
Suggested-by: Geert Uytterhoeven Signed-off-by: Gilad Ben-Yossef Reviewed-by: Simon Horman Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_driver.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 444fc5542598..892d4120a120 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -190,6 +190,7 @@ static int init_cc_resources(struct platform_device *plat_dev) u64 dma_mask; const struct cc_hw_data *hw_rev; const struct of_device_id *dev_id; + struct clk *clk; int rc = 0; new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL); @@ -219,7 +220,24 @@ static int init_cc_resources(struct platform_device *plat_dev) platform_set_drvdata(plat_dev, new_drvdata); new_drvdata->plat_dev = plat_dev; - new_drvdata->clk = of_clk_get(np, 0); + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) + switch (PTR_ERR(clk)) { + /* Clock is optional so this might be fine */ + case -ENOENT: + break; + + /* Clock not available, let's try again soon */ + case -EPROBE_DEFER: + return -EPROBE_DEFER; + + default: + dev_err(dev, "Error getting clock: %ld\n", + PTR_ERR(clk)); + return PTR_ERR(clk); + } + new_drvdata->clk = clk; + new_drvdata->coherent = of_dma_is_coherent(np); /* Get device resources */ -- cgit v1.2.3 From 565018b893b1fec7d57cfd0be7b678143f6c8e26 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Thu, 24 May 2018 15:19:08 +0100 Subject: crypto: ccree - silence debug prints The cache parameter register configuration was being too verbose. Use dev_dbg() to only provide the information if needed. Signed-off-by: Gilad Ben-Yossef Reviewed-by: Simon Horman Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_driver.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 892d4120a120..bd974fef05e4 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -168,14 +168,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe) val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS)); if (is_probe) - dev_info(dev, "Cache params previous: 0x%08X\n", val); + dev_dbg(dev, "Cache params previous: 0x%08X\n", val); cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params); val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS)); if (is_probe) - dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n", - val, cache_params); + dev_dbg(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n", + val, cache_params); return 0; } -- cgit v1.2.3 From 1ebe6da2f989c1ce296be69ada40e8cfc3e78b3e Mon Sep 17 00:00:00 2001 From: Conor McLoughlin Date: Fri, 25 May 2018 10:14:22 +0100 Subject: crypto: qat - Add MODULE_FIRMWARE for all qat drivers Signed-off-by: Conor McLoughlin Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_c3xxx/adf_drv.c | 2 ++ drivers/crypto/qat/qat_c62x/adf_drv.c | 2 ++ drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 1 + 3 files changed, 5 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c index f172171668ee..ba197f34c252 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -329,5 +329,7 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_C3XXX_FW); +MODULE_FIRMWARE(ADF_C3XXX_MMP); 
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index 58a984c9c3ec..59a5a0df50b6 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -329,5 +329,7 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_C62X_FW); +MODULE_FIRMWARE(ADF_C62X_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 2ce01f010c74..be5c5a988ca5 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -332,5 +332,6 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_FIRMWARE(ADF_DH895XCC_FW); +MODULE_FIRMWARE(ADF_DH895XCC_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); -- cgit v1.2.3 From edd303ff0e9ebc39118e633916278b5ca8558c30 Mon Sep 17 00:00:00 2001 From: Janakarajan Natarajan Date: Fri, 25 May 2018 15:23:29 -0500 Subject: crypto: ccp - Add DOWNLOAD_FIRMWARE SEV command The DOWNLOAD_FIRMWARE command, added as of SEV API v0.15, allows the OS to install SEV firmware newer than the currently active SEV firmware. For the new SEV firmware to be applied it must: * Pass the validation test performed by the existing firmware. * Be of the same build or a newer build compared to the existing firmware. For more information please refer to "Section 5.11 DOWNLOAD_FIRMWARE" of https://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf Signed-off-by: Janakarajan Natarajan Signed-off-by: Herbert Xu --- drivers/crypto/ccp/psp-dev.c | 99 +++++++++++++++++++++++++++++++++++++++----- drivers/crypto/ccp/psp-dev.h | 4 ++ include/linux/psp-sev.h | 12 ++++++ 3 files changed, 105 insertions(+), 10 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index d95ec526587a..12838b406696 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -22,11 +22,17 @@ #include #include #include +#include #include "sp-dev.h" #include "psp-dev.h" +#define SEV_VERSION_GREATER_OR_EQUAL(_maj, _min) \ + ((psp_master->api_major) >= _maj && \ + (psp_master->api_minor) >= _min) + #define DEVICE_NAME "sev" +#define SEV_FW_FILE "amd/sev.fw" static DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; @@ -112,6 +118,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); + case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); default: return 0; } @@ -378,6 +385,79 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len) } EXPORT_SYMBOL_GPL(psp_copy_user_blob); +static int sev_get_api_version(void) +{ + struct sev_user_data_status *status; + int error, ret; + + status = &psp_master->status_cmd_buf; + ret = sev_platform_status(status, &error); + if (ret) { + dev_err(psp_master->dev, + "SEV: failed to get status. 
Error: %#x\n", error); + return 1; + } + + psp_master->api_major = status->api_major; + psp_master->api_minor = status->api_minor; + psp_master->build = status->build; + + return 0; +} + +/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ +static int sev_update_firmware(struct device *dev) +{ + struct sev_data_download_firmware *data; + const struct firmware *firmware; + int ret, error, order; + struct page *p; + u64 data_size; + + ret = request_firmware(&firmware, SEV_FW_FILE, dev); + if (ret < 0) + return -1; + + /* + * SEV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the SEV + * FW. Allocate enough memory for data structure + alignment + * padding + SEV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(firmware->size + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) { + ret = -1; + goto fw_err; + } + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. + */ + data = page_address(p); + memcpy(page_address(p) + data_size, firmware->data, firmware->size); + + data->address = __psp_pa(page_address(p) + data_size); + data->len = firmware->size; + + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + if (ret) + dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); + else + dev_info(dev, "SEV firmware update successful\n"); + + __free_pages(p, order); + +fw_err: + release_firmware(firmware); + + return ret; +} + static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp) { struct sev_user_data_pek_cert_import input; @@ -750,7 +830,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void psp_pci_init(void) { - struct sev_user_data_status *status; struct sp_device *sp; int error, rc; @@ -760,6 +839,13 @@ void psp_pci_init(void) psp_master = sp->psp_data; + if (sev_get_api_version()) + goto err; + + if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) && + sev_update_firmware(psp_master->dev) == 0) + sev_get_api_version(); + /* Initialize the platform */ rc = sev_platform_init(&error); if (rc) { @@ -767,16 +853,9 @@ void psp_pci_init(void) goto err; } - /* Display SEV firmware version */ - status = &psp_master->status_cmd_buf; - rc = sev_platform_status(status, &error); - if (rc) { - dev_err(sp->dev, "SEV: failed to get status error %#x\n", error); - goto err; - } + dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major, + psp_master->api_minor, psp_master->build); - dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major, - status->api_minor, status->build); return; err: diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index c81f0b11287a..c7e9098a233c 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -78,6 +78,10 @@ struct psp_device { struct sev_misc_dev *sev_misc; struct sev_user_data_status status_cmd_buf; struct sev_data_init init_cmd_buf; + + u8 api_major; + u8 api_minor; + u8 build; }; #endif /* __PSP_DEV_H */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 93addfa34061..1d2496246072 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -54,6 +54,7 @@ enum sev_cmd { SEV_CMD_PDH_CERT_EXPORT = 0x008, SEV_CMD_PDH_GEN = 0x009, SEV_CMD_DF_FLUSH = 0x00A, + SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B, /* Guest commands */ SEV_CMD_DECOMMISSION = 0x020, @@ -129,6 +130,17 @@ struct sev_data_pek_cert_import { u32 oca_cert_len; /* In */ } __packed; +/** + * 
struct sev_data_download_firmware - DOWNLOAD_FIRMWARE command parameters + * + * @address: physical address of firmware image + * @len: len of the firmware image + */ +struct sev_data_download_firmware { + u64 address; /* In */ + u32 len; /* In */ +} __packed; + /** * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters * -- cgit v1.2.3 From 0b3a830bb407dce79468a26f382260131b50b3c5 Mon Sep 17 00:00:00 2001 From: Janakarajan Natarajan Date: Fri, 25 May 2018 15:23:30 -0500 Subject: crypto: ccp - Add GET_ID SEV command The GET_ID command, added as of SEV API v0.16, allows the SEV firmware to be queried about a unique CPU ID. This unique ID can then be used to obtain the public certificate containing the Chip Endorsement Key (CEK) public key signed by the AMD SEV Signing Key (ASK). For more information please refer to "Section 5.12 GET_ID" of https://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf Signed-off-by: Janakarajan Natarajan Signed-off-by: Herbert Xu --- drivers/crypto/ccp/psp-dev.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 11 +++++++++++ include/uapi/linux/psp-sev.h | 12 ++++++++++++ 3 files changed, 67 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 12838b406696..ff478d826d7d 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -119,6 +119,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); + case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); default: return 0; } @@ -510,6 +511,46 @@ e_free: return ret; } +static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) +{ + struct sev_data_get_id *data; + u64 data_size, user_size; + void *id_blob, *mem; + int ret; + + /* SEV GET_ID available from SEV API v0.16 and up */ + if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16)) + return -ENOTSUPP; + + /* SEV FW expects the buffer it fills with the ID to be + * 8-byte aligned. Memory allocated should be enough to + * hold data structure + alignment padding + memory + * where SEV FW writes the ID. 
+ */ + data_size = ALIGN(sizeof(struct sev_data_get_id), 8); + user_size = sizeof(struct sev_user_data_get_id); + + mem = kzalloc(data_size + user_size, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + data = mem; + id_blob = mem + data_size; + + data->address = __psp_pa(id_blob); + data->len = user_size; + + ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); + if (!ret) { + if (copy_to_user((void __user *)argp->data, id_blob, data->len)) + ret = -EFAULT; + } + + kfree(mem); + + return ret; +} + static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp) { struct sev_user_data_pdh_cert_export input; @@ -647,6 +688,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case SEV_PDH_CERT_EXPORT: ret = sev_ioctl_do_pdh_export(&input); break; + case SEV_GET_ID: + ret = sev_ioctl_do_get_id(&input); + break; default: ret = -EINVAL; goto out; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 1d2496246072..827c601841c4 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -55,6 +55,7 @@ enum sev_cmd { SEV_CMD_PDH_GEN = 0x009, SEV_CMD_DF_FLUSH = 0x00A, SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B, + SEV_CMD_GET_ID = 0x00C, /* Guest commands */ SEV_CMD_DECOMMISSION = 0x020, @@ -141,6 +142,16 @@ struct sev_data_download_firmware { u32 len; /* In */ } __packed; +/** + * struct sev_data_get_id - GET_ID command parameters + * + * @address: physical address of region to place unique CPU ID(s) + * @len: len of the region + */ +struct sev_data_get_id { + u64 address; /* In */ + u32 len; /* In/Out */ +} __packed; /** * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters * diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 9008f31c7eb6..ac8c60bcc83b 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -30,6 +30,7 @@ enum { SEV_PDH_GEN, SEV_PDH_CERT_EXPORT, SEV_PEK_CERT_IMPORT, + SEV_GET_ID, SEV_MAX, }; @@ -123,6 +124,17 @@ struct sev_user_data_pdh_cert_export { __u32 cert_chain_len; /* In/Out */ } __packed; +/** + * struct sev_user_data_get_id - GET_ID command parameters + * + * @socket1: Buffer to pass unique ID of first socket + * @socket2: Buffer to pass unique ID of second socket + */ +struct sev_user_data_get_id { + __u8 socket1[64]; /* Out */ + __u8 socket2[64]; /* Out */ +} __packed; + /** * struct sev_issue_cmd - SEV ioctl parameters * -- cgit v1.2.3 From 09e53d8289477449ddd74cfff640792ca02fa90d Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Sun, 27 May 2018 21:15:18 +0530 Subject: crypto: chtls - key len correction corrected the key length to copy 128b key. 
Removed the 192-bit and 256-bit key handling, since the user input in gcm_ctx only supports a 128-bit key. Reported-by: Dan Carpenter Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_hw.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index 54a13aa99121..55d50140f9e5 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -213,7 +213,7 @@ static int chtls_key_info(struct chtls_sock *csk, struct _key_ctx *kctx, u32 keylen, u32 optname) { - unsigned char key[CHCR_KEYCTX_CIPHER_KEY_SIZE_256]; + unsigned char key[AES_KEYSIZE_128]; struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; unsigned char ghash_h[AEAD_H_SIZE]; struct crypto_cipher *cipher; @@ -228,10 +228,6 @@ static int chtls_key_info(struct chtls_sock *csk, if (keylen == AES_KEYSIZE_128) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; - } else if (keylen == AES_KEYSIZE_192) { - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; - } else if (keylen == AES_KEYSIZE_256) { - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { pr_err("GCM: Invalid key length %d\n", keylen); return -EINVAL; -- cgit v1.2.3 From 3b8305f5c844685d00637a0ad155e90e423a4445 Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Sun, 27 May 2018 21:15:19 +0530 Subject: crypto: chtls - wait for memory sendmsg and sendpage address suspicious code: 1210 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1211 } The issue is that in the code above, set_bit is never reached due to the 'continue' statement at line 1208. Also reported by bug report: 1210 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Not reachable. It is required to wait for free buffer space in the send path; this change adds that wait and takes care of the previously unaddressed and unhandled SOCK_NOSPACE case. v2: use csk_mem_free where appropriate; properly indent goto do_nonblock; replace out with do_rm_wq Reported-by: Gustavo A. R. Silva Reported-by: Dan Carpenter Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls.h | 1 + drivers/crypto/chelsio/chtls/chtls_io.c | 90 +++++++++++++++++++++++++++++-- drivers/crypto/chelsio/chtls/chtls_main.c | 1 + 3 files changed, 89 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index 1b2f43ccb11e..a53a0e6ba024 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -144,6 +144,7 @@ struct chtls_dev { struct list_head rcu_node; struct list_head na_node; unsigned int send_page_order; + int max_host_sndbuf; struct key_map kmap; }; diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 840dd0100c2f..7aa5d90c6ebd 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -914,6 +914,78 @@ static u16 tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) return (__force u16)cpu_to_be16(thdr->length); } +static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) +{ + return (cdev->max_host_sndbuf - sk->sk_wmem_queued); +} + +static int csk_wait_memory(struct chtls_dev *cdev, + struct sock *sk, long *timeo_p) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int sndbuf, err = 0; + long current_timeo; + long vm_wait = 0; + bool noblock; + + current_timeo = *timeo_p; + noblock = (*timeo_p ?
false : true); + sndbuf = cdev->max_host_sndbuf; + if (csk_mem_free(cdev, sk)) { + current_timeo = (prandom_u32() % (HZ / 5)) + 2; + vm_wait = (prandom_u32() % (HZ / 5)) + 2; + } + + add_wait_queue(sk_sleep(sk), &wait); + while (1) { + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + goto do_error; + if (!*timeo_p) { + if (noblock) + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + goto do_nonblock; + } + if (signal_pending(current)) + goto do_interrupted; + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); + if (csk_mem_free(cdev, sk) && !vm_wait) + break; + + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + sk->sk_write_pending++; + sk_wait_event(sk, &current_timeo, sk->sk_err || + (sk->sk_shutdown & SEND_SHUTDOWN) || + (csk_mem_free(cdev, sk) && !vm_wait), &wait); + sk->sk_write_pending--; + + if (vm_wait) { + vm_wait -= current_timeo; + current_timeo = *timeo_p; + if (current_timeo != MAX_SCHEDULE_TIMEOUT) { + current_timeo -= vm_wait; + if (current_timeo < 0) + current_timeo = 0; + } + vm_wait = 0; + } + *timeo_p = current_timeo; + } +do_rm_wq: + remove_wait_queue(sk_sleep(sk), &wait); + return err; +do_error: + err = -EPIPE; + goto do_rm_wq; +do_nonblock: + err = -EAGAIN; + goto do_rm_wq; +do_interrupted: + err = sock_intr_errno(*timeo_p); + goto do_rm_wq; +} + int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); @@ -952,6 +1024,8 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) copy = mss - skb->len; skb->ip_summed = CHECKSUM_UNNECESSARY; } + if (!csk_mem_free(cdev, sk)) + goto wait_for_sndbuf; if (is_tls_tx(csk) && !csk->tlshws.txleft) { struct tls_hdr hdr; @@ -1099,8 +1173,10 @@ copy: if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) push_frames_if_head(sk); continue; +wait_for_sndbuf: + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: - err = sk_stream_wait_memory(sk, &timeo); + err = csk_wait_memory(cdev, sk, &timeo); if (err) goto do_error; } @@ -1131,6 +1207,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct chtls_sock *csk; + struct chtls_dev *cdev; int mss, err, copied; struct tcp_sock *tp; long timeo; @@ -1138,6 +1215,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, tp = tcp_sk(sk); copied = 0; csk = rcu_dereference_sk_user_data(sk); + cdev = csk->cdev; timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); err = sk_stream_wait_connect(sk, &timeo); @@ -1156,6 +1234,8 @@ int chtls_sendpage(struct sock *sk, struct page *page, if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || copy <= 0) { new_buf: + if (!csk_mem_free(cdev, sk)) + goto wait_for_sndbuf; if (is_tls_tx(csk)) { skb = get_record_skb(sk, @@ -1167,7 +1247,7 @@ new_buf: skb = get_tx_skb(sk, 0); } if (!skb) - goto do_error; + goto wait_for_memory; copy = mss; } if (copy > size) @@ -1206,8 +1286,12 @@ new_buf: if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)) push_frames_if_head(sk); continue; - +wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); +wait_for_memory: + err = csk_wait_memory(cdev, sk, &timeo); + if (err) + goto do_error; } out: csk_reset_flag(csk, CSK_TX_MORE_DATA); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 53ffb00d45bf..273afd3b6537 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -238,6 +238,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info
*info) spin_lock_init(&cdev->idr_lock); cdev->send_page_order = min_t(uint, get_order(32768), send_page_order); + cdev->max_host_sndbuf = 48 * 1024; if (lldi->vr->key.size) if (chtls_init_kmap(cdev, lldi)) -- cgit v1.2.3 From ea5213b0da48474e20f84a625553028cd857d551 Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Sun, 27 May 2018 21:15:20 +0530 Subject: crypto: chtls - dereference null variable skb dereferenced before check in sendpage Reported-by: Dan Carpenter Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_io.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 7aa5d90c6ebd..8cfc27b4337d 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1230,9 +1230,8 @@ int chtls_sendpage(struct sock *sk, struct page *page, struct sk_buff *skb = skb_peek_tail(&csk->txq); int copy, i; - copy = mss - skb->len; if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || - copy <= 0) { + (copy = mss - skb->len) <= 0) { new_buf: if (!csk_mem_free(cdev, sk)) goto wait_for_sndbuf; -- cgit v1.2.3 From 1dfe57aa1441a354f9f1163258795ca05c10d4f8 Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Sun, 27 May 2018 21:15:21 +0530 Subject: crypto: chtls - kbuild warnings - unindented continue - check for null page - signed return Reported-by: Dan Carpenter Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_io.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 8cfc27b4337d..51fc6821cbbf 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -907,11 +907,11 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk, } /* Read TLS header to find content type and data length */ -static u16 tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) +static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) { if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr)) return -EFAULT; - return (__force u16)cpu_to_be16(thdr->length); + return (__force int)cpu_to_be16(thdr->length); } static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) @@ -1083,9 +1083,10 @@ new_buf: int off = TCP_OFF(sk); bool merge; - if (page) - pg_size <<= compound_order(page); + if (!page) + goto wait_for_memory; + pg_size <<= compound_order(page); if (off < pg_size && skb_can_coalesce(skb, i, page, off)) { merge = 1; @@ -1492,7 +1493,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; chtls_cleanup_rbuf(sk, copied); sk_wait_data(sk, &timeo, NULL); - continue; + continue; found_ok_skb: if (!skb->len) { skb_dst_set(skb, NULL); -- cgit v1.2.3 From b268b3506d9910ca8238e92cb1dc51340574b2f2 Mon Sep 17 00:00:00 2001 From: Atul Gupta Date: Sun, 27 May 2018 21:15:22 +0530 Subject: crypto: chtls - free beyond end rspq_skb_cache Reported-by: Dan Carpenter Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 273afd3b6537..9b07f9165658 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ 
b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -250,7 +250,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info) return cdev; out_rspq_skb: - for (j = 0; j <= i; j++) + for (j = 0; j < i; j++) kfree_skb(cdev->rspq_skb_cache[j]); kfree_skb(cdev->askb); out_skb: -- cgit v1.2.3
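For reference, the one-character bound change above is the standard partial-allocation unwind: when allocating slot i fails, only slots [0, i) were ever populated, and an inclusive bound can also run past the end of the array if the error label is reached with i already at the array size. A minimal standalone sketch of the pattern, using hypothetical names rather than the actual chtls code:

#include <stdlib.h>

#define CACHE_SIZE 8

/* Allocate every cache slot, or unwind exactly the ones that succeeded. */
static int cache_init(void *cache[CACHE_SIZE])
{
	int i, j;

	for (i = 0; i < CACHE_SIZE; i++) {
		cache[i] = malloc(4096);
		if (!cache[i])
			goto out_free;
	}
	return 0;

out_free:
	/* Slot i was never allocated, so free only slots [0, i). */
	for (j = 0; j < i; j++)
		free(cache[j]);
	return -1;
}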