author     Linus Torvalds <torvalds@linux-foundation.org>   2021-04-26 08:51:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-04-26 08:51:23 -0700
commit     a4a78bc8ead44c3cdb470c6e1f37afcabdddfc14 (patch)
tree       b86bd2a304d056b23a85bb34c79e033af7ab1b0f /crypto
parent     ffc766b31e900e91454d53b8619f0ff5377df945 (diff)
parent     e3a606f2c544b231f6079c8c5fea451e772e1139 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - crypto_destroy_tfm now ignores errors as well as NULL pointers

  Algorithms:
   - Add explicit curve IDs in ECDH algorithm names
   - Add NIST P384 curve parameters
   - Add ECDSA

  Drivers:
   - Add support for Green Sardine in ccp
   - Add ecdh/curve25519 to hisilicon/hpre
   - Add support for AM64 in sa2ul"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (184 commits)
  fsverity: relax build time dependency on CRYPTO_SHA256
  fscrypt: relax Kconfig dependencies for crypto API algorithms
  crypto: camellia - drop duplicate "depends on CRYPTO"
  crypto: s5p-sss - consistently use local 'dev' variable in probe()
  crypto: s5p-sss - remove unneeded local variable initialization
  crypto: s5p-sss - simplify getting of_device_id match data
  ccp: ccp - add support for Green Sardine
  crypto: ccp - Make ccp_dev_suspend and ccp_dev_resume void functions
  crypto: octeontx2 - add support for OcteonTX2 98xx CPT block.
  crypto: chelsio/chcr - Remove useless MODULE_VERSION
  crypto: ux500/cryp - Remove duplicate argument
  crypto: chelsio - remove unused function
  crypto: sa2ul - Add support for AM64
  crypto: sa2ul - Support for per channel coherency
  dt-bindings: crypto: ti,sa2ul: Add new compatible for AM64
  crypto: hisilicon - enable new error types for QM
  crypto: hisilicon - add new error type for SEC
  crypto: hisilicon - support new error types for ZIP
  crypto: hisilicon - dynamic configuration 'err_info'
  crypto: doc - fix kernel-doc notation in chacha.c and af_alg.c
  ...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                               15
-rw-r--r--  crypto/Makefile                               6
-rw-r--r--  crypto/aegis.h                               19
-rw-r--r--  crypto/aegis128-core.c                       15
-rw-r--r--  crypto/aegis128-neon.c                       10
-rw-r--r--  crypto/af_alg.c                              94
-rw-r--r--  crypto/api.c                                  2
-rw-r--r--  crypto/asymmetric_keys/public_key.c           4
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c    49
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c      4
-rw-r--r--  crypto/crc32_generic.c                       24
-rw-r--r--  crypto/ecc.c                                291
-rw-r--r--  crypto/ecc.h                                 49
-rw-r--r--  crypto/ecc_curve_defs.h                      49
-rw-r--r--  crypto/ecdh.c                                72
-rw-r--r--  crypto/ecdh_helper.c                          4
-rw-r--r--  crypto/ecdsa.c                              376
-rw-r--r--  crypto/ecdsasignature.asn1                    4
-rw-r--r--  crypto/fcrypt.c                               5
-rw-r--r--  crypto/jitterentropy.c                        8
-rw-r--r--  crypto/keywrap.c                              4
-rw-r--r--  crypto/rng.c                                 10
-rw-r--r--  crypto/serpent_generic.c                     39
-rw-r--r--  crypto/testmgr.c                             42
-rw-r--r--  crypto/testmgr.h                            458
25 files changed, 1356 insertions, 297 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 5809cc198fa7..ca3b02dcbbfa 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -242,6 +242,16 @@ config CRYPTO_ECDH
help
Generic implementation of the ECDH algorithm
+config CRYPTO_ECDSA
+ tristate "ECDSA (NIST P192, P256 etc.) algorithm"
+ select CRYPTO_ECC
+ select CRYPTO_AKCIPHER
+ select ASN1
+ help
+ Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.)
+ is a NIST cryptographic standard algorithm. Only signature verification
+ is implemented.
+
config CRYPTO_ECRDSA
tristate "EC-RDSA (GOST 34.10) algorithm"
select CRYPTO_ECC
@@ -1213,7 +1223,6 @@ config CRYPTO_BLOWFISH_X86_64
config CRYPTO_CAMELLIA
tristate "Camellia cipher algorithms"
- depends on CRYPTO
select CRYPTO_ALGAPI
help
Camellia cipher algorithms module.
@@ -1229,7 +1238,6 @@ config CRYPTO_CAMELLIA
config CRYPTO_CAMELLIA_X86_64
tristate "Camellia cipher algorithm (x86_64)"
depends on X86 && 64BIT
- depends on CRYPTO
select CRYPTO_SKCIPHER
imply CRYPTO_CTR
help
@@ -1246,7 +1254,6 @@ config CRYPTO_CAMELLIA_X86_64
config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
depends on X86 && 64BIT
- depends on CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_SIMD
@@ -1265,7 +1272,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
depends on X86 && 64BIT
- depends on CRYPTO
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
help
Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
@@ -1281,7 +1287,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
config CRYPTO_CAMELLIA_SPARC64
tristate "Camellia cipher algorithm (SPARC64)"
depends on SPARC64
- depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
help
diff --git a/crypto/Makefile b/crypto/Makefile
index cf23affb1678..10526d4559b8 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -50,6 +50,12 @@ sm2_generic-y += sm2.o
obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
+$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
+$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
+ecdsa_generic-y += ecdsa.o
+ecdsa_generic-y += ecdsasignature.asn1.o
+obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o
+
crypto_acompress-y := acompress.o
crypto_acompress-y += scompress.o
obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
diff --git a/crypto/aegis.h b/crypto/aegis.h
index 6920ebe77679..6ef9c174c973 100644
--- a/crypto/aegis.h
+++ b/crypto/aegis.h
@@ -21,9 +21,28 @@ union aegis_block {
u8 bytes[AEGIS_BLOCK_SIZE];
};
+struct aegis_state;
+
+extern int aegis128_have_aes_insn;
+
#define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block))
#define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN)
+bool crypto_aegis128_have_simd(void);
+void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
+void crypto_aegis128_init_simd(struct aegis_state *state,
+ const union aegis_block *key,
+ const u8 *iv);
+void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
+ const u8 *src, unsigned int size);
+void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
+ const u8 *src, unsigned int size);
+int crypto_aegis128_final_simd(struct aegis_state *state,
+ union aegis_block *tag_xor,
+ unsigned int assoclen,
+ unsigned int cryptlen,
+ unsigned int authsize);
+
static __always_inline void crypto_aegis_block_xor(union aegis_block *dst,
const union aegis_block *src)
{
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 89dc1c559689..c4f1bfa1d04f 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -58,21 +58,6 @@ static bool aegis128_do_simd(void)
return false;
}
-bool crypto_aegis128_have_simd(void);
-void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
-void crypto_aegis128_init_simd(struct aegis_state *state,
- const union aegis_block *key,
- const u8 *iv);
-void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
- const u8 *src, unsigned int size);
-void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
- const u8 *src, unsigned int size);
-int crypto_aegis128_final_simd(struct aegis_state *state,
- union aegis_block *tag_xor,
- unsigned int assoclen,
- unsigned int cryptlen,
- unsigned int authsize);
-
static void crypto_aegis128_update(struct aegis_state *state)
{
union aegis_block tmp;
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
index 94d591a002a4..a7856915ec85 100644
--- a/crypto/aegis128-neon.c
+++ b/crypto/aegis128-neon.c
@@ -30,7 +30,7 @@ bool crypto_aegis128_have_simd(void)
return IS_ENABLED(CONFIG_ARM64);
}
-void crypto_aegis128_init_simd(union aegis_block *state,
+void crypto_aegis128_init_simd(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
@@ -39,14 +39,14 @@ void crypto_aegis128_init_simd(union aegis_block *state,
kernel_neon_end();
}
-void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
+void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg)
{
kernel_neon_begin();
crypto_aegis128_update_neon(state, msg);
kernel_neon_end();
}
-void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
+void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
kernel_neon_begin();
@@ -54,7 +54,7 @@ void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
kernel_neon_end();
}
-void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
+void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
kernel_neon_begin();
@@ -62,7 +62,7 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
kernel_neon_end();
}
-int crypto_aegis128_final_simd(union aegis_block *state,
+int crypto_aegis128_final_simd(struct aegis_state *state,
union aegis_block *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 9acb9d2c4bcf..18cc82dc4a42 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -491,8 +491,8 @@ static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
/**
* af_alg_alloc_tsgl - allocate the TX SGL
*
- * @sk socket of connection to user space
- * @return: 0 upon success, < 0 upon error
+ * @sk: socket of connection to user space
+ * Return: 0 upon success, < 0 upon error
*/
static int af_alg_alloc_tsgl(struct sock *sk)
{
@@ -525,15 +525,15 @@ static int af_alg_alloc_tsgl(struct sock *sk)
}
/**
- * aead_count_tsgl - Count number of TX SG entries
+ * af_alg_count_tsgl - Count number of TX SG entries
*
* The counting starts from the beginning of the SGL to @bytes. If
- * an offset is provided, the counting of the SG entries starts at the offset.
+ * an @offset is provided, the counting of the SG entries starts at the @offset.
*
- * @sk socket of connection to user space
- * @bytes Count the number of SG entries holding given number of bytes.
- * @offset Start the counting of SG entries from the given offset.
- * @return Number of TX SG entries found given the constraints
+ * @sk: socket of connection to user space
+ * @bytes: Count the number of SG entries holding given number of bytes.
+ * @offset: Start the counting of SG entries from the given offset.
+ * Return: Number of TX SG entries found given the constraints
*/
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
@@ -577,19 +577,19 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
/**
- * aead_pull_tsgl - Release the specified buffers from TX SGL
+ * af_alg_pull_tsgl - Release the specified buffers from TX SGL
*
- * If @dst is non-null, reassign the pages to dst. The caller must release
+ * If @dst is non-null, reassign the pages to @dst. The caller must release
* the pages. If @dst_offset is given only reassign the pages to @dst starting
* at the @dst_offset (byte). The caller must ensure that @dst is large
* enough (e.g. by using af_alg_count_tsgl with the same offset).
*
- * @sk socket of connection to user space
- * @used Number of bytes to pull from TX SGL
- * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
- * caller must release the buffers in dst.
- * @dst_offset Reassign the TX SGL from given offset. All buffers before
- * reaching the offset is released.
+ * @sk: socket of connection to user space
+ * @used: Number of bytes to pull from TX SGL
+ * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+ * caller must release the buffers in dst.
+ * @dst_offset: Reassign the TX SGL from given offset. All buffers before
+ * reaching the offset is released.
*/
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset)
@@ -657,7 +657,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
/**
* af_alg_free_areq_sgls - Release TX and RX SGLs of the request
*
- * @areq Request holding the TX and RX SGL
+ * @areq: Request holding the TX and RX SGL
*/
static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
@@ -692,9 +692,9 @@ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
/**
* af_alg_wait_for_wmem - wait for availability of writable memory
*
- * @sk socket of connection to user space
- * @flags If MSG_DONTWAIT is set, then only report if function would sleep
- * @return 0 when writable memory is available, < 0 upon error
+ * @sk: socket of connection to user space
+ * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
+ * Return: 0 when writable memory is available, < 0 upon error
*/
static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
@@ -725,7 +725,7 @@ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
/**
* af_alg_wmem_wakeup - wakeup caller when writable memory is available
*
- * @sk socket of connection to user space
+ * @sk: socket of connection to user space
*/
void af_alg_wmem_wakeup(struct sock *sk)
{
@@ -748,10 +748,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
/**
* af_alg_wait_for_data - wait for availability of TX data
*
- * @sk socket of connection to user space
- * @flags If MSG_DONTWAIT is set, then only report if function would sleep
- * @min Set to minimum request size if partial requests are allowed.
- * @return 0 when writable memory is available, < 0 upon error
+ * @sk: socket of connection to user space
+ * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
+ * @min: Set to minimum request size if partial requests are allowed.
+ * Return: 0 when writable memory is available, < 0 upon error
*/
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
{
@@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
/**
* af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
*
- * @sk socket of connection to user space
+ * @sk: socket of connection to user space
*/
static void af_alg_data_wakeup(struct sock *sk)
{
@@ -820,12 +820,12 @@ static void af_alg_data_wakeup(struct sock *sk)
*
* In addition, the ctx is filled with the information sent via CMSG.
*
- * @sock socket of connection to user space
- * @msg message from user space
- * @size size of message from user space
- * @ivsize the size of the IV for the cipher operation to verify that the
+ * @sock: socket of connection to user space
+ * @msg: message from user space
+ * @size: size of message from user space
+ * @ivsize: the size of the IV for the cipher operation to verify that the
* user-space-provided IV has the right size
- * @return the number of copied data upon success, < 0 upon error
+ * Return: the number of copied data upon success, < 0 upon error
*/
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize)
@@ -977,6 +977,11 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
* af_alg_sendpage - sendpage system call handler
+ * @sock: socket of connection to user space to write to
+ * @page: data to send
+ * @offset: offset into page to begin sending
+ * @size: length of data
+ * @flags: message send/receive flags
*
* This is a generic implementation of sendpage to fill ctx->tsgl_list.
*/
@@ -1035,6 +1040,7 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
/**
* af_alg_free_resources - release resources required for crypto request
+ * @areq: Request holding the TX and RX SGL
*/
void af_alg_free_resources(struct af_alg_async_req *areq)
{
@@ -1047,6 +1053,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
* af_alg_async_cb - AIO callback handler
+ * @_req: async request info
+ * @err: if non-zero, error result to be returned via ki_complete();
+ * otherwise return the AIO output length via ki_complete().
*
* This handler cleans up the struct af_alg_async_req upon completion of the
* AIO operation.
@@ -1073,6 +1082,9 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb);
/**
* af_alg_poll - poll system call handler
+ * @file: file pointer
+ * @sock: socket to poll
+ * @wait: poll_table
*/
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait)
@@ -1098,9 +1110,9 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
/**
* af_alg_alloc_areq - allocate struct af_alg_async_req
*
- * @sk socket of connection to user space
- * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
- * @return allocated data structure or ERR_PTR upon error
+ * @sk: socket of connection to user space
+ * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize
+ * Return: allocated data structure or ERR_PTR upon error
*/
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen)
@@ -1125,13 +1137,13 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
* af_alg_get_rsgl - create the RX SGL for the output data from the crypto
* operation
*
- * @sk socket of connection to user space
- * @msg user space message
- * @flags flags used to invoke recvmsg with
- * @areq instance of the cryptographic request that will hold the RX SGL
- * @maxsize maximum number of bytes to be pulled from user space
- * @outlen number of bytes in the RX SGL
- * @return 0 on success, < 0 upon error
+ * @sk: socket of connection to user space
+ * @msg: user space message
+ * @flags: flags used to invoke recvmsg with
+ * @areq: instance of the cryptographic request that will hold the RX SGL
+ * @maxsize: maximum number of bytes to be pulled from user space
+ * @outlen: number of bytes in the RX SGL
+ * Return: 0 on success, < 0 upon error
*/
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
struct af_alg_async_req *areq, size_t maxsize,
diff --git a/crypto/api.c b/crypto/api.c
index ed08cbd5b9d3..c4eda56cff89 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
- if (unlikely(!mem))
+ if (IS_ERR_OR_NULL(mem))
return;
alg = tfm->__crt_alg;
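
With crypto_destroy_tfm() now tolerating ERR_PTR values as well as NULL, callers can free a tfm unconditionally on their error paths. A minimal sketch of the pattern this enables, assuming the existing shash API; the helper name is made up and the snippet is not part of this series:

#include <linux/err.h>
#include <crypto/hash.h>

/* Hypothetical helper: digest a buffer with SHA-256, freeing the tfm
 * unconditionally. crypto_free_shash() ends in crypto_destroy_tfm(), which
 * after this change is a no-op for NULL and ERR_PTR alike.
 */
static int example_hash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	int err = PTR_ERR_OR_ZERO(tfm);

	if (!err)
		err = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);		/* safe even when tfm is an ERR_PTR */
	return err;
}
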
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 788a4ba1e2e7..4fefb219bfdc 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
+#include <linux/asn1.h>
#include <keys/asymmetric-subtype.h>
#include <crypto/public_key.h>
#include <crypto/akcipher.h>
@@ -85,7 +86,8 @@ int software_key_determine_akcipher(const char *encoding,
return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
}
- if (strcmp(encoding, "raw") == 0) {
+ if (strcmp(encoding, "raw") == 0 ||
+ strcmp(encoding, "x962") == 0) {
strcpy(alg_name, pkey->pkey_algo);
return 0;
}
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 52c9b455fc7d..6d003096b5bc 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -227,6 +227,26 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+ case OID_id_ecdsa_with_sha1:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha224:
+ ctx->cert->sig->hash_algo = "sha224";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha256:
+ ctx->cert->sig->hash_algo = "sha256";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha384:
+ ctx->cert->sig->hash_algo = "sha384";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha512:
+ ctx->cert->sig->hash_algo = "sha512";
+ goto ecdsa;
+
case OID_gost2012Signature256:
ctx->cert->sig->hash_algo = "streebog256";
goto ecrdsa;
@@ -255,6 +275,11 @@ sm2:
ctx->cert->sig->encoding = "raw";
ctx->algo_oid = ctx->last_oid;
return 0;
+ecdsa:
+ ctx->cert->sig->pkey_algo = "ecdsa";
+ ctx->cert->sig->encoding = "x962";
+ ctx->algo_oid = ctx->last_oid;
+ return 0;
}
/*
@@ -276,7 +301,8 @@ int x509_note_signature(void *context, size_t hdrlen,
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
- strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0) {
+ strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 ||
+ strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
@@ -459,6 +485,7 @@ int x509_extract_key_data(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
+ enum OID oid;
ctx->key_algo = ctx->last_oid;
switch (ctx->last_oid) {
@@ -470,7 +497,25 @@ int x509_extract_key_data(void *context, size_t hdrlen,
ctx->cert->pub->pkey_algo = "ecrdsa";
break;
case OID_id_ecPublicKey:
- ctx->cert->pub->pkey_algo = "sm2";
+ if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
+ return -EBADMSG;
+
+ switch (oid) {
+ case OID_sm2:
+ ctx->cert->pub->pkey_algo = "sm2";
+ break;
+ case OID_id_prime192v1:
+ ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
+ break;
+ case OID_id_prime256v1:
+ ctx->cert->pub->pkey_algo = "ecdsa-nist-p256";
+ break;
+ case OID_id_ansip384r1:
+ ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
+ break;
+ default:
+ return -ENOPKG;
+ }
break;
default:
return -ENOPKG;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index ae450eb8be14..3d45161b271a 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -129,7 +129,9 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
}
ret = -EKEYREJECTED;
- if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
+ if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 &&
+ (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 ||
+ strcmp(cert->sig->pkey_algo, "ecdsa") != 0))
goto out;
ret = public_key_verify_signature(cert->pub, cert->sig);
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 0e103fb5dd77..a989cb44fd16 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -1,26 +1,4 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Xyratex Technology Limited
*/
diff --git a/crypto/ecc.c b/crypto/ecc.c
index c80aa25994a0..afc6cefdc1d9 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <crypto/ecc_curve.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
@@ -42,7 +43,14 @@ typedef struct {
u64 m_high;
} uint128_t;
-static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+/* Returns curve25519 curve param */
+const struct ecc_curve *ecc_get_curve25519(void)
+{
+ return &ecc_25519;
+}
+EXPORT_SYMBOL(ecc_get_curve25519);
+
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
switch (curve_id) {
/* In FIPS mode only allow P256 and higher */
@@ -50,10 +58,13 @@ static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
return fips_enabled ? NULL : &nist_p192;
case ECC_CURVE_NIST_P256:
return &nist_p256;
+ case ECC_CURVE_NIST_P384:
+ return &nist_p384;
default:
return NULL;
}
}
+EXPORT_SYMBOL(ecc_get_curve);
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
@@ -128,7 +139,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits)
}
EXPORT_SYMBOL(vli_is_zero);
-/* Returns nonzero if bit bit of vli is set. */
+/* Returns nonzero if bit of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
{
return (vli[bit / 64] & ((u64)1 << (bit % 64)));
@@ -775,18 +786,133 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
}
}
+#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32)
+#define AND64H(x64) (x64 & 0xffFFffFF00000000ull)
+#define AND64L(x64) (x64 & 0x00000000ffFFffFFull)
+
+/* Computes result = product % curve_prime
+ * from "Mathematical routines for the NIST prime elliptic curves"
+ */
+static void vli_mmod_fast_384(u64 *result, const u64 *product,
+ const u64 *curve_prime, u64 *tmp)
+{
+ int carry;
+ const unsigned int ndigits = 6;
+
+ /* t */
+ vli_set(result, product, ndigits);
+
+ /* s1 */
+ tmp[0] = 0; // 0 || 0
+ tmp[1] = 0; // 0 || 0
+ tmp[2] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
+ tmp[3] = product[11]>>32; // 0 ||a23
+ tmp[4] = 0; // 0 || 0
+ tmp[5] = 0; // 0 || 0
+ carry = vli_lshift(tmp, tmp, 1, ndigits);
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s2 */
+ tmp[0] = product[6]; //a13||a12
+ tmp[1] = product[7]; //a15||a14
+ tmp[2] = product[8]; //a17||a16
+ tmp[3] = product[9]; //a19||a18
+ tmp[4] = product[10]; //a21||a20
+ tmp[5] = product[11]; //a23||a22
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s3 */
+ tmp[0] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
+ tmp[1] = SL32OR32(product[6], (product[11]>>32)); //a12||a23
+ tmp[2] = SL32OR32(product[7], (product[6])>>32); //a14||a13
+ tmp[3] = SL32OR32(product[8], (product[7]>>32)); //a16||a15
+ tmp[4] = SL32OR32(product[9], (product[8]>>32)); //a18||a17
+ tmp[5] = SL32OR32(product[10], (product[9]>>32)); //a20||a19
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s4 */
+ tmp[0] = AND64H(product[11]); //a23|| 0
+ tmp[1] = (product[10]<<32); //a20|| 0
+ tmp[2] = product[6]; //a13||a12
+ tmp[3] = product[7]; //a15||a14
+ tmp[4] = product[8]; //a17||a16
+ tmp[5] = product[9]; //a19||a18
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s5 */
+ tmp[0] = 0; // 0|| 0
+ tmp[1] = 0; // 0|| 0
+ tmp[2] = product[10]; //a21||a20
+ tmp[3] = product[11]; //a23||a22
+ tmp[4] = 0; // 0|| 0
+ tmp[5] = 0; // 0|| 0
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* s6 */
+ tmp[0] = AND64L(product[10]); // 0 ||a20
+ tmp[1] = AND64H(product[10]); //a21|| 0
+ tmp[2] = product[11]; //a23||a22
+ tmp[3] = 0; // 0 || 0
+ tmp[4] = 0; // 0 || 0
+ tmp[5] = 0; // 0 || 0
+ carry += vli_add(result, result, tmp, ndigits);
+
+ /* d1 */
+ tmp[0] = SL32OR32(product[6], (product[11]>>32)); //a12||a23
+ tmp[1] = SL32OR32(product[7], (product[6]>>32)); //a14||a13
+ tmp[2] = SL32OR32(product[8], (product[7]>>32)); //a16||a15
+ tmp[3] = SL32OR32(product[9], (product[8]>>32)); //a18||a17
+ tmp[4] = SL32OR32(product[10], (product[9]>>32)); //a20||a19
+ tmp[5] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ /* d2 */
+ tmp[0] = (product[10]<<32); //a20|| 0
+ tmp[1] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
+ tmp[2] = (product[11]>>32); // 0 ||a23
+ tmp[3] = 0; // 0 || 0
+ tmp[4] = 0; // 0 || 0
+ tmp[5] = 0; // 0 || 0
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ /* d3 */
+ tmp[0] = 0; // 0 || 0
+ tmp[1] = AND64H(product[11]); //a23|| 0
+ tmp[2] = product[11]>>32; // 0 ||a23
+ tmp[3] = 0; // 0 || 0
+ tmp[4] = 0; // 0 || 0
+ tmp[5] = 0; // 0 || 0
+ carry -= vli_sub(result, result, tmp, ndigits);
+
+ if (carry < 0) {
+ do {
+ carry += vli_add(result, result, curve_prime, ndigits);
+ } while (carry < 0);
+ } else {
+ while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
+ carry -= vli_sub(result, result, curve_prime, ndigits);
+ }
+
+}
+
+#undef SL32OR32
+#undef AND64H
+#undef AND64L
+
/* Computes result = product % curve_prime for different curve_primes.
*
* Note that curve_primes are distinguished just by heuristic check and
* not by complete conformance check.
*/
static bool vli_mmod_fast(u64 *result, u64 *product,
- const u64 *curve_prime, unsigned int ndigits)
+ const struct ecc_curve *curve)
{
u64 tmp[2 * ECC_MAX_DIGITS];
+ const u64 *curve_prime = curve->p;
+ const unsigned int ndigits = curve->g.ndigits;
- /* Currently, both NIST primes have -1 in lowest qword. */
- if (curve_prime[0] != -1ull) {
+ /* All NIST curves have name prefix 'nist_' */
+ if (strncmp(curve->name, "nist_", 5) != 0) {
/* Try to handle Pseudo-Marsenne primes. */
if (curve_prime[ndigits - 1] == -1ull) {
vli_mmod_special(result, product, curve_prime,
@@ -809,6 +935,9 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
case 4:
vli_mmod_fast_256(result, product, curve_prime, tmp);
break;
+ case 6:
+ vli_mmod_fast_384(result, product, curve_prime, tmp);
+ break;
default:
pr_err_ratelimited("ecc: unsupported digits size!\n");
return false;
@@ -832,22 +961,22 @@ EXPORT_SYMBOL(vli_mod_mult_slow);
/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
- const u64 *curve_prime, unsigned int ndigits)
+ const struct ecc_curve *curve)
{
u64 product[2 * ECC_MAX_DIGITS];
- vli_mult(product, left, right, ndigits);
- vli_mmod_fast(result, product, curve_prime, ndigits);
+ vli_mult(product, left, right, curve->g.ndigits);
+ vli_mmod_fast(result, product, curve);
}
/* Computes result = left^2 % curve_prime. */
static void vli_mod_square_fast(u64 *result, const u64 *left,
- const u64 *curve_prime, unsigned int ndigits)
+ const struct ecc_curve *curve)
{
u64 product[2 * ECC_MAX_DIGITS];
- vli_square(product, left, ndigits);
- vli_mmod_fast(result, product, curve_prime, ndigits);
+ vli_square(product, left, curve->g.ndigits);
+ vli_mmod_fast(result, product, curve);
}
#define EVEN(vli) (!(vli[0] & 1))
@@ -945,25 +1074,27 @@ static bool ecc_point_is_zero(const struct ecc_point *point)
/* Double in place */
static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
- u64 *curve_prime, unsigned int ndigits)
+ const struct ecc_curve *curve)
{
/* t1 = x, t2 = y, t3 = z */
u64 t4[ECC_MAX_DIGITS];
u64 t5[ECC_MAX_DIGITS];
+ const u64 *curve_prime = curve->p;
+ const unsigned int ndigits = curve->g.ndigits;
if (vli_is_zero(z1, ndigits))
return;
/* t4 = y1^2 */
- vli_mod_square_fast(t4, y1, curve_prime, ndigits);
+ vli_mod_square_fast(t4, y1, curve);
/* t5 = x1*y1^2 = A */
- vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
+ vli_mod_mult_fast(t5, x1, t4, curve);
/* t4 = y1^4 */
- vli_mod_square_fast(t4, t4, curve_prime, ndigits);
+ vli_mod_square_fast(t4, t4, curve);
/* t2 = y1*z1 = z3 */
- vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
+ vli_mod_mult_fast(y1, y1, z1, curve);
/* t3 = z1^2 */
- vli_mod_square_fast(z1, z1, curve_prime, ndigits);
+ vli_mod_square_fast(z1, z1, curve);
/* t1 = x1 + z1^2 */
vli_mod_add(x1, x1, z1, curve_prime, ndigits);
@@ -972,7 +1103,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
/* t3 = x1 - z1^2 */
vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
/* t1 = x1^2 - z1^4 */
- vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);
+ vli_mod_mult_fast(x1, x1, z1, curve);
/* t3 = 2*(x1^2 - z1^4) */
vli_mod_add(z1, x1, x1, curve_prime, ndigits);
@@ -989,7 +1120,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
/* t1 = 3/2*(x1^2 - z1^4) = B */
/* t3 = B^2 */
- vli_mod_square_fast(z1, x1, curve_prime, ndigits);
+ vli_mod_square_fast(z1, x1, curve);
/* t3 = B^2 - A */
vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
/* t3 = B^2 - 2A = x3 */
@@ -997,7 +1128,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
/* t5 = A - x3 */
vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
/* t1 = B * (A - x3) */
- vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+ vli_mod_mult_fast(x1, x1, t5, curve);
/* t4 = B * (A - x3) - y1^4 = y3 */
vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
@@ -1007,23 +1138,22 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
}
/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
-static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
- unsigned int ndigits)
+static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve)
{
u64 t1[ECC_MAX_DIGITS];
- vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */
- vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
- vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */
- vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
+ vli_mod_square_fast(t1, z, curve); /* z^2 */
+ vli_mod_mult_fast(x1, x1, t1, curve); /* x1 * z^2 */
+ vli_mod_mult_fast(t1, t1, z, curve); /* z^3 */
+ vli_mod_mult_fast(y1, y1, t1, curve); /* y1 * z^3 */
}
/* P = (x1, y1) => 2P, (x2, y2) => P' */
static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
- u64 *p_initial_z, u64 *curve_prime,
- unsigned int ndigits)
+ u64 *p_initial_z, const struct ecc_curve *curve)
{
u64 z[ECC_MAX_DIGITS];
+ const unsigned int ndigits = curve->g.ndigits;
vli_set(x2, x1, ndigits);
vli_set(y2, y1, ndigits);
@@ -1034,35 +1164,37 @@ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
if (p_initial_z)
vli_set(z, p_initial_z, ndigits);
- apply_z(x1, y1, z, curve_prime, ndigits);
+ apply_z(x1, y1, z, curve);
- ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);
+ ecc_point_double_jacobian(x1, y1, z, curve);
- apply_z(x2, y2, z, curve_prime, ndigits);
+ apply_z(x2, y2, z, curve);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
* Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
* or P => P', Q => P + Q
*/
-static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
- unsigned int ndigits)
+static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+ const struct ecc_curve *curve)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
u64 t5[ECC_MAX_DIGITS];
+ const u64 *curve_prime = curve->p;
+ const unsigned int ndigits = curve->g.ndigits;
/* t5 = x2 - x1 */
vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
/* t5 = (x2 - x1)^2 = A */
- vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+ vli_mod_square_fast(t5, t5, curve);
/* t1 = x1*A = B */
- vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+ vli_mod_mult_fast(x1, x1, t5, curve);
/* t3 = x2*A = C */
- vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+ vli_mod_mult_fast(x2, x2, t5, curve);
/* t4 = y2 - y1 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
/* t5 = (y2 - y1)^2 = D */
- vli_mod_square_fast(t5, y2, curve_prime, ndigits);
+ vli_mod_square_fast(t5, y2, curve);
/* t5 = D - B */
vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
@@ -1071,11 +1203,11 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
/* t3 = C - B */
vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
/* t2 = y1*(C - B) */
- vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
+ vli_mod_mult_fast(y1, y1, x2, curve);
/* t3 = B - x3 */
vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
/* t4 = (y2 - y1)*(B - x3) */
- vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
+ vli_mod_mult_fast(y2, y2, x2, curve);
/* t4 = y3 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
@@ -1086,22 +1218,24 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
* Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
* or P => P - Q, Q => P + Q
*/
-static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
- unsigned int ndigits)
+static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+ const struct ecc_curve *curve)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
u64 t5[ECC_MAX_DIGITS];
u64 t6[ECC_MAX_DIGITS];
u64 t7[ECC_MAX_DIGITS];
+ const u64 *curve_prime = curve->p;
+ const unsigned int ndigits = curve->g.ndigits;
/* t5 = x2 - x1 */
vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
/* t5 = (x2 - x1)^2 = A */
- vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+ vli_mod_square_fast(t5, t5, curve);
/* t1 = x1*A = B */
- vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+ vli_mod_mult_fast(x1, x1, t5, curve);
/* t3 = x2*A = C */
- vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+ vli_mod_mult_fast(x2, x2, t5, curve);
/* t4 = y2 + y1 */
vli_mod_add(t5, y2, y1, curve_prime, ndigits);
/* t4 = y2 - y1 */
@@ -1110,29 +1244,29 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
/* t6 = C - B */
vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
/* t2 = y1 * (C - B) */
- vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
+ vli_mod_mult_fast(y1, y1, t6, curve);
/* t6 = B + C */
vli_mod_add(t6, x1, x2, curve_prime, ndigits);
/* t3 = (y2 - y1)^2 */
- vli_mod_square_fast(x2, y2, curve_prime, ndigits);
+ vli_mod_square_fast(x2, y2, curve);
/* t3 = x3 */
vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
/* t7 = B - x3 */
vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
/* t4 = (y2 - y1)*(B - x3) */
- vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
+ vli_mod_mult_fast(y2, y2, t7, curve);
/* t4 = y3 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
/* t7 = (y2 + y1)^2 = F */
- vli_mod_square_fast(t7, t5, curve_prime, ndigits);
+ vli_mod_square_fast(t7, t5, curve);
/* t7 = x3' */
vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
/* t6 = x3' - B */
vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
/* t6 = (y2 + y1)*(x3' - B) */
- vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
+ vli_mod_mult_fast(t6, t6, t5, curve);
/* t2 = y3' */
vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
@@ -1162,41 +1296,37 @@ static void ecc_point_mult(struct ecc_point *result,
vli_set(rx[1], point->x, ndigits);
vli_set(ry[1], point->y, ndigits);
- xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
- ndigits);
+ xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve);
for (i = num_bits - 2; i > 0; i--) {
nb = !vli_test_bit(scalar, i);
- xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
- ndigits);
- xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
- ndigits);
+ xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
+ xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
}
nb = !vli_test_bit(scalar, 0);
- xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
- ndigits);
+ xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
/* Find final 1/Z value. */
/* X1 - X0 */
vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
/* Yb * (X1 - X0) */
- vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
+ vli_mod_mult_fast(z, z, ry[1 - nb], curve);
/* xP * Yb * (X1 - X0) */
- vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);
+ vli_mod_mult_fast(z, z, point->x, curve);
/* 1 / (xP * Yb * (X1 - X0)) */
vli_mod_inv(z, z, curve_prime, point->ndigits);
/* yP / (xP * Yb * (X1 - X0)) */
- vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
+ vli_mod_mult_fast(z, z, point->y, curve);
/* Xb * yP / (xP * Yb * (X1 - X0)) */
- vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
+ vli_mod_mult_fast(z, z, rx[1 - nb], curve);
/* End 1/Z calculation */
- xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);
+ xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
- apply_z(rx[0], ry[0], z, curve_prime, ndigits);
+ apply_z(rx[0], ry[0], z, curve);
vli_set(result->x, rx[0], ndigits);
vli_set(result->y, ry[0], ndigits);
@@ -1217,9 +1347,9 @@ static void ecc_point_add(const struct ecc_point *result,
vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
vli_set(px, p->x, ndigits);
vli_set(py, p->y, ndigits);
- xycz_add(px, py, result->x, result->y, curve->p, ndigits);
+ xycz_add(px, py, result->x, result->y, curve);
vli_mod_inv(z, z, curve->p, ndigits);
- apply_z(result->x, result->y, z, curve->p, ndigits);
+ apply_z(result->x, result->y, z, curve);
}
/* Computes R = u1P + u2Q mod p using Shamir's trick.
@@ -1248,8 +1378,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
points[2] = q;
points[3] = &sum;
- num_bits = max(vli_num_bits(u1, ndigits),
- vli_num_bits(u2, ndigits));
+ num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
i = num_bits - 1;
idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
point = points[idx];
@@ -1260,7 +1389,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
z[0] = 1;
for (--i; i >= 0; i--) {
- ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
+ ecc_point_double_jacobian(rx, ry, z, curve);
idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
point = points[idx];
if (point) {
@@ -1270,27 +1399,17 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
vli_set(tx, point->x, ndigits);
vli_set(ty, point->y, ndigits);
- apply_z(tx, ty, z, curve->p, ndigits);
+ apply_z(tx, ty, z, curve);
vli_mod_sub(tz, rx, tx, curve->p, ndigits);
- xycz_add(tx, ty, rx, ry, curve->p, ndigits);
- vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
+ xycz_add(tx, ty, rx, ry, curve);
+ vli_mod_mult_fast(z, z, tz, curve);
}
}
vli_mod_inv(z, z, curve->p, ndigits);
- apply_z(rx, ry, z, curve->p, ndigits);
+ apply_z(rx, ry, z, curve);
}
EXPORT_SYMBOL(ecc_point_mult_shamir);
-static inline void ecc_swap_digits(const u64 *in, u64 *out,
- unsigned int ndigits)
-{
- const __be64 *src = (__force __be64 *)in;
- int i;
-
- for (i = 0; i < ndigits; i++)
- out[i] = be64_to_cpu(src[ndigits - 1 - i]);
-}
-
static int __ecc_is_key_valid(const struct ecc_curve *curve,
const u64 *private_key, unsigned int ndigits)
{
@@ -1441,10 +1560,10 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
return -EINVAL;
/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
- vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
- vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
- vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
- vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+ vli_mod_square_fast(yy, pk->y, curve); /* y^2 */
+ vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */
+ vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */
+ vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */
vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
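
For reference, vli_mmod_fast_384() above is the Solinas-style fast reduction for the P-384 prime from the cited NIST routines document. Writing the 768-bit product as 32-bit limbs a0..a23 (a sketch; the limb groupings are exactly the values assembled into tmp[] before each vli_add()/vli_sub()):

    r = t + 2*s1 + s2 + s3 + s4 + s5 + s6 - d1 - d2 - d3   (mod p384)

where t is the low 384 bits (a0..a11) of the product and each s_i/d_i is a 384-bit value built from the high limbs a12..a23; the trailing carry loop in the function then folds r back into the range [0, p384).
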
diff --git a/crypto/ecc.h b/crypto/ecc.h
index d4e546b9ad79..a006132646a4 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -26,49 +26,34 @@
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H
+#include <crypto/ecc_curve.h>
+
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
-#define ECC_MAX_DIGITS (512 / 64)
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
#define ECC_DIGITS_TO_BYTES_SHIFT 3
-/**
- * struct ecc_point - elliptic curve point in affine coordinates
- *
- * @x: X coordinate in vli form.
- * @y: Y coordinate in vli form.
- * @ndigits: Length of vlis in u64 qwords.
- */
-struct ecc_point {
- u64 *x;
- u64 *y;
- u8 ndigits;
-};
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
/**
- * struct ecc_curve - definition of elliptic curve
- *
- * @name: Short name of the curve.
- * @g: Generator point of the curve.
- * @p: Prime number, if Barrett's reduction is used for this curve
- * pre-calculated value 'mu' is appended to the @p after ndigits.
- * Use of Barrett's reduction is heuristically determined in
- * vli_mmod_fast().
- * @n: Order of the curve group.
- * @a: Curve parameter a.
- * @b: Curve parameter b.
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
*/
-struct ecc_curve {
- char *name;
- struct ecc_point g;
- u64 *p;
- u64 *n;
- u64 *a;
- u64 *b;
-};
+static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+}
/**
* ecc_is_key_valid() - Validate a given ECDH private key
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 69be6c7d228f..9719934c9428 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -54,4 +54,53 @@ static struct ecc_curve nist_p256 = {
.b = nist_p256_b
};
+/* NIST P-384 */
+static u64 nist_p384_g_x[] = { 0x3A545E3872760AB7ull, 0x5502F25DBF55296Cull,
+ 0x59F741E082542A38ull, 0x6E1D3B628BA79B98ull,
+ 0x8Eb1C71EF320AD74ull, 0xAA87CA22BE8B0537ull };
+static u64 nist_p384_g_y[] = { 0x7A431D7C90EA0E5Full, 0x0A60B1CE1D7E819Dull,
+ 0xE9DA3113B5F0B8C0ull, 0xF8F41DBD289A147Cull,
+ 0x5D9E98BF9292DC29ull, 0x3617DE4A96262C6Full };
+static u64 nist_p384_p[] = { 0x00000000FFFFFFFFull, 0xFFFFFFFF00000000ull,
+ 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p384_n[] = { 0xECEC196ACCC52973ull, 0x581A0DB248B0A77Aull,
+ 0xC7634D81F4372DDFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p384_a[] = { 0x00000000FFFFFFFCull, 0xFFFFFFFF00000000ull,
+ 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull,
+ 0x0314088f5013875aull, 0x181d9c6efe814112ull,
+ 0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull };
+static struct ecc_curve nist_p384 = {
+ .name = "nist_384",
+ .g = {
+ .x = nist_p384_g_x,
+ .y = nist_p384_g_y,
+ .ndigits = 6,
+ },
+ .p = nist_p384_p,
+ .n = nist_p384_n,
+ .a = nist_p384_a,
+ .b = nist_p384_b
+};
+
+/* curve25519 */
+static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000,
+ 0x0000000000000000, 0x0000000000000000 };
+static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff,
+ 0xffffffffffffffff, 0x7fffffffffffffff };
+static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000,
+ 0x0000000000000000, 0x0000000000000000 };
+static const struct ecc_curve ecc_25519 = {
+ .name = "curve25519",
+ .g = {
+ .x = curve25519_g_x,
+ .ndigits = 4,
+ },
+ .p = curve25519_p,
+ .a = curve25519_a,
+};
+
#endif
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 96f80c8f8e30..04a427b8c956 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -23,33 +23,16 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
return kpp_tfm_ctx(tfm);
}
-static unsigned int ecdh_supported_curve(unsigned int curve_id)
-{
- switch (curve_id) {
- case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS;
- case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS;
- default: return 0;
- }
-}
-
static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params;
- unsigned int ndigits;
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
- params.key_size > sizeof(ctx->private_key))
+ params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL;
- ndigits = ecdh_supported_curve(params.curve_id);
- if (!ndigits)
- return -EINVAL;
-
- ctx->curve_id = params.curve_id;
- ctx->ndigits = ndigits;
-
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
@@ -140,13 +123,24 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
}
-static struct kpp_alg ecdh = {
+static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+ ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS;
+
+ return 0;
+}
+
+static struct kpp_alg ecdh_nist_p192 = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
+ .init = ecdh_nist_p192_init_tfm,
.base = {
- .cra_name = "ecdh",
+ .cra_name = "ecdh-nist-p192",
.cra_driver_name = "ecdh-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
@@ -154,14 +148,48 @@ static struct kpp_alg ecdh = {
},
};
+static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+ ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS;
+
+ return 0;
+}
+
+static struct kpp_alg ecdh_nist_p256 = {
+ .set_secret = ecdh_set_secret,
+ .generate_public_key = ecdh_compute_value,
+ .compute_shared_secret = ecdh_compute_value,
+ .max_size = ecdh_max_size,
+ .init = ecdh_nist_p256_init_tfm,
+ .base = {
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "ecdh-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecdh_ctx),
+ },
+};
+
+static bool ecdh_nist_p192_registered;
+
static int ecdh_init(void)
{
- return crypto_register_kpp(&ecdh);
+ int ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p192);
+ ecdh_nist_p192_registered = ret == 0;
+
+ return crypto_register_kpp(&ecdh_nist_p256);
}
static void ecdh_exit(void)
{
- crypto_unregister_kpp(&ecdh);
+ if (ecdh_nist_p192_registered)
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ crypto_unregister_kpp(&ecdh_nist_p256);
}
subsys_initcall(ecdh_init);
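
Since the curve is now carried in the algorithm name rather than in struct ecdh, in-kernel users select the curve when allocating the tfm. A rough sketch of the new calling convention (illustrative only; the helper name, buffer size, and empty key are placeholders, and the curve_id packing removed from the helper is shown in the ecdh_helper.c hunk below):

#include <linux/err.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

static int example_ecdh_setup(void)
{
	/* the curve is selected via the algorithm name */
	struct crypto_kpp *tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	struct ecdh p = {};	/* empty key: ecdh_set_secret() generates one */
	char buf[64];		/* generously sized for this sketch */
	unsigned int len = crypto_ecdh_key_len(&p);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ecdh_encode_key(buf, len, &p);	/* no curve_id packed any more */
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);

	crypto_free_kpp(tfm);
	return err;
}
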
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index fca63b559f65..f18f9028f912 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -10,7 +10,7 @@
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
-#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))
+#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short))
static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
{
@@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len,
return -EINVAL;
ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
- ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
ecdh_pack_data(ptr, params->key, params->key_size);
@@ -70,7 +69,6 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
if (unlikely(len < secret.len))
return -EINVAL;
- ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params))
return -EINVAL;
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
new file mode 100644
index 000000000000..1e7b15009bf6
--- /dev/null
+++ b/crypto/ecdsa.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 IBM Corporation
+ */
+
+#include <linux/module.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/ecdh.h>
+#include <linux/asn1_decoder.h>
+#include <linux/scatterlist.h>
+
+#include "ecc.h"
+#include "ecdsasignature.asn1.h"
+
+struct ecc_ctx {
+ unsigned int curve_id;
+ const struct ecc_curve *curve;
+
+ bool pub_key_set;
+ u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */
+ u64 y[ECC_MAX_DIGITS];
+ struct ecc_point pub_key;
+};
+
+struct ecdsa_signature_ctx {
+ const struct ecc_curve *curve;
+ u64 r[ECC_MAX_DIGITS];
+ u64 s[ECC_MAX_DIGITS];
+};
+
+/*
+ * Get the r and s components of a signature from the X509 certificate.
+ */
+static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen, unsigned int ndigits)
+{
+ size_t keylen = ndigits * sizeof(u64);
+ ssize_t diff = vlen - keylen;
+ const char *d = value;
+ u8 rs[ECC_MAX_BYTES];
+
+ if (!value || !vlen)
+ return -EINVAL;
+
+ /* diff = 0: 'value' has exactly the right size
+ * diff > 0: 'value' has too many bytes; one leading zero is allowed that
+ * makes the value a positive integer; error on more
+ * diff < 0: 'value' is missing leading zeros, which we add
+ */
+ if (diff > 0) {
+ /* skip over leading zeros that make 'value' a positive int */
+ if (*d == 0) {
+ vlen -= 1;
+ diff--;
+ d++;
+ }
+ if (diff)
+ return -EINVAL;
+ }
+ if (-diff >= keylen)
+ return -EINVAL;
+
+ if (diff) {
+ /* leading zeros not given in 'value' */
+ memset(rs, 0, -diff);
+ }
+
+ memcpy(&rs[-diff], d, vlen);
+
+ ecc_swap_digits((u64 *)rs, dest, ndigits);
+
+ return 0;
+}
+
+int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecdsa_signature_ctx *sig = context;
+
+ return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
+ sig->curve->g.ndigits);
+}
+
+int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecdsa_signature_ctx *sig = context;
+
+ return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
+ sig->curve->g.ndigits);
+}
+
+static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
+{
+ const struct ecc_curve *curve = ctx->curve;
+ unsigned int ndigits = curve->g.ndigits;
+ u64 s1[ECC_MAX_DIGITS];
+ u64 u1[ECC_MAX_DIGITS];
+ u64 u2[ECC_MAX_DIGITS];
+ u64 x1[ECC_MAX_DIGITS];
+ u64 y1[ECC_MAX_DIGITS];
+ struct ecc_point res = ECC_POINT_INIT(x1, y1, ndigits);
+
+ /* 0 < r < n and 0 < s < n */
+ if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
+ vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0)
+ return -EBADMSG;
+
+ /* hash is given */
+ pr_devel("hash : %016llx %016llx ... %016llx\n",
+ hash[ndigits - 1], hash[ndigits - 2], hash[0]);
+
+ /* s1 = (s^-1) mod n */
+ vli_mod_inv(s1, s, curve->n, ndigits);
+ /* u1 = (hash * s1) mod n */
+ vli_mod_mult_slow(u1, hash, s1, curve->n, ndigits);
+ /* u2 = (r * s1) mod n */
+ vli_mod_mult_slow(u2, r, s1, curve->n, ndigits);
+ /* res = u1*G + u2 * pub_key */
+ ecc_point_mult_shamir(&res, u1, &curve->g, u2, &ctx->pub_key, curve);
+
+ /* res.x = res.x mod n (if res.x > order) */
+ if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
+ /* faster alternative for NIST p384, p256 & p192 */
+ vli_sub(res.x, res.x, curve->n, ndigits);
+
+ if (!vli_cmp(res.x, r, ndigits))
+ return 0;
+
+ return -EKEYREJECTED;
+}
+
+/*
+ * Verify an ECDSA signature.
+ */
+static int ecdsa_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
+ struct ecdsa_signature_ctx sig_ctx = {
+ .curve = ctx->curve,
+ };
+ u8 rawhash[ECC_MAX_BYTES];
+ u64 hash[ECC_MAX_DIGITS];
+ unsigned char *buffer;
+ ssize_t diff;
+ int ret;
+
+ if (unlikely(!ctx->pub_key_set))
+ return -EINVAL;
+
+ buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ sg_pcopy_to_buffer(req->src,
+ sg_nents_for_len(req->src, req->src_len + req->dst_len),
+ buffer, req->src_len + req->dst_len, 0);
+
+ ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
+ buffer, req->src_len);
+ if (ret < 0)
+ goto error;
+
+ /* if the hash is shorter then we will add leading zeros to fit to ndigits */
+ diff = keylen - req->dst_len;
+ if (diff >= 0) {
+ if (diff)
+ memset(rawhash, 0, diff);
+ memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
+ } else if (diff < 0) {
+ /* given hash is longer, we take the left-most bytes */
+ memcpy(&rawhash, buffer + req->src_len, keylen);
+ }
+
+ ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
+
+ ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
+
+error:
+ kfree(buffer);
+
+ return ret;
+}
+
+static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
+{
+ ctx->curve_id = curve_id;
+ ctx->curve = ecc_get_curve(curve_id);
+ if (!ctx->curve)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx)
+{
+ ctx->pub_key_set = false;
+}
+
+static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
+{
+ unsigned int curve_id = ctx->curve_id;
+ int ret;
+
+ ecdsa_ecc_ctx_deinit(ctx);
+ ret = ecdsa_ecc_ctx_init(ctx, curve_id);
+ if (ret == 0)
+ ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y,
+ ctx->curve->g.ndigits);
+ return ret;
+}
+
+/*
+ * Set the public key given the raw uncompressed key data from an X509
+ * certificate. The key data contain the concatenated X and Y coordinates of
+ * the public key.
+ */
+static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ const unsigned char *d = key;
+ const u64 *digits = (const u64 *)&d[1];
+ unsigned int ndigits;
+ int ret;
+
+ ret = ecdsa_ecc_ctx_reset(ctx);
+ if (ret < 0)
+ return ret;
+
+ if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
+ return -EINVAL;
+ /* we only accept uncompressed format indicated by '4' */
+ if (d[0] != 4)
+ return -EINVAL;
+
+ keylen--;
+ ndigits = (keylen >> 1) / sizeof(u64);
+ if (ndigits != ctx->curve->g.ndigits)
+ return -EINVAL;
+
+ ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
+ ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
+ ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
+
+ ctx->pub_key_set = ret == 0;
+
+ return ret;
+}
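
The key blob that ecdsa_set_pub_key() consumes is the SEC1 uncompressed point encoding: a single 0x04 byte followed by the big-endian X and Y coordinates. As a hedged illustration (not part of this patch; the helper and buffer names are invented), building such a blob for NIST P-256 looks roughly like this:

/*
 * Sketch only: assemble the 0x04 || X || Y buffer expected by
 * ecdsa_set_pub_key() for NIST P-256 (32-byte coordinates).
 * x_be/y_be are assumed big-endian coordinate buffers.
 */
static unsigned int build_p256_pub_key(const u8 x_be[32], const u8 y_be[32],
					u8 out[65])
{
	out[0] = 0x04;			/* uncompressed point marker */
	memcpy(&out[1], x_be, 32);	/* X coordinate, big endian */
	memcpy(&out[33], y_be, 32);	/* Y coordinate, big endian */
	return 65;			/* keylen to pass to set_pub_key */
}

ecc_swap_digits() above then turns those big-endian bytes into the little-endian u64 digits the ecc core works with.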
+
+static void ecdsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ecdsa_ecc_ctx_deinit(ctx);
+}
+
+static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+}
+
+static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
+}
+
+static struct akcipher_alg ecdsa_nist_p384 = {
+ .verify = ecdsa_verify,
+ .set_pub_key = ecdsa_set_pub_key,
+ .max_size = ecdsa_max_size,
+ .init = ecdsa_nist_p384_init_tfm,
+ .exit = ecdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecdsa-nist-p384",
+ .cra_driver_name = "ecdsa-nist-p384-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecc_ctx),
+ },
+};
+
+static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
+}
+
+static struct akcipher_alg ecdsa_nist_p256 = {
+ .verify = ecdsa_verify,
+ .set_pub_key = ecdsa_set_pub_key,
+ .max_size = ecdsa_max_size,
+ .init = ecdsa_nist_p256_init_tfm,
+ .exit = ecdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecdsa-nist-p256",
+ .cra_driver_name = "ecdsa-nist-p256-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecc_ctx),
+ },
+};
+
+static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192);
+}
+
+static struct akcipher_alg ecdsa_nist_p192 = {
+ .verify = ecdsa_verify,
+ .set_pub_key = ecdsa_set_pub_key,
+ .max_size = ecdsa_max_size,
+ .init = ecdsa_nist_p192_init_tfm,
+ .exit = ecdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecdsa-nist-p192",
+ .cra_driver_name = "ecdsa-nist-p192-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecc_ctx),
+ },
+};
+
+static bool ecdsa_nist_p192_registered;
+
+static int ecdsa_init(void)
+{
+ int ret;
+
+ /* NIST p192 may not be available in FIPS mode */
+ ret = crypto_register_akcipher(&ecdsa_nist_p192);
+ ecdsa_nist_p192_registered = ret == 0;
+
+ ret = crypto_register_akcipher(&ecdsa_nist_p256);
+ if (ret)
+ goto nist_p256_error;
+
+ ret = crypto_register_akcipher(&ecdsa_nist_p384);
+ if (ret)
+ goto nist_p384_error;
+
+ return 0;
+
+nist_p384_error:
+ crypto_unregister_akcipher(&ecdsa_nist_p256);
+
+nist_p256_error:
+ if (ecdsa_nist_p192_registered)
+ crypto_unregister_akcipher(&ecdsa_nist_p192);
+ return ret;
+}
+
+static void ecdsa_exit(void)
+{
+ if (ecdsa_nist_p192_registered)
+ crypto_unregister_akcipher(&ecdsa_nist_p192);
+ crypto_unregister_akcipher(&ecdsa_nist_p256);
+ crypto_unregister_akcipher(&ecdsa_nist_p384);
+}
+
+subsys_initcall(ecdsa_init);
+module_exit(ecdsa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
+MODULE_DESCRIPTION("ECDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecdsa-generic");
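
For orientation, here is a hedged sketch (not part of this patch) of how a kernel caller might drive the new "ecdsa-nist-p256" instance through the akcipher API. It mirrors the pattern used by public_key_verify_signature(): req->src carries the BER-encoded signature followed by the raw digest, and dst is unused. Function and variable names are invented, and sig/digest are assumed to live in kmalloc'd memory so they can sit in a scatterlist:

/*
 * Sketch only: verify a DER-encoded ECDSA signature over a SHA-256
 * digest with the generic "ecdsa-nist-p256" driver added above.
 */
static int verify_p256_sketch(const u8 *key, unsigned int keylen,
			      u8 *sig, unsigned int sig_len,
			      u8 *digest /* 32 bytes */)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src_sg[2];
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_akcipher("ecdsa-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, key, keylen);
	if (err)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	/* src carries the signature followed by the digest */
	sg_init_table(src_sg, 2);
	sg_set_buf(&src_sg[0], sig, sig_len);
	sg_set_buf(&src_sg[1], digest, 32);
	akcipher_request_set_crypt(req, src_sg, NULL, sig_len, 32);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	err = crypto_wait_req(crypto_akcipher_verify(req), &wait);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}

A return value of 0 means the signature verified; -EKEYREJECTED means it did not.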
diff --git a/crypto/ecdsasignature.asn1 b/crypto/ecdsasignature.asn1
new file mode 100644
index 000000000000..621ab754fb9f
--- /dev/null
+++ b/crypto/ecdsasignature.asn1
@@ -0,0 +1,4 @@
+ECDSASignature ::= SEQUENCE {
+ r INTEGER ({ ecdsa_get_signature_r }),
+ s INTEGER ({ ecdsa_get_signature_s })
+}
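
The decoder built from this template accepts the standard DER encoding of an ECDSA signature, i.e. a SEQUENCE of the two INTEGERs r and s. As an illustration (values elided), a NIST P-256 signature is laid out roughly as:

	30 44			SEQUENCE, 0x44 bytes
	   02 20 <r, 32 bytes>	INTEGER r
	   02 20 <s, 32 bytes>	INTEGER s

An extra leading 0x00 byte (and a 0x21 length) is inserted whenever the high bit of r or s is set, which is why the P-256 test vectors added to testmgr.h below range from 70 to 72 bytes.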
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index c36ea0c8be98..76a04d000c0d 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -63,10 +63,7 @@ do { \
} while (0)
/* Rotate right one 64 bit number as a 56 bit number */
-#define ror56_64(k, n) \
-do { \
- k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
-} while (0)
+#define ror56_64(k, n) (k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)))
/*
* Sboxes for Feistel network derived from
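
As a quick worked example of the 56-bit rotate above: for k = 0xff and n = 8, k >> 8 is 0 and the masked low byte 0xff is shifted left by 56 - 8 = 48 bits, so ror56_64(k, 8) yields 0x00ff000000000000, i.e. the byte wraps around within the 56-bit word rather than the full 64 bits.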
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 6e147c43fc18..a11b3208760f 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -597,7 +597,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
if (!ec)
return -1;
- while (0 < len) {
+ while (len > 0) {
unsigned int tocopy;
jent_gen_entropy(ec);
@@ -678,7 +678,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
}
/* verify and set the oversampling rate */
- if (0 == osr)
+ if (osr == 0)
osr = 1; /* minimum sampling rate is 1 */
entropy_collector->osr = osr;
@@ -769,7 +769,7 @@ int jent_entropy_init(void)
* etc. with the goal to clear it to get the worst case
* measurements.
*/
- if (CLEARCACHE > i)
+ if (i < CLEARCACHE)
continue;
if (stuck)
@@ -826,7 +826,7 @@ int jent_entropy_init(void)
* should not fail. The value of 3 should cover the NTP case being
* performed during our test run.
*/
- if (3 < time_backwards)
+ if (time_backwards > 3)
return JENT_ENOMONOTONIC;
/*
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 3517773bc7f7..054d9a216fc9 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -114,9 +114,9 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
scatterwalk_start(walk, sg);
scatterwalk_advance(walk, skip);
break;
- } else
- skip -= sg->length;
+ }
+ skip -= sg->length;
sg = sg_next(sg);
}
}
diff --git a/crypto/rng.c b/crypto/rng.c
index a888d84b524a..fea082b25fe4 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
u8 *buf = NULL;
int err;
- crypto_stats_get(alg);
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
- if (!buf) {
- crypto_alg_put(alg);
+ if (!buf)
return -ENOMEM;
- }
err = get_random_bytes_wait(buf, slen);
- if (err) {
- crypto_alg_put(alg);
+ if (err)
goto out;
- }
seed = buf;
}
+ crypto_stats_get(alg);
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
crypto_stats_rng_seed(alg, err);
out:
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 236c87547a17..45f98b750053 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -272,6 +272,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0, r1, r2, r3, r4;
+ __le32 *lk;
int i;
/* Copy key, add padding */
@@ -283,22 +284,32 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0;
+ lk = (__le32 *)k;
+ k[0] = le32_to_cpu(lk[0]);
+ k[1] = le32_to_cpu(lk[1]);
+ k[2] = le32_to_cpu(lk[2]);
+ k[3] = le32_to_cpu(lk[3]);
+ k[4] = le32_to_cpu(lk[4]);
+ k[5] = le32_to_cpu(lk[5]);
+ k[6] = le32_to_cpu(lk[6]);
+ k[7] = le32_to_cpu(lk[7]);
+
/* Expand key using polynomial */
- r0 = le32_to_cpu(k[3]);
- r1 = le32_to_cpu(k[4]);
- r2 = le32_to_cpu(k[5]);
- r3 = le32_to_cpu(k[6]);
- r4 = le32_to_cpu(k[7]);
-
- keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0);
- keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1);
- keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2);
- keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3);
- keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4);
- keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5);
- keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6);
- keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7);
+ r0 = k[3];
+ r1 = k[4];
+ r2 = k[5];
+ r3 = k[6];
+ r4 = k[7];
+
+ keyiter(k[0], r0, r4, r2, 0, 0);
+ keyiter(k[1], r1, r0, r3, 1, 1);
+ keyiter(k[2], r2, r1, r4, 2, 2);
+ keyiter(k[3], r3, r2, r0, 3, 3);
+ keyiter(k[4], r4, r3, r1, 4, 4);
+ keyiter(k[5], r0, r4, r2, 5, 5);
+ keyiter(k[6], r1, r0, r3, 6, 6);
+ keyiter(k[7], r2, r1, r4, 7, 7);
keyiter(k[0], r3, r2, r0, 8, 8);
keyiter(k[1], r4, r3, r1, 9, 9);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 93359999c94b..10c5b3b01ec4 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1168,11 +1168,6 @@ static inline int check_shash_op(const char *op, int err,
return err;
}
-static inline const void *sg_data(struct scatterlist *sg)
-{
- return page_address(sg_page(sg)) + sg->offset;
-}
-
/* Test one hash test vector in one configuration, using the shash API */
static int test_shash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
@@ -1230,7 +1225,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
return 0;
if (cfg->nosimd)
crypto_disable_simd_for_test();
- err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]),
+ err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]),
tsgl->sgl[0].length, result);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
@@ -1266,7 +1261,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
if (divs[i]->nosimd)
crypto_disable_simd_for_test();
- err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]),
+ err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length, result);
if (divs[i]->nosimd)
crypto_reenable_simd_for_test();
@@ -1278,7 +1273,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
}
if (divs[i]->nosimd)
crypto_disable_simd_for_test();
- err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]),
+ err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length);
if (divs[i]->nosimd)
crypto_reenable_simd_for_test();
@@ -4904,11 +4899,38 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
#endif
- .alg = "ecdh",
+#ifndef CONFIG_CRYPTO_FIPS
+ .alg = "ecdh-nist-p192",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
- .kpp = __VECS(ecdh_tv_template)
+ .kpp = __VECS(ecdh_p192_tv_template)
+ }
+ }, {
+#endif
+ .alg = "ecdh-nist-p256",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ecdh_p256_tv_template)
+ }
+ }, {
+ .alg = "ecdsa-nist-p192",
+ .test = alg_test_akcipher,
+ .suite = {
+ .akcipher = __VECS(ecdsa_nist_p192_tv_template)
+ }
+ }, {
+ .alg = "ecdsa-nist-p256",
+ .test = alg_test_akcipher,
+ .suite = {
+ .akcipher = __VECS(ecdsa_nist_p256_tv_template)
+ }
+ }, {
+ .alg = "ecdsa-nist-p384",
+ .test = alg_test_akcipher,
+ .suite = {
+ .akcipher = __VECS(ecdsa_nist_p384_tv_template)
}
}, {
.alg = "ecrdsa",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index ced56ea0c9b4..34e4a3db3991 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -567,6 +567,430 @@ static const struct akcipher_testvec rsa_tv_template[] = {
};
/*
+ * ECDSA test vectors.
+ */
+static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
+ {
+ .key =
+ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+ "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+ "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+ "\x98",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+ "\x63\x85\xe7\x82",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
+ "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
+ "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
+ "\x80\x6f\xa5\x79\x77\xda\xd0",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
+ "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
+ "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
+ "\xa3",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40"
+ "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11",
+ .m_size = 28,
+ .algo = OID_id_ecdsa_with_sha224,
+ .c =
+ "\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b"
+ "\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14"
+ "\x4f\x53\x75\xc8\x02\x48\xeb\xc3\x92\x0f\x1e\x72\xee\xc4\xa3\xe3"
+ "\x5c\x99\xdb\x92\x5b\x36",
+ .c_size = 54,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae"
+ "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56"
+ "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58"
+ "\x91",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd"
+ "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7",
+ .m_size = 32,
+ .algo = OID_id_ecdsa_with_sha256,
+ .c =
+ "\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56"
+ "\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3"
+ "\x69\x43\xfd\x48\x19\x86\xcf\x32\xdd\x41\x74\x6a\x51\xc7\xd9\x7d"
+ "\x3a\x97\xd9\xcd\x1a\x6a\x49",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7"
+ "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f"
+ "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e"
+ "\x8b",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9"
+ "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61"
+ "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78",
+ .m_size = 48,
+ .algo = OID_id_ecdsa_with_sha384,
+ .c =
+ "\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34"
+ "\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64"
+ "\xbc\x5a\x1f\x82\x96\x61\xd7\xd1\x01\x77\x44\x5d\x53\xa4\x7c\x93"
+ "\x12\x3b\x3b\x28\xfb\x6d\xe1",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea"
+ "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d"
+ "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11"
+ "\x57",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23"
+ "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67"
+ "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70"
+ "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5",
+ .m_size = 64,
+ .algo = OID_id_ecdsa_with_sha512,
+ .c =
+ "\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07"
+ "\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73"
+ "\xb4\x22\x9a\x98\x73\x3c\x83\xa9\x14\x2a\x5e\xf5\xe5\xfb\x72\x28"
+ "\x6a\xdf\x97\xfd\x82\x76\x24",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
+static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
+ {
+ .key =
+ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+ "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+ "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+ "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+ "\xaf",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+ "\x0b\xde\x6a\x42",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
+ "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
+ "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
+ "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
+ "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
+ .c_size = 72,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
+ "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
+ "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
+ "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0"
+ "\xd4",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41"
+ "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0",
+ .m_size = 28,
+ .algo = OID_id_ecdsa_with_sha224,
+ .c =
+ "\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59"
+ "\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25"
+ "\xf5\x81\xd2\x1e\x02\x20\x5f\xf8\x74\xf8\x57\xd0\x5e\x54\x76\x20"
+ "\x4a\x77\x22\xec\xc8\x66\xbf\x50\x05\x58\x39\x0e\x26\x92\xce\xd5"
+ "\x2e\x8b\xde\x5a\x04\x0e",
+ .c_size = 70,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f"
+ "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00"
+ "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9"
+ "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e"
+ "\xb8",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b"
+ "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4",
+ .m_size = 32,
+ .algo = OID_id_ecdsa_with_sha256,
+ .c =
+ "\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63"
+ "\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67"
+ "\xdc\x0c\xd0\x82\x02\x21\x00\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9"
+ "\xa9\x81\xac\x4a\x50\xd0\x91\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f"
+ "\x2a\x65\x35\x23\xe3\x1d\xfa",
+ .c_size = 71,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d"
+ "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e"
+ "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00"
+ "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2"
+ "\x7c",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6"
+ "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94"
+ "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb",
+ .m_size = 48,
+ .algo = OID_id_ecdsa_with_sha384,
+ .c =
+ "\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95"
+ "\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c"
+ "\xc7\x94\xb1\xd5\xfe\x02\x21\x00\xeb\xa7\x80\x26\xdc\xf9\x3a\x44"
+ "\x19\xfb\x5f\x92\xf4\xc9\x23\x37\x69\xf4\x3b\x4f\x47\xcf\x9b\x16"
+ "\xc0\x60\x11\x92\xdc\x17\x89\x12",
+ .c_size = 72,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
+ "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a"
+ "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38"
+ "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b"
+ "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92"
+ "\xbf",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9"
+ "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca"
+ "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf"
+ "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6",
+ .m_size = 64,
+ .algo = OID_id_ecdsa_with_sha512,
+ .c =
+ "\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44"
+ "\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04"
+ "\x99\x09\x37\x4d\xfa\x02\x20\x1e\xb9\x75\x31\xf6\x04\xa5\x4d\xf8"
+ "\x00\xdd\xab\xd4\xc0\x2b\xe6\x5c\xad\xc3\x78\x1c\xc2\xc1\x19\x76"
+ "\x31\x79\x4a\xe9\x81\x6a\xee",
+ .c_size = 71,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
+static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
+ {
+ .key = /* secp384r1(sha1) */
+ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+ "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+ "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+ "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+ "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+ "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+ "\xf1",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+ "\x3a\x69\xc1\x93",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
+ "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
+ "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
+ "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
+ "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
+ "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
+ "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key = /* secp384r1(sha224) */
+ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
+ "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
+ "\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05"
+ "\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc"
+ "\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50"
+ "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d"
+ "\xe0",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd"
+ "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5",
+ .m_size = 28,
+ .algo = OID_id_ecdsa_with_sha224,
+ .c =
+ "\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4"
+ "\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4"
+ "\xe3\x93\x07\xe0\x99\xc9\x9c\x11\xb8\x10\x01\xc5\x41\x3f\xdd\x15"
+ "\x1b\x68\x2b\x9d\x8b\x02\x31\x00\x8b\x03\x2c\xfc\x1f\xd1\xa9\xa4"
+ "\x4b\x00\x08\x31\x6c\xf5\xd5\xf6\xdf\xd8\x68\xa2\x64\x42\x65\xf3"
+ "\x4d\xd0\xc6\x6e\xb0\xe9\xfc\x14\x9f\x19\xd0\x42\x8b\x93\xc2\x11"
+ "\x88\x2b\x82\x26\x5e\x1c\xda\xfb",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key = /* secp384r1(sha256) */
+ "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a"
+ "\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a"
+ "\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e"
+ "\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4"
+ "\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52"
+ "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc"
+ "\xab",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3"
+ "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2",
+ .m_size = 32,
+ .algo = OID_id_ecdsa_with_sha256,
+ .c =
+ "\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce"
+ "\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84"
+ "\x04\xe2\xf9\x67\xb6\xc6\xfc\x70\x2e\x66\x3c\x77\xc8\x8d\x2c\x79"
+ "\x3a\x8e\x32\xc4\x02\x30\x40\x34\xb8\x90\xa9\x80\xab\x47\x26\xa2"
+ "\xb0\x89\x42\x0a\xda\xd9\xdd\xce\xbc\xb2\x97\xf4\x9c\xf3\x15\x68"
+ "\xc0\x75\x3e\x23\x5e\x36\x4f\x8d\xde\x1e\x93\x8d\x95\xbb\x10\x0e"
+ "\xf4\x1f\x39\xca\x4d\x43",
+ .c_size = 102,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key = /* secp384r1(sha384) */
+ "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f"
+ "\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9"
+ "\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c"
+ "\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a"
+ "\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9"
+ "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89"
+ "\x9e",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf"
+ "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1"
+ "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff",
+ .m_size = 48,
+ .algo = OID_id_ecdsa_with_sha384,
+ .c =
+ "\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62"
+ "\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb"
+ "\xca\x8c\xd6\xb6\xb4\x3b\x1f\x07\x8f\x15\x02\xfe\x1d\xa2\xa4\xc8"
+ "\xf2\xea\x9d\x11\x1f\x02\x31\x00\xfc\x50\xf6\x43\xbd\x50\x82\x0e"
+ "\xbf\xe3\x75\x24\x49\xac\xfb\xc8\x71\xcd\x8f\x18\x99\xf0\x0f\x13"
+ "\x44\x92\x8c\x86\x99\x65\xb3\x97\x96\x17\x04\xc9\x05\x77\xf1\x8e"
+ "\xab\x8d\x4e\xde\xe6\x6d\x9b\x66",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key = /* secp384r1(sha512) */
+ "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46"
+ "\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4"
+ "\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95"
+ "\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe"
+ "\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa"
+ "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac"
+ "\xa3",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1"
+ "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71"
+ "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc"
+ "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e",
+ .m_size = 64,
+ .algo = OID_id_ecdsa_with_sha512,
+ .c =
+ "\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02"
+ "\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3"
+ "\xaf\xb3\xd9\xdc\x2b\x43\x0e\x6a\xb3\x53\x6f\x3e\xb3\xc7\xa8\xb3"
+ "\x17\x77\xd1\x02\x30\x63\xf6\xf0\x3d\x5f\x5f\x99\x3f\xde\x3a\x3d"
+ "\x16\xaf\xb4\x52\x6a\xec\x63\xe3\x0c\xec\x50\xdc\xcc\xc4\x6a\x03"
+ "\x5f\x8d\x7a\xf9\xfb\x34\xe4\x8b\x80\xa5\xb6\xda\x2c\x4e\x45\xcf"
+ "\x3c\x93\xff\x50\x5d",
+ .c_size = 101,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
+/*
* EC-RDSA test vectors are generated by gost-engine.
*/
static const struct akcipher_testvec ecrdsa_tv_template[] = {
@@ -2261,19 +2685,17 @@ static const struct kpp_testvec curve25519_tv_template[] = {
}
};
-static const struct kpp_testvec ecdh_tv_template[] = {
- {
#ifndef CONFIG_CRYPTO_FIPS
+static const struct kpp_testvec ecdh_p192_tv_template[] = {
+ {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x20\x00" /* len */
- "\x01\x00" /* curve_id */
+ "\x1e\x00" /* len */
"\x18\x00" /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x20" /* len */
- "\x00\x01" /* curve_id */
+ "\x00\x1e" /* len */
"\x00\x18" /* key_size */
#endif
"\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
@@ -2301,18 +2723,20 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.b_public_size = 48,
.expected_a_public_size = 48,
.expected_ss_size = 24
- }, {
+ }
+};
#endif
+
+static const struct kpp_testvec ecdh_p256_tv_template[] = {
+ {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x28\x00" /* len */
- "\x02\x00" /* curve_id */
+ "\x26\x00" /* len */
"\x20\x00" /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x28" /* len */
- "\x00\x02" /* curve_id */
+ "\x00\x26" /* len */
"\x00\x20" /* key_size */
#endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
@@ -2350,25 +2774,21 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x08\x00" /* len */
- "\x02\x00" /* curve_id */
+ "\x06\x00" /* len */
"\x00\x00", /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x08" /* len */
- "\x00\x02" /* curve_id */
+ "\x00\x06" /* len */
"\x00\x00", /* key_size */
#endif
.b_secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x28\x00" /* len */
- "\x02\x00" /* curve_id */
+ "\x26\x00" /* len */
"\x20\x00" /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x28" /* len */
- "\x00\x02" /* curve_id */
+ "\x00\x26" /* len */
"\x00\x20" /* key_size */
#endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"