diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index cbe8eef473ec09f8caa98a6012497fe9e0284d44..5c633ebe258109a91d9b3b536586e8de3fdc12bf 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -553,4 +553,16 @@ config CRYPTO_CRCT10DIF_PCLMUL
 	  Architecture: x86_64 using:
 	  - PCLMULQDQ (carry-less multiplication)
 
+config CRYPTO_SM2_ZHAOXIN_GMI
+	tristate "SM2 public key algorithm (Zhaoxin GMI Instruction)"
+	depends on X86 && (CPU_SUP_CENTAUR || CPU_SUP_ZHAOXIN)
+	default m
+	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
+	help
+	  SM2 (ShangMi 2) public key algorithm accelerated by the Zhaoxin GMI instruction set
+
+	  Published by the State Encryption Management Bureau, China, and
+	  specified in OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
+
 endmenu
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 4230829a66485b09a65150aa49281795d8b82b27..e5480c50a8d9bf7b5f79943d3f93f8fc321cfc8e 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -109,6 +109,7 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o
 obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
 aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
 
+obj-$(CONFIG_CRYPTO_SM2_ZHAOXIN_GMI) += sm2-zhaoxin-gmi.o
 obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o
 obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o
diff --git a/arch/x86/crypto/sm2-zhaoxin-gmi.c b/arch/x86/crypto/sm2-zhaoxin-gmi.c
new file mode 100644
index 0000000000000000000000000000000000000000..a0430c6611fcfac4b921bda40af3d24afc05724c
--- /dev/null
+++ b/arch/x86/crypto/sm2-zhaoxin-gmi.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ * Authors: YunShen
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/scatterlist.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <asm/cpu_device_id.h>
+
+#define SCRATCH_SIZE (4 * 2048)
+
+#define SM2_CWORD_VERIFY 0x8
+#define SM2_VERIFY_PASS 1
+
+struct sm2_cipher_data {
+	u8 pub_key[65]; /* public key */
+};
+
+/* Check the CPU feature bits to see if the GMI SM2 instruction is available. */
+static int zhaoxin_gmi_available(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_SM2_EN)) {
+		pr_err("Zhaoxin GMI SM2 instruction is not enabled on this CPU\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/*
+ * Execute the Zhaoxin GMI SM2 verify instruction, emitted below as raw opcode
+ * bytes. Per the asm constraints: hash in "a", public key in "b", control
+ * word in "d", scratch buffer in "S", signature in "D"; the verification
+ * result is returned in "c".
+ */
+static inline size_t zhaoxin_gmi_sm2_verify(unsigned char *key, unsigned char *hash,
+					    unsigned char *sig, unsigned char *scratch)
+{
+	size_t result;
+
+	asm volatile(
+		".byte 0xf2, 0x0f, 0xa6, 0xc0"
+		: "=c"(result)
+		: "a"(hash), "b"(key), "d"(SM2_CWORD_VERIFY), "S"(scratch), "D"(sig)
+		: "memory");
+
+	return result;
+}
+
+/* Verify an SM2 signature against a digest using the GMI instruction. */
+static int _zhaoxin_sm2_verify(struct sm2_cipher_data *ec, unsigned char *hash, unsigned char *sig)
+{
+	unsigned char *scratch = kzalloc(SCRATCH_SIZE, GFP_KERNEL);
+	int ret = -EKEYREJECTED;
+	size_t result;
+
+	if (!scratch)
+		return -ENOMEM;
+
+	result = zhaoxin_gmi_sm2_verify(ec->pub_key, hash, sig, scratch);
+	if (result == SM2_VERIFY_PASS)
+		ret = 0;
+
+	kfree(scratch);
+
+	return ret;
+}
+
+static int zhaoxin_sm2_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+	unsigned char *buffer;
+	int ret, buf_len;
+
+	buf_len = req->src_len + req->dst_len;
+	buffer = kmalloc(buf_len, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	/* src carries the signature followed by the digest. */
+	sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, buf_len), buffer, buf_len, 0);
+	ret = _zhaoxin_sm2_verify(ec, buffer + req->src_len, buffer);
+
+	kfree(buffer);
+
+	return ret;
+}
+
+static int zhaoxin_sm2_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+				   unsigned int keylen)
+{
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+
+	if (keylen > sizeof(ec->pub_key))
+		return -EINVAL;
+
+	memcpy(ec->pub_key, key, keylen);
+
+	return 0;
+}
+
+static unsigned int zhaoxin_sm2_max_size(struct crypto_akcipher *tfm)
+{
+	/* Unlimited max size */
+	return PAGE_SIZE;
+}
+
+static int zhaoxin_sm2_init_tfm(struct crypto_akcipher *tfm)
+{
+	return zhaoxin_gmi_available();
+}
+
+static void zhaoxin_sm2_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+
+	memset(ec, 0, sizeof(*ec));
+}
+
+static struct akcipher_alg zhaoxin_sm2 = {
+	.verify = zhaoxin_sm2_verify,
+	.set_pub_key = zhaoxin_sm2_set_pub_key,
+	.max_size = zhaoxin_sm2_max_size,
+	.init = zhaoxin_sm2_init_tfm,
+	.exit = zhaoxin_sm2_exit_tfm,
+	.base = {
+		.cra_name = "sm2",
+		.cra_driver_name = "zhaoxin-gmi-sm2",
+		.cra_priority = 150,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct sm2_cipher_data),
+	},
+};
+
+static const struct x86_cpu_id zhaoxin_sm2_cpu_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_SM2, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sm2_cpu_ids);
+
+static int __init zhaoxin_sm2_init(void)
+{
+	if (!x86_match_cpu(zhaoxin_sm2_cpu_ids))
+		return -ENODEV;
+
+	return crypto_register_akcipher(&zhaoxin_sm2);
+}
+
+static void __exit zhaoxin_sm2_exit(void)
+{
+	crypto_unregister_akcipher(&zhaoxin_sm2);
+}
+
+module_init(zhaoxin_sm2_init);
+module_exit(zhaoxin_sm2_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("YunShen");
+MODULE_DESCRIPTION("SM2 Zhaoxin GMI Algorithm");
+MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm2");
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2a9a4e9b7610fc17900e53340c58be9fe1f5f6dd..93f3930609721c3db62c511f16f6351eff793b1c 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -146,6 +146,8 @@
 #define X86_FEATURE_HYPERVISOR		( 4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_SM2			( 5*32+ 0) /* SM2 Zhaoxin GMI present */
+#define X86_FEATURE_SM2_EN		( 5*32+ 1) /* SM2 Zhaoxin GMI enabled */
 #define X86_FEATURE_XSTORE		( 5*32+ 2) /* "rng" RNG present (xstore) */
 #define X86_FEATURE_XSTORE_EN		( 5*32+ 3) /* "rng_en" RNG enabled */
 #define X86_FEATURE_CCS			( 5*32+ 4) /* "sm3 sm4" present */
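
For reviewers, a minimal kernel-side sketch (not part of the patch) of how the "sm2" akcipher registered by this driver could be exercised to verify a signature. The function name sm2_verify_example and its buffer parameters are hypothetical; the sketch assumes the usual akcipher verify() convention of passing the signature followed by the digest in the source scatterlist, which is also what zhaoxin_sm2_verify() above expects, and assumes the buffers are kmalloc()'d so sg_set_buf() can map them.

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical helper: returns 0 if the signature verifies, a negative errno otherwise. */
static int sm2_verify_example(const u8 *pub_key, unsigned int keylen,
			      const u8 *sig, unsigned int sig_len,
			      const u8 *digest, unsigned int digest_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req = NULL;
	struct scatterlist src_sg[2];
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolves to "zhaoxin-gmi-sm2" when it is the highest-priority "sm2" implementation. */
	tfm = crypto_alloc_akcipher("sm2", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
	if (ret)
		goto out;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	/* verify() consumes the signature, then the digest, from src. */
	sg_init_table(src_sg, 2);
	sg_set_buf(&src_sg[0], sig, sig_len);
	sg_set_buf(&src_sg[1], digest, digest_len);
	akcipher_request_set_crypt(req, src_sg, NULL, sig_len, digest_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_akcipher_verify(req), &wait);
out:
	akcipher_request_free(req);
	crypto_free_akcipher(tfm);
	return ret;
}

This mirrors the way the in-kernel public_key verification path drives akcipher verify(), so the same data layout reaches zhaoxin_sm2_verify() regardless of which SM2 implementation wins the priority selection.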