From 2d31e518a42828df7877bca23a958627d60408bc Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Wed, 1 May 2013 12:52:48 -0700 Subject: crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework When CRC T10 DIF is calculated using the crypto transform framework, we wrap the crc_t10dif function call to utilize it. This allows us to take advantage of any accelerated CRC T10 DIF transform that is plugged into the crypto framework. Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 +++ crypto/Makefile | 1 + crypto/crct10dif.c | 178 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+) create mode 100644 crypto/crct10dif.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 622d8a48cbe..ceb3611efe4 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -376,6 +376,14 @@ config CRYPTO_CRC32_PCLMUL which will enable any routine to use the CRC-32-IEEE 802.3 checksum and gain better performance as compared with the table implementation. +config CRYPTO_CRCT10DIF + tristate "CRCT10DIF algorithm" + select CRYPTO_HASH + help + CRC T10 Data Integrity Field computation is being cast as + a crypto transform. This allows for faster crc t10 diff + transforms to be used if they are available. + config CRYPTO_GHASH tristate "GHASH digest algorithm" select CRYPTO_GF128MUL diff --git a/crypto/Makefile b/crypto/Makefile index a8e9b0fefbe..62af87df872 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o obj-$(CONFIG_CRYPTO_CRC32) += crc32.o +obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o obj-$(CONFIG_CRYPTO_842) += 842.o diff --git a/crypto/crct10dif.c b/crypto/crct10dif.c new file mode 100644 index 00000000000..92aca96d6b9 --- /dev/null +++ b/crypto/crct10dif.c @@ -0,0 +1,178 @@ +/* + * Cryptographic API. + * + * T10 Data Integrity Field CRC16 Crypto Transform + * + * Copyright (c) 2007 Oracle Corporation. All rights reserved. + * Written by Martin K. Petersen + * Copyright (C) 2013 Intel Corporation + * Author: Tim Chen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct chksum_desc_ctx { + __u16 crc; +}; + +/* Table generated using the following polynomium: + * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 + * gt: 0x8bb7 + */ +static const __u16 t10_dif_crc_table[256] = { + 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, + 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, + 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, + 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, + 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, + 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, + 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, + 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, + 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, + 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, + 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, + 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, + 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, + 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, + 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, + 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, + 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, + 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, + 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, + 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, + 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, + 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, + 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, + 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, + 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, + 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, + 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, + 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, + 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, + 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, + 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, + 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 +}; + +__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len) +{ + unsigned int i; + + for (i = 0 ; i < len ; i++) + crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + + return crc; +} +EXPORT_SYMBOL(crc_t10dif_generic); + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. 
+ */ + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = 0; + + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc_t10dif_generic(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + *(__u16 *)out = ctx->crc; + return 0; +} + +static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, length, out); +} + +static struct shash_alg alg = { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crct10dif", + .cra_driver_name = "crct10dif-generic", + .cra_priority = 100, + .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init crct10dif_mod_init(void) +{ + int ret; + + ret = crypto_register_shash(&alg); + return ret; +} + +static void __exit crct10dif_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crct10dif_mod_init); +module_exit(crct10dif_mod_fini); + +MODULE_AUTHOR("Tim Chen "); +MODULE_DESCRIPTION("T10 DIF CRC calculation."); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 0b95a7f85718adcbba36407ef88bba0a7379ed03 Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Wed, 1 May 2013 12:52:50 -0700 Subject: crypto: crct10dif - Glue code to cast accelerated CRCT10DIF assembly as a crypto transform Glue code that plugs the PCLMULQDQ accelerated CRC T10 DIF hash into the crypto framework. The config CRYPTO_CRCT10DIF_PCLMUL should be turned on to enable the feature. The crc_t10dif crypto library function will use this faster algorithm when crct10dif_pclmul module is loaded. Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/Kconfig | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index ceb3611efe4..d1ca6312d79 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -384,6 +384,17 @@ config CRYPTO_CRCT10DIF a crypto transform. This allows for faster crc t10 diff transforms to be used if they are available. +config CRYPTO_CRCT10DIF_PCLMUL + tristate "CRCT10DIF PCLMULQDQ hardware acceleration" + depends on X86 && 64BIT && CRC_T10DIF + select CRYPTO_HASH + help + For x86_64 processors with SSE4.2 and PCLMULQDQ supported, + CRC T10 DIF PCLMULQDQ computation can be hardware + accelerated PCLMULQDQ instruction. This option will create + 'crct10dif-plcmul' module, which is faster when computing the + crct10dif checksum as compared with the generic table implementation. 
+ config CRYPTO_GHASH tristate "GHASH digest algorithm" select CRYPTO_GF128MUL -- cgit v1.2.3 From 39761214eefc6b070f29402aa1165f24d789b3f7 Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Wed, 1 May 2013 12:52:51 -0700 Subject: crypto: crct10dif - Simple correctness and speed test for CRCT10DIF hash These are simple tests to do sanity check of CRC T10 DIF hash. The correctness of the transform can be checked with the command modprobe tcrypt mode=47 The speed of the transform can be evaluated with the command modprobe tcrypt mode=320 Set the cpu frequency to constant and turn turbo off when running the speed test so the frequency governor will not tweak the frequency and affects the measurements. Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 8 ++++++++ crypto/testmgr.c | 10 ++++++++++ crypto/testmgr.h | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 66d254ce0d1..25a5934f0e5 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1174,6 +1174,10 @@ static int do_test(int m) ret += tcrypt_test("ghash"); break; + case 47: + ret += tcrypt_test("crct10dif"); + break; + case 100: ret += tcrypt_test("hmac(md5)"); break; @@ -1498,6 +1502,10 @@ static int do_test(int m) test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 320: + test_hash_speed("crct10dif", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + case 399: break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5823735cf38..f19a392ade7 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1973,6 +1973,16 @@ static const struct alg_test_desc alg_test_descs[] = { .count = CRC32C_TEST_VECTORS } } + }, { + .alg = "crct10dif", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = crct10dif_tv_template, + .count = CRCT10DIF_TEST_VECTORS + } + } }, { .alg = "cryptd(__driver-cbc-aes-aesni)", .test = alg_test_null, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1e701bc075b..7d44aa3d6b4 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -450,6 +450,39 @@ static struct hash_testvec rmd320_tv_template[] = { } }; +#define CRCT10DIF_TEST_VECTORS 3 +static struct hash_testvec crct10dif_tv_template[] = { + { + .plaintext = "abc", + .psize = 3, +#ifdef __LITTLE_ENDIAN + .digest = "\x3b\x44", +#else + .digest = "\x44\x3b", +#endif + }, { + .plaintext = "1234567890123456789012345678901234567890" + "123456789012345678901234567890123456789", + .psize = 79, +#ifdef __LITTLE_ENDIAN + .digest = "\x70\x4b", +#else + .digest = "\x4b\x70", +#endif + }, { + .plaintext = + "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", + .psize = 56, +#ifdef __LITTLE_ENDIAN + .digest = "\xe3\x9c", +#else + .digest = "\x9c\xe3", +#endif + .np = 2, + .tap = { 28, 28 } + } +}; + /* * SHA1 test vectors from from FIPS PUB 180-1 * Long vector from CAVS 5.0 -- cgit v1.2.3 From d329581493802cede83ca672093588f399d02a65 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Tue, 21 May 2013 17:10:10 +0300 Subject: crypto: sha512_generic - set cra_driver_name 'sha512_generic' should set driver name now that there is alternative sha512 provider (sha512_ssse3). 
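Setting cra_driver_name matters because the crypto API resolves lookups by the generic algorithm name (cra_name) as well as by the implementation-specific driver name (cra_driver_name); without it the C implementation can no longer be requested explicitly once sha512-ssse3 is also registered. A rough sketch of the distinction (illustrative only, not part of this patch):

#include <linux/err.h>
#include <crypto/hash.h>

/* Illustrative sketch: allocate by algorithm name vs. by driver name. */
static void sha512_lookup_example(void)
{
	/* "sha512" resolves to the highest-priority registered provider,
	 * e.g. sha512-ssse3 when that module is loaded ... */
	struct crypto_shash *best = crypto_alloc_shash("sha512", 0, 0);

	/* ... while the driver name added by this patch pins the generic
	 * C implementation, which testing and benchmarking rely on. */
	struct crypto_shash *generic = crypto_alloc_shash("sha512-generic", 0, 0);

	if (!IS_ERR(best))
		crypto_free_shash(best);
	if (!IS_ERR(generic))
		crypto_free_shash(generic);
}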
Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/sha512_generic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'crypto') diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 4c586209567..6ed124f3ea0 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -251,6 +251,7 @@ static struct shash_alg sha512_algs[2] = { { .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", + .cra_driver_name = "sha512-generic", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, @@ -263,6 +264,7 @@ static struct shash_alg sha512_algs[2] = { { .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", + .cra_driver_name = "sha384-generic", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, -- cgit v1.2.3 From 3d387ef08c40382315b8e9baa4bc9a07f7c49fce Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Sat, 8 Jun 2013 12:17:42 +0300 Subject: Revert "crypto: blowfish - add AVX2/x86_64 implementation of blowfish cipher" This reverts commit 604880107010a1e5794552d184cd5471ea31b973. Instruction (vpgatherdd) that this implementation relied on turned out to be slow performer on real hardware (i5-4570). The previous 4-way blowfish implementation is therefore faster and this implementation should be removed. Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/Kconfig | 18 ------------------ crypto/testmgr.c | 12 ------------ 2 files changed, 30 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index d1ca6312d79..4ef0ee71517 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -839,24 +839,6 @@ config CRYPTO_BLOWFISH_X86_64 See also: -config CRYPTO_BLOWFISH_AVX2_X86_64 - tristate "Blowfish cipher algorithm (x86_64/AVX2)" - depends on X86 && 64BIT - select CRYPTO_ALGAPI - select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 - select CRYPTO_BLOWFISH_COMMON - select CRYPTO_BLOWFISH_X86_64 - help - Blowfish cipher algorithm (x86_64/AVX2), by Bruce Schneier. - - This is a variable key length cipher which can use keys from 32 - bits to 448 bits in length. It's fast, simple and specifically - designed for use on "large microprocessors". 
- - See also: - - config CRYPTO_CAMELLIA tristate "Camellia cipher algorithms" depends on CRYPTO diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f19a392ade7..27f11187652 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1660,9 +1660,6 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "__driver-cbc-aes-aesni", .test = alg_test_null, .fips_allowed = 1, - }, { - .alg = "__driver-cbc-blowfish-avx2", - .test = alg_test_null, }, { .alg = "__driver-cbc-camellia-aesni", .test = alg_test_null, @@ -1694,9 +1691,6 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "__driver-ecb-aes-aesni", .test = alg_test_null, .fips_allowed = 1, - }, { - .alg = "__driver-ecb-blowfish-avx2", - .test = alg_test_null, }, { .alg = "__driver-ecb-camellia-aesni", .test = alg_test_null, @@ -1987,9 +1981,6 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "cryptd(__driver-cbc-aes-aesni)", .test = alg_test_null, .fips_allowed = 1, - }, { - .alg = "cryptd(__driver-cbc-blowfish-avx2)", - .test = alg_test_null, }, { .alg = "cryptd(__driver-cbc-camellia-aesni)", .test = alg_test_null, @@ -2003,9 +1994,6 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "cryptd(__driver-ecb-aes-aesni)", .test = alg_test_null, .fips_allowed = 1, - }, { - .alg = "cryptd(__driver-ecb-blowfish-avx2)", - .test = alg_test_null, }, { .alg = "cryptd(__driver-ecb-camellia-aesni)", .test = alg_test_null, -- cgit v1.2.3 From 99f42f937a080995b34e1ed75ed6934b5f96f9ca Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Sat, 8 Jun 2013 12:17:47 +0300 Subject: Revert "crypto: twofish - add AVX2/x86_64 assembler implementation of twofish cipher" This reverts commit cf1521a1a5e21fd1e79a458605c4282fbfbbeee2. Instruction (vpgatherdd) that this implementation relied on turned out to be slow performer on real hardware (i5-4570). The previous 8-way twofish/AVX implementation is therefore faster and this implementation should be removed. Converting this implementation to use the same method as in twofish/AVX for table look-ups would give additional ~3% speed up vs twofish/AVX, but would hardly be worth of the added code and binary size. Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/Kconfig | 24 ------------------------ crypto/testmgr.c | 12 ------------ 2 files changed, 36 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 4ef0ee71517..904ffe83856 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1297,30 +1297,6 @@ config CRYPTO_TWOFISH_AVX_X86_64 See also: -config CRYPTO_TWOFISH_AVX2_X86_64 - tristate "Twofish cipher algorithm (x86_64/AVX2)" - depends on X86 && 64BIT - select CRYPTO_ALGAPI - select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_TWOFISH_COMMON - select CRYPTO_TWOFISH_X86_64 - select CRYPTO_TWOFISH_X86_64_3WAY - select CRYPTO_TWOFISH_AVX_X86_64 - select CRYPTO_LRW - select CRYPTO_XTS - help - Twofish cipher algorithm (x86_64/AVX2). - - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. 
- - See also: - - comment "Compression" config CRYPTO_DEFLATE diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 27f11187652..b2bc5334c17 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1653,9 +1653,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "__cbc-twofish-avx", .test = alg_test_null, - }, { - .alg = "__cbc-twofish-avx2", - .test = alg_test_null, }, { .alg = "__driver-cbc-aes-aesni", .test = alg_test_null, @@ -1684,9 +1681,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "__driver-cbc-twofish-avx", .test = alg_test_null, - }, { - .alg = "__driver-cbc-twofish-avx2", - .test = alg_test_null, }, { .alg = "__driver-ecb-aes-aesni", .test = alg_test_null, @@ -1715,9 +1709,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "__driver-ecb-twofish-avx", .test = alg_test_null, - }, { - .alg = "__driver-ecb-twofish-avx2", - .test = alg_test_null, }, { .alg = "__ghash-pclmulqdqni", .test = alg_test_null, @@ -2018,9 +2009,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "cryptd(__driver-ecb-twofish-avx)", .test = alg_test_null, - }, { - .alg = "cryptd(__driver-ecb-twofish-avx2)", - .test = alg_test_null, }, { .alg = "cryptd(__driver-gcm-aes-aesni)", .test = alg_test_null, -- cgit v1.2.3 From 5714758b5c23dcca8d29a43590397e58d245732f Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Thu, 13 Jun 2013 17:37:40 +0300 Subject: crypto: testmgr - check that entries in alg_test_descs are in correct order Patch adds check for alg_test_descs list order, so that accidentically misplaced entries are found quicker. Duplicate entries are also checked for. Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/testmgr.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index b2bc5334c17..a81c154e5d8 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3054,6 +3054,35 @@ static const struct alg_test_desc alg_test_descs[] = { } }; +static bool alg_test_descs_checked; + +static void alg_test_descs_check_order(void) +{ + int i; + + /* only check once */ + if (alg_test_descs_checked) + return; + + alg_test_descs_checked = true; + + for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) { + int diff = strcmp(alg_test_descs[i - 1].alg, + alg_test_descs[i].alg); + + if (WARN_ON(diff > 0)) { + pr_warn("testmgr: alg_test_descs entries in wrong order: '%s' before '%s'\n", + alg_test_descs[i - 1].alg, + alg_test_descs[i].alg); + } + + if (WARN_ON(diff == 0)) { + pr_warn("testmgr: duplicate alg_test_descs entry: '%s'\n", + alg_test_descs[i].alg); + } + } +} + static int alg_find_test(const char *alg) { int start = 0; @@ -3085,6 +3114,8 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) int j; int rc; + alg_test_descs_check_order(); + if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { char nalg[CRYPTO_MAX_ALG_NAME]; -- cgit v1.2.3 From 3a338f20c3c5c33c45ab1c36c8eccd62289c6401 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Thu, 13 Jun 2013 17:37:45 +0300 Subject: crypto: testmgr - test skciphers with unaligned buffers This patch adds unaligned buffer tests for blkciphers. The first new test is with one byte offset and the second test checks if cra_alignmask for driver is big enough; for example, for testing a case where cra_alignmask is set to 7, but driver really needs buffers to be aligned to 16 bytes. 
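The offset choice is what makes the second case meaningful: testmgr's buffers start on a page boundary, so an offset of alignmask + 1 yields the smallest pointer that still satisfies the advertised mask while breaking any stricter requirement the driver silently assumes. A small sketch of the arithmetic (illustrative, not from the patch):

#include <linux/bug.h>

/* Illustrative sketch of why the alignmask + 1 offset is used. */
static void alignmask_offset_example(void)
{
	unsigned long base = 0x1000;		/* page-aligned buffer start */
	unsigned long alignmask = 7;		/* driver advertises 8-byte alignment */
	unsigned long p = base + alignmask + 1;	/* test pointer at 0x1008 */

	BUG_ON(p & alignmask);	/* satisfies the declared mask ...               */
	BUG_ON(!(p & 15));	/* ... but is not 16-byte aligned, so a driver
				 * that really needs 16 bytes will misbehave here. */
}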
Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/testmgr.c | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index a81c154e5d8..8bd185f068b 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -820,7 +820,7 @@ out_nobuf: static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, struct cipher_testvec *template, unsigned int tcount, - const bool diff_dst) + const bool diff_dst, const int align_offset) { const char *algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)); @@ -876,10 +876,12 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, j++; ret = -EINVAL; - if (WARN_ON(template[i].ilen > PAGE_SIZE)) + if (WARN_ON(align_offset + template[i].ilen > + PAGE_SIZE)) goto out; data = xbuf[0]; + data += align_offset; memcpy(data, template[i].input, template[i].ilen); crypto_ablkcipher_clear_flags(tfm, ~0); @@ -900,6 +902,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, sg_init_one(&sg[0], data, template[i].ilen); if (diff_dst) { data = xoutbuf[0]; + data += align_offset; sg_init_one(&sgout[0], data, template[i].ilen); } @@ -941,6 +944,9 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, j = 0; for (i = 0; i < tcount; i++) { + /* alignment tests are only done with continuous buffers */ + if (align_offset != 0) + break; if (template[i].iv) memcpy(iv, template[i].iv, MAX_IVLEN); @@ -1075,15 +1081,34 @@ out_nobuf: static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, struct cipher_testvec *template, unsigned int tcount) { + unsigned int alignmask; int ret; /* test 'dst == src' case */ - ret = __test_skcipher(tfm, enc, template, tcount, false); + ret = __test_skcipher(tfm, enc, template, tcount, false, 0); if (ret) return ret; /* test 'dst != src' case */ - return __test_skcipher(tfm, enc, template, tcount, true); + ret = __test_skcipher(tfm, enc, template, tcount, true, 0); + if (ret) + return ret; + + /* test unaligned buffers, check with one byte offset */ + ret = __test_skcipher(tfm, enc, template, tcount, true, 1); + if (ret) + return ret; + + alignmask = crypto_tfm_alg_alignmask(&tfm->base); + if (alignmask) { + /* Check if alignment mask for tfm is correctly set. */ + ret = __test_skcipher(tfm, enc, template, tcount, true, + alignmask + 1); + if (ret) + return ret; + } + + return 0; } static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, -- cgit v1.2.3 From 58dcf5484c0cbaddbba1d3ed074729f5078346bb Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Thu, 13 Jun 2013 17:37:50 +0300 Subject: crypto: testmgr - test AEADs with unaligned buffers This patch adds unaligned buffer tests for AEADs. The first new test is with one byte offset and the second test checks if cra_alignmask for driver is big enough; for example, for testing a case where cra_alignmask is set to 7, but driver really needs buffers to be aligned to 16 bytes. 
Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/testmgr.c | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 8bd185f068b..f2053861677 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -360,7 +360,7 @@ out_nobuf: static int __test_aead(struct crypto_aead *tfm, int enc, struct aead_testvec *template, unsigned int tcount, - const bool diff_dst) + const bool diff_dst, const int align_offset) { const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); unsigned int i, j, k, n, temp; @@ -423,15 +423,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc, if (!template[i].np) { j++; - /* some tepmplates have no input data but they will + /* some templates have no input data but they will * touch input */ input = xbuf[0]; + input += align_offset; assoc = axbuf[0]; ret = -EINVAL; - if (WARN_ON(template[i].ilen > PAGE_SIZE || - template[i].alen > PAGE_SIZE)) + if (WARN_ON(align_offset + template[i].ilen > + PAGE_SIZE || template[i].alen > PAGE_SIZE)) goto out; memcpy(input, template[i].input, template[i].ilen); @@ -470,6 +471,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, if (diff_dst) { output = xoutbuf[0]; + output += align_offset; sg_init_one(&sgout[0], output, template[i].ilen + (enc ? authsize : 0)); @@ -530,6 +532,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc, } for (i = 0, j = 0; i < tcount; i++) { + /* alignment tests are only done with continuous buffers */ + if (align_offset != 0) + break; + if (template[i].np) { j++; @@ -732,15 +738,34 @@ out_noxbuf: static int test_aead(struct crypto_aead *tfm, int enc, struct aead_testvec *template, unsigned int tcount) { + unsigned int alignmask; int ret; /* test 'dst == src' case */ - ret = __test_aead(tfm, enc, template, tcount, false); + ret = __test_aead(tfm, enc, template, tcount, false, 0); if (ret) return ret; /* test 'dst != src' case */ - return __test_aead(tfm, enc, template, tcount, true); + ret = __test_aead(tfm, enc, template, tcount, true, 0); + if (ret) + return ret; + + /* test unaligned buffers, check with one byte offset */ + ret = __test_aead(tfm, enc, template, tcount, true, 1); + if (ret) + return ret; + + alignmask = crypto_tfm_alg_alignmask(&tfm->base); + if (alignmask) { + /* Check if alignment mask for tfm is correctly set. */ + ret = __test_aead(tfm, enc, template, tcount, true, + alignmask + 1); + if (ret) + return ret; + } + + return 0; } static int test_cipher(struct crypto_cipher *tfm, int enc, -- cgit v1.2.3 From da5ffe11342a0ecf2cce7000a9392c9ca959e9c8 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Thu, 13 Jun 2013 17:37:55 +0300 Subject: crypto: testmgr - test hash implementations with unaligned buffers This patch adds unaligned buffer tests for hashes. The first new test is with one byte offset and the second test checks if cra_alignmask for driver is big enough; for example, for testing a case where cra_alignmask is set to 7, but driver really needs buffers to be aligned to 16 bytes. 
Signed-off-by: Jussi Kivilinna Signed-off-by: Herbert Xu --- crypto/testmgr.c | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f2053861677..2f00607039e 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -184,8 +184,9 @@ static int do_one_async_hash_op(struct ahash_request *req, return ret; } -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, - unsigned int tcount, bool use_digest) +static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, + unsigned int tcount, bool use_digest, + const int align_offset) { const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); unsigned int i, j, k, temp; @@ -216,10 +217,15 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, if (template[i].np) continue; + ret = -EINVAL; + if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE)) + goto out; + j++; memset(result, 0, 64); hash_buff = xbuf[0]; + hash_buff += align_offset; memcpy(hash_buff, template[i].plaintext, template[i].psize); sg_init_one(&sg[0], hash_buff, template[i].psize); @@ -281,6 +287,10 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, j = 0; for (i = 0; i < tcount; i++) { + /* alignment tests are only done with continuous buffers */ + if (align_offset != 0) + break; + if (template[i].np) { j++; memset(result, 0, 64); @@ -358,6 +368,33 @@ out_nobuf: return ret; } +static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, + unsigned int tcount, bool use_digest) +{ + unsigned int alignmask; + int ret; + + ret = __test_hash(tfm, template, tcount, use_digest, 0); + if (ret) + return ret; + + /* test unaligned buffers, check with one byte offset */ + ret = __test_hash(tfm, template, tcount, use_digest, 1); + if (ret) + return ret; + + alignmask = crypto_tfm_alg_alignmask(&tfm->base); + if (alignmask) { + /* Check if alignment mask for tfm is correctly set. */ + ret = __test_hash(tfm, template, tcount, use_digest, + alignmask + 1); + if (ret) + return ret; + } + + return 0; +} + static int __test_aead(struct crypto_aead *tfm, int enc, struct aead_testvec *template, unsigned int tcount, const bool diff_dst, const int align_offset) -- cgit v1.2.3
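For reference, the crct10dif test vectors above can be reproduced outside the kernel with a bit-by-bit implementation of the T10 DIF polynomial 0x8bb7 (no reflection, zero initial value). The following standalone program is an illustrative cross-check, not part of the patch series; it verifies the "abc" vector, whose little-endian digest bytes "\x3b\x44" correspond to the value 0x443b.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit-by-bit CRC16 with the T10 DIF polynomial; equivalent to the
 * table-driven crc_t10dif_generic() in crypto/crct10dif.c. */
static uint16_t crc_t10dif_bitwise(uint16_t crc, const unsigned char *buf,
				   size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
					     : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint16_t crc = crc_t10dif_bitwise(0, (const unsigned char *)"abc", 3);

	/* Matches the first crct10dif_tv_template entry. */
	assert(crc == 0x443b);
	printf("crc16-t10dif(\"abc\") = 0x%04x\n", crc);
	return 0;
}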