From 324eb0f17d9dcead3c60c133aa244f6b3631fec9 Mon Sep 17 00:00:00 2001
From: Bob Pearson
Date: Fri, 23 Mar 2012 15:02:24 -0700
Subject: crc32: add slice-by-8 algorithm to existing code

Add the slicing-by-8 algorithm alongside the existing slicing-by-4
algorithm. This consists of:

- extending the largest BITS size from 32 to 64
- extending the tables from tab[4][256] up to tab[8][256]
- adding code for the inner loop

[djwong@us.ibm.com: minor changelog tweaks]
Signed-off-by: Bob Pearson
Signed-off-by: Darrick J. Wong
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/crc32.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/lib/crc32.c b/lib/crc32.c
index 5971f2ad46d5..826e16352e0e 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -47,25 +47,28 @@ MODULE_LICENSE("GPL");
 
 #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
 
+/* implements slicing-by-4 or slicing-by-8 algorithm */
 static inline u32
 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
 #  define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
-#  define DO_CRC4 crc = t3[(crc) & 255] ^ \
-		t2[(crc >> 8) & 255] ^ \
-		t1[(crc >> 16) & 255] ^ \
-		t0[(crc >> 24) & 255]
+#  define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
+		   t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
+#  define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
+		   t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
 # else
 #  define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-#  define DO_CRC4 crc = t0[(crc) & 255] ^ \
-		t1[(crc >> 8) & 255] ^ \
-		t2[(crc >> 16) & 255] ^ \
-		t3[(crc >> 24) & 255]
+#  define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
+		   t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
+#  define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
+		   t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
 # endif
 	const u32 *b;
 	size_t    rem_len;
 	const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
+	const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
+	u32 q;
 
 	/* Align it */
 	if (unlikely((long)buf & 3 && len)) {
@@ -73,13 +76,25 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 			DO_CRC(*buf++);
 		} while ((--len) && ((long)buf)&3);
 	}
+
+# if CRC_LE_BITS == 32
 	rem_len = len & 3;
-	/* load data 32 bits wide, xor data 32 bits wide. */
 	len = len >> 2;
+# else
+	rem_len = len & 7;
+	len = len >> 3;
+# endif
+
 	b = (const u32 *)buf;
 	for (--b; len; --len) {
-		crc ^= *++b; /* use pre increment for speed */
-		DO_CRC4;
+		q = crc ^ *++b; /* use pre increment for speed */
+# if CRC_LE_BITS == 32
+		crc = DO_CRC4;
+# else
+		crc = DO_CRC8;
+		q = *++b;
+		crc ^= DO_CRC4;
+# endif
 	}
 	len = rem_len;
 	/* And the last few bytes */
@@ -92,6 +107,7 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 	return crc;
 #undef DO_CRC
 #undef DO_CRC4
+#undef DO_CRC8
 }
 #endif
 
-- 
cgit v1.2.3
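
For reference, the new loop body can be read outside the kernel context. The sketch below is an illustrative, standalone C version of the little-endian slicing-by-8 scheme: each iteration loads two 32-bit words, folds the first (XORed with the current CRC) through table rows 7..4 and the second through rows 3..0, mirroring DO_CRC8 followed by DO_CRC4 in the patch. The function name crc32_le_by8, the memcpy-based loads, the table generation, and the ~crc pre/post inversion are assumptions made so the example is self-contained; they are not the kernel's crc32_le API, which uses pregenerated tables and leaves seeding to its callers.

/*
 * Illustrative slicing-by-8 CRC32 (reflected polynomial 0xEDB88320),
 * assuming a little-endian host.  The table layout follows the kernel's
 * tab[8][256]; everything else (names, init/finalize, memcpy loads) is
 * simplified for the sake of a standalone example.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static uint32_t tab[8][256];

static void crc32_init_tables(void)
{
	/* tab[0] is the classic byte-at-a-time table. */
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t c = i;
		for (int k = 0; k < 8; k++)
			c = (c >> 1) ^ ((c & 1) ? 0xEDB88320u : 0);
		tab[0][i] = c;
	}
	/* tab[k][i]: CRC of byte i followed by k zero bytes. */
	for (int k = 1; k < 8; k++)
		for (uint32_t i = 0; i < 256; i++)
			tab[k][i] = (tab[k - 1][i] >> 8) ^
				    tab[0][tab[k - 1][i] & 255];
}

static uint32_t crc32_le_by8(uint32_t crc, const unsigned char *p, size_t len)
{
	crc = ~crc;
	/* Fold eight bytes per iteration as two 32-bit words,
	 * like DO_CRC8 followed by DO_CRC4 in the patch. */
	while (len >= 8) {
		uint32_t q, r;

		memcpy(&q, p, 4);	/* little-endian load assumed */
		memcpy(&r, p + 4, 4);
		q ^= crc;
		crc = tab[7][q & 255] ^ tab[6][(q >> 8) & 255] ^
		      tab[5][(q >> 16) & 255] ^ tab[4][q >> 24] ^
		      tab[3][r & 255] ^ tab[2][(r >> 8) & 255] ^
		      tab[1][(r >> 16) & 255] ^ tab[0][r >> 24];
		p += 8;
		len -= 8;
	}
	/* Tail: plain byte-at-a-time update, as in DO_CRC(). */
	while (len--)
		crc = tab[0][(crc ^ *p++) & 255] ^ (crc >> 8);
	return ~crc;
}

int main(void)
{
	crc32_init_tables();
	/* Standard check value: CRC32("123456789") == cbf43926. */
	printf("%08x\n", (unsigned)crc32_le_by8(0,
			(const unsigned char *)"123456789", 9));
	return 0;
}

Running the sketch should print cbf43926, the standard CRC-32 check value for "123456789", which is a quick way to sanity-check that the two-word fold is equivalent to the byte-at-a-time update.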