Commit d4d9c75d authored by Geoff Simmons's avatar Geoff Simmons

add SHA512, generalizing hash() for different hash algorithms

parent d9579da1
......@@ -6,8 +6,12 @@ vmod_LTLIBRARIES = libvmod_blobdigest.la
libvmod_blobdigest_la_LDFLAGS = -module -export-dynamic -avoid-version -shared
libvmod_blobdigest_la_SOURCES = \
vmod_blobdigest.h \
vmod_blobdigest.c \
vmod_blobdigest.h
byte_order.h \
byte_order.c \
sha512.h \
sha512.c
nodist_libvmod_blobdigest_la_SOURCES = \
vcc_if.c \
......
/* byte_order.c - byte order related platform dependent routines,
*
* Copyright: 2008-2012 Aleksey Kravchenko <rhash.admin@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. Use this program at your own risk!
*/
#include "byte_order.h"
#if !(__GNUC__ >= 4 || (__GNUC__ ==3 && __GNUC_MINOR__ >= 4)) /* if !GCC or GCC < 4.3 */
# if _MSC_VER >= 1300 && (_M_IX86 || _M_AMD64 || _M_IA64) /* if MSVC++ >= 2002 on x86/x64 */
# include <intrin.h>
# pragma intrinsic(_BitScanForward)
/**
 * Return the zero-based index of the lowest set (trailing) bit of x.
 * MSVC implementation using the _BitScanForward intrinsic.
 *
 * @param x the number to process
 * @return zero-based index of the trailing bit (0 if x == 0, matching
 *         the portable De Bruijn fallback implementation)
 */
unsigned rhash_ctz(unsigned x)
{
	unsigned long index;
	unsigned char isNonzero = _BitScanForward(&index, x); /* MSVC intrinsic */
	return (isNonzero ? (unsigned)index : 0);
}
# else /* _MSC_VER >= 1300... */
/**
 * Plain C replacement for the GCC __builtin_ctz() bit scan.
 * Finds the zero-based index of the lowest set bit of a 32-bit value
 * using a De Bruijn multiplication (returns 0 for x == 0).
 *
 * @param x the number to process
 * @return zero-based index of the trailing bit
 */
unsigned rhash_ctz(unsigned x)
{
	/* maps the top 5 bits of the De Bruijn product back to the
	 * position of the isolated bit */
	static unsigned char debruijn_pos[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};
	/* Isolate the lowest set bit, multiply by the De Bruijn constant
	 * 0x077CB531 (devised by Martin Lauter, 1997, per Donald Knuth);
	 * each possible isolated bit yields a unique 5-bit pattern in the
	 * high bits of the product, which indexes the lookup table.
	 * See http://graphics.stanford.edu/~seander/bithacks.html */
	uint32_t isolated = (uint32_t)(x & -x);
	return (unsigned)debruijn_pos[(uint32_t)(isolated * 0x077CB531U) >> 27];
}
# endif /* _MSC_VER >= 1300... */
#endif /* !(GCC >= 4.3) */
/**
 * Copy a memory block while simultaneously exchanging the byte order
 * of each 32-bit word (little-endian <-> big-endian).
 *
 * @param to the pointer where to copy memory block
 * @param index offset in bytes into 'to' at which to start writing
 * @param from the source block to copy
 * @param length length of the memory block in bytes
 */
void rhash_swap_copy_str_to_u32(void* to, int index, const void* from, size_t length)
{
	/* if all pointers and length are 32-bits aligned */
	/* NOTE(review): '(char*)p - (char*)0' extracts the pointer value for
	 * the alignment test; not strictly portable C, but accepted on all
	 * platforms this library targets. */
	if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 3) ) {
		/* fast path: copy memory as byte-swapped 32-bit words */
		const uint32_t* src = (const uint32_t*)from;
		const uint32_t* end = (const uint32_t*)((const char*)src + length);
		uint32_t* dst = (uint32_t*)((char*)to + index);
		while (src < end) *(dst++) = bswap_32( *(src++) );
	} else {
		/* slow path: 'index ^ 3' mirrors each byte position within its
		 * 32-bit word, which reverses the byte order word by word */
		const char* src = (const char*)from;
		for (length += index; (size_t)index < length; index++) ((char*)to)[index ^ 3] = *(src++);
	}
}
/**
 * Copy a memory block while simultaneously exchanging the byte order
 * of each 64-bit word (little-endian <-> big-endian).
 *
 * @param to the pointer where to copy memory block
 * @param index offset in bytes into 'to' at which to start writing
 * @param from the source block to copy
 * @param length length of the memory block in bytes
 */
void rhash_swap_copy_str_to_u64(void* to, int index, const void* from, size_t length)
{
	/* if all pointers and length are 64-bits aligned */
	/* NOTE(review): same pointer-minus-null alignment trick as the
	 * 32-bit variant above. */
	if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 7) ) {
		/* fast path: copy aligned memory block as byte-swapped 64-bit integers */
		const uint64_t* src = (const uint64_t*)from;
		const uint64_t* end = (const uint64_t*)((const char*)src + length);
		uint64_t* dst = (uint64_t*)((char*)to + index);
		while (src < end) *(dst++) = bswap_64( *(src++) );
	} else {
		/* slow path: 'index ^ 7' mirrors each byte position within its
		 * 64-bit word, which reverses the byte order word by word */
		const char* src = (const char*)from;
		for (length += index; (size_t)index < length; index++) ((char*)to)[index ^ 7] = *(src++);
	}
}
/**
 * Copy data from a sequence of 64-bit words to a binary string of given
 * length, exchanging the byte order of each word on the way.
 *
 * @param to the binary string to receive data
 * @param from the source sequence of 64-bit words
 * @param length the size in bytes of the data being copied
 */
void rhash_swap_copy_u64_to_str(void* to, const void* from, size_t length)
{
	/* if all pointers and length are 64-bits aligned */
	if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | length ) & 7) ) {
		/* fast path: copy aligned memory block as byte-swapped 64-bit integers */
		const uint64_t* src = (const uint64_t*)from;
		const uint64_t* end = (const uint64_t*)((const char*)src + length);
		uint64_t* dst = (uint64_t*)to;
		while (src < end) *(dst++) = bswap_64( *(src++) );
	} else {
		/* slow path: 'index ^ 7' reads source bytes in reversed order
		 * within each 64-bit word */
		size_t index;
		char* dst = (char*)to;
		for (index = 0; index < length; index++) *(dst++) = ((char*)from)[index ^ 7];
	}
}
/**
 * Byte-swap each 32-bit integer of an array in place.
 *
 * @param arr the array to process
 * @param length array length in 32-bit words
 */
void rhash_u32_mem_swap(unsigned *arr, int length)
{
	int i;

	for (i = 0; i < length; i++)
		arr[i] = bswap_32(arr[i]);
}
/* byte_order.h */
#ifndef BYTE_ORDER_H
#define BYTE_ORDER_H
#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#ifdef IN_RHASH
#include "config.h"
#endif
#ifdef __GLIBC__
# include <endian.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* if x86 compatible cpu */
#if defined(i386) || defined(__i386__) || defined(__i486__) || \
defined(__i586__) || defined(__i686__) || defined(__pentium__) || \
defined(__pentiumpro__) || defined(__pentium4__) || \
defined(__nocona__) || defined(prescott) || defined(__core2__) || \
defined(__k6__) || defined(__k8__) || defined(__athlon__) || \
defined(__amd64) || defined(__amd64__) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_IX86) || \
defined(_M_AMD64) || defined(_M_IA64) || defined(_M_X64)
/* detect if x86-64 instruction set is supported */
# if defined(_LP64) || defined(__LP64__) || defined(__x86_64) || \
defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
# define CPU_X64
# else
# define CPU_IA32
# endif
#endif
/* detect CPU endianness */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
__BYTE_ORDER == __LITTLE_ENDIAN) || \
defined(CPU_IA32) || defined(CPU_X64) || \
defined(__ia64) || defined(__ia64__) || defined(__alpha__) || defined(_M_ALPHA) || \
defined(vax) || defined(MIPSEL) || defined(_ARM_) || defined(__arm__)
# define CPU_LITTLE_ENDIAN
# define IS_BIG_ENDIAN 0
# define IS_LITTLE_ENDIAN 1
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
__BYTE_ORDER == __BIG_ENDIAN) || \
defined(__sparc) || defined(__sparc__) || defined(sparc) || \
defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_POWER) || \
defined(__POWERPC__) || defined(POWERPC) || defined(__powerpc) || \
defined(__powerpc__) || defined(__powerpc64__) || defined(__ppc__) || \
defined(__hpux) || defined(_MIPSEB) || defined(mc68000) || \
defined(__s390__) || defined(__s390x__) || defined(sel)
# define CPU_BIG_ENDIAN
# define IS_BIG_ENDIAN 1
# define IS_LITTLE_ENDIAN 0
#else
# error "Can't detect CPU architecture"
#endif
#define IS_ALIGNED_32(p) (0 == (3 & ((const char*)(p) - (const char*)0)))
#define IS_ALIGNED_64(p) (0 == (7 & ((const char*)(p) - (const char*)0)))
#if defined(_MSC_VER)
#define ALIGN_ATTR(n) __declspec(align(n))
#elif defined(__GNUC__)
#define ALIGN_ATTR(n) __attribute__((aligned (n)))
#else
#define ALIGN_ATTR(n) /* nothing */
#endif
#if defined(_MSC_VER) || defined(__BORLANDC__)
#define I64(x) x##ui64
#else
#define I64(x) x##LL
#endif
/* convert a hash flag to index */
#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) /* GCC >= 3.4 */
# define rhash_ctz(x) __builtin_ctz(x)
#else
unsigned rhash_ctz(unsigned); /* define as function */
#endif
void rhash_swap_copy_str_to_u32(void* to, int index, const void* from, size_t length);
void rhash_swap_copy_str_to_u64(void* to, int index, const void* from, size_t length);
void rhash_swap_copy_u64_to_str(void* to, const void* from, size_t length);
void rhash_u32_mem_swap(unsigned *p, int length_in_u32);
/* define bswap_32 */
#if defined(__GNUC__) && defined(CPU_IA32) && !defined(__i386__)
/* x86: byte-swap a 32-bit word in a single BSWAP instruction */
static inline uint32_t bswap_32(uint32_t x) {
	__asm("bswap\t%0" : "=r" (x) : "0" (x));
	return x;
}
#elif defined(__GNUC__) && (__GNUC__ >= 4) && (__GNUC__ > 4 || __GNUC_MINOR__ >= 3)
/* for GCC >= 4.3 */
# define bswap_32(x) __builtin_bswap32(x)
#elif (_MSC_VER > 1300) && (defined(CPU_IA32) || defined(CPU_X64)) /* MS VC */
# define bswap_32(x) _byteswap_ulong((unsigned long)x)
#elif !defined(__STRICT_ANSI__)
/* general bswap_32 definition: extract each byte and place it at the
 * mirrored position */
static inline uint32_t bswap_32(uint32_t x) {
	return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
	       ((x & 0x00FF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
}
#else
#define bswap_32(x) ((((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) | \
(((x) & 0x0000ff00) << 8) | (((x) & 0x000000ff) << 24))
#endif /* bswap_32 */
#if defined(__GNUC__) && (__GNUC__ >= 4) && (__GNUC__ > 4 || __GNUC_MINOR__ >= 3)
# define bswap_64(x) __builtin_bswap64(x)
#elif (_MSC_VER > 1300) && (defined(CPU_IA32) || defined(CPU_X64)) /* MS VC */
# define bswap_64(x) _byteswap_uint64((__int64)x)
#elif !defined(__STRICT_ANSI__)
/* general bswap_64: byte-swap the two 32-bit halves and exchange them */
static inline uint64_t bswap_64(uint64_t x) {
	uint32_t lo = (uint32_t)(x & 0xFFFFFFFFu);
	uint32_t hi = (uint32_t)(x >> 32);
	return ((uint64_t)bswap_32(lo) << 32) | (uint64_t)bswap_32(hi);
}
#else
#error "bswap_64 unsupported"
#endif
#ifdef CPU_BIG_ENDIAN
# define be2me_32(x) (x)
# define be2me_64(x) (x)
# define le2me_32(x) bswap_32(x)
# define le2me_64(x) bswap_64(x)
# define be32_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define le32_copy(to, index, from, length) rhash_swap_copy_str_to_u32((to), (index), (from), (length))
# define be64_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define le64_copy(to, index, from, length) rhash_swap_copy_str_to_u64((to), (index), (from), (length))
# define me64_to_be_str(to, from, length) memcpy((to), (from), (length))
# define me64_to_le_str(to, from, length) rhash_swap_copy_u64_to_str((to), (from), (length))
#else /* CPU_BIG_ENDIAN */
# define be2me_32(x) bswap_32(x)
# define be2me_64(x) bswap_64(x)
# define le2me_32(x) (x)
# define le2me_64(x) (x)
# define be32_copy(to, index, from, length) rhash_swap_copy_str_to_u32((to), (index), (from), (length))
# define le32_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define be64_copy(to, index, from, length) rhash_swap_copy_str_to_u64((to), (index), (from), (length))
# define le64_copy(to, index, from, length) memcpy((to) + (index), (from), (length))
# define me64_to_be_str(to, from, length) rhash_swap_copy_u64_to_str((to), (from), (length))
# define me64_to_le_str(to, from, length) memcpy((to), (from), (length))
#endif /* CPU_BIG_ENDIAN */
/* ROTL/ROTR macros rotate a 32/64-bit word left/right by n bits */
#define ROTL32(dword, n) ((dword) << (n) ^ ((dword) >> (32 - (n))))
#define ROTR32(dword, n) ((dword) >> (n) ^ ((dword) << (32 - (n))))
#define ROTL64(qword, n) ((qword) << (n) ^ ((qword) >> (64 - (n))))
#define ROTR64(qword, n) ((qword) >> (n) ^ ((qword) << (64 - (n))))
#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */
#endif /* BYTE_ORDER_H */
......@@ -4,7 +4,9 @@ use strict;
use warnings;
my @vals = (qw(
SHA256));
SHA256
SHA512
));
sub assert {
unless($_[0]) {
......
/* sha512.c - an implementation of SHA-384/512 hash functions
 * based on FIPS 180-3 (Federal Information Processing Standard).
*
* Copyright: 2010-2012 Aleksey Kravchenko <rhash.admin@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. Use this program at your own risk!
*/
#include <string.h>
#include "byte_order.h"
#include "sha512.h"
/* SHA-384 and SHA-512 constants for 80 rounds. These qwords represent
* the first 64 bits of the fractional parts of the cube
* roots of the first 80 prime numbers. */
static const uint64_t rhash_k512[80] = {
I64(0x428a2f98d728ae22), I64(0x7137449123ef65cd), I64(0xb5c0fbcfec4d3b2f),
I64(0xe9b5dba58189dbbc), I64(0x3956c25bf348b538), I64(0x59f111f1b605d019),
I64(0x923f82a4af194f9b), I64(0xab1c5ed5da6d8118), I64(0xd807aa98a3030242),
I64(0x12835b0145706fbe), I64(0x243185be4ee4b28c), I64(0x550c7dc3d5ffb4e2),
I64(0x72be5d74f27b896f), I64(0x80deb1fe3b1696b1), I64(0x9bdc06a725c71235),
I64(0xc19bf174cf692694), I64(0xe49b69c19ef14ad2), I64(0xefbe4786384f25e3),
I64(0x0fc19dc68b8cd5b5), I64(0x240ca1cc77ac9c65), I64(0x2de92c6f592b0275),
I64(0x4a7484aa6ea6e483), I64(0x5cb0a9dcbd41fbd4), I64(0x76f988da831153b5),
I64(0x983e5152ee66dfab), I64(0xa831c66d2db43210), I64(0xb00327c898fb213f),
I64(0xbf597fc7beef0ee4), I64(0xc6e00bf33da88fc2), I64(0xd5a79147930aa725),
I64(0x06ca6351e003826f), I64(0x142929670a0e6e70), I64(0x27b70a8546d22ffc),
I64(0x2e1b21385c26c926), I64(0x4d2c6dfc5ac42aed), I64(0x53380d139d95b3df),
I64(0x650a73548baf63de), I64(0x766a0abb3c77b2a8), I64(0x81c2c92e47edaee6),
I64(0x92722c851482353b), I64(0xa2bfe8a14cf10364), I64(0xa81a664bbc423001),
I64(0xc24b8b70d0f89791), I64(0xc76c51a30654be30), I64(0xd192e819d6ef5218),
I64(0xd69906245565a910), I64(0xf40e35855771202a), I64(0x106aa07032bbd1b8),
I64(0x19a4c116b8d2d0c8), I64(0x1e376c085141ab53), I64(0x2748774cdf8eeb99),
I64(0x34b0bcb5e19b48a8), I64(0x391c0cb3c5c95a63), I64(0x4ed8aa4ae3418acb),
I64(0x5b9cca4f7763e373), I64(0x682e6ff3d6b2b8a3), I64(0x748f82ee5defb2fc),
I64(0x78a5636f43172f60), I64(0x84c87814a1f0ab72), I64(0x8cc702081a6439ec),
I64(0x90befffa23631e28), I64(0xa4506cebde82bde9), I64(0xbef9a3f7b2c67915),
I64(0xc67178f2e372532b), I64(0xca273eceea26619c), I64(0xd186b8c721c0c207),
I64(0xeada7dd6cde0eb1e), I64(0xf57d4f7fee6ed178), I64(0x06f067aa72176fba),
I64(0x0a637dc5a2c898a6), I64(0x113f9804bef90dae), I64(0x1b710b35131c471b),
I64(0x28db77f523047d84), I64(0x32caab7b40c72493), I64(0x3c9ebe0a15c9bebc),
I64(0x431d67c49c100d4c), I64(0x4cc5d4becb3e42b6), I64(0x597f299cfc657e2a),
I64(0x5fcb6fab3ad6faec), I64(0x6c44198c4a475817)
};
/* The SHA512/384 functions defined by FIPS 180-3, 4.1.3 */
/* Optimized version of Ch(x,y,z)=((x & y) | (~x & z)) */
#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
/* Optimized version of Maj(x,y,z)=((x & y) ^ (x & z) ^ (y & z)) */
#define Maj(x,y,z) (((x) & (y)) ^ ((z) & ((x) ^ (y))))
#define Sigma0(x) (ROTR64((x), 28) ^ ROTR64((x), 34) ^ ROTR64((x), 39))
#define Sigma1(x) (ROTR64((x), 14) ^ ROTR64((x), 18) ^ ROTR64((x), 41))
#define sigma0(x) (ROTR64((x), 1) ^ ROTR64((x), 8) ^ ((x) >> 7))
#define sigma1(x) (ROTR64((x), 19) ^ ROTR64((x), 61) ^ ((x) >> 6))
/* Recalculate element n-th of circular buffer W using formula
* W[n] = sigma1(W[n - 2]) + W[n - 7] + sigma0(W[n - 15]) + W[n - 16]; */
#define RECALCULATE_W(W,n) (W[n] += \
(sigma1(W[(n - 2) & 15]) + W[(n - 7) & 15] + sigma0(W[(n - 15) & 15])))
#define ROUND(a,b,c,d,e,f,g,h,k,data) { \
uint64_t T1 = h + Sigma1(e) + Ch(e,f,g) + k + (data); \
d += T1, h = T1 + Sigma0(a) + Maj(a,b,c); }
#define ROUND_1_16(a,b,c,d,e,f,g,h,n) \
ROUND(a,b,c,d,e,f,g,h, rhash_k512[n], W[n] = be2me_64(block[n]))
#define ROUND_17_80(a,b,c,d,e,f,g,h,n) \
ROUND(a,b,c,d,e,f,g,h, k[n], RECALCULATE_W(W, n))
/**
 * Initialize context before calculating hash.
 *
 * @param ctx context to initialize
 */
void rhash_sha512_init(sha512_ctx *ctx)
{
	/* Initial values from FIPS 180-3. These words were obtained by
	 * taking the first 64 bits of the fractional parts of the square
	 * roots of the first eight prime numbers. */
	static const uint64_t SHA512_H0[8] = {
		I64(0x6a09e667f3bcc908), I64(0xbb67ae8584caa73b), I64(0x3c6ef372fe94f82b),
		I64(0xa54ff53a5f1d36f1), I64(0x510e527fade682d1), I64(0x9b05688c2b3e6c1f),
		I64(0x1f83d9abfb41bd6b), I64(0x5be0cd19137e2179)
	};
	ctx->length = 0;
	ctx->digest_length = sha512_hash_size;

	/* initialize algorithm state */
	memcpy(ctx->hash, SHA512_H0, sizeof(ctx->hash));
}
/**
 * Initialize context before calculating hash.
 * SHA-384 shares the SHA-512 machinery and differs only in the initial
 * state and the digest length.
 *
 * @param ctx context to initialize
 */
void rhash_sha384_init(struct sha512_ctx *ctx)
{
	/* Initial values from FIPS 180-3. These words were obtained by taking
	 * the first sixty-four bits of the fractional parts of the square
	 * roots of ninth through sixteenth prime numbers. */
	static const uint64_t SHA384_H0[8] = {
		I64(0xcbbb9d5dc1059ed8), I64(0x629a292a367cd507), I64(0x9159015a3070dd17),
		I64(0x152fecd8f70e5939), I64(0x67332667ffc00b31), I64(0x8eb44a8768581511),
		I64(0xdb0c2e0d64f98fa7), I64(0x47b5481dbefa4fa4)
	};
	ctx->length = 0;
	ctx->digest_length = sha384_hash_size;
	memcpy(ctx->hash, SHA384_H0, sizeof(ctx->hash));
}
/**
 * The core transformation. Process a single 1024-bit (128-byte)
 * message block.
 *
 * @param hash algorithm state: eight 64-bit words, updated in place
 * @param block the message block to process, as sixteen 64-bit words
 *        in big-endian byte order (converted by be2me_64 on load)
 */
static void rhash_sha512_process_block(uint64_t hash[8], uint64_t block[16])
{
	uint64_t A, B, C, D, E, F, G, H;
	uint64_t W[16]; /* circular buffer for the message schedule */
	const uint64_t *k;
	int i;

	A = hash[0], B = hash[1], C = hash[2], D = hash[3];
	E = hash[4], F = hash[5], G = hash[6], H = hash[7];

	/* Compute SHA using alternate Method: FIPS 180-3 6.1.3.
	 * Instead of rotating working variables each round, the variables
	 * are permuted in the macro arguments; 16 rounds restore order. */
	ROUND_1_16(A, B, C, D, E, F, G, H, 0);
	ROUND_1_16(H, A, B, C, D, E, F, G, 1);
	ROUND_1_16(G, H, A, B, C, D, E, F, 2);
	ROUND_1_16(F, G, H, A, B, C, D, E, 3);
	ROUND_1_16(E, F, G, H, A, B, C, D, 4);
	ROUND_1_16(D, E, F, G, H, A, B, C, 5);
	ROUND_1_16(C, D, E, F, G, H, A, B, 6);
	ROUND_1_16(B, C, D, E, F, G, H, A, 7);
	ROUND_1_16(A, B, C, D, E, F, G, H, 8);
	ROUND_1_16(H, A, B, C, D, E, F, G, 9);
	ROUND_1_16(G, H, A, B, C, D, E, F, 10);
	ROUND_1_16(F, G, H, A, B, C, D, E, 11);
	ROUND_1_16(E, F, G, H, A, B, C, D, 12);
	ROUND_1_16(D, E, F, G, H, A, B, C, 13);
	ROUND_1_16(C, D, E, F, G, H, A, B, 14);
	ROUND_1_16(B, C, D, E, F, G, H, A, 15);

	/* rounds 16..79, recomputing W in the 16-word circular buffer;
	 * 'k' tracks the round-constant slice for each 16-round group */
	for (i = 16, k = &rhash_k512[16]; i < 80; i += 16, k += 16) {
		ROUND_17_80(A, B, C, D, E, F, G, H, 0);
		ROUND_17_80(H, A, B, C, D, E, F, G, 1);
		ROUND_17_80(G, H, A, B, C, D, E, F, 2);
		ROUND_17_80(F, G, H, A, B, C, D, E, 3);
		ROUND_17_80(E, F, G, H, A, B, C, D, 4);
		ROUND_17_80(D, E, F, G, H, A, B, C, 5);
		ROUND_17_80(C, D, E, F, G, H, A, B, 6);
		ROUND_17_80(B, C, D, E, F, G, H, A, 7);
		ROUND_17_80(A, B, C, D, E, F, G, H, 8);
		ROUND_17_80(H, A, B, C, D, E, F, G, 9);
		ROUND_17_80(G, H, A, B, C, D, E, F, 10);
		ROUND_17_80(F, G, H, A, B, C, D, E, 11);
		ROUND_17_80(E, F, G, H, A, B, C, D, 12);
		ROUND_17_80(D, E, F, G, H, A, B, C, 13);
		ROUND_17_80(C, D, E, F, G, H, A, B, 14);
		ROUND_17_80(B, C, D, E, F, G, H, A, 15);
	}

	/* add the compressed block to the current hash value */
	hash[0] += A, hash[1] += B, hash[2] += C, hash[3] += D;
	hash[4] += E, hash[5] += F, hash[6] += G, hash[7] += H;
}
/**
 * Calculate message hash.
 * Can be called repeatedly with chunks of the message to be hashed.
 *
 * @param ctx the algorithm context containing current hashing state
 * @param msg message chunk
 * @param size length of the message chunk in bytes
 */
void rhash_sha512_update(sha512_ctx *ctx, const unsigned char *msg, size_t size)
{
	/* number of buffered bytes left over from the previous call */
	size_t index = (size_t)ctx->length & 127;
	ctx->length += size;

	/* fill partial block */
	if (index) {
		size_t left = sha512_block_size - index;
		memcpy((char*)ctx->message + index, msg, (size < left ? size : left));
		if (size < left) return;

		/* process partial block */
		rhash_sha512_process_block(ctx->hash, ctx->message);
		msg += left;
		size -= left;
	}
	/* process whole 128-byte blocks directly from the input */
	while (size >= sha512_block_size) {
		uint64_t* aligned_message_block;
		if (IS_ALIGNED_64(msg)) {
			/* the most common case is processing of an already aligned message
			without copying it */
			aligned_message_block = (uint64_t*)msg;
		} else {
			memcpy(ctx->message, msg, sha512_block_size);
			aligned_message_block = ctx->message;
		}

		rhash_sha512_process_block(ctx->hash, aligned_message_block);
		msg += sha512_block_size;
		size -= sha512_block_size;
	}
	if (size) {
		memcpy(ctx->message, msg, size); /* save leftovers */
	}
}
/**
 * Finish hashing: pad the message per FIPS 180-3 and store the
 * calculated digest into the given array.
 *
 * @param ctx the algorithm context containing current hashing state
 * @param result receives the digest (ctx->digest_length bytes);
 *        may be NULL to finalize the state without extracting it
 */
void rhash_sha512_final(sha512_ctx *ctx, unsigned char* result)
{
	size_t index = ((unsigned)ctx->length & 127) >> 3; /* qword holding the tail */
	unsigned shift = ((unsigned)ctx->length & 7) * 8;  /* bit offset of the tail */

	/* pad message and process the last block */

	/* append the byte 0x80 to the message: clear the bytes above the
	 * tail within the qword, then set the 0x80 marker (the masks are
	 * built on a little-endian image, hence le2me_64) */
	ctx->message[index] &= le2me_64( ~(I64(0xFFFFFFFFFFFFFFFF) << shift) );
	ctx->message[index++] ^= le2me_64( I64(0x80) << shift );

	/* if no room left in the message to store 128-bit message length */
	if (index >= 15) {
		if (index == 15) ctx->message[index] = 0;
		rhash_sha512_process_block(ctx->hash, ctx->message);
		index = 0;
	}
	/* zero-fill up to the length field; message[14] acts as the high
	 * half of the 128-bit length, which stays zero since ctx->length
	 * counts bytes in a 64-bit field */
	while (index < 15) {
		ctx->message[index++] = 0;
	}
	/* store the message length in bits as a big-endian 64-bit word */
	ctx->message[15] = be2me_64(ctx->length << 3);
	rhash_sha512_process_block(ctx->hash, ctx->message);

	if (result) be64_copy(result, 0, ctx->hash, ctx->digest_length);
}
/* sha.h sha512 and sha384 hash functions */
#ifndef SHA512_H
#define SHA512_H
#include <stdint.h>
#include <unistd.h>
#define sha512_block_size 128
#define sha512_hash_size 64
#define sha384_hash_size 48
/* algorithm context */
typedef struct sha512_ctx
{
uint64_t message[16]; /* 1024-bit buffer for leftovers */
uint64_t length; /* number of processed bytes */
uint64_t hash[8]; /* 512-bit algorithm internal hashing state */
unsigned digest_length; /* length of the algorithm digest in bytes */
} sha512_ctx;
void rhash_sha384_init(sha512_ctx *ctx);
void rhash_sha512_init(sha512_ctx *ctx);
void rhash_sha512_update(sha512_ctx *ctx, const unsigned char* data, size_t length);
void rhash_sha512_final(sha512_ctx *ctx, unsigned char* result);
#endif /* SHA512_H */
# looks like -*- vcl -*-
varnishtest "SHA512 hash"
# VMOD blobcode must be installed
varnish v1 -vcl {
import blobdigest from "${vmod_topbuild}/src/.libs/libvmod_blobdigest.so";
import blobcode;
backend b { .host = "${bad_ip}"; }
sub vcl_recv {
return(synth(200));
}
sub vcl_synth {
set resp.http.empty
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY, "")));
set resp.http.a
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY, "a")));
set resp.http.abc
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY, "abc")));
set resp.http.msgdigest
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY,
"message digest")));
set resp.http.alphalc
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY,
"abcdefghijklmnopqrstuvwxyz")));
set resp.http.alphasoup
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY,
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq")));
set resp.http.alphanum
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY,
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")));
set resp.http.digits
= blobcode.encode(HEXUC, blobdigest.hash(SHA512,
blobcode.decode(IDENTITY,
"12345678901234567890123456789012345678901234567890123456789012345678901234567890")));
# all 256 byte values in ascending, big-endian order
set resp.http.allbytes
= blobcode.encode(HEXLC, blobdigest.hash(SHA512,
blobcode.decode(BASE64,
"AQACAQMCBAMFBAYFBwYIBwkICgkLCgwLDQwODQ8OEA8REBIRExIUExUUFhUXFhgXGRgaGRsaHBsdHB4dHx4gHyEgIiEjIiQjJSQmJScmKCcpKCopKyosKy0sLi0vLjAvMTAyMTMyNDM1NDY1NzY4Nzk4Ojk7Ojw7PTw+PT8+QD9BQEJBQ0JEQ0VERkVHRkhHSUhKSUtKTEtNTE5NT05QT1FQUlFTUlRTVVRWVVdWWFdZWFpZW1pcW11cXl1fXmBfYWBiYWNiZGNlZGZlZ2ZoZ2loamlramxrbWxubW9ucG9xcHJxc3J0c3V0dnV3dnh3eXh6eXt6fHt9fH59f36Afw==")));
}
} -start
client c1 {
txreq
rxresp
expect resp.status == 200
# from librhash
expect resp.http.empty == "CF83E1357EEFB8BDF1542850D66D8007D620E4050B5715DC83F4A921D36CE9CE47D0D13C5D85F2B0FF8318D2877EEC2F63B931BD47417A81A538327AF927DA3E"
expect resp.http.a == "1F40FC92DA241694750979EE6CF582F2D5D7D28E18335DE05ABC54D0560E0F5302860C652BF08D560252AA5E74210546F369FBBBCE8C12CFC7957B2652FE9A75"
expect resp.http.abc == "DDAF35A193617ABACC417349AE20413112E6FA4E89A97EA20A9EEEE64B55D39A2192992A274FC1A836BA3C23A3FEEBBD454D4423643CE80E2A9AC94FA54CA49F"
expect resp.http.msgdigest == "107DBF389D9E9F71A3A95F6C055B9251BC5268C2BE16D6C13492EA45B0199F3309E16455AB1E96118E8A905D5597B72038DDB372A89826046DE66687BB420E7C"
expect resp.http.alphalc == "4DBFF86CC2CA1BAE1E16468A05CB9881C97F1753BCE3619034898FAA1AABE429955A1BF8EC483D7421FE3C1646613A59ED5441FB0F321389F77F48A879C7B1F1"
expect resp.http.alphasoup == "204A8FC6DDA82F0A0CED7BEB8E08A41657C16EF468B228A8279BE331A703C33596FD15C13B1B07F9AA1D3BEA57789CA031AD85C7A71DD70354EC631238CA3445"
expect resp.http.alphanum == "1E07BE23C26A86EA37EA810C8EC7809352515A970E9253C26F536CFC7A9996C45C8370583E0A78FA4A90041D71A4CEAB7423F19C71B9D5A3E01249F0BEBD5894"
expect resp.http.digits == "72EC1EF1124A45B047E8B7C75A932195135BB61DE24EC0D1914042246E0AEC3A2354E093D76F3048B456764346900CB130D2A4FD5DD16ABB5E30BCB850DEE843"
# verified with: base64 -d | sha512sum
expect resp.http.allbytes == "339d24bfddd04c682a07912f1bd44e9056855f7a4fcb69487951c03d5c09bf962d16e7672aa4e2bd64e75cd2d9be8b185f94f3357ec46cce702ccf5e385326b0"
} -run
......@@ -33,10 +33,9 @@
#include "vdef.h"
#include "cache/cache.h"
#include "vsb.h"
#include "vsha256.h"
#include "vcc_if.h"
#include "parse_algorithm.h"
#include "vmod_blobdigest.h"
#define ERR(ctx, msg) \
errmsg((ctx), "vmod blobcode error: " msg)
......@@ -68,14 +67,60 @@ errmsg(VRT_CTX, const char *fmt, ...)
va_end(args);
}
/*
 * Initialize the hash context for the given algorithm, dispatching to
 * the algorithm-specific init function. Aborts via WRONG() on an
 * unknown enum value (cannot happen for values produced by the VCC
 * enum parser).
 */
static void
init(const enum algorithm hash, hash_ctx * const hctx)
{
	switch(hash) {
	case SHA256:
		SHA256_Init(&hctx->sha256);
		break;
	case SHA512:
		rhash_sha512_init(&hctx->sha512);
		break;
	default:
		WRONG("illegal algorithm");
	}
}
/*
 * Feed 'len' bytes of 'msg' into the hash context, dispatching to the
 * algorithm-specific update function.
 */
static void
update(const enum algorithm hash, hash_ctx *restrict const hctx,
	const uint8_t *restrict msg, const size_t len)
{
	switch(hash) {
	case SHA256:
		SHA256_Update(&hctx->sha256, msg, len);
		break;
	case SHA512:
		rhash_sha512_update(&hctx->sha512, msg, len);
		break;
	default:
		WRONG("illegal algorithm");
	}
}
/*
 * Finalize the hash context and write the digest into 'result', which
 * must provide at least hashspec[hash] bytes. Dispatches to the
 * algorithm-specific final function.
 */
static void
final(const enum algorithm hash, hash_ctx *restrict const hctx,
	uint8_t *restrict result)
{
	switch(hash) {
	case SHA256:
		SHA256_Final(result, &hctx->sha256);
		break;
	case SHA512:
		rhash_sha512_final(&hctx->sha512, result);
		break;
	default:
		WRONG("illegal algorithm");
	}
}
VCL_BLOB
vmod_hash(VRT_CTX, VCL_ENUM hashs, VCL_BLOB msg)
{
/* enum algorithm hash = parse_algorithm(hashs); */
enum algorithm hash = parse_algorithm(hashs);
struct vmod_priv *b;
char *snap;
SHA256_CTX hashctx[1];
(void) hashs;
hash_ctx hctx[1];
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
if (INIT_FINI(ctx)) {
......@@ -91,16 +136,16 @@ vmod_hash(VRT_CTX, VCL_ENUM hashs, VCL_BLOB msg)
ERRNOMEM(ctx, "allocating blob in hash()");
return NULL;
}
if ((b->priv = WS_Alloc(ctx->ws, SHA256_LEN)) == NULL) {
if ((b->priv = WS_Alloc(ctx->ws, hashspec[hash])) == NULL) {
WS_Reset(ctx->ws, snap);
ERRNOMEM(ctx, "allocating hash result in hash()");
return NULL;
}
b->len = SHA256_LEN;
b->len = hashspec[hash];
b->free = NULL;
SHA256_Init(hashctx);
SHA256_Update(hashctx, msg->priv, msg->len);
SHA256_Final(b->priv, hashctx);
init(hash, hctx);
update(hash, hctx, msg->priv, msg->len);
final(hash, hctx, b->priv);
return b;
}
......
/*-
* Copyright 2016 UPLEX - Nils Goroll Systemoptimierung
* All rights reserved.
*
* Author: Geoffrey Simmons <geoffrey.simmons@uplex.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/types.h>
#include "parse_algorithm.h"
#include "vsha256.h"
#include "sha512.h"
/* a single context object large enough for any supported algorithm;
 * only the member selected by the enum algorithm value is ever used */
typedef union hash_ctx {
	SHA256_CTX sha256;
	sha512_ctx sha512;
} hash_ctx;

/* digest length in bytes for each algorithm, indexed by enum algorithm */
static const size_t hashspec[] = {
	[SHA256] = SHA256_LEN,
	[SHA512] = sha512_hash_size,
};
......@@ -9,7 +9,7 @@
$Module blobdigest 3 digests and hmacs for the VCL blob type
$Function BLOB hash(ENUM {SHA256} hash, BLOB msg)
$Function BLOB hash(ENUM {SHA256, SHA512} hash, BLOB msg)
$Function STRING version()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment