author | Matt Johnston <matt@ucc.asn.au> | 2018-02-09 21:44:05 +0800 |
---|---|---|
committer | Matt Johnston <matt@ucc.asn.au> | 2018-02-09 21:44:05 +0800 |
commit | 4f2eb1914bdac3ed3ee504ad86061281dbe0d074 (patch) | |
tree | 078293375c3f3ee2d485cf9559a08d65d460786a /libtomcrypt/src/headers/tomcrypt_macros.h | |
parent | d72f50ff3284e15124a0f233c26339229fe305ac (diff) | |
Update to libtomcrypt 1.18.1, merged with Dropbear changes
Diffstat (limited to 'libtomcrypt/src/headers/tomcrypt_macros.h')
-rw-r--r-- | libtomcrypt/src/headers/tomcrypt_macros.h | 296 |
1 file changed, 159 insertions, 137 deletions
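Before the diff itself, a minimal standalone sketch of the byte-order handling these macros implement. It assumes a GCC or Clang compiler that provides `__builtin_bswap32` and a little-endian host (the `ENDIAN_LITTLE` case in the header); the `ulong32` stand-in typedef, the buffers, and `main` are illustrative only and not part of the Dropbear tree. It checks that the new `LTC_HAVE_BSWAP_BUILTIN` fast path added below (byte-swap, then raw copy) produces the same bytes as the portable shift-and-mask form of `STORE32H`.

```c
#include <stdio.h>
#include <string.h>

typedef unsigned int ulong32;   /* local stand-in for the library's 32-bit type */

int main(void)
{
   ulong32 v = 0x01020304UL, t;
   unsigned char portable[4], builtin[4];

   /* Portable STORE32H: most significant byte first, independent of host order. */
   portable[0] = (unsigned char)((v >> 24) & 255);
   portable[1] = (unsigned char)((v >> 16) & 255);
   portable[2] = (unsigned char)((v >> 8)  & 255);
   portable[3] = (unsigned char)( v        & 255);

   /* Builtin fast path on a little-endian host: byte-swap, then copy raw bytes. */
   t = __builtin_bswap32(v);
   memcpy(builtin, &t, 4);

   printf("fast path matches portable path: %s\n",
          memcmp(portable, builtin, 4) == 0 ? "yes" : "no");
   return 0;
}
```

The two branches store identical bytes, which is what lets the header pick whichever path the compiler supports without changing the on-the-wire byte order.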
diff --git a/libtomcrypt/src/headers/tomcrypt_macros.h b/libtomcrypt/src/headers/tomcrypt_macros.h
index 6e4d757..94e368f 100644
--- a/libtomcrypt/src/headers/tomcrypt_macros.h
+++ b/libtomcrypt/src/headers/tomcrypt_macros.h
@@ -1,73 +1,73 @@
-/* fix for MSVC ...evil! */
-#ifdef _MSC_VER
-   #define CONST64(n) n ## ui64
-   typedef unsigned __int64 ulong64;
-#else
-   #define CONST64(n) n ## ULL
-   typedef unsigned long long ulong64;
-#endif
-
-/* this is the "32-bit at least" data type
- * Re-define it to suit your platform but it must be at least 32-bits
+/* LibTomCrypt, modular cryptographic library -- Tom St Denis
+ *
+ * LibTomCrypt is a library that provides various cryptographic
+ * algorithms in a highly modular and flexible manner.
+ *
+ * The library is free for all purposes without any express
+ * guarantee it works.
  */
-#if defined(__x86_64__) || (defined(__sparc__) && defined(__arch64__))
-   typedef unsigned ulong32;
-#else
-   typedef unsigned long ulong32;
-#endif
 
 /* ---- HELPER MACROS ---- */
 #ifdef ENDIAN_NEUTRAL
 
 #define STORE32L(x, y) \
-  { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
-    (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); }
+  do { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD32L(x, y) \
-  { x = ((unsigned long)((y)[3] & 255)<<24) | \
-        ((unsigned long)((y)[2] & 255)<<16) | \
-        ((unsigned long)((y)[1] & 255)<<8)  | \
-        ((unsigned long)((y)[0] & 255)); }
+  do { x = ((ulong32)((y)[3] & 255)<<24) | \
+           ((ulong32)((y)[2] & 255)<<16) | \
+           ((ulong32)((y)[1] & 255)<<8)  | \
+           ((ulong32)((y)[0] & 255)); } while(0)
 
 #define STORE64L(x, y) \
-  { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
+  do { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
     (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255); \
    (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
-    (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); }
+       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD64L(x, y) \
-  { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)| \
+  do { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)| \
        (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32)| \
        (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16)| \
-       (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); }
+       (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); } while(0)
 
 #define STORE32H(x, y) \
-  { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
-    (y)[2] = (unsigned char)(((x)>>8)&255); (y)[3] = (unsigned char)((x)&255); }
+  do { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
+       (y)[2] = (unsigned char)(((x)>>8)&255); (y)[3] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD32H(x, y) \
-  { x = ((unsigned long)((y)[0] & 255)<<24) | \
-        ((unsigned long)((y)[1] & 255)<<16) | \
-        ((unsigned long)((y)[2] & 255)<<8)  | \
-        ((unsigned long)((y)[3] & 255)); }
+  do { x = ((ulong32)((y)[0] & 255)<<24) | \
+           ((ulong32)((y)[1] & 255)<<16) | \
+           ((ulong32)((y)[2] & 255)<<8)  | \
+           ((ulong32)((y)[3] & 255)); } while(0)
 
 #define STORE64H(x, y) \
-   { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
+do { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
     (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255); \
     (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255); \
-    (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); }
+    (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD64H(x, y) \
-   { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
+do { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
         (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32) | \
         (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16) | \
-        (((ulong64)((y)[6] & 255))<<8)|(((ulong64)((y)[7] & 255))); }
+        (((ulong64)((y)[6] & 255))<<8)|(((ulong64)((y)[7] & 255))); } while(0)
+
+
+#elif defined(ENDIAN_LITTLE)
 
-#endif /* ENDIAN_NEUTRAL */
+#ifdef LTC_HAVE_BSWAP_BUILTIN
 
-#ifdef ENDIAN_LITTLE
+#define STORE32H(x, y) \
+do { ulong32 __t = __builtin_bswap32 ((x)); \
+     XMEMCPY ((y), &__t, 4); } while(0)
 
-#if !defined(LTC_NO_BSWAP) && (defined(INTEL_CC) || (defined(__GNUC__) && (defined(__DJGPP__) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__i386__) || defined(__x86_64__))))
+#define LOAD32H(x, y) \
+do { XMEMCPY (&(x), (y), 4); \
+     (x) = __builtin_bswap32 ((x)); } while(0)
+
+#elif !defined(LTC_NO_BSWAP) && (defined(INTEL_CC) || (defined(__GNUC__) && (defined(__DJGPP__) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__i386__) || defined(__x86_64__))))
 
 #define STORE32H(x, y) \
 asm __volatile__ ( \
@@ -85,144 +85,152 @@ asm __volatile__ ( \
 #else
 
 #define STORE32H(x, y) \
-  { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
-    (y)[2] = (unsigned char)(((x)>>8)&255); (y)[3] = (unsigned char)((x)&255); }
+  do { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
+       (y)[2] = (unsigned char)(((x)>>8)&255); (y)[3] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD32H(x, y) \
-  { x = ((unsigned long)((y)[0] & 255)<<24) | \
-        ((unsigned long)((y)[1] & 255)<<16) | \
-        ((unsigned long)((y)[2] & 255)<<8)  | \
-        ((unsigned long)((y)[3] & 255)); }
+  do { x = ((ulong32)((y)[0] & 255)<<24) | \
+           ((ulong32)((y)[1] & 255)<<16) | \
+           ((ulong32)((y)[2] & 255)<<8)  | \
+           ((ulong32)((y)[3] & 255)); } while(0)
 
 #endif
 
+#ifdef LTC_HAVE_BSWAP_BUILTIN
+
+#define STORE64H(x, y) \
+do { ulong64 __t = __builtin_bswap64 ((x)); \
+     XMEMCPY ((y), &__t, 8); } while(0)
+
+#define LOAD64H(x, y) \
+do { XMEMCPY (&(x), (y), 8); \
+     (x) = __builtin_bswap64 ((x)); } while(0)
 
 /* x86_64 processor */
-#if !defined(LTC_NO_BSWAP) && (defined(__GNUC__) && defined(__x86_64__))
+#elif !defined(LTC_NO_BSWAP) && (defined(__GNUC__) && defined(__x86_64__))
 
 #define STORE64H(x, y) \
 asm __volatile__ ( \
    "bswapq %0     \n\t" \
   "movq   %0,(%1)\n\t" \
    "bswapq %0     \n\t" \
-   ::"r"(x), "r"(y));
+   ::"r"(x), "r"(y): "memory");
 
 #define LOAD64H(x, y) \
 asm __volatile__ ( \
    "movq (%1),%0\n\t" \
   "bswapq %0\n\t" \
-   :"=r"(x): "r"(y));
+   :"=r"(x): "r"(y): "memory");
 
 #else
 
 #define STORE64H(x, y) \
-   { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
+do { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
    (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255); \
    (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255); \
-   (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); }
+   (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD64H(x, y) \
-   { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
+do { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
        (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32) | \
        (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16) | \
-       (((ulong64)((y)[6] & 255))<<8)|(((ulong64)((y)[7] & 255))); }
+       (((ulong64)((y)[6] & 255))<<8)|(((ulong64)((y)[7] & 255))); } while(0)
 
 #endif
 
-#ifdef ENDIAN_32BITWORD 
+#ifdef ENDIAN_32BITWORD
 
 #define STORE32L(x, y) \
-  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+  do { ulong32 __t = (x); XMEMCPY(y, &__t, 4); } while(0)
 
 #define LOAD32L(x, y) \
-  XMEMCPY(&(x), y, 4);
+  do { XMEMCPY(&(x), y, 4); } while(0)
 
 #define STORE64L(x, y) \
-  { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
+  do { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
    (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255); \
    (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
-    (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); }
+       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD64L(x, y) \
-  { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)| \
+  do { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)| \
       (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32)| \
      (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16)| \
-      (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); }
+      (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); } while(0)
 
 #else /* 64-bit words then */
 
 #define STORE32L(x, y) \
-  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+  do { ulong32 __t = (x); XMEMCPY(y, &__t, 4); } while(0)
 
 #define LOAD32L(x, y) \
-  { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; }
+  do { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; } while(0)
 
 #define STORE64L(x, y) \
-  { ulong64 __t = (x); XMEMCPY(y, &__t, 8); }
+  do { ulong64 __t = (x); XMEMCPY(y, &__t, 8); } while(0)
 
 #define LOAD64L(x, y) \
-  { XMEMCPY(&(x), y, 8); }
+  do { XMEMCPY(&(x), y, 8); } while(0)
 
 #endif /* ENDIAN_64BITWORD */
 
-#endif /* ENDIAN_LITTLE */
+#elif defined(ENDIAN_BIG)
 
-#ifdef ENDIAN_BIG
 #define STORE32L(x, y) \
-  { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
-    (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); }
+  do { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
+       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD32L(x, y) \
-  { x = ((unsigned long)((y)[3] & 255)<<24) | \
-        ((unsigned long)((y)[2] & 255)<<16) | \
-        ((unsigned long)((y)[1] & 255)<<8)  | \
-        ((unsigned long)((y)[0] & 255)); }
+  do { x = ((ulong32)((y)[3] & 255)<<24) | \
+           ((ulong32)((y)[2] & 255)<<16) | \
+           ((ulong32)((y)[1] & 255)<<8)  | \
+           ((ulong32)((y)[0] & 255)); } while(0)
 
 #define STORE64L(x, y) \
-   { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
+do { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255); \
    (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255); \
    (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255); \
-   (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); }
+   (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD64L(x, y) \
-   { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48) | \
+do { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48) | \
       (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32) | \
      (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16) | \
-      (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); }
+      (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); } while(0)
 
-#ifdef ENDIAN_32BITWORD 
+#ifdef ENDIAN_32BITWORD
 
 #define STORE32H(x, y) \
-  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+  do { ulong32 __t = (x); XMEMCPY(y, &__t, 4); } while(0)
 
 #define LOAD32H(x, y) \
-  XMEMCPY(&(x), y, 4);
+  do { XMEMCPY(&(x), y, 4); } while(0)
 
 #define STORE64H(x, y) \
-  { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
+  do { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255); \
    (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255); \
    (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255); \
-   (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); }
+   (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); } while(0)
 
 #define LOAD64H(x, y) \
-  { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48)| \
+  do { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48)| \
      (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32)| \
      (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16)| \
-     (((ulong64)((y)[6] & 255))<<8)| (((ulong64)((y)[7] & 255))); }
+     (((ulong64)((y)[6] & 255))<<8)| (((ulong64)((y)[7] & 255))); } while(0)
 
 #else /* 64-bit words then */
 
 #define STORE32H(x, y) \
-  { ulong32 __t = (x); XMEMCPY(y, &__t, 4); }
+  do { ulong32 __t = (x); XMEMCPY(y, &__t, 4); } while(0)
 
 #define LOAD32H(x, y) \
-  { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; }
+  do { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; } while(0)
 
 #define STORE64H(x, y) \
-  { ulong64 __t = (x); XMEMCPY(y, &__t, 8); }
+  do { ulong64 __t = (x); XMEMCPY(y, &__t, 8); } while(0)
 
 #define LOAD64H(x, y) \
-  { XMEMCPY(&(x), y, 8); }
+  do { XMEMCPY(&(x), y, 8); } while(0)
 
 #endif /* ENDIAN_64BITWORD */
 #endif /* ENDIAN_BIG */
@@ -233,6 +241,7 @@ asm __volatile__ ( \
 
 /* 32-bit Rotates */
 #if defined(_MSC_VER)
+#define LTC_ROx_ASM
 
 /* instrinsic rotate */
 #include <stdlib.h>
@@ -243,8 +252,9 @@ asm __volatile__ ( \
 #define ROLc(x,n) _lrotl(x,n)
 
 #elif !defined(__STRICT_ANSI__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && !defined(INTEL_CC) && !defined(LTC_NO_ASM)
+#define LTC_ROx_ASM
 
-static inline unsigned ROL(unsigned word, int i)
+static inline ulong32 ROL(ulong32 word, int i)
 {
    asm ("roll %%cl,%0"
       :"=r" (word)
@@ -252,7 +262,7 @@ static inline unsigned ROL(unsigned word, int i)
    return word;
 }
 
-static inline unsigned ROR(unsigned word, int i)
+static inline ulong32 ROR(ulong32 word, int i)
 {
    asm ("rorl %%cl,%0"
      :"=r" (word)
@@ -262,21 +272,22 @@ static inline unsigned ROR(unsigned word, int i)
 
 #ifndef LTC_NO_ROLC
 
-static inline unsigned ROLc(unsigned word, const int i)
-{
-   asm ("roll %2,%0"
-      :"=r" (word)
-      :"0" (word),"I" (i));
-   return word;
-}
-
-static inline unsigned RORc(unsigned word, const int i)
-{
-   asm ("rorl %2,%0"
-      :"=r" (word)
-      :"0" (word),"I" (i));
-   return word;
-}
+#define ROLc(word,i) ({ \
+   ulong32 __ROLc_tmp = (word); \
+   __asm__ ("roll %2, %0" : \
+            "=r" (__ROLc_tmp) : \
+            "0" (__ROLc_tmp), \
+            "I" (i)); \
+   __ROLc_tmp; \
+   })
+#define RORc(word,i) ({ \
+   ulong32 __RORc_tmp = (word); \
+   __asm__ ("rorl %2, %0" : \
+            "=r" (__RORc_tmp) : \
+            "0" (__RORc_tmp), \
+            "I" (i)); \
+   __RORc_tmp; \
+   })
 
 #else
 
@@ -286,8 +297,9 @@ static inline unsigned RORc(unsigned word, const int i)
 #endif
 
 #elif !defined(__STRICT_ANSI__) && defined(LTC_PPC32)
+#define LTC_ROx_ASM
 
-static inline unsigned ROL(unsigned word, int i)
+static inline ulong32 ROL(ulong32 word, int i)
 {
    asm ("rotlw %0,%0,%2"
      :"=r" (word)
@@ -295,7 +307,7 @@ static inline unsigned ROL(unsigned word, int i)
   return word;
 }
 
-static inline unsigned ROR(unsigned word, int i)
+static inline ulong32 ROR(ulong32 word, int i)
 {
    asm ("rotlw %0,%0,%2"
     :"=r" (word)
@@ -305,7 +317,7 @@ static inline unsigned ROR(unsigned word, int i)
 
 #ifndef LTC_NO_ROLC
 
-static inline unsigned ROLc(unsigned word, const int i)
+static inline ulong32 ROLc(ulong32 word, const int i)
 {
    asm ("rotlwi %0,%0,%2"
     :"=r" (word)
@@ -313,7 +325,7 @@ static inline unsigned ROLc(unsigned word, const int i)
   return word;
 }
 
-static inline unsigned RORc(unsigned word, const int i)
+static inline ulong32 RORc(ulong32 word, const int i)
 {
    asm ("rotrwi %0,%0,%2"
    :"=r" (word)
@@ -332,18 +344,18 @@ static inline unsigned RORc(unsigned word, const int i)
 #else
 
 /* rotates the hard way */
-#define ROL(x, y) ( (((unsigned long)(x)<<(unsigned long)((y)&31)) | (((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
-#define ROR(x, y) ( ((((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)((y)&31)) | ((unsigned long)(x)<<(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
-#define ROLc(x, y) ( (((unsigned long)(x)<<(unsigned long)((y)&31)) | (((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
-#define RORc(x, y) ( ((((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)((y)&31)) | ((unsigned long)(x)<<(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+#define ROL(x, y) ( (((ulong32)(x)<<(ulong32)((y)&31)) | (((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
+#define ROR(x, y) ( ((((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((y)&31)) | ((ulong32)(x)<<(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
+#define ROLc(x, y) ( (((ulong32)(x)<<(ulong32)((y)&31)) | (((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
+#define RORc(x, y) ( ((((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((y)&31)) | ((ulong32)(x)<<(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
 
 #endif
 
 
 /* 64-bit Rotates */
-#if !defined(__STRICT_ANSI__) && defined(__GNUC__) && defined(__x86_64__) && !defined(LTC_NO_ASM)
+#if !defined(__STRICT_ANSI__) && defined(__GNUC__) && defined(__x86_64__) && !defined(_WIN64) && !defined(LTC_NO_ASM)
 
-static inline unsigned long ROL64(unsigned long word, int i)
+static inline ulong64 ROL64(ulong64 word, int i)
 {
    asm("rolq %%cl,%0"
      :"=r" (word)
@@ -351,7 +363,7 @@ static inline unsigned long ROL64(unsigned long word, int i)
    return word;
 }
 
-static inline unsigned long ROR64(unsigned long word, int i)
+static inline ulong64 ROR64(ulong64 word, int i)
 {
    asm("rorq %%cl,%0"
     :"=r" (word)
@@ -361,21 +373,22 @@ static inline unsigned long ROR64(unsigned long word, int i)
 
 #ifndef LTC_NO_ROLC
 
-static inline unsigned long ROL64c(unsigned long word, const int i)
-{
-   asm("rolq %2,%0"
-      :"=r" (word)
-      :"0" (word),"J" (i));
-   return word;
-}
-
-static inline unsigned long ROR64c(unsigned long word, const int i)
-{
-   asm("rorq %2,%0"
-      :"=r" (word)
-      :"0" (word),"J" (i));
-   return word;
-}
+#define ROL64c(word,i) ({ \
+   ulong64 __ROL64c_tmp = word; \
+   __asm__ ("rolq %2, %0" : \
+            "=r" (__ROL64c_tmp) : \
+            "0" (__ROL64c_tmp), \
+            "J" (i)); \
+   __ROL64c_tmp; \
+   })
+#define ROR64c(word,i) ({ \
+   ulong64 __ROR64c_tmp = word; \
+   __asm__ ("rorq %2, %0" : \
+            "=r" (__ROR64c_tmp) : \
+            "0" (__ROR64c_tmp), \
+            "J" (i)); \
+   __ROR64c_tmp; \
+   })
 
 #else /* LTC_NO_ROLC */
 
@@ -388,19 +401,19 @@ static inline unsigned long ROR64c(unsigned long word, const int i)
 
 #define ROL64(x, y) \
     ( (((x)<<((ulong64)(y)&63)) | \
-      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)64-((y)&63)))) & CONST64(0xFFFFFFFFFFFFFFFF))
+      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))
 
 #define ROR64(x, y) \
     ( ((((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)(y)&CONST64(63))) | \
-      ((x)<<((ulong64)(64-((y)&CONST64(63)))))) & CONST64(0xFFFFFFFFFFFFFFFF))
+      ((x)<<(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))
 
 #define ROL64c(x, y) \
     ( (((x)<<((ulong64)(y)&63)) | \
-      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)64-((y)&63)))) & CONST64(0xFFFFFFFFFFFFFFFF))
+      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))
 
 #define ROR64c(x, y) \
     ( ((((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)(y)&CONST64(63))) | \
-      ((x)<<((ulong64)(64-((y)&CONST64(63)))))) & CONST64(0xFFFFFFFFFFFFFFFF))
+      ((x)<<(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))
 
 #endif
 
@@ -412,13 +425,22 @@ static inline unsigned long ROR64c(unsigned long word, const int i)
 #define MIN(x, y) ( ((x)<(y))?(x):(y) )
 #endif
 
+#ifndef LTC_UNUSED_PARAM
+   #define LTC_UNUSED_PARAM(x) (void)(x)
+#endif
+
 /* extract a byte portably */
 #ifdef _MSC_VER
    #define byte(x, n) ((unsigned char)((x) >> (8 * (n))))
 #else
    #define byte(x, n) (((x) >> (8 * (n))) & 255)
-#endif 
+#endif
+
+/* there is no snprintf before Visual C++ 2015 */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define snprintf _snprintf
+#endif
 
-/* $Source$ */
-/* $Revision$ */
-/* $Date$ */
+/* ref: $Format:%D$ */
+/* git commit: $Format:%H$ */
+/* commit time: $Format:%ai$ */
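As a usage note, the following self-contained sketch shows why the update wraps every multi-statement macro in `do { ... } while(0)`. The `STORE32H`/`LOAD32H` bodies and the "hard way" `ROL` are copied from the portable branches of the diff above; the `ulong32` typedef, `main`, and buffer names are local stand-ins for illustration only and are not part of the Dropbear or LibTomCrypt build.

```c
#include <stdio.h>
#include <string.h>

typedef unsigned int ulong32;   /* assumed 32-bit, as the library requires */

/* Portable big-endian store/load, copied from the ENDIAN_NEUTRAL branch above. */
#define STORE32H(x, y) \
   do { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255); \
        (y)[2] = (unsigned char)(((x)>>8)&255);  (y)[3] = (unsigned char)((x)&255); } while(0)

#define LOAD32H(x, y) \
   do { x = ((ulong32)((y)[0] & 255)<<24) | \
            ((ulong32)((y)[1] & 255)<<16) | \
            ((ulong32)((y)[2] & 255)<<8)  | \
            ((ulong32)((y)[3] & 255)); } while(0)

/* "Hard way" rotate with the added (...&31) guard on the right-shift count. */
#define ROL(x, y) ( (((ulong32)(x)<<(ulong32)((y)&31)) | \
                    (((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)

int main(void)
{
   unsigned char buf[4];
   ulong32 v = 0xDEADBEEFUL, w = 0;

   /* The do/while(0) wrapping makes each macro expand to a single statement,
    * so it composes safely with if/else just like a function call would. */
   if (v != 0)
      STORE32H(v, buf);
   else
      memset(buf, 0, sizeof(buf));

   LOAD32H(w, buf);
   printf("round trip: %08lx -> %08lx\n", (unsigned long)v, (unsigned long)w);
   printf("ROL by 8:   %08lx\n", (unsigned long)ROL(v, 8));
   return 0;
}
```

With the old brace-only form, `STORE32H(v, buf);` inside the `if` branch would expand to `{ ... };`, and the extra semicolon terminates the `if` statement, leaving the `else` dangling and the program uncompilable. The masking of the right-shift count, `(32-((y)&31))&31`, likewise avoids an undefined shift by 32 when the rotate amount is a multiple of 32.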