// Crypto++ library
// misc.h - miscellaneous classes and functions
00001 #ifndef CRYPTOPP_MISC_H
00002 #define CRYPTOPP_MISC_H
00003 
00004 #include "cryptlib.h"
00005 #include "smartptr.h"
00006 #include <string.h>     // for memcpy and memmove
00007 
00008 #ifdef _MSC_VER
00009     #if _MSC_VER >= 1400
00010         // VC2005 workaround: disable declarations that conflict with winnt.h
00011         #define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
00012         #define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
00013         #define _interlockedbittestandset64 CRYPTOPP_DISABLED_INTRINSIC_3
00014         #define _interlockedbittestandreset64 CRYPTOPP_DISABLED_INTRINSIC_4
00015         #include <intrin.h>
00016         #undef _interlockedbittestandset
00017         #undef _interlockedbittestandreset
00018         #undef _interlockedbittestandset64
00019         #undef _interlockedbittestandreset64
00020         #define CRYPTOPP_FAST_ROTATE(x) 1
00021     #elif _MSC_VER >= 1300
00022         #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 | (x) == 64)
00023     #else
00024         #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00025     #endif
00026 #elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
00027     (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
00028     #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00029 #elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86) // depend on GCC's peephole optimization to generate rotate instructions
00030     #define CRYPTOPP_FAST_ROTATE(x) 1
00031 #else
00032     #define CRYPTOPP_FAST_ROTATE(x) 0
00033 #endif
00034 
00035 #ifdef __BORLANDC__
00036 #include <mem.h>
00037 #endif
00038 
00039 #if defined(__GNUC__) && defined(__linux__)
00040 #define CRYPTOPP_BYTESWAP_AVAILABLE
00041 #include <byteswap.h>
00042 #include <cstring>
00043 #endif
00044 
00045 NAMESPACE_BEGIN(CryptoPP)
00046 
00047 // ************** compile-time assertion ***************
00048 
// Compile-time assertion helper: when b is false the array size is -1,
// which is ill-formed and forces a compile error at the assertion site.
template <bool b>
struct CompileAssert
{
    static char dummy[2*b-1];
};
00054 
// CRYPTOPP_COMPILE_ASSERT(x) instantiates CompileAssert<x> with a name that
// embeds __LINE__, so multiple assertions can coexist in one scope.
#define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
// In DLL import/export builds the assertion is compiled out.
#if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
#else
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
#endif
// Two-level join so __LINE__ is macro-expanded before token pasting.
#define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
#define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
00063 
00064 // ************** misc classes ***************
00065 
//! An empty class, used as a default/placeholder base or template argument.
class CRYPTOPP_DLL Empty
{
};
00069 
00070 //! _
//! Convenience class that inherits from two bases at once.
template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
{
};
00075 
00076 //! _
//! Convenience class that inherits from three bases at once.
template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
{
};
00081 
// Holds a T by value; used as a base so the held object is constructed
// before other bases that may need it.
template <class T>
class ObjectHolder
{
protected:
    T m_object;
};
00088 
// Inherit from this to disable copying: the copy constructor and copy
// assignment are declared private and left undefined (pre-C++11 idiom).
class NotCopyable
{
public:
    NotCopyable() {}
private:
    NotCopyable(const NotCopyable &);
    void operator=(const NotCopyable &);
};
00097 
// Default factory functor for Singleton: heap-allocates a default-constructed T.
template <class T>
struct NewObject
{
    T* operator()() const {return new T;}
};
00103 
/*! This function safely initializes a static object in a multithreaded environment without using locks (for portability).
    Note that if two threads call Ref() at the same time, they may get back different references, and one object 
    may end up being memory leaked. This is by design.
*/
// T is the object type, F the factory functor used to create it, and
// `instance` distinguishes independent singletons of the same T.
template <class T, class F = NewObject<T>, int instance=0>
class Singleton
{
public:
    Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}

    // prevent this function from being inlined
    CRYPTOPP_NOINLINE const T & Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;

private:
    F m_objectFactory;  // invoked at most a few times, on first use
};
00120 
// Returns the singleton instance, creating it on first call.
// Lock-free by design (see class comment): a losing racer deletes its own
// freshly created object, but two racers may still both publish, leaking one.
// NOTE(review): relies on pointer load/store through a volatile being
// effectively atomic on the target platform - not guaranteed by C++03.
template <class T, class F, int instance>
const T & Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
{
    static volatile simple_ptr<T> s_pObject;
    T *p = s_pObject.m_p;   // first check: fast path when already created

    if (p)
        return *p;

    T *newObject = m_objectFactory();
    p = s_pObject.m_p;      // second check: someone may have won the race

    if (p)
    {
        delete newObject;   // lost the race; discard our copy
        return *p;
    }

    s_pObject.m_p = newObject;  // publish (may itself race; leak is accepted)
    return *newObject;
}
00142 
00143 // ************** misc functions ***************
00144 
00145 #if (!__STDC_WANT_SECURE_LIB__)
00146 inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
00147 {
00148     if (count > sizeInBytes)
00149         throw InvalidArgument("memcpy_s: buffer overflow");
00150     memcpy(dest, src, count);
00151 }
00152 
00153 inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
00154 {
00155     if (count > sizeInBytes)
00156         throw InvalidArgument("memmove_s: buffer overflow");
00157     memmove(dest, src, count);
00158 }
00159 
00160 #if __BORLANDC__ >= 0x620
00161 // C++Builder 2010 workaround: can't use std::memcpy_s because it doesn't allow 0 lengths
00162 #define memcpy_s CryptoPP::memcpy_s
00163 #define memmove_s CryptoPP::memmove_s
00164 #endif
00165 #endif
00166 
// memset wrapper that compiles away a known-zero length.
inline void * memset_z(void *ptr, int value, size_t num)
{
// avoid extranous warning on GCC 4.3.2 Ubuntu 8.10
#if CRYPTOPP_GCC_VERSION >= 30001
    // __builtin_constant_p lets GCC skip the call when num is a literal 0
    if (__builtin_constant_p(num) && num==0)
        return ptr;
#endif
    return memset(ptr, value, num);
}
00176 
// Local stand-in for std::min (std::min/std::max are unusable on MSVC60
// and Cygwin 1.1.0).  Returns the smaller of a and b; a wins ties.
template <class T> inline const T& STDMIN(const T& a, const T& b)
{
    if (b < a)
        return b;
    return a;
}
00182 
// Returns min(a, b) as a T1 when mixing types of different widths.
// The compile-time assertion requires the wider of the two types to be
// unsigned, so the value-preserving cast below is safe.
template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
{
    CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
    assert(a==0 || a>0);    // GCC workaround: get rid of the warning "comparison is always true due to limited range of data type"
    assert(b>=0);

    // cast the narrower value up to the wider (unsigned) type, compare there
    if (sizeof(T1)<=sizeof(T2))
        return b < (T2)a ? (T1)b : a;
    else
        return (T1)b < a ? (T1)b : a;
}
00194 
// Local stand-in for std::max; returns the larger of a and b, a wins ties.
template <class T> inline const T& STDMAX(const T& a, const T& b)
{
    if (a < b)
        return b;
    return a;
}
00199 
// Declares a local, returns it from the enclosing function if nonzero.
#define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue

// GETBYTE(x, y): byte y of integer x (y == 0 is the least significant byte),
// widened to unsigned int.
// this version of the macro is fastest on Pentium 3 and Pentium 4 with MSVC 6 SP5 w/ Processor Pack
#define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))
// these may be faster on other CPUs/compilers
// #define GETBYTE(x, y) (unsigned int)(((x)>>(8*(y)))&255)
// #define GETBYTE(x, y) (((byte *)&(x))[y])

// Same extraction, but typed as byte rather than unsigned int.
#define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))
00209 
// Returns the bit parity of value: 1 if an odd number of bits are set, else 0.
template <class T>
unsigned int Parity(T value)
{
    // XOR the two halves together repeatedly until bit 0 holds the
    // parity of the whole word.
    unsigned int shift = 8*sizeof(value)/2;
    while (shift > 0)
    {
        value ^= value >> shift;
        shift /= 2;
    }
    return (unsigned int)value & 1;
}
00217 
// Number of significant bytes in value: 0 for 0, otherwise the index of the
// highest nonzero byte plus one.
template <class T>
unsigned int BytePrecision(const T &value)
{
    if (!value)
        return 0;

    // Binary search on the bit position, narrowing to an 8-bit window.
    unsigned int lo = 0, hi = 8*sizeof(value);
    while (hi - lo > 8)
    {
        const unsigned int mid = (lo + hi) / 2;
        if (value >> mid)
            lo = mid;
        else
            hi = mid;
    }

    return hi / 8;
}
00237 
// Number of significant bits in value: 0 for 0, otherwise the index of the
// highest set bit plus one.
template <class T>
unsigned int BitPrecision(const T &value)
{
    if (!value)
        return 0;

    // Binary search for the highest set bit.
    unsigned int lo = 0, hi = 8*sizeof(value);
    while (hi - lo > 1)
    {
        const unsigned int mid = (lo + hi) / 2;
        if (value >> mid)
            lo = mid;
        else
            hi = mid;
    }

    return hi;
}
00257 
// Index of the lowest set bit of v.  Precondition: v != 0 (the GCC builtin
// and the BSF intrinsic are both undefined for 0).
inline unsigned int TrailingZeros(word32 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
    return __builtin_ctz(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400
    unsigned long result;
    _BitScanForward(&result, v);
    return result;
#else
    // from http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup
    // (v & -v) isolates the lowest set bit; the De Bruijn multiply maps each
    // possible single-bit value to a distinct 5-bit table index.
    static const int MultiplyDeBruijnBitPosition[32] = 
    {
      0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 
      31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };
    return MultiplyDeBruijnBitPosition[((word32)((v & -v) * 0x077CB531U)) >> 27];
#endif
}
00276 
// 64-bit variant of TrailingZeros.  Precondition: v != 0.
inline unsigned int TrailingZeros(word64 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
    return __builtin_ctzll(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(_M_X64) || defined(_M_IA64))
    unsigned long result;
    _BitScanForward64(&result, v);
    return result;
#else
    // scan the low half first; fall back to the high half when it is zero
    return word32(v) ? TrailingZeros(word32(v)) : 32 + TrailingZeros(word32(v>>32));
#endif
}
00289 
// Keep only the low `size` bits of value; a size of at least the full bit
// width returns value unchanged.
template <class T>
inline T Crop(T value, size_t size)
{
    const size_t width = 8*sizeof(value);
    if (size >= width)
        return value;
    return T(value & ((T(1) << size) - 1));
}
00298 
// Convert `from` into `to`, reporting whether the value survived intact.
// Returns false when the round-trip changes the value or flips its sign.
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
{
    to = (T2)from;
    return from == to && (from > 0) == (to > 0);
}
00307 
// Bytes needed to hold bitCount bits, rounding up.
inline size_t BitsToBytes(size_t bitCount)
{
    return (bitCount + 7) / 8;
}
00312 
// Machine words (of WORD_SIZE bytes) needed to hold byteCount bytes, rounding up.
inline size_t BytesToWords(size_t byteCount)
{
    return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
}
00317 
// Machine words (of WORD_BITS bits) needed to hold bitCount bits, rounding up.
inline size_t BitsToWords(size_t bitCount)
{
    return ((bitCount+WORD_BITS-1)/(WORD_BITS));
}
00322 
// Double machine words needed to hold bitCount bits, rounding up.
inline size_t BitsToDwords(size_t bitCount)
{
    return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
}
00327 
00328 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
00329 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);
00330 
00331 CRYPTOPP_DLL bool CRYPTOPP_API VerifyBufsEqual(const byte *buf1, const byte *buf2, size_t count);
00332 
// True when n is a positive power of two (exactly one bit set).
template <class T>
inline bool IsPowerOf2(const T &n)
{
    if (n <= 0)
        return false;
    // clearing the lowest set bit leaves zero only for powers of two
    return (n & (n-1)) == 0;
}
00338 
// a mod b, where b must be a power of two: reduces to a single mask.
template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
{
    assert(IsPowerOf2(b));
    const T2 mask = b - 1;
    return T2(a) & mask;
}
00345 
// Largest multiple of m that is <= n.  Power-of-two moduli take the
// mask-based fast path.
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
{
    if (!IsPowerOf2(m))
        return n - n%m;
    return n - ModPowerOf2(n, m);
}
00354 
// Smallest multiple of m that is >= n.  Throws when n+m-1 wraps around,
// i.e. when n is too close to the maximum value of its type.
template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
{
    if (n+m-1 < n)
        throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
    return RoundDownToMultipleOf(n+m-1, m);
}
00362 
// Alignment requirement of T in bytes.  The dummy parameter exists only to
// work around a VC60 template deduction bug.
template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)   // VC60 workaround
{
#ifdef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    // unaligned access is allowed, so pretend small types need no alignment
    if (sizeof(T) < 16)
        return 1;
#endif

#if (_MSC_VER >= 1300)
    return __alignof(T);
#elif defined(__GNUC__)
    return __alignof__(T);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return UnsignedMin(4U, sizeof(T));
#else
    // fallback guess: size as a stand-in for alignment
    return sizeof(T);
#endif
}
00381 
00382 inline bool IsAlignedOn(const void *p, unsigned int alignment)
00383 {
00384     return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
00385 }
00386 
// True when p is suitably aligned for type T.  The dummy parameter exists
// only to work around a VC60 template deduction bug.
template <class T>
inline bool IsAligned(const void *p, T *dummy=NULL) // VC60 workaround
{
    return IsAlignedOn(p, GetAlignmentOf<T>());
}
00392 
// Compile-time tag type for the build target's byte order.
#ifdef IS_LITTLE_ENDIAN
    typedef LittleEndian NativeByteOrder;
#else
    typedef BigEndian NativeByteOrder;
#endif
00398 
// Runtime enum value for the build target's byte order.
inline ByteOrder GetNativeByteOrder()
{
    return NativeByteOrder::ToEnum();
}
00403 
// True when `order` matches the build target's byte order.
inline bool NativeByteOrderIs(ByteOrder order)
{
    return order == GetNativeByteOrder();
}
00408 
// Render integer a in the given base (digits above 9 use lower-case
// letters); negative values get a leading '-'.
template <class T>
std::string IntToString(T a, unsigned int base = 10)
{
    if (a == 0)
        return "0";

    bool negative = false;
    if (a < 0)
    {
        negative = true;
        a = 0-a;    // written as 0-a because VC .NET rejects -a here
    }

    std::string digits;
    while (a > 0)
    {
        const T d = a % base;
        const char c = char((d < 10 ? '0' : ('a' - 10)) + d);
        digits.insert(digits.begin(), c);
        a /= base;
    }

    return negative ? "-" + digits : digits;
}
00431 
// a - b, clamped at zero instead of wrapping below it.
template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
{
    if (a > b)
        return T1(a - b);
    return T1(0);
}
00437 
// Maps an object's IsForwardTransformation() flag to the CipherDir enum.
template <class T>
inline CipherDir GetCipherDir(const T &obj)
{
    return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
}
00443 
00444 CRYPTOPP_DLL void CRYPTOPP_API CallNewHandler();
00445 
00446 inline void IncrementCounterByOne(byte *inout, unsigned int s)
00447 {
00448     for (int i=s-1, carry=1; i>=0 && carry; i--)
00449         carry = !++inout[i];
00450 }
00451 
// Copying variant: output = input + 1 (big-endian, s bytes).  Bytes are
// written with the carry applied while it propagates; the untouched high
// bytes (indices 0..i) are then block-copied from input.
inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
{
    int i, carry;
    for (i=s-1, carry=1; i>=0 && carry; i--)
        carry = ((output[i] = input[i]+1) == 0);
    memcpy_s(output, s, input, i+1);    // i+1 bytes remain uncopied after the loop
}
00459 
// Swap a and b when c is true, using XOR arithmetic instead of a branch:
// t is a^b when c, 0 otherwise, so the two XORs either swap or do nothing.
// NOTE(review): the branch-free form is presumably for constant-time
// behavior - confirm before restructuring.  T should be an integer type.
template <class T>
inline void ConditionalSwap(bool c, T &a, T &b)
{
    T t = c * (a ^ b);
    a ^= t;
    b ^= t;
}
00467 
// Branch-free pointer swap: t is the pointer difference when c, 0 otherwise.
// T must be a pointer type, and a and b must point into the same array for
// the subtraction to be well defined.
template <class T>
inline void ConditionalSwapPointers(bool c, T &a, T &b)
{
    ptrdiff_t t = c * (a - b);
    a -= t;
    b += t;
}
00475 
// see http://www.dwheeler.com/secure-programs/Secure-Programs-HOWTO/protect-secrets.html
// and https://www.securecoding.cert.org/confluence/display/cplusplus/MSC06-CPP.+Be+aware+of+compiler+optimization+when+dealing+with+sensitive+data
// Zero n elements of buf through a volatile pointer so the stores cannot be
// optimized away as dead writes.
template <class T>
void SecureWipeBuffer(T *buf, size_t n)
{
    // GCC 4.3.2 on Cygwin optimizes away the first store if this loop is done in the forward direction
    volatile T *p = buf+n;
    while (n--)
        *(--p) = 0;
}
00486 
00487 #if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)
00488 
// byte specialization: uses rep stosb / __stosb for a fast, non-elidable wipe.
template<> inline void SecureWipeBuffer(byte *buf, size_t n)
{
    volatile byte *p = buf;
#ifdef __GNUC__
    // "memory" clobber keeps the compiler from caching buf's contents
    asm volatile("rep stosb" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosb((byte *)(size_t)p, 0, n);
#endif
}
00498 
// word16 specialization: rep stosw / __stosw.
template<> inline void SecureWipeBuffer(word16 *buf, size_t n)
{
    volatile word16 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosw" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosw((word16 *)(size_t)p, 0, n);
#endif
}
00508 
// word32 specialization: rep stosl / __stosd.
template<> inline void SecureWipeBuffer(word32 *buf, size_t n)
{
    volatile word32 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosl" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosd((unsigned long *)(size_t)p, 0, n);
#endif
}
00518 
// word64 specialization: rep stosq / __stosq on x64; on x86, wipe the same
// storage as twice as many word32s.
template<> inline void SecureWipeBuffer(word64 *buf, size_t n)
{
#if CRYPTOPP_BOOL_X64
    volatile word64 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosq" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosq((word64 *)(size_t)p, 0, n);
#endif
#else
    SecureWipeBuffer((word32 *)buf, 2*n);
#endif
}
00532 
00533 #endif  // #if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)
00534 
// Wipe an array of arbitrary T by dispatching to the widest word-sized
// SecureWipeBuffer that T's size and alignment permit.
template <class T>
inline void SecureWipeArray(T *buf, size_t n)
{
    if (sizeof(T) % 8 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word64>() == 0)
        SecureWipeBuffer((word64 *)buf, n * (sizeof(T)/8));
    else if (sizeof(T) % 4 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word32>() == 0)
        SecureWipeBuffer((word32 *)buf, n * (sizeof(T)/4));
    else if (sizeof(T) % 2 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word16>() == 0)
        SecureWipeBuffer((word16 *)buf, n * (sizeof(T)/2));
    else
        SecureWipeBuffer((byte *)buf, n * sizeof(T));
}
00547 
// this function uses wcstombs(), which assumes that setlocale() has been called
// Convert a wide string to a narrow (multibyte) string in the current locale.
// On conversion failure, throws InvalidArgument when throwOnError is true,
// otherwise returns an empty string.
// NOTE(review): the second wcstombs() call is unchecked - assumed to succeed
// because the first call already measured the same input.
static std::string StringNarrow(const wchar_t *str, bool throwOnError = true)
{
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4996)  //  'wcstombs': This function or variable may be unsafe.
#endif
    size_t size = wcstombs(NULL, str, 0);   // measuring pass: NULL dest returns required length
    if (size == size_t(0)-1)    // wcstombs reports failure as (size_t)-1
    {
        if (throwOnError)
            throw InvalidArgument("StringNarrow: wcstombs() call failed");
        else
            return std::string();
    }
    std::string result(size, 0);
    wcstombs(&result[0], str, size);
    return result;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
00570 
00571 #if CRYPTOPP_BOOL_ALIGN16_ENABLED
00572 CRYPTOPP_DLL void * CRYPTOPP_API AlignedAllocate(size_t size);
00573 CRYPTOPP_DLL void CRYPTOPP_API AlignedDeallocate(void *p);
00574 #endif
00575 
00576 CRYPTOPP_DLL void * CRYPTOPP_API UnalignedAllocate(size_t size);
00577 CRYPTOPP_DLL void CRYPTOPP_API UnalignedDeallocate(void *p);
00578 
00579 // ************** rotate functions ***************
00580 
// Rotate x left by y bits; y must be less than the bit width of T.
// Fixed: the original (x >> (width - y)) shifts by the full width when
// y == 0, which is undefined behavior in C++.  The (0-y)&MASK idiom maps
// y == 0 to a shift of 0 instead (the width is always a power of two).
template <class T> inline T rotlFixed(T x, unsigned int y)
{
    static const unsigned int THIS_SIZE = sizeof(T)*8;
    static const unsigned int MASK = THIS_SIZE-1;
    assert(y < THIS_SIZE);
    return T((x<<y) | (x>>((0-y)&MASK)));
}
00586 
// Rotate x right by y bits; y must be less than the bit width of T.
// Fixed: avoids the undefined full-width shift at y == 0 via (0-y)&MASK.
template <class T> inline T rotrFixed(T x, unsigned int y)
{
    static const unsigned int THIS_SIZE = sizeof(T)*8;
    static const unsigned int MASK = THIS_SIZE-1;
    assert(y < THIS_SIZE);
    return T((x>>y) | (x<<((0-y)&MASK)));
}
00592 
// Rotate x left by a runtime-variable y; y must be less than the bit width.
// Fixed: avoids the undefined full-width shift at y == 0 via (0-y)&MASK.
template <class T> inline T rotlVariable(T x, unsigned int y)
{
    static const unsigned int THIS_SIZE = sizeof(T)*8;
    static const unsigned int MASK = THIS_SIZE-1;
    assert(y < THIS_SIZE);
    return T((x<<y) | (x>>((0-y)&MASK)));
}
00598 
// Rotate x right by a runtime-variable y; y must be less than the bit width.
// Fixed: avoids the undefined full-width shift at y == 0 via (0-y)&MASK.
template <class T> inline T rotrVariable(T x, unsigned int y)
{
    static const unsigned int THIS_SIZE = sizeof(T)*8;
    static const unsigned int MASK = THIS_SIZE-1;
    assert(y < THIS_SIZE);
    return T((x>>y) | (x<<((0-y)&MASK)));
}
00604 
// Rotate x left by y mod the bit width of T; any y is accepted.
// Fixed: the original shifted by the full width whenever y was a multiple
// of it (undefined behavior).  Masking works because the width is a power
// of two, so y & MASK == y % width.
template <class T> inline T rotlMod(T x, unsigned int y)
{
    static const unsigned int MASK = sizeof(T)*8-1;
    y &= MASK;
    return T((x<<y) | (x>>((0-y)&MASK)));
}
00610 
// Rotate x right by y mod the bit width of T; any y is accepted.
// Fixed: avoids the undefined full-width shift when y is a multiple of the
// width, using the same mask idiom as rotlMod.
template <class T> inline T rotrMod(T x, unsigned int y)
{
    static const unsigned int MASK = sizeof(T)*8-1;
    y &= MASK;
    return T((x>>y) | (x<<((0-y)&MASK)));
}
00616 
#ifdef _MSC_VER

// word32 specializations that map to MSVC's _lrotl/_lrotr intrinsics.
// The "Fixed" forms keep the y == 0 case out of the intrinsic call;
// NOTE(review): presumably a guard against a compiler/intrinsic quirk at
// a rotate count of 0 - confirm before simplifying.
template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotl(x, y) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotr(x, y) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotl(x, y);
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotr(x, y);
}

// The Mod forms rely on the intrinsic accepting any rotate count.
template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return _lrotl(x, y);
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return _lrotr(x, y);
}

#endif // #ifdef _MSC_VER
00654 
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 calls a function instead of using the rotate instruction when using these instructions

// word64 specializations mapping to MSVC's _rotl64/_rotr64 intrinsics.
template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl64(x, y) : x;
}

template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr64(x, y) : x;
}

template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl64(x, y);
}

template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr64(x, y);
}

template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
{
    return _rotl64(x, y);
}

template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
{
    return _rotr64(x, y);
}

#endif // #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
00693 
#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 gives undefined externals with these

// word16 specializations mapping to MSVC's _rotl16/_rotr16 intrinsics.
template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl16(x, y) : x;
}

template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr16(x, y) : x;
}

template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl16(x, y);
}

template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr16(x, y);
}

template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
{
    return _rotl16(x, y);
}

template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
{
    return _rotr16(x, y);
}

// byte specializations mapping to MSVC's _rotl8/_rotr8 intrinsics.
template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl8(x, y) : x;
}

template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr8(x, y) : x;
}

template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl8(x, y);
}

template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr8(x, y);
}

template<> inline byte rotlMod<byte>(byte x, unsigned int y)
{
    return _rotl8(x, y);
}

template<> inline byte rotrMod<byte>(byte x, unsigned int y)
{
    return _rotr8(x, y);
}

#endif // #if _MSC_VER >= 1400
00766 
#if (defined(__MWERKS__) && TARGET_CPU_PPC)

// PowerPC specializations using the rlwinm/rlwnm (rotate-left-word)
// intrinsics; right rotation by y is expressed as left rotation by 32-y.
template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,y,0,31) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,32-y,0,31) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,32-y,0,31));
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,32-y,0,31));
}

#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)
00804 
00805 // ************** endian reversal ***************
00806 
// Byte `index` of value under the given byte order: index 0 is the least
// significant byte for little-endian, the most significant for big-endian.
template <class T>
inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
{
    if (order == LITTLE_ENDIAN_ORDER)
        return GETBYTE(value, index);
    else
        return GETBYTE(value, sizeof(T)-index-1);
}
00815 
// Byte-order reversal of a single byte is the identity.
inline byte ByteReverse(byte value)
{
    return value;
}
00820 
// Swap the two bytes of a 16-bit word, preferring a platform byteswap.
inline word16 ByteReverse(word16 value)
{
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
    return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_ushort(value);
#else
    // an 8-bit rotate of a 16-bit word swaps its bytes
    return rotlFixed(value, 8U);
#endif
}
00831 
// Reverse the four bytes of a 32-bit word, preferring bswap/intrinsics and
// falling back to rotate/mask arithmetic.
inline word32 ByteReverse(word32 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
    return (word32)__lwbrx(&value,0);
#elif _MSC_VER >= 1400 || (_MSC_VER >= 1300 && !defined(_DLL))
    return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
    // 5 instructions with rotate instruction, 9 without
    return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
#else
    // 6 instructions with rotate instruction, 8 without
    value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
    return rotlFixed(value, 16U);
#endif
}
00852 
// Reverse the eight bytes of a 64-bit word.  When 64-bit ops are slow,
// reverse each 32-bit half and swap the halves.
inline word64 ByteReverse(word64 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_uint64(value);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
#else
    // swap adjacent bytes, then adjacent 16-bit groups, then the 32-bit halves
    value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
    value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
    return rotlFixed(value, 32U);
#endif
}
00870 
// Reverse the bit order of a byte: swap adjacent bits, then adjacent pairs,
// then the two nibbles (via a 4-bit rotate).
inline byte BitReverse(byte value)
{
    value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
    value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
    return rotlFixed(value, 4U);
}
00877 
// Reverse the bit order of a 16-bit word: reverse bits within each byte,
// then reverse the bytes.
inline word16 BitReverse(word16 value)
{
    value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
    value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
    value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
    return ByteReverse(value);
}
00885 
// Reverse the bit order of a 32-bit word: reverse bits within each byte,
// then reverse the bytes.
inline word32 BitReverse(word32 value)
{
    value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
    value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
    value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
    return ByteReverse(value);
}
00893 
// Reverse the bit order of a 64-bit word; on slow-word64 targets do it as
// two 32-bit reversals with the halves swapped.
inline word64 BitReverse(word64 value)
{
#if CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
#else
    value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
    value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
    value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
    return ByteReverse(value);
#endif
}
00905 
// Generic BitReverse: dispatch on sizeof(T) to the fixed-width overloads.
// Only 1-, 2-, 4-, and 8-byte types are supported (asserted below).
template <class T>
inline T BitReverse(T value)
{
    if (sizeof(T) == 1)
        return (T)BitReverse((byte)value);
    else if (sizeof(T) == 2)
        return (T)BitReverse((word16)value);
    else if (sizeof(T) == 4)
        return (T)BitReverse((word32)value);
    else
    {
        assert(sizeof(T) == 8);
        return (T)BitReverse((word64)value);
    }
}
00921 
// Byte-reverse value only when `order` differs from the native byte order.
template <class T>
inline T ConditionalByteReverse(ByteOrder order, T value)
{
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
}
00927 
// Byte-reverse each T-sized word of `in` into `out`; byteCount must be a
// whole number of words.  in and out may be the same buffer.
template <class T>
void ByteReverse(T *out, const T *in, size_t byteCount)
{
    assert(byteCount % sizeof(T) == 0);
    const size_t words = byteCount/sizeof(T);
    for (size_t i = 0; i != words; ++i)
        out[i] = ByteReverse(in[i]);
}
00936 
// Array form of ConditionalByteReverse: reverse into out when the order
// differs from native, otherwise just copy (skipping the no-op self-copy).
template <class T>
inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
{
    if (!NativeByteOrderIs(order))
        ByteReverse(out, in, byteCount);
    else if (in != out)
        memcpy_s(out, byteCount, in, byteCount);
}
00945 
// Load a user-supplied key of inlen bytes into an array of outlen T words:
// copy, zero-pad the tail, then byte-swap the used words to match `order`.
template <class T>
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
{
    const size_t U = sizeof(T);
    assert(inlen <= outlen*U);
    memcpy_s(out, outlen*U, in, inlen);
    memset_z((byte *)out+inlen, 0, outlen*U-inlen);
    // only the words actually touched by the key need reversing
    ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
}
00955 
00956 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00957 inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
00958 {
00959     return block[0];
00960 }
00961 
// Assemble a word16 from two unaligned bytes in the requested byte order.
inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? block[1] | (block[0] << 8)
        : block[0] | (block[1] << 8);
}
00968 
// Assemble a word32 from four unaligned bytes in the requested byte order.
inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
        : word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
}
00975 
// Assemble a word64 from eight unaligned bytes in the requested byte order.
inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ?
        (word64(block[7]) |
        (word64(block[6]) <<  8) |
        (word64(block[5]) << 16) |
        (word64(block[4]) << 24) |
        (word64(block[3]) << 32) |
        (word64(block[2]) << 40) |
        (word64(block[1]) << 48) |
        (word64(block[0]) << 56))
        :
        (word64(block[0]) |
        (word64(block[1]) <<  8) |
        (word64(block[2]) << 16) |
        (word64(block[3]) << 24) |
        (word64(block[4]) << 32) |
        (word64(block[5]) << 40) |
        (word64(block[6]) << 48) |
        (word64(block[7]) << 56));
}
00998 
00999 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
01000 {
01001     block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
01002 }
01003 
01004 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
01005 {
01006     if (order == BIG_ENDIAN_ORDER)
01007     {
01008         if (xorBlock)
01009         {
01010             block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01011             block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01012         }
01013         else
01014         {
01015             block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01016             block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01017         }
01018     }
01019     else
01020     {
01021         if (xorBlock)
01022         {
01023             block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01024             block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01025         }
01026         else
01027         {
01028             block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01029             block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01030         }
01031     }
01032 }
01033 
01034 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
01035 {
01036     if (order == BIG_ENDIAN_ORDER)
01037     {
01038         if (xorBlock)
01039         {
01040             block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01041             block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01042             block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01043             block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01044         }
01045         else
01046         {
01047             block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01048             block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01049             block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01050             block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01051         }
01052     }
01053     else
01054     {
01055         if (xorBlock)
01056         {
01057             block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01058             block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01059             block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01060             block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01061         }
01062         else
01063         {
01064             block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01065             block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01066             block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01067             block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01068         }
01069     }
01070 }
01071 
01072 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
01073 {
01074     if (order == BIG_ENDIAN_ORDER)
01075     {
01076         if (xorBlock)
01077         {
01078             block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
01079             block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
01080             block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
01081             block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
01082             block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01083             block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01084             block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01085             block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01086         }
01087         else
01088         {
01089             block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
01090             block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
01091             block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
01092             block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
01093             block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01094             block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01095             block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01096             block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01097         }
01098     }
01099     else
01100     {
01101         if (xorBlock)
01102         {
01103             block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01104             block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01105             block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01106             block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01107             block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
01108             block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
01109             block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
01110             block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
01111         }
01112         else
01113         {
01114             block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
01115             block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
01116             block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
01117             block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
01118             block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
01119             block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
01120             block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
01121             block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
01122         }
01123     }
01124 }
01125 #endif  // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
01126 
// Retrieves a word of type T from a byte buffer in the given byte order.
// assumeAligned promises that block is suitably aligned for direct T
// access; when it is false (and unaligned access is not globally
// allowed) the read falls back to a byte-at-a-time path.
template <class T>
inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
{
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    // Byte-at-a-time load when alignment cannot be assumed.
    if (!assumeAligned)
        return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
    assert(IsAligned<T>(block));
#endif
    // Direct (aligned or platform-tolerated) load, byte-swapped if the
    // requested order differs from the native one.
    return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
}
01137 
// Convenience overload of GetWord that stores the word into result,
// letting T be deduced from the output argument instead of requiring
// an explicit template argument at the call site.
template <class T>
inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
{
    result = GetWord<T>(assumeAligned, order, block);
}
01143 
// Stores a word of type T into a byte buffer in the given byte order,
// optionally XOR-ing it with the parallel buffer xorBlock (xorBlock may
// be NULL).  assumeAligned promises that block (and xorBlock, if
// non-NULL) are suitably aligned for direct T access.
template <class T>
inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
{
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    // Byte-at-a-time store when alignment cannot be assumed.
    if (!assumeAligned)
        return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
    assert(IsAligned<T>(block));
    assert(IsAligned<T>(xorBlock));
#endif
    // Direct store: byte-swap to the requested order, then XOR in the
    // mask word (or 0 when no xorBlock was supplied).
    *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
}
01155 
01156 template <class T, class B, bool A=false>
01157 class GetBlock
01158 {
01159 public:
01160     GetBlock(const void *block)
01161         : m_block((const byte *)block) {}
01162 
01163     template <class U>
01164     inline GetBlock<T, B, A> & operator()(U &x)
01165     {
01166         CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
01167         x = GetWord<T>(A, B::ToEnum(), m_block);
01168         m_block += sizeof(T);
01169         return *this;
01170     }
01171 
01172 private:
01173     const byte *m_block;
01174 };
01175 
01176 template <class T, class B, bool A=false>
01177 class PutBlock
01178 {
01179 public:
01180     PutBlock(const void *xorBlock, void *block)
01181         : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}
01182 
01183     template <class U>
01184     inline PutBlock<T, B, A> & operator()(U x)
01185     {
01186         PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
01187         m_block += sizeof(T);
01188         if (m_xorBlock)
01189             m_xorBlock += sizeof(T);
01190         return *this;
01191     }
01192 
01193 private:
01194     const byte *m_xorBlock;
01195     byte *m_block;
01196 };
01197 
// Bundles a matching GetBlock/PutBlock pair with the same word type T,
// byte-order policy B, and alignment assumptions (GA for get, PA for put).
template <class T, class B, bool GA=false, bool PA=false>
struct BlockGetAndPut
{
    // function needed because of C++ grammatical ambiguity between expression-statements and declarations
    static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
    typedef PutBlock<T, B, PA> Put;
};
01205 
01206 template <class T>
01207 std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
01208 {
01209     if (!NativeByteOrderIs(order))
01210         value = ByteReverse(value);
01211 
01212     return std::string((char *)&value, sizeof(value));
01213 }
01214 
01215 template <class T>
01216 T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
01217 {
01218     T value = 0;
01219     memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
01220     return NativeByteOrderIs(order) ? value : ByteReverse(value);
01221 }
01222 
01223 // ************** help remove warning on g++ ***************
01224 
// Compile-time dispatcher: the boolean parameter says whether the
// requested shift amount reaches or exceeds the bit width of the
// operand type (which would be undefined behavior for the built-in
// shift operators and draws a warning from g++).
template <bool overflow> struct SafeShifter;

// Overflowing case: shifting by >= the width of T is undefined in C++,
// but the mathematically correct result is zero.
template<> struct SafeShifter<true>
{
    template <class T>
    static inline T LeftShift(T value, unsigned int bits)
    {
        return 0;
    }

    template <class T>
    static inline T RightShift(T value, unsigned int bits)
    {
        return 0;
    }
};

// Non-overflowing case: the plain shift operators are well defined.
template<> struct SafeShifter<false>
{
    template <class T>
    static inline T LeftShift(T value, unsigned int bits)
    {
        return value << bits;
    }

    template <class T>
    static inline T RightShift(T value, unsigned int bits)
    {
        return value >> bits;
    }
};

// Shifts value right by the compile-time amount bits, yielding 0
// (instead of undefined behavior) when bits >= the width of T.
template <unsigned int bits, class T>
inline T SafeRightShift(T value)
{
    return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
}

// Shifts value left by the compile-time amount bits, yielding 0
// (instead of undefined behavior) when bits >= the width of T.
template <unsigned int bits, class T>
inline T SafeLeftShift(T value)
{
    return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
}
01268 
01269 // ************** use one buffer for multiple data members ***************
01270 
// Carve a single AlignedSecByteBlock (m_aggregate) into up to eight
// named, typed sub-arrays.  CRYPTOPP_BLOCK_i(n, t, s) declares an
// accessor m_n() returning a t* into the aggregate at the running byte
// offset SS(i-1)(), a size helper m_nSize() returning the element count
// s, and the next cumulative offset SSi().  CRYPTOPP_BLOCKS_END(i)
// closes the sequence: SST() is the total byte size and
// AllocateBlocks() performs the one allocation backing all members.
#define CRYPTOPP_BLOCK_1(n, t, s) t* m_##n() {return (t *)(m_aggregate+0);}     size_t SS1() {return       sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_2(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS1());} size_t SS2() {return SS1()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_3(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS2());} size_t SS3() {return SS2()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_4(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS3());} size_t SS4() {return SS3()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_5(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS4());} size_t SS5() {return SS4()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_6(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS5());} size_t SS6() {return SS5()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_7(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS6());} size_t SS7() {return SS6()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_8(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS7());} size_t SS8() {return SS7()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCKS_END(i) size_t SST() {return SS##i();} void AllocateBlocks() {m_aggregate.New(SST());} AlignedSecByteBlock m_aggregate;
01280 
01281 NAMESPACE_END
01282 
01283 #endif