From dfc73bec451e7bd0d31a97a67e982888158df03b Mon Sep 17 00:00:00 2001
From: Olaf van der Spek
Date: Mon, 30 Jan 2017 11:05:09 +0100
Subject: [PATCH] Use uint32_t and uint64_t

---
 src/base/arena.cc      | 12 ++++++------
 src/base/arena.h       | 10 +++++-----
 src/base/macros.h      | 32 ++++++--------------------------
 src/template_string.cc | 20 ++++++++++----------
 4 files changed, 27 insertions(+), 47 deletions(-)

diff --git a/src/base/arena.cc b/src/base/arena.cc
index 62df770..5893e72 100644
--- a/src/base/arena.cc
+++ b/src/base/arena.cc
@@ -400,19 +400,19 @@ void* BaseArena::GetMemoryWithHandle(
   }
   CHECK_GE(block_index, 0) << "Failed to find block that was allocated from";
   CHECK(block != NULL) << "Failed to find block that was allocated from";
-  const uint64 offset = reinterpret_cast<char*>(p) - block->mem;
+  const uint64_t offset = reinterpret_cast<char*>(p) - block->mem;
   DCHECK_LT(offset, block_size_);
   DCHECK((offset & ((1 << handle_alignment_bits_) - 1)) == 0);
   DCHECK((block_size_ & ((1 << handle_alignment_bits_) - 1)) == 0);
-  uint64 handle_value =
-      ((static_cast<uint64>(block_index) << block_size_bits_) + offset) >>
+  uint64_t handle_value =
+      ((static_cast<uint64_t>(block_index) << block_size_bits_) + offset) >>
       handle_alignment_bits_;
-  if (handle_value >= static_cast<uint64>(0xFFFFFFFF)) {
+  if (handle_value >= static_cast<uint64_t>(0xFFFFFFFF)) {
     // We ran out of space to be able to return a handle, so return an invalid
     // handle.
     handle_value = Handle::kInvalidValue;
   }
-  handle->handle_ = static_cast<uint32>(handle_value);
+  handle->handle_ = static_cast<uint32_t>(handle_value);
   return p;
 }
 
@@ -448,7 +448,7 @@ void BaseArena::set_handle_alignment(int align) {
 
 void* BaseArena::HandleToPointer(const Handle& h) const {
   CHECK(h.valid());
-  uint64 handle = static_cast<uint64>(h.handle_) << handle_alignment_bits_;
+  uint64_t handle = static_cast<uint64_t>(h.handle_) << handle_alignment_bits_;
   int block_index = static_cast<int>(handle >> block_size_bits_);
   size_t block_offset =
       static_cast<size_t>(handle & ((1 << block_size_bits_) - 1));
diff --git a/src/base/arena.h b/src/base/arena.h
index 049a6b5..c48c784 100644
--- a/src/base/arena.h
+++ b/src/base/arena.h
@@ -276,7 +276,7 @@
 #include
 #include
 #include "base/thread_annotations.h"
-#include "base/macros.h"   // for uint32
+#include "base/macros.h"
 #include "base/util.h"     // for CHECK, etc
 
 namespace ctemplate {
@@ -306,23 +306,23 @@ class CTEMPLATE_DLL_DECL BaseArena {
   // copy and assignment semantics.
   class Handle {
    public:
-    static const uint32 kInvalidValue = 0xFFFFFFFF;   // int32-max
+    static const uint32_t kInvalidValue = 0xFFFFFFFF;   // int32-max
 
     Handle() : handle_(kInvalidValue) { }
     // Default copy constructors are fine here.
     bool operator==(const Handle& h) const { return handle_ == h.handle_; }
     bool operator!=(const Handle& h) const { return handle_ != h.handle_; }
 
-    uint32 hash() const { return handle_; }
+    uint32_t hash() const { return handle_; }
     bool valid() const { return handle_ != kInvalidValue; }
 
    private:
     // Arena needs to be able to access the internal data.
     friend class BaseArena;
 
-    explicit Handle(uint32 handle) : handle_(handle) { }
+    explicit Handle(uint32_t handle) : handle_(handle) { }
 
-    uint32 handle_;
+    uint32_t handle_;
   };
 
   // they're "slow" only 'cause they're virtual (subclasses define "fast" ones)
diff --git a/src/base/macros.h b/src/base/macros.h
index 9d0327c..0d1e3ec 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -4,9 +4,6 @@
 // Provides macros and typedefs based on config.h settings.
 // Provides the following macros:
 //    UNALIGNED_LOAD32   (may be an inline function on some architectures)
-// and the following typedefs:
-//    uint32
-//    uint64
 
 #ifndef CTEMPLATE_MACROS_H_
 #define CTEMPLATE_MACROS_H_
@@ -24,23 +21,6 @@
 #endif
 #include <sys/types.h>      // a third place for uint32_t or u_int32_t
 #endif
-#if defined(HAVE_U_INT32_T)
-typedef u_int32_t uint32;
-#elif defined(HAVE_UINT32_T)
-typedef uint32_t uint32;
-#elif defined(HAVE___INT32)
-typedef unsigned __int32 uint32;
-#endif
-
-#if defined(HAVE_U_INT64_T)
-typedef u_int64_t uint64;
-#elif defined(HAVE_UINT64_T)
-typedef uint64_t uint64;
-#elif defined(HAVE___INT64)
-typedef unsigned __int64 uint64;
-#endif
-
-
 // This is all to figure out endian-ness and byte-swapping on various systems
 #if defined(HAVE_ENDIAN_H)
 #include <endian.h>           // for the __BYTE_ORDER use below
@@ -83,20 +63,20 @@ typedef unsigned __int64 uint64;
 
 #if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
   // We know they allow unaligned memory access and are little-endian
-# define UNALIGNED_LOAD32(_p)   (*reinterpret_cast<const uint32 *>(_p))
+# define UNALIGNED_LOAD32(_p)   (*reinterpret_cast<const uint32_t *>(_p))
 #elif defined(__ppc__) || defined(__ppc64__)
   // We know they allow unaligned memory access and are big-endian
-# define UNALIGNED_LOAD32(_p)   BSWAP32(*reinterpret_cast<const uint32 *>(_p))
+# define UNALIGNED_LOAD32(_p)   BSWAP32(*reinterpret_cast<const uint32_t *>(_p))
 #elif (BYTE_ORDER == 1234) || (_BYTE_ORDER == 1234) || defined(_LITTLE_ENDIAN)
   // Use memcpy to align the memory properly
-  inline uint32 UNALIGNED_LOAD32(const void *p) {
-    uint32 t;
+  inline uint32_t UNALIGNED_LOAD32(const void *p) {
+    uint32_t t;
     memcpy(&t, p, sizeof(t));
     return t;
   }
 #elif (BYTE_ORDER == 4321) || (_BYTE_ORDER == 4321) || defined(_BIG_ENDIAN)
-  inline uint32 UNALIGNED_LOAD32(const void *p) {
-    uint32 t;
+  inline uint32_t UNALIGNED_LOAD32(const void *p) {
+    uint32_t t;
     memcpy(&t, p, sizeof(t));
     return BSWAP32(t);
   }
diff --git a/src/template_string.cc b/src/template_string.cc
index a77d37f..e5c7e5d 100644
--- a/src/template_string.cc
+++ b/src/template_string.cc
@@ -39,7 +39,7 @@
 #include "base/arena.h"
 #include "base/thread_annotations.h"
 #include
-#include "base/macros.h"      // for uint32, uint64, UNALIGNED_LOAD32
+#include "base/macros.h"
 #include "base/util.h"
 
 #ifdef HAVE_UNORDERED_MAP
@@ -61,15 +61,15 @@ namespace ctemplate {
 //   64-bits,
 //   - uses a fixed seed.
 // This is not static because template_string_test accesses it directly.
-uint64 MurmurHash64(const char* ptr, size_t len) {
-  const uint32 kMultiplyVal = 0x5bd1e995;
+uint64_t MurmurHash64(const char* ptr, size_t len) {
+  const uint32_t kMultiplyVal = 0x5bd1e995;
   const int kShiftVal = 24;
-  const uint32 kHashSeed1 = 0xc86b14f7;
-  const uint32 kHashSeed2 = 0x650f5c4d;
+  const uint32_t kHashSeed1 = 0xc86b14f7;
+  const uint32_t kHashSeed2 = 0x650f5c4d;
 
-  uint32 h1 = kHashSeed1 ^ len, h2 = kHashSeed2;
+  uint32_t h1 = kHashSeed1 ^ len, h2 = kHashSeed2;
   while (len >= 8) {
-    uint32 k1 = UNALIGNED_LOAD32(ptr);
+    uint32_t k1 = UNALIGNED_LOAD32(ptr);
     k1 *= kMultiplyVal;
     k1 ^= k1 >> kShiftVal;
     k1 *= kMultiplyVal;
@@ -78,7 +78,7 @@ uint64 MurmurHash64(const char* ptr, size_t len) {
     h1 ^= k1;
     ptr += 4;
 
-    uint32 k2 = UNALIGNED_LOAD32(ptr);
+    uint32_t k2 = UNALIGNED_LOAD32(ptr);
     k2 *= kMultiplyVal;
     k2 ^= k2 >> kShiftVal;
     k2 *= kMultiplyVal;
@@ -91,7 +91,7 @@ uint64 MurmurHash64(const char* ptr, size_t len) {
   }
 
   if (len >= 4) {
-    uint32 k1 = UNALIGNED_LOAD32(ptr);
+    uint32_t k1 = UNALIGNED_LOAD32(ptr);
     k1 *= kMultiplyVal;
     k1 ^= k1 >> kShiftVal;
     k1 *= kMultiplyVal;
@@ -121,7 +121,7 @@ uint64 MurmurHash64(const char* ptr, size_t len) {
   h1 ^= h2 >> 17;
   h1 *= kMultiplyVal;
 
-  uint64 h = h1;
+  uint64_t h = h1;
   h = (h << 32) | h2;
   return h;
 }