
Use uint32_t and uint64_t

Olaf van der Spek 2017-01-30 11:05:09 +01:00
parent dce932e90e
commit dfc73bec45
4 changed files with 27 additions and 47 deletions

src/base/arena.cc

@@ -400,19 +400,19 @@ void* BaseArena::GetMemoryWithHandle(
   }
 
   CHECK_GE(block_index, 0) << "Failed to find block that was allocated from";
   CHECK(block != NULL) << "Failed to find block that was allocated from";
-  const uint64 offset = reinterpret_cast<char*>(p) - block->mem;
+  const uint64_t offset = reinterpret_cast<char*>(p) - block->mem;
   DCHECK_LT(offset, block_size_);
   DCHECK((offset & ((1 << handle_alignment_bits_) - 1)) == 0);
   DCHECK((block_size_ & ((1 << handle_alignment_bits_) - 1)) == 0);
-  uint64 handle_value =
-      ((static_cast<uint64>(block_index) << block_size_bits_) + offset) >>
+  uint64_t handle_value =
+      ((static_cast<uint64_t>(block_index) << block_size_bits_) + offset) >>
       handle_alignment_bits_;
-  if (handle_value >= static_cast<uint64>(0xFFFFFFFF)) {
+  if (handle_value >= static_cast<uint64_t>(0xFFFFFFFF)) {
     // We ran out of space to be able to return a handle, so return an invalid
     // handle.
     handle_value = Handle::kInvalidValue;
   }
-  handle->handle_ = static_cast<uint32>(handle_value);
+  handle->handle_ = static_cast<uint32_t>(handle_value);
   return p;
 }
@@ -448,7 +448,7 @@ void BaseArena::set_handle_alignment(int align) {
 
 void* BaseArena::HandleToPointer(const Handle& h) const {
   CHECK(h.valid());
-  uint64 handle = static_cast<uint64>(h.handle_) << handle_alignment_bits_;
+  uint64_t handle = static_cast<uint64_t>(h.handle_) << handle_alignment_bits_;
   int block_index = static_cast<int>(handle >> block_size_bits_);
   size_t block_offset =
       static_cast<size_t>(handle & ((1 << block_size_bits_) - 1));

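The encoding above packs a (block index, offset) pair into one integer and shifts out the low alignment bits so larger arenas still fit a 32-bit handle. A standalone sketch of the round trip; the bit widths here are illustrative assumptions, not the arena's actual runtime configuration:

#include <cassert>
#include <cstdint>

int main() {
  const int kBlockSizeBits = 20;       // assumed: 1 MiB blocks
  const int kHandleAlignmentBits = 3;  // assumed: 8-byte handle alignment
  const uint64_t block_index = 5;
  const uint64_t offset = 0x128;       // 8-byte aligned, as the DCHECKs require

  // Encode, as in GetMemoryWithHandle: pack (block_index, offset), then
  // shift out the alignment bits so more of the arena fits in 32 bits.
  const uint64_t handle =
      ((block_index << kBlockSizeBits) + offset) >> kHandleAlignmentBits;
  assert(handle < 0xFFFFFFFF);  // otherwise the arena returns kInvalidValue

  // Decode, as in HandleToPointer: restore the alignment bits, then split.
  const uint64_t full = handle << kHandleAlignmentBits;
  assert(full >> kBlockSizeBits == block_index);
  assert((full & ((1 << kBlockSizeBits) - 1)) == offset);
  return 0;
}

The same masks appear in the DCHECKs above: offsets must stay inside the block and be aligned, or the round trip would lose bits.
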
src/base/arena.h

@@ -276,7 +276,7 @@
 #include <string.h>
 #include <vector>
 #include "base/thread_annotations.h"
-#include "base/macros.h"   // for uint32
+#include "base/macros.h"
 #include "base/util.h"     // for CHECK, etc
 
 namespace ctemplate {
@@ -306,23 +306,23 @@ class CTEMPLATE_DLL_DECL BaseArena {
   // copy and assignment semantics.
   class Handle {
    public:
-    static const uint32 kInvalidValue = 0xFFFFFFFF;   // int32-max
+    static const uint32_t kInvalidValue = 0xFFFFFFFF;   // int32-max
 
     Handle() : handle_(kInvalidValue) { }
 
     // Default copy constructors are fine here.
     bool operator==(const Handle& h) const { return handle_ == h.handle_; }
     bool operator!=(const Handle& h) const { return handle_ != h.handle_; }
 
-    uint32 hash() const { return handle_; }
+    uint32_t hash() const { return handle_; }
     bool valid() const { return handle_ != kInvalidValue; }
 
    private:
     // Arena needs to be able to access the internal data.
     friend class BaseArena;
 
-    explicit Handle(uint32 handle) : handle_(handle) { }
+    explicit Handle(uint32_t handle) : handle_(handle) { }
-    uint32 handle_;
+    uint32_t handle_;
   };
 
   // they're "slow" only 'cause they're virtual (subclasses define "fast" ones)

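Handle is a thin wrapper around one fixed-width integer, so swapping the config-detected uint32 for the standard uint32_t changes nothing observable. A minimal standalone replica of the sentinel semantics (hypothetical demo code, not part of the commit; in the real class the value constructor is private and only BaseArena mints valid handles):

#include <cassert>
#include <cstdint>

// Simplified stand-in for BaseArena::Handle.
class Handle {
 public:
  static const uint32_t kInvalidValue = 0xFFFFFFFF;
  Handle() : handle_(kInvalidValue) { }
  explicit Handle(uint32_t handle) : handle_(handle) { }  // public here for the demo only
  bool operator==(const Handle& h) const { return handle_ == h.handle_; }
  bool operator!=(const Handle& h) const { return handle_ != h.handle_; }
  uint32_t hash() const { return handle_; }
  bool valid() const { return handle_ != kInvalidValue; }
 private:
  uint32_t handle_;
};

int main() {
  Handle none;    // default-constructed handles are invalid
  Handle h(42);
  assert(!none.valid() && h.valid());
  assert(h != none && h.hash() == 42);
  return 0;
}
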
src/base/macros.h

@@ -4,9 +4,6 @@
 // Provides macros and typedefs based on config.h settings.
 // Provides the following macros:
 //    UNALIGNED_LOAD32   (may be an inline function on some architectures)
-// and the following typedefs:
-//    uint32
-//    uint64
 
 #ifndef CTEMPLATE_MACROS_H_
 #define CTEMPLATE_MACROS_H_
@@ -24,23 +21,6 @@
 #endif   // a third place for uint32_t or u_int32_t
 #endif
 
-#if defined(HAVE_U_INT32_T)
-typedef u_int32_t uint32;
-#elif defined(HAVE_UINT32_T)
-typedef uint32_t uint32;
-#elif defined(HAVE___INT32)
-typedef unsigned __int32 uint32;
-#endif
-
-#if defined(HAVE_U_INT64_T)
-typedef u_int64_t uint64;
-#elif defined(HAVE_UINT64_T)
-typedef uint64_t uint64;
-#elif defined(HAVE___INT64)
-typedef unsigned __int64 uint64;
-#endif
-
 // This is all to figure out endian-ness and byte-swapping on various systems
 #if defined(HAVE_ENDIAN_H)
 #include <endian.h>   // for the __BYTE_ORDER use below
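
The deleted ladder probed configure-time macros (HAVE_UINT32_T and friends) to synthesize uint32/uint64 on each platform. With the switch to uint32_t/uint64_t those definitions come straight from the standard header, assuming every supported toolchain now ships <stdint.h> (standard since C99, present in modern MSVC):

#include <stdint.h>  // <cstdint> in C++11 and later

// No configure-time probing needed: the standard fixes these widths
// wherever the types exist at all.
uint32_t u32 = 0xFFFFFFFFu;            // exactly 32 bits
uint64_t u64 = 0xFFFFFFFFFFFFFFFFull;  // exactly 64 bits
static_assert(sizeof(u32) == 4 && sizeof(u64) == 8, "fixed widths");
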
@@ -83,20 +63,20 @@ typedef unsigned __int64 uint64;
 #if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
 // We know they allow unaligned memory access and are little-endian
-# define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+# define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t*>(_p))
 #elif defined(__ppc__) || defined(__ppc64__)
 // We know they allow unaligned memory access and are big-endian
-# define UNALIGNED_LOAD32(_p) BSWAP32(*reinterpret_cast<const uint32 *>(_p))
+# define UNALIGNED_LOAD32(_p) BSWAP32(*reinterpret_cast<const uint32_t*>(_p))
 #elif (BYTE_ORDER == 1234) || (_BYTE_ORDER == 1234) || defined(_LITTLE_ENDIAN)
 // Use memcpy to align the memory properly
-inline uint32 UNALIGNED_LOAD32(const void *p) {
-  uint32 t;
+inline uint32_t UNALIGNED_LOAD32(const void *p) {
+  uint32_t t;
   memcpy(&t, p, sizeof(t));
   return t;
 }
 
 #elif (BYTE_ORDER == 4321) || (_BYTE_ORDER == 4321) || defined(_BIG_ENDIAN)
-inline uint32 UNALIGNED_LOAD32(const void *p) {
-  uint32 t;
+inline uint32_t UNALIGNED_LOAD32(const void *p) {
+  uint32_t t;
   memcpy(&t, p, sizeof(t));
   return BSWAP32(t);
 }

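On x86 and PowerPC the macro dereferences a cast pointer because those targets are known to tolerate unaligned loads; every other branch goes through memcpy, which is defined for any address. A standalone sketch of the little-endian fallback path (the byte values and printed result are just an illustration):

#include <cstdint>
#include <cstring>
#include <iostream>

// Same shape as the memcpy variant of UNALIGNED_LOAD32 above.
inline uint32_t UnalignedLoad32(const void* p) {
  uint32_t t;
  std::memcpy(&t, p, sizeof(t));  // defined behavior at any alignment
  return t;
}

int main() {
  unsigned char buf[8] = {0x00, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
  // buf + 1 is misaligned for a 4-byte load; memcpy makes the read safe.
  std::cout << std::hex << UnalignedLoad32(buf + 1) << "\n";
  // Prints 12345678 on a little-endian machine; the big-endian variant
  // additionally byte-swaps, as in the BSWAP32 branch above.
  return 0;
}
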
src/template_string.cc

@@ -39,7 +39,7 @@
 #include "base/arena.h"
 #include "base/thread_annotations.h"
 #include <assert.h>
-#include "base/macros.h"   // for uint32, uint64, UNALIGNED_LOAD32
+#include "base/macros.h"
 #include "base/util.h"
 
 #ifdef HAVE_UNORDERED_MAP
@@ -61,15 +61,15 @@ namespace ctemplate {
 //     64-bits,
 //   - uses a fixed seed.
 // This is not static because template_string_test accesses it directly.
-uint64 MurmurHash64(const char* ptr, size_t len) {
-  const uint32 kMultiplyVal = 0x5bd1e995;
+uint64_t MurmurHash64(const char* ptr, size_t len) {
+  const uint32_t kMultiplyVal = 0x5bd1e995;
   const int kShiftVal = 24;
-  const uint32 kHashSeed1 = 0xc86b14f7;
-  const uint32 kHashSeed2 = 0x650f5c4d;
+  const uint32_t kHashSeed1 = 0xc86b14f7;
+  const uint32_t kHashSeed2 = 0x650f5c4d;
 
-  uint32 h1 = kHashSeed1 ^ len, h2 = kHashSeed2;
+  uint32_t h1 = kHashSeed1 ^ len, h2 = kHashSeed2;
   while (len >= 8) {
-    uint32 k1 = UNALIGNED_LOAD32(ptr);
+    uint32_t k1 = UNALIGNED_LOAD32(ptr);
     k1 *= kMultiplyVal;
     k1 ^= k1 >> kShiftVal;
     k1 *= kMultiplyVal;
@@ -78,7 +78,7 @@ uint64 MurmurHash64(const char* ptr, size_t len) {
     h1 ^= k1;
     ptr += 4;
 
-    uint32 k2 = UNALIGNED_LOAD32(ptr);
+    uint32_t k2 = UNALIGNED_LOAD32(ptr);
     k2 *= kMultiplyVal;
     k2 ^= k2 >> kShiftVal;
     k2 *= kMultiplyVal;
@@ -91,7 +91,7 @@ uint64 MurmurHash64(const char* ptr, size_t len) {
   }
 
   if (len >= 4) {
-    uint32 k1 = UNALIGNED_LOAD32(ptr);
+    uint32_t k1 = UNALIGNED_LOAD32(ptr);
     k1 *= kMultiplyVal;
     k1 ^= k1 >> kShiftVal;
     k1 *= kMultiplyVal;
@@ -121,7 +121,7 @@ uint64 MurmurHash64(const char* ptr, size_t len) {
   h1 ^= h2 >> 17;
   h1 *= kMultiplyVal;
 
-  uint64 h = h1;
+  uint64_t h = h1;
   h = (h << 32) | h2;
   return h;
 }
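
One detail in the final combine is easy to miss: h1 is first widened to 64 bits, because shifting a 32-bit value left by 32 would be undefined behavior; only then is the 32-bit h2 merged into the low word. A tiny standalone check with arbitrary values:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t h1 = 0xdeadbeef, h2 = 0x01234567;
  uint64_t h = h1;     // widen first: shifting a uint32_t by 32 would be UB
  h = (h << 32) | h2;  // high word from h1, low word from h2
  assert(h == 0xdeadbeef01234567ULL);
  return 0;
}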