Merge pull request #138 from Arkshine/sync-amtl

Sync AMTL from upstream
Vincent Herbet · 2014-09-29 19:05:58 +02:00
commit a8efe40293

5 changed files with 62 additions and 6 deletions

@@ -38,9 +38,13 @@ namespace ke {
 extern "C" {
 long __cdecl _InterlockedIncrement(long volatile *dest);
 long __cdecl _InterlockedDecrement(long volatile *dest);
+long __cdecl _InterlockedIncrement64(long long volatile *dest);
+long __cdecl _InterlockedDecrement64(long long volatile *dest);
 }
 # pragma intrinsic(_InterlockedIncrement)
 # pragma intrinsic(_InterlockedDecrement)
+# pragma intrinsic(_InterlockedIncrement64)
+# pragma intrinsic(_InterlockedDecrement64)
 #endif

 template <size_t Width>
@@ -50,7 +54,7 @@ template <>
 struct AtomicOps<4>
 {
 #if defined(_MSC_VER)
-  typedef long Type;
+  typedef volatile long Type;

   static Type Increment(Type *ptr) {
     return _InterlockedIncrement(ptr);
@@ -59,7 +63,7 @@ struct AtomicOps<4>
     return _InterlockedDecrement(ptr);
   };
 #elif defined(__GNUC__)
-  typedef int Type;
+  typedef volatile int Type;

   // x86/x64 notes: When using GCC < 4.8, this will compile to a spinlock.
   // On 4.8+, or when using Clang, we'll get the more optimal "lock addl"
@@ -73,6 +77,34 @@ struct AtomicOps<4>
 #endif
 };

+template <>
+struct AtomicOps<8>
+{
+#if defined(_MSC_VER)
+  typedef volatile long long Type;
+
+  static Type Increment(Type *ptr) {
+    return _InterlockedIncrement64(ptr);
+  }
+  static Type Decrement(Type *ptr) {
+    return _InterlockedDecrement64(ptr);
+  };
+#elif defined(__GNUC__)
+  typedef volatile int64_t Type;
+
+  // x86/x64 notes: When using GCC < 4.8, this will compile to a spinlock.
+  // On 4.8+, or when using Clang, we'll get the more optimal "lock addl"
+  // variant.
+  static Type Increment(Type *ptr) {
+    return __sync_add_and_fetch(ptr, 1);
+  }
+  static Type Decrement(Type *ptr) {
+    return __sync_sub_and_fetch(ptr, 1);
+  }
+#endif
+};
+
 class AtomicRefCount
 {
   typedef AtomicOps<sizeof(uintptr_t)> Ops;
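The new AtomicOps<8> specialization matters because AtomicRefCount selects its operations through AtomicOps<sizeof(uintptr_t)>, which resolves to the 8-byte variant on 64-bit builds. A minimal, self-contained sketch of that consumption pattern (the class and names below are illustrative, not the library's exact code):

// Illustrative consumer of an AtomicOps-style policy; not AMTL's exact code.
template <typename Ops>
class RefCountSketch
{
 public:
  explicit RefCountSketch(typename Ops::Type initial) : value_(initial) {}

  void AddRef() {
    Ops::Increment(&value_);
  }
  // Returns true once the count hits zero and the object may be destroyed.
  bool Release() {
    return Ops::Decrement(&value_) == 0;
  }

 private:
  typename Ops::Type value_;
};

// On a 64-bit target, AtomicOps<sizeof(uintptr_t)> is AtomicOps<8>, so the new
// _InterlockedIncrement64 / __sync_add_and_fetch(int64_t*) paths above are used:
//   typedef RefCountSketch<ke::AtomicOps<sizeof(uintptr_t)> > RefCount64;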

@@ -143,6 +143,11 @@ class PassRef
   bool operator !() const {
     return !thing_;
   }
+#if defined(KE_CXX11)
+  explicit operator bool() const {
+    return !!thing_;
+  }
+#endif

   T *release() const {
     return ReturnAndVoid(thing_);
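The explicit conversion lets a PassRef be tested directly in boolean contexts under C++11 while still blocking accidental implicit conversions to integers. A standalone sketch of the idiom, assuming KE_CXX11 tracks C++11 support; the HandleSketch type here is illustrative, not the library's code:

// Illustrative stand-in for a smart handle with the same two operators.
struct HandleSketch {
  void *thing_;

  bool operator !() const { return !thing_; }
#if __cplusplus >= 201103L   // stand-in for the KE_CXX11 check
  explicit operator bool() const { return !!thing_; }
#endif
};

const char *describe(const HandleSketch &h) {
  // In C++11 the contextual conversion picks the explicit operator bool;
  // pre-C++11 callers must keep writing !h / !!h instead.
  return h ? "set" : "null";
  // int n = h;  // would not compile: the conversion is explicit, so the
  //             // handle cannot silently decay to an integer.
}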

@@ -31,6 +31,7 @@
 #define _include_amtl_thread_local_h_

 #include <am-thread-utils.h>
+#include <stdio.h>

 namespace ke {
@@ -82,7 +83,7 @@ class ThreadLocal
 #if !defined(KE_SINGLE_THREADED)
  private:
-  int allocated_;
+  volatile int allocated_;

  public:
  ThreadLocal() {
@@ -116,7 +117,7 @@ class ThreadLocal
     TlsSetValue(key_, reinterpret_cast<LPVOID>(t));
   }
   bool allocate() {
-    if (InterlockedCompareExchange(&allocated_, 1, 0) == 1)
+    if (InterlockedCompareExchange((volatile LONG *)&allocated_, 1, 0) == 1)
       return true;
     key_ = TlsAlloc();
     return key_ != TLS_OUT_OF_INDEXES;
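The cast is needed because allocated_ is now a volatile int while InterlockedCompareExchange takes a volatile LONG *. Functionally, allocate() is a once-only initializer: the first thread to flip the flag 0 → 1 performs TlsAlloc, and later callers return immediately. A compiler-neutral sketch of the same compare-and-swap pattern, using the GCC builtins that appear elsewhere in this diff (the names below are illustrative):

// Illustrative once-only allocation guard built on a CAS; assumes GCC/Clang
// builtins (MSVC would use InterlockedCompareExchange, as in the hunk above).
static volatile int g_allocated = 0;
static int g_key = -1;

// Stand-in for a real allocation call such as TlsAlloc(); always succeeds here.
static int allocate_key() {
  return 1;
}

static bool ensure_allocated() {
  // Atomically flip 0 -> 1; only the very first caller observes the old value
  // 0 and goes on to allocate. Everyone else sees 1 and returns immediately.
  if (__sync_val_compare_and_swap(&g_allocated, 0, 1) != 0)
    return true;
  g_key = allocate_key();
  return g_key != -1;
}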

@@ -36,6 +36,8 @@
 #include <stdint.h>
 #if defined(_MSC_VER)
 # include <intrin.h>
+#else
+# include <inttypes.h>
 #endif

 #include <am-moveable.h>
@@ -116,6 +118,9 @@ class AutoPtr
     t_ = t;
     return t_;
   }
+  T **address() {
+    return &t_;
+  }
   T *operator =(Moveable<AutoPtr<T> > other) {
     delete t_;
     t_ = other->t_;
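The new address() accessor exposes the AutoPtr's internal slot so it can receive a pointer from an out-parameter style API and still own the result. A usage sketch, assuming this AutoPtr lives in am-utility.h and keeps its usual operator-> accessor; the Widget type and create_widget factory are hypothetical:

#include <am-utility.h>

struct Widget {
  int value;
};

// Hypothetical C-style factory that returns its result via an out-parameter.
static bool create_widget(Widget **out) {
  *out = new Widget();
  (*out)->value = 42;
  return true;
}

static int example() {
  ke::AutoPtr<Widget> widget;
  // address() hands the factory the AutoPtr's internal Widget* slot, so the
  // created object is owned (and later deleted) by the AutoPtr. The previous
  // pointer, if any, is not freed first, so this is meant for an AutoPtr that
  // is still empty.
  if (!create_widget(widget.address()))
    return -1;
  return widget->value;
}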
@@ -384,8 +389,8 @@ class StackLinked
 # define KE_U64_FMT "%I64u"
 #elif defined(__GNUC__)
 # define KE_SIZET_FMT "%zu"
-# define KE_I64_FMT "%lld"
-# define KE_U64_FMT "%llu"
+# define KE_I64_FMT "%" PRId64
+# define KE_U64_FMT "%" PRIu64
 #else
 # error "Implement format specifier string"
 #endif
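Switching to PRId64/PRIu64 (from the newly included <inttypes.h>) avoids format warnings on LP64 targets, where int64_t is long rather than long long and a hard-coded "%lld" no longer matches. An illustration of how these macros compose with adjacent string literals; the KE_* definitions are repeated here so the example builds on its own:

// Older C++ toolchains only expose the PRI* macros when this is defined
// before <inttypes.h> is first included.
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <stdio.h>

// Mirrors the GCC branch of the header above.
#define KE_I64_FMT "%" PRId64
#define KE_U64_FMT "%" PRIu64

int main() {
  int64_t signed_value = -1234567890123LL;
  uint64_t unsigned_value = 9876543210987ULL;

  // String-literal concatenation splices the platform-correct conversion
  // specifier into the format string, so this prints cleanly on both 32-bit
  // and 64-bit GCC/Clang targets without -Wformat warnings.
  printf("signed: " KE_I64_FMT ", unsigned: " KE_U64_FMT "\n",
         signed_value, unsigned_value);
  return 0;
}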

@@ -160,6 +160,19 @@ class Vector : public AllocPolicy
     return data_;
   }

+  bool resize(size_t newLength) {
+    if (newLength < length()) {
+      while (newLength < length())
+        pop();
+    } else if (newLength > length()) {
+      if (!ensure(newLength))
+        return false;
+      size_t count = newLength - length();
+      for (size_t i = 0; i < count; i++)
+        infallibleAppend(T());
+    }
+    return true;
+  }
   bool ensure(size_t desired) {
     if (desired <= length())
       return true;
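resize() shrinks by popping elements and grows by reserving capacity first (the only fallible step) and then appending value-initialized elements, so only the grow path can return false. A short usage sketch, assuming a ke::Vector as declared in this header:

#include <am-vector.h>

static bool fill_defaults(ke::Vector<int> &values)
{
  // Grow to 8 elements; the new slots are value-initialized (0 for int).
  // This is the only path that can fail, if the underlying allocation fails.
  if (!values.resize(8))
    return false;

  // Shrink back to 3 elements; excess entries are popped, which cannot fail.
  values.resize(3);
  return true;
}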