From f3aee6d7661701dd294ffeb43979f8e2640d6357 Mon Sep 17 00:00:00 2001 From: Arkshine Date: Sat, 3 May 2014 13:00:21 +0200 Subject: [PATCH] Add AMTL files --- AMBuildScript | 1 + amxmodx/Makefile | 4 +- amxmodx/msvc10/amxmodx_mm.vcxproj | 9 +- public/amtl/am-allocator-policies.h | 65 +++ public/amtl/am-atomics.h | 102 ++++ public/amtl/am-hashmap.h | 196 ++++++++ public/amtl/am-hashset.h | 129 +++++ public/amtl/am-hashtable.h | 631 ++++++++++++++++++++++++ public/amtl/am-inlinelist.h | 183 +++++++ public/amtl/am-linkedlist.h | 309 ++++++++++++ public/amtl/am-moveable.h | 73 +++ public/amtl/am-refcounting-threadsafe.h | 68 +++ public/amtl/am-refcounting.h | 302 ++++++++++++ public/amtl/am-string.h | 134 +++++ public/amtl/am-thread-posix.h | 213 ++++++++ public/amtl/am-thread-utils.h | 265 ++++++++++ public/amtl/am-thread-windows.h | 161 ++++++ public/amtl/am-utility.h | 346 +++++++++++++ public/amtl/am-vector.h | 239 +++++++++ 19 files changed, 3424 insertions(+), 6 deletions(-) create mode 100644 public/amtl/am-allocator-policies.h create mode 100644 public/amtl/am-atomics.h create mode 100644 public/amtl/am-hashmap.h create mode 100644 public/amtl/am-hashset.h create mode 100644 public/amtl/am-hashtable.h create mode 100644 public/amtl/am-inlinelist.h create mode 100644 public/amtl/am-linkedlist.h create mode 100644 public/amtl/am-moveable.h create mode 100644 public/amtl/am-refcounting-threadsafe.h create mode 100644 public/amtl/am-refcounting.h create mode 100644 public/amtl/am-string.h create mode 100644 public/amtl/am-thread-posix.h create mode 100644 public/amtl/am-thread-utils.h create mode 100644 public/amtl/am-thread-windows.h create mode 100644 public/amtl/am-utility.h create mode 100644 public/amtl/am-vector.h diff --git a/AMBuildScript b/AMBuildScript index 7bf83a7a..aa9d91e3 100644 --- a/AMBuildScript +++ b/AMBuildScript @@ -220,6 +220,7 @@ class AMXXConfig(object): ] cfg.includes += [os.path.join(builder.buildPath, 'includes')] + cfg.includes += 
[os.path.join(builder.sourcePath, 'public', 'amtl')] return # diff --git a/amxmodx/Makefile b/amxmodx/Makefile index 71581592..5c0422ec 100755 --- a/amxmodx/Makefile +++ b/amxmodx/Makefile @@ -6,7 +6,7 @@ ########################################### HLSDK = ../../hlsdk -MM_ROOT = ../../metamod/metamod +MM_ROOT = ../../metamod-am/metamod ##################################### ### EDIT BELOW FOR OTHER PROJECTS ### @@ -36,7 +36,7 @@ CPP_OSX = clang LINK = -Lzlib -INCLUDE = -I. -I$(HLSDK) -I$(HLSDK)/common -I$(HLSDK)/dlls -I$(HLSDK)/engine -I$(HLSDK)/game_shared \ +INCLUDE = -I. -I../public/amtl -I$(HLSDK) -I$(HLSDK)/common -I$(HLSDK)/dlls -I$(HLSDK)/engine -I$(HLSDK)/game_shared \ -I$(HLSDK)/public -I$(MM_ROOT) ################################################ diff --git a/amxmodx/msvc10/amxmodx_mm.vcxproj b/amxmodx/msvc10/amxmodx_mm.vcxproj index 814224ba..a030557a 100644 --- a/amxmodx/msvc10/amxmodx_mm.vcxproj +++ b/amxmodx/msvc10/amxmodx_mm.vcxproj @@ -92,7 +92,7 @@ Disabled - ..\;$(METAMOD)\metamod;$(HLSDK)\common;$(HLSDK)\engine;$(HLSDK)\dlls;$(HLSDK)\pm_shared;$(HLSDK)\public;%(AdditionalIncludeDirectories) + ..\;..\..\public\amtl;$(METAMOD)\metamod;$(HLSDK)\common;$(HLSDK)\engine;$(HLSDK)\dlls;$(HLSDK)\pm_shared;$(HLSDK)\public;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;amxmodx_EXPORTS;PAWN_CELL_SIZE=32;ASM32;JIT;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebug @@ -143,7 +143,7 @@ true Speed true - ..\;$(METAMOD)\metamod;$(HLSDK)\common;$(HLSDK)\engine;$(HLSDK)\dlls;$(HLSDK)\pm_shared;$(HLSDK)\public;%(AdditionalIncludeDirectories) + ..\;..\..\public\amtl;$(METAMOD)\metamod;$(HLSDK)\common;$(HLSDK)\engine;$(HLSDK)\dlls;$(HLSDK)\pm_shared;$(HLSDK)\public;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;amxmodx_EXPORTS;JIT;ASM32;PAWN_CELL_SIZE=32;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) false true @@ -192,7 +192,7 @@ Disabled - 
..\;$(METAMOD)\metamod;$(HLSDK)\multiplayer\common;$(HLSDK)\multiplayer\engine;$(HLSDK)\multiplayer\dlls;$(HLSDK)\multiplayer\pm_shared;%(AdditionalIncludeDirectories) + ..\;..\..\public\amtl;$(METAMOD)\metamod;$(HLSDK)\multiplayer\common;$(HLSDK)\multiplayer\engine;$(HLSDK)\multiplayer\dlls;$(HLSDK)\multiplayer\pm_shared;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;amxmodx_EXPORTS;PAWN_CELL_SIZE=32;ASM32;JIT;BINLOG_ENABLED;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebug @@ -243,7 +243,7 @@ true Speed true - ..\;$(METAMOD)\metamod;$(HLSDK)\multiplayer\common;$(HLSDK)\multiplayer\engine;$(HLSDK)\multiplayer\dlls;$(HLSDK)\multiplayer\pm_shared;%(AdditionalIncludeDirectories) + ..\;..\..\public\amtl;$(METAMOD)\metamod;$(HLSDK)\multiplayer\common;$(HLSDK)\multiplayer\engine;$(HLSDK)\multiplayer\dlls;$(HLSDK)\multiplayer\pm_shared;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;amxmodx_EXPORTS;JIT;ASM32;PAWN_CELL_SIZE=32;BINLOG_ENABLED;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) false true @@ -389,6 +389,7 @@ + diff --git a/public/amtl/am-allocator-policies.h b/public/amtl/am-allocator-policies.h new file mode 100644 index 00000000..5f9712bf --- /dev/null +++ b/public/amtl/am-allocator-policies.h @@ -0,0 +1,65 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_allocatorpolicies_h_ +#define _include_amtl_allocatorpolicies_h_ + +#include +#include + +namespace ke { + +// The default system allocator policy will crash on out-of-memory. +class SystemAllocatorPolicy +{ + public: + void reportOutOfMemory() { + fprintf(stderr, "OUT OF MEMORY\n"); + abort(); + } + void reportAllocationOverflow() { + fprintf(stderr, "OUT OF MEMORY\n"); + abort(); + } + + public: + void free(void *memory) { + ::free(memory); + } + void *malloc(size_t bytes) { + void *ptr = ::malloc(bytes); + if (!ptr) + reportOutOfMemory(); + return ptr; + } +}; + +} + +#endif // _include_amtl_allocatorpolicies_h_ diff --git a/public/amtl/am-atomics.h b/public/amtl/am-atomics.h new file mode 100644 index 00000000..e32d1df9 --- /dev/null +++ b/public/amtl/am-atomics.h @@ -0,0 +1,102 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef _include_amtl_atomics_h_ +#define _include_amtl_atomics_h_ + +#include + +namespace ke { + +#if defined(_MSC_VER) +extern "C" { + long __cdecl _InterlockedIncrement(long volatile *dest); + long __cdecl _InterlockedDecrement(long volatile *dest); +} +# pragma intrinsic(_InterlockedIncrement) +# pragma intrinsic(_InterlockedDecrement) +#endif + +template +struct AtomicOps; + +template <> +struct AtomicOps<4> +{ +#if defined(_MSC_VER) + typedef long Type; + + static Type Increment(Type *ptr) { + return _InterlockedIncrement(ptr); + } + static Type Decrement(Type *ptr) { + return _InterlockedDecrement(ptr); + }; +#elif defined(__GNUC__) + typedef int Type; + + // x86/x64 notes: When using GCC < 4.8, this will compile to a spinlock. + // On 4.8+, or when using Clang, we'll get the more optimal "lock addl" + // variant. + static Type Increment(Type *ptr) { + return __sync_add_and_fetch(ptr, 1); + } + static Type Decrement(Type *ptr) { + return __sync_sub_and_fetch(ptr, 1); + } +#endif +}; + +class AtomicRefCount +{ + typedef AtomicOps Ops; + + public: + AtomicRefCount(uintptr_t value) + : value_(value) + { + } + + void increment() { + Ops::Increment(&value_); + } + + // Return false if all references are gone. + bool decrement() { + return Ops::Decrement(&value_) != 0; + } + + private: + Ops::Type value_; +}; + +} + +#endif // _include_amtl_atomics_h_ + diff --git a/public/amtl/am-hashmap.h b/public/amtl/am-hashmap.h new file mode 100644 index 00000000..9a2d1ee6 --- /dev/null +++ b/public/amtl/am-hashmap.h @@ -0,0 +1,196 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_hashmap_h_ +#define _include_amtl_hashmap_h_ + +#include + +namespace ke { + +// Template parameters: +// +// K - Key type. +// V - Value type. +// HashPolicy - A struct with a hash and comparator function for each lookup type: +// static uint32_t hash(const Type &value); +// static bool matches(const Type &value, const K &key); +// +// All types that match a given key, must compute the same hash. +// +// Note that like HashTable, a HashMap is not usable until init() has been called. 
+template +class HashMap : public AllocPolicy +{ + private: + struct Entry + { + K key; + V value; + + Entry() + { + } + Entry(Moveable other) + : key(Moveable(other->key)), + value(Moveable(other->value)) + { + } + + Entry(const K &aKey, const V &aValue) + : key(aKey), + value(aValue) + { } + Entry(const K &aKey, Moveable aValue) + : key(aKey), + value(aValue) + { } + Entry(Moveable aKey, const V &aValue) + : key(aKey), + value(aValue) + { } + Entry(Moveable aKey, Moveable aValue) + : key(aKey), + value(aValue) + { } + }; + + struct Policy + { + typedef Entry Payload; + + template + static uint32_t hash(const Lookup &key) { + return HashPolicy::hash(key); + } + + template + static bool matches(const Lookup &key, const Payload &payload) { + return HashPolicy::matches(key, payload.key); + } + }; + + typedef HashTable Internal; + + public: + HashMap(AllocPolicy ap = AllocPolicy()) + : table_(ap) + { + } + + // capacity must be a power of two. + bool init(size_t capacity = 16) { + return table_.init(capacity); + } + + typedef typename Internal::Result Result; + typedef typename Internal::Insert Insert; + typedef typename Internal::iterator iterator; + + template + Result find(const Lookup &key) { + return table_.find(key); + } + + template + Insert findForAdd(const Lookup &key) { + return table_.findForAdd(key); + } + + template + void removeIfExists(const Lookup &key) { + return table_.remove(key); + } + + void remove(Result &r) { + table_.remove(r); + } + + // The map must not have been mutated in between findForAdd() and add(). + // The Insert object is still valid after add() returns, however. 
+ bool add(Insert &i, const K &key, const V &value) { + return table_.add(i, Entry(key, value)); + } + bool add(Insert &i, Moveable key, const V &value) { + return table_.add(i, Entry(key, value)); + } + bool add(Insert &i, const K &key, Moveable value) { + return table_.add(i, Entry(key, value)); + } + bool add(Insert &i, Moveable key, Moveable value) { + return table_.add(i, Entry(key, value)); + } + bool add(Insert &i, Moveable key) { + return table_.add(i, Entry(key, V())); + } + + // This can be used to avoid compiler constructed temporaries, since AMTL + // does not yet support move semantics. If you use this, the key and value + // must be set after. + bool add(Insert &i) { + return table_.add(i); + } + + iterator iter() { + return iterator(&table_); + } + + void clear() { + table_.clear(); + } + + size_t elements() const { + return table_.elements(); + } + + size_t estimateMemoryUse() const { + return table_.estimateMemoryUse(); + } + + private: + Internal table_; +}; + +template +struct PointerPolicy +{ + static inline uint32_t hash(T *p) { + return HashPointer(p); + } + static inline bool matches(T *p1, T *p2) { + return p1 == p2; + } +}; + +} + +#endif // _include_amtl_hashmap_h_ diff --git a/public/amtl/am-hashset.h b/public/amtl/am-hashset.h new file mode 100644 index 00000000..13f8eb8f --- /dev/null +++ b/public/amtl/am-hashset.h @@ -0,0 +1,129 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_hashmap_h_ +#define _include_amtl_hashmap_h_ + +#include + +namespace ke { + +// Template parameters: +// +// K - Key type. +// HashPolicy - A struct with a hash and comparator function for each lookup type: +// static uint32_t hash(const Type &value); +// static bool matches(const Type &value, const K &key); +// +// Like HashMap and HashTable, init() must be called to construct the set. 
+template +class HashSet : public AllocPolicy +{ + struct Policy { + typedef K Payload; + + template + static uint32_t hash(const Lookup &key) { + return HashPolicy::hash(key); + } + + template + static bool matches(const Lookup &key, const Payload &payload) { + return HashPolicy::matches(key, payload); + } + }; + + typedef HashTable Internal; + + public: + HashSet(AllocPolicy ap = AllocPolicy()) + : table_(ap) + { + } + + // capacity must be a power of two. + bool init(size_t capacity = 16) { + return table_.init(capacity); + } + + typedef typename Internal::Result Result; + typedef typename Internal::Insert Insert; + + template + Result find(const Lookup &key) { + return table_.find(key); + } + + template + Insert findForAdd(const Lookup &key) { + return table_.findForAdd(key); + } + + template + void removeIfExists(const Lookup &key) { + return table_.remove(key); + } + + void remove(Result &r) { + table_.remove(r); + } + + // The map must not have been mutated in between findForAdd() and add(). + // The Insert object is still valid after add() returns, however. + bool add(Insert &i, const K &key) { + return table_.add(i, key); + } + bool add(Insert &i, Moveable key) { + return table_.add(i, key); + } + + // This can be used to avoid compiler constructed temporaries, since AMTL + // does not yet support move semantics. If you use this, the key and value + // must be set after. + bool add(Insert &i) { + return table_.add(i); + } + + void clear() { + table_.clear(); + } + + size_t estimateMemoryUse() const { + return table_.estimateMemoryUse(); + } + + private: + Internal table_; +}; + +} + +#endif // _include_amtl_hashset_h_ diff --git a/public/amtl/am-hashtable.h b/public/amtl/am-hashtable.h new file mode 100644 index 00000000..1101a247 --- /dev/null +++ b/public/amtl/am-hashtable.h @@ -0,0 +1,631 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef _INCLUDE_KEIMA_HASHTABLE_H_ +#define _INCLUDE_KEIMA_HASHTABLE_H_ + +#include +#include +#include +#include "am-allocator-policies.h" +#include "am-utility.h" +#include "am-moveable.h" + +namespace ke { + +namespace detail { + template + class HashTableEntry + { + uint32_t hash_; + T t_; + + public: + static const uint32_t kFreeHash = 0; + static const uint32_t kRemovedHash = 1; + + public: + void setHash(uint32_t hash) { + hash_ = hash; + } + void construct() { + new (&t_) T(); + } + void construct(const T &t) { + new (&t_) T(t); + } + void construct(Moveable t) { + new (&t_) T(t); + } + uint32_t hash() const { + return hash_; + } + void setRemoved() { + destruct(); + hash_ = kRemovedHash; + } + void setFree() { + destruct(); + hash_ = kFreeHash; + } + void initialize() { + hash_ = kFreeHash; + } + void destruct() { + if (isLive()) + t_.~T(); + } + bool removed() const { + return hash_ == kRemovedHash; + } + bool free() const { + return hash_ == kFreeHash; + } + bool isLive() const { + return hash_ > kRemovedHash; + } + T &payload() { + assert(isLive()); + return t_; + } + bool sameHash(uint32_t hash) const { + return hash_ == hash; + } + + private: + HashTableEntry(const HashTableEntry &other) KE_DELETE; + HashTableEntry &operator =(const HashTableEntry &other) KE_DELETE; + }; +} + +// The HashPolicy for the table must have the following members: +// +// Payload +// static uint32_t hash(const LookupType &key); +// static bool matches(const LookupType &key, const Payload &other); +// +// Payload must be a type, and LookupType is any type that lookups will be +// performed with (these functions can be overloaded). Example: +// +// struct Policy { +// typedef KeyValuePair Payload; +// static uint32 hash(const Key &key) { +// ... +// } +// static bool matches(const Key &key, const KeyValuePair &pair) { +// ... +// } +// }; +// +// Note that the table is not usable until init() has been called. 
+// +template +class HashTable : public AllocPolicy +{ + friend class iterator; + + typedef typename HashPolicy::Payload Payload; + typedef detail::HashTableEntry Entry; + + private: + static const uint32_t kMinCapacity = 16; + static const uint32_t kMaxCapacity = INT_MAX / sizeof(Entry); + + template + uint32_t computeHash(const Key &key) { + // Multiply by golden ratio. + uint32_t hash = HashPolicy::hash(key) * 0x9E3779B9; + if (hash == Entry::kFreeHash || hash == Entry::kRemovedHash) + hash += 2; + return hash; + } + + Entry *createTable(uint32_t capacity) { + assert(capacity <= kMaxCapacity); + + Entry *table = (Entry *)this->malloc(capacity * sizeof(Entry)); + if (!table) + return NULL; + + for (size_t i = 0; i < capacity; i++) + table[i].initialize(); + + return table; + } + + public: + class Result + { + friend class HashTable; + + Entry *entry_; + + Entry &entry() { + return *entry_; + } + + public: + Result(Entry *entry) + : entry_(entry) + { } + + Payload * operator ->() { + return &entry_->payload(); + } + Payload & operator *() { + return entry_->payload(); + } + + bool found() const { + return entry_->isLive(); + } + }; + + class Insert : public Result + { + uint32_t hash_; + + public: + Insert(Entry *entry, uint32_t hash) + : Result(entry), + hash_(hash) + { + } + + uint32_t hash() const { + return hash_; + } + }; + + private: + class Probulator { + uint32_t hash_; + uint32_t capacity_; + + public: + Probulator(uint32_t hash, uint32_t capacity) + : hash_(hash), + capacity_(capacity) + { + assert(IsPowerOfTwo(capacity_)); + } + + uint32_t entry() const { + return hash_ & (capacity_ - 1); + } + uint32_t next() { + hash_++; + return entry(); + } + }; + + bool underloaded() const { + // Check if the table is underloaded: < 25% entries used. + return (capacity_ > kMinCapacity) && (nelements_ + ndeleted_ < capacity_ / 4); + } + bool overloaded() const { + // Grow if the table is overloaded: > 75% entries used. 
+ return (nelements_ + ndeleted_) > ((capacity_ / 2) + (capacity_ / 4)); + } + + bool shrink() { + if ((capacity_ >> 1) < minCapacity_) + return true; + return changeCapacity(capacity_ >> 1); + } + + bool grow() { + if (capacity_ >= kMaxCapacity) { + this->reportAllocationOverflow(); + return false; + } + return changeCapacity(capacity_ << 1); + } + + bool changeCapacity(uint32_t newCapacity) { + assert(newCapacity <= kMaxCapacity); + + Entry *newTable = createTable(newCapacity); + if (!newTable) + return false; + + Entry *oldTable = table_; + uint32_t oldCapacity = capacity_; + + table_ = newTable; + capacity_ = newCapacity; + ndeleted_ = 0; + + for (uint32_t i = 0; i < oldCapacity; i++) { + Entry &oldEntry = oldTable[i]; + if (oldEntry.isLive()) { + Insert p = insertUnique(oldEntry.hash()); + p.entry().setHash(p.hash()); + p.entry().construct(Moveable(oldEntry.payload())); + } + oldEntry.destruct(); + } + this->free(oldTable); + + return true; + } + + // For use when the key is known to be unique. 
+ Insert insertUnique(uint32_t hash) { + Probulator probulator(hash, capacity_); + + Entry *e = &table_[probulator.entry()]; + for (;;) { + if (e->free() || e->removed()) + break; + e = &table_[probulator.next()]; + } + + return Insert(e, hash); + } + + template + Result lookup(const Key &key) { + uint32_t hash = computeHash(key); + Probulator probulator(hash, capacity_); + + Entry *e = &table_[probulator.entry()]; + for (;;) { + if (e->free()) + break; + if (e->isLive() && + e->sameHash(hash) && + HashPolicy::matches(key, e->payload())) + { + return Result(e); + } + e = &table_[probulator.next()]; + } + + return Result(e); + } + + template + Insert lookupForAdd(const Key &key) { + uint32_t hash = computeHash(key); + Probulator probulator(hash, capacity_); + + Entry *e = &table_[probulator.entry()]; + for (;;) { + if (!e->isLive()) + break; + if (e->sameHash(hash) && HashPolicy::matches(key, e->payload())) + break; + e = &table_[probulator.next()]; + } + + return Insert(e, hash); + } + + bool internalAdd(Insert &i) { + assert(!i.found()); + + // If the entry is deleted, just re-use the slot. + if (i.entry().removed()) { + ndeleted_--; + } else { + // Otherwise, see if we're at max capacity. + if (nelements_ == kMaxCapacity) { + this->reportAllocationOverflow(); + return false; + } + + // Check if the table is over or underloaded. The table is always at + // least 25% free, so this check is enough to guarantee one free slot. + // (Without one free slot, insertion search could infinite loop.) + uint32_t oldCapacity = capacity_; + if (!checkDensity()) + return false; + + // If the table changed size, we need to find a new insertion point. + // Note that a removed entry is impossible: either we caught it above, + // or we just resized and no entries are removed. 
+ if (capacity_ != oldCapacity) + i = insertUnique(i.hash()); + } + + nelements_++; + i.entry().setHash(i.hash()); + return true; + } + + void removeEntry(Entry &e) { + assert(e.isLive()); + e.setRemoved(); + ndeleted_++; + nelements_--; + } + + public: + HashTable(AllocPolicy ap = AllocPolicy()) + : AllocPolicy(ap), + capacity_(0), + nelements_(0), + ndeleted_(0), + table_(NULL), + minCapacity_(kMinCapacity) + { + } + + ~HashTable() + { + for (uint32_t i = 0; i < capacity_; i++) + table_[i].destruct(); + this->free(table_); + } + + bool init(uint32_t capacity = 0) { + if (capacity < kMinCapacity) { + capacity = kMinCapacity; + } else if (capacity > kMaxCapacity) { + this->reportAllocationOverflow(); + return false; + } + + minCapacity_ = capacity; + + assert(IsPowerOfTwo(capacity)); + capacity_ = capacity; + + table_ = createTable(capacity_); + if (!table_) + return false; + + return true; + } + + // The Result object must not be used past mutating table operations. + template + Result find(const Key &key) { + return lookup(key); + } + + // The Insert object must not be used past mutating table operations. + template + Insert findForAdd(const Key &key) { + return lookupForAdd(key); + } + + template + void removeIfExists(const Key &key) { + Result r = find(key); + if (!r.found()) + return; + remove(r); + } + + void remove(Result &r) { + assert(r.found()); + removeEntry(r.entry()); + } + + // The table must not have been mutated in between findForAdd() and add(). + // The Insert object is still valid after add() returns, however. 
+ bool add(Insert &i, const Payload &payload) { + if (!internalAdd(i)) + return false; + i.entry().construct(payload); + return true; + } + bool add(Insert &i, Moveable payload) { + if (!internalAdd(i)) + return false; + i.entry().construct(payload); + return true; + } + bool add(Insert &i) { + if (!internalAdd(i)) + return false; + i.entry().construct(); + return true; + } + + bool checkDensity() { + if (underloaded()) + return shrink(); + if (overloaded()) + return grow(); + return true; + } + + void clear() { + for (size_t i = 0; i < capacity_; i++) { + table_[i].setFree(); + } + ndeleted_ = 0; + nelements_ = 0; + } + + size_t elements() const { + return nelements_; + } + + size_t estimateMemoryUse() const { + return sizeof(Entry) * capacity_; + } + + public: + // It is illegal to mutate a HashTable during iteration. + class iterator + { + public: + iterator(HashTable *table) + : table_(table), + i_(table->table_), + end_(table->table_ + table->capacity_) + { + while (i_ < end_ && !i_->isLive()) + i_++; + } + + bool empty() const { + return i_ == end_; + } + + void erase() { + assert(!empty()); + table_->removeEntry(*i_); + } + + Payload *operator ->() const { + return &i_->payload(); + } + Payload &operator *() const { + return i_->payload(); + } + + void next() { + do { + i_++; + } while (i_ < end_ && !i_->isLive()); + } + + private: + HashTable *table_; + Entry *i_; + Entry *end_; + }; + + private: + HashTable(const HashTable &other) KE_DELETE; + HashTable &operator =(const HashTable &other) KE_DELETE; + + private: + uint32_t capacity_; + uint32_t nelements_; + uint32_t ndeleted_; + Entry *table_; + uint32_t minCapacity_; +}; + +// Bob Jenkin's one-at-a-time hash function[1]. 
+// +// [1] http://burtleburtle.net/bob/hash/doobs.html +class CharacterStreamHasher +{ + uint32_t hash; + + public: + CharacterStreamHasher() + : hash(0) + { } + + void add(char c) { + hash += c; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + void add(const char *s, size_t length) { + for (size_t i = 0; i < length; i++) + add(s[i]); + } + + uint32_t result() { + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + return hash; + } +}; + +static inline uint32_t +HashCharSequence(const char *s, size_t length) +{ + CharacterStreamHasher hasher; + hasher.add(s, length); + return hasher.result(); +} + +static inline uint32_t +FastHashCharSequence(const char *s, size_t length) +{ + uint32_t hash = 0; + for (size_t i = 0; i < length; i++) + hash = s[i] + (hash << 6) + (hash << 16) - hash; + return hash; +} + +// From http://burtleburtle.net/bob/hash/integer.html +static inline uint32_t +HashInt32(int32_t a) +{ + a = (a ^ 61) ^ (a >> 16); + a = a + (a << 3); + a = a ^ (a >> 4); + a = a * 0x27d4eb2d; + a = a ^ (a >> 15); + return a; +} + +// From http://www.cris.com/~Ttwang/tech/inthash.htm +static inline uint32_t +HashInt64(int64_t key) +{ + key = (~key) + (key << 18); // key = (key << 18) - key - 1; + key = key ^ (uint64_t(key) >> 31); + key = key * 21; // key = (key + (key << 2)) + (key << 4); + key = key ^ (uint64_t(key) >> 11); + key = key + (key << 6); + key = key ^ (uint64_t(key) >> 22); + return uint32_t(key); +} + +template +static inline uint32_t +HashInteger(uintptr_t value); + +template <> +inline uint32_t +HashInteger<4>(uintptr_t value) +{ + return HashInt32(value); +} + +template <> +inline uint32_t +HashInteger<8>(uintptr_t value) +{ + return HashInt64(value); +} + +static inline uint32_t +HashPointer(void *ptr) +{ + return HashInteger(reinterpret_cast(ptr)); +} + +} // namespace ke + +#endif // _INCLUDE_KEIMA_HASHTABLE_H_ diff --git a/public/amtl/am-inlinelist.h b/public/amtl/am-inlinelist.h new file mode 100644 index 
00000000..99c37793 --- /dev/null +++ b/public/amtl/am-inlinelist.h @@ -0,0 +1,183 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_inline_list_h_ +#define _include_amtl_inline_list_h_ + +#include +#include + +namespace ke { + +template class InlineList; + +// Objects can recursively inherit from InlineListNode in order to have +// membership in an InlineList. 
+template +class InlineListNode +{ + friend class InlineList; + + public: + InlineListNode() + : next_(NULL), + prev_(NULL) + { + } + + InlineListNode(InlineListNode *next, InlineListNode *prev) + : next_(next), + prev_(prev) + { + } + + protected: + InlineListNode *next_; + InlineListNode *prev_; +}; + +// An InlineList is a linked list that threads link pointers through objects, +// rather than allocating node memory. A node can be in at most one list at +// any time. +// +// Since InlineLists are designed to be very cheap, there is no requirement +// that elements be removed from a list once the list is destructed. However, +// for as long as the list is alive, all of its contained nodes must also +// be alive. +template +class InlineList +{ + typedef InlineListNode Node; + + Node head_; + + // Work around a clang bug where we can't initialize with &head_ in the ctor. + inline Node *head() { + return &head_; + } + + public: + InlineList() + : head_(head(), head()) + { + } + + ~InlineList() + { +#if !defined(NDEBUG) + // Remove all items to clear their next/prev fields. + while (begin() != end()) + remove(*begin()); +#endif + } + + public: + class iterator + { + friend class InlineList; + Node *iter_; + + public: + iterator(Node *iter) + : iter_(iter) + { + } + + // Fix: the member is |next_|, not |next| (postfix ++ below already uses + // |next_|); the old spelling failed to compile on first instantiation. + iterator & operator ++() { + iter_ = iter_->next_; + return *this; + } + iterator operator ++(int) { + iterator old(*this); + iter_ = iter_->next_; + return old; + } + T * operator *() { + return static_cast(iter_); + } + T * operator ->() { + return static_cast(iter_); + } + bool operator !=(const iterator &where) const { + return iter_ != where.iter_; + } + bool operator ==(const iterator &where) const { + return iter_ == where.iter_; + } + }; + + iterator begin() { + return iterator(head_.next_); + } + + iterator end() { + return iterator(&head_); + } + + iterator erase(iterator &at) { + iterator next = at; + next++; + + remove(at.iter_); + + // Iterator is no longer valid.
+ at.iter_ = NULL; + + return next; + } + + bool empty() const { + return head_.next_ == &head_; + } + + void remove(Node *t) { + t->prev_->next_ = t->next_; + t->next_->prev_ = t->prev_; + +#if !defined(NDEBUG) + t->next_ = NULL; + t->prev_ = NULL; +#endif + } + + void append(Node *t) { + assert(!t->next_); + assert(!t->prev_); + + t->prev_ = head_.prev_; + t->next_ = &head_; + head_.prev_->next_ = t; + head_.prev_ = t; + } +}; + +} + +#endif // _include_amtl_inline_list_h_ + diff --git a/public/amtl/am-linkedlist.h b/public/amtl/am-linkedlist.h new file mode 100644 index 00000000..488e97a2 --- /dev/null +++ b/public/amtl/am-linkedlist.h @@ -0,0 +1,309 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_linkedlist_h_ +#define _include_amtl_linkedlist_h_ + +#include +#include +#include +#include +#include + +namespace ke { + +// LinkedList, analagous to std::list or SourceHook::List. Since it performs a +// malloc() and free() on every contained node, it should be avoided unless +// absolutely necessary, or for when allocation performance is not a factor. It +// is provided here to safely port old AlliedModders code to AMTL. +// +// In order to use a circular chain, LinkedList's allocation size includes +// exactly one T. If T is very large, LinkedList should be allocated on the +// heap, to avoid using the stack. 
+template +class LinkedList : public AllocPolicy +{ + public: + friend class iterator; + + class Node + { + public: + Node(const T &o) + : obj(o) + { + } + Node(Moveable o) + : obj(o) + { + } + + T obj; + Node *next; + Node *prev; + }; + +public: + LinkedList(AllocPolicy = AllocPolicy()) + : length_(0) + { + head()->prev = head(); + head()->next = head(); + } + ~LinkedList() { + clear(); + } + + bool append(const T &obj) { + return insertBefore(end(), obj) != end(); + } + bool append(Moveable obj) { + return insertBefore(end(), obj) != end(); + } + + bool prepend(const T &obj) { + return insertBefore(begin(), obj) != begin(); + } + bool prepend(Moveable obj) { + return insertBefore(begin(), obj) != begin(); + } + + size_t length() const { + return length_; + } + + void clear() { + Node *node = head()->next; + Node *temp; + head()->next = head(); + head()->prev = head(); + + // Iterate through the nodes until we find the sentinel again. + while (node != head()) { + temp = node->next; + freeNode(node); + node = temp; + } + length_ = 0; + } + + bool empty() const { + return (length_ == 0); + } + + T &front() { + assert(!empty()); + return head()->next->obj; + } + T &back() { + assert(!empty()); + return head()->prev->obj; + } + + private: + const Node *head() const { + return sentinel_.address(); + } + Node *head() { + return sentinel_.address(); + } + + Node *allocNode(const T &obj) { + Node *node = (Node *)this->malloc(sizeof(Node)); + if (!node) + return NULL; + new (node) Node(obj); + return node; + } + Node *allocNode(Moveable obj) { + Node *node = (Node *)this->malloc(sizeof(Node)); + if (!node) + return NULL; + new (node) Node(obj); + return node; + } + + void freeNode(Node *node) { + node->obj.~T(); + this->free(node); + } + + private: + StorageBuffer sentinel_; + size_t length_; + + public: + class iterator + { + friend class LinkedList; + + public: + iterator() + : this_(NULL) + { + } + iterator(const LinkedList &src) + : this_(src.head()) + { + } + 
iterator(Node *n) + : this_(n) + { + } + iterator(const iterator &where) + : this_(where.this_) + { + } + + iterator &operator --() { + if (this_) + this_ = this_->prev; + return *this; + } + iterator operator --(int) { + iterator old(*this); + if (this_) + this_ = this_->prev; + return old; + } + iterator &operator ++() { + if (this_) + this_ = this_->next; + return *this; + } + iterator operator ++(int) { + iterator old(*this); + if (this_) + this_ = this_->next; + return old; + } + + const T &operator * () const { + return this_->obj; + } + T &operator * () { + return this_->obj; + } + T *operator ->() { + return &this_->obj; + } + const T *operator ->() const { + return &(this_->obj); + } + + bool operator !=(const iterator &where) const { + return (this_ != where.this_); + } + bool operator ==(const iterator &where) const { + return (this_ == where.this_); + } + + operator bool() { + return !!this_; + } + + private: + Node *this_; + }; + + private: + // Insert obj right before where. + iterator insert(iterator where, Node *node) { + if (!node) + return where; + + Node *pWhereNode = where.this_; + + pWhereNode->prev->next = node; + node->prev = pWhereNode->prev; + pWhereNode->prev = node; + node->next = pWhereNode; + + length_++; + return iterator(node); + } + + public: + iterator begin() { + return iterator(head()->next); + } + iterator end() { + return iterator(head()); + } + iterator erase(iterator where) { + Node *pNode = where.this_; + iterator iter(where); + iter++; + + pNode->prev->next = pNode->next; + pNode->next->prev = pNode->prev; + + freeNode(pNode); + length_--; + + return iter; + } + iterator insertBefore(iterator where, const T &obj) { + return insert(where, allocNode(obj)); + } + iterator insertBefore(iterator where, Moveable obj) { + return insert(where, allocNode(obj)); + } + + public: + // Removes one instance of |obj| from the list, if found. 
+ void remove(const T &obj) { + for (iterator b = begin(); b != end(); b++) { + if (*b == obj) { + erase(b); + break; + } + } + } + + template + iterator find(const U &equ) { + for (iterator iter = begin(); iter != end(); iter++) { + if (*iter == equ) + return iter; + } + return end(); + } + + + private: + // These are disallowed because they basically violate the failure handling + // model for AllocPolicies and are also likely to have abysmal performance. + LinkedList &operator =(const LinkedList &other) KE_DELETE; + LinkedList(const LinkedList &other) KE_DELETE; +}; + +} // namespace ke + +#endif //_INCLUDE_CSDM_LIST_H diff --git a/public/amtl/am-moveable.h b/public/amtl/am-moveable.h new file mode 100644 index 00000000..e500dbc7 --- /dev/null +++ b/public/amtl/am-moveable.h @@ -0,0 +1,73 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_moveable_h_ +#define _include_amtl_moveable_h_ + +namespace ke { + +// This is a feature in C++11, but since AM projects do not have access to +// C++11 yet, we provide templates to implement move semantics. A class can +// provide a constructor for (ke::Moveable t) which containers will try +// to use. +// +// When implementing a constructor that takes a Moveable, the object being +// moved should be left in a state that is safe, since its destructor will +// be called even though it has been moved. + +template +struct Moveable +{ + public: + explicit Moveable(T &t) + : t_(t) + { + } + + T *operator ->() { + return &t_; + } + operator T &() { + return t_; + } + + private: + T &t_; +}; + +template +static inline Moveable +Move(T &t) +{ + return Moveable(t); +} + +} // namespace ke + +#endif // _include_amtl_moveable_h_ diff --git a/public/amtl/am-refcounting-threadsafe.h b/public/amtl/am-refcounting-threadsafe.h new file mode 100644 index 00000000..a37b39d4 --- /dev/null +++ b/public/amtl/am-refcounting-threadsafe.h @@ -0,0 +1,68 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef _include_amtl_ts_refcounting_h_ +#define _include_amtl_ts_refcounting_h_ + +#include +#include + +namespace ke { + +template +class RefcountedThreadsafe +{ + public: + RefcountedThreadsafe() + : refcount_(1) + { + } + + void AddRef() { + refcount_.increment(); + } + bool Release() { + if (!refcount_.decrement()) { + delete static_cast(this); + return false; + } + return true; + } + + protected: + ~RefcountedThreadsafe() { + } + + private: + AtomicRefCount refcount_; +}; + +} // namespace ke + +#endif // _include_amtl_ts_refcounting_h_ diff --git a/public/amtl/am-refcounting.h b/public/amtl/am-refcounting.h new file mode 100644 index 00000000..71a6f554 --- /dev/null +++ b/public/amtl/am-refcounting.h @@ -0,0 +1,302 @@ +// vim: set sts=8 ts=4 sw=4 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_refcounting_h_ +#define _include_amtl_refcounting_h_ + +#include +#include + +namespace ke { + +template class Ref; + +// Holds a refcounted T without addrefing it. This is similar to PassRef<> +// below, but is intended only for freshly allocated objects which start +// with reference count 1, and we don't want to add an extra ref just by +// assigning to PassRef<> or Ref<>. +template +class Newborn +{ + public: + Newborn(T *t) + : thing_(t) + { + } + + T *release() const { + return ReturnAndVoid(thing_); + } + + private: + mutable T *thing_; +}; + +template +static inline Newborn +NoAddRef(T *t) +{ + return Newborn(t); +} + +// When returning a value, we'd rather not be needlessly changing the refcount, +// so we have a special type to use for returns. +template +class PassRef +{ + public: + PassRef(T *thing) + : thing_(thing) + { + AddRef(); + } + PassRef() + : thing_(NULL) + { + } + + PassRef(const Newborn &other) + : thing_(other.release()) + { + // Don't addref, newborn means already addref'd. 
+ } + + template + inline PassRef(const Ref &other); + + PassRef(const PassRef &other) + : thing_(other.release()) + { + } + template + PassRef(const PassRef &other) + : thing_(other.release()) + { + } + ~PassRef() + { + Release(); + } + + operator T &() { + return *thing_; + } + operator T *() const { + return thing_; + } + T *operator ->() const { + return operator *(); + } + T *operator *() const { + return thing_; + } + bool operator !() const { + return !thing_; + } + + T *release() const { + return ReturnAndVoid(thing_); + } + + template + PassRef &operator =(const PassRef &other) { + Release(); + thing_ = other.release(); + return *this; + } + + private: + // Disallowed operators. + PassRef &operator =(T *other); + PassRef &operator =(Newborn &other); + + void AddRef() { + if (thing_) + thing_->AddRef(); + } + void Release() { + if (thing_) + thing_->Release(); + } + + private: + mutable T *thing_; +}; + +// Classes which are refcounted should inherit from this. +template +class Refcounted +{ + public: + Refcounted() + : refcount_(1) + { + } + + void AddRef() { + refcount_++; + } + void Release() { + assert(refcount_ > 0); + if (--refcount_ == 0) + delete static_cast(this); + } + + protected: + ~Refcounted() { + } + + private: + uintptr_t refcount_; +}; + +// Simple class for automatic refcounting. 
+template +class Ref +{ + public: + Ref(T *thing) + : thing_(thing) + { + AddRef(); + } + + Ref() + : thing_(NULL) + { + } + + Ref(const Ref &other) + : thing_(other.thing_) + { + AddRef(); + } + Ref(Moveable other) + : thing_(other->thing_) + { + other->thing_ = NULL; + } + template + Ref(const Ref &other) + : thing_(*other) + { + AddRef(); + } + Ref(const PassRef &other) + : thing_(other.release()) + { + } + template + Ref(const PassRef &other) + : thing_(other.release()) + { + } + Ref(const Newborn &other) + : thing_(other.release()) + { + } + ~Ref() + { + Release(); + } + + T *operator ->() const { + return operator *(); + } + T *operator *() const { + return thing_; + } + operator T *() { + return thing_; + } + bool operator !() const { + return !thing_; + } + + template + Ref &operator =(S *thing) { + Release(); + thing_ = thing; + AddRef(); + return *this; + } + + template + Ref &operator =(const PassRef &other) { + Release(); + thing_ = other.release(); + return *this; + } + + template + Ref &operator =(const Newborn &other) { + Release(); + thing_ = other.release(); + return *this; + } + + Ref &operator =(const Ref &other) { + Release(); + thing_ = other.thing_; + AddRef(); + return *this; + } + + Ref &operator =(Moveable other) { + Release(); + thing_ = other->thing_; + other->thing_ = NULL; + return *this; + } + + private: + void AddRef() { + if (thing_) + thing_->AddRef(); + } + void Release() { + if (thing_) + thing_->Release(); + } + + protected: + T *thing_; +}; + +template template +PassRef::PassRef(const Ref &other) + : thing_(*other) +{ + AddRef(); +} + +} // namespace ke + +#endif // _include_amtl_refcounting_h_ + diff --git a/public/amtl/am-string.h b/public/amtl/am-string.h new file mode 100644 index 00000000..074339fa --- /dev/null +++ b/public/amtl/am-string.h @@ -0,0 +1,134 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_string_h_ +#define _include_amtl_string_h_ + +#include +#include +#include +#include + +namespace ke { + +// ASCII string. 
+class AString +{ + public: + AString() + : length_(0) + { + } + + explicit AString(const char *str) { + set(str, strlen(str)); + } + AString(const char *str, size_t length) { + set(str, length); + } + AString(const AString &other) { + if (other.length_) + set(other.chars_, other.length_); + else + length_ = 0; + } + AString(Moveable other) + : chars_(other->chars_.take()), + length_(other->length_) + { + other->length_ = 0; + } + + AString &operator =(const char *str) { + if (str && str[0]) { + set(str, strlen(str)); + } else { + chars_ = NULL; + length_ = 0; + } + return *this; + } + AString &operator =(const AString &other) { + if (other.length_) { + set(other.chars_, other.length_); + } else { + chars_ = NULL; + length_ = 0; + } + return *this; + } + AString &operator =(Moveable other) { + chars_ = other->chars_.take(); + length_ = other->length_; + other->length_ = 0; + return *this; + } + + int compare(const char *str) const { + return strcmp(chars(), str); + } + int compare(const AString &other) const { + return strcmp(chars(), other.chars()); + } + bool operator ==(const AString &other) const { + return other.length() == length() && + memcmp(other.chars(), chars(), length()) == 0; + } + + char operator [](size_t index) const { + assert(index < length()); + return chars()[index]; + } + + size_t length() const { + return length_; + } + + const char *chars() const { + if (!chars_) + return ""; + return chars_; + } + + private: + void set(const char *str, size_t length) { + chars_ = new char[length + 1]; + length_ = length; + memcpy(chars_, str, length); + chars_[length] = '\0'; + } + + private: + AutoArray chars_; + size_t length_; +}; + +} + +#endif // _include_amtl_string_h_ diff --git a/public/amtl/am-thread-posix.h b/public/amtl/am-thread-posix.h new file mode 100644 index 00000000..bc654261 --- /dev/null +++ b/public/amtl/am-thread-posix.h @@ -0,0 +1,213 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders 
LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef _include_amtl_thread_posix_h_ +#define _include_amtl_thread_posix_h_ + +#include +#include +#include +#include +#if defined(__linux__) +# include +#endif +#if defined(__APPLE__) +# include +#endif + +namespace ke { + +class Mutex : public Lockable +{ + public: + Mutex() { +#if !defined(NDEBUG) + int rv = +#endif + pthread_mutex_init(&mutex_, NULL); + assert(rv == 0); + } + ~Mutex() { + pthread_mutex_destroy(&mutex_); + } + + bool DoTryLock() KE_OVERRIDE { + return pthread_mutex_trylock(&mutex_) == 0; + } + + void DoLock() KE_OVERRIDE { + pthread_mutex_lock(&mutex_); + } + + void DoUnlock() KE_OVERRIDE { + pthread_mutex_unlock(&mutex_); + } + + pthread_mutex_t *raw() { + return &mutex_; + } + + private: + pthread_mutex_t mutex_; +}; + +// Currently, this class only supports single-listener CVs. +class ConditionVariable : public Lockable +{ + public: + ConditionVariable() { +#if !defined(NDEBUG) + int rv = +#endif + pthread_cond_init(&cv_, NULL); + assert(rv == 0); + } + ~ConditionVariable() { + pthread_cond_destroy(&cv_); + } + + bool DoTryLock() KE_OVERRIDE { + return mutex_.DoTryLock(); + } + void DoLock() KE_OVERRIDE { + mutex_.DoLock(); + } + void DoUnlock() KE_OVERRIDE { + mutex_.DoUnlock(); + } + + void Notify() { + AssertCurrentThreadOwns(); + pthread_cond_signal(&cv_); + } + + WaitResult Wait(size_t timeout_ms) { + AssertCurrentThreadOwns(); + +#if defined(__linux__) + struct timespec ts; + if (clock_gettime(CLOCK_REALTIME, &ts) == -1) + return Wait_Error; +#else + struct timeval tv; + gettimeofday(&tv, NULL); + + struct timespec ts; + ts.tv_sec = tv.tv_sec; + ts.tv_nsec = tv.tv_usec * 1000; +#endif + + ts.tv_sec += timeout_ms / 1000; + ts.tv_nsec += (timeout_ms % 1000) * 1000000; + if (ts.tv_nsec >= 1000000000) { + ts.tv_sec++; + ts.tv_nsec -= 1000000000; + } + + DebugSetUnlocked(); + int rv = pthread_cond_timedwait(&cv_, mutex_.raw(), &ts); + DebugSetLocked(); + + if (rv == ETIMEDOUT) + return Wait_Timeout; + if (rv == 0) + return Wait_Signaled; 
+ return Wait_Error; + } + + WaitResult Wait() { + AssertCurrentThreadOwns(); + + DebugSetUnlocked(); + int rv = pthread_cond_wait(&cv_, mutex_.raw()); + DebugSetLocked(); + + if (rv == 0) + return Wait_Signaled; + return Wait_Error; + } + + private: + Mutex mutex_; + pthread_cond_t cv_; +}; + +class Thread +{ + struct ThreadData { + IRunnable *run; + char name[17]; + }; + public: + Thread(IRunnable *run, const char *name = NULL) { + ThreadData *data = new ThreadData; + data->run = run; + snprintf(data->name, sizeof(data->name), "%s", name ? name : ""); + + initialized_ = (pthread_create(&thread_, NULL, Main, data) == 0); + if (!initialized_) + delete data; + } + + bool Succeeded() const { + return initialized_; + } + + void Join() { + if (!Succeeded()) + return; + pthread_join(thread_, NULL); + } + + private: + static void *Main(void *arg) { + AutoPtr data((ThreadData *)arg); + + if (data->name[0]) { +#if defined(__linux__) + prctl(PR_SET_NAME, (unsigned long)data->name); +#elif defined(__APPLE__) + int (*fn)(const char *) = (int (*)(const char *))dlsym(RTLD_DEFAULT, "pthread_setname_np"); + if (fn) + fn(data->name); +#endif + } + data->run->Run(); + return NULL; + } + + private: + bool initialized_; + pthread_t thread_; +}; + +} // namespace ke + +#endif // _include_amtl_thread_posix_h_ + diff --git a/public/amtl/am-thread-utils.h b/public/amtl/am-thread-utils.h new file mode 100644 index 00000000..98db2616 --- /dev/null +++ b/public/amtl/am-thread-utils.h @@ -0,0 +1,265 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _include_amtl_threads_ +#define _include_amtl_threads_ + +#include +#if defined(_MSC_VER) +# include +# include +#else +# include +#endif +#include + +// Thread primitives for SourcePawn. +// +// Linking Requirements: +// +// OS X: None (-lpthread and -ldl are optional) +// Windows: None +// Linux: -lpthread -lrt required +// +// -- Mutexes -- +// +// A Lockable is a mutual exclusion primitive. It can be owned by at most one +// thread at a time, and ownership blocks any other thread from taking taking +// ownership. Ownership must be acquired and released on the same thread. +// Lockables are not re-entrant. +// +// While a few classes support the Lockable interface, the simplest Lockable +// object that can be instantiated is a Mutex. 
+//
+// -- Condition Variables --
+//
+// A ConditionVariable provides mutually exclusive access based on a
+// condition occurring. CVs provide two capabilities: Wait(), which will block
+// until the condition is triggered, and Notify(), which signals any blocking
+// thread that the condition has occurred.
+//
+// Condition variables have an underlying mutex lock. This lock must be
+// acquired before calling Wait() or Notify(). It is automatically released
+// once Wait begins blocking. This operation is atomic with respect to other
+// threads and the mutex. For example, it is not possible for the lock to be
+// acquired by another thread in between unlocking and blocking. Since Notify
+// also requires the lock to be acquired, there is no risk of an event
+// accidentally dissipating into thin air because it was sent before the other
+// thread began blocking.
+//
+// When Wait() returns, the lock is automatically re-acquired. This operation
+// is NOT atomic. In between waking up and re-acquiring the lock, another
+// thread may steal the lock and issue another event. Applications must
+// account for this. For example, a message pump should check that there are
+// no messages left to process before blocking again.
+//
+// Likewise, it is also not defined whether a Notify() will have any effect
+// while a thread is not waiting on the monitor. This is yet another reason
+// the above paragraph is so important - applications should, under a lock of
+// the condition variable - check for state changes before waiting.
+//
+// -- Threads --
+//
+// A Thread object, when created, spawns a new thread with the given callback
+// (the callbacks must implement IRunnable). Threads have one method of
+// interest, Join(), which will block until the thread's execution finishes.
+// Deleting a thread object will free any operating system resources associated
+// with that thread, if the thread has finished executing.
+// +// Threads can fail to spawn; make sure to check Succeeded(). +// + +namespace ke { + +// Abstraction for getting a unique thread identifier. Debug-only. +#if defined(_MSC_VER) +typedef DWORD ThreadId; + +static inline ThreadId GetCurrentThreadId() +{ + return ::GetCurrentThreadId(); +} +#else +typedef pthread_t ThreadId; + +static inline ThreadId GetCurrentThreadId() +{ + return pthread_self(); +} +#endif + +// Classes which use non-reentrant, same-thread lock/unlock semantics should +// inherit from this and implement DoLock/DoUnlock. +class Lockable +{ + public: + Lockable() + { +#if !defined(NDEBUG) + owner_ = 0; +#endif + } + virtual ~Lockable() { + } + + bool TryLock() { + if (DoTryLock()) { + DebugSetLocked(); + return true; + } + return false; + } + + void Lock() { + assert(Owner() != GetCurrentThreadId()); + DoLock(); + DebugSetLocked(); + } + + void Unlock() { + assert(Owner() == GetCurrentThreadId()); + DebugSetUnlocked(); + DoUnlock(); + } + + void AssertCurrentThreadOwns() const { + assert(Owner() == GetCurrentThreadId()); + } +#if !defined(NDEBUG) + bool Locked() const { + return owner_ != 0; + } + ThreadId Owner() const { + return owner_; + } +#endif + + virtual bool DoTryLock() = 0; + virtual void DoLock() = 0; + virtual void DoUnlock() = 0; + + protected: + void DebugSetUnlocked() { +#if !defined(NDEBUG) + owner_ = 0; +#endif + } + void DebugSetLocked() { +#if !defined(NDEBUG) + owner_ = GetCurrentThreadId(); +#endif + } + + protected: +#if !defined(NDEBUG) + ThreadId owner_; +#endif +}; + +// RAII for automatically locking and unlocking an object. +class AutoLock +{ + public: + AutoLock(Lockable *lock) + : lock_(lock) + { + lock_->Lock(); + } + ~AutoLock() { + lock_->Unlock(); + } + + private: + Lockable *lock_; +}; + +class AutoTryLock +{ + public: + AutoTryLock(Lockable *lock) + { + lock_ = lock->TryLock() ? 
lock : NULL; + } + ~AutoTryLock() { + if (lock_) + lock_->Unlock(); + } + + private: + Lockable *lock_; +}; + +// RAII for automatically unlocking and relocking an object. +class AutoUnlock +{ + public: + AutoUnlock(Lockable *lock) + : lock_(lock) + { + lock_->Unlock(); + } + ~AutoUnlock() { + lock_->Lock(); + } + + private: + Lockable *lock_; +}; + +enum WaitResult { + // Woke up because something happened. + Wait_Signaled, + + // Woke up because nothing happened and a timeout was specified. + Wait_Timeout, + + // Woke up, but because of an error. + Wait_Error +}; + +// This must be implemented in order to spawn a new thread. +class IRunnable +{ + public: + virtual ~IRunnable() { + } + + virtual void Run() = 0; +}; + +} // namespace ke + +// Include the actual thread implementations. +#if defined(_MSC_VER) +# include "am-thread-windows.h" +#else +# include "am-thread-posix.h" +#endif + +#endif // _include_amtl_threads_ diff --git a/public/amtl/am-thread-windows.h b/public/amtl/am-thread-windows.h new file mode 100644 index 00000000..c4e34ed2 --- /dev/null +++ b/public/amtl/am-thread-windows.h @@ -0,0 +1,161 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef _include_amtl_thread_windows_h_
+#define _include_amtl_thread_windows_h_
+
+// NOTE(review): include target reconstructed — the angle-bracket text was
+// stripped from this patch. am-utility.h supplies the KE_OVERRIDE macro used
+// below; verify against upstream AMTL.
+#include <am-utility.h>
+
+namespace ke {
+
+class CriticalSection : public Lockable
+{
+ public:
+  CriticalSection() {
+    InitializeCriticalSection(&cs_);
+  }
+  ~CriticalSection() {
+    DeleteCriticalSection(&cs_);
+  }
+
+  bool DoTryLock() KE_OVERRIDE {
+    // TryEnterCriticalSection returns a BOOL; normalize to bool.
+    return !!TryEnterCriticalSection(&cs_);
+  }
+  void DoLock() KE_OVERRIDE {
+    EnterCriticalSection(&cs_);
+  }
+
+  void DoUnlock() KE_OVERRIDE {
+    LeaveCriticalSection(&cs_);
+  }
+
+ private:
+  CRITICAL_SECTION cs_;
+};
+
+typedef CriticalSection Mutex;
+
+// Currently, this class only supports single-listener CVs.
+class ConditionVariable : public Lockable
+{
+ public:
+  ConditionVariable() {
+    // Auto-reset event, initially unsignaled: the "signaled" bit sticks
+    // until a wait consumes it, which is what makes Notify-before-Wait safe.
+    event_ = CreateEvent(NULL, FALSE, FALSE, NULL);
+  }
+  ~ConditionVariable() {
+    CloseHandle(event_);
+  }
+
+  bool DoTryLock() KE_OVERRIDE {
+    return cs_.DoTryLock();
+  }
+  void DoLock() KE_OVERRIDE {
+    cs_.DoLock();
+  }
+  void DoUnlock() KE_OVERRIDE {
+    cs_.DoUnlock();
+  }
+
+  void Notify() {
+    AssertCurrentThreadOwns();
+    SetEvent(event_);
+  }
+
+  WaitResult Wait(size_t timeout_ms) {
+    // This will assert if the lock has not been acquired.
We don't need to be + // atomic here, like pthread_cond_wait, because the event bit will stick + // until reset by a wait function. + Unlock(); + DWORD rv = WaitForSingleObject(event_, timeout_ms); + Lock(); + + if (rv == WAIT_TIMEOUT) + return Wait_Timeout; + if (rv == WAIT_FAILED) + return Wait_Error; + return Wait_Signaled; + } + + WaitResult Wait() { + return Wait(INFINITE); + } + + private: + CriticalSection cs_; + HANDLE event_; +}; + +class Thread +{ + public: + Thread(IRunnable *run, const char *name = NULL) { + thread_ = CreateThread(NULL, 0, Main, run, 0, NULL); + } + ~Thread() { + if (!thread_) + return; + CloseHandle(thread_); + } + + bool Succeeded() const { + return !!thread_; + } + + void Join() { + if (!Succeeded()) + return; + WaitForSingleObject(thread_, INFINITE); + } + + HANDLE handle() const { + return thread_; + } + + private: + static DWORD WINAPI Main(LPVOID arg) { + ((IRunnable *)arg)->Run(); + return 0; + } + +#pragma pack(push, 8) + struct ThreadNameInfo { + DWORD dwType; + LPCSTR szName; + DWORD dwThreadID; + DWORD dwFlags; + }; +#pragma pack(pop) + + private: + HANDLE thread_; +}; + +} // namespace ke + +#endif // _include_amtl_thread_windows_h_ diff --git a/public/amtl/am-utility.h b/public/amtl/am-utility.h new file mode 100644 index 00000000..afa0d44a --- /dev/null +++ b/public/amtl/am-utility.h @@ -0,0 +1,346 @@ +// vim: set sts=8 ts=2 sw=2 tw=99 et: +// +// Copyright (C) 2013, David Anderson and AlliedModders LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of AlliedModders LLC nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+
+#ifndef _include_amtl_utility_h_
+#define _include_amtl_utility_h_
+
+// NOTE(review): the include targets below were reconstructed — the
+// angle-bracket text was stripped from this patch. Chosen from usage:
+// assert(), NULL/size_t, malloc-family, uint64_t/uintptr_t, the MSVC
+// _BitScan* intrinsics, and the Moveable<> wrapper. Verify against
+// upstream AMTL before applying.
+#include <assert.h>
+#include <stdlib.h>
+#include <stdint.h>
+//#include <stdio.h>
+#if defined(_MSC_VER)
+# include <intrin.h>
+#endif
+#include <am-moveable.h>
+
+#define KE_32BIT
+
+#if defined(_MSC_VER)
+# pragma warning(disable:4355)
+#endif
+
+namespace ke {
+
+static const size_t kMallocAlignment = sizeof(void *) * 2;
+
+static const size_t kKB = 1024;
+static const size_t kMB = 1024 * kKB;
+static const size_t kGB = 1024 * kMB;
+
+typedef unsigned char * Address;
+
+// Return the current value of |t| and reset |t| to its default-constructed
+// state (used to implement ownership transfer, e.g. AutoPtr::take()).
+template <typename T> T
+ReturnAndVoid(T &t)
+{
+  T saved = t;
+  t = T();
+  return saved;
+}
+
+#if __cplusplus >= 201103L
+# define KE_CXX11
+#endif
+
+#if defined(KE_CXX11)
+# define KE_DELETE = delete
+# define KE_OVERRIDE = override
+#else
+# define KE_DELETE
+# define KE_OVERRIDE
+#endif
+
+// Wrapper that automatically deletes its contents. The pointer can be taken
+// to avoid destruction.
+template <typename T>
+class AutoPtr
+{
+  T *t_;
+
+ public:
+  AutoPtr()
+    : t_(NULL)
+  {
+  }
+  AutoPtr(T *t)
+    : t_(t)
+  {
+  }
+  AutoPtr(Moveable<AutoPtr<T> > other)
+  {
+    t_ = other->t_;
+    other->t_ = NULL;
+  }
+  ~AutoPtr() {
+    delete t_;
+  }
+  T *take() {
+    return ReturnAndVoid(t_);
+  }
+  T *operator *() const {
+    return t_;
+  }
+  T *operator ->() const {
+    return t_;
+  }
+  operator T *() const {
+    return t_;
+  }
+  T *operator =(T *t) {
+    delete t_;
+    t_ = t;
+    return t_;
+  }
+  T *operator =(Moveable<AutoPtr<T> > other) {
+    delete t_;
+    t_ = other->t_;
+    other->t_ = NULL;
+    return t_;
+  }
+  bool operator !() const {
+    return !t_;
+  }
+
+ private:
+  AutoPtr(const AutoPtr &other) KE_DELETE;
+  AutoPtr &operator =(const AutoPtr &other) KE_DELETE;
+};
+
+// Wrapper that automatically deletes its contents. The pointer can be taken
+// to avoid destruction.
+template <typename T>
+class AutoArray
+{
+  T *t_;
+
+ public:
+  AutoArray()
+    : t_(NULL)
+  {
+  }
+  explicit AutoArray(T *t)
+    : t_(t)
+  {
+  }
+  ~AutoArray() {
+    delete [] t_;
+  }
+  T *take() {
+    return ReturnAndVoid(t_);
+  }
+  T *operator *() const {
+    return t_;
+  }
+  T &operator [](size_t index) {
+    return t_[index];
+  }
+  const T &operator [](size_t index) const {
+    return t_[index];
+  }
+  operator T *() const {
+    return t_;
+  }
+  void operator =(T *t) {
+    delete [] t_;
+    t_ = t;
+  }
+  bool operator !() const {
+    return !t_;
+  }
+};
+
+// Index of the highest set bit (floor(log2(number))). |number| must be
+// nonzero.
+static inline size_t
+Log2(size_t number)
+{
+  assert(number != 0);
+
+#ifdef _MSC_VER
+  unsigned long rval;
+# ifdef _M_IX86
+  _BitScanReverse(&rval, number);
+# elif _M_X64
+  _BitScanReverse64(&rval, number);
+# endif
+  return rval;
+#else
+  size_t bit;
+  asm("bsr %1, %0\n"
+      : "=r" (bit)
+      : "rm" (number));
+  return bit;
+#endif
+}
+
+// Index of the lowest set bit. |number| must be nonzero.
+static inline size_t
+FindRightmostBit(size_t number)
+{
+  assert(number != 0);
+
+#ifdef _MSC_VER
+  unsigned long rval;
+# ifdef _M_IX86
+  _BitScanForward(&rval, number);
+# elif _M_X64
+  _BitScanForward64(&rval, number);
+# endif
+  return rval;
+#else
+  size_t bit;
+  asm("bsf %1, %0\n"
+      : "=r" (bit)
+      : "rm" (number));
+  return bit;
+#endif
+}
+
+static inline bool
+IsPowerOfTwo(size_t value)
+{
+  if (value == 0)
+    return false;
+  return !(value & (value - 1));
+}
+
+// Round |count| up to the nearest multiple of |alignment| (a power of two).
+static inline size_t
+Align(size_t count, size_t alignment)
+{
+  assert(IsPowerOfTwo(alignment));
+  return count + (alignment - (count % alignment)) % alignment;
+}
+
+// Conservatively check whether a + b can overflow unsigned arithmetic.
+static inline bool
+IsUint32AddSafe(unsigned a, unsigned b)
+{
+  if (!a || !b)
+    return true;
+  size_t log2_a = Log2(a);
+  size_t log2_b = Log2(b);
+  // BUG FIX: the original compared Log2() against sizeof(unsigned) * 8,
+  // which is always true for any nonzero value (Log2 is at most bits-1),
+  // so the check rejected nothing. Both operands must fit in bits-1 bits
+  // for their sum to be guaranteed to fit.
+  return (log2_a < sizeof(unsigned) * 8 - 1) &&
+         (log2_b < sizeof(unsigned) * 8 - 1);
+}
+
+static inline bool
+IsUintPtrAddSafe(size_t a, size_t b)
+{
+  if (!a || !b)
+    return true;
+  size_t log2_a = Log2(a);
+  size_t log2_b = Log2(b);
+  // BUG FIX: same vacuous comparison as IsUint32AddSafe — see note there.
+  return (log2_a < sizeof(size_t) * 8 - 1) &&
+         (log2_b < sizeof(size_t) * 8 - 1);
+}
+
+// Conservatively check whether a * b can overflow unsigned arithmetic.
+static inline bool
+IsUint32MultiplySafe(unsigned a, unsigned b)
+{
+  if (a <= 1 || b <= 1)
+    return true;
+
+  size_t log2_a = Log2(a);
+  size_t log2_b = Log2(b);
+  // BUG FIX: floor(log2(a)) + floor(log2(b)) == N only bounds the product
+  // below 2^(N+2), so "<= bits" admitted overflowing products (e.g.
+  // 65536 * 65536 passed while producing 2^32). Require two bits of
+  // headroom for a guaranteed-safe (conservative) answer.
+  return log2_a + log2_b <= sizeof(unsigned) * 8 - 2;
+}
+
+static inline bool
+IsUintPtrMultiplySafe(size_t a, size_t b)
+{
+  if (a <= 1 || b <= 1)
+    return true;
+
+  size_t log2_a = Log2(a);
+  size_t log2_b = Log2(b);
+  // BUG FIX: same headroom issue as IsUint32MultiplySafe — see note there.
+  return log2_a + log2_b <= sizeof(size_t) * 8 - 2;
+}
+
+#define ARRAY_LENGTH(array) (sizeof(array) / sizeof(array[0]))
+#define STATIC_ASSERT(cond) extern int static_assert_f(int a[(cond) ? 1 : -1])
+
+#define IS_ALIGNED(addr, alignment) (!(uintptr_t(addr) & ((alignment) - 1)))
+
+template <typename T>
+static inline bool
+IsAligned(T addr, size_t alignment)
+{
+  assert(IsPowerOfTwo(alignment));
+  return !(uintptr_t(addr) & (alignment - 1));
+}
+
+// Round |addr| down to the nearest |alignment| boundary (a power of two).
+static inline Address
+AlignedBase(Address addr, size_t alignment)
+{
+  assert(IsPowerOfTwo(alignment));
+  return Address(uintptr_t(addr) & ~(alignment - 1));
+}
+
+template <typename T> static inline T
+Min(const T &t1, const T &t2)
+{
+  return t1 < t2 ? t1 : t2;
+}
+
+template <typename T> static inline T
+Max(const T &t1, const T &t2)
+{
+  return t1 > t2 ? t1 : t2;
+}
+
+// Raw, suitably-aligned storage for a T without constructing it; the union
+// with uint64_t forces 8-byte alignment of the buffer.
+template <typename T>
+class StorageBuffer
+{
+ public:
+  T *address() {
+    return reinterpret_cast<T *>(buffer_);
+  }
+  const T *address() const {
+    return reinterpret_cast<const T *>(buffer_);
+  }
+
+ private:
+  union {
+    char buffer_[sizeof(T)];
+    uint64_t aligned_;
+  };
+};
+
+#if defined(_MSC_VER)
+# define KE_SIZET_FMT "%Iu"
+#elif defined(__GNUC__)
+# define KE_SIZET_FMT "%zu"
+#else
+# error "Implement format specifier string"
+#endif
+
+#if defined(__GNUC__)
+# define KE_CRITICAL_LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+# define KE_CRITICAL_LIKELY(x) x
+#endif
+
+}
+
+#endif // _include_amtl_utility_h_
diff --git a/public/amtl/am-vector.h b/public/amtl/am-vector.h
new file mode 100644
index 00000000..2b0b8feb
--- /dev/null
+++ b/public/amtl/am-vector.h
@@ -0,0 +1,239 @@
+// vim: set sts=8 ts=2 sw=2 tw=99 et:
+//
+// Copyright (C) 2013, David Anderson and AlliedModders LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+//   list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of AlliedModders LLC nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef _INCLUDE_KEIMA_TPL_CPP_VECTOR_H_
+#define _INCLUDE_KEIMA_TPL_CPP_VECTOR_H_
+
+// NOTE(review): the five include targets below were reconstructed — the
+// angle-bracket text was stripped from this patch. Chosen from usage:
+// placement new, free-store helpers, the default SystemAllocatorPolicy,
+// the IsUintPtr*Safe predicates, and Moveable<>. Verify against upstream
+// AMTL before applying.
+#include <new>
+#include <stdlib.h>
+#include <am-allocator-policies.h>
+#include <am-utility.h>
+#include <am-moveable.h>
+
+namespace ke {
+
+// Dynamically-resized array. AllocPolicy supplies malloc/free and
+// reportAllocationOverflow(); fallible operations return false on OOM.
+template <typename T, typename AllocPolicy = SystemAllocatorPolicy>
+class Vector : public AllocPolicy
+{
+ public:
+  Vector(AllocPolicy = AllocPolicy())
+    : data_(NULL),
+      nitems_(0),
+      maxsize_(0)
+  {
+  }
+
+  Vector(Moveable<Vector<T, AllocPolicy> > other) {
+    data_ = other->data_;
+    nitems_ = other->nitems_;
+    maxsize_ = other->maxsize_;
+    other->reset();
+  }
+
+  ~Vector() {
+    zap();
+  }
+
+  bool append(const T &item) {
+    if (!growIfNeeded(1))
+      return false;
+    new (&data_[nitems_]) T(item);
+    nitems_++;
+    return true;
+  }
+  bool append(Moveable<T> item) {
+    if (!growIfNeeded(1))
+      return false;
+    new (&data_[nitems_]) T(item);
+    nitems_++;
+    return true;
+  }
+  void infallibleAppend(const T &item) {
+    // BUG FIX: the original evaluated growIfNeeded(1) inside assert(), so
+    // in NDEBUG builds the growth call was compiled out entirely and the
+    // placement new below wrote past the end of the buffer.
+    bool ok = growIfNeeded(1);
+    assert(ok);
+    (void)ok;
+    new (&data_[nitems_]) T(item);
+    nitems_++;
+  }
+  void infallibleAppend(Moveable<T> item) {
+    // BUG FIX: must run outside assert() — see note above.
+    bool ok = growIfNeeded(1);
+    assert(ok);
+    (void)ok;
+    new (&data_[nitems_]) T(item);
+    nitems_++;
+  }
+
+  // Shift all elements including |at| up by one, and insert |item| at the
+  // given position. If |at| is one greater than the last usable index,
+  // i.e. |at == length()|, then this is the same as append(). No other
+  // invalid indexes are allowed.
+  //
+  // This is a linear-time operation.
+  bool insert(size_t at, const T &item) {
+    if (at == length())
+      return append(item);
+    if (!moveUp(at))
+      return false;
+    new (&data_[at]) T(item);
+    return true;
+  }
+  bool insert(size_t at, Moveable<T> item) {
+    if (at == length())
+      return append(item);
+    if (!moveUp(at))
+      return false;
+    new (&data_[at]) T(item);
+    return true;
+  }
+
+  // Shift all elements at the given position down, removing the given
+  // element. This is a linear-time operation.
+  void remove(size_t at) {
+    for (size_t i = at; i < length() - 1; i++)
+      data_[i] = Moveable<T>(data_[i + 1]);
+    pop();
+  }
+
+  T popCopy() {
+    T t = at(length() - 1);
+    pop();
+    return t;
+  }
+  void pop() {
+    assert(nitems_);
+    data_[nitems_ - 1].~T();
+    nitems_--;
+  }
+  bool empty() const {
+    return length() == 0;
+  }
+  size_t length() const {
+    return nitems_;
+  }
+  T& at(size_t i) {
+    assert(i < length());
+    return data_[i];
+  }
+  const T& at(size_t i) const {
+    assert(i < length());
+    return data_[i];
+  }
+  T& operator [](size_t i) {
+    return at(i);
+  }
+  const T& operator [](size_t i) const {
+    return at(i);
+  }
+  void clear() {
+    // BUG FIX: destroy the elements rather than only resetting the count;
+    // the original leaked anything a non-trivial ~T() would release.
+    for (size_t i = 0; i < nitems_; i++)
+      data_[i].~T();
+    nitems_ = 0;
+  }
+  const T &back() const {
+    return at(length() - 1);
+  }
+  T &back() {
+    return at(length() - 1);
+  }
+
+  T *buffer() const {
+    return data_;
+  }
+
+  bool ensure(size_t desired) {
+    if (desired <= length())
+      return true;
+
+    return growIfNeeded(desired - length());
+  }
+
+ private:
+  // These are disallowed because they basically violate the failure handling
+  // model for AllocPolicies and are also likely to have abysmal performance.
+  Vector(const Vector &other) KE_DELETE;
+  Vector &operator =(const Vector &other) KE_DELETE;
+
+ private:
+  void zap() {
+    for (size_t i = 0; i < nitems_; i++)
+      data_[i].~T();
+    this->free(data_);
+  }
+  void reset() {
+    data_ = NULL;
+    nitems_ = 0;
+    maxsize_ = 0;
+  }
+
+  // Open a hole at |at| by move-appending the last element and shifting the
+  // middle elements up by one; the caller placement-news into data_[at].
+  bool moveUp(size_t at) {
+    assert(at < nitems_);
+    if (!append(Moveable<T>(data_[nitems_ - 1])))
+      return false;
+
+    for (size_t i = nitems_ - 2; i > at; i--)
+      data_[i] = Moveable<T>(data_[i - 1]);
+    return true;
+  }
+
+  bool growIfNeeded(size_t needed)
+  {
+    if (!IsUintPtrAddSafe(nitems_, needed)) {
+      this->reportAllocationOverflow();
+      return false;
+    }
+    // BUG FIX: use <= — when nitems_ + needed == maxsize_ the capacity is
+    // exactly sufficient; the original's < forced a pointless reallocation.
+    if (nitems_ + needed <= maxsize_)
+      return true;
+
+    size_t new_maxsize = maxsize_ ? maxsize_ : 8;
+    while (nitems_ + needed > new_maxsize) {
+      if (!IsUintPtrMultiplySafe(new_maxsize, 2)) {
+        this->reportAllocationOverflow();
+        return false;
+      }
+      new_maxsize *= 2;
+    }
+
+    T* newdata = (T*)this->malloc(sizeof(T) * new_maxsize);
+    if (newdata == NULL)
+      return false;
+    for (size_t i = 0; i < nitems_; i++) {
+      new (&newdata[i]) T(Moveable<T>(data_[i]));
+      data_[i].~T();
+    }
+    this->free(data_);
+
+    data_ = newdata;
+    maxsize_ = new_maxsize;
+    return true;
+  }
+
+ private:
+  T* data_;
+  size_t nitems_;
+  size_t maxsize_;
+};
+
+}
+
+#endif /* _INCLUDE_KEIMA_TPL_CPP_VECTOR_H_ */