update macos/ios to Filament v1.51.2

This commit is contained in:
Nick Fisher
2024-04-20 13:46:58 +08:00
parent 15882891e2
commit ea04e94c1f
156 changed files with 5394 additions and 5884 deletions

View File

@@ -17,12 +17,10 @@
#ifndef TNT_UTILS_ALLOCATOR_H
#define TNT_UTILS_ALLOCATOR_H
#include <utils/compiler.h>
#include <utils/debug.h>
#include <utils/memalign.h>
#include <utils/Mutex.h>
#include <utils/SpinLock.h>
#include <atomic>
#include <cstddef>
@@ -31,6 +29,8 @@
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <vector>
namespace utils {
@@ -44,14 +44,14 @@ static inline P* add(P* a, T b) noexcept {
template <typename P>
static inline P* align(P* p, size_t alignment) noexcept {
// alignment must be a power-of-two
assert(alignment && !(alignment & alignment-1));
assert_invariant(alignment && !(alignment & alignment-1));
return (P*)((uintptr_t(p) + alignment - 1) & ~(alignment - 1));
}
template <typename P>
static inline P* align(P* p, size_t alignment, size_t offset) noexcept {
P* const r = align(add(p, offset), alignment);
assert(r >= add(p, offset));
assert_invariant(r >= add(p, offset));
return r;
}
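A quick standalone sketch (not part of the diff) of the rounding trick used by align() above: adding alignment - 1 and masking off the low bits rounds an address up to the next power-of-two boundary.
#include <cassert>
#include <cstdint>
int main() {
    uintptr_t const alignment = 16;        // must be a power of two
    uintptr_t const p = 0x1003;
    uintptr_t const aligned = (p + alignment - 1) & ~(alignment - 1);
    assert(aligned == 0x1010);                  // rounded up to a 16-byte boundary
    assert((aligned & (alignment - 1)) == 0);   // low bits are now clear
    return 0;
}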
@@ -90,20 +90,19 @@ public:
// branch-less allocation
void* const p = pointermath::align(current(), alignment, extra);
void* const c = pointermath::add(p, size);
bool success = c <= end();
bool const success = c <= end();
set_current(success ? c : current());
return success ? p : nullptr;
}
// API specific to this allocator
void *getCurrent() UTILS_RESTRICT noexcept {
return current();
}
// free memory back to the specified point
void rewind(void* p) UTILS_RESTRICT noexcept {
assert(p>=mBegin && p<end());
assert_invariant(p >= mBegin && p < end());
set_current(p);
}
@@ -123,16 +122,21 @@ public:
void swap(LinearAllocator& rhs) noexcept;
void *base() noexcept { return mBegin; }
void const *base() const noexcept { return mBegin; }
void free(void*, size_t) UTILS_RESTRICT noexcept { }
private:
protected:
void* end() UTILS_RESTRICT noexcept { return pointermath::add(mBegin, mSize); }
void const* end() const UTILS_RESTRICT noexcept { return pointermath::add(mBegin, mSize); }
void* current() UTILS_RESTRICT noexcept { return pointermath::add(mBegin, mCur); }
void const* current() const UTILS_RESTRICT noexcept { return pointermath::add(mBegin, mCur); }
private:
void set_current(void* p) UTILS_RESTRICT noexcept {
mCur = uint32_t(uintptr_t(p) - uintptr_t(mBegin));
}
void* mBegin = nullptr;
uint32_t mSize = 0;
uint32_t mCur = 0;
@@ -153,9 +157,7 @@ public:
explicit HeapAllocator(const AREA&) { }
// our allocator concept
void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) {
// this allocator doesn't support 'extra'
assert(extra == 0);
void* alloc(size_t size, size_t alignment = alignof(std::max_align_t)) {
return aligned_alloc(size, alignment);
}
@@ -172,6 +174,50 @@ public:
void swap(HeapAllocator&) noexcept { }
};
/* ------------------------------------------------------------------------------------------------
* LinearAllocatorWithFallback
*
* This is a LinearAllocator that falls back to a HeapAllocator when an allocation fails. The
* heap-allocated memory is freed only when the LinearAllocator is reset or destroyed.
* ------------------------------------------------------------------------------------------------
*/
class LinearAllocatorWithFallback : private LinearAllocator, private HeapAllocator {
std::vector<void*> mHeapAllocations;
public:
LinearAllocatorWithFallback(void* begin, void* end) noexcept
: LinearAllocator(begin, end) {
}
template <typename AREA>
explicit LinearAllocatorWithFallback(const AREA& area)
: LinearAllocatorWithFallback(area.begin(), area.end()) {
}
~LinearAllocatorWithFallback() noexcept {
LinearAllocatorWithFallback::reset();
}
void* alloc(size_t size, size_t alignment = alignof(std::max_align_t));
void *getCurrent() noexcept {
return LinearAllocator::getCurrent();
}
void rewind(void* p) noexcept {
if (p >= LinearAllocator::base() && p < LinearAllocator::end()) {
LinearAllocator::rewind(p);
}
}
void reset() noexcept;
void free(void*, size_t) noexcept { }
bool isHeapAllocation(void* p) const noexcept {
return p < LinearAllocator::base() || p >= LinearAllocator::end();
}
};
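A minimal usage sketch of the new class, assuming nothing beyond what is declared above (alloc() is defined out of line; the buffer size here is arbitrary): allocations come linearly from the arena until it is exhausted, then transparently from the heap, and isHeapAllocation() tells the two apart.
#include <cassert>
#include <utils/Allocator.h>
void demo() {
    char buffer[256];
    utils::LinearAllocatorWithFallback allocator(buffer, buffer + sizeof(buffer));
    void* a = allocator.alloc(64);     // fits: served from the linear arena
    void* b = allocator.alloc(4096);   // doesn't fit: falls back to the heap
    assert(!allocator.isHeapAllocation(a));
    assert(allocator.isHeapAllocation(b));
    allocator.reset();   // rewinds the arena and frees the heap allocations
}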
// ------------------------------------------------------------------------------------------------
class FreeList {
@@ -187,13 +233,13 @@ public:
Node* const head = mHead;
mHead = head ? head->next : nullptr;
// this could indicate a use after free
assert(!mHead || mHead >= mBegin && mHead < mEnd);
assert_invariant(!mHead || mHead >= mBegin && mHead < mEnd);
return head;
}
void push(void* p) noexcept {
assert(p);
assert(p >= mBegin && p < mEnd);
assert_invariant(p);
assert_invariant(p >= mBegin && p < mEnd);
// TODO: assert this is one of our pointers (i.e.: its address matches one of ours)
Node* const head = static_cast<Node*>(p);
head->next = mHead;
@@ -204,11 +250,11 @@ public:
return mHead;
}
private:
struct Node {
Node* next;
};
private:
static Node* init(void* begin, void* end,
size_t elementSize, size_t alignment, size_t extra) noexcept;
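For context, a standalone sketch of the intrusive scheme push()/pop() rely on above: the storage of a free element doubles as its Node, so the list costs no memory beyond the arena itself.
// Sketch only: a free block's first word is reinterpreted as the link.
struct Node { Node* next; };
Node* head = nullptr;
void push(void* p) {                    // return a block to the list
    Node* const n = static_cast<Node*>(p);
    n->next = head;
    head = n;
}
void* pop() {                           // take a block from the list
    Node* const n = head;
    head = n ? n->next : nullptr;
    return n;                           // nullptr when the list is empty
}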
@@ -226,20 +272,20 @@ public:
AtomicFreeList() noexcept = default;
AtomicFreeList(void* begin, void* end,
size_t elementSize, size_t alignment, size_t extra) noexcept;
AtomicFreeList(const FreeList& rhs) = delete;
AtomicFreeList& operator=(const FreeList& rhs) = delete;
AtomicFreeList(const AtomicFreeList& rhs) = delete;
AtomicFreeList& operator=(const AtomicFreeList& rhs) = delete;
void* pop() noexcept {
Node* const storage = mStorage;
Node* const pStorage = mStorage;
HeadPtr currentHead = mHead.load();
while (currentHead.offset >= 0) {
// The value of "next" we load here might already contain application data if another
// The value of "pNext" we load here might already contain application data if another
// thread raced ahead of us. But in that case, the computed "newHead" will be discarded
// since compare_exchange_weak fails. Then this thread will loop with the updated
// value of currentHead, and try again.
Node* const next = storage[currentHead.offset].next.load(std::memory_order_relaxed);
const HeadPtr newHead{ next ? int32_t(next - storage) : -1, currentHead.tag + 1 };
Node* const pNext = pStorage[currentHead.offset].next.load(std::memory_order_relaxed);
const HeadPtr newHead{ pNext ? int32_t(pNext - pStorage) : -1, currentHead.tag + 1 };
// In the rare case that the other thread that raced ahead of us already returned the
// same mHead we just loaded, but it now has a different "next" value, the tag field will not
// match, and compare_exchange_weak will fail and prevent that particular race condition.
@@ -247,18 +293,18 @@ public:
// This assert needs to occur after we have validated that there was no race condition
// Otherwise, next might already contain application data, if another thread
// raced ahead of us after we loaded mHead, but before we loaded mHead->next.
assert(!next || next >= storage);
assert_invariant(!pNext || pNext >= pStorage);
break;
}
}
void* p = (currentHead.offset >= 0) ? (storage + currentHead.offset) : nullptr;
assert(!p || p >= storage);
void* p = (currentHead.offset >= 0) ? (pStorage + currentHead.offset) : nullptr;
assert_invariant(!p || p >= pStorage);
return p;
}
void push(void* p) noexcept {
Node* const storage = mStorage;
assert(p && p >= storage);
assert_invariant(p && p >= storage);
Node* const node = static_cast<Node*>(p);
HeadPtr currentHead = mHead.load();
HeadPtr newHead = { int32_t(node - storage), currentHead.tag + 1 };
@@ -273,7 +319,6 @@ public:
return mStorage + mHead.load(std::memory_order_relaxed).offset;
}
private:
struct Node {
// This should be a regular (non-atomic) pointer, but this causes TSAN to complain
// about a data-race that exists but is benign. We always use this atomic<> in
@@ -304,6 +349,7 @@ private:
std::atomic<Node*> next;
};
private:
// This struct is using a 32-bit offset into the arena rather than
// a direct pointer, because together with the 32-bit tag, it needs to
// fit into 8 bytes. If it was any larger, it would not be possible to
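The comment above is cut off at the hunk boundary; the constraint it describes is that offset and tag must share one 8-byte word so the head can be replaced with a single atomic compare-exchange. A standalone sketch of the idea (field names follow the comment; the real layout lives in the implementation):
#include <atomic>
#include <cstdint>
struct HeadPtr {
    int32_t  offset;   // index into the arena, -1 when the list is empty
    uint32_t tag;      // bumped on every update, defeating the ABA problem
};
static_assert(sizeof(HeadPtr) == 8, "must fit one CAS-able word");
std::atomic<HeadPtr> head{ HeadPtr{ -1, 0 } };
bool tryReplace(HeadPtr desired) {
    HeadPtr expected = head.load();
    // one 8-byte compare-exchange swaps offset and tag together, so a
    // stale offset with a stale tag can never be mistaken for current
    return head.compare_exchange_weak(expected, desired);
}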
@@ -326,14 +372,15 @@ template <
size_t OFFSET = 0,
typename FREELIST = FreeList>
class PoolAllocator {
static_assert(ELEMENT_SIZE >= sizeof(void*), "ELEMENT_SIZE must accommodate at least a pointer");
static_assert(ELEMENT_SIZE >= sizeof(typename FREELIST::Node),
"ELEMENT_SIZE must accommodate at least a FreeList::Node");
public:
// our allocator concept
void* alloc(size_t size = ELEMENT_SIZE,
size_t alignment = ALIGNMENT, size_t offset = OFFSET) noexcept {
assert(size <= ELEMENT_SIZE);
assert(alignment <= ALIGNMENT);
assert(offset == OFFSET);
assert_invariant(size <= ELEMENT_SIZE);
assert_invariant(alignment <= ALIGNMENT);
assert_invariant(offset == OFFSET);
return mFreeList.pop();
}
@@ -347,7 +394,11 @@ public:
: mFreeList(begin, end, ELEMENT_SIZE, ALIGNMENT, OFFSET) {
}
template <typename AREA>
PoolAllocator(void* begin, size_t size) noexcept
: PoolAllocator(begin, static_cast<char *>(begin) + size) {
}
template<typename AREA>
explicit PoolAllocator(const AREA& area) noexcept
: PoolAllocator(area.begin(), area.end()) {
}
@@ -373,6 +424,53 @@ private:
FREELIST mFreeList;
};
template <
size_t ELEMENT_SIZE,
size_t ALIGNMENT = alignof(std::max_align_t),
typename FREELIST = FreeList>
class PoolAllocatorWithFallback :
private PoolAllocator<ELEMENT_SIZE, ALIGNMENT, 0, FREELIST>,
private HeapAllocator {
using PoolAllocator = PoolAllocator<ELEMENT_SIZE, ALIGNMENT, 0, FREELIST>;
void* mBegin;
void* mEnd;
public:
PoolAllocatorWithFallback(void* begin, void* end) noexcept
: PoolAllocator(begin, end), mBegin(begin), mEnd(end) {
}
PoolAllocatorWithFallback(void* begin, size_t size) noexcept
: PoolAllocatorWithFallback(begin, static_cast<char*>(begin) + size) {
}
template<typename AREA>
explicit PoolAllocatorWithFallback(const AREA& area) noexcept
: PoolAllocatorWithFallback(area.begin(), area.end()) {
}
bool isHeapAllocation(void* p) const noexcept {
return p < mBegin || p >= mEnd;
}
// our allocator concept
void* alloc(size_t size = ELEMENT_SIZE, size_t alignment = ALIGNMENT) noexcept {
void* p = PoolAllocator::alloc(size, alignment);
if (UTILS_UNLIKELY(!p)) {
p = HeapAllocator::alloc(size, alignment);
}
assert_invariant(p);
return p;
}
void free(void* p, size_t size) noexcept {
if (UTILS_LIKELY(!isHeapAllocation(p))) {
PoolAllocator::free(p, size);
} else {
HeapAllocator::free(p);
}
}
};
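A usage sketch for the new pool-with-fallback (element size, alignment, and slot count are arbitrary choices for illustration):
#include <utils/Allocator.h>
void demo() {
    // a pool of eight 64-byte slots backed by a static area
    alignas(16) static char area[64 * 8];
    utils::PoolAllocatorWithFallback<64, 16> pool(area, sizeof(area));
    void* p = pool.alloc();     // from the pool while slots remain; once the
                                // pool is exhausted, silently from the heap
    pool.free(p, 64);           // free() routes back to pool or heap itself
}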
#define UTILS_MAX(a,b) ((a) > (b) ? (a) : (b))
template <typename T, size_t OFFSET = 0>
@@ -478,7 +576,6 @@ struct NoLock {
void unlock() noexcept { }
};
using SpinLock = utils::SpinLock;
using Mutex = utils::Mutex;
} // namespace LockingPolicy
@@ -587,32 +684,54 @@ public:
mListener(name, mArea.data(), mArea.size()) {
}
template<typename ... ARGS>
void* alloc(size_t size, size_t alignment, size_t extra, ARGS&& ... args) noexcept {
std::lock_guard<LockingPolicy> guard(mLock);
void* p = mAllocator.alloc(size, alignment, extra, std::forward<ARGS>(args) ...);
mListener.onAlloc(p, size, alignment, extra);
return p;
}
// allocate memory from arena with given size and alignment
// (acceptable size/alignment may depend on the allocator provided)
void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) noexcept {
void* alloc(size_t size, size_t alignment, size_t extra) noexcept {
std::lock_guard<LockingPolicy> guard(mLock);
void* p = mAllocator.alloc(size, alignment, extra);
mListener.onAlloc(p, size, alignment, extra);
return p;
}
void* alloc(size_t size, size_t alignment = alignof(std::max_align_t)) noexcept {
std::lock_guard<LockingPolicy> guard(mLock);
void* p = mAllocator.alloc(size, alignment);
mListener.onAlloc(p, size, alignment, 0);
return p;
}
// Allocate an array of trivially destructible objects
// for safety, we disable the object-based alloc method if the object type is not
// trivially destructible, since free() won't call the destructor and this is allocating
// an array.
template <typename T,
typename = typename std::enable_if<std::is_trivially_destructible<T>::value>::type>
T* alloc(size_t count, size_t alignment = alignof(T), size_t extra = 0) noexcept {
T* alloc(size_t count, size_t alignment, size_t extra) noexcept {
return (T*)alloc(count * sizeof(T), alignment, extra);
}
// return memory pointed by p to the arena
// (actual behaviour may depend on allocator provided)
void free(void* p) noexcept {
template <typename T,
typename = typename std::enable_if<std::is_trivially_destructible<T>::value>::type>
T* alloc(size_t count, size_t alignment = alignof(T)) noexcept {
return (T*)alloc(count * sizeof(T), alignment);
}
// some allocators require more parameters
template<typename ... ARGS>
void free(void* p, size_t size, ARGS&& ... args) noexcept {
if (p) {
std::lock_guard<LockingPolicy> guard(mLock);
mListener.onFree(p);
mAllocator.free(p);
mListener.onFree(p, size);
mAllocator.free(p, size, std::forward<ARGS>(args) ...);
}
}
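For orientation, a sketch exercising the overloads above, assuming an arena of some Arena<...> instantiation (called ArenaT here) already constructed elsewhere:
void demo(ArenaT& arena) {
    void* raw  = arena.alloc(256);      // max_align_t alignment, no 'extra'
    int*  ints = arena.alloc<int>(16);  // typed overload: 16 ints, alignof(int)
    arena.free(raw, 256);               // forwarded to the allocator's free()
}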
@@ -625,6 +744,16 @@ public:
}
}
// return memory pointed by p to the arena
// (actual behaviour may depend on allocator provided)
void free(void* p) noexcept {
if (p) {
std::lock_guard<LockingPolicy> guard(mLock);
mListener.onFree(p);
mAllocator.free(p);
}
}
// some allocators don't have a free() call, but a single reset() or rewind() instead
void reset() noexcept {
std::lock_guard<LockingPolicy> guard(mLock);
@@ -722,6 +851,8 @@ class ArenaScope {
}
public:
using Arena = ARENA;
explicit ArenaScope(ARENA& allocator)
: mArena(allocator), mRewind(allocator.getCurrent()) {
}
@@ -773,7 +904,7 @@ public:
}
// use with caution
ARENA& getAllocator() noexcept { return mArena; }
ARENA& getArena() noexcept { return mArena; }
private:
ARENA& mArena;
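ArenaScope records the arena's watermark on construction (via getCurrent() above) and, in a destructor outside this hunk, rewinds to it. A sketch of the intended pattern, again assuming some arena type ArenaT:
void perFrame(ArenaT& arena) {
    utils::ArenaScope<ArenaT> scope(arena);       // records the watermark
    void* scratch = scope.getArena().alloc(512);  // note the renamed accessor
    // ... scratch is valid only inside this scope ...
}   // leaving the scope rewinds the arena, reclaiming everything in bulk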
@@ -800,7 +931,7 @@ public:
public:
// we don't make this explicit, so that we can initialize a vector using a STLAllocator
// from an Arena, avoiding to have to repeat the vector type.
// from an Arena, avoiding having to repeat the vector type.
STLAllocator(ARENA& arena) : mArena(arena) { } // NOLINT(google-explicit-constructor)
template<typename U>
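The non-explicit constructor is deliberate, as the comment says: it lets a container be initialized straight from an arena. A sketch (template-parameter order <TYPE, ARENA> assumed; ArenaT as in the earlier sketches):
#include <vector>
using IntVector = std::vector<int, utils::STLAllocator<int, ArenaT>>;
IntVector makeVector(ArenaT& arena) {
    IntVector v(arena);   // arena converts implicitly to the allocator,
                          // so the vector type isn't repeated at the call site
    v.push_back(42);      // element storage now comes from the arena
    return v;
}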

View File

@@ -17,13 +17,10 @@
#ifndef TNT_UTILS_BITMASKENUM_H
#define TNT_UTILS_BITMASKENUM_H
#include <utils/compiler.h>
#include <type_traits> // for std::false_type
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {

View File

@@ -35,7 +35,7 @@ struct hashCStrings {
typedef size_t result_type;
result_type operator()(argument_type cstr) const noexcept {
size_t hash = 5381;
while (int c = *cstr++) {
while (int const c = *cstr++) {
hash = (hash * 33u) ^ size_t(c);
}
return hash;
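This is the classic djb2-style string hash (seed 5381, multiply by 33, folded here with XOR), which hashes a C string by content. A sketch pairing it with an equality functor (defined inline below for the example; the header may provide its own) so that char const* keys behave value-wise:
#include <cstring>
#include <unordered_map>
struct equalCStrings {
    bool operator()(const char* a, const char* b) const noexcept {
        return std::strcmp(a, b) == 0;
    }
};
int demo() {
    std::unordered_map<const char*, int,
                       utils::hashCStrings, equalCStrings> table;
    char keyA[] = "metallic";
    char keyB[] = "metallic";   // different pointer, identical content
    table[keyA] = 1;
    return table[keyB];         // 1: keys are hashed by content, not address
}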
@@ -192,8 +192,8 @@ private:
};
int compare(const CString& rhs) const noexcept {
size_type lhs_size = size();
size_type rhs_size = rhs.size();
size_type const lhs_size = size();
size_type const rhs_size = rhs.size();
if (lhs_size < rhs_size) return -1;
if (lhs_size > rhs_size) return 1;
return strncmp(data(), rhs.data(), size());
@@ -225,6 +225,28 @@ private:
template<typename T>
CString to_string(T value) noexcept;
// ------------------------------------------------------------------------------------------------
template <size_t N>
class UTILS_PUBLIC FixedSizeString {
public:
using value_type = char;
using pointer = value_type*;
using const_pointer = const value_type*;
static_assert(N > 0);
FixedSizeString() noexcept = default;
explicit FixedSizeString(const char* str) noexcept {
strncpy(mData, str, N - 1); // leave room for the null terminator
}
const_pointer c_str() const noexcept { return mData; }
pointer c_str() noexcept { return mData; }
private:
value_type mData[N] = {0};
};
} // namespace utils
#endif // TNT_UTILS_CSTRING_H
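A small sketch of the new FixedSizeString; its behavior follows from the strncpy above, which copies at most N - 1 characters and leaves the zero-initialized terminator intact:
#include <cstdio>
#include <utils/CString.h>
int main() {
    utils::FixedSizeString<8> s("roughness");  // longer than N - 1: truncated
    std::printf("%s\n", s.c_str());            // prints "roughne" (7 chars + NUL)
    return 0;
}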

View File

@@ -22,7 +22,8 @@
#include <typeinfo>
#include <utils/CString.h>
#include <utils/Log.h>
#include <utils/compiler.h>
#include <utils/ostream.h>
namespace utils {

View File

@@ -23,7 +23,6 @@
#include <stdint.h>
namespace utils {
class UTILS_PUBLIC EntityInstanceBase {
@@ -77,7 +76,7 @@ public:
// return a value for this Instance (mostly needed for debugging)
constexpr uint32_t asValue() const noexcept { return mInstance; }
// auto convert to Type so it can be used as an index
// auto convert to Type, so it can be used as an index
constexpr operator Type() const noexcept { return mInstance; } // NOLINT(google-explicit-constructor)
// conversion from Type so we can initialize from an index

View File

@@ -17,12 +17,13 @@
#ifndef TNT_UTILS_ENTITYMANAGER_H
#define TNT_UTILS_ENTITYMANAGER_H
#include <assert.h>
#include <stdint.h>
#include <utils/Entity.h>
#include <utils/compiler.h>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#ifndef FILAMENT_UTILS_TRACK_ENTITIES
#define FILAMENT_UTILS_TRACK_ENTITIES false
#endif
@@ -44,23 +45,25 @@ public:
public:
virtual void onEntitiesDestroyed(size_t n, Entity const* entities) noexcept = 0;
protected:
~Listener() noexcept;
virtual ~Listener() noexcept;
};
// maximum number of entities that can exist at the same time
static size_t getMaxEntityCount() noexcept {
// because index 0 is reserved, we only have 2^GENERATION_SHIFT - 1 valid indices
return RAW_INDEX_COUNT - 1;
}
// create n entities. Thread safe.
// number of active Entities
size_t getEntityCount() const noexcept;
// Create n entities. Thread safe.
void create(size_t n, Entity* entities);
// destroys n entities. Thread safe.
void destroy(size_t n, Entity* entities) noexcept;
// create a new Entity. Thread safe.
// Create a new Entity. Thread safe.
// Return Entity.isNull() if the entity cannot be allocated.
Entity create() {
Entity e;
@@ -68,20 +71,20 @@ public:
return e;
}
// destroys an Entity. Thread safe.
// Destroys an Entity. Thread safe.
void destroy(Entity e) noexcept {
destroy(1, &e);
}
// return whether the given Entity has been destroyed (false) or not (true).
// Return whether the given Entity has been destroyed (false) or not (true).
// Thread safe.
bool isAlive(Entity e) const noexcept {
assert(getIndex(e) < RAW_INDEX_COUNT);
return (!e.isNull()) && (getGeneration(e) == mGens[getIndex(e)]);
}
// registers a listener to be called when an entity is destroyed. thread safe.
// if the listener is already register, this method has no effect.
// Registers a listener to be called when an entity is destroyed. Thread safe.
// If the listener is already registered, this method has no effect.
void registerListener(Listener* l) noexcept;
// unregisters a listener.
@@ -94,6 +97,7 @@ public:
uint8_t getGenerationForIndex(size_t index) const noexcept {
return mGens[index];
}
// singleton, can't be copied
EntityManager(const EntityManager& rhs) = delete;
EntityManager& operator=(const EntityManager& rhs) = delete;
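isAlive() works because each Entity packs a slot index and a generation; destroy() bumps the slot's generation in mGens, so every outstanding copy of the handle silently becomes stale. A standalone sketch of the scheme (widths are illustrative, not the header's actual GENERATION_SHIFT split):
#include <cstdint>
#include <vector>
struct Handle { uint32_t index; uint8_t generation; };
std::vector<uint8_t> gens(1024, 0);   // one generation counter per slot
bool isAlive(Handle h) { return gens[h.index] == h.generation; }
void destroy(Handle h) { ++gens[h.index]; }  // invalidates all copies of h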

View File

@@ -17,16 +17,18 @@
#ifndef TNT_UTILS_FIXEDCAPACITYVECTOR_H
#define TNT_UTILS_FIXEDCAPACITYVECTOR_H
#include <utils/compiler.h>
#include <utils/compressed_pair.h>
#include <utils/Panic.h>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector> // TODO: is this necessary?
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

View File

@@ -24,7 +24,6 @@
#include <utils/SingleInstanceComponentManager.h>
#include <stddef.h>
#include <stdint.h>
namespace utils {
@@ -48,7 +47,7 @@ class EntityManager;
* printf("%s\n", names->getName(names->getInstance(myEntity)));
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
class UTILS_PUBLIC NameComponentManager : public SingleInstanceComponentManager<utils::CString> {
class UTILS_PUBLIC NameComponentManager : private SingleInstanceComponentManager<utils::CString> {
public:
using Instance = EntityInstance<NameComponentManager>;
@@ -75,15 +74,6 @@ public:
return { SingleInstanceComponentManager::getInstance(e) };
}
/*! \cond PRIVATE */
// these are implemented in SingleInstanceComponentManager<>, but we need to
// reimplement them in each manager, to ensure they are generated in an implementation file
// for backward binary compatibility reasons.
size_t getComponentCount() const noexcept;
Entity const* getEntities() const noexcept;
void gc(const EntityManager& em, size_t ratio = 4) noexcept;
/*! \endcond */
/**
* Adds a name component to the given entity if it doesn't already exist.
*/
@@ -105,6 +95,12 @@ public:
* @return pointer to the copy that was made during setName()
*/
const char* getName(Instance instance) const noexcept;
void gc(EntityManager& em) noexcept {
SingleInstanceComponentManager<utils::CString>::gc(em, [this](Entity e) {
removeComponent(e);
});
}
};
} // namespace utils

View File

@@ -17,11 +17,11 @@
#ifndef TNT_UTILS_PANIC_H
#define TNT_UTILS_PANIC_H
#include <string>
#include <utils/CallStack.h>
#include <utils/compiler.h>
#include <string>
#ifdef __EXCEPTIONS
# define UTILS_EXCEPTIONS 1
#else

View File

@@ -17,8 +17,6 @@
#ifndef UTILS_PRIVATEIMPLEMENTATION_H
#define UTILS_PRIVATEIMPLEMENTATION_H
#include <utils/compiler.h>
#include <stddef.h>
namespace utils {

View File

@@ -98,7 +98,7 @@ public:
return pos != map.end() ? pos->second : 0;
}
// returns the number of components (i.e. size of each arrays)
// Returns the number of components (i.e. size of each array)
size_t getComponentCount() const noexcept {
// The array has an extra dummy component at index 0, so the visible count is 1 less.
return mData.size() - 1;
@@ -108,11 +108,8 @@ public:
return getComponentCount() == 0;
}
// returns a pointer to the Entity array. This is basically the list
// of entities this component manager handles.
// The pointer becomes invalid when adding or removing a component.
Entity const* getEntities() const noexcept {
return begin<ENTITY_INDEX>();
utils::Entity const* getEntities() const noexcept {
return data<ENTITY_INDEX>() + 1;
}
Entity getEntity(Instance i) const noexcept {
@@ -128,14 +125,6 @@ public:
// This invalidates all pointers components.
inline Instance removeComponent(Entity e);
// trigger one round of garbage collection. This is intended to be called on a regular
// basis. This gc gives up after it finds 'ratio' components in a row that it cannot free.
void gc(const EntityManager& em, size_t ratio = 4) noexcept {
gc(em, ratio, [this](Entity e) {
removeComponent(e);
});
}
// return the first instance
Instance begin() const noexcept { return 1u; }
@@ -234,24 +223,33 @@ protected:
}
}
template<typename REMOVE>
void gc(const EntityManager& em,
REMOVE&& removeComponent) noexcept {
gc(em, 4, std::forward<REMOVE>(removeComponent));
}
template<typename REMOVE>
void gc(const EntityManager& em, size_t ratio,
REMOVE removeComponent) noexcept {
Entity const* entities = getEntities();
REMOVE&& removeComponent) noexcept {
Entity const* const pEntities = begin<ENTITY_INDEX>();
size_t count = getComponentCount();
size_t aliveInARow = 0;
default_random_engine& rng = mRng;
UTILS_NOUNROLL
while (count && aliveInARow < ratio) {
assert_invariant(count == getComponentCount());
// note: using the modulo favors lower numbers
size_t i = rng() % count;
if (UTILS_LIKELY(em.isAlive(entities[i]))) {
size_t const i = rng() % count;
Entity const entity = pEntities[i];
assert_invariant(entity);
if (UTILS_LIKELY(em.isAlive(entity))) {
++aliveInARow;
continue;
}
removeComponent(entity);
aliveInARow = 0;
count--;
removeComponent(entities[i]);
}
}

View File

@@ -1,90 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_SPINLOCK_H
#define TNT_UTILS_SPINLOCK_H
#include <utils/compiler.h>
#include <utils/Mutex.h>
#include <atomic>
#include <type_traits>
#include <assert.h>
#include <stddef.h>
namespace utils {
namespace details {
class SpinLock {
std::atomic_flag mLock = ATOMIC_FLAG_INIT;
public:
void lock() noexcept {
UTILS_PREFETCHW(&mLock);
#ifdef __ARM_ACLE
// we signal an event on this CPU, so that the first yield() will be a no-op,
// and control falls through to the test_and_set(). This is more efficient than a while { }
// construct.
UTILS_SIGNAL_EVENT();
do {
yield();
} while (mLock.test_and_set(std::memory_order_acquire));
#else
goto start;
do {
yield();
start: ;
} while (mLock.test_and_set(std::memory_order_acquire));
#endif
}
void unlock() noexcept {
mLock.clear(std::memory_order_release);
#ifdef __ARM_ARCH_7A__
// on ARMv7a, SEV is needed
UTILS_SIGNAL_EVENT();
// as well as a memory barrier
__dsb(0xA); // ISHST = 0xA (b1010)
#else
// on ARMv8 we could avoid the call to SEV, but we'd need to write the
// test_and_set() above by hand, so the WFE only happens without a STRX first.
UTILS_BROADCAST_EVENT();
#endif
}
private:
inline void yield() noexcept {
// on x86 call pause instruction, on ARM call WFE
UTILS_WAIT_FOR_EVENT();
}
};
} // namespace details
#if UTILS_HAS_SANITIZE_THREAD
// Active spins with atomics slow down execution too much under ThreadSanitizer.
using SpinLock = Mutex;
#elif defined(__ARM_ARCH_7A__)
// We've had problems with "wfe" on some ARM-V7 devices, causing spurious SIGILL
using SpinLock = Mutex;
#else
using SpinLock = details::SpinLock;
#endif
} // namespace utils
#endif // TNT_UTILS_SPINLOCK_H

View File

@@ -17,8 +17,10 @@
#ifndef TNT_UTILS_STRUCTUREOFARRAYS_H
#define TNT_UTILS_STRUCTUREOFARRAYS_H
#include <type_traits>
#include <utils/Allocator.h>
#include <utils/compiler.h>
#include <utils/debug.h>
#include <utils/Slice.h>
#include <stddef.h>
@@ -352,33 +354,55 @@ public:
return push_back_unsafe(std::forward<Elements>(args)...);
}
// in C++20 we could use a lambda with explicit template parameter instead
struct PushBackUnsafeClosure {
size_t last;
std::tuple<Elements...> args;
inline explicit PushBackUnsafeClosure(size_t last, Structure&& args)
: last(last), args(std::forward<Structure>(args)) {}
template<size_t I>
inline void operator()(TypeAt<I>* p) {
new(p + last) TypeAt<I>{ std::get<I>(args) };
}
};
template <std::size_t... Indices>
struct ElementIndices {};
template <std::size_t N, std::size_t... Indices>
struct BuildElementIndices : BuildElementIndices<N - 1, N - 1, Indices...> {};
template <std::size_t... Indices>
struct BuildElementIndices<0, Indices...> : ElementIndices<Indices...> {};
template<std::size_t... Indices>
void push_back_unsafe(Structure&& args, ElementIndices<Indices...>){
size_t last = mSize++;
// Fold expression on the comma operator
([&]{
new(std::get<Indices>(mArrays) + last) Elements{std::get<Indices>(args)};
}() , ...);
}
template<std::size_t... Indices>
void push_back_unsafe(Elements const& ... args, ElementIndices<Indices...>){
size_t last = mSize++;
// Fold expression on the comma operator
([&]{
new(std::get<Indices>(mArrays) + last) Elements{args};
}() , ...);
}
template<std::size_t... Indices>
void push_back_unsafe(Elements && ... args, ElementIndices<Indices...>){
size_t last = mSize++;
// Fold expression on the comma operator
([&]{
new(std::get<Indices>(mArrays) + last) Elements{std::forward<Elements>(args)};
}() , ...);
}
StructureOfArraysBase& push_back_unsafe(Structure&& args) noexcept {
for_each_index(mArrays,
PushBackUnsafeClosure{ mSize++, std::forward<Structure>(args) });
push_back_unsafe(std::forward<Structure>(args), BuildElementIndices<sizeof...(Elements)>{});
return *this;
}
StructureOfArraysBase& push_back_unsafe(Elements const& ... args) noexcept {
for_each_index(mArrays,
PushBackUnsafeClosure{ mSize++, { args... } });
push_back_unsafe(args..., BuildElementIndices<sizeof...(Elements)>{});
return *this;
}
StructureOfArraysBase& push_back_unsafe(Elements&& ... args) noexcept {
for_each_index(mArrays,
PushBackUnsafeClosure{ mSize++, { std::forward<Elements>(args)... }});
push_back_unsafe(std::forward<Elements>(args)..., BuildElementIndices<sizeof...(Elements)>{});
return *this;
}
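The replacement above swaps the hand-written PushBackUnsafeClosure for an index pack plus a fold over the comma operator; BuildElementIndices is a hand-rolled equivalent of std::make_index_sequence. The same technique in isolation:
#include <cstdio>
#include <tuple>
#include <utility>
template <typename... Ts, std::size_t... Is>
void printEach(std::tuple<Ts...> const& t, std::index_sequence<Is...>) {
    // fold over the comma operator: one lambda invocation per element
    ([&] { std::printf("%d\n", int(std::get<Is>(t))); }(), ...);
}
int main() {
    printEach(std::make_tuple(1, 2, 3), std::make_index_sequence<3>{});
    return 0;
}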
@@ -533,7 +557,7 @@ private:
}
inline void resizeNoCheck(size_t needed) noexcept {
assert(mCapacity >= needed);
assert_invariant(mCapacity >= needed);
if (needed < mSize) {
// we shrink the arrays
destroy_each(needed, mSize);

View File

@@ -1,242 +0,0 @@
/*
* Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ANDROID_SYSTRACE_H
#define TNT_UTILS_ANDROID_SYSTRACE_H
#include <atomic>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <utils/compiler.h>
// enable tracing
#define SYSTRACE_ENABLE() ::utils::details::Systrace::enable(SYSTRACE_TAG)
// disable tracing
#define SYSTRACE_DISABLE() ::utils::details::Systrace::disable(SYSTRACE_TAG)
/**
* Creates a Systrace context in the current scope. Needed for calling all other systrace
* commands below.
*/
#define SYSTRACE_CONTEXT() ::utils::details::Systrace ___trctx(SYSTRACE_TAG)
// SYSTRACE_NAME traces the beginning and end of the current scope. To trace
// the correct start and end times this macro should be declared first in the
// scope body.
// It also automatically creates a Systrace context
#define SYSTRACE_NAME(name) ::utils::details::ScopedTrace ___tracer(SYSTRACE_TAG, name)
// Denotes that a new frame has started processing.
#define SYSTRACE_FRAME_ID(frame) \
{ /* scope for frame id trace */ \
char buf[64]; \
snprintf(buf, 64, "frame %u", frame); \
SYSTRACE_NAME(buf); \
}
// SYSTRACE_CALL is a SYSTRACE_NAME that uses the current function name.
#define SYSTRACE_CALL() SYSTRACE_NAME(__FUNCTION__)
#define SYSTRACE_NAME_BEGIN(name) \
___trctx.traceBegin(SYSTRACE_TAG, name)
#define SYSTRACE_NAME_END() \
___trctx.traceEnd(SYSTRACE_TAG)
/**
* Trace the beginning of an asynchronous event. Unlike ATRACE_BEGIN/ATRACE_END
* contexts, asynchronous events do not need to be nested. The name describes
* the event, and the cookie provides a unique identifier for distinguishing
* simultaneous events. The name and cookie used to begin an event must be
* used to end it.
*/
#define SYSTRACE_ASYNC_BEGIN(name, cookie) \
___trctx.asyncBegin(SYSTRACE_TAG, name, cookie)
/**
* Trace the end of an asynchronous event.
* This should have a corresponding SYSTRACE_ASYNC_BEGIN.
*/
#define SYSTRACE_ASYNC_END(name, cookie) \
___trctx.asyncEnd(SYSTRACE_TAG, name, cookie)
/**
* Traces an integer counter value. name is used to identify the counter.
* This can be used to track how a value changes over time.
*/
#define SYSTRACE_VALUE32(name, val) \
___trctx.value(SYSTRACE_TAG, name, int32_t(val))
#define SYSTRACE_VALUE64(name, val) \
___trctx.value(SYSTRACE_TAG, name, int64_t(val))
// ------------------------------------------------------------------------------------------------
// No user-serviceable code below...
// ------------------------------------------------------------------------------------------------
namespace utils {
namespace details {
class Systrace {
public:
enum tags {
NEVER = SYSTRACE_TAG_NEVER,
ALWAYS = SYSTRACE_TAG_ALWAYS,
FILAMENT = SYSTRACE_TAG_FILAMENT,
JOBSYSTEM = SYSTRACE_TAG_JOBSYSTEM
// we could define more TAGS here, as we need them.
};
explicit Systrace(uint32_t tag) noexcept {
if (tag) init(tag);
}
static void enable(uint32_t tags) noexcept;
static void disable(uint32_t tags) noexcept;
inline void traceBegin(uint32_t tag, const char* name) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
beginSection(this, name);
}
}
inline void traceEnd(uint32_t tag) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
endSection(this);
}
}
inline void asyncBegin(uint32_t tag, const char* name, int32_t cookie) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
beginAsyncSection(this, name, cookie);
}
}
inline void asyncEnd(uint32_t tag, const char* name, int32_t cookie) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
endAsyncSection(this, name, cookie);
}
}
inline void value(uint32_t tag, const char* name, int32_t value) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
setCounter(this, name, value);
}
}
inline void value(uint32_t tag, const char* name, int64_t value) noexcept {
if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
setCounter(this, name, value);
}
}
private:
friend class ScopedTrace;
// whether tracing is supported at all by the platform
using ATrace_isEnabled_t = bool (*)();
using ATrace_beginSection_t = void (*)(const char* sectionName);
using ATrace_endSection_t = void (*)();
using ATrace_beginAsyncSection_t = void (*)(const char* sectionName, int32_t cookie);
using ATrace_endAsyncSection_t = void (*)(const char* sectionName, int32_t cookie);
using ATrace_setCounter_t = void (*)(const char* counterName, int64_t counterValue);
struct GlobalState {
bool isTracingAvailable;
std::atomic<uint32_t> isTracingEnabled;
int markerFd;
ATrace_isEnabled_t ATrace_isEnabled;
ATrace_beginSection_t ATrace_beginSection;
ATrace_endSection_t ATrace_endSection;
ATrace_beginAsyncSection_t ATrace_beginAsyncSection;
ATrace_endAsyncSection_t ATrace_endAsyncSection;
ATrace_setCounter_t ATrace_setCounter;
void (*beginSection)(Systrace* that, const char* name);
void (*endSection)(Systrace* that);
void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*setCounter)(Systrace* that, const char* name, int64_t value);
};
static GlobalState sGlobalState;
// per-instance versions for better performance
ATrace_isEnabled_t ATrace_isEnabled;
ATrace_beginSection_t ATrace_beginSection;
ATrace_endSection_t ATrace_endSection;
ATrace_beginAsyncSection_t ATrace_beginAsyncSection;
ATrace_endAsyncSection_t ATrace_endAsyncSection;
ATrace_setCounter_t ATrace_setCounter;
void (*beginSection)(Systrace* that, const char* name);
void (*endSection)(Systrace* that);
void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie);
void (*setCounter)(Systrace* that, const char* name, int64_t value);
void init(uint32_t tag) noexcept;
// cached values for faster access, no need to be initialized
bool mIsTracingEnabled;
int mMarkerFd = -1;
pid_t mPid;
static void setup() noexcept;
static void init_once() noexcept;
static bool isTracingEnabled(uint32_t tag) noexcept;
static void begin_body(int fd, int pid, const char* name) noexcept;
static void end_body(int fd, int pid) noexcept;
static void async_begin_body(int fd, int pid, const char* name, int32_t cookie) noexcept;
static void async_end_body(int fd, int pid, const char* name, int32_t cookie) noexcept;
static void int64_body(int fd, int pid, const char* name, int64_t value) noexcept;
};
// ------------------------------------------------------------------------------------------------
class ScopedTrace {
public:
// we don't inline this because it's relatively heavy due to a global check
ScopedTrace(uint32_t tag, const char* name) noexcept: mTrace(tag), mTag(tag) {
mTrace.traceBegin(tag, name);
}
inline ~ScopedTrace() noexcept {
mTrace.traceEnd(mTag);
}
private:
Systrace mTrace;
const uint32_t mTag;
};
} // namespace details
} // namespace utils
#endif // TNT_UTILS_ANDROID_SYSTRACE_H

View File

@@ -1,60 +0,0 @@
/*
* Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_ANDROID_THERMALMANAGER_H
#define TNT_UTILS_ANDROID_THERMALMANAGER_H
#include <utils/compiler.h>
#include <stdint.h>
struct AThermalManager;
namespace utils {
class ThermalManager {
public:
enum class ThermalStatus : int8_t {
ERROR = -1,
NONE,
LIGHT,
MODERATE,
SEVERE,
CRITICAL,
EMERGENCY,
SHUTDOWN
};
ThermalManager();
~ThermalManager();
// Movable
ThermalManager(ThermalManager&& rhs) noexcept;
ThermalManager& operator=(ThermalManager&& rhs) noexcept;
// not copiable
ThermalManager(ThermalManager const& rhs) = delete;
ThermalManager& operator=(ThermalManager const& rhs) = delete;
ThermalStatus getCurrentThermalStatus() const noexcept;
private:
AThermalManager* mThermalManager = nullptr;
};
} // namespace utils
#endif // TNT_UTILS_ANDROID_THERMALMANAGER_H

View File

@@ -179,6 +179,14 @@
# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 0
#endif
#if defined(__clang__)
#define UTILS_NONNULL _Nonnull
#define UTILS_NULLABLE _Nullable
#else
#define UTILS_NONNULL
#define UTILS_NULLABLE
#endif
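A sketch of how the new nullability annotations read at a declaration site; under Clang they expand to _Nonnull/_Nullable (feeding -Wnullability diagnostics), elsewhere to nothing. The signature below is illustrative only:
void setLabel(char const* UTILS_NONNULL name,      // must not be null
              char const* UTILS_NULLABLE suffix);  // may be null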
#if defined(_MSC_VER)
// MSVC does not support loop unrolling hints
# define UTILS_UNROLL

View File

@@ -1,123 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_LINUX_CONDITION_H
#define TNT_UTILS_LINUX_CONDITION_H
#include <atomic>
#include <chrono>
#include <condition_variable> // for cv_status
#include <limits>
#include <mutex> // for unique_lock
#include <utils/linux/Mutex.h>
#include <time.h>
namespace utils {
/*
* A very simple condition variable class that can be used as an (almost) drop-in replacement
* for std::condition_variable (doesn't have the timed wait() though).
* It is very low overhead as most of it is inlined.
*/
class Condition {
public:
Condition() noexcept = default;
Condition(const Condition&) = delete;
Condition& operator=(const Condition&) = delete;
void notify_all() noexcept {
pulse(std::numeric_limits<int>::max());
}
void notify_one() noexcept {
pulse(1);
}
void notify_n(size_t n) noexcept {
if (n > 0) pulse(n);
}
void wait(std::unique_lock<Mutex>& lock) noexcept {
wait_until(lock.mutex(), false, nullptr);
}
template <class P>
void wait(std::unique_lock<Mutex>& lock, P predicate) {
while (!predicate()) {
wait(lock);
}
}
template<typename D>
std::cv_status wait_until(std::unique_lock<Mutex>& lock,
const std::chrono::time_point<std::chrono::steady_clock, D>& timeout_time) noexcept {
// convert to nanoseconds
uint64_t ns = std::chrono::duration<uint64_t, std::nano>(timeout_time.time_since_epoch()).count();
using sec_t = decltype(timespec::tv_sec);
using nsec_t = decltype(timespec::tv_nsec);
timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) };
return wait_until(lock.mutex(), false, &ts);
}
template<typename D>
std::cv_status wait_until(std::unique_lock<Mutex>& lock,
const std::chrono::time_point<std::chrono::system_clock, D>& timeout_time) noexcept {
// convert to nanoseconds
uint64_t ns = std::chrono::duration<uint64_t, std::nano>(timeout_time.time_since_epoch()).count();
using sec_t = decltype(timespec::tv_sec);
using nsec_t = decltype(timespec::tv_nsec);
timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) };
return wait_until(lock.mutex(), true, &ts);
}
template<typename C, typename D, typename P>
bool wait_until(std::unique_lock<Mutex>& lock,
const std::chrono::time_point<C, D>& timeout_time, P predicate) noexcept {
while (!predicate()) {
if (wait_until(lock, timeout_time) == std::cv_status::timeout) {
return predicate();
}
}
return true;
}
template<typename R, typename Period>
std::cv_status wait_for(std::unique_lock<Mutex>& lock,
const std::chrono::duration<R, Period>& rel_time) noexcept {
return wait_until(lock, std::chrono::steady_clock::now() + rel_time);
}
template<typename R, typename Period, typename P>
bool wait_for(std::unique_lock<Mutex>& lock,
const std::chrono::duration<R, Period>& rel_time, P pred) noexcept {
return wait_until(lock, std::chrono::steady_clock::now() + rel_time, std::move(pred));
}
private:
std::atomic<uint32_t> mState = { 0 };
void pulse(int threadCount) noexcept;
std::cv_status wait_until(Mutex* lock,
bool realtimeClock, timespec* ts) noexcept;
};
} // namespace utils
#endif // TNT_UTILS_LINUX_CONDITION_H

View File

@@ -1,64 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TNT_UTILS_LINUX_MUTEX_H
#define TNT_UTILS_LINUX_MUTEX_H
#include <atomic>
#include <utils/compiler.h>
namespace utils {
/*
* A very simple mutex class that can be used as an (almost) drop-in replacement
* for std::mutex.
* It is very low overhead as most of it is inlined.
*/
class Mutex {
public:
constexpr Mutex() noexcept = default;
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
void lock() noexcept {
uint32_t old_state = UNLOCKED;
if (UTILS_UNLIKELY(!mState.compare_exchange_strong(old_state,
LOCKED, std::memory_order_acquire, std::memory_order_relaxed))) {
wait();
}
}
void unlock() noexcept {
if (UTILS_UNLIKELY(mState.exchange(UNLOCKED, std::memory_order_release) == LOCKED_CONTENDED)) {
wake();
}
}
private:
enum {
UNLOCKED = 0, LOCKED = 1, LOCKED_CONTENDED = 2
};
std::atomic<uint32_t> mState = { UNLOCKED };
void wait() noexcept;
void wake() noexcept;
};
} // namespace utils
#endif // TNT_UTILS_LINUX_MUTEX_H

View File

@@ -29,7 +29,7 @@ namespace utils::io {
struct ostream_;
class UTILS_PUBLIC ostream : protected utils::PrivateImplementation<ostream_> {
friend struct ostream_;
public:
@@ -69,6 +69,13 @@ public:
ostream& dec() noexcept;
ostream& hex() noexcept;
/*! @cond PRIVATE */
// Sets a consumer of the log. The consumer is invoked on flush() and replaces the default.
// Thread safe and reentrant.
using ConsumerCallback = void(*)(void*, char const*);
void setConsumer(ConsumerCallback consumer, void* user) noexcept;
/*! @endcond */
protected:
ostream& print(const char* format, ...) noexcept;
@@ -85,6 +92,7 @@ protected:
std::pair<char*, size_t> grow(size_t s) noexcept;
void advance(ssize_t n) noexcept;
void reset() noexcept;
size_t length() const noexcept;
private:
void reserve(size_t newSize) noexcept;
@@ -104,7 +112,7 @@ private:
friend ostream& hex(ostream& s) noexcept;
friend ostream& dec(ostream& s) noexcept;
friend ostream& endl(ostream& s) noexcept;
friend ostream& flush(ostream& s) noexcept;
UTILS_PUBLIC friend ostream& flush(ostream& s) noexcept;
enum type {
SHORT, USHORT, CHAR, UCHAR, INT, UINT, LONG, ULONG, LONG_LONG, ULONG_LONG, FLOAT, DOUBLE,
@@ -132,8 +140,7 @@ inline ostream& operator<<(ostream& stream, const VECTOR<T>& v) {
inline ostream& hex(ostream& s) noexcept { return s.hex(); }
inline ostream& dec(ostream& s) noexcept { return s.dec(); }
inline ostream& endl(ostream& s) noexcept { s << '\n'; return s.flush(); }
inline ostream& flush(ostream& s) noexcept { return s.flush(); }
inline ostream& endl(ostream& s) noexcept { return flush(s << '\n'); }
} // namespace utils::io
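A sketch of the new consumer hook declared above, assuming the slog streams from utils/Log.h (which derive from this ostream): the callback receives the user pointer and the flushed buffer.
#include <cstdio>
#include <cstring>
#include <utils/Log.h>
static void myConsumer(void* user, char const* text) {
    // write the flushed buffer to the FILE* passed as the user pointer
    std::fwrite(text, 1, std::strlen(text), static_cast<std::FILE*>(user));
}
int main() {
    utils::slog.d.setConsumer(myConsumer, stderr);  // replaces the default sink
    utils::slog.d << "hello" << utils::io::endl;    // endl flushes -> consumer runs
    return 0;
}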