From a08f3d95e31080c2cefa281e1676c5c90ecc9a59 Mon Sep 17 00:00:00 2001 From: Nick Fisher Date: Sun, 6 Feb 2022 13:28:28 +0800 Subject: [PATCH] update external headers --- ios/include/CMakeLists.txt | 57 + ios/include/backend/BufferDescriptor.h | 78 +- ios/include/backend/CallbackHandler.h | 74 + ios/include/backend/DriverEnums.h | 21 +- ios/include/backend/Handle.h | 2 + ios/include/backend/PixelBufferDescriptor.h | 70 +- ios/include/cgltf.h | 6552 +++++++++++++++++ ios/include/common/CallbackUtils.cpp | 120 + ios/include/common/CallbackUtils.h | 95 + ios/include/common/NioUtils.cpp | 151 + ios/include/common/NioUtils.h | 80 + ios/include/filamat/Enums.h | 96 - ios/include/filamat/IncludeCallback.h | 71 - ios/include/filamat/MaterialBuilder.h | 746 -- ios/include/filamat/Package.h | 103 - .../IBLPrefilterContext.h | 242 - ios/include/filament/DebugRegistry.h | 30 +- ios/include/filament/Engine.h | 4 + ios/include/filament/LightManager.h | 19 +- ios/include/filament/Material.h | 5 +- ios/include/filament/MaterialChunkType.h | 1 + ios/include/filament/MaterialEnums.h | 22 +- ios/include/filament/MorphTargetBuffer.h | 119 + ios/include/filament/Options.h | 66 +- ios/include/filament/RenderTarget.h | 14 + ios/include/filament/RenderableManager.h | 21 +- ios/include/filament/Renderer.h | 17 +- ios/include/filament/Stream.h | 13 + ios/include/filament/ToneMapper.h | 30 +- ios/include/filament/View.h | 86 +- ios/include/filameshio/MeshReader.h | 120 - ios/include/geometry/SurfaceOrientation.h | 131 - ios/include/geometry/Transcoder.h | 104 - ios/include/gltfio/FilamentAsset.h | 10 + ios/include/gltfio/Image.h | 2 +- ios/include/gltfio/ResourceLoader.h | 4 + ios/include/gltfio/resources/gltfresources.h | 46 - .../gltfio/resources/gltfresources_lite.h | 16 - ios/include/ibl/Cubemap.h | 199 - ios/include/ibl/CubemapIBL.h | 91 - ios/include/ibl/CubemapSH.h | 125 - ios/include/ibl/CubemapUtils.h | 124 - ios/include/ibl/Image.h | 77 - ios/include/ibl/utilities.h | 57 - ios/include/imageio/BlockCompression.h | 194 + ios/include/imageio/HDRDecoder.h | 47 + ios/include/imageio/ImageDecoder.h | 69 + ios/include/imageio/ImageDiffer.h | 34 + ios/include/imageio/ImageEncoder.h | 64 + ios/include/math/TMatHelpers.h | 6 +- ios/include/math/TQuatHelpers.h | 15 +- ios/include/math/TVecHelpers.h | 13 +- ios/include/math/compiler.h | 11 +- ios/include/math/fast.h | 9 +- ios/include/math/half.h | 4 +- ios/include/math/mat2.h | 9 +- ios/include/math/mat3.h | 10 +- ios/include/math/mat4.h | 28 +- ios/include/math/mathfwd.h | 6 +- ios/include/math/quat.h | 10 +- ios/include/math/vec2.h | 11 +- ios/include/math/vec3.h | 10 +- ios/include/math/vec4.h | 9 +- ios/include/mathio/ostream.h | 60 - ios/include/src/Bookmark.cpp | 98 + ios/include/src/FreeFlightManipulator.h | 206 + ios/include/src/Manipulator.cpp | 323 + ios/include/src/MapManipulator.h | 197 + ios/include/src/OrbitManipulator.h | 201 + ios/include/tests/test_camutils.cpp | 85 + .../tsl/array-hash/array_growth_policy.h | 307 + ios/include/tsl/array-hash/array_hash.h | 1766 +++++ ios/include/tsl/array-hash/array_map.h | 863 +++ ios/include/tsl/array-hash/array_set.h | 664 ++ ios/include/tsl/htrie_hash.h | 2090 ++++++ ios/include/tsl/htrie_map.h | 647 ++ ios/include/tsl/htrie_set.h | 586 ++ ios/include/tsl/robin_growth_policy.h | 580 +- ios/include/tsl/robin_hash.h | 2504 +++---- ios/include/tsl/robin_map.h | 1336 ++-- ios/include/tsl/robin_set.h | 1070 +-- ios/include/utils/Allocator.h | 1651 ++--- ios/include/utils/BinaryTreeArray.h | 228 +- 
ios/include/utils/BitmaskEnum.h | 198 +- ios/include/utils/CString.h | 786 +- ios/include/utils/CallStack.h | 254 +- ios/include/utils/Condition.h | 52 +- ios/include/utils/CountDownLatch.h | 182 +- ios/include/utils/CyclicBarrier.h | 168 +- ios/include/utils/Entity.h | 196 +- ios/include/utils/EntityInstance.h | 178 +- ios/include/utils/EntityManager.h | 266 +- ios/include/utils/FixedCapacityVector.h | 821 +-- ios/include/utils/Hash.h | 148 +- ios/include/utils/JobSystem.h | 1090 +-- ios/include/utils/Log.h | 89 +- ios/include/utils/Mutex.h | 52 +- ios/include/utils/NameComponentManager.h | 266 +- ios/include/utils/Panic.h | 1122 +-- ios/include/utils/Path.h | 580 +- ios/include/utils/Profiler.h | 424 +- ios/include/utils/Range.h | 172 +- .../utils/SingleInstanceComponentManager.h | 628 +- ios/include/utils/Slice.h | 746 +- ios/include/utils/SpinLock.h | 180 +- ios/include/utils/StructureOfArrays.h | 1292 ++-- ios/include/utils/Systrace.h | 556 +- ios/include/utils/ThermalManager.h | 26 + ios/include/utils/WorkStealingDequeue.h | 404 +- ios/include/utils/Zip2Iterator.h | 244 +- ios/include/utils/algorithm.h | 554 +- ios/include/utils/android/ThermalManager.h | 60 + ios/include/utils/api_level.h | 68 +- ios/include/utils/architecture.h | 56 +- ios/include/utils/ashmem.h | 56 +- ios/include/utils/bitset.h | 640 +- ios/include/utils/compiler.h | 454 +- ios/include/utils/compressed_pair.h | 136 +- ios/include/utils/debug.h | 66 +- ios/include/utils/generic/Condition.h | 78 +- ios/include/utils/generic/Mutex.h | 56 +- ios/include/utils/generic/ThermalManager.h | 56 + ios/include/utils/linux/Condition.h | 246 +- ios/include/utils/linux/Mutex.h | 128 +- ios/include/utils/memalign.h | 230 +- ios/include/utils/ostream.h | 275 +- ios/include/utils/sstream.h | 74 +- .../morph/Log.h => include/utils/string.h} | 25 +- ios/include/utils/trap.h | 80 +- ios/include/utils/unwindows.h | 102 +- ios/include/utils/vector.h | 120 +- ios/include/utils/win32/stdtypes.h | 66 +- ios/include/viewer/AutomationEngine.h | 264 - ios/include/viewer/AutomationSpec.h | 86 - ios/include/viewer/RemoteServer.h | 102 - ios/include/viewer/Settings.h | 214 - ios/include/viewer/SimpleViewer.h | 257 - ios/src/{morph => }/DependencyGraph.h | 0 ios/src/{morph => }/DracoCache.h | 0 ios/src/{morph => }/FFilamentAsset.h | 11 +- ios/src/{morph => }/FFilamentInstance.h | 0 ios/src/{morph => }/GltfEnums.h | 8 +- ios/src/Log.h | 25 + ios/src/MorphHelper.h | 73 + ios/src/{morph => }/TangentsJob.h | 0 ios/src/Wireframe.h | 40 + ios/src/morph/CPUMorpher.h | 91 - ios/src/morph/GPUMorphHelper.h | 81 - ios/src/morph/GltfHelpers.h | 120 - ios/src/{morph => }/upcast.h | 0 150 files changed, 27445 insertions(+), 14805 deletions(-) create mode 100644 ios/include/CMakeLists.txt create mode 100644 ios/include/backend/CallbackHandler.h create mode 100644 ios/include/cgltf.h create mode 100644 ios/include/common/CallbackUtils.cpp create mode 100644 ios/include/common/CallbackUtils.h create mode 100644 ios/include/common/NioUtils.cpp create mode 100644 ios/include/common/NioUtils.h delete mode 100644 ios/include/filamat/Enums.h delete mode 100644 ios/include/filamat/IncludeCallback.h delete mode 100644 ios/include/filamat/MaterialBuilder.h delete mode 100644 ios/include/filamat/Package.h delete mode 100644 ios/include/filament-iblprefilter/IBLPrefilterContext.h create mode 100644 ios/include/filament/MorphTargetBuffer.h delete mode 100644 ios/include/filameshio/MeshReader.h delete mode 100644 ios/include/geometry/SurfaceOrientation.h delete mode 100644 
ios/include/geometry/Transcoder.h delete mode 100644 ios/include/gltfio/resources/gltfresources.h delete mode 100644 ios/include/gltfio/resources/gltfresources_lite.h delete mode 100644 ios/include/ibl/Cubemap.h delete mode 100644 ios/include/ibl/CubemapIBL.h delete mode 100644 ios/include/ibl/CubemapSH.h delete mode 100644 ios/include/ibl/CubemapUtils.h delete mode 100644 ios/include/ibl/Image.h delete mode 100644 ios/include/ibl/utilities.h create mode 100644 ios/include/imageio/BlockCompression.h create mode 100644 ios/include/imageio/HDRDecoder.h create mode 100644 ios/include/imageio/ImageDecoder.h create mode 100644 ios/include/imageio/ImageDiffer.h create mode 100644 ios/include/imageio/ImageEncoder.h delete mode 100644 ios/include/mathio/ostream.h create mode 100644 ios/include/src/Bookmark.cpp create mode 100644 ios/include/src/FreeFlightManipulator.h create mode 100644 ios/include/src/Manipulator.cpp create mode 100644 ios/include/src/MapManipulator.h create mode 100644 ios/include/src/OrbitManipulator.h create mode 100644 ios/include/tests/test_camutils.cpp create mode 100644 ios/include/tsl/array-hash/array_growth_policy.h create mode 100644 ios/include/tsl/array-hash/array_hash.h create mode 100644 ios/include/tsl/array-hash/array_map.h create mode 100644 ios/include/tsl/array-hash/array_set.h create mode 100644 ios/include/tsl/htrie_hash.h create mode 100644 ios/include/tsl/htrie_map.h create mode 100644 ios/include/tsl/htrie_set.h create mode 100644 ios/include/utils/ThermalManager.h create mode 100644 ios/include/utils/android/ThermalManager.h create mode 100644 ios/include/utils/generic/ThermalManager.h rename ios/{src/morph/Log.h => include/utils/string.h} (58%) delete mode 100644 ios/include/viewer/AutomationEngine.h delete mode 100644 ios/include/viewer/AutomationSpec.h delete mode 100644 ios/include/viewer/RemoteServer.h delete mode 100644 ios/include/viewer/Settings.h delete mode 100644 ios/include/viewer/SimpleViewer.h rename ios/src/{morph => }/DependencyGraph.h (100%) rename ios/src/{morph => }/DracoCache.h (100%) rename ios/src/{morph => }/FFilamentAsset.h (96%) rename ios/src/{morph => }/FFilamentInstance.h (100%) rename ios/src/{morph => }/GltfEnums.h (98%) create mode 100644 ios/src/Log.h create mode 100644 ios/src/MorphHelper.h rename ios/src/{morph => }/TangentsJob.h (100%) create mode 100644 ios/src/Wireframe.h delete mode 100644 ios/src/morph/CPUMorpher.h delete mode 100644 ios/src/morph/GPUMorphHelper.h delete mode 100644 ios/src/morph/GltfHelpers.h rename ios/src/{morph => }/upcast.h (100%) diff --git a/ios/include/CMakeLists.txt b/ios/include/CMakeLists.txt new file mode 100644 index 00000000..966a469a --- /dev/null +++ b/ios/include/CMakeLists.txt @@ -0,0 +1,57 @@ +cmake_minimum_required(VERSION 3.19) +project(camutils) + +set(TARGET camutils) +set(PUBLIC_HDR_DIR include) + +# ================================================================================================== +# Sources and headers +# ================================================================================================== +set(PUBLIC_HDRS + include/camutils/Bookmark.h + include/camutils/compiler.h + include/camutils/Manipulator.h +) + +set(SRCS + src/Bookmark.cpp + src/FreeFlightManipulator.h + src/Manipulator.cpp + src/MapManipulator.h + src/OrbitManipulator.h +) + +# ================================================================================================== +# Include and target definitions +# 
================================================================================================== +include_directories(${PUBLIC_HDR_DIR}) + +add_library(${TARGET} STATIC ${PUBLIC_HDRS} ${SRCS}) + +target_link_libraries(${TARGET} PUBLIC math) + +target_include_directories(${TARGET} PUBLIC ${PUBLIC_HDR_DIR}) + +# ================================================================================================== +# Compiler flags +# ================================================================================================== +if (MSVC) + target_compile_options(${TARGET} PRIVATE $<$:/fp:fast>) +else() + target_compile_options(${TARGET} PRIVATE $<$:-ffast-math>) + target_compile_options(${TARGET} PRIVATE -Wno-deprecated-register) +endif() + +# ================================================================================================== +# Installation +# ================================================================================================== +install(TARGETS ${TARGET} ARCHIVE DESTINATION lib/${DIST_DIR}) +install(DIRECTORY ${PUBLIC_HDR_DIR}/camutils DESTINATION include) + +# ================================================================================================== +# Tests +# ================================================================================================== +if (NOT ANDROID AND NOT WEBGL AND NOT IOS) + add_executable(test_${TARGET} tests/test_camutils.cpp) + target_link_libraries(test_${TARGET} PRIVATE camutils gtest) +endif() diff --git a/ios/include/backend/BufferDescriptor.h b/ios/include/backend/BufferDescriptor.h index 8e72eaed..80fe182a 100644 --- a/ios/include/backend/BufferDescriptor.h +++ b/ios/include/backend/BufferDescriptor.h @@ -27,6 +27,8 @@ namespace filament::backend { +class CallbackHandler; + /** * A CPU memory-buffer descriptor, typically used to transfer data from the CPU to the GPU. * @@ -53,8 +55,8 @@ public: //! 
calls the callback to advertise BufferDescriptor no-longer owns the buffer ~BufferDescriptor() noexcept { - if (callback) { - callback(buffer, size, user); + if (mCallback) { + mCallback(buffer, size, mUser); } } @@ -62,19 +64,21 @@ public: BufferDescriptor& operator=(const BufferDescriptor& rhs) = delete; BufferDescriptor(BufferDescriptor&& rhs) noexcept - : buffer(rhs.buffer), size(rhs.size), callback(rhs.callback), user(rhs.user) { + : buffer(rhs.buffer), size(rhs.size), + mCallback(rhs.mCallback), mUser(rhs.mUser), mHandler(rhs.mHandler) { rhs.buffer = nullptr; - rhs.callback = nullptr; + rhs.mCallback = nullptr; } BufferDescriptor& operator=(BufferDescriptor&& rhs) noexcept { if (this != &rhs) { buffer = rhs.buffer; size = rhs.size; - callback = rhs.callback; - user = rhs.user; + mCallback = rhs.mCallback; + mUser = rhs.mUser; + mHandler = rhs.mHandler; rhs.buffer = nullptr; - rhs.callback = nullptr; + rhs.mCallback = nullptr; } return *this; } @@ -88,7 +92,20 @@ public: */ BufferDescriptor(void const* buffer, size_t size, Callback callback = nullptr, void* user = nullptr) noexcept - : buffer(const_cast(buffer)), size(size), callback(callback), user(user) { + : buffer(const_cast(buffer)), size(size), mCallback(callback), mUser(user) { + } + + /** + * Creates a BufferDescriptor that references a CPU memory-buffer + * @param buffer Memory address of the CPU buffer to reference + * @param size Size of the CPU buffer in bytes + * @param callback A callback used to release the CPU buffer from this BufferDescriptor + * @param user An opaque user pointer passed to the callback function when it's called + */ + BufferDescriptor(void const* buffer, size_t size, + CallbackHandler* handler, Callback callback, void* user = nullptr) noexcept + : buffer(const_cast(buffer)), size(size), + mCallback(callback), mUser(user), mHandler(handler) { } // -------------------------------------------------------------------------------------------- @@ -100,14 +117,15 @@ public: * * @param buffer Memory address of the CPU buffer to reference * @param size Size of the CPU buffer in bytes + * @param handler Handler to use to dispatch the callback, or nullptr for the default handler * @return a new BufferDescriptor */ - template + template static BufferDescriptor make( - void const* buffer, size_t size, T* data) noexcept { + void const* buffer, size_t size, T* data, CallbackHandler* handler = nullptr) noexcept { return { buffer, size, - [](void* b, size_t s, void* u) { + handler, [](void* b, size_t s, void* u) { (*static_cast(u)->*method)(b, s); }, data }; @@ -123,14 +141,15 @@ public: * @param buffer Memory address of the CPU buffer to reference * @param size Size of the CPU buffer in bytes * @param functor functor of type f(void const* buffer, size_t size) + * @param handler Handler to use to dispatch the callback, or nullptr for the default handler * @return a new BufferDescriptor */ template static BufferDescriptor make( - void const* buffer, size_t size, T&& functor) noexcept { + void const* buffer, size_t size, T&& functor, CallbackHandler* handler = nullptr) noexcept { return { buffer, size, - [](void* b, size_t s, void* u) { + handler, [](void* b, size_t s, void* u) { T& that = *static_cast(u); that(b, s); delete &that; @@ -147,21 +166,39 @@ public: * @param user An opaque user pointer passed to the callbeck function when it's called */ void setCallback(Callback callback, void* user = nullptr) noexcept { - this->callback = callback; - this->user = user; + this->mCallback = callback; + this->mUser = user; + 
this->mHandler = nullptr; + } + + /** + * Set or replace the release callback function + * @param handler The Handler to use to dispatch the callback + * @param callback The new callback function + * @param user An opaque user pointer passed to the callbeck function when it's called + */ + void setCallback(CallbackHandler* handler, Callback callback, void* user = nullptr) noexcept { + mCallback = callback; + mUser = user; + mHandler = handler; } //! Returns whether a release callback is set - bool hasCallback() const noexcept { return callback != nullptr; } + bool hasCallback() const noexcept { return mCallback != nullptr; } //! Returns the currently set release callback function Callback getCallback() const noexcept { - return callback; + return mCallback; + } + + //! Returns the handler for this callback or nullptr if the default handler is to be used. + CallbackHandler* getHandler() const noexcept { + return mHandler; } //! Returns the user opaque pointer associated to this BufferDescriptor void* getUser() const noexcept { - return user; + return mUser; } //! CPU mempry-buffer virtual address @@ -172,8 +209,9 @@ public: private: // callback when the buffer is consumed. - Callback callback = nullptr; - void* user = nullptr; + Callback mCallback = nullptr; + void* mUser = nullptr; + CallbackHandler* mHandler = nullptr; }; } // namespace filament::backend diff --git a/ios/include/backend/CallbackHandler.h b/ios/include/backend/CallbackHandler.h new file mode 100644 index 00000000..dee3aaa2 --- /dev/null +++ b/ios/include/backend/CallbackHandler.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_FILAMENT_BACKEND_CALLBACKHANDLER_H +#define TNT_FILAMENT_BACKEND_CALLBACKHANDLER_H + +#include + +namespace filament::backend { + +/** + * A generic interface to dispatch callbacks. + * + * All APIs that take a callback as argument also take a + * CallbackHandler* which is used to dispatch the + * callback: CallbackHandler::post() method is called from a service thread as soon + * as possible (this will NEVER be the main thread), CallbackHandler::post() + * is responsible for scheduling the callback onto the thread the + * user desires. + * + * This is intended to make callbacks interoperate with + * the platform/OS's own messaging system. + * + * CallbackHandler* can always be nullptr in which case the default handler is used. The + * default handler always dispatches callbacks on filament's main thread opportunistically. + * + * Life time: + * --------- + * + * Filament make no attempts to manage the life time of the CallbackHandler* and never takes + * ownership. + * In particular, this means that the CallbackHandler instance must stay valid until all + * pending callbacks are been dispatched. + * + * Similarly, when shutting down filament, care must be taken to ensure that all pending callbacks + * that might access filament's state have been dispatched. 
Filament can no longer ensure this + * because callback execution is the responsibility of the CallbackHandler, which is external to + * filament. + * Typically, the concrete CallbackHandler would have a mechanism to drain and/or wait for all + * callbacks to be processed. + * + */ +class CallbackHandler { +public: + using Callback = void(*)(void* user); + + /** + * Schedules the callback to be called onto the appropriate thread. + * Typically this will be the application's main thead. + * + * Must be thread-safe. + */ + virtual void post(void* user, Callback callback) = 0; + +protected: + virtual ~CallbackHandler(); +}; + +} // namespace filament::backend + +#endif // TNT_FILAMENT_BACKEND_CALLBACKHANDLER_H diff --git a/ios/include/backend/DriverEnums.h b/ios/include/backend/DriverEnums.h index 97cb3a08..abd38cc7 100644 --- a/ios/include/backend/DriverEnums.h +++ b/ios/include/backend/DriverEnums.h @@ -193,10 +193,12 @@ static constexpr size_t SHADER_MODEL_COUNT = 3; */ enum class PrimitiveType : uint8_t { // don't change the enums values (made to match GL) - POINTS = 0, //!< points - LINES = 1, //!< lines - TRIANGLES = 4, //!< triangles - NONE = 0xFF + POINTS = 0, //!< points + LINES = 1, //!< lines + LINE_STRIP = 3, //!< line strip + TRIANGLES = 4, //!< triangles + TRIANGLE_STRIP = 5, //!< triangle strip + NONE = 0xFF }; /** @@ -220,7 +222,8 @@ enum class UniformType : uint8_t { UINT3, UINT4, MAT3, //!< a 3x3 float matrix - MAT4 //!< a 4x4 float matrix + MAT4, //!< a 4x4 float matrix + STRUCT }; enum class Precision : uint8_t { @@ -806,10 +809,10 @@ using AttributeArray = std::array; //! Raster state descriptor struct RasterState { - using CullingMode = CullingMode; - using DepthFunc = SamplerCompareFunc; - using BlendEquation = BlendEquation; - using BlendFunction = BlendFunction; + using CullingMode = backend::CullingMode; + using DepthFunc = backend::SamplerCompareFunc; + using BlendEquation = backend::BlendEquation; + using BlendFunction = backend::BlendFunction; RasterState() noexcept { // NOLINT static_assert(sizeof(RasterState) == sizeof(uint32_t), diff --git a/ios/include/backend/Handle.h b/ios/include/backend/Handle.h index 8dd8ed6f..bbea5a1b 100644 --- a/ios/include/backend/Handle.h +++ b/ios/include/backend/Handle.h @@ -21,6 +21,8 @@ #include #include +#include + namespace filament { namespace backend { diff --git a/ios/include/backend/PixelBufferDescriptor.h b/ios/include/backend/PixelBufferDescriptor.h index 69678d06..9c19f194 100644 --- a/ios/include/backend/PixelBufferDescriptor.h +++ b/ios/include/backend/PixelBufferDescriptor.h @@ -28,8 +28,7 @@ #include #include -namespace filament { -namespace backend { +namespace filament::backend { /** * A descriptor to an image in main memory, typically used to transfer image data from the CPU @@ -58,9 +57,19 @@ public: * @param left Left coordinate in pixels * @param top Top coordinate in pixels * @param stride Stride of a row in pixels + * @param handler Handler to dispatch the callback or nullptr for the default handler * @param callback A callback used to release the CPU buffer * @param user An opaque user pointer passed to the callback function when it's called */ + PixelBufferDescriptor(void const* buffer, size_t size, + PixelDataFormat format, PixelDataType type, uint8_t alignment, + uint32_t left, uint32_t top, uint32_t stride, + CallbackHandler* handler, Callback callback, void* user = nullptr) noexcept + : BufferDescriptor(buffer, size, handler, callback, user), + left(left), top(top), stride(stride), + format(format), 
type(type), alignment(alignment) { + } + PixelBufferDescriptor(void const* buffer, size_t size, PixelDataFormat format, PixelDataType type, uint8_t alignment = 1, uint32_t left = 0, uint32_t top = 0, uint32_t stride = 0, @@ -77,9 +86,17 @@ public: * @param size Size in bytes of the buffer containing the image * @param format Format of the image pixels * @param type Type of the image pixels + * @param handler Handler to dispatch the callback or nullptr for the default handler * @param callback A callback used to release the CPU buffer * @param user An opaque user pointer passed to the callback function when it's called */ + PixelBufferDescriptor(void const* buffer, size_t size, + PixelDataFormat format, PixelDataType type, + CallbackHandler* handler, Callback callback, void* user = nullptr) noexcept + : BufferDescriptor(buffer, size, handler, callback, user), + stride(0), format(format), type(type), alignment(1) { + } + PixelBufferDescriptor(void const* buffer, size_t size, PixelDataFormat format, PixelDataType type, Callback callback, void* user = nullptr) noexcept @@ -87,6 +104,7 @@ public: stride(0), format(format), type(type), alignment(1) { } + /** * Creates a new PixelBufferDescriptor referencing a compressed image in main memory * @@ -94,9 +112,18 @@ public: * @param size Size in bytes of the buffer containing the image * @param format Compressed format of the image * @param imageSize Compressed size of the image + * @param handler Handler to dispatch the callback or nullptr for the default handler * @param callback A callback used to release the CPU buffer * @param user An opaque user pointer passed to the callback function when it's called */ + PixelBufferDescriptor(void const* buffer, size_t size, + backend::CompressedPixelDataType format, uint32_t imageSize, + CallbackHandler* handler, Callback callback, void* user = nullptr) noexcept + : BufferDescriptor(buffer, size, handler, callback, user), + imageSize(imageSize), compressedFormat(format), type(PixelDataType::COMPRESSED), + alignment(1) { + } + PixelBufferDescriptor(void const* buffer, size_t size, backend::CompressedPixelDataType format, uint32_t imageSize, Callback callback, void* user = nullptr) noexcept @@ -107,26 +134,29 @@ public: // -------------------------------------------------------------------------------------------- - template + template static PixelBufferDescriptor make(void const* buffer, size_t size, PixelDataFormat format, PixelDataType type, uint8_t alignment, - uint32_t left, uint32_t top, uint32_t stride, T* data) noexcept { + uint32_t left, uint32_t top, uint32_t stride, T* data, + CallbackHandler* handler = nullptr) noexcept { return { buffer, size, format, type, alignment, left, top, stride, - [](void* b, size_t s, void* u) { + handler, [](void* b, size_t s, void* u) { (*static_cast(u)->*method)(b, s); }, data }; } - template + template static PixelBufferDescriptor make(void const* buffer, size_t size, - PixelDataFormat format, PixelDataType type, T* data) noexcept { - return { buffer, size, format, type, [](void* b, size_t s, void* u) { + PixelDataFormat format, PixelDataType type, T* data, + CallbackHandler* handler = nullptr) noexcept { + return { buffer, size, format, type, handler, [](void* b, size_t s, void* u) { (*static_cast(u)->*method)(b, s); }, data }; } - template + template static PixelBufferDescriptor make(void const* buffer, size_t size, - backend::CompressedPixelDataType format, uint32_t imageSize, T* data) noexcept { - return { buffer, size, format, imageSize, [](void* b, size_t s, 
void* u) { + backend::CompressedPixelDataType format, uint32_t imageSize, T* data, + CallbackHandler* handler = nullptr) noexcept { + return { buffer, size, format, imageSize, handler, [](void* b, size_t s, void* u) { (*static_cast(u)->*method)(b, s); }, data }; } @@ -134,9 +164,10 @@ public: template static PixelBufferDescriptor make(void const* buffer, size_t size, PixelDataFormat format, PixelDataType type, uint8_t alignment, - uint32_t left, uint32_t top, uint32_t stride, T&& functor) noexcept { + uint32_t left, uint32_t top, uint32_t stride, T&& functor, + CallbackHandler* handler = nullptr) noexcept { return { buffer, size, format, type, alignment, left, top, stride, - [](void* b, size_t s, void* u) { + handler, [](void* b, size_t s, void* u) { T& that = *static_cast(u); that(b, s); delete &that; @@ -146,9 +177,10 @@ public: template static PixelBufferDescriptor make(void const* buffer, size_t size, - PixelDataFormat format, PixelDataType type, T&& functor) noexcept { + PixelDataFormat format, PixelDataType type, T&& functor, + CallbackHandler* handler = nullptr) noexcept { return { buffer, size, format, type, - [](void* b, size_t s, void* u) { + handler, [](void* b, size_t s, void* u) { T& that = *static_cast(u); that(b, s); delete &that; @@ -158,9 +190,10 @@ public: template static PixelBufferDescriptor make(void const* buffer, size_t size, - backend::CompressedPixelDataType format, uint32_t imageSize, T&& functor) noexcept { + backend::CompressedPixelDataType format, uint32_t imageSize, T&& functor, + CallbackHandler* handler = nullptr) noexcept { return { buffer, size, format, imageSize, - [](void* b, size_t s, void* u) { + handler, [](void* b, size_t s, void* u) { T& that = *static_cast(u); that(b, s); delete &that; @@ -275,8 +308,7 @@ public: uint8_t alignment : 4; }; -} // namespace backend -} // namespace filament +} // namespace backend::filament #if !defined(NDEBUG) utils::io::ostream& operator<<(utils::io::ostream& out, const filament::backend::PixelBufferDescriptor& b); diff --git a/ios/include/cgltf.h b/ios/include/cgltf.h new file mode 100644 index 00000000..8b386b02 --- /dev/null +++ b/ios/include/cgltf.h @@ -0,0 +1,6552 @@ +/** + * cgltf - a single-file glTF 2.0 parser written in C99. + * + * Version: 1.11 + * + * Website: https://github.com/jkuhlmann/cgltf + * + * Distributed under the MIT License, see notice at the end of this file. + * + * Building: + * Include this file where you need the struct and function + * declarations. Have exactly one source file where you define + * `CGLTF_IMPLEMENTATION` before including this file to get the + * function definitions. + * + * Reference: + * `cgltf_result cgltf_parse(const cgltf_options*, const void*, + * cgltf_size, cgltf_data**)` parses both glTF and GLB data. If + * this function returns `cgltf_result_success`, you have to call + * `cgltf_free()` on the created `cgltf_data*` variable. + * Note that contents of external files for buffers and images are not + * automatically loaded. You'll need to read these files yourself using + * URIs in the `cgltf_data` structure. + * + * `cgltf_options` is the struct passed to `cgltf_parse()` to control + * parts of the parsing process. You can use it to force the file type + * and provide memory allocation as well as file operation callbacks. + * Should be zero-initialized to trigger default behavior. + * + * `cgltf_data` is the struct allocated and filled by `cgltf_parse()`. 
+ * It generally mirrors the glTF format as described by the spec (see + * https://github.com/KhronosGroup/glTF/tree/master/specification/2.0). + * + * `void cgltf_free(cgltf_data*)` frees the allocated `cgltf_data` + * variable. + * + * `cgltf_result cgltf_load_buffers(const cgltf_options*, cgltf_data*, + * const char* gltf_path)` can be optionally called to open and read buffer + * files using the `FILE*` APIs. The `gltf_path` argument is the path to + * the original glTF file, which allows the parser to resolve the path to + * buffer files. + * + * `cgltf_result cgltf_load_buffer_base64(const cgltf_options* options, + * cgltf_size size, const char* base64, void** out_data)` decodes + * base64-encoded data content. Used internally by `cgltf_load_buffers()`. + * This is useful when decoding data URIs in images. + * + * `cgltf_result cgltf_parse_file(const cgltf_options* options, const + * char* path, cgltf_data** out_data)` can be used to open the given + * file using `FILE*` APIs and parse the data using `cgltf_parse()`. + * + * `cgltf_result cgltf_validate(cgltf_data*)` can be used to do additional + * checks to make sure the parsed glTF data is valid. + * + * `cgltf_node_transform_local` converts the translation / rotation / scale properties of a node + * into a mat4. + * + * `cgltf_node_transform_world` calls `cgltf_node_transform_local` on every ancestor in order + * to compute the root-to-node transformation. + * + * `cgltf_accessor_unpack_floats` reads in the data from an accessor, applies sparse data (if any), + * and converts them to floating point. Assumes that `cgltf_load_buffers` has already been called. + * By passing null for the output pointer, users can find out how many floats are required in the + * output buffer. + * + * `cgltf_accessor_num_components` is a tiny utility that tells you the dimensionality of + * a certain accessor type. This can be used before `cgltf_accessor_unpack_floats` to help allocate + * the necessary amount of memory. + * + * `cgltf_accessor_read_float` reads a certain element from a non-sparse accessor and converts it to + * floating point, assuming that `cgltf_load_buffers` has already been called. The passed-in element + * size is the number of floats in the output buffer, which should be in the range [1, 16]. Returns + * false if the passed-in element_size is too small, or if the accessor is sparse. + * + * `cgltf_accessor_read_uint` is similar to its floating-point counterpart, but limited to reading + * vector types and does not support matrix types. The passed-in element size is the number of uints + * in the output buffer, which should be in the range [1, 4]. Returns false if the passed-in + * element_size is too small, or if the accessor is sparse. + * + * `cgltf_accessor_read_index` is similar to its floating-point counterpart, but it returns size_t + * and only works with single-component data types. + * + * `cgltf_result cgltf_copy_extras_json(const cgltf_data*, const cgltf_extras*, + * char* dest, cgltf_size* dest_size)` allows users to retrieve the "extras" data that + * can be attached to many glTF objects (which can be arbitrary JSON data). The + * `cgltf_extras` struct stores the offsets of the start and end of the extras JSON data + * as it appears in the complete glTF JSON data. This function copies the extras data + * into the provided buffer. If `dest` is NULL, the length of the data is written into + * `dest_size`. 
You can then parse this data using your own JSON parser + * or, if you've included the cgltf implementation using the integrated JSMN JSON parser. + */ +#ifndef CGLTF_H_INCLUDED__ +#define CGLTF_H_INCLUDED__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef size_t cgltf_size; +typedef float cgltf_float; +typedef int cgltf_int; +typedef unsigned int cgltf_uint; +typedef int cgltf_bool; + +typedef enum cgltf_file_type +{ + cgltf_file_type_invalid, + cgltf_file_type_gltf, + cgltf_file_type_glb, +} cgltf_file_type; + +typedef enum cgltf_result +{ + cgltf_result_success, + cgltf_result_data_too_short, + cgltf_result_unknown_format, + cgltf_result_invalid_json, + cgltf_result_invalid_gltf, + cgltf_result_invalid_options, + cgltf_result_file_not_found, + cgltf_result_io_error, + cgltf_result_out_of_memory, + cgltf_result_legacy_gltf, +} cgltf_result; + +typedef struct cgltf_memory_options +{ + void* (*alloc)(void* user, cgltf_size size); + void (*free) (void* user, void* ptr); + void* user_data; +} cgltf_memory_options; + +typedef struct cgltf_file_options +{ + cgltf_result(*read)(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, const char* path, cgltf_size* size, void** data); + void (*release)(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, void* data); + void* user_data; +} cgltf_file_options; + +typedef struct cgltf_options +{ + cgltf_file_type type; /* invalid == auto detect */ + cgltf_size json_token_count; /* 0 == auto */ + cgltf_memory_options memory; + cgltf_file_options file; +} cgltf_options; + +typedef enum cgltf_buffer_view_type +{ + cgltf_buffer_view_type_invalid, + cgltf_buffer_view_type_indices, + cgltf_buffer_view_type_vertices, +} cgltf_buffer_view_type; + +typedef enum cgltf_attribute_type +{ + cgltf_attribute_type_invalid, + cgltf_attribute_type_position, + cgltf_attribute_type_normal, + cgltf_attribute_type_tangent, + cgltf_attribute_type_texcoord, + cgltf_attribute_type_color, + cgltf_attribute_type_joints, + cgltf_attribute_type_weights, +} cgltf_attribute_type; + +typedef enum cgltf_component_type +{ + cgltf_component_type_invalid, + cgltf_component_type_r_8, /* BYTE */ + cgltf_component_type_r_8u, /* UNSIGNED_BYTE */ + cgltf_component_type_r_16, /* SHORT */ + cgltf_component_type_r_16u, /* UNSIGNED_SHORT */ + cgltf_component_type_r_32u, /* UNSIGNED_INT */ + cgltf_component_type_r_32f, /* FLOAT */ +} cgltf_component_type; + +typedef enum cgltf_type +{ + cgltf_type_invalid, + cgltf_type_scalar, + cgltf_type_vec2, + cgltf_type_vec3, + cgltf_type_vec4, + cgltf_type_mat2, + cgltf_type_mat3, + cgltf_type_mat4, +} cgltf_type; + +typedef enum cgltf_primitive_type +{ + cgltf_primitive_type_points, + cgltf_primitive_type_lines, + cgltf_primitive_type_line_loop, + cgltf_primitive_type_line_strip, + cgltf_primitive_type_triangles, + cgltf_primitive_type_triangle_strip, + cgltf_primitive_type_triangle_fan, +} cgltf_primitive_type; + +typedef enum cgltf_alpha_mode +{ + cgltf_alpha_mode_opaque, + cgltf_alpha_mode_mask, + cgltf_alpha_mode_blend, +} cgltf_alpha_mode; + +typedef enum cgltf_animation_path_type { + cgltf_animation_path_type_invalid, + cgltf_animation_path_type_translation, + cgltf_animation_path_type_rotation, + cgltf_animation_path_type_scale, + cgltf_animation_path_type_weights, +} cgltf_animation_path_type; + +typedef enum cgltf_interpolation_type { + cgltf_interpolation_type_linear, + cgltf_interpolation_type_step, + cgltf_interpolation_type_cubic_spline, 
+} cgltf_interpolation_type; + +typedef enum cgltf_camera_type { + cgltf_camera_type_invalid, + cgltf_camera_type_perspective, + cgltf_camera_type_orthographic, +} cgltf_camera_type; + +typedef enum cgltf_light_type { + cgltf_light_type_invalid, + cgltf_light_type_directional, + cgltf_light_type_point, + cgltf_light_type_spot, +} cgltf_light_type; + +typedef enum cgltf_data_free_method { + cgltf_data_free_method_none, + cgltf_data_free_method_file_release, + cgltf_data_free_method_memory_free, +} cgltf_data_free_method; + +typedef struct cgltf_extras { + cgltf_size start_offset; + cgltf_size end_offset; +} cgltf_extras; + +typedef struct cgltf_extension { + char* name; + char* data; +} cgltf_extension; + +typedef struct cgltf_buffer +{ + char* name; + cgltf_size size; + char* uri; + void* data; /* loaded by cgltf_load_buffers */ + cgltf_data_free_method data_free_method; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_buffer; + +typedef enum cgltf_meshopt_compression_mode { + cgltf_meshopt_compression_mode_invalid, + cgltf_meshopt_compression_mode_attributes, + cgltf_meshopt_compression_mode_triangles, + cgltf_meshopt_compression_mode_indices, +} cgltf_meshopt_compression_mode; + +typedef enum cgltf_meshopt_compression_filter { + cgltf_meshopt_compression_filter_none, + cgltf_meshopt_compression_filter_octahedral, + cgltf_meshopt_compression_filter_quaternion, + cgltf_meshopt_compression_filter_exponential, +} cgltf_meshopt_compression_filter; + +typedef struct cgltf_meshopt_compression +{ + cgltf_buffer* buffer; + cgltf_size offset; + cgltf_size size; + cgltf_size stride; + cgltf_size count; + cgltf_meshopt_compression_mode mode; + cgltf_meshopt_compression_filter filter; +} cgltf_meshopt_compression; + +typedef struct cgltf_buffer_view +{ + char *name; + cgltf_buffer* buffer; + cgltf_size offset; + cgltf_size size; + cgltf_size stride; /* 0 == automatically determined by accessor */ + cgltf_buffer_view_type type; + void* data; /* overrides buffer->data if present, filled by extensions */ + cgltf_bool has_meshopt_compression; + cgltf_meshopt_compression meshopt_compression; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_buffer_view; + +typedef struct cgltf_accessor_sparse +{ + cgltf_size count; + cgltf_buffer_view* indices_buffer_view; + cgltf_size indices_byte_offset; + cgltf_component_type indices_component_type; + cgltf_buffer_view* values_buffer_view; + cgltf_size values_byte_offset; + cgltf_extras extras; + cgltf_extras indices_extras; + cgltf_extras values_extras; + cgltf_size extensions_count; + cgltf_extension* extensions; + cgltf_size indices_extensions_count; + cgltf_extension* indices_extensions; + cgltf_size values_extensions_count; + cgltf_extension* values_extensions; +} cgltf_accessor_sparse; + +typedef struct cgltf_accessor +{ + char* name; + cgltf_component_type component_type; + cgltf_bool normalized; + cgltf_type type; + cgltf_size offset; + cgltf_size count; + cgltf_size stride; + cgltf_buffer_view* buffer_view; + cgltf_bool has_min; + cgltf_float min[16]; + cgltf_bool has_max; + cgltf_float max[16]; + cgltf_bool is_sparse; + cgltf_accessor_sparse sparse; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_accessor; + +typedef struct cgltf_attribute +{ + char* name; + cgltf_attribute_type type; + cgltf_int index; + cgltf_accessor* data; +} cgltf_attribute; + +typedef struct cgltf_image +{ + char* name; + char* uri; + cgltf_buffer_view* 
buffer_view; + char* mime_type; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_image; + +typedef struct cgltf_sampler +{ + char* name; + cgltf_int mag_filter; + cgltf_int min_filter; + cgltf_int wrap_s; + cgltf_int wrap_t; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_sampler; + +typedef struct cgltf_texture +{ + char* name; + cgltf_image* image; + cgltf_sampler* sampler; + cgltf_bool has_basisu; + cgltf_image* basisu_image; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_texture; + +typedef struct cgltf_texture_transform +{ + cgltf_float offset[2]; + cgltf_float rotation; + cgltf_float scale[2]; + cgltf_bool has_texcoord; + cgltf_int texcoord; +} cgltf_texture_transform; + +typedef struct cgltf_texture_view +{ + cgltf_texture* texture; + cgltf_int texcoord; + cgltf_float scale; /* equivalent to strength for occlusion_texture */ + cgltf_bool has_transform; + cgltf_texture_transform transform; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_texture_view; + +typedef struct cgltf_pbr_metallic_roughness +{ + cgltf_texture_view base_color_texture; + cgltf_texture_view metallic_roughness_texture; + + cgltf_float base_color_factor[4]; + cgltf_float metallic_factor; + cgltf_float roughness_factor; + + cgltf_extras extras; +} cgltf_pbr_metallic_roughness; + +typedef struct cgltf_pbr_specular_glossiness +{ + cgltf_texture_view diffuse_texture; + cgltf_texture_view specular_glossiness_texture; + + cgltf_float diffuse_factor[4]; + cgltf_float specular_factor[3]; + cgltf_float glossiness_factor; +} cgltf_pbr_specular_glossiness; + +typedef struct cgltf_clearcoat +{ + cgltf_texture_view clearcoat_texture; + cgltf_texture_view clearcoat_roughness_texture; + cgltf_texture_view clearcoat_normal_texture; + + cgltf_float clearcoat_factor; + cgltf_float clearcoat_roughness_factor; +} cgltf_clearcoat; + +typedef struct cgltf_transmission +{ + cgltf_texture_view transmission_texture; + cgltf_float transmission_factor; +} cgltf_transmission; + +typedef struct cgltf_ior +{ + cgltf_float ior; +} cgltf_ior; + +typedef struct cgltf_specular +{ + cgltf_texture_view specular_texture; + cgltf_texture_view specular_color_texture; + cgltf_float specular_color_factor[3]; + cgltf_float specular_factor; +} cgltf_specular; + +typedef struct cgltf_volume +{ + cgltf_texture_view thickness_texture; + cgltf_float thickness_factor; + cgltf_float attenuation_color[3]; + cgltf_float attenuation_distance; +} cgltf_volume; + +typedef struct cgltf_sheen +{ + cgltf_texture_view sheen_color_texture; + cgltf_float sheen_color_factor[3]; + cgltf_texture_view sheen_roughness_texture; + cgltf_float sheen_roughness_factor; +} cgltf_sheen; + +typedef struct cgltf_emissive_strength +{ + cgltf_float emissive_strength; +} cgltf_emissive_strength; + +typedef struct cgltf_material +{ + char* name; + cgltf_bool has_pbr_metallic_roughness; + cgltf_bool has_pbr_specular_glossiness; + cgltf_bool has_clearcoat; + cgltf_bool has_transmission; + cgltf_bool has_volume; + cgltf_bool has_ior; + cgltf_bool has_specular; + cgltf_bool has_sheen; + cgltf_bool has_emissive_strength; + cgltf_pbr_metallic_roughness pbr_metallic_roughness; + cgltf_pbr_specular_glossiness pbr_specular_glossiness; + cgltf_clearcoat clearcoat; + cgltf_ior ior; + cgltf_specular specular; + cgltf_sheen sheen; + cgltf_transmission transmission; + cgltf_volume volume; + cgltf_emissive_strength 
emissive_strength; + cgltf_texture_view normal_texture; + cgltf_texture_view occlusion_texture; + cgltf_texture_view emissive_texture; + cgltf_float emissive_factor[3]; + cgltf_alpha_mode alpha_mode; + cgltf_float alpha_cutoff; + cgltf_bool double_sided; + cgltf_bool unlit; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_material; + +typedef struct cgltf_material_mapping +{ + cgltf_size variant; + cgltf_material* material; + cgltf_extras extras; +} cgltf_material_mapping; + +typedef struct cgltf_morph_target { + cgltf_attribute* attributes; + cgltf_size attributes_count; +} cgltf_morph_target; + +typedef struct cgltf_draco_mesh_compression { + cgltf_buffer_view* buffer_view; + cgltf_attribute* attributes; + cgltf_size attributes_count; +} cgltf_draco_mesh_compression; + +typedef struct cgltf_primitive { + cgltf_primitive_type type; + cgltf_accessor* indices; + cgltf_material* material; + cgltf_attribute* attributes; + cgltf_size attributes_count; + cgltf_morph_target* targets; + cgltf_size targets_count; + cgltf_extras extras; + cgltf_bool has_draco_mesh_compression; + cgltf_draco_mesh_compression draco_mesh_compression; + cgltf_material_mapping* mappings; + cgltf_size mappings_count; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_primitive; + +typedef struct cgltf_mesh { + char* name; + cgltf_primitive* primitives; + cgltf_size primitives_count; + cgltf_float* weights; + cgltf_size weights_count; + char** target_names; + cgltf_size target_names_count; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_mesh; + +typedef struct cgltf_node cgltf_node; + +typedef struct cgltf_skin { + char* name; + cgltf_node** joints; + cgltf_size joints_count; + cgltf_node* skeleton; + cgltf_accessor* inverse_bind_matrices; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_skin; + +typedef struct cgltf_camera_perspective { + cgltf_bool has_aspect_ratio; + cgltf_float aspect_ratio; + cgltf_float yfov; + cgltf_bool has_zfar; + cgltf_float zfar; + cgltf_float znear; + cgltf_extras extras; +} cgltf_camera_perspective; + +typedef struct cgltf_camera_orthographic { + cgltf_float xmag; + cgltf_float ymag; + cgltf_float zfar; + cgltf_float znear; + cgltf_extras extras; +} cgltf_camera_orthographic; + +typedef struct cgltf_camera { + char* name; + cgltf_camera_type type; + union { + cgltf_camera_perspective perspective; + cgltf_camera_orthographic orthographic; + } data; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_camera; + +typedef struct cgltf_light { + char* name; + cgltf_float color[3]; + cgltf_float intensity; + cgltf_light_type type; + cgltf_float range; + cgltf_float spot_inner_cone_angle; + cgltf_float spot_outer_cone_angle; + cgltf_extras extras; +} cgltf_light; + +struct cgltf_node { + char* name; + cgltf_node* parent; + cgltf_node** children; + cgltf_size children_count; + cgltf_skin* skin; + cgltf_mesh* mesh; + cgltf_camera* camera; + cgltf_light* light; + cgltf_float* weights; + cgltf_size weights_count; + cgltf_bool has_translation; + cgltf_bool has_rotation; + cgltf_bool has_scale; + cgltf_bool has_matrix; + cgltf_float translation[3]; + cgltf_float rotation[4]; + cgltf_float scale[3]; + cgltf_float matrix[16]; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +}; + +typedef struct cgltf_scene { + char* name; + cgltf_node** nodes; + cgltf_size nodes_count; + 
cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_scene; + +typedef struct cgltf_animation_sampler { + cgltf_accessor* input; + cgltf_accessor* output; + cgltf_interpolation_type interpolation; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_animation_sampler; + +typedef struct cgltf_animation_channel { + cgltf_animation_sampler* sampler; + cgltf_node* target_node; + cgltf_animation_path_type target_path; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_animation_channel; + +typedef struct cgltf_animation { + char* name; + cgltf_animation_sampler* samplers; + cgltf_size samplers_count; + cgltf_animation_channel* channels; + cgltf_size channels_count; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_animation; + +typedef struct cgltf_material_variant +{ + char* name; + cgltf_extras extras; +} cgltf_material_variant; + +typedef struct cgltf_asset { + char* copyright; + char* generator; + char* version; + char* min_version; + cgltf_extras extras; + cgltf_size extensions_count; + cgltf_extension* extensions; +} cgltf_asset; + +typedef struct cgltf_data +{ + cgltf_file_type file_type; + void* file_data; + + cgltf_asset asset; + + cgltf_mesh* meshes; + cgltf_size meshes_count; + + cgltf_material* materials; + cgltf_size materials_count; + + cgltf_accessor* accessors; + cgltf_size accessors_count; + + cgltf_buffer_view* buffer_views; + cgltf_size buffer_views_count; + + cgltf_buffer* buffers; + cgltf_size buffers_count; + + cgltf_image* images; + cgltf_size images_count; + + cgltf_texture* textures; + cgltf_size textures_count; + + cgltf_sampler* samplers; + cgltf_size samplers_count; + + cgltf_skin* skins; + cgltf_size skins_count; + + cgltf_camera* cameras; + cgltf_size cameras_count; + + cgltf_light* lights; + cgltf_size lights_count; + + cgltf_node* nodes; + cgltf_size nodes_count; + + cgltf_scene* scenes; + cgltf_size scenes_count; + + cgltf_scene* scene; + + cgltf_animation* animations; + cgltf_size animations_count; + + cgltf_material_variant* variants; + cgltf_size variants_count; + + cgltf_extras extras; + + cgltf_size data_extensions_count; + cgltf_extension* data_extensions; + + char** extensions_used; + cgltf_size extensions_used_count; + + char** extensions_required; + cgltf_size extensions_required_count; + + const char* json; + cgltf_size json_size; + + const void* bin; + cgltf_size bin_size; + + cgltf_memory_options memory; + cgltf_file_options file; +} cgltf_data; + +cgltf_result cgltf_parse( + const cgltf_options* options, + const void* data, + cgltf_size size, + cgltf_data** out_data); + +cgltf_result cgltf_parse_file( + const cgltf_options* options, + const char* path, + cgltf_data** out_data); + +cgltf_result cgltf_load_buffers( + const cgltf_options* options, + cgltf_data* data, + const char* gltf_path); + +cgltf_result cgltf_load_buffer_base64(const cgltf_options* options, cgltf_size size, const char* base64, void** out_data); + +cgltf_size cgltf_decode_string(char* string); +cgltf_size cgltf_decode_uri(char* uri); + +cgltf_result cgltf_validate(cgltf_data* data); + +void cgltf_free(cgltf_data* data); + +void cgltf_node_transform_local(const cgltf_node* node, cgltf_float* out_matrix); +void cgltf_node_transform_world(const cgltf_node* node, cgltf_float* out_matrix); + +cgltf_bool cgltf_accessor_read_float(const cgltf_accessor* accessor, cgltf_size index, cgltf_float* out, cgltf_size element_size); 
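
[Editorial aside, not part of cgltf.h.] The reference notes near the top of this header translate into the minimal usage sketch below; the file name, the error handling, and the printing are assumptions.

    #include <cgltf.h>   // the implementation must be compiled in exactly one source
                         // file by defining CGLTF_IMPLEMENTATION before this include
    #include <cstdio>

    // Parses a .gltf/.glb file, loads its external buffers, and queries how many
    // floats the first accessor unpacks to. Returns false on any cgltf error.
    static bool inspectFirstAccessor(const char* path) {
        cgltf_options options = {};      // zero-initialized == default behavior
        cgltf_data* data = nullptr;
        if (cgltf_parse_file(&options, path, &data) != cgltf_result_success) {
            return false;
        }
        // cgltf_parse_file() does not read external buffer files or data URIs.
        if (cgltf_load_buffers(&options, data, path) != cgltf_result_success) {
            cgltf_free(data);
            return false;
        }
        if (data->accessors_count > 0) {
            // Passing a null output pointer returns the required float count.
            cgltf_size needed = cgltf_accessor_unpack_floats(&data->accessors[0], nullptr, 0);
            std::printf("accessor 0 unpacks to %zu floats\n", static_cast<size_t>(needed));
        }
        cgltf_free(data);
        return true;
    }
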
+cgltf_bool cgltf_accessor_read_uint(const cgltf_accessor* accessor, cgltf_size index, cgltf_uint* out, cgltf_size element_size); +cgltf_size cgltf_accessor_read_index(const cgltf_accessor* accessor, cgltf_size index); + +cgltf_size cgltf_num_components(cgltf_type type); + +cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_float* out, cgltf_size float_count); + +cgltf_result cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* extras, char* dest, cgltf_size* dest_size); + +#ifdef __cplusplus +} +#endif + +#endif /* #ifndef CGLTF_H_INCLUDED__ */ + +/* + * + * Stop now, if you are only interested in the API. + * Below, you find the implementation. + * + */ + +#if defined(__INTELLISENSE__) || defined(__JETBRAINS_IDE__) +/* This makes MSVC/CLion intellisense work. */ +#define CGLTF_IMPLEMENTATION +#endif + +#ifdef CGLTF_IMPLEMENTATION + +#include /* For uint8_t, uint32_t */ +#include /* For strncpy */ +#include /* For fopen */ +#include /* For UINT_MAX etc */ +#include /* For FLT_MAX */ + +#if !defined(CGLTF_MALLOC) || !defined(CGLTF_FREE) || !defined(CGLTF_ATOI) || !defined(CGLTF_ATOF) || !defined(CGLTF_ATOLL) +#include /* For malloc, free, atoi, atof */ +#endif + +/* JSMN_PARENT_LINKS is necessary to make parsing large structures linear in input size */ +#define JSMN_PARENT_LINKS + +/* JSMN_STRICT is necessary to reject invalid JSON documents */ +#define JSMN_STRICT + +/* + * -- jsmn.h start -- + * Source: https://github.com/zserge/jsmn + * License: MIT + */ +typedef enum { + JSMN_UNDEFINED = 0, + JSMN_OBJECT = 1, + JSMN_ARRAY = 2, + JSMN_STRING = 3, + JSMN_PRIMITIVE = 4 +} jsmntype_t; +enum jsmnerr { + /* Not enough tokens were provided */ + JSMN_ERROR_NOMEM = -1, + /* Invalid character inside JSON string */ + JSMN_ERROR_INVAL = -2, + /* The string is not a full JSON packet, more bytes expected */ + JSMN_ERROR_PART = -3 +}; +typedef struct { + jsmntype_t type; + int start; + int end; + int size; +#ifdef JSMN_PARENT_LINKS + int parent; +#endif +} jsmntok_t; +typedef struct { + unsigned int pos; /* offset in the JSON string */ + unsigned int toknext; /* next token to allocate */ + int toksuper; /* superior token node, e.g parent object or array */ +} jsmn_parser; +static void jsmn_init(jsmn_parser *parser); +static int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens); +/* + * -- jsmn.h end -- + */ + + +static const cgltf_size GlbHeaderSize = 12; +static const cgltf_size GlbChunkHeaderSize = 8; +static const uint32_t GlbVersion = 2; +static const uint32_t GlbMagic = 0x46546C67; +static const uint32_t GlbMagicJsonChunk = 0x4E4F534A; +static const uint32_t GlbMagicBinChunk = 0x004E4942; + +#ifndef CGLTF_MALLOC +#define CGLTF_MALLOC(size) malloc(size) +#endif +#ifndef CGLTF_FREE +#define CGLTF_FREE(ptr) free(ptr) +#endif +#ifndef CGLTF_ATOI +#define CGLTF_ATOI(str) atoi(str) +#endif +#ifndef CGLTF_ATOF +#define CGLTF_ATOF(str) atof(str) +#endif +#ifndef CGLTF_ATOLL +#define CGLTF_ATOLL(str) atoll(str) +#endif +#ifndef CGLTF_VALIDATE_ENABLE_ASSERTS +#define CGLTF_VALIDATE_ENABLE_ASSERTS 0 +#endif + +static void* cgltf_default_alloc(void* user, cgltf_size size) +{ + (void)user; + return CGLTF_MALLOC(size); +} + +static void cgltf_default_free(void* user, void* ptr) +{ + (void)user; + CGLTF_FREE(ptr); +} + +static void* cgltf_calloc(cgltf_options* options, size_t element_size, cgltf_size count) +{ + if (SIZE_MAX / element_size < count) + { + return NULL; + } + void* result = 
options->memory.alloc(options->memory.user_data, element_size * count); + if (!result) + { + return NULL; + } + memset(result, 0, element_size * count); + return result; +} + +static cgltf_result cgltf_default_file_read(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, const char* path, cgltf_size* size, void** data) +{ + (void)file_options; + void* (*memory_alloc)(void*, cgltf_size) = memory_options->alloc ? memory_options->alloc : &cgltf_default_alloc; + void (*memory_free)(void*, void*) = memory_options->free ? memory_options->free : &cgltf_default_free; + + FILE* file = fopen(path, "rb"); + if (!file) + { + return cgltf_result_file_not_found; + } + + cgltf_size file_size = size ? *size : 0; + + if (file_size == 0) + { + fseek(file, 0, SEEK_END); + +#ifdef _WIN32 + __int64 length = _ftelli64(file); +#else + long length = ftell(file); +#endif + + if (length < 0) + { + fclose(file); + return cgltf_result_io_error; + } + + fseek(file, 0, SEEK_SET); + file_size = (cgltf_size)length; + } + + char* file_data = (char*)memory_alloc(memory_options->user_data, file_size); + if (!file_data) + { + fclose(file); + return cgltf_result_out_of_memory; + } + + cgltf_size read_size = fread(file_data, 1, file_size, file); + + fclose(file); + + if (read_size != file_size) + { + memory_free(memory_options->user_data, file_data); + return cgltf_result_io_error; + } + + if (size) + { + *size = file_size; + } + if (data) + { + *data = file_data; + } + + return cgltf_result_success; +} + +static void cgltf_default_file_release(const struct cgltf_memory_options* memory_options, const struct cgltf_file_options* file_options, void* data) +{ + (void)file_options; + void (*memfree)(void*, void*) = memory_options->free ? memory_options->free : &cgltf_default_free; + memfree(memory_options->user_data, data); +} + +static cgltf_result cgltf_parse_json(cgltf_options* options, const uint8_t* json_chunk, cgltf_size size, cgltf_data** out_data); + +cgltf_result cgltf_parse(const cgltf_options* options, const void* data, cgltf_size size, cgltf_data** out_data) +{ + if (size < GlbHeaderSize) + { + return cgltf_result_data_too_short; + } + + if (options == NULL) + { + return cgltf_result_invalid_options; + } + + cgltf_options fixed_options = *options; + if (fixed_options.memory.alloc == NULL) + { + fixed_options.memory.alloc = &cgltf_default_alloc; + } + if (fixed_options.memory.free == NULL) + { + fixed_options.memory.free = &cgltf_default_free; + } + + uint32_t tmp; + // Magic + memcpy(&tmp, data, 4); + if (tmp != GlbMagic) + { + if (fixed_options.type == cgltf_file_type_invalid) + { + fixed_options.type = cgltf_file_type_gltf; + } + else if (fixed_options.type == cgltf_file_type_glb) + { + return cgltf_result_unknown_format; + } + } + + if (fixed_options.type == cgltf_file_type_gltf) + { + cgltf_result json_result = cgltf_parse_json(&fixed_options, (const uint8_t*)data, size, out_data); + if (json_result != cgltf_result_success) + { + return json_result; + } + + (*out_data)->file_type = cgltf_file_type_gltf; + + return cgltf_result_success; + } + + const uint8_t* ptr = (const uint8_t*)data; + // Version + memcpy(&tmp, ptr + 4, 4); + uint32_t version = tmp; + if (version != GlbVersion) + { + return version < GlbVersion ? 
cgltf_result_legacy_gltf : cgltf_result_unknown_format; + } + + // Total length + memcpy(&tmp, ptr + 8, 4); + if (tmp > size) + { + return cgltf_result_data_too_short; + } + + const uint8_t* json_chunk = ptr + GlbHeaderSize; + + if (GlbHeaderSize + GlbChunkHeaderSize > size) + { + return cgltf_result_data_too_short; + } + + // JSON chunk: length + uint32_t json_length; + memcpy(&json_length, json_chunk, 4); + if (GlbHeaderSize + GlbChunkHeaderSize + json_length > size) + { + return cgltf_result_data_too_short; + } + + // JSON chunk: magic + memcpy(&tmp, json_chunk + 4, 4); + if (tmp != GlbMagicJsonChunk) + { + return cgltf_result_unknown_format; + } + + json_chunk += GlbChunkHeaderSize; + + const void* bin = 0; + cgltf_size bin_size = 0; + + if (GlbHeaderSize + GlbChunkHeaderSize + json_length + GlbChunkHeaderSize <= size) + { + // We can read another chunk + const uint8_t* bin_chunk = json_chunk + json_length; + + // Bin chunk: length + uint32_t bin_length; + memcpy(&bin_length, bin_chunk, 4); + if (GlbHeaderSize + GlbChunkHeaderSize + json_length + GlbChunkHeaderSize + bin_length > size) + { + return cgltf_result_data_too_short; + } + + // Bin chunk: magic + memcpy(&tmp, bin_chunk + 4, 4); + if (tmp != GlbMagicBinChunk) + { + return cgltf_result_unknown_format; + } + + bin_chunk += GlbChunkHeaderSize; + + bin = bin_chunk; + bin_size = bin_length; + } + + cgltf_result json_result = cgltf_parse_json(&fixed_options, json_chunk, json_length, out_data); + if (json_result != cgltf_result_success) + { + return json_result; + } + + (*out_data)->file_type = cgltf_file_type_glb; + (*out_data)->bin = bin; + (*out_data)->bin_size = bin_size; + + return cgltf_result_success; +} + +cgltf_result cgltf_parse_file(const cgltf_options* options, const char* path, cgltf_data** out_data) +{ + if (options == NULL) + { + return cgltf_result_invalid_options; + } + + cgltf_result (*file_read)(const struct cgltf_memory_options*, const struct cgltf_file_options*, const char*, cgltf_size*, void**) = options->file.read ? options->file.read : &cgltf_default_file_read; + void (*file_release)(const struct cgltf_memory_options*, const struct cgltf_file_options*, void* data) = options->file.release ? options->file.release : cgltf_default_file_release; + + void* file_data = NULL; + cgltf_size file_size = 0; + cgltf_result result = file_read(&options->memory, &options->file, path, &file_size, &file_data); + if (result != cgltf_result_success) + { + return result; + } + + result = cgltf_parse(options, file_data, file_size, out_data); + + if (result != cgltf_result_success) + { + file_release(&options->memory, &options->file, file_data); + return result; + } + + (*out_data)->file_data = file_data; + + return cgltf_result_success; +} + +static void cgltf_combine_paths(char* path, const char* base, const char* uri) +{ + const char* s0 = strrchr(base, '/'); + const char* s1 = strrchr(base, '\\'); + const char* slash = s0 ? (s1 && s1 > s0 ? s1 : s0) : s1; + + if (slash) + { + size_t prefix = slash - base + 1; + + strncpy(path, base, prefix); + strcpy(path + prefix, uri); + } + else + { + strcpy(path, uri); + } +} + +static cgltf_result cgltf_load_buffer_file(const cgltf_options* options, cgltf_size size, const char* uri, const char* gltf_path, void** out_data) +{ + void* (*memory_alloc)(void*, cgltf_size) = options->memory.alloc ? options->memory.alloc : &cgltf_default_alloc; + void (*memory_free)(void*, void*) = options->memory.free ? 
options->memory.free : &cgltf_default_free; + cgltf_result (*file_read)(const struct cgltf_memory_options*, const struct cgltf_file_options*, const char*, cgltf_size*, void**) = options->file.read ? options->file.read : &cgltf_default_file_read; + + char* path = (char*)memory_alloc(options->memory.user_data, strlen(uri) + strlen(gltf_path) + 1); + if (!path) + { + return cgltf_result_out_of_memory; + } + + cgltf_combine_paths(path, gltf_path, uri); + + // after combining, the tail of the resulting path is a uri; decode_uri converts it into path + cgltf_decode_uri(path + strlen(path) - strlen(uri)); + + void* file_data = NULL; + cgltf_result result = file_read(&options->memory, &options->file, path, &size, &file_data); + + memory_free(options->memory.user_data, path); + + *out_data = (result == cgltf_result_success) ? file_data : NULL; + + return result; +} + +cgltf_result cgltf_load_buffer_base64(const cgltf_options* options, cgltf_size size, const char* base64, void** out_data) +{ + void* (*memory_alloc)(void*, cgltf_size) = options->memory.alloc ? options->memory.alloc : &cgltf_default_alloc; + void (*memory_free)(void*, void*) = options->memory.free ? options->memory.free : &cgltf_default_free; + + unsigned char* data = (unsigned char*)memory_alloc(options->memory.user_data, size); + if (!data) + { + return cgltf_result_out_of_memory; + } + + unsigned int buffer = 0; + unsigned int buffer_bits = 0; + + for (cgltf_size i = 0; i < size; ++i) + { + while (buffer_bits < 8) + { + char ch = *base64++; + + int index = + (unsigned)(ch - 'A') < 26 ? (ch - 'A') : + (unsigned)(ch - 'a') < 26 ? (ch - 'a') + 26 : + (unsigned)(ch - '0') < 10 ? (ch - '0') + 52 : + ch == '+' ? 62 : + ch == '/' ? 63 : + -1; + + if (index < 0) + { + memory_free(options->memory.user_data, data); + return cgltf_result_io_error; + } + + buffer = (buffer << 6) | index; + buffer_bits += 6; + } + + data[i] = (unsigned char)(buffer >> (buffer_bits - 8)); + buffer_bits -= 8; + } + + *out_data = data; + + return cgltf_result_success; +} + +static int cgltf_unhex(char ch) +{ + return + (unsigned)(ch - '0') < 10 ? (ch - '0') : + (unsigned)(ch - 'A') < 6 ? (ch - 'A') + 10 : + (unsigned)(ch - 'a') < 6 ? 
(ch - 'a') + 10 : + -1; +} + +cgltf_size cgltf_decode_string(char* string) +{ + char* read = string + strcspn(string, "\\"); + if (*read == 0) + { + return read - string; + } + char* write = string; + char* last = string; + + for (;;) + { + // Copy characters since last escaped sequence + cgltf_size written = read - last; + memmove(write, last, written); + write += written; + + if (*read++ == 0) + { + break; + } + + // jsmn already checked that all escape sequences are valid + switch (*read++) + { + case '\"': *write++ = '\"'; break; + case '/': *write++ = '/'; break; + case '\\': *write++ = '\\'; break; + case 'b': *write++ = '\b'; break; + case 'f': *write++ = '\f'; break; + case 'r': *write++ = '\r'; break; + case 'n': *write++ = '\n'; break; + case 't': *write++ = '\t'; break; + case 'u': + { + // UCS-2 codepoint \uXXXX to UTF-8 + int character = 0; + for (cgltf_size i = 0; i < 4; ++i) + { + character = (character << 4) + cgltf_unhex(*read++); + } + + if (character <= 0x7F) + { + *write++ = character & 0xFF; + } + else if (character <= 0x7FF) + { + *write++ = 0xC0 | ((character >> 6) & 0xFF); + *write++ = 0x80 | (character & 0x3F); + } + else + { + *write++ = 0xE0 | ((character >> 12) & 0xFF); + *write++ = 0x80 | ((character >> 6) & 0x3F); + *write++ = 0x80 | (character & 0x3F); + } + break; + } + default: + break; + } + + last = read; + read += strcspn(read, "\\"); + } + + *write = 0; + return write - string; +} + +cgltf_size cgltf_decode_uri(char* uri) +{ + char* write = uri; + char* i = uri; + + while (*i) + { + if (*i == '%') + { + int ch1 = cgltf_unhex(i[1]); + + if (ch1 >= 0) + { + int ch2 = cgltf_unhex(i[2]); + + if (ch2 >= 0) + { + *write++ = (char)(ch1 * 16 + ch2); + i += 3; + continue; + } + } + } + + *write++ = *i++; + } + + *write = 0; + return write - uri; +} + +cgltf_result cgltf_load_buffers(const cgltf_options* options, cgltf_data* data, const char* gltf_path) +{ + if (options == NULL) + { + return cgltf_result_invalid_options; + } + + if (data->buffers_count && data->buffers[0].data == NULL && data->buffers[0].uri == NULL && data->bin) + { + if (data->bin_size < data->buffers[0].size) + { + return cgltf_result_data_too_short; + } + + data->buffers[0].data = (void*)data->bin; + data->buffers[0].data_free_method = cgltf_data_free_method_none; + } + + for (cgltf_size i = 0; i < data->buffers_count; ++i) + { + if (data->buffers[i].data) + { + continue; + } + + const char* uri = data->buffers[i].uri; + + if (uri == NULL) + { + continue; + } + + if (strncmp(uri, "data:", 5) == 0) + { + const char* comma = strchr(uri, ','); + + if (comma && comma - uri >= 7 && strncmp(comma - 7, ";base64", 7) == 0) + { + cgltf_result res = cgltf_load_buffer_base64(options, data->buffers[i].size, comma + 1, &data->buffers[i].data); + data->buffers[i].data_free_method = cgltf_data_free_method_memory_free; + + if (res != cgltf_result_success) + { + return res; + } + } + else + { + return cgltf_result_unknown_format; + } + } + else if (strstr(uri, "://") == NULL && gltf_path) + { + cgltf_result res = cgltf_load_buffer_file(options, data->buffers[i].size, uri, gltf_path, &data->buffers[i].data); + data->buffers[i].data_free_method = cgltf_data_free_method_file_release; + + if (res != cgltf_result_success) + { + return res; + } + } + else + { + return cgltf_result_unknown_format; + } + } + + return cgltf_result_success; +} + +static cgltf_size cgltf_calc_size(cgltf_type type, cgltf_component_type component_type); + +static cgltf_size cgltf_calc_index_bound(cgltf_buffer_view* buffer_view, cgltf_size 
offset, cgltf_component_type component_type, cgltf_size count) +{ + char* data = (char*)buffer_view->buffer->data + offset + buffer_view->offset; + cgltf_size bound = 0; + + switch (component_type) + { + case cgltf_component_type_r_8u: + for (size_t i = 0; i < count; ++i) + { + cgltf_size v = ((unsigned char*)data)[i]; + bound = bound > v ? bound : v; + } + break; + + case cgltf_component_type_r_16u: + for (size_t i = 0; i < count; ++i) + { + cgltf_size v = ((unsigned short*)data)[i]; + bound = bound > v ? bound : v; + } + break; + + case cgltf_component_type_r_32u: + for (size_t i = 0; i < count; ++i) + { + cgltf_size v = ((unsigned int*)data)[i]; + bound = bound > v ? bound : v; + } + break; + + default: + ; + } + + return bound; +} + +#if CGLTF_VALIDATE_ENABLE_ASSERTS +#define CGLTF_ASSERT_IF(cond, result) assert(!(cond)); if (cond) return result; +#else +#define CGLTF_ASSERT_IF(cond, result) if (cond) return result; +#endif + +cgltf_result cgltf_validate(cgltf_data* data) +{ + for (cgltf_size i = 0; i < data->accessors_count; ++i) + { + cgltf_accessor* accessor = &data->accessors[i]; + + cgltf_size element_size = cgltf_calc_size(accessor->type, accessor->component_type); + + if (accessor->buffer_view) + { + cgltf_size req_size = accessor->offset + accessor->stride * (accessor->count - 1) + element_size; + + CGLTF_ASSERT_IF(accessor->buffer_view->size < req_size, cgltf_result_data_too_short); + } + + if (accessor->is_sparse) + { + cgltf_accessor_sparse* sparse = &accessor->sparse; + + cgltf_size indices_component_size = cgltf_calc_size(cgltf_type_scalar, sparse->indices_component_type); + cgltf_size indices_req_size = sparse->indices_byte_offset + indices_component_size * sparse->count; + cgltf_size values_req_size = sparse->values_byte_offset + element_size * sparse->count; + + CGLTF_ASSERT_IF(sparse->indices_buffer_view->size < indices_req_size || + sparse->values_buffer_view->size < values_req_size, cgltf_result_data_too_short); + + CGLTF_ASSERT_IF(sparse->indices_component_type != cgltf_component_type_r_8u && + sparse->indices_component_type != cgltf_component_type_r_16u && + sparse->indices_component_type != cgltf_component_type_r_32u, cgltf_result_invalid_gltf); + + if (sparse->indices_buffer_view->buffer->data) + { + cgltf_size index_bound = cgltf_calc_index_bound(sparse->indices_buffer_view, sparse->indices_byte_offset, sparse->indices_component_type, sparse->count); + + CGLTF_ASSERT_IF(index_bound >= accessor->count, cgltf_result_data_too_short); + } + } + } + + for (cgltf_size i = 0; i < data->buffer_views_count; ++i) + { + cgltf_size req_size = data->buffer_views[i].offset + data->buffer_views[i].size; + + CGLTF_ASSERT_IF(data->buffer_views[i].buffer && data->buffer_views[i].buffer->size < req_size, cgltf_result_data_too_short); + + if (data->buffer_views[i].has_meshopt_compression) + { + cgltf_meshopt_compression* mc = &data->buffer_views[i].meshopt_compression; + + CGLTF_ASSERT_IF(mc->buffer == NULL || mc->buffer->size < mc->offset + mc->size, cgltf_result_data_too_short); + + CGLTF_ASSERT_IF(data->buffer_views[i].stride && mc->stride != data->buffer_views[i].stride, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF(data->buffer_views[i].size != mc->stride * mc->count, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF(mc->mode == cgltf_meshopt_compression_mode_invalid, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF(mc->mode == cgltf_meshopt_compression_mode_attributes && !(mc->stride % 4 == 0 && mc->stride <= 256), cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF(mc->mode == 
cgltf_meshopt_compression_mode_triangles && mc->count % 3 != 0, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF((mc->mode == cgltf_meshopt_compression_mode_triangles || mc->mode == cgltf_meshopt_compression_mode_indices) && mc->stride != 2 && mc->stride != 4, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF((mc->mode == cgltf_meshopt_compression_mode_triangles || mc->mode == cgltf_meshopt_compression_mode_indices) && mc->filter != cgltf_meshopt_compression_filter_none, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF(mc->filter == cgltf_meshopt_compression_filter_octahedral && mc->stride != 4 && mc->stride != 8, cgltf_result_invalid_gltf); + + CGLTF_ASSERT_IF(mc->filter == cgltf_meshopt_compression_filter_quaternion && mc->stride != 8, cgltf_result_invalid_gltf); + } + } + + for (cgltf_size i = 0; i < data->meshes_count; ++i) + { + if (data->meshes[i].weights) + { + CGLTF_ASSERT_IF(data->meshes[i].primitives_count && data->meshes[i].primitives[0].targets_count != data->meshes[i].weights_count, cgltf_result_invalid_gltf); + } + + if (data->meshes[i].target_names) + { + CGLTF_ASSERT_IF(data->meshes[i].primitives_count && data->meshes[i].primitives[0].targets_count != data->meshes[i].target_names_count, cgltf_result_invalid_gltf); + } + + for (cgltf_size j = 0; j < data->meshes[i].primitives_count; ++j) + { + CGLTF_ASSERT_IF(data->meshes[i].primitives[j].targets_count != data->meshes[i].primitives[0].targets_count, cgltf_result_invalid_gltf); + + if (data->meshes[i].primitives[j].attributes_count) + { + cgltf_accessor* first = data->meshes[i].primitives[j].attributes[0].data; + + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].attributes_count; ++k) + { + CGLTF_ASSERT_IF(data->meshes[i].primitives[j].attributes[k].data->count != first->count, cgltf_result_invalid_gltf); + } + + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].targets_count; ++k) + { + for (cgltf_size m = 0; m < data->meshes[i].primitives[j].targets[k].attributes_count; ++m) + { + CGLTF_ASSERT_IF(data->meshes[i].primitives[j].targets[k].attributes[m].data->count != first->count, cgltf_result_invalid_gltf); + } + } + + cgltf_accessor* indices = data->meshes[i].primitives[j].indices; + + CGLTF_ASSERT_IF(indices && + indices->component_type != cgltf_component_type_r_8u && + indices->component_type != cgltf_component_type_r_16u && + indices->component_type != cgltf_component_type_r_32u, cgltf_result_invalid_gltf); + + if (indices && indices->buffer_view && indices->buffer_view->buffer->data) + { + cgltf_size index_bound = cgltf_calc_index_bound(indices->buffer_view, indices->offset, indices->component_type, indices->count); + + CGLTF_ASSERT_IF(index_bound >= first->count, cgltf_result_data_too_short); + } + + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].mappings_count; ++k) + { + CGLTF_ASSERT_IF(data->meshes[i].primitives[j].mappings[k].variant >= data->variants_count, cgltf_result_invalid_gltf); + } + } + } + } + + for (cgltf_size i = 0; i < data->nodes_count; ++i) + { + if (data->nodes[i].weights && data->nodes[i].mesh) + { + CGLTF_ASSERT_IF (data->nodes[i].mesh->primitives_count && data->nodes[i].mesh->primitives[0].targets_count != data->nodes[i].weights_count, cgltf_result_invalid_gltf); + } + } + + for (cgltf_size i = 0; i < data->nodes_count; ++i) + { + cgltf_node* p1 = data->nodes[i].parent; + cgltf_node* p2 = p1 ? p1->parent : NULL; + + while (p1 && p2) + { + CGLTF_ASSERT_IF(p1 == p2, cgltf_result_invalid_gltf); + + p1 = p1->parent; + p2 = p2->parent ? 
p2->parent->parent : NULL; + } + } + + for (cgltf_size i = 0; i < data->scenes_count; ++i) + { + for (cgltf_size j = 0; j < data->scenes[i].nodes_count; ++j) + { + CGLTF_ASSERT_IF(data->scenes[i].nodes[j]->parent, cgltf_result_invalid_gltf); + } + } + + for (cgltf_size i = 0; i < data->animations_count; ++i) + { + for (cgltf_size j = 0; j < data->animations[i].channels_count; ++j) + { + cgltf_animation_channel* channel = &data->animations[i].channels[j]; + + if (!channel->target_node) + { + continue; + } + + cgltf_size components = 1; + + if (channel->target_path == cgltf_animation_path_type_weights) + { + CGLTF_ASSERT_IF(!channel->target_node->mesh || !channel->target_node->mesh->primitives_count, cgltf_result_invalid_gltf); + + components = channel->target_node->mesh->primitives[0].targets_count; + } + + cgltf_size values = channel->sampler->interpolation == cgltf_interpolation_type_cubic_spline ? 3 : 1; + + CGLTF_ASSERT_IF(channel->sampler->input->count * components * values != channel->sampler->output->count, cgltf_result_data_too_short); + } + } + + return cgltf_result_success; +} + +cgltf_result cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* extras, char* dest, cgltf_size* dest_size) +{ + cgltf_size json_size = extras->end_offset - extras->start_offset; + + if (!dest) + { + if (dest_size) + { + *dest_size = json_size + 1; + return cgltf_result_success; + } + return cgltf_result_invalid_options; + } + + if (*dest_size + 1 < json_size) + { + strncpy(dest, data->json + extras->start_offset, *dest_size - 1); + dest[*dest_size - 1] = 0; + } + else + { + strncpy(dest, data->json + extras->start_offset, json_size); + dest[json_size] = 0; + } + + return cgltf_result_success; +} + +void cgltf_free_extensions(cgltf_data* data, cgltf_extension* extensions, cgltf_size extensions_count) +{ + for (cgltf_size i = 0; i < extensions_count; ++i) + { + data->memory.free(data->memory.user_data, extensions[i].name); + data->memory.free(data->memory.user_data, extensions[i].data); + } + data->memory.free(data->memory.user_data, extensions); +} + +void cgltf_free(cgltf_data* data) +{ + if (!data) + { + return; + } + + void (*file_release)(const struct cgltf_memory_options*, const struct cgltf_file_options*, void* data) = data->file.release ? 
data->file.release : cgltf_default_file_release; + + data->memory.free(data->memory.user_data, data->asset.copyright); + data->memory.free(data->memory.user_data, data->asset.generator); + data->memory.free(data->memory.user_data, data->asset.version); + data->memory.free(data->memory.user_data, data->asset.min_version); + + cgltf_free_extensions(data, data->asset.extensions, data->asset.extensions_count); + + for (cgltf_size i = 0; i < data->accessors_count; ++i) + { + data->memory.free(data->memory.user_data, data->accessors[i].name); + + if(data->accessors[i].is_sparse) + { + cgltf_free_extensions(data, data->accessors[i].sparse.extensions, data->accessors[i].sparse.extensions_count); + cgltf_free_extensions(data, data->accessors[i].sparse.indices_extensions, data->accessors[i].sparse.indices_extensions_count); + cgltf_free_extensions(data, data->accessors[i].sparse.values_extensions, data->accessors[i].sparse.values_extensions_count); + } + cgltf_free_extensions(data, data->accessors[i].extensions, data->accessors[i].extensions_count); + } + data->memory.free(data->memory.user_data, data->accessors); + + for (cgltf_size i = 0; i < data->buffer_views_count; ++i) + { + data->memory.free(data->memory.user_data, data->buffer_views[i].name); + data->memory.free(data->memory.user_data, data->buffer_views[i].data); + + cgltf_free_extensions(data, data->buffer_views[i].extensions, data->buffer_views[i].extensions_count); + } + data->memory.free(data->memory.user_data, data->buffer_views); + + for (cgltf_size i = 0; i < data->buffers_count; ++i) + { + data->memory.free(data->memory.user_data, data->buffers[i].name); + + if (data->buffers[i].data_free_method == cgltf_data_free_method_file_release) + { + file_release(&data->memory, &data->file, data->buffers[i].data); + } + else if (data->buffers[i].data_free_method == cgltf_data_free_method_memory_free) + { + data->memory.free(data->memory.user_data, data->buffers[i].data); + } + + data->memory.free(data->memory.user_data, data->buffers[i].uri); + + cgltf_free_extensions(data, data->buffers[i].extensions, data->buffers[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->buffers); + + for (cgltf_size i = 0; i < data->meshes_count; ++i) + { + data->memory.free(data->memory.user_data, data->meshes[i].name); + + for (cgltf_size j = 0; j < data->meshes[i].primitives_count; ++j) + { + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].attributes_count; ++k) + { + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].attributes[k].name); + } + + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].attributes); + + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].targets_count; ++k) + { + for (cgltf_size m = 0; m < data->meshes[i].primitives[j].targets[k].attributes_count; ++m) + { + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].targets[k].attributes[m].name); + } + + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].targets[k].attributes); + } + + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].targets); + + if (data->meshes[i].primitives[j].has_draco_mesh_compression) + { + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].draco_mesh_compression.attributes_count; ++k) + { + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].draco_mesh_compression.attributes[k].name); + } + + data->memory.free(data->memory.user_data, 
data->meshes[i].primitives[j].draco_mesh_compression.attributes); + } + + data->memory.free(data->memory.user_data, data->meshes[i].primitives[j].mappings); + + cgltf_free_extensions(data, data->meshes[i].primitives[j].extensions, data->meshes[i].primitives[j].extensions_count); + } + + data->memory.free(data->memory.user_data, data->meshes[i].primitives); + data->memory.free(data->memory.user_data, data->meshes[i].weights); + + for (cgltf_size j = 0; j < data->meshes[i].target_names_count; ++j) + { + data->memory.free(data->memory.user_data, data->meshes[i].target_names[j]); + } + + cgltf_free_extensions(data, data->meshes[i].extensions, data->meshes[i].extensions_count); + + data->memory.free(data->memory.user_data, data->meshes[i].target_names); + } + + data->memory.free(data->memory.user_data, data->meshes); + + for (cgltf_size i = 0; i < data->materials_count; ++i) + { + data->memory.free(data->memory.user_data, data->materials[i].name); + + if(data->materials[i].has_pbr_metallic_roughness) + { + cgltf_free_extensions(data, data->materials[i].pbr_metallic_roughness.metallic_roughness_texture.extensions, data->materials[i].pbr_metallic_roughness.metallic_roughness_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].pbr_metallic_roughness.base_color_texture.extensions, data->materials[i].pbr_metallic_roughness.base_color_texture.extensions_count); + } + if(data->materials[i].has_pbr_specular_glossiness) + { + cgltf_free_extensions(data, data->materials[i].pbr_specular_glossiness.diffuse_texture.extensions, data->materials[i].pbr_specular_glossiness.diffuse_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].pbr_specular_glossiness.specular_glossiness_texture.extensions, data->materials[i].pbr_specular_glossiness.specular_glossiness_texture.extensions_count); + } + if(data->materials[i].has_clearcoat) + { + cgltf_free_extensions(data, data->materials[i].clearcoat.clearcoat_texture.extensions, data->materials[i].clearcoat.clearcoat_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].clearcoat.clearcoat_roughness_texture.extensions, data->materials[i].clearcoat.clearcoat_roughness_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].clearcoat.clearcoat_normal_texture.extensions, data->materials[i].clearcoat.clearcoat_normal_texture.extensions_count); + } + if(data->materials[i].has_specular) + { + cgltf_free_extensions(data, data->materials[i].specular.specular_texture.extensions, data->materials[i].specular.specular_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].specular.specular_color_texture.extensions, data->materials[i].specular.specular_color_texture.extensions_count); + } + if(data->materials[i].has_transmission) + { + cgltf_free_extensions(data, data->materials[i].transmission.transmission_texture.extensions, data->materials[i].transmission.transmission_texture.extensions_count); + } + if (data->materials[i].has_volume) + { + cgltf_free_extensions(data, data->materials[i].volume.thickness_texture.extensions, data->materials[i].volume.thickness_texture.extensions_count); + } + if(data->materials[i].has_sheen) + { + cgltf_free_extensions(data, data->materials[i].sheen.sheen_color_texture.extensions, data->materials[i].sheen.sheen_color_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].sheen.sheen_roughness_texture.extensions, data->materials[i].sheen.sheen_roughness_texture.extensions_count); + } + + cgltf_free_extensions(data, 
data->materials[i].normal_texture.extensions, data->materials[i].normal_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].occlusion_texture.extensions, data->materials[i].occlusion_texture.extensions_count); + cgltf_free_extensions(data, data->materials[i].emissive_texture.extensions, data->materials[i].emissive_texture.extensions_count); + + cgltf_free_extensions(data, data->materials[i].extensions, data->materials[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->materials); + + for (cgltf_size i = 0; i < data->images_count; ++i) + { + data->memory.free(data->memory.user_data, data->images[i].name); + data->memory.free(data->memory.user_data, data->images[i].uri); + data->memory.free(data->memory.user_data, data->images[i].mime_type); + + cgltf_free_extensions(data, data->images[i].extensions, data->images[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->images); + + for (cgltf_size i = 0; i < data->textures_count; ++i) + { + data->memory.free(data->memory.user_data, data->textures[i].name); + cgltf_free_extensions(data, data->textures[i].extensions, data->textures[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->textures); + + for (cgltf_size i = 0; i < data->samplers_count; ++i) + { + data->memory.free(data->memory.user_data, data->samplers[i].name); + cgltf_free_extensions(data, data->samplers[i].extensions, data->samplers[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->samplers); + + for (cgltf_size i = 0; i < data->skins_count; ++i) + { + data->memory.free(data->memory.user_data, data->skins[i].name); + data->memory.free(data->memory.user_data, data->skins[i].joints); + + cgltf_free_extensions(data, data->skins[i].extensions, data->skins[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->skins); + + for (cgltf_size i = 0; i < data->cameras_count; ++i) + { + data->memory.free(data->memory.user_data, data->cameras[i].name); + cgltf_free_extensions(data, data->cameras[i].extensions, data->cameras[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->cameras); + + for (cgltf_size i = 0; i < data->lights_count; ++i) + { + data->memory.free(data->memory.user_data, data->lights[i].name); + } + + data->memory.free(data->memory.user_data, data->lights); + + for (cgltf_size i = 0; i < data->nodes_count; ++i) + { + data->memory.free(data->memory.user_data, data->nodes[i].name); + data->memory.free(data->memory.user_data, data->nodes[i].children); + data->memory.free(data->memory.user_data, data->nodes[i].weights); + cgltf_free_extensions(data, data->nodes[i].extensions, data->nodes[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->nodes); + + for (cgltf_size i = 0; i < data->scenes_count; ++i) + { + data->memory.free(data->memory.user_data, data->scenes[i].name); + data->memory.free(data->memory.user_data, data->scenes[i].nodes); + + cgltf_free_extensions(data, data->scenes[i].extensions, data->scenes[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->scenes); + + for (cgltf_size i = 0; i < data->animations_count; ++i) + { + data->memory.free(data->memory.user_data, data->animations[i].name); + for (cgltf_size j = 0; j < data->animations[i].samplers_count; ++j) + { + cgltf_free_extensions(data, data->animations[i].samplers[j].extensions, data->animations[i].samplers[j].extensions_count); + } + data->memory.free(data->memory.user_data, 
data->animations[i].samplers); + + for (cgltf_size j = 0; j < data->animations[i].channels_count; ++j) + { + cgltf_free_extensions(data, data->animations[i].channels[j].extensions, data->animations[i].channels[j].extensions_count); + } + data->memory.free(data->memory.user_data, data->animations[i].channels); + + cgltf_free_extensions(data, data->animations[i].extensions, data->animations[i].extensions_count); + } + + data->memory.free(data->memory.user_data, data->animations); + + for (cgltf_size i = 0; i < data->variants_count; ++i) + { + data->memory.free(data->memory.user_data, data->variants[i].name); + } + + data->memory.free(data->memory.user_data, data->variants); + + cgltf_free_extensions(data, data->data_extensions, data->data_extensions_count); + + for (cgltf_size i = 0; i < data->extensions_used_count; ++i) + { + data->memory.free(data->memory.user_data, data->extensions_used[i]); + } + + data->memory.free(data->memory.user_data, data->extensions_used); + + for (cgltf_size i = 0; i < data->extensions_required_count; ++i) + { + data->memory.free(data->memory.user_data, data->extensions_required[i]); + } + + data->memory.free(data->memory.user_data, data->extensions_required); + + file_release(&data->memory, &data->file, data->file_data); + + data->memory.free(data->memory.user_data, data); +} + +void cgltf_node_transform_local(const cgltf_node* node, cgltf_float* out_matrix) +{ + cgltf_float* lm = out_matrix; + + if (node->has_matrix) + { + memcpy(lm, node->matrix, sizeof(float) * 16); + } + else + { + float tx = node->translation[0]; + float ty = node->translation[1]; + float tz = node->translation[2]; + + float qx = node->rotation[0]; + float qy = node->rotation[1]; + float qz = node->rotation[2]; + float qw = node->rotation[3]; + + float sx = node->scale[0]; + float sy = node->scale[1]; + float sz = node->scale[2]; + + lm[0] = (1 - 2 * qy*qy - 2 * qz*qz) * sx; + lm[1] = (2 * qx*qy + 2 * qz*qw) * sx; + lm[2] = (2 * qx*qz - 2 * qy*qw) * sx; + lm[3] = 0.f; + + lm[4] = (2 * qx*qy - 2 * qz*qw) * sy; + lm[5] = (1 - 2 * qx*qx - 2 * qz*qz) * sy; + lm[6] = (2 * qy*qz + 2 * qx*qw) * sy; + lm[7] = 0.f; + + lm[8] = (2 * qx*qz + 2 * qy*qw) * sz; + lm[9] = (2 * qy*qz - 2 * qx*qw) * sz; + lm[10] = (1 - 2 * qx*qx - 2 * qy*qy) * sz; + lm[11] = 0.f; + + lm[12] = tx; + lm[13] = ty; + lm[14] = tz; + lm[15] = 1.f; + } +} + +void cgltf_node_transform_world(const cgltf_node* node, cgltf_float* out_matrix) +{ + cgltf_float* lm = out_matrix; + cgltf_node_transform_local(node, lm); + + const cgltf_node* parent = node->parent; + + while (parent) + { + float pm[16]; + cgltf_node_transform_local(parent, pm); + + for (int i = 0; i < 4; ++i) + { + float l0 = lm[i * 4 + 0]; + float l1 = lm[i * 4 + 1]; + float l2 = lm[i * 4 + 2]; + + float r0 = l0 * pm[0] + l1 * pm[4] + l2 * pm[8]; + float r1 = l0 * pm[1] + l1 * pm[5] + l2 * pm[9]; + float r2 = l0 * pm[2] + l1 * pm[6] + l2 * pm[10]; + + lm[i * 4 + 0] = r0; + lm[i * 4 + 1] = r1; + lm[i * 4 + 2] = r2; + } + + lm[12] += pm[12]; + lm[13] += pm[13]; + lm[14] += pm[14]; + + parent = parent->parent; + } +} + +static cgltf_size cgltf_component_read_index(const void* in, cgltf_component_type component_type) +{ + switch (component_type) + { + case cgltf_component_type_r_16: + return *((const int16_t*) in); + case cgltf_component_type_r_16u: + return *((const uint16_t*) in); + case cgltf_component_type_r_32u: + return *((const uint32_t*) in); + case cgltf_component_type_r_32f: + return (cgltf_size)*((const float*) in); + case cgltf_component_type_r_8: + return *((const 
int8_t*) in); + case cgltf_component_type_r_8u: + return *((const uint8_t*) in); + default: + return 0; + } +} + +static cgltf_float cgltf_component_read_float(const void* in, cgltf_component_type component_type, cgltf_bool normalized) +{ + if (component_type == cgltf_component_type_r_32f) + { + return *((const float*) in); + } + + if (normalized) + { + switch (component_type) + { + // note: glTF spec doesn't currently define normalized conversions for 32-bit integers + case cgltf_component_type_r_16: + return *((const int16_t*) in) / (cgltf_float)32767; + case cgltf_component_type_r_16u: + return *((const uint16_t*) in) / (cgltf_float)65535; + case cgltf_component_type_r_8: + return *((const int8_t*) in) / (cgltf_float)127; + case cgltf_component_type_r_8u: + return *((const uint8_t*) in) / (cgltf_float)255; + default: + return 0; + } + } + + return (cgltf_float)cgltf_component_read_index(in, component_type); +} + +static cgltf_size cgltf_component_size(cgltf_component_type component_type); + +static cgltf_bool cgltf_element_read_float(const uint8_t* element, cgltf_type type, cgltf_component_type component_type, cgltf_bool normalized, cgltf_float* out, cgltf_size element_size) +{ + cgltf_size num_components = cgltf_num_components(type); + + if (element_size < num_components) { + return 0; + } + + // There are three special cases for component extraction, see #data-alignment in the 2.0 spec. + + cgltf_size component_size = cgltf_component_size(component_type); + + if (type == cgltf_type_mat2 && component_size == 1) + { + out[0] = cgltf_component_read_float(element, component_type, normalized); + out[1] = cgltf_component_read_float(element + 1, component_type, normalized); + out[2] = cgltf_component_read_float(element + 4, component_type, normalized); + out[3] = cgltf_component_read_float(element + 5, component_type, normalized); + return 1; + } + + if (type == cgltf_type_mat3 && component_size == 1) + { + out[0] = cgltf_component_read_float(element, component_type, normalized); + out[1] = cgltf_component_read_float(element + 1, component_type, normalized); + out[2] = cgltf_component_read_float(element + 2, component_type, normalized); + out[3] = cgltf_component_read_float(element + 4, component_type, normalized); + out[4] = cgltf_component_read_float(element + 5, component_type, normalized); + out[5] = cgltf_component_read_float(element + 6, component_type, normalized); + out[6] = cgltf_component_read_float(element + 8, component_type, normalized); + out[7] = cgltf_component_read_float(element + 9, component_type, normalized); + out[8] = cgltf_component_read_float(element + 10, component_type, normalized); + return 1; + } + + if (type == cgltf_type_mat3 && component_size == 2) + { + out[0] = cgltf_component_read_float(element, component_type, normalized); + out[1] = cgltf_component_read_float(element + 2, component_type, normalized); + out[2] = cgltf_component_read_float(element + 4, component_type, normalized); + out[3] = cgltf_component_read_float(element + 8, component_type, normalized); + out[4] = cgltf_component_read_float(element + 10, component_type, normalized); + out[5] = cgltf_component_read_float(element + 12, component_type, normalized); + out[6] = cgltf_component_read_float(element + 16, component_type, normalized); + out[7] = cgltf_component_read_float(element + 18, component_type, normalized); + out[8] = cgltf_component_read_float(element + 20, component_type, normalized); + return 1; + } + + for (cgltf_size i = 0; i < num_components; ++i) + { + out[i] = 
cgltf_component_read_float(element + component_size * i, component_type, normalized); + } + return 1; +} + +const uint8_t* cgltf_buffer_view_data(const cgltf_buffer_view* view) +{ + if (view->data) + return (const uint8_t*)view->data; + + if (!view->buffer->data) + return NULL; + + const uint8_t* result = (const uint8_t*)view->buffer->data; + result += view->offset; + return result; +} + +cgltf_bool cgltf_accessor_read_float(const cgltf_accessor* accessor, cgltf_size index, cgltf_float* out, cgltf_size element_size) +{ + if (accessor->is_sparse) + { + return 0; + } + if (accessor->buffer_view == NULL) + { + memset(out, 0, element_size * sizeof(cgltf_float)); + return 1; + } + const uint8_t* element = cgltf_buffer_view_data(accessor->buffer_view); + if (element == NULL) + { + return 0; + } + element += accessor->offset + accessor->stride * index; + return cgltf_element_read_float(element, accessor->type, accessor->component_type, accessor->normalized, out, element_size); +} + +cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_float* out, cgltf_size float_count) +{ + cgltf_size floats_per_element = cgltf_num_components(accessor->type); + cgltf_size available_floats = accessor->count * floats_per_element; + if (out == NULL) + { + return available_floats; + } + + float_count = available_floats < float_count ? available_floats : float_count; + cgltf_size element_count = float_count / floats_per_element; + + // First pass: convert each element in the base accessor. + cgltf_float* dest = out; + cgltf_accessor dense = *accessor; + dense.is_sparse = 0; + for (cgltf_size index = 0; index < element_count; index++, dest += floats_per_element) + { + if (!cgltf_accessor_read_float(&dense, index, dest, floats_per_element)) + { + return 0; + } + } + + // Second pass: write out each element in the sparse accessor. 
+ if (accessor->is_sparse) + { + const cgltf_accessor_sparse* sparse = &dense.sparse; + + const uint8_t* index_data = cgltf_buffer_view_data(sparse->indices_buffer_view); + const uint8_t* reader_head = cgltf_buffer_view_data(sparse->values_buffer_view); + + if (index_data == NULL || reader_head == NULL) + { + return 0; + } + + index_data += sparse->indices_byte_offset; + reader_head += sparse->values_byte_offset; + + cgltf_size index_stride = cgltf_component_size(sparse->indices_component_type); + for (cgltf_size reader_index = 0; reader_index < sparse->count; reader_index++, index_data += index_stride) + { + size_t writer_index = cgltf_component_read_index(index_data, sparse->indices_component_type); + float* writer_head = out + writer_index * floats_per_element; + + if (!cgltf_element_read_float(reader_head, dense.type, dense.component_type, dense.normalized, writer_head, floats_per_element)) + { + return 0; + } + + reader_head += dense.stride; + } + } + + return element_count * floats_per_element; +} + +static cgltf_uint cgltf_component_read_uint(const void* in, cgltf_component_type component_type) +{ + switch (component_type) + { + case cgltf_component_type_r_8: + return *((const int8_t*) in); + + case cgltf_component_type_r_8u: + return *((const uint8_t*) in); + + case cgltf_component_type_r_16: + return *((const int16_t*) in); + + case cgltf_component_type_r_16u: + return *((const uint16_t*) in); + + case cgltf_component_type_r_32u: + return *((const uint32_t*) in); + + default: + return 0; + } +} + +static cgltf_bool cgltf_element_read_uint(const uint8_t* element, cgltf_type type, cgltf_component_type component_type, cgltf_uint* out, cgltf_size element_size) +{ + cgltf_size num_components = cgltf_num_components(type); + + if (element_size < num_components) + { + return 0; + } + + // Reading integer matrices is not a valid use case + if (type == cgltf_type_mat2 || type == cgltf_type_mat3 || type == cgltf_type_mat4) + { + return 0; + } + + cgltf_size component_size = cgltf_component_size(component_type); + + for (cgltf_size i = 0; i < num_components; ++i) + { + out[i] = cgltf_component_read_uint(element + component_size * i, component_type); + } + return 1; +} + +cgltf_bool cgltf_accessor_read_uint(const cgltf_accessor* accessor, cgltf_size index, cgltf_uint* out, cgltf_size element_size) +{ + if (accessor->is_sparse) + { + return 0; + } + if (accessor->buffer_view == NULL) + { + memset(out, 0, element_size * sizeof( cgltf_uint )); + return 1; + } + const uint8_t* element = cgltf_buffer_view_data(accessor->buffer_view); + if (element == NULL) + { + return 0; + } + element += accessor->offset + accessor->stride * index; + return cgltf_element_read_uint(element, accessor->type, accessor->component_type, out, element_size); +} + +cgltf_size cgltf_accessor_read_index(const cgltf_accessor* accessor, cgltf_size index) +{ + if (accessor->is_sparse) + { + return 0; // This is an error case, but we can't communicate the error with existing interface. + } + if (accessor->buffer_view == NULL) + { + return 0; + } + const uint8_t* element = cgltf_buffer_view_data(accessor->buffer_view); + if (element == NULL) + { + return 0; // This is an error case, but we can't communicate the error with existing interface. 
+ } + element += accessor->offset + accessor->stride * index; + return cgltf_component_read_index(element, accessor->component_type); +} + +#define CGLTF_ERROR_JSON -1 +#define CGLTF_ERROR_NOMEM -2 +#define CGLTF_ERROR_LEGACY -3 + +#define CGLTF_CHECK_TOKTYPE(tok_, type_) if ((tok_).type != (type_)) { return CGLTF_ERROR_JSON; } +#define CGLTF_CHECK_TOKTYPE_RETTYPE(tok_, type_, ret_) if ((tok_).type != (type_)) { return (ret_)CGLTF_ERROR_JSON; } +#define CGLTF_CHECK_KEY(tok_) if ((tok_).type != JSMN_STRING || (tok_).size == 0) { return CGLTF_ERROR_JSON; } /* checking size for 0 verifies that a value follows the key */ + +#define CGLTF_PTRINDEX(type, idx) (type*)((cgltf_size)idx + 1) +#define CGLTF_PTRFIXUP(var, data, size) if (var) { if ((cgltf_size)var > size) { return CGLTF_ERROR_JSON; } var = &data[(cgltf_size)var-1]; } +#define CGLTF_PTRFIXUP_REQ(var, data, size) if (!var || (cgltf_size)var > size) { return CGLTF_ERROR_JSON; } var = &data[(cgltf_size)var-1]; + +static int cgltf_json_strcmp(jsmntok_t const* tok, const uint8_t* json_chunk, const char* str) +{ + CGLTF_CHECK_TOKTYPE(*tok, JSMN_STRING); + size_t const str_len = strlen(str); + size_t const name_length = tok->end - tok->start; + return (str_len == name_length) ? strncmp((const char*)json_chunk + tok->start, str, str_len) : 128; +} + +static int cgltf_json_to_int(jsmntok_t const* tok, const uint8_t* json_chunk) +{ + CGLTF_CHECK_TOKTYPE(*tok, JSMN_PRIMITIVE); + char tmp[128]; + int size = (cgltf_size)(tok->end - tok->start) < sizeof(tmp) ? tok->end - tok->start : (int)(sizeof(tmp) - 1); + strncpy(tmp, (const char*)json_chunk + tok->start, size); + tmp[size] = 0; + return CGLTF_ATOI(tmp); +} + +static cgltf_size cgltf_json_to_size(jsmntok_t const* tok, const uint8_t* json_chunk) +{ + CGLTF_CHECK_TOKTYPE_RETTYPE(*tok, JSMN_PRIMITIVE, cgltf_size); + char tmp[128]; + int size = (cgltf_size)(tok->end - tok->start) < sizeof(tmp) ? tok->end - tok->start : (int)(sizeof(tmp) - 1); + strncpy(tmp, (const char*)json_chunk + tok->start, size); + tmp[size] = 0; + return (cgltf_size)CGLTF_ATOLL(tmp); +} + +static cgltf_float cgltf_json_to_float(jsmntok_t const* tok, const uint8_t* json_chunk) +{ + CGLTF_CHECK_TOKTYPE(*tok, JSMN_PRIMITIVE); + char tmp[128]; + int size = (cgltf_size)(tok->end - tok->start) < sizeof(tmp) ? 
tok->end - tok->start : (int)(sizeof(tmp) - 1); + strncpy(tmp, (const char*)json_chunk + tok->start, size); + tmp[size] = 0; + return (cgltf_float)CGLTF_ATOF(tmp); +} + +static cgltf_bool cgltf_json_to_bool(jsmntok_t const* tok, const uint8_t* json_chunk) +{ + int size = tok->end - tok->start; + return size == 4 && memcmp(json_chunk + tok->start, "true", 4) == 0; +} + +static int cgltf_skip_json(jsmntok_t const* tokens, int i) +{ + int end = i + 1; + + while (i < end) + { + switch (tokens[i].type) + { + case JSMN_OBJECT: + end += tokens[i].size * 2; + break; + + case JSMN_ARRAY: + end += tokens[i].size; + break; + + case JSMN_PRIMITIVE: + case JSMN_STRING: + break; + + default: + return -1; + } + + i++; + } + + return i; +} + +static void cgltf_fill_float_array(float* out_array, int size, float value) +{ + for (int j = 0; j < size; ++j) + { + out_array[j] = value; + } +} + +static int cgltf_parse_json_float_array(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, float* out_array, int size) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_ARRAY); + if (tokens[i].size != size) + { + return CGLTF_ERROR_JSON; + } + ++i; + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_array[j] = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + return i; +} + +static int cgltf_parse_json_string(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, char** out_string) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_STRING); + if (*out_string) + { + return CGLTF_ERROR_JSON; + } + int size = tokens[i].end - tokens[i].start; + char* result = (char*)options->memory.alloc(options->memory.user_data, size + 1); + if (!result) + { + return CGLTF_ERROR_NOMEM; + } + strncpy(result, (const char*)json_chunk + tokens[i].start, size); + result[size] = 0; + *out_string = result; + return i + 1; +} + +static int cgltf_parse_json_array(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, size_t element_size, void** out_array, cgltf_size* out_size) +{ + (void)json_chunk; + if (tokens[i].type != JSMN_ARRAY) + { + return tokens[i].type == JSMN_OBJECT ? CGLTF_ERROR_LEGACY : CGLTF_ERROR_JSON; + } + if (*out_array) + { + return CGLTF_ERROR_JSON; + } + int size = tokens[i].size; + void* result = cgltf_calloc(options, element_size, size); + if (!result) + { + return CGLTF_ERROR_NOMEM; + } + *out_array = result; + *out_size = size; + return i + 1; +} + +static int cgltf_parse_json_string_array(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, char*** out_array, cgltf_size* out_size) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_ARRAY); + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(char*), (void**)out_array, out_size); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < *out_size; ++j) + { + i = cgltf_parse_json_string(options, tokens, i, json_chunk, j + (*out_array)); + if (i < 0) + { + return i; + } + } + return i; +} + +static void cgltf_parse_attribute_type(const char* name, cgltf_attribute_type* out_type, int* out_index) +{ + const char* us = strchr(name, '_'); + size_t len = us ? 
(size_t)(us - name) : strlen(name); + + if (len == 8 && strncmp(name, "POSITION", 8) == 0) + { + *out_type = cgltf_attribute_type_position; + } + else if (len == 6 && strncmp(name, "NORMAL", 6) == 0) + { + *out_type = cgltf_attribute_type_normal; + } + else if (len == 7 && strncmp(name, "TANGENT", 7) == 0) + { + *out_type = cgltf_attribute_type_tangent; + } + else if (len == 8 && strncmp(name, "TEXCOORD", 8) == 0) + { + *out_type = cgltf_attribute_type_texcoord; + } + else if (len == 5 && strncmp(name, "COLOR", 5) == 0) + { + *out_type = cgltf_attribute_type_color; + } + else if (len == 6 && strncmp(name, "JOINTS", 6) == 0) + { + *out_type = cgltf_attribute_type_joints; + } + else if (len == 7 && strncmp(name, "WEIGHTS", 7) == 0) + { + *out_type = cgltf_attribute_type_weights; + } + else + { + *out_type = cgltf_attribute_type_invalid; + } + + if (us && *out_type != cgltf_attribute_type_invalid) + { + *out_index = CGLTF_ATOI(us + 1); + } +} + +static int cgltf_parse_json_attribute_list(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_attribute** out_attributes, cgltf_size* out_attributes_count) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + if (*out_attributes) + { + return CGLTF_ERROR_JSON; + } + + *out_attributes_count = tokens[i].size; + *out_attributes = (cgltf_attribute*)cgltf_calloc(options, sizeof(cgltf_attribute), *out_attributes_count); + ++i; + + if (!*out_attributes) + { + return CGLTF_ERROR_NOMEM; + } + + for (cgltf_size j = 0; j < *out_attributes_count; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + i = cgltf_parse_json_string(options, tokens, i, json_chunk, &(*out_attributes)[j].name); + if (i < 0) + { + return CGLTF_ERROR_JSON; + } + + cgltf_parse_attribute_type((*out_attributes)[j].name, &(*out_attributes)[j].type, &(*out_attributes)[j].index); + + (*out_attributes)[j].data = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + + return i; +} + +static int cgltf_parse_json_extras(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_extras* out_extras) +{ + (void)json_chunk; + out_extras->start_offset = tokens[i].start; + out_extras->end_offset = tokens[i].end; + i = cgltf_skip_json(tokens, i); + return i; +} + +static int cgltf_parse_json_unprocessed_extension(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_extension* out_extension) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_STRING); + CGLTF_CHECK_TOKTYPE(tokens[i+1], JSMN_OBJECT); + if (out_extension->name) + { + return CGLTF_ERROR_JSON; + } + + cgltf_size name_length = tokens[i].end - tokens[i].start; + out_extension->name = (char*)options->memory.alloc(options->memory.user_data, name_length + 1); + if (!out_extension->name) + { + return CGLTF_ERROR_NOMEM; + } + strncpy(out_extension->name, (const char*)json_chunk + tokens[i].start, name_length); + out_extension->name[name_length] = 0; + i++; + + size_t start = tokens[i].start; + size_t size = tokens[i].end - start; + out_extension->data = (char*)options->memory.alloc(options->memory.user_data, size + 1); + if (!out_extension->data) + { + return CGLTF_ERROR_NOMEM; + } + strncpy(out_extension->data, (const char*)json_chunk + start, size); + out_extension->data[size] = '\0'; + + i = cgltf_skip_json(tokens, i); + + return i; +} + +static int cgltf_parse_json_unprocessed_extensions(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_size* out_extensions_count, cgltf_extension** out_extensions) +{ + ++i; + + 
CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(*out_extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + *out_extensions_count = 0; + *out_extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + + if (!*out_extensions) + { + return CGLTF_ERROR_NOMEM; + } + + ++i; + + for (int j = 0; j < extensions_size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + cgltf_size extension_index = (*out_extensions_count)++; + cgltf_extension* extension = &((*out_extensions)[extension_index]); + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, extension); + + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_draco_mesh_compression(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_draco_mesh_compression* out_draco_mesh_compression) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "attributes") == 0) + { + i = cgltf_parse_json_attribute_list(options, tokens, i + 1, json_chunk, &out_draco_mesh_compression->attributes, &out_draco_mesh_compression->attributes_count); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "bufferView") == 0) + { + ++i; + out_draco_mesh_compression->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_material_mapping_data(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_material_mapping* out_mappings, cgltf_size* offset) +{ + (void)options; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_ARRAY); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int obj_size = tokens[i].size; + ++i; + + int material = -1; + int variants_tok = -1; + cgltf_extras extras = {0, 0}; + + for (int k = 0; k < obj_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "material") == 0) + { + ++i; + material = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "variants") == 0) + { + variants_tok = i+1; + CGLTF_CHECK_TOKTYPE(tokens[variants_tok], JSMN_ARRAY); + + i = cgltf_skip_json(tokens, i+1); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &extras); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + if (material < 0 || variants_tok < 0) + { + return CGLTF_ERROR_JSON; + } + + if (out_mappings) + { + for (int k = 0; k < tokens[variants_tok].size; ++k) + { + int variant = cgltf_json_to_int(&tokens[variants_tok + 1 + k], json_chunk); + if (variant < 0) + return variant; + + out_mappings[*offset].material = CGLTF_PTRINDEX(cgltf_material, material); + out_mappings[*offset].variant = variant; + out_mappings[*offset].extras = extras; + + (*offset)++; + } + } + else + { + (*offset) += tokens[variants_tok].size; + } + } + + return i; +} + +static int cgltf_parse_json_material_mappings(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_primitive* out_prim) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + 
CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "mappings") == 0) + { + if (out_prim->mappings) + { + return CGLTF_ERROR_JSON; + } + + cgltf_size mappings_offset = 0; + int k = cgltf_parse_json_material_mapping_data(options, tokens, i + 1, json_chunk, NULL, &mappings_offset); + if (k < 0) + { + return k; + } + + out_prim->mappings_count = mappings_offset; + out_prim->mappings = (cgltf_material_mapping*)cgltf_calloc(options, sizeof(cgltf_material_mapping), out_prim->mappings_count); + + mappings_offset = 0; + i = cgltf_parse_json_material_mapping_data(options, tokens, i + 1, json_chunk, out_prim->mappings, &mappings_offset); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_primitive(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_primitive* out_prim) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + out_prim->type = cgltf_primitive_type_triangles; + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "mode") == 0) + { + ++i; + out_prim->type + = (cgltf_primitive_type) + cgltf_json_to_int(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "indices") == 0) + { + ++i; + out_prim->indices = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "material") == 0) + { + ++i; + out_prim->material = CGLTF_PTRINDEX(cgltf_material, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "attributes") == 0) + { + i = cgltf_parse_json_attribute_list(options, tokens, i + 1, json_chunk, &out_prim->attributes, &out_prim->attributes_count); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "targets") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_morph_target), (void**)&out_prim->targets, &out_prim->targets_count); + if (i < 0) + { + return i; + } + + for (cgltf_size k = 0; k < out_prim->targets_count; ++k) + { + i = cgltf_parse_json_attribute_list(options, tokens, i, json_chunk, &out_prim->targets[k].attributes, &out_prim->targets[k].attributes_count); + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_prim->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(out_prim->extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + out_prim->extensions_count = 0; + out_prim->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + + if (!out_prim->extensions) + { + return CGLTF_ERROR_NOMEM; + } + + ++i; + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_draco_mesh_compression") == 0) + { + out_prim->has_draco_mesh_compression = 1; + i = cgltf_parse_json_draco_mesh_compression(options, tokens, i + 1, json_chunk, &out_prim->draco_mesh_compression); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_variants") == 0) + { + i = cgltf_parse_json_material_mappings(options, tokens, i + 1, json_chunk, out_prim); + } + else + { + i = 
cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_prim->extensions[out_prim->extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_mesh(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_mesh* out_mesh) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_mesh->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "primitives") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_primitive), (void**)&out_mesh->primitives, &out_mesh->primitives_count); + if (i < 0) + { + return i; + } + + for (cgltf_size prim_index = 0; prim_index < out_mesh->primitives_count; ++prim_index) + { + i = cgltf_parse_json_primitive(options, tokens, i, json_chunk, &out_mesh->primitives[prim_index]); + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "weights") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_float), (void**)&out_mesh->weights, &out_mesh->weights_count); + if (i < 0) + { + return i; + } + + i = cgltf_parse_json_float_array(tokens, i - 1, json_chunk, out_mesh->weights, (int)out_mesh->weights_count); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + ++i; + + out_mesh->extras.start_offset = tokens[i].start; + out_mesh->extras.end_offset = tokens[i].end; + + if (tokens[i].type == JSMN_OBJECT) + { + int extras_size = tokens[i].size; + ++i; + + for (int k = 0; k < extras_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "targetNames") == 0 && tokens[i+1].type == JSMN_ARRAY) + { + i = cgltf_parse_json_string_array(options, tokens, i + 1, json_chunk, &out_mesh->target_names, &out_mesh->target_names_count); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i); + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_mesh->extensions_count, &out_mesh->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_meshes(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_mesh), (void**)&out_data->meshes, &out_data->meshes_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->meshes_count; ++j) + { + i = cgltf_parse_json_mesh(options, tokens, i, json_chunk, &out_data->meshes[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static cgltf_component_type cgltf_json_to_component_type(jsmntok_t const* tok, const uint8_t* json_chunk) +{ + int type = cgltf_json_to_int(tok, json_chunk); + + switch (type) + { + case 5120: + return cgltf_component_type_r_8; + case 5121: + return cgltf_component_type_r_8u; + case 5122: + return cgltf_component_type_r_16; + case 5123: + return cgltf_component_type_r_16u; + case 5125: + 
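+ /* Reviewer annotation (not part of upstream cgltf): these numeric cases are the glTF/OpenGL
+  * componentType enumerants: 5120 BYTE, 5121 UNSIGNED_BYTE, 5122 SHORT, 5123 UNSIGNED_SHORT,
+  * 5125 UNSIGNED_INT (this case), 5126 FLOAT. 5124 (INT) is deliberately absent because
+  * glTF 2.0 does not allow signed 32-bit accessors; unknown values map to
+  * cgltf_component_type_invalid in the default branch below. */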
return cgltf_component_type_r_32u; + case 5126: + return cgltf_component_type_r_32f; + default: + return cgltf_component_type_invalid; + } +} + +static int cgltf_parse_json_accessor_sparse(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_accessor_sparse* out_sparse) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "count") == 0) + { + ++i; + out_sparse->count = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "indices") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int indices_size = tokens[i].size; + ++i; + + for (int k = 0; k < indices_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0) + { + ++i; + out_sparse->indices_buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0) + { + ++i; + out_sparse->indices_byte_offset = cgltf_json_to_size(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "componentType") == 0) + { + ++i; + out_sparse->indices_component_type = cgltf_json_to_component_type(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sparse->indices_extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->indices_extensions_count, &out_sparse->indices_extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "values") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int values_size = tokens[i].size; + ++i; + + for (int k = 0; k < values_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0) + { + ++i; + out_sparse->values_buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0) + { + ++i; + out_sparse->values_byte_offset = cgltf_json_to_size(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sparse->values_extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->values_extensions_count, &out_sparse->values_extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sparse->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->extensions_count, &out_sparse->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int 
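+ /* Reviewer annotation (not part of upstream cgltf): the accessor parser that follows only
+  * records metadata (bufferView index, byteOffset, componentType, count, type, min/max,
+  * sparse); element data becomes readable once buffers are loaded. A minimal read-back
+  * sketch using cgltf's public helpers (hedged; `acc` is assumed to point into a cgltf_data
+  * that went through cgltf_parse_file + cgltf_load_buffers):
+  *
+  *   cgltf_float tmp[16];                                // 16 floats covers up to MAT4
+  *   cgltf_size comps = cgltf_num_components(acc->type);
+  *   for (cgltf_size e = 0; e < acc->count; ++e)
+  *       if (cgltf_accessor_read_float(acc, e, tmp, comps))
+  *       {
+  *           // tmp[0..comps-1] holds element e, converted/denormalized to float
+  *       }
+  */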
cgltf_parse_json_accessor(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_accessor* out_accessor) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_accessor->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0) + { + ++i; + out_accessor->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0) + { + ++i; + out_accessor->offset = + cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "componentType") == 0) + { + ++i; + out_accessor->component_type = cgltf_json_to_component_type(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "normalized") == 0) + { + ++i; + out_accessor->normalized = cgltf_json_to_bool(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "count") == 0) + { + ++i; + out_accessor->count = + cgltf_json_to_int(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "type") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens+i, json_chunk, "SCALAR") == 0) + { + out_accessor->type = cgltf_type_scalar; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "VEC2") == 0) + { + out_accessor->type = cgltf_type_vec2; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "VEC3") == 0) + { + out_accessor->type = cgltf_type_vec3; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "VEC4") == 0) + { + out_accessor->type = cgltf_type_vec4; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "MAT2") == 0) + { + out_accessor->type = cgltf_type_mat2; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "MAT3") == 0) + { + out_accessor->type = cgltf_type_mat3; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "MAT4") == 0) + { + out_accessor->type = cgltf_type_mat4; + } + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "min") == 0) + { + ++i; + out_accessor->has_min = 1; + // note: we can't parse the precise number of elements since type may not have been computed yet + int min_size = tokens[i].size > 16 ? 16 : tokens[i].size; + i = cgltf_parse_json_float_array(tokens, i, json_chunk, out_accessor->min, min_size); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "max") == 0) + { + ++i; + out_accessor->has_max = 1; + // note: we can't parse the precise number of elements since type may not have been computed yet + int max_size = tokens[i].size > 16 ? 
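+ /* Reviewer annotation (not part of upstream cgltf): the clamp to 16 matches the fixed-size
+  * min[16]/max[16] arrays in cgltf_accessor; 16 is the component count of the largest
+  * accessor type (MAT4), so a longer "max" array can never be valid anyway. */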
16 : tokens[i].size; + i = cgltf_parse_json_float_array(tokens, i, json_chunk, out_accessor->max, max_size); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "sparse") == 0) + { + out_accessor->is_sparse = 1; + i = cgltf_parse_json_accessor_sparse(options, tokens, i + 1, json_chunk, &out_accessor->sparse); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_accessor->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_accessor->extensions_count, &out_accessor->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_texture_transform(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture_transform* out_texture_transform) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "offset") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_texture_transform->offset, 2); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "rotation") == 0) + { + ++i; + out_texture_transform->rotation = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "scale") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_texture_transform->scale, 2); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "texCoord") == 0) + { + ++i; + out_texture_transform->has_texcoord = 1; + out_texture_transform->texcoord = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_texture_view(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture_view* out_texture_view) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + out_texture_view->scale = 1.0f; + cgltf_fill_float_array(out_texture_view->transform.scale, 2, 1.0f); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "index") == 0) + { + ++i; + out_texture_view->texture = CGLTF_PTRINDEX(cgltf_texture, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "texCoord") == 0) + { + ++i; + out_texture_view->texcoord = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "scale") == 0) + { + ++i; + out_texture_view->scale = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "strength") == 0) + { + ++i; + out_texture_view->scale = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_texture_view->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(out_texture_view->extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + out_texture_view->extensions_count = 0; + out_texture_view->extensions 
= (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + + if (!out_texture_view->extensions) + { + return CGLTF_ERROR_NOMEM; + } + + ++i; + + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_texture_transform") == 0) + { + out_texture_view->has_transform = 1; + i = cgltf_parse_json_texture_transform(tokens, i + 1, json_chunk, &out_texture_view->transform); + } + else + { + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_texture_view->extensions[out_texture_view->extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_pbr_metallic_roughness(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_pbr_metallic_roughness* out_pbr) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "metallicFactor") == 0) + { + ++i; + out_pbr->metallic_factor = + cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "roughnessFactor") == 0) + { + ++i; + out_pbr->roughness_factor = + cgltf_json_to_float(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "baseColorFactor") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_pbr->base_color_factor, 4); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "baseColorTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, + &out_pbr->base_color_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "metallicRoughnessTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, + &out_pbr->metallic_roughness_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_pbr->extras); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_pbr_specular_glossiness(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_pbr_specular_glossiness* out_pbr) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "diffuseFactor") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_pbr->diffuse_factor, 4); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularFactor") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_pbr->specular_factor, 3); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "glossinessFactor") == 0) + { + ++i; + out_pbr->glossiness_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "diffuseTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_pbr->diffuse_texture); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularGlossinessTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_pbr->specular_glossiness_texture); + } + else + { 
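+ /* Reviewer annotation (not part of upstream cgltf): this catch-all branch is the pattern
+  * used throughout these parsers: any unrecognized key is handed to cgltf_skip_json, which
+  * walks past the key's entire value (including nested objects and arrays) and returns the
+  * index of the next token, so unknown or future spec fields never derail parsing. */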
+ i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_clearcoat(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_clearcoat* out_clearcoat) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatFactor") == 0) + { + ++i; + out_clearcoat->clearcoat_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatRoughnessFactor") == 0) + { + ++i; + out_clearcoat->clearcoat_roughness_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_clearcoat->clearcoat_texture); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatRoughnessTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_clearcoat->clearcoat_roughness_texture); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "clearcoatNormalTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_clearcoat->clearcoat_normal_texture); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_ior(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_ior* out_ior) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + // Default values + out_ior->ior = 1.5f; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "ior") == 0) + { + ++i; + out_ior->ior = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_specular(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_specular* out_specular) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + // Default values + out_specular->specular_factor = 1.0f; + cgltf_fill_float_array(out_specular->specular_color_factor, 3, 1.0f); + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "specularFactor") == 0) + { + ++i; + out_specular->specular_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularColorFactor") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_specular->specular_color_factor, 3); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "specularTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_specular->specular_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "specularColorTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_specular->specular_color_texture); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_transmission(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_transmission* out_transmission) +{ + 
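+ /* Reviewer annotation (not part of upstream cgltf): like the other KHR_materials_* helpers
+  * here, this parser is invoked from the material "extensions" dispatch further below, which
+  * also sets out_material->has_transmission. A hedged consumer sketch, assuming `mat` points
+  * at a material from a fully parsed cgltf_data:
+  *
+  *   if (mat->has_transmission)
+  *   {
+  *       float factor = mat->transmission.transmission_factor;              // 0 = no transmission
+  *       const cgltf_texture* tex = mat->transmission.transmission_texture.texture;
+  *       // route factor/tex into the renderer's refraction or transmission path
+  *   }
+  */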
CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "transmissionFactor") == 0) + { + ++i; + out_transmission->transmission_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "transmissionTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_transmission->transmission_texture); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_volume(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_volume* out_volume) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "thicknessFactor") == 0) + { + ++i; + out_volume->thickness_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "thicknessTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_volume->thickness_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "attenuationColor") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_volume->attenuation_color, 3); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "attenuationDistance") == 0) + { + ++i; + out_volume->attenuation_distance = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_sheen(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_sheen* out_sheen) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "sheenColorFactor") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_sheen->sheen_color_factor, 3); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "sheenColorTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_sheen->sheen_color_texture); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "sheenRoughnessFactor") == 0) + { + ++i; + out_sheen->sheen_roughness_factor = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "sheenRoughnessTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_sheen->sheen_roughness_texture); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_emissive_strength(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_emissive_strength* out_emissive_strength) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + // Default + out_emissive_strength->emissive_strength = 1.f; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "emissiveStrength") == 0) + { + ++i; + out_emissive_strength->emissive_strength = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else + { + i = 
cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_image(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_image* out_image) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "uri") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_image->uri); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "bufferView") == 0) + { + ++i; + out_image->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "mimeType") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_image->mime_type); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_image->name); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_image->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_image->extensions_count, &out_image->extensions); + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_sampler(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_sampler* out_sampler) +{ + (void)options; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + out_sampler->wrap_s = 10497; + out_sampler->wrap_t = 10497; + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_sampler->name); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "magFilter") == 0) + { + ++i; + out_sampler->mag_filter + = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "minFilter") == 0) + { + ++i; + out_sampler->min_filter + = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "wrapS") == 0) + { + ++i; + out_sampler->wrap_s + = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "wrapT") == 0) + { + ++i; + out_sampler->wrap_t + = cgltf_json_to_int(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sampler->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sampler->extensions_count, &out_sampler->extensions); + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_texture(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture* out_texture) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if 
(cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_texture->name); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "sampler") == 0) + { + ++i; + out_texture->sampler = CGLTF_PTRINDEX(cgltf_sampler, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "source") == 0) + { + ++i; + out_texture->image = CGLTF_PTRINDEX(cgltf_image, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_texture->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if (out_texture->extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + ++i; + out_texture->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + out_texture->extensions_count = 0; + + if (!out_texture->extensions) + { + return CGLTF_ERROR_NOMEM; + } + + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "KHR_texture_basisu") == 0) + { + out_texture->has_basisu = 1; + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int num_properties = tokens[i].size; + ++i; + + for (int t = 0; t < num_properties; ++t) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "source") == 0) + { + ++i; + out_texture->basisu_image = CGLTF_PTRINDEX(cgltf_image, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + } + } + else + { + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_texture->extensions[out_texture->extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_material(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_material* out_material) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + cgltf_fill_float_array(out_material->pbr_metallic_roughness.base_color_factor, 4, 1.0f); + out_material->pbr_metallic_roughness.metallic_factor = 1.0f; + out_material->pbr_metallic_roughness.roughness_factor = 1.0f; + + cgltf_fill_float_array(out_material->pbr_specular_glossiness.diffuse_factor, 4, 1.0f); + cgltf_fill_float_array(out_material->pbr_specular_glossiness.specular_factor, 3, 1.0f); + out_material->pbr_specular_glossiness.glossiness_factor = 1.0f; + + cgltf_fill_float_array(out_material->volume.attenuation_color, 3, 1.0f); + out_material->volume.attenuation_distance = FLT_MAX; + + out_material->alpha_cutoff = 0.5f; + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_material->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "pbrMetallicRoughness") == 0) + { + out_material->has_pbr_metallic_roughness = 1; + i = cgltf_parse_json_pbr_metallic_roughness(options, tokens, i + 1, json_chunk, &out_material->pbr_metallic_roughness); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "emissiveFactor") == 0) + { + i = 
cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_material->emissive_factor, 3); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "normalTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, + &out_material->normal_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "occlusionTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, + &out_material->occlusion_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "emissiveTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, + &out_material->emissive_texture); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "alphaMode") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens + i, json_chunk, "OPAQUE") == 0) + { + out_material->alpha_mode = cgltf_alpha_mode_opaque; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "MASK") == 0) + { + out_material->alpha_mode = cgltf_alpha_mode_mask; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "BLEND") == 0) + { + out_material->alpha_mode = cgltf_alpha_mode_blend; + } + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "alphaCutoff") == 0) + { + ++i; + out_material->alpha_cutoff = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "doubleSided") == 0) + { + ++i; + out_material->double_sided = + cgltf_json_to_bool(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_material->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(out_material->extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + ++i; + out_material->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + out_material->extensions_count= 0; + + if (!out_material->extensions) + { + return CGLTF_ERROR_NOMEM; + } + + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_pbrSpecularGlossiness") == 0) + { + out_material->has_pbr_specular_glossiness = 1; + i = cgltf_parse_json_pbr_specular_glossiness(options, tokens, i + 1, json_chunk, &out_material->pbr_specular_glossiness); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_unlit") == 0) + { + out_material->unlit = 1; + i = cgltf_skip_json(tokens, i+1); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_clearcoat") == 0) + { + out_material->has_clearcoat = 1; + i = cgltf_parse_json_clearcoat(options, tokens, i + 1, json_chunk, &out_material->clearcoat); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_ior") == 0) + { + out_material->has_ior = 1; + i = cgltf_parse_json_ior(tokens, i + 1, json_chunk, &out_material->ior); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_specular") == 0) + { + out_material->has_specular = 1; + i = cgltf_parse_json_specular(options, tokens, i + 1, json_chunk, &out_material->specular); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_transmission") == 0) + { + out_material->has_transmission = 1; + i = cgltf_parse_json_transmission(options, tokens, i + 1, json_chunk, &out_material->transmission); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, 
"KHR_materials_volume") == 0) + { + out_material->has_volume = 1; + i = cgltf_parse_json_volume(options, tokens, i + 1, json_chunk, &out_material->volume); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_sheen") == 0) + { + out_material->has_sheen = 1; + i = cgltf_parse_json_sheen(options, tokens, i + 1, json_chunk, &out_material->sheen); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "KHR_materials_emissive_strength") == 0) + { + out_material->has_emissive_strength = 1; + i = cgltf_parse_json_emissive_strength(tokens, i + 1, json_chunk, &out_material->emissive_strength); + } + else + { + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_material->extensions[out_material->extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_accessors(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_accessor), (void**)&out_data->accessors, &out_data->accessors_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->accessors_count; ++j) + { + i = cgltf_parse_json_accessor(options, tokens, i, json_chunk, &out_data->accessors[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_materials(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_material), (void**)&out_data->materials, &out_data->materials_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->materials_count; ++j) + { + i = cgltf_parse_json_material(options, tokens, i, json_chunk, &out_data->materials[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_images(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_image), (void**)&out_data->images, &out_data->images_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->images_count; ++j) + { + i = cgltf_parse_json_image(options, tokens, i, json_chunk, &out_data->images[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_textures(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_texture), (void**)&out_data->textures, &out_data->textures_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->textures_count; ++j) + { + i = cgltf_parse_json_texture(options, tokens, i, json_chunk, &out_data->textures[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_samplers(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_sampler), (void**)&out_data->samplers, &out_data->samplers_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->samplers_count; ++j) + { + i = cgltf_parse_json_sampler(options, tokens, i, json_chunk, &out_data->samplers[j]); + if (i < 0) + { + return i; + } + } + return 
i; +} + +static int cgltf_parse_json_meshopt_compression(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_meshopt_compression* out_meshopt_compression) +{ + (void)options; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "buffer") == 0) + { + ++i; + out_meshopt_compression->buffer = CGLTF_PTRINDEX(cgltf_buffer, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0) + { + ++i; + out_meshopt_compression->offset = cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteLength") == 0) + { + ++i; + out_meshopt_compression->size = cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteStride") == 0) + { + ++i; + out_meshopt_compression->stride = cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "count") == 0) + { + ++i; + out_meshopt_compression->count = cgltf_json_to_int(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "mode") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens+i, json_chunk, "ATTRIBUTES") == 0) + { + out_meshopt_compression->mode = cgltf_meshopt_compression_mode_attributes; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "TRIANGLES") == 0) + { + out_meshopt_compression->mode = cgltf_meshopt_compression_mode_triangles; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "INDICES") == 0) + { + out_meshopt_compression->mode = cgltf_meshopt_compression_mode_indices; + } + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "filter") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens+i, json_chunk, "NONE") == 0) + { + out_meshopt_compression->filter = cgltf_meshopt_compression_filter_none; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "OCTAHEDRAL") == 0) + { + out_meshopt_compression->filter = cgltf_meshopt_compression_filter_octahedral; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "QUATERNION") == 0) + { + out_meshopt_compression->filter = cgltf_meshopt_compression_filter_quaternion; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "EXPONENTIAL") == 0) + { + out_meshopt_compression->filter = cgltf_meshopt_compression_filter_exponential; + } + ++i; + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_buffer_view(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_buffer_view* out_buffer_view) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_buffer_view->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "buffer") == 0) + { + ++i; + out_buffer_view->buffer = CGLTF_PTRINDEX(cgltf_buffer, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteOffset") == 0) + { + ++i; + out_buffer_view->offset = + cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteLength") == 0) + { + ++i; + out_buffer_view->size = + 
cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteStride") == 0) + { + ++i; + out_buffer_view->stride = + cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "target") == 0) + { + ++i; + int type = cgltf_json_to_int(tokens+i, json_chunk); + switch (type) + { + case 34962: + type = cgltf_buffer_view_type_vertices; + break; + case 34963: + type = cgltf_buffer_view_type_indices; + break; + default: + type = cgltf_buffer_view_type_invalid; + break; + } + out_buffer_view->type = (cgltf_buffer_view_type)type; + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_buffer_view->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(out_buffer_view->extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + out_buffer_view->extensions_count = 0; + out_buffer_view->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + + if (!out_buffer_view->extensions) + { + return CGLTF_ERROR_NOMEM; + } + + ++i; + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "EXT_meshopt_compression") == 0) + { + out_buffer_view->has_meshopt_compression = 1; + i = cgltf_parse_json_meshopt_compression(options, tokens, i + 1, json_chunk, &out_buffer_view->meshopt_compression); + } + else + { + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_buffer_view->extensions[out_buffer_view->extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_buffer_views(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_buffer_view), (void**)&out_data->buffer_views, &out_data->buffer_views_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->buffer_views_count; ++j) + { + i = cgltf_parse_json_buffer_view(options, tokens, i, json_chunk, &out_data->buffer_views[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_buffer(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_buffer* out_buffer) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_buffer->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "byteLength") == 0) + { + ++i; + out_buffer->size = + cgltf_json_to_size(tokens+i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "uri") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_buffer->uri); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_buffer->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, 
json_chunk, &out_buffer->extensions_count, &out_buffer->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_buffers(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_buffer), (void**)&out_data->buffers, &out_data->buffers_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->buffers_count; ++j) + { + i = cgltf_parse_json_buffer(options, tokens, i, json_chunk, &out_data->buffers[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_skin(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_skin* out_skin) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_skin->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "joints") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_node*), (void**)&out_skin->joints, &out_skin->joints_count); + if (i < 0) + { + return i; + } + + for (cgltf_size k = 0; k < out_skin->joints_count; ++k) + { + out_skin->joints[k] = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "skeleton") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_skin->skeleton = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "inverseBindMatrices") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_skin->inverse_bind_matrices = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_skin->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_skin->extensions_count, &out_skin->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_skins(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_skin), (void**)&out_data->skins, &out_data->skins_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->skins_count; ++j) + { + i = cgltf_parse_json_skin(options, tokens, i, json_chunk, &out_data->skins[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_camera(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_camera* out_camera) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_camera->name); + } + else if (cgltf_json_strcmp(tokens+i, 
json_chunk, "type") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens + i, json_chunk, "perspective") == 0) + { + out_camera->type = cgltf_camera_type_perspective; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "orthographic") == 0) + { + out_camera->type = cgltf_camera_type_orthographic; + } + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "perspective") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int data_size = tokens[i].size; + ++i; + + out_camera->type = cgltf_camera_type_perspective; + + for (int k = 0; k < data_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "aspectRatio") == 0) + { + ++i; + out_camera->data.perspective.has_aspect_ratio = 1; + out_camera->data.perspective.aspect_ratio = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "yfov") == 0) + { + ++i; + out_camera->data.perspective.yfov = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "zfar") == 0) + { + ++i; + out_camera->data.perspective.has_zfar = 1; + out_camera->data.perspective.zfar = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "znear") == 0) + { + ++i; + out_camera->data.perspective.znear = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_camera->data.perspective.extras); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "orthographic") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int data_size = tokens[i].size; + ++i; + + out_camera->type = cgltf_camera_type_orthographic; + + for (int k = 0; k < data_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "xmag") == 0) + { + ++i; + out_camera->data.orthographic.xmag = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "ymag") == 0) + { + ++i; + out_camera->data.orthographic.ymag = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "zfar") == 0) + { + ++i; + out_camera->data.orthographic.zfar = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "znear") == 0) + { + ++i; + out_camera->data.orthographic.znear = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_camera->data.orthographic.extras); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_camera->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_camera->extensions_count, &out_camera->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_cameras(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) 
+{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_camera), (void**)&out_data->cameras, &out_data->cameras_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->cameras_count; ++j) + { + i = cgltf_parse_json_camera(options, tokens, i, json_chunk, &out_data->cameras[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_light(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_light* out_light) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + out_light->color[0] = 1.f; + out_light->color[1] = 1.f; + out_light->color[2] = 1.f; + out_light->intensity = 1.f; + + out_light->spot_inner_cone_angle = 0.f; + out_light->spot_outer_cone_angle = 3.1415926535f / 4.0f; + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_light->name); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "color") == 0) + { + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_light->color, 3); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "intensity") == 0) + { + ++i; + out_light->intensity = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "type") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens + i, json_chunk, "directional") == 0) + { + out_light->type = cgltf_light_type_directional; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "point") == 0) + { + out_light->type = cgltf_light_type_point; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "spot") == 0) + { + out_light->type = cgltf_light_type_spot; + } + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "range") == 0) + { + ++i; + out_light->range = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "spot") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int data_size = tokens[i].size; + ++i; + + for (int k = 0; k < data_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "innerConeAngle") == 0) + { + ++i; + out_light->spot_inner_cone_angle = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "outerConeAngle") == 0) + { + ++i; + out_light->spot_outer_cone_angle = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_light->extras); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_lights(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_light), (void**)&out_data->lights, &out_data->lights_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->lights_count; ++j) + { + i = cgltf_parse_json_light(options, tokens, i, json_chunk, &out_data->lights[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_node(cgltf_options* options, jsmntok_t const* tokens, int i, const 
uint8_t* json_chunk, cgltf_node* out_node) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + out_node->rotation[3] = 1.0f; + out_node->scale[0] = 1.0f; + out_node->scale[1] = 1.0f; + out_node->scale[2] = 1.0f; + out_node->matrix[0] = 1.0f; + out_node->matrix[5] = 1.0f; + out_node->matrix[10] = 1.0f; + out_node->matrix[15] = 1.0f; + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_node->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "children") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_node*), (void**)&out_node->children, &out_node->children_count); + if (i < 0) + { + return i; + } + + for (cgltf_size k = 0; k < out_node->children_count; ++k) + { + out_node->children[k] = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "mesh") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_node->mesh = CGLTF_PTRINDEX(cgltf_mesh, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "skin") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_node->skin = CGLTF_PTRINDEX(cgltf_skin, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "camera") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_node->camera = CGLTF_PTRINDEX(cgltf_camera, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "translation") == 0) + { + out_node->has_translation = 1; + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->translation, 3); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "rotation") == 0) + { + out_node->has_rotation = 1; + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->rotation, 4); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "scale") == 0) + { + out_node->has_scale = 1; + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->scale, 3); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "matrix") == 0) + { + out_node->has_matrix = 1; + i = cgltf_parse_json_float_array(tokens, i + 1, json_chunk, out_node->matrix, 16); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "weights") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_float), (void**)&out_node->weights, &out_node->weights_count); + if (i < 0) + { + return i; + } + + i = cgltf_parse_json_float_array(tokens, i - 1, json_chunk, out_node->weights, (int)out_node->weights_count); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_node->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(out_node->extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + out_node->extensions_count= 0; + out_node->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + + if (!out_node->extensions) + { + return CGLTF_ERROR_NOMEM; + } + + ++i; + + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if 
(cgltf_json_strcmp(tokens+i, json_chunk, "KHR_lights_punctual") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int data_size = tokens[i].size; + ++i; + + for (int m = 0; m < data_size; ++m) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "light") == 0) + { + ++i; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_PRIMITIVE); + out_node->light = CGLTF_PTRINDEX(cgltf_light, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_node->extensions[out_node->extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_nodes(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_node), (void**)&out_data->nodes, &out_data->nodes_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->nodes_count; ++j) + { + i = cgltf_parse_json_node(options, tokens, i, json_chunk, &out_data->nodes[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_scene(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_scene* out_scene) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_scene->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "nodes") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_node*), (void**)&out_scene->nodes, &out_scene->nodes_count); + if (i < 0) + { + return i; + } + + for (cgltf_size k = 0; k < out_scene->nodes_count; ++k) + { + out_scene->nodes[k] = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_scene->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_scene->extensions_count, &out_scene->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_scenes(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_scene), (void**)&out_data->scenes, &out_data->scenes_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->scenes_count; ++j) + { + i = cgltf_parse_json_scene(options, tokens, i, json_chunk, &out_data->scenes[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_animation_sampler(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_animation_sampler* out_sampler) +{ + (void)options; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + 
CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "input") == 0) + { + ++i; + out_sampler->input = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "output") == 0) + { + ++i; + out_sampler->output = CGLTF_PTRINDEX(cgltf_accessor, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "interpolation") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens + i, json_chunk, "LINEAR") == 0) + { + out_sampler->interpolation = cgltf_interpolation_type_linear; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "STEP") == 0) + { + out_sampler->interpolation = cgltf_interpolation_type_step; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "CUBICSPLINE") == 0) + { + out_sampler->interpolation = cgltf_interpolation_type_cubic_spline; + } + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_sampler->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sampler->extensions_count, &out_sampler->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_animation_channel(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_animation_channel* out_channel) +{ + (void)options; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "sampler") == 0) + { + ++i; + out_channel->sampler = CGLTF_PTRINDEX(cgltf_animation_sampler, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "target") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int target_size = tokens[i].size; + ++i; + + for (int k = 0; k < target_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "node") == 0) + { + ++i; + out_channel->target_node = CGLTF_PTRINDEX(cgltf_node, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "path") == 0) + { + ++i; + if (cgltf_json_strcmp(tokens+i, json_chunk, "translation") == 0) + { + out_channel->target_path = cgltf_animation_path_type_translation; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "rotation") == 0) + { + out_channel->target_path = cgltf_animation_path_type_rotation; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "scale") == 0) + { + out_channel->target_path = cgltf_animation_path_type_scale; + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "weights") == 0) + { + out_channel->target_path = cgltf_animation_path_type_weights; + } + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_channel->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_channel->extensions_count, &out_channel->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 
0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_animation(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_animation* out_animation) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_animation->name); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "samplers") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_animation_sampler), (void**)&out_animation->samplers, &out_animation->samplers_count); + if (i < 0) + { + return i; + } + + for (cgltf_size k = 0; k < out_animation->samplers_count; ++k) + { + i = cgltf_parse_json_animation_sampler(options, tokens, i, json_chunk, &out_animation->samplers[k]); + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "channels") == 0) + { + i = cgltf_parse_json_array(options, tokens, i + 1, json_chunk, sizeof(cgltf_animation_channel), (void**)&out_animation->channels, &out_animation->channels_count); + if (i < 0) + { + return i; + } + + for (cgltf_size k = 0; k < out_animation->channels_count; ++k) + { + i = cgltf_parse_json_animation_channel(options, tokens, i, json_chunk, &out_animation->channels[k]); + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_animation->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_animation->extensions_count, &out_animation->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_animations(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_animation), (void**)&out_data->animations, &out_data->animations_count); + if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->animations_count; ++j) + { + i = cgltf_parse_json_animation(options, tokens, i, json_chunk, &out_data->animations[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_variant(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_material_variant* out_variant) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "name") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_variant->name); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_variant->extras); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +static int cgltf_parse_json_variants(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + i = cgltf_parse_json_array(options, tokens, i, json_chunk, sizeof(cgltf_material_variant), (void**)&out_data->variants, &out_data->variants_count); + 
if (i < 0) + { + return i; + } + + for (cgltf_size j = 0; j < out_data->variants_count; ++j) + { + i = cgltf_parse_json_variant(options, tokens, i, json_chunk, &out_data->variants[j]); + if (i < 0) + { + return i; + } + } + return i; +} + +static int cgltf_parse_json_asset(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_asset* out_asset) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "copyright") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->copyright); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "generator") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->generator); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "version") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->version); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "minVersion") == 0) + { + i = cgltf_parse_json_string(options, tokens, i + 1, json_chunk, &out_asset->min_version); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_asset->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_asset->extensions_count, &out_asset->extensions); + } + else + { + i = cgltf_skip_json(tokens, i+1); + } + + if (i < 0) + { + return i; + } + } + + if (out_asset->version && CGLTF_ATOF(out_asset->version) < 2) + { + return CGLTF_ERROR_LEGACY; + } + + return i; +} + +cgltf_size cgltf_num_components(cgltf_type type) { + switch (type) + { + case cgltf_type_vec2: + return 2; + case cgltf_type_vec3: + return 3; + case cgltf_type_vec4: + return 4; + case cgltf_type_mat2: + return 4; + case cgltf_type_mat3: + return 9; + case cgltf_type_mat4: + return 16; + case cgltf_type_invalid: + case cgltf_type_scalar: + default: + return 1; + } +} + +static cgltf_size cgltf_component_size(cgltf_component_type component_type) { + switch (component_type) + { + case cgltf_component_type_r_8: + case cgltf_component_type_r_8u: + return 1; + case cgltf_component_type_r_16: + case cgltf_component_type_r_16u: + return 2; + case cgltf_component_type_r_32u: + case cgltf_component_type_r_32f: + return 4; + case cgltf_component_type_invalid: + default: + return 0; + } +} + +static cgltf_size cgltf_calc_size(cgltf_type type, cgltf_component_type component_type) +{ + cgltf_size component_size = cgltf_component_size(component_type); + if (type == cgltf_type_mat2 && component_size == 1) + { + return 8 * component_size; + } + else if (type == cgltf_type_mat3 && (component_size == 1 || component_size == 2)) + { + return 12 * component_size; + } + return component_size * cgltf_num_components(type); +} + +static int cgltf_fixup_pointers(cgltf_data* out_data); + +static int cgltf_parse_json_root(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_data* out_data) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int size = tokens[i].size; + ++i; + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "asset") == 0) + { + i = cgltf_parse_json_asset(options, tokens, i + 1, json_chunk, &out_data->asset); + } + else if 
(cgltf_json_strcmp(tokens + i, json_chunk, "meshes") == 0) + { + i = cgltf_parse_json_meshes(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "accessors") == 0) + { + i = cgltf_parse_json_accessors(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "bufferViews") == 0) + { + i = cgltf_parse_json_buffer_views(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "buffers") == 0) + { + i = cgltf_parse_json_buffers(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "materials") == 0) + { + i = cgltf_parse_json_materials(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "images") == 0) + { + i = cgltf_parse_json_images(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "textures") == 0) + { + i = cgltf_parse_json_textures(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "samplers") == 0) + { + i = cgltf_parse_json_samplers(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "skins") == 0) + { + i = cgltf_parse_json_skins(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "cameras") == 0) + { + i = cgltf_parse_json_cameras(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "nodes") == 0) + { + i = cgltf_parse_json_nodes(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "scenes") == 0) + { + i = cgltf_parse_json_scenes(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "scene") == 0) + { + ++i; + out_data->scene = CGLTF_PTRINDEX(cgltf_scene, cgltf_json_to_int(tokens + i, json_chunk)); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "animations") == 0) + { + i = cgltf_parse_json_animations(options, tokens, i + 1, json_chunk, out_data); + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "extras") == 0) + { + i = cgltf_parse_json_extras(tokens, i + 1, json_chunk, &out_data->extras); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + if(out_data->data_extensions) + { + return CGLTF_ERROR_JSON; + } + + int extensions_size = tokens[i].size; + out_data->data_extensions_count = 0; + out_data->data_extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); + + if (!out_data->data_extensions) + { + return CGLTF_ERROR_NOMEM; + } + + ++i; + + for (int k = 0; k < extensions_size; ++k) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_lights_punctual") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + + int data_size = tokens[i].size; + ++i; + + for (int m = 0; m < data_size; ++m) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "lights") == 0) + { + i = cgltf_parse_json_lights(options, tokens, i + 1, json_chunk, out_data); + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens+i, json_chunk, "KHR_materials_variants") == 0) + { + ++i; + + CGLTF_CHECK_TOKTYPE(tokens[i], 
JSMN_OBJECT); + + int data_size = tokens[i].size; + ++i; + + for (int m = 0; m < data_size; ++m) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "variants") == 0) + { + i = cgltf_parse_json_variants(options, tokens, i + 1, json_chunk, out_data); + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + } + else + { + i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_data->data_extensions[out_data->data_extensions_count++])); + } + + if (i < 0) + { + return i; + } + } + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensionsUsed") == 0) + { + i = cgltf_parse_json_string_array(options, tokens, i + 1, json_chunk, &out_data->extensions_used, &out_data->extensions_used_count); + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensionsRequired") == 0) + { + i = cgltf_parse_json_string_array(options, tokens, i + 1, json_chunk, &out_data->extensions_required, &out_data->extensions_required_count); + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + +cgltf_result cgltf_parse_json(cgltf_options* options, const uint8_t* json_chunk, cgltf_size size, cgltf_data** out_data) +{ + jsmn_parser parser = { 0, 0, 0 }; + + if (options->json_token_count == 0) + { + int token_count = jsmn_parse(&parser, (const char*)json_chunk, size, NULL, 0); + + if (token_count <= 0) + { + return cgltf_result_invalid_json; + } + + options->json_token_count = token_count; + } + + jsmntok_t* tokens = (jsmntok_t*)options->memory.alloc(options->memory.user_data, sizeof(jsmntok_t) * (options->json_token_count + 1)); + + if (!tokens) + { + return cgltf_result_out_of_memory; + } + + jsmn_init(&parser); + + int token_count = jsmn_parse(&parser, (const char*)json_chunk, size, tokens, options->json_token_count); + + if (token_count <= 0) + { + options->memory.free(options->memory.user_data, tokens); + return cgltf_result_invalid_json; + } + + // this makes sure that we always have an UNDEFINED token at the end of the stream + // for invalid JSON inputs this makes sure we don't perform out of bound reads of token data + tokens[token_count].type = JSMN_UNDEFINED; + + cgltf_data* data = (cgltf_data*)options->memory.alloc(options->memory.user_data, sizeof(cgltf_data)); + + if (!data) + { + options->memory.free(options->memory.user_data, tokens); + return cgltf_result_out_of_memory; + } + + memset(data, 0, sizeof(cgltf_data)); + data->memory = options->memory; + data->file = options->file; + + int i = cgltf_parse_json_root(options, tokens, 0, json_chunk, data); + + options->memory.free(options->memory.user_data, tokens); + + if (i < 0) + { + cgltf_free(data); + + switch (i) + { + case CGLTF_ERROR_NOMEM: return cgltf_result_out_of_memory; + case CGLTF_ERROR_LEGACY: return cgltf_result_legacy_gltf; + default: return cgltf_result_invalid_gltf; + } + } + + if (cgltf_fixup_pointers(data) < 0) + { + cgltf_free(data); + return cgltf_result_invalid_gltf; + } + + data->json = (const char*)json_chunk; + data->json_size = size; + + *out_data = data; + + return cgltf_result_success; +} + +static int cgltf_fixup_pointers(cgltf_data* data) +{ + for (cgltf_size i = 0; i < data->meshes_count; ++i) + { + for (cgltf_size j = 0; j < data->meshes[i].primitives_count; ++j) + { + CGLTF_PTRFIXUP(data->meshes[i].primitives[j].indices, data->accessors, data->accessors_count); + CGLTF_PTRFIXUP(data->meshes[i].primitives[j].material, data->materials, data->materials_count); + + 
for (cgltf_size k = 0; k < data->meshes[i].primitives[j].attributes_count; ++k) + { + CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].attributes[k].data, data->accessors, data->accessors_count); + } + + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].targets_count; ++k) + { + for (cgltf_size m = 0; m < data->meshes[i].primitives[j].targets[k].attributes_count; ++m) + { + CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].targets[k].attributes[m].data, data->accessors, data->accessors_count); + } + } + + if (data->meshes[i].primitives[j].has_draco_mesh_compression) + { + CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].draco_mesh_compression.buffer_view, data->buffer_views, data->buffer_views_count); + for (cgltf_size m = 0; m < data->meshes[i].primitives[j].draco_mesh_compression.attributes_count; ++m) + { + CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].draco_mesh_compression.attributes[m].data, data->accessors, data->accessors_count); + } + } + + for (cgltf_size k = 0; k < data->meshes[i].primitives[j].mappings_count; ++k) + { + CGLTF_PTRFIXUP_REQ(data->meshes[i].primitives[j].mappings[k].material, data->materials, data->materials_count); + } + } + } + + for (cgltf_size i = 0; i < data->accessors_count; ++i) + { + CGLTF_PTRFIXUP(data->accessors[i].buffer_view, data->buffer_views, data->buffer_views_count); + + if (data->accessors[i].is_sparse) + { + CGLTF_PTRFIXUP_REQ(data->accessors[i].sparse.indices_buffer_view, data->buffer_views, data->buffer_views_count); + CGLTF_PTRFIXUP_REQ(data->accessors[i].sparse.values_buffer_view, data->buffer_views, data->buffer_views_count); + } + + if (data->accessors[i].buffer_view) + { + data->accessors[i].stride = data->accessors[i].buffer_view->stride; + } + + if (data->accessors[i].stride == 0) + { + data->accessors[i].stride = cgltf_calc_size(data->accessors[i].type, data->accessors[i].component_type); + } + } + + for (cgltf_size i = 0; i < data->textures_count; ++i) + { + CGLTF_PTRFIXUP(data->textures[i].image, data->images, data->images_count); + CGLTF_PTRFIXUP(data->textures[i].basisu_image, data->images, data->images_count); + CGLTF_PTRFIXUP(data->textures[i].sampler, data->samplers, data->samplers_count); + } + + for (cgltf_size i = 0; i < data->images_count; ++i) + { + CGLTF_PTRFIXUP(data->images[i].buffer_view, data->buffer_views, data->buffer_views_count); + } + + for (cgltf_size i = 0; i < data->materials_count; ++i) + { + CGLTF_PTRFIXUP(data->materials[i].normal_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].emissive_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].occlusion_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].pbr_metallic_roughness.base_color_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].pbr_metallic_roughness.metallic_roughness_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].pbr_specular_glossiness.diffuse_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].pbr_specular_glossiness.specular_glossiness_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].clearcoat.clearcoat_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].clearcoat.clearcoat_roughness_texture.texture, data->textures, data->textures_count); + 
CGLTF_PTRFIXUP(data->materials[i].clearcoat.clearcoat_normal_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].specular.specular_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].specular.specular_color_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].transmission.transmission_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].volume.thickness_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].sheen.sheen_color_texture.texture, data->textures, data->textures_count); + CGLTF_PTRFIXUP(data->materials[i].sheen.sheen_roughness_texture.texture, data->textures, data->textures_count); + } + + for (cgltf_size i = 0; i < data->buffer_views_count; ++i) + { + CGLTF_PTRFIXUP_REQ(data->buffer_views[i].buffer, data->buffers, data->buffers_count); + + if (data->buffer_views[i].has_meshopt_compression) + { + CGLTF_PTRFIXUP_REQ(data->buffer_views[i].meshopt_compression.buffer, data->buffers, data->buffers_count); + } + } + + for (cgltf_size i = 0; i < data->skins_count; ++i) + { + for (cgltf_size j = 0; j < data->skins[i].joints_count; ++j) + { + CGLTF_PTRFIXUP_REQ(data->skins[i].joints[j], data->nodes, data->nodes_count); + } + + CGLTF_PTRFIXUP(data->skins[i].skeleton, data->nodes, data->nodes_count); + CGLTF_PTRFIXUP(data->skins[i].inverse_bind_matrices, data->accessors, data->accessors_count); + } + + for (cgltf_size i = 0; i < data->nodes_count; ++i) + { + for (cgltf_size j = 0; j < data->nodes[i].children_count; ++j) + { + CGLTF_PTRFIXUP_REQ(data->nodes[i].children[j], data->nodes, data->nodes_count); + + if (data->nodes[i].children[j]->parent) + { + return CGLTF_ERROR_JSON; + } + + data->nodes[i].children[j]->parent = &data->nodes[i]; + } + + CGLTF_PTRFIXUP(data->nodes[i].mesh, data->meshes, data->meshes_count); + CGLTF_PTRFIXUP(data->nodes[i].skin, data->skins, data->skins_count); + CGLTF_PTRFIXUP(data->nodes[i].camera, data->cameras, data->cameras_count); + CGLTF_PTRFIXUP(data->nodes[i].light, data->lights, data->lights_count); + } + + for (cgltf_size i = 0; i < data->scenes_count; ++i) + { + for (cgltf_size j = 0; j < data->scenes[i].nodes_count; ++j) + { + CGLTF_PTRFIXUP_REQ(data->scenes[i].nodes[j], data->nodes, data->nodes_count); + + if (data->scenes[i].nodes[j]->parent) + { + return CGLTF_ERROR_JSON; + } + } + } + + CGLTF_PTRFIXUP(data->scene, data->scenes, data->scenes_count); + + for (cgltf_size i = 0; i < data->animations_count; ++i) + { + for (cgltf_size j = 0; j < data->animations[i].samplers_count; ++j) + { + CGLTF_PTRFIXUP_REQ(data->animations[i].samplers[j].input, data->accessors, data->accessors_count); + CGLTF_PTRFIXUP_REQ(data->animations[i].samplers[j].output, data->accessors, data->accessors_count); + } + + for (cgltf_size j = 0; j < data->animations[i].channels_count; ++j) + { + CGLTF_PTRFIXUP_REQ(data->animations[i].channels[j].sampler, data->animations[i].samplers, data->animations[i].samplers_count); + CGLTF_PTRFIXUP(data->animations[i].channels[j].target_node, data->nodes, data->nodes_count); + } + } + + return 0; +} + +/* + * -- jsmn.c start -- + * Source: https://github.com/zserge/jsmn + * License: MIT + * + * Copyright (c) 2010 Serge A. 
Zaitsev + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * Allocates a fresh unused token from the token pull. + */ +static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser, + jsmntok_t *tokens, size_t num_tokens) { + jsmntok_t *tok; + if (parser->toknext >= num_tokens) { + return NULL; + } + tok = &tokens[parser->toknext++]; + tok->start = tok->end = -1; + tok->size = 0; +#ifdef JSMN_PARENT_LINKS + tok->parent = -1; +#endif + return tok; +} + +/** + * Fills token type and boundaries. + */ +static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type, + int start, int end) { + token->type = type; + token->start = start; + token->end = end; + token->size = 0; +} + +/** + * Fills next available token with JSON primitive. + */ +static int jsmn_parse_primitive(jsmn_parser *parser, const char *js, + size_t len, jsmntok_t *tokens, size_t num_tokens) { + jsmntok_t *token; + int start; + + start = parser->pos; + + for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { + switch (js[parser->pos]) { +#ifndef JSMN_STRICT + /* In strict mode primitive must be followed by "," or "}" or "]" */ + case ':': +#endif + case '\t' : case '\r' : case '\n' : case ' ' : + case ',' : case ']' : case '}' : + goto found; + } + if (js[parser->pos] < 32 || js[parser->pos] >= 127) { + parser->pos = start; + return JSMN_ERROR_INVAL; + } + } +#ifdef JSMN_STRICT + /* In strict mode primitive must be followed by a comma/object/array */ + parser->pos = start; + return JSMN_ERROR_PART; +#endif + +found: + if (tokens == NULL) { + parser->pos--; + return 0; + } + token = jsmn_alloc_token(parser, tokens, num_tokens); + if (token == NULL) { + parser->pos = start; + return JSMN_ERROR_NOMEM; + } + jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos); +#ifdef JSMN_PARENT_LINKS + token->parent = parser->toksuper; +#endif + parser->pos--; + return 0; +} + +/** + * Fills next token with JSON string. 
+ */ +static int jsmn_parse_string(jsmn_parser *parser, const char *js, + size_t len, jsmntok_t *tokens, size_t num_tokens) { + jsmntok_t *token; + + int start = parser->pos; + + parser->pos++; + + /* Skip starting quote */ + for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { + char c = js[parser->pos]; + + /* Quote: end of string */ + if (c == '\"') { + if (tokens == NULL) { + return 0; + } + token = jsmn_alloc_token(parser, tokens, num_tokens); + if (token == NULL) { + parser->pos = start; + return JSMN_ERROR_NOMEM; + } + jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos); +#ifdef JSMN_PARENT_LINKS + token->parent = parser->toksuper; +#endif + return 0; + } + + /* Backslash: Quoted symbol expected */ + if (c == '\\' && parser->pos + 1 < len) { + int i; + parser->pos++; + switch (js[parser->pos]) { + /* Allowed escaped symbols */ + case '\"': case '/' : case '\\' : case 'b' : + case 'f' : case 'r' : case 'n' : case 't' : + break; + /* Allows escaped symbol \uXXXX */ + case 'u': + parser->pos++; + for(i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; i++) { + /* If it isn't a hex character we have an error */ + if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */ + (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */ + (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */ + parser->pos = start; + return JSMN_ERROR_INVAL; + } + parser->pos++; + } + parser->pos--; + break; + /* Unexpected symbol */ + default: + parser->pos = start; + return JSMN_ERROR_INVAL; + } + } + } + parser->pos = start; + return JSMN_ERROR_PART; +} + +/** + * Parse JSON string and fill tokens. + */ +static int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, + jsmntok_t *tokens, size_t num_tokens) { + int r; + int i; + jsmntok_t *token; + int count = parser->toknext; + + for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { + char c; + jsmntype_t type; + + c = js[parser->pos]; + switch (c) { + case '{': case '[': + count++; + if (tokens == NULL) { + break; + } + token = jsmn_alloc_token(parser, tokens, num_tokens); + if (token == NULL) + return JSMN_ERROR_NOMEM; + if (parser->toksuper != -1) { + tokens[parser->toksuper].size++; +#ifdef JSMN_PARENT_LINKS + token->parent = parser->toksuper; +#endif + } + token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY); + token->start = parser->pos; + parser->toksuper = parser->toknext - 1; + break; + case '}': case ']': + if (tokens == NULL) + break; + type = (c == '}' ? 
JSMN_OBJECT : JSMN_ARRAY); +#ifdef JSMN_PARENT_LINKS + if (parser->toknext < 1) { + return JSMN_ERROR_INVAL; + } + token = &tokens[parser->toknext - 1]; + for (;;) { + if (token->start != -1 && token->end == -1) { + if (token->type != type) { + return JSMN_ERROR_INVAL; + } + token->end = parser->pos + 1; + parser->toksuper = token->parent; + break; + } + if (token->parent == -1) { + if(token->type != type || parser->toksuper == -1) { + return JSMN_ERROR_INVAL; + } + break; + } + token = &tokens[token->parent]; + } +#else + for (i = parser->toknext - 1; i >= 0; i--) { + token = &tokens[i]; + if (token->start != -1 && token->end == -1) { + if (token->type != type) { + return JSMN_ERROR_INVAL; + } + parser->toksuper = -1; + token->end = parser->pos + 1; + break; + } + } + /* Error if unmatched closing bracket */ + if (i == -1) return JSMN_ERROR_INVAL; + for (; i >= 0; i--) { + token = &tokens[i]; + if (token->start != -1 && token->end == -1) { + parser->toksuper = i; + break; + } + } +#endif + break; + case '\"': + r = jsmn_parse_string(parser, js, len, tokens, num_tokens); + if (r < 0) return r; + count++; + if (parser->toksuper != -1 && tokens != NULL) + tokens[parser->toksuper].size++; + break; + case '\t' : case '\r' : case '\n' : case ' ': + break; + case ':': + parser->toksuper = parser->toknext - 1; + break; + case ',': + if (tokens != NULL && parser->toksuper != -1 && + tokens[parser->toksuper].type != JSMN_ARRAY && + tokens[parser->toksuper].type != JSMN_OBJECT) { +#ifdef JSMN_PARENT_LINKS + parser->toksuper = tokens[parser->toksuper].parent; +#else + for (i = parser->toknext - 1; i >= 0; i--) { + if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) { + if (tokens[i].start != -1 && tokens[i].end == -1) { + parser->toksuper = i; + break; + } + } + } +#endif + } + break; +#ifdef JSMN_STRICT + /* In strict mode primitives are: numbers and booleans */ + case '-': case '0': case '1' : case '2': case '3' : case '4': + case '5': case '6': case '7' : case '8': case '9': + case 't': case 'f': case 'n' : + /* And they must not be keys of the object */ + if (tokens != NULL && parser->toksuper != -1) { + jsmntok_t *t = &tokens[parser->toksuper]; + if (t->type == JSMN_OBJECT || + (t->type == JSMN_STRING && t->size != 0)) { + return JSMN_ERROR_INVAL; + } + } +#else + /* In non-strict mode every unquoted value is a primitive */ + default: +#endif + r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens); + if (r < 0) return r; + count++; + if (parser->toksuper != -1 && tokens != NULL) + tokens[parser->toksuper].size++; + break; + +#ifdef JSMN_STRICT + /* Unexpected char in strict mode */ + default: + return JSMN_ERROR_INVAL; +#endif + } + } + + if (tokens != NULL) { + for (i = parser->toknext - 1; i >= 0; i--) { + /* Unmatched opened object or array */ + if (tokens[i].start != -1 && tokens[i].end == -1) { + return JSMN_ERROR_PART; + } + } + } + + return count; +} + +/** + * Creates a new parser based over a given buffer with an array of tokens + * available. 
+ */ +static void jsmn_init(jsmn_parser *parser) { + parser->pos = 0; + parser->toknext = 0; + parser->toksuper = -1; +} +/* + * -- jsmn.c end -- + */ + +#endif /* #ifdef CGLTF_IMPLEMENTATION */ + +/* cgltf is distributed under MIT license: + * + * Copyright (c) 2018-2021 Johannes Kuhlmann + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ diff --git a/ios/include/common/CallbackUtils.cpp b/ios/include/common/CallbackUtils.cpp new file mode 100644 index 00000000..5cde5451 --- /dev/null +++ b/ios/include/common/CallbackUtils.cpp @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "CallbackUtils.h" + +#include "private/backend/VirtualMachineEnv.h" + +void acquireCallbackJni(JNIEnv* env, CallbackJni& callbackUtils) { +#ifdef __ANDROID__ + callbackUtils.handlerClass = env->FindClass("android/os/Handler"); + callbackUtils.handlerClass = (jclass) env->NewGlobalRef(callbackUtils.handlerClass); + callbackUtils.post = env->GetMethodID(callbackUtils.handlerClass, + "post", "(Ljava/lang/Runnable;)Z"); +#endif + + callbackUtils.executorClass = env->FindClass("java/util/concurrent/Executor"); + callbackUtils.executorClass = (jclass) env->NewGlobalRef(callbackUtils.executorClass); + callbackUtils.execute = env->GetMethodID(callbackUtils.executorClass, + "execute", "(Ljava/lang/Runnable;)V"); +} + +void releaseCallbackJni(JNIEnv* env, CallbackJni callbackUtils, jobject handler, jobject callback) { + if (handler && callback) { +#ifdef __ANDROID__ + if (env->IsInstanceOf(handler, callbackUtils.handlerClass)) { + env->CallBooleanMethod(handler, callbackUtils.post, callback); + } +#endif + if (env->IsInstanceOf(handler, callbackUtils.executorClass)) { + env->CallVoidMethod(handler, callbackUtils.execute, callback); + } + } + env->DeleteGlobalRef(handler); + env->DeleteGlobalRef(callback); +#ifdef __ANDROID__ + env->DeleteGlobalRef(callbackUtils.handlerClass); +#endif + env->DeleteGlobalRef(callbackUtils.executorClass); +} + +// ----------------------------------------------------------------------------------------------- + +JniCallback* JniCallback::make(JNIEnv* env, jobject handler, jobject callback) { + return new JniCallback(env, handler, callback); +} + +JniCallback::JniCallback(JNIEnv* env, jobject handler, jobject callback) + : mHandler(env->NewGlobalRef(handler)), + mCallback(env->NewGlobalRef(callback)) { + acquireCallbackJni(env, mCallbackUtils); +} + +JniCallback::~JniCallback() = default; + +void JniCallback::post(void* user, filament::backend::CallbackHandler::Callback callback) { + callback(user); +} + +void JniCallback::postToJavaAndDestroy(JniCallback* callback) { + JNIEnv* env = filament::VirtualMachineEnv::get().getEnvironment(); + releaseCallbackJni(env, callback->mCallbackUtils, callback->mHandler, callback->mCallback); + delete callback; +} + +// ----------------------------------------------------------------------------------------------- + +JniBufferCallback* JniBufferCallback::make(filament::Engine*, + JNIEnv* env, jobject handler, jobject callback, AutoBuffer&& buffer) { + return new JniBufferCallback(env, handler, callback, std::move(buffer)); +} + +JniBufferCallback::JniBufferCallback(JNIEnv* env, jobject handler, jobject callback, + AutoBuffer&& buffer) + : JniCallback(env, handler, callback), + mBuffer(std::move(buffer)) { +} + +JniBufferCallback::~JniBufferCallback() = default; + +void JniBufferCallback::postToJavaAndDestroy(void*, size_t, void* user) { + JniBufferCallback* callback = (JniBufferCallback*)user; + JNIEnv* env = filament::VirtualMachineEnv::get().getEnvironment(); + callback->mBuffer.attachToJniThread(env); + releaseCallbackJni(env, callback->mCallbackUtils, callback->mHandler, callback->mCallback); + delete callback; +} + +// ----------------------------------------------------------------------------------------------- + +JniImageCallback* JniImageCallback::make(filament::Engine*, + JNIEnv* env, jobject handler, jobject callback, long image) { + return new JniImageCallback(env, handler, callback, image); +} + +JniImageCallback::JniImageCallback(JNIEnv* env, jobject handler, jobject callback, long image) + : 
JniCallback(env, handler, callback), + mImage(image) { +} + +JniImageCallback::~JniImageCallback() = default; + +void JniImageCallback::postToJavaAndDestroy(void*, void* user) { + JniImageCallback* callback = (JniImageCallback*)user; + JNIEnv* env = filament::VirtualMachineEnv::get().getEnvironment(); + releaseCallbackJni(env, callback->mCallbackUtils, callback->mHandler, callback->mCallback); + delete callback; +} diff --git a/ios/include/common/CallbackUtils.h b/ios/include/common/CallbackUtils.h new file mode 100644 index 00000000..8cd8671b --- /dev/null +++ b/ios/include/common/CallbackUtils.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include "common/NioUtils.h" + +#include + +#include + +struct CallbackJni { +#ifdef __ANDROID__ + jclass handlerClass = nullptr; + jmethodID post = nullptr; +#endif + jclass executorClass = nullptr; + jmethodID execute = nullptr; +}; + +void acquireCallbackJni(JNIEnv* env, CallbackJni& callbackUtils); +void releaseCallbackJni(JNIEnv* env, CallbackJni callbackUtils, jobject handler, jobject callback); + +struct JniCallback : private filament::backend::CallbackHandler { + JniCallback(JniCallback const &) = delete; + JniCallback(JniCallback&&) = delete; + JniCallback& operator=(JniCallback const &) = delete; + JniCallback& operator=(JniCallback&&) = delete; + + // create a JniCallback + static JniCallback* make(JNIEnv* env, jobject handler, jobject runnable); + + // execute the callback on the java thread and destroy ourselves + static void postToJavaAndDestroy(JniCallback* callback); + + // CallbackHandler interface. 
+ void post(void* user, Callback callback) override; + + // Get the CallbackHandler interface + filament::backend::CallbackHandler* getHandler() noexcept { return this; } + + jobject getCallbackObject() { return mCallback; } + +protected: + JniCallback(JNIEnv* env, jobject handler, jobject runnable); + explicit JniCallback() = default; // this version does nothing + virtual ~JniCallback(); + jobject mHandler{}; + jobject mCallback{}; + CallbackJni mCallbackUtils{}; +}; + + +struct JniBufferCallback : public JniCallback { + // create a JniBufferCallback + static JniBufferCallback* make(filament::Engine* engine, + JNIEnv* env, jobject handler, jobject callback, AutoBuffer&& buffer); + + // execute the callback on the java thread and destroy ourselves + static void postToJavaAndDestroy(void*, size_t, void* user); + +private: + JniBufferCallback(JNIEnv* env, jobject handler, jobject callback, AutoBuffer&& buffer); + virtual ~JniBufferCallback(); + AutoBuffer mBuffer; +}; + +struct JniImageCallback : public JniCallback { + // create a JniImageCallback + static JniImageCallback* make(filament::Engine* engine, JNIEnv* env, jobject handler, + jobject runnable, long image); + + // execute the callback on the java thread and destroy ourselves + static void postToJavaAndDestroy(void*, void* user); + +private: + JniImageCallback(JNIEnv* env, jobject handler, jobject runnable, long image); + virtual ~JniImageCallback(); + long mImage; +}; diff --git a/ios/include/common/NioUtils.cpp b/ios/include/common/NioUtils.cpp new file mode 100644 index 00000000..3a849978 --- /dev/null +++ b/ios/include/common/NioUtils.cpp @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common/NioUtils.h" + +#include + +#include + +AutoBuffer::AutoBuffer(JNIEnv *env, jobject buffer, jint size, bool commit) noexcept : + mEnv(env), + mDoCommit(commit) { + + mNioUtils.jniClass = env->FindClass("com/google/android/filament/NioUtils"); + mNioUtils.jniClass = (jclass) env->NewGlobalRef(mNioUtils.jniClass); + + mNioUtils.getBasePointer = env->GetStaticMethodID(mNioUtils.jniClass, + "getBasePointer", "(Ljava/nio/Buffer;JI)J"); + mNioUtils.getBaseArray = env->GetStaticMethodID(mNioUtils.jniClass, + "getBaseArray", "(Ljava/nio/Buffer;)Ljava/lang/Object;"); + mNioUtils.getBaseArrayOffset = env->GetStaticMethodID(mNioUtils.jniClass, + "getBaseArrayOffset", "(Ljava/nio/Buffer;I)I"); + mNioUtils.getBufferType = env->GetStaticMethodID(mNioUtils.jniClass, + "getBufferType", "(Ljava/nio/Buffer;)I"); + + mBuffer = env->NewGlobalRef(buffer); + + mType = (BufferType) env->CallStaticIntMethod( + mNioUtils.jniClass, mNioUtils.getBufferType, mBuffer); + + switch (mType) { + case BufferType::BYTE: + mShift = 0; + break; + case BufferType::CHAR: + case BufferType::SHORT: + mShift = 1; + break; + case BufferType::INT: + case BufferType::FLOAT: + mShift = 2; + break; + case BufferType::LONG: + case BufferType::DOUBLE: + mShift = 3; + break; + } + + mSize = (size_t) size << mShift; + + jlong address = (jlong) env->GetDirectBufferAddress(mBuffer); + if (address) { + // Direct buffer case + mData = reinterpret_cast(env->CallStaticLongMethod(mNioUtils.jniClass, + mNioUtils.getBasePointer, mBuffer, address, mShift)); + mUserData = mData; + } else { + // wrapped array case + jarray array = (jarray) env->CallStaticObjectMethod(mNioUtils.jniClass, + mNioUtils.getBaseArray, mBuffer); + + jint offset = env->CallStaticIntMethod(mNioUtils.jniClass, + mNioUtils.getBaseArrayOffset, mBuffer, mShift); + + mBaseArray = (jarray) env->NewGlobalRef(array); + switch (mType) { + case BufferType::BYTE: + mData = env->GetByteArrayElements((jbyteArray)mBaseArray, nullptr); + break; + case BufferType::CHAR: + mData = env->GetCharArrayElements((jcharArray)mBaseArray, nullptr); + break; + case BufferType::SHORT: + mData = env->GetShortArrayElements((jshortArray)mBaseArray, nullptr); + break; + case BufferType::INT: + mData = env->GetIntArrayElements((jintArray)mBaseArray, nullptr); + break; + case BufferType::LONG: + mData = env->GetLongArrayElements((jlongArray)mBaseArray, nullptr); + break; + case BufferType::FLOAT: + mData = env->GetFloatArrayElements((jfloatArray)mBaseArray, nullptr); + break; + case BufferType::DOUBLE: + mData = env->GetDoubleArrayElements((jdoubleArray)mBaseArray, nullptr); + break; + } + mUserData = (void *) ((char *) mData + offset); + } +} + +AutoBuffer::AutoBuffer(AutoBuffer &&rhs) noexcept { + mEnv = rhs.mEnv; + std::swap(mData, rhs.mData); + std::swap(mUserData, rhs.mUserData); + std::swap(mSize, rhs.mSize); + std::swap(mType, rhs.mType); + std::swap(mShift, rhs.mShift); + std::swap(mBuffer, rhs.mBuffer); + std::swap(mBaseArray, rhs.mBaseArray); + std::swap(mNioUtils, rhs.mNioUtils); +} + +AutoBuffer::~AutoBuffer() noexcept { + JNIEnv *env = mEnv; + if (mBaseArray) { + jint mode = mDoCommit ? 
0 : JNI_ABORT; + switch (mType) { + case BufferType::BYTE: + env->ReleaseByteArrayElements((jbyteArray)mBaseArray, (jbyte *) mData, mode); + break; + case BufferType::CHAR: + env->ReleaseCharArrayElements((jcharArray)mBaseArray, (jchar *) mData, mode); + break; + case BufferType::SHORT: + env->ReleaseShortArrayElements((jshortArray)mBaseArray, (jshort *) mData, mode); + break; + case BufferType::INT: + env->ReleaseIntArrayElements((jintArray)mBaseArray, (jint *) mData, mode); + break; + case BufferType::LONG: + env->ReleaseLongArrayElements((jlongArray)mBaseArray, (jlong *) mData, mode); + break; + case BufferType::FLOAT: + env->ReleaseFloatArrayElements((jfloatArray)mBaseArray, (jfloat *) mData, mode); + break; + case BufferType::DOUBLE: + env->ReleaseDoubleArrayElements((jdoubleArray)mBaseArray, (jdouble *) mData, mode); + break; + } + env->DeleteGlobalRef(mBaseArray); + } + if (mBuffer) { + env->DeleteGlobalRef(mBuffer); + } + mEnv->DeleteGlobalRef(mNioUtils.jniClass); +} diff --git a/ios/include/common/NioUtils.h b/ios/include/common/NioUtils.h new file mode 100644 index 00000000..72f4656e --- /dev/null +++ b/ios/include/common/NioUtils.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +class AutoBuffer { +public: + enum class BufferType : uint8_t { + BYTE, + CHAR, + SHORT, + INT, + LONG, + FLOAT, + DOUBLE + }; + + // Clients should pass "true" for the commit argument if they intend to mutate the buffer + // contents from native code. + AutoBuffer(JNIEnv* env, jobject buffer, jint size, bool commit = false) noexcept; + AutoBuffer(AutoBuffer&& rhs) noexcept; + ~AutoBuffer() noexcept; + + void attachToJniThread(JNIEnv* env) noexcept { + mEnv = env; + } + + void* getData() const noexcept { + return mUserData; + } + + size_t getSize() const noexcept { + return mSize; + } + + size_t getShift() const noexcept { + return mShift; + } + + size_t countToByte(size_t count) const noexcept { + return count << mShift; + } + +private: + void* mUserData = nullptr; + size_t mSize = 0; + BufferType mType = BufferType::BYTE; + uint8_t mShift = 0; + + JNIEnv* mEnv; + void* mData = nullptr; + jobject mBuffer = nullptr; + jarray mBaseArray = nullptr; + bool mDoCommit = false; + + struct { + jclass jniClass; + jmethodID getBasePointer; + jmethodID getBaseArray; + jmethodID getBaseArrayOffset; + jmethodID getBufferType; + } mNioUtils{}; +}; diff --git a/ios/include/filamat/Enums.h b/ios/include/filamat/Enums.h deleted file mode 100644 index ea626e81..00000000 --- a/ios/include/filamat/Enums.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_ENUMMANAGER_H -#define TNT_ENUMMANAGER_H - -#include -#include -#include - -#include - -namespace filamat { - -using Property = MaterialBuilder::Property; -using UniformType = MaterialBuilder::UniformType; -using SamplerType = MaterialBuilder::SamplerType; -using SubpassType = MaterialBuilder::SubpassType; -using SamplerFormat = MaterialBuilder::SamplerFormat; -using ParameterPrecision = MaterialBuilder::ParameterPrecision; -using OutputTarget = MaterialBuilder::OutputTarget; -using OutputQualifier = MaterialBuilder::VariableQualifier; -using OutputType = MaterialBuilder::OutputType; - -// Convenience methods to convert std::string to Enum and also iterate over Enum values. -class Enums { -public: - - // Returns true if string "s" is a valid string representation of an element of enum T. - template - static bool isValid(const std::string& s) noexcept { - std::unordered_map& map = getMap(); - return map.find(s) != map.end(); - } - - // Return enum matching its string representation. Returns undefined if s is not a valid enum T - // value. You should always call isValid() first to validate a string before calling toEnum(). - template - static T toEnum(const std::string& s) noexcept { - std::unordered_map& map = getMap(); - return map.at(s); - } - - template - static std::string toString(T t) noexcept; - - // Return a map of all values in an enum with their string representation. - template - static std::unordered_map& map() noexcept { - std::unordered_map& map = getMap(); - return map; - }; - -private: - template - static std::unordered_map& getMap() noexcept; - - static std::unordered_map mStringToProperty; - static std::unordered_map mStringToUniformType; - static std::unordered_map mStringToSamplerType; - static std::unordered_map mStringToSubpassType; - static std::unordered_map mStringToSamplerFormat; - static std::unordered_map mStringToSamplerPrecision; - static std::unordered_map mStringToOutputTarget; - static std::unordered_map mStringToOutputQualifier; - static std::unordered_map mStringToOutputType; -}; - -template -std::string Enums::toString(T t) noexcept { - std::unordered_map& map = getMap(); - auto result = std::find_if(map.begin(), map.end(), [t](auto& pair) { - return pair.second == t; - }); - if (result != map.end()) { - return result->first; - } - return ""; -} - -} // namespace filamat - -#endif //TNT_ENUMMANAGER_H diff --git a/ios/include/filamat/IncludeCallback.h b/ios/include/filamat/IncludeCallback.h deleted file mode 100644 index 659ba289..00000000 --- a/ios/include/filamat/IncludeCallback.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_FILAMAT_INCLUDER_H -#define TNT_FILAMAT_INCLUDER_H - -#include - -#include - -namespace filamat { - -struct IncludeResult { - // The include name of the root file, as if it were being included. - // I.e., 'foobar.h' in the case of #include "foobar.h" - const utils::CString includeName; - - // The following fields should be filled out by the IncludeCallback when processing an include, - // or when calling resolveIncludes for the root file. - - // The full contents of the include file. This may contain additional, recursive include - // directives. - utils::CString text; - - // The line number for the first line of text (first line is 0). - size_t lineNumberOffset = 0; - - // The name of the include file. This gets passed as "includerName" for any includes inside of - // source. This field isn't used by the include system; it's up to the callback to give meaning - // to this value and interpret it accordingly. In the case of DirIncluder, this is an empty - // string to represent the root include file, and a canonical path for subsequent included - // files. - utils::CString name; -}; - -/** - * A callback invoked by the include system when an #include "file.h" directive is found. - * - * For example, if a file main.h includes file.h on line 10, then IncludeCallback would be called - * with the following: - * includeCallback("main.h", {.includeName = "file.h" }) - * It's then up to the IncludeCallback to fill out the .text, .name, and (optionally) - * lineNumberOffset fields. - * - * @param includedBy is the value that was given to IncludeResult.name for this source file, or - * the empty string for the root source file. - * @param result is the IncludeResult that the callback should fill out. - * @return true, if the include was resolved successfully, false otherwise. - * - * For an example of implementing this callback, see tools/matc/src/matc/DirIncluder.h. - */ -using IncludeCallback = std::function; - -} // namespace filamat - -#endif diff --git a/ios/include/filamat/MaterialBuilder.h b/ios/include/filamat/MaterialBuilder.h deleted file mode 100644 index 9416d530..00000000 --- a/ios/include/filamat/MaterialBuilder.h +++ /dev/null @@ -1,746 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! 
\file - -#ifndef TNT_FILAMAT_MATERIAL_PACKAGE_BUILDER_H -#define TNT_FILAMAT_MATERIAL_PACKAGE_BUILDER_H - -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -namespace utils { -class JobSystem; -} - -namespace filamat { - -struct MaterialInfo; -class ChunkContainer; -struct Variant; - -class UTILS_PUBLIC MaterialBuilderBase { -public: - /** - * High-level hint that works in concert with TargetApi to determine the shader models (used to - * generate GLSL) and final output representations (spirv and/or text). - */ - enum class Platform { - DESKTOP, - MOBILE, - ALL - }; - - enum class TargetApi : uint8_t { - OPENGL = 0x01u, - VULKAN = 0x02u, - METAL = 0x04u, - ALL = OPENGL | VULKAN | METAL - }; - - enum class TargetLanguage { - GLSL, // GLSL with OpenGL semantics - SPIRV // GLSL with Vulkan semantics - }; - - enum class Optimization { - NONE, - PREPROCESSOR, - SIZE, - PERFORMANCE - }; - - /** - * Initialize MaterialBuilder. - * - * init must be called first before building any materials. - */ - static void init(); - - /** - * Release internal MaterialBuilder resources. - * - * Call shutdown when finished building materials to release all internal resources. After - * calling shutdown, another call to MaterialBuilder::init must precede another material build. - */ - static void shutdown(); - -protected: - // Looks at platform and target API, then decides on shader models and output formats. - void prepare(bool vulkanSemantics); - - using ShaderModel = filament::backend::ShaderModel; - Platform mPlatform = Platform::DESKTOP; - TargetApi mTargetApi = (TargetApi) 0; - Optimization mOptimization = Optimization::PERFORMANCE; - bool mPrintShaders = false; - bool mGenerateDebugInfo = false; - utils::bitset32 mShaderModels; - struct CodeGenParams { - int shaderModel; - TargetApi targetApi; - TargetLanguage targetLanguage; - }; - std::vector mCodeGenPermutations; - // For finding properties and running semantic analysis, we always use the same code gen - // permutation. This is the first permutation generated with default arguments passed to matc. - const CodeGenParams mSemanticCodeGenParams = { - .shaderModel = (int) ShaderModel::GL_ES_30, - .targetApi = TargetApi::OPENGL, - .targetLanguage = TargetLanguage::SPIRV - }; - uint8_t mVariantFilter = 0; - - // Keeps track of how many times MaterialBuilder::init() has been called without a call to - // MaterialBuilder::shutdown(). Internally, glslang does something similar. We keep track for - // ourselves so we can inform the user if MaterialBuilder::init() hasn't been called before - // attempting to build a material. - static std::atomic materialBuilderClients; -}; - -// Utility function that looks at an Engine backend to determine TargetApi -inline constexpr MaterialBuilderBase::TargetApi targetApiFromBackend( - filament::backend::Backend backend) noexcept { - using filament::backend::Backend; - using TargetApi = MaterialBuilderBase::TargetApi; - switch (backend) { - case Backend::DEFAULT: return TargetApi::ALL; - case Backend::OPENGL: return TargetApi::OPENGL; - case Backend::VULKAN: return TargetApi::VULKAN; - case Backend::METAL: return TargetApi::METAL; - case Backend::NOOP: return TargetApi::OPENGL; - } -} - -/** - * MaterialBuilder builds Filament materials from shader code. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * #include - * using namespace filamat; - * - * // Must be called before any materials can be built. 
- * MaterialBuilder::init(); - - * MaterialBuilder builder; - * builder - * .name("My material") - * .material("void material (inout MaterialInputs material) {" - * " prepareMaterial(material);" - * " material.baseColor.rgb = float3(1.0, 0.0, 0.0);" - * "}") - * .shading(MaterialBuilder::Shading::LIT) - * .targetApi(MaterialBuilder::TargetApi::ALL) - * .platform(MaterialBuilder::Platform::ALL); - - * Package package = builder.build(); - * if (package.isValid()) { - * // success! - * } - - * // Call when finished building all materials to release internal - * // MaterialBuilder resources. - * MaterialBuilder::shutdown(); - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * @see filament::Material - */ -class UTILS_PUBLIC MaterialBuilder : public MaterialBuilderBase { -public: - MaterialBuilder(); - - static constexpr size_t MATERIAL_VARIABLES_COUNT = 4; - enum class Variable : uint8_t { - CUSTOM0, - CUSTOM1, - CUSTOM2, - CUSTOM3 - // when adding more variables, make sure to update MATERIAL_VARIABLES_COUNT - }; - - using MaterialDomain = filament::MaterialDomain; - using RefractionMode = filament::RefractionMode; - using RefractionType = filament::RefractionType; - - using ShaderQuality = filament::ShaderQuality; - using BlendingMode = filament::BlendingMode; - using Shading = filament::Shading; - using Interpolation = filament::Interpolation; - using VertexDomain = filament::VertexDomain; - using TransparencyMode = filament::TransparencyMode; - using SpecularAmbientOcclusion = filament::SpecularAmbientOcclusion; - - using UniformType = filament::backend::UniformType; - using SamplerType = filament::backend::SamplerType; - using SubpassType = filament::backend::SubpassType; - using SamplerFormat = filament::backend::SamplerFormat; - using ParameterPrecision = filament::backend::Precision; - using CullingMode = filament::backend::CullingMode; - - enum class VariableQualifier : uint8_t { - OUT - }; - - enum class OutputTarget : uint8_t { - COLOR, - DEPTH - }; - - enum class OutputType : uint8_t { - FLOAT, - FLOAT2, - FLOAT3, - FLOAT4 - }; - - struct PreprocessorDefine { - std::string name; - std::string value; - - PreprocessorDefine(const std::string& name, const std::string& value) : - name(name), value(value) {} - }; - using PreprocessorDefineList = std::vector; - - //! Set the name of this material. - MaterialBuilder& name(const char* name) noexcept; - - //! Set the file name of this material file. Used in error reporting. - MaterialBuilder& fileName(const char* name) noexcept; - - //! Set the shading model. - MaterialBuilder& shading(Shading shading) noexcept; - - //! Set the interpolation mode. - MaterialBuilder& interpolation(Interpolation interpolation) noexcept; - - //! Add a parameter (i.e., a uniform) to this material. - MaterialBuilder& parameter(UniformType type, ParameterPrecision precision, - const char* name) noexcept; - - //! Add a parameter (i.e., a uniform) to this material. - MaterialBuilder& parameter(UniformType type, const char* name) noexcept { - return parameter(type, ParameterPrecision::DEFAULT, name); - } - - //! Add a parameter array to this material. - MaterialBuilder& parameter(UniformType type, size_t size, - ParameterPrecision precision, const char* name) noexcept; - - //! Add a parameter array to this material. - MaterialBuilder& parameter(UniformType type, size_t size, const char* name) noexcept { - return parameter(type, size, ParameterPrecision::DEFAULT, name); - } - - /** - * Add a sampler parameter to this material. 
- * - * When SamplerType::SAMPLER_EXTERNAL is specifed, format and precision are ignored. - */ - MaterialBuilder& parameter(SamplerType samplerType, SamplerFormat format, - ParameterPrecision precision, const char* name) noexcept; - /// @copydoc parameter(SamplerType, SamplerFormat, ParameterPrecision, const char*) - MaterialBuilder& parameter(SamplerType samplerType, SamplerFormat format, - const char* name) noexcept; - /// @copydoc parameter(SamplerType, SamplerFormat, ParameterPrecision, const char*) - MaterialBuilder& parameter(SamplerType samplerType, ParameterPrecision precision, - const char* name) noexcept; - /// @copydoc parameter(SamplerType, SamplerFormat, ParameterPrecision, const char*) - MaterialBuilder& parameter(SamplerType samplerType, const char* name) noexcept; - - //! Custom variables (all float4). - MaterialBuilder& variable(Variable v, const char* name) noexcept; - - /** - * Require a specified attribute. - * - * position is always required and normal depends on the shading model. - */ - MaterialBuilder& require(filament::VertexAttribute attribute) noexcept; - - //! Specify the domain that this material will operate in. - MaterialBuilder& materialDomain(MaterialDomain materialDomain) noexcept; - - /** - * Set the code content of this material. - * - * Surface Domain - * -------------- - * - * Materials in the SURFACE domain must declare a function: - * ~~~~~ - * void material(inout MaterialInputs material) { - * prepareMaterial(material); - * material.baseColor.rgb = float3(1.0, 0.0, 0.0); - * } - * ~~~~~ - * this function *must* call `prepareMaterial(material)` before it returns. - * - * Post-process Domain - * ------------------- - * - * Materials in the POST_PROCESS domain must declare a function: - * ~~~~~ - * void postProcess(inout PostProcessInputs postProcess) { - * postProcess.color = float4(1.0); - * } - * ~~~~~ - * - * @param code The source code of the material. - * @param line The line number offset of the material, where 0 is the first line. Used for error - * reporting - */ - MaterialBuilder& material(const char* code, size_t line = 0) noexcept; - - /** - * Set the callback used for resolving include directives. - * The default is no callback, which disallows all includes. - */ - MaterialBuilder& includeCallback(IncludeCallback callback) noexcept; - - /** - * Set the vertex code content of this material. - * - * Surface Domain - * -------------- - * - * Materials in the SURFACE domain must declare a function: - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * void materialVertex(inout MaterialVertexInputs material) { - * - * } - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * Post-process Domain - * ------------------- - * - * Materials in the POST_PROCESS domain must declare a function: - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * void postProcessVertex(inout PostProcessVertexInputs postProcess) { - * - * } - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * @param code The source code of the material. - * @param line The line number offset of the material, where 0 is the first line. Used for error - * reporting - */ - MaterialBuilder& materialVertex(const char* code, size_t line = 0) noexcept; - - - MaterialBuilder& quality(ShaderQuality quality) noexcept; - - //! Set the blending mode for this material. - MaterialBuilder& blending(BlendingMode blending) noexcept; - - /** - * Set the blending mode of the post-lighting color for this material. 
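The includeCallback() hook documented above lets the caller resolve #include directives itself. Below is a minimal sketch of a callback serving includes from an in-memory table; the exact std::function signature, bool(const utils::CString& includedBy, IncludeResult& result), is reconstructed from the IncludeCallback documentation earlier in this patch and should be treated as an assumption, as is the makeMemoryIncluder helper name.

~~~~~~~~~~
#include <filamat/IncludeCallback.h>

#include <string>
#include <unordered_map>

// in-memory shader snippets served in place of files on disk
static const std::unordered_map<std::string, std::string> gSnippets = {
    { "common.h", "float3 tint(float3 c) { return c * 0.5; }" },
};

// a minimal sketch, assuming IncludeCallback is
// std::function<bool(const utils::CString& includedBy, filamat::IncludeResult& result)>
filamat::IncludeCallback makeMemoryIncluder() {
    return [](const utils::CString& includedBy, filamat::IncludeResult& result) -> bool {
        auto it = gSnippets.find(result.includeName.c_str());
        if (it == gSnippets.end()) {
            return false;  // unresolved include
        }
        result.text = utils::CString(it->second.c_str());
        result.name = result.includeName;  // passed back as includedBy for nested includes
        result.lineNumberOffset = 0;
        return true;
    };
}

// usage: builder.includeCallback(makeMemoryIncluder());
~~~~~~~~~~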
- * Only OPAQUE, TRANSPARENT and ADD are supported, the default is TRANSPARENT. - * This setting requires the material property "postLightingColor" to be set. - */ - MaterialBuilder& postLightingBlending(BlendingMode blending) noexcept; - - //! Set the vertex domain for this material. - MaterialBuilder& vertexDomain(VertexDomain domain) noexcept; - - /** - * How triangles are culled by default (doesn't affect points or lines, BACK by default). - * Material instances can override this. - */ - MaterialBuilder& culling(CullingMode culling) noexcept; - - //! Enable / disable color-buffer write (enabled by default, material instances can override). - MaterialBuilder& colorWrite(bool enable) noexcept; - - //! Enable / disable depth-buffer write (enabled by default for opaque, disabled for others, material instances can override). - MaterialBuilder& depthWrite(bool enable) noexcept; - - //! Enable / disable depth based culling (enabled by default, material instances can override). - MaterialBuilder& depthCulling(bool enable) noexcept; - - /** - * Double-sided materials don't cull faces, equivalent to culling(CullingMode::NONE). - * doubleSided() overrides culling() if called. - * When called with "false", this enables the capability for a run-time toggle. - */ - MaterialBuilder& doubleSided(bool doubleSided) noexcept; - - /** - * Any fragment with an alpha below this threshold is clipped (MASKED blending mode only). - * The mask threshold can also be controlled by using the float material parameter called - * `_maskThreshold`, or by calling - * @ref filament::MaterialInstance::setMaskThreshold "MaterialInstance::setMaskThreshold". - */ - MaterialBuilder& maskThreshold(float threshold) noexcept; - - //! The material output is multiplied by the shadowing factor (UNLIT model only). - MaterialBuilder& shadowMultiplier(bool shadowMultiplier) noexcept; - - //! This material casts transparent shadows. The blending mode must be TRANSPARENT or FADE. - MaterialBuilder& transparentShadow(bool transparentShadow) noexcept; - - /** - * Reduces specular aliasing for materials that have low roughness. Turning this feature on also - * helps preserve the shapes of specular highlights as an object moves away from the camera. - * When turned on, two float material parameters are added to control the effect: - * `_specularAAScreenSpaceVariance` and `_specularAAThreshold`. You can also use - * @ref filament::MaterialInstance::setSpecularAntiAliasingVariance - * "MaterialInstance::setSpecularAntiAliasingVariance" and - * @ref filament::MaterialInstance::setSpecularAntiAliasingThreshold - * "setSpecularAntiAliasingThreshold" - * - * Disabled by default. - */ - MaterialBuilder& specularAntiAliasing(bool specularAntiAliasing) noexcept; - - /** - * Sets the screen-space variance of the filter kernel used when applying specular - * anti-aliasing. The default value is set to 0.15. The specified value should be between 0 and - * 1 and will be clamped if necessary. - */ - MaterialBuilder& specularAntiAliasingVariance(float screenSpaceVariance) noexcept; - - /** - * Sets the clamping threshold used to suppress estimation errors when applying specular - * anti-aliasing. The default value is set to 0.2. The specified value should be between 0 and 1 - * and will be clamped if necessary. - */ - MaterialBuilder& specularAntiAliasingThreshold(float threshold) noexcept; - - /** - * Enables or disables the index of refraction (IoR) change caused by the clear coat layer when - * present. When the IoR changes, the base color is darkened. 
Disabling this feature preserves - * the base color as initially specified. - * - * Enabled by default. - */ - MaterialBuilder& clearCoatIorChange(bool clearCoatIorChange) noexcept; - - //! Enable / disable flipping of the Y coordinate of UV attributes, enabled by default. - MaterialBuilder& flipUV(bool flipUV) noexcept; - - //! Enable / disable multi-bounce ambient occlusion, disabled by default on mobile. - MaterialBuilder& multiBounceAmbientOcclusion(bool multiBounceAO) noexcept; - - //! Set the specular ambient occlusion technique. Disabled by default on mobile. - MaterialBuilder& specularAmbientOcclusion(SpecularAmbientOcclusion specularAO) noexcept; - - //! Specify the refraction - MaterialBuilder& refractionMode(RefractionMode refraction) noexcept; - - //! Specify the refraction type - MaterialBuilder& refractionType(RefractionType refractionType) noexcept; - - //! Specifies how transparent objects should be rendered (default is DEFAULT). - MaterialBuilder& transparencyMode(TransparencyMode mode) noexcept; - - /** - * Enable / disable custom surface shading. Custom surface shading requires the LIT - * shading model. In addition, the following function must be defined in the fragment - * block: - * - * ~~~~~ - * vec3 surfaceShading(const MaterialInputs materialInputs, - * const ShadingData shadingData, const LightData lightData) { - * - * return vec3(1.0); // Compute surface shading with custom BRDF, etc. - * } - * ~~~~~ - * - * This function is invoked once per light. Please refer to the materials documentation - * for more information about the different parameters. - * - * @param customSurfaceShading Enables or disables custom surface shading - */ - MaterialBuilder& customSurfaceShading(bool customSurfaceShading) noexcept; - - /** - * Specifies desktop vs mobile; works in concert with TargetApi to determine the shader models - * (used to generate code) and final output representations (spirv and/or text). - */ - MaterialBuilder& platform(Platform platform) noexcept; - - /** - * Specifies OpenGL, Vulkan, or Metal. - * This can be called repeatedly to build for multiple APIs. - * Works in concert with Platform to determine the shader models (used to generate code) and - * final output representations (spirv and/or text). - * If linking against filamat_lite, only `OPENGL` is allowed. - */ - MaterialBuilder& targetApi(TargetApi targetApi) noexcept; - - /** - * Specifies the level of optimization to apply to the shaders (default is PERFORMANCE). - * If linking against filamat_lite, this _must_ be called with Optimization::NONE. - */ - MaterialBuilder& optimization(Optimization optimization) noexcept; - - // TODO: this is present here for matc's "--print" flag, but ideally does not belong inside - // MaterialBuilder. - //! If true, will output the generated GLSL shader code to stdout. - MaterialBuilder& printShaders(bool printShaders) noexcept; - - //! If true, will include debugging information in generated SPIRV. - MaterialBuilder& generateDebugInfo(bool generateDebugInfo) noexcept; - - //! Specifies a list of variants that should be filtered out during code generation. - MaterialBuilder& variantFilter(uint8_t variantFilter) noexcept; - - //! Adds a new preprocessor macro definition to the shader code. Can be called repeatedly. - MaterialBuilder& shaderDefine(const char* name, const char* value) noexcept; - - //! Add a new fragment shader output variable. Only valid for materials in the POST_PROCESS domain. 
- MaterialBuilder& output(VariableQualifier qualifier, OutputTarget target, - OutputType type, const char* name, int location = -1) noexcept; - - MaterialBuilder& enableFramebufferFetch() noexcept; - - /** - * Build the material. If you are using the Filament engine with this library, you should use - * the job system provided by Engine. - */ - Package build(utils::JobSystem& jobSystem) noexcept; - -public: - // The methods and types below are for internal use - /// @cond never - - /** - * Add a subpass parameter to this material. - */ - MaterialBuilder& parameter(SubpassType subpassType, SamplerFormat format, ParameterPrecision - precision, const char* name) noexcept; - MaterialBuilder& parameter(SubpassType subpassType, SamplerFormat format, const char* name) - noexcept; - MaterialBuilder& parameter(SubpassType subpassType, ParameterPrecision precision, - const char* name) noexcept; - MaterialBuilder& parameter(SubpassType subpassType, const char* name) noexcept; - - struct Parameter { - Parameter() noexcept : parameterType(INVALID) {} - - // Sampler - Parameter(const char* paramName, SamplerType t, SamplerFormat f, ParameterPrecision p) - : name(paramName), size(1), precision(p), samplerType(t), format(f), parameterType(SAMPLER) { } - - // Uniform - Parameter(const char* paramName, UniformType t, size_t typeSize, ParameterPrecision p) - : name(paramName), size(typeSize), uniformType(t), precision(p), parameterType(UNIFORM) { } - - // Subpass - Parameter(const char* paramName, SubpassType t, SamplerFormat f, ParameterPrecision p) - : name(paramName), size(1), precision(p), subpassType(t), format(f), parameterType(SUBPASS) { } - - utils::CString name; - size_t size; - UniformType uniformType; - ParameterPrecision precision; - SamplerType samplerType; - SubpassType subpassType; - SamplerFormat format; - enum { - INVALID, - UNIFORM, - SAMPLER, - SUBPASS - } parameterType; - - bool isSampler() const { return parameterType == SAMPLER; } - bool isUniform() const { return parameterType == UNIFORM; } - bool isSubpass() const { return parameterType == SUBPASS; } - }; - - struct Output { - Output() noexcept = default; - Output(const char* outputName, VariableQualifier qualifier, OutputTarget target, - OutputType type, int location) noexcept - : name(outputName), qualifier(qualifier), target(target), type(type), - location(location) { } - - utils::CString name; - VariableQualifier qualifier; - OutputTarget target; - OutputType type; - int location; - }; - - static constexpr size_t MATERIAL_PROPERTIES_COUNT = filament::MATERIAL_PROPERTIES_COUNT; - using Property = filament::Property; - - using PropertyList = bool[MATERIAL_PROPERTIES_COUNT]; - using VariableList = utils::CString[MATERIAL_VARIABLES_COUNT]; - using OutputList = std::vector; - - static constexpr size_t MAX_COLOR_OUTPUT = filament::backend::MRT::MAX_SUPPORTED_RENDER_TARGET_COUNT; - static constexpr size_t MAX_DEPTH_OUTPUT = 1; - static_assert(MAX_COLOR_OUTPUT == 8, - "When updating MRT::TARGET_COUNT, manually update post_process_inputs.fs" - " and post_process.fs"); - - // Preview the first shader generated by the given CodeGenParams. - // This is used to run Static Code Analysis before generating a package. 
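Putting the builder entry points above together, this is a hedged sketch of compiling a simple POST_PROCESS material package at runtime; buildGrayscaleMaterial is a hypothetical helper and any utils::JobSystem instance can be passed to build().

~~~~~~~~~~
#include <filamat/MaterialBuilder.h>

// a minimal sketch: compiles a post-process material and returns the package
filamat::Package buildGrayscaleMaterial(utils::JobSystem& js) {
    using Builder = filamat::MaterialBuilder;

    Builder::init();  // must precede any build
    Builder builder;
    builder.name("Grayscale")
            .materialDomain(Builder::MaterialDomain::POST_PROCESS)
            .output(Builder::VariableQualifier::OUT,
                    Builder::OutputTarget::COLOR,
                    Builder::OutputType::FLOAT4, "color")
            .material("void postProcess(inout PostProcessInputs postProcess) {"
                      "    postProcess.color = float4(0.5);"
                      "}")
            .platform(Builder::Platform::MOBILE)
            .targetApi(Builder::TargetApi::OPENGL);

    filamat::Package package = builder.build(js);
    Builder::shutdown();  // release compiler resources when done building materials
    return package;
}
~~~~~~~~~~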
- const std::string peek(filament::backend::ShaderType type, - const CodeGenParams& params, const PropertyList& properties) noexcept; - - // Returns true if any of the parameter samplers is of type samplerExternal - bool hasExternalSampler() const noexcept; - - static constexpr size_t MAX_PARAMETERS_COUNT = 48; - static constexpr size_t MAX_SUBPASS_COUNT = 1; - using ParameterList = Parameter[MAX_PARAMETERS_COUNT]; - - // returns the number of parameters declared in this material - uint8_t getParameterCount() const noexcept { return mParameterCount; } - - // returns a list of at least getParameterCount() parameters - const ParameterList& getParameters() const noexcept { return mParameters; } - - uint8_t getVariantFilter() const { return mVariantFilter; } - - /// @endcond - -private: - void prepareToBuild(MaterialInfo& info) noexcept; - - // Return true if the shader is syntactically and semantically valid. - // This method finds all the properties defined in the fragment and - // vertex shaders of the material. - bool findAllProperties() noexcept; - // Multiple calls to findProperties accumulate the property sets across fragment - // and vertex shaders in mProperties. - bool findProperties(filament::backend::ShaderType type, - MaterialBuilder::PropertyList& p) noexcept; - bool runSemanticAnalysis() noexcept; - - bool checkLiteRequirements() noexcept; - - void writeCommonChunks(ChunkContainer& container, MaterialInfo& info) const noexcept; - void writeSurfaceChunks(ChunkContainer& container) const noexcept; - - bool generateShaders( - utils::JobSystem& jobSystem, - const std::vector& variants, ChunkContainer& container, - const MaterialInfo& info) const noexcept; - - bool isLit() const noexcept { return mShading != filament::Shading::UNLIT; } - - utils::CString mMaterialName; - utils::CString mFileName; - - class ShaderCode { - public: - void setLineOffset(size_t offset) noexcept { mLineOffset = offset; } - void setUnresolved(const utils::CString& code) noexcept { - mIncludesResolved = false; - mCode = code; - } - - // Resolve all the #include directives, returns true if successful. 
- bool resolveIncludes(IncludeCallback callback, const utils::CString& fileName) noexcept; - - const utils::CString& getResolved() const noexcept { - assert(mIncludesResolved); - return mCode; - } - - size_t getLineOffset() const noexcept { return mLineOffset; } - - private: - utils::CString mCode; - size_t mLineOffset = 0; - bool mIncludesResolved = false; - }; - - ShaderCode mMaterialCode; - ShaderCode mMaterialVertexCode; - - IncludeCallback mIncludeCallback = nullptr; - - PropertyList mProperties; - ParameterList mParameters; - VariableList mVariables; - OutputList mOutputs; - - ShaderQuality mShaderQuality = ShaderQuality::DEFAULT; - BlendingMode mBlendingMode = BlendingMode::OPAQUE; - BlendingMode mPostLightingBlendingMode = BlendingMode::TRANSPARENT; - CullingMode mCullingMode = CullingMode::BACK; - Shading mShading = Shading::LIT; - MaterialDomain mMaterialDomain = MaterialDomain::SURFACE; - RefractionMode mRefractionMode = RefractionMode::NONE; - RefractionType mRefractionType = RefractionType::SOLID; - Interpolation mInterpolation = Interpolation::SMOOTH; - VertexDomain mVertexDomain = VertexDomain::OBJECT; - TransparencyMode mTransparencyMode = TransparencyMode::DEFAULT; - - filament::AttributeBitset mRequiredAttributes; - - float mMaskThreshold = 0.4f; - float mSpecularAntiAliasingVariance = 0.15f; - float mSpecularAntiAliasingThreshold = 0.2f; - - bool mShadowMultiplier = false; - bool mTransparentShadow = false; - - uint8_t mParameterCount = 0; - - bool mDoubleSided = false; - bool mDoubleSidedCapability = false; - bool mColorWrite = true; - bool mDepthTest = true; - bool mDepthWrite = true; - bool mDepthWriteSet = false; - - bool mSpecularAntiAliasing = false; - bool mClearCoatIorChange = true; - - bool mFlipUV = true; - - bool mMultiBounceAO = false; - bool mMultiBounceAOSet = false; - - SpecularAmbientOcclusion mSpecularAO = SpecularAmbientOcclusion::NONE; - bool mSpecularAOSet = false; - - bool mCustomSurfaceShading = false; - - bool mEnableFramebufferFetch = false; - - PreprocessorDefineList mDefines; -}; - -} // namespace filamat - -template<> struct utils::EnableBitMaskOperators - : public std::true_type {}; - -#endif diff --git a/ios/include/filamat/Package.h b/ios/include/filamat/Package.h deleted file mode 100644 index 93e74a58..00000000 --- a/ios/include/filamat/Package.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef TNT_FILAMAT_PACKAGE_H -#define TNT_FILAMAT_PACKAGE_H - -#include -#include -#include - -#include -#include - -#include - -namespace filamat { - -class UTILS_PUBLIC Package { -public: - Package() = default; - - // Regular constructor - explicit Package(size_t size) : mSize(size) { - mPayload = new uint8_t[size]; - } - - Package(const void* src, size_t size) : Package(size) { - memcpy(mPayload, src, size); - } - - // Move Constructor - Package(Package&& other) noexcept : mPayload(other.mPayload), mSize(other.mSize), - mValid(other.mValid) { - other.mPayload = nullptr; - other.mSize = 0; - other.mValid = false; - } - - // Move assignment - Package& operator=(Package&& other) noexcept { - std::swap(mPayload, other.mPayload); - std::swap(mSize, other.mSize); - std::swap(mValid, other.mValid); - return *this; - } - - // Copy assignment operator disallowed. - Package& operator=(const Package& other) = delete; - - // Copy constructor disallowed. - Package(const Package& other) = delete; - - ~Package() { - delete[] mPayload; - } - - uint8_t* getData() const noexcept { - return mPayload; - } - - size_t getSize() const noexcept { - return mSize; - } - - uint8_t* getEnd() const noexcept { - return mPayload + mSize; - } - - void setValid(bool valid) noexcept { - mValid = valid; - } - - bool isValid() const noexcept { - return mValid; - } - - static Package invalidPackage() { - Package package(0); - package.setValid(false); - return package; - } - -private: - uint8_t* mPayload = nullptr; - size_t mSize = 0; - bool mValid = true; -}; - -} // namespace filamat -#endif diff --git a/ios/include/filament-iblprefilter/IBLPrefilterContext.h b/ios/include/filament-iblprefilter/IBLPrefilterContext.h deleted file mode 100644 index 560005f8..00000000 --- a/ios/include/filament-iblprefilter/IBLPrefilterContext.h +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_IBL_PREFILTER_IBLPREFILTER_H -#define TNT_IBL_PREFILTER_IBLPREFILTER_H - -#include -#include - -#include - -namespace filament { -class Engine; -class View; -class Scene; -class Renderer; -class Material; -class MaterialInstance; -class VertexBuffer; -class IndexBuffer; -class Camera; -class Texture; -} // namespace filament - -/** - * IBLPrefilterContext creates and initializes GPU state common to all environment map filters - * supported. Typically, only one instance per filament Engine of this object needs to exist. 
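Package, shown above, is simply an owning byte buffer with move-only semantics; a common use is persisting the compiled blob so it can later be loaded with Material::Builder::package(). A small sketch using only getData(), getSize() and isValid(); savePackage is a hypothetical helper.

~~~~~~~~~~
#include <fstream>

// a minimal sketch: persist a compiled material package to disk
bool savePackage(const filamat::Package& pkg, const char* path) {
    if (!pkg.isValid()) {
        return false;  // build() failed, nothing to write
    }
    std::ofstream out(path, std::ios::binary);
    out.write(reinterpret_cast<const char*>(pkg.getData()), pkg.getSize());
    return out.good();
}
~~~~~~~~~~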
- * - * Usage Example: - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * #include - * using namespace filament; - * - * Engine* engine = Engine::create(); - * - * IBLPrefilterContext context(engine); - * IBLPrefilterContext::SpecularFilter filter(context); - * Texture* texture = filter(environment_cubemap); - * - * IndirectLight* indirectLight = IndirectLight::Builder() - * .reflections(texture) - * .build(engine); - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ -class UTILS_PUBLIC IBLPrefilterContext { -public: - - /** - * Creates an IBLPrefilter context. - * @param engine filament engine to use - */ - IBLPrefilterContext(filament::Engine& engine); - - /** - * Destroys all GPU resources created during initialization. - */ - ~IBLPrefilterContext() noexcept; - - // not copyable - IBLPrefilterContext(IBLPrefilterContext const&) = delete; - IBLPrefilterContext& operator=(IBLPrefilterContext const&) = delete; - - // movable - IBLPrefilterContext(IBLPrefilterContext&& rhs) noexcept; - IBLPrefilterContext& operator=(IBLPrefilterContext&& rhs); - - // ------------------------------------------------------------------------------------------- - - /** - * EquirectangularToCubemap is use to convert an equirectangluar image to a cubemap. - */ - class EquirectangularToCubemap { - public: - /** - * Creates a EquirectangularToCubemap processor. - * @param context IBLPrefilterContext to use - */ - explicit EquirectangularToCubemap(IBLPrefilterContext& context); - - /** - * Destroys all GPU resources created during initialization. - */ - ~EquirectangularToCubemap() noexcept; - - EquirectangularToCubemap(EquirectangularToCubemap const&) = delete; - EquirectangularToCubemap& operator=(EquirectangularToCubemap const&) = delete; - EquirectangularToCubemap(EquirectangularToCubemap&& rhs) noexcept; - EquirectangularToCubemap& operator=(EquirectangularToCubemap&& rhs); - - /** - * Converts an equirectangular image to a cubemap. - * @param equirectangular Texture to convert to a cubemap. - * - Can't be null. - * - Must be a 2d texture - * - Must have equirectangular geometry, that is width == 2*height. - * - Must be allocated with all mip levels. - * - Must be SAMPLEABLE - * @param outCubemap Output cubemap. If null the texture is automatically created - * with default parameters (size of 256 with 5 levels). - * - Must be a cubemap - * - Must have SAMPLEABLE and COLOR_ATTACHMENT usage bits - * @return returns outCubemap - */ - filament::Texture* operator()( - filament::Texture const* equirectangular, - filament::Texture* outCubemap = nullptr); - - private: - IBLPrefilterContext& mContext; - filament::Material* mEquirectMaterial = nullptr; - }; - - - /** - * SpecularFilter is a GPU based implementation of the specular probe pre-integration filter. - * An instance of SpecularFilter is needed per filter configuration. A filter configuration - * contains the filter's kernel and sample count. - */ - class SpecularFilter { - public: - enum class Kernel : uint8_t { - D_GGX, // Trowbridge-reitz distribution - }; - - /** - * Filter configuration. - */ - struct Config { - uint16_t sampleCount = 1024u; //!< filter sample count (max 2048) - uint8_t levelCount = 5u; //!< number of roughness levels - Kernel kernel = Kernel::D_GGX; //!< filter kernel - }; - - /** - * Filtering options for the current environment. 
- */ - struct Options { - float hdrLinear = 1024.0f; //!< no HDR compression up to this value - float hdrMax = 16384.0f; //!< HDR compression between hdrLinear and hdrMax - float lodOffset = 1.0f; //!< Good values are 1.0 or 2.0. Higher values help with heavily HDR inputs. - bool generateMipmap = true; //!< set to false if the environment map already has mipmaps - }; - - /** - * Creates a SpecularFilter processor. - * @param context IBLPrefilterContext to use - * @param config Configuration of the filter - */ - SpecularFilter(IBLPrefilterContext& context, Config config); - - /** - * Creates a filter with the default configuration. - * @param context IBLPrefilterContext to use - */ - explicit SpecularFilter(IBLPrefilterContext& context); - - /** - * Destroys all GPU resources created during initialization. - */ - ~SpecularFilter() noexcept; - - SpecularFilter(SpecularFilter const&) = delete; - SpecularFilter& operator=(SpecularFilter const&) = delete; - SpecularFilter(SpecularFilter&& rhs) noexcept; - SpecularFilter& operator=(SpecularFilter&& rhs); - - /** - * Generates a prefiltered cubemap. - * @param options Options for this environment - * @param environmentCubemap Environment cubemap (input). Can't be null. - * This cubemap must be SAMPLEABLE and must have all its - * levels allocated. If Options.generateMipmap is true, - * the mipmap levels will be overwritten, otherwise - * it is assumed that all levels are correctly initialized. - * @param outReflectionsTexture Output prefiltered texture or, if null, it is - * automatically created with some default parameters. - * outReflectionsTexture must be a cubemap, it must have - * at least COLOR_ATTACHMENT and SAMPLEABLE usages and at - * least the same number of levels than requested by Config. - * @return returns outReflectionsTexture - */ - filament::Texture* operator()(Options options, - filament::Texture const* environmentCubemap, - filament::Texture* outReflectionsTexture = nullptr); - - /** - * Generates a prefiltered cubemap. - * @param environmentCubemap Environment cubemap (input). Can't be null. - * This cubemap must be SAMPLEABLE and must have all its - * levels allocated. All mipmap levels will be overwritten. - * @param outReflectionsTexture Output prefiltered texture or, if null, it is - * automatically created with some default parameters. - * outReflectionsTexture must be a cubemap, it must have - * at least COLOR_ATTACHMENT and SAMPLEABLE usages and at - * least the same number of levels than requested by Config. - * @return returns outReflectionsTexture - */ - filament::Texture* operator()( - filament::Texture const* environmentCubemap, - filament::Texture* outReflectionsTexture = nullptr); - - // TODO: option for progressive filtering - - // TODO: add a callback for when the processing is done? 
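The two filters above are typically chained: convert a decoded equirectangular HDR texture to a cubemap, then prefilter it for specular IBL. A hedged sketch of that flow follows; the IndirectLight::Builder call is the regular filament API, assumed here since it is not part of this header, and makeIBL is a hypothetical helper.

~~~~~~~~~~
#include <filament-iblprefilter/IBLPrefilterContext.h>
#include <filament/IndirectLight.h>

// a minimal sketch: equirectangular HDR -> cubemap -> prefiltered reflections
filament::IndirectLight* makeIBL(filament::Engine& engine,
        filament::Texture const* equirect /* 2D texture, width == 2 * height, SAMPLEABLE */) {
    IBLPrefilterContext context(engine);
    IBLPrefilterContext::EquirectangularToCubemap equirectToCubemap(context);
    IBLPrefilterContext::SpecularFilter specularFilter(context);

    filament::Texture* skybox = equirectToCubemap(equirect);   // cubemap created with defaults
    filament::Texture* reflections = specularFilter(skybox);   // prefiltered reflections cubemap

    return filament::IndirectLight::Builder()
            .reflections(reflections)
            .build(engine);
}
~~~~~~~~~~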
- - private: - filament::Texture* createReflectionsTexture(); - IBLPrefilterContext& mContext; - filament::Material* mKernelMaterial = nullptr; - filament::Texture* mKernelTexture = nullptr; - uint32_t mSampleCount = 0u; - uint8_t mLevelCount = 1u; - }; - -private: - friend class Filter; - filament::Engine& mEngine; - filament::Renderer* mRenderer{}; - filament::Scene* mScene{}; - filament::VertexBuffer* mVertexBuffer{}; - filament::IndexBuffer* mIndexBuffer{}; - filament::Camera* mCamera{}; - utils::Entity mFullScreenQuadEntity{}; - utils::Entity mCameraEntity{}; - filament::View* mView{}; - filament::Material* mIntegrationMaterial{}; -}; - -#endif //TNT_IBL_PREFILTER_IBLPREFILTER_H diff --git a/ios/include/filament/DebugRegistry.h b/ios/include/filament/DebugRegistry.h index a7a9e804..b5fb8f21 100644 --- a/ios/include/filament/DebugRegistry.h +++ b/ios/include/filament/DebugRegistry.h @@ -54,18 +54,6 @@ public: Type type; //!< property type }; - struct PropertyArray { - Property const* array; - size_t size; - }; - - /** - * Queries the list of all available properties. - * - * @return A pair containing a pointer to a Property array and the size of this array. - */ - PropertyArray getProperties() const noexcept; - /** * Queries whether a property exists * @param name The name of the property to query @@ -123,6 +111,24 @@ public: bool getProperty(const char* name, math::float4* v) const noexcept; /** @}*/ + struct DataSource { + void const* data; + size_t count; + }; + + DataSource getDataSource(const char* name) const noexcept; + + struct FrameHistory { + using duration_ms = float; + duration_ms target{}; + duration_ms targetWithHeadroom{}; + duration_ms frameTime{}; + duration_ms frameTimeDenoised{}; + float scale = 1.0f; + float pid_e = 0.0f; + float pid_i = 0.0f; + float pid_d = 0.0f; + }; }; diff --git a/ios/include/filament/Engine.h b/ios/include/filament/Engine.h index 3e32de68..0a31013f 100644 --- a/ios/include/filament/Engine.h +++ b/ios/include/filament/Engine.h @@ -35,9 +35,11 @@ class ColorGrading; class DebugRegistry; class Fence; class IndexBuffer; +class SkinningBuffer; class IndirectLight; class Material; class MaterialInstance; +class MorphTargetBuffer; class Renderer; class RenderTarget; class Scene; @@ -419,6 +421,8 @@ public: bool destroy(const VertexBuffer* p); //!< Destroys an VertexBuffer object. bool destroy(const Fence* p); //!< Destroys a Fence object. bool destroy(const IndexBuffer* p); //!< Destroys an IndexBuffer object. + bool destroy(const SkinningBuffer* p); //!< Destroys a SkinningBuffer object. + bool destroy(const MorphTargetBuffer* p); //!< Destroys a MorphTargetBuffer object. bool destroy(const IndirectLight* p); //!< Destroys an IndirectLight object. /** diff --git a/ios/include/filament/LightManager.h b/ios/include/filament/LightManager.h index eb542da2..fad7d09d 100644 --- a/ios/include/filament/LightManager.h +++ b/ios/include/filament/LightManager.h @@ -273,6 +273,7 @@ public: * Constant bias in depth-resolution units by which shadows are moved away from the * light. The default value of 0.5 is used to round depth values up. * Generally this value shouldn't be changed or at least be small and positive. + * This is ignored when the View's ShadowType is set to VSM. */ float polygonOffsetConstant = 0.5f; @@ -281,6 +282,7 @@ public: * away from the light. The default value of 2.0 works well with SHADOW_SAMPLING_PCF_LOW. * Generally this value is between 0.5 and the size in texel of the PCF filter. 
* Setting this value correctly is essential for LISPSM shadow-maps. + * This is ignored when the View's ShadowType is set to VSM. */ float polygonOffsetSlope = 2.0f; @@ -332,6 +334,12 @@ public: */ float blurWidth = 0.0f; } vsm; + + /** + * Light bulb radius used for soft shadows. Currently this is only used when DPCF or PCSS is + * enabled. (2cm by default). + */ + float shadowBulbRadius = 0.02f; }; struct ShadowCascades { @@ -558,10 +566,11 @@ public: * and are defined by the angle from the center axis to where the falloff begins (i.e. * cones are defined by their half-angle). * - * @param inner inner cone angle in *radians* between 0 and @f$ \pi/2 @f$ - * - * @param outer outer cone angle in *radians* between \p inner and @f$ \pi/2 @f$ + * Both inner and outer are silently clamped to a minimum value of 0.5 degrees + * (~0.00873 radians) to avoid floating-point precision issues during rendering. * + * @param inner inner cone angle in *radians* between 0.00873 and \p outer + * @param outer outer cone angle in *radians* between 0.00873 inner and @f$ \pi/2 @f$ * @return This Builder, for chaining calls. * * @note @@ -812,8 +821,8 @@ public: * Dynamically updates a spot light's cone as angles * * @param i Instance of the component obtained from getInstance(). - * @param inner inner cone angle in *radians* between 0 and pi/2 - * @param outer outer cone angle in *radians* between inner and pi/2 + * @param inner inner cone angle in *radians* between 0.00873 and outer + * @param outer outer cone angle in *radians* between 0.00873 and pi/2 * * @see Builder.spotLightCone() */ diff --git a/ios/include/filament/Material.h b/ios/include/filament/Material.h index a8722804..572de844 100644 --- a/ios/include/filament/Material.h +++ b/ios/include/filament/Material.h @@ -193,9 +193,12 @@ public: //! Returns the refraction mode used by this material. RefractionMode getRefractionMode() const noexcept; - // Return the refraction type used by this material. + //! Return the refraction type used by this material. RefractionType getRefractionType() const noexcept; + //! Returns the reflection mode used by this material. + ReflectionMode getReflectionMode() const noexcept; + /** * Returns the number of parameters declared by this material. * The returned value can be 0. diff --git a/ios/include/filament/MaterialChunkType.h b/ios/include/filament/MaterialChunkType.h index 56d6eb50..b80e6354 100644 --- a/ios/include/filament/MaterialChunkType.h +++ b/ios/include/filament/MaterialChunkType.h @@ -62,6 +62,7 @@ enum UTILS_PUBLIC ChunkType : uint64_t { MaterialDomain = charTo64bitNum("MAT_DOMN"), MaterialRefraction = charTo64bitNum("MAT_REFM"), MaterialRefractionType = charTo64bitNum("MAT_REFT"), + MaterialReflectionMode = charTo64bitNum("MAT_REFL"), MaterialRequiredAttributes = charTo64bitNum("MAT_REQA"), MaterialDepthWriteSet = charTo64bitNum("MAT_DEWS"), diff --git a/ios/include/filament/MaterialEnums.h b/ios/include/filament/MaterialEnums.h index 4a7a35e8..c4ef79b5 100644 --- a/ios/include/filament/MaterialEnums.h +++ b/ios/include/filament/MaterialEnums.h @@ -27,7 +27,7 @@ namespace filament { // update this when a new version of filament wouldn't work with older materials -static constexpr size_t MATERIAL_VERSION = 12; +static constexpr size_t MATERIAL_VERSION = 17; /** * Supported shading models @@ -136,20 +136,10 @@ enum VertexAttribute : uint8_t { CUSTOM6 = 14, CUSTOM7 = 15, - // Aliases for vertex morphing. 
- MORPH_POSITION_0 = CUSTOM0, - MORPH_POSITION_1 = CUSTOM1, - MORPH_POSITION_2 = CUSTOM2, - MORPH_POSITION_3 = CUSTOM3, - MORPH_TANGENTS_0 = CUSTOM4, - MORPH_TANGENTS_1 = CUSTOM5, - MORPH_TANGENTS_2 = CUSTOM6, - MORPH_TANGENTS_3 = CUSTOM7, - // this is limited by driver::MAX_VERTEX_ATTRIBUTE_COUNT }; -static constexpr size_t MAX_MORPH_TARGETS = 4; +static constexpr size_t MAX_MORPH_TARGETS = 128; // this is limited by filament::CONFIG_MAX_MORPH_TARGET_COUNT static constexpr size_t MAX_CUSTOM_ATTRIBUTES = 8; /** @@ -186,6 +176,14 @@ enum class RefractionType : uint8_t { THIN = 1, //!< refraction through thin objects (e.g. window) }; +/** + * Reflection mode + */ +enum class ReflectionMode : uint8_t { + DEFAULT = 0, //! reflections sample from the scene's IBL only + SCREEN_SPACE = 1, //! reflections sample from screen space, and fallback to the scene's IBL +}; + // can't really use std::underlying_type::type because the driver takes a uint32_t using AttributeBitset = utils::bitset32; diff --git a/ios/include/filament/MorphTargetBuffer.h b/ios/include/filament/MorphTargetBuffer.h new file mode 100644 index 00000000..f3c08770 --- /dev/null +++ b/ios/include/filament/MorphTargetBuffer.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_FILAMENT_MORPHTARGETBUFFER_H +#define TNT_FILAMENT_MORPHTARGETBUFFER_H + +#include + +#include + +#include + + +namespace filament { + +/** + * MorphTargetBuffer is used to hold morphing data (positions and tangents). + */ +class UTILS_PUBLIC MorphTargetBuffer : public FilamentAPI { + struct BuilderDetails; + +public: + class Builder : public BuilderBase { + friend struct BuilderDetails; + public: + Builder() noexcept; + Builder(Builder const& rhs) noexcept; + Builder(Builder&& rhs) noexcept; + ~Builder() noexcept; + Builder& operator=(Builder const& rhs) noexcept; + Builder& operator=(Builder&& rhs) noexcept; + + /** + * Size of the morph targets in vertex counts. + * @param vertexCount Number of vertex counts the morph targets can hold. + * @return A reference to this Builder for chaining calls. + */ + Builder& vertexCount(size_t vertexCount) noexcept; + + /** + * Size of the morph targets in targets. + * @param count Number of targets the morph targets can hold. + * @return A reference to this Builder for chaining calls. + */ + Builder& count(size_t count) noexcept; + + /** + * Creates the MorphTargetBuffer object and returns a pointer to it. + * + * @param engine Reference to the filament::Engine to associate this MorphTargetBuffer with. + * + * @return pointer to the newly created object or nullptr if exceptions are disabled and + * an error occurred. + * + * @exception utils::PostConditionPanic if a runtime error occurred, such as running out of + * memory or other resources. + * @exception utils::PreConditionPanic if a parameter to a builder function was invalid. 
+ */ + MorphTargetBuffer* build(Engine& engine); + private: + friend class FMorphTargetBuffer; + }; + + /** + * Updates the position of morph target at the index. + * @param engine Reference to the filament::Engine associated with this MorphTargetBuffer. + * @param targetIndex the index of morph target to be updated. + * @param weights pointer to at least count positions + * @param count number of position elements in positions + */ + void setPositionsAt(Engine& engine, size_t targetIndex, math::float3 const* positions, size_t count); + + /** + * Updates the position of morph target at the index. + * @param engine Reference to the filament::Engine associated with this MorphTargetBuffer. + * @param targetIndex the index of morph target to be updated. + * @param weights pointer to at least count positions + * @param count number of position elements in positions + */ + void setPositionsAt(Engine& engine, size_t targetIndex, math::float4 const* positions, size_t count); + + /** + * Updates the position of morph target at the index. + * @param engine Reference to the filament::Engine associated with this MorphTargetBuffer. + * @param targetIndex the index of morph target to be updated. + * @param tangents pointer to at least count tangents + * @param count number of tangent elements in tangents + */ + void setTangentsAt(Engine& engine, size_t targetIndex, math::short4 const* tangents, size_t count); + + /** + * Returns the vertex count of this MorphTargetBuffer. + * @return The number of vertices the MorphTargetBuffer holds. + */ + size_t getVertexCount() const noexcept; + + /** + * Returns the target count of this MorphTargetBuffer. + * @return The number of targets the MorphTargetBuffer holds. + */ + size_t getCount() const noexcept; +}; + +} // namespace filament + +#endif //TNT_FILAMENT_MORPHTARGETBUFFER_H diff --git a/ios/include/filament/Options.h b/ios/include/filament/Options.h index fd9a58a1..b43a5f34 100644 --- a/ios/include/filament/Options.h +++ b/ios/include/filament/Options.h @@ -284,6 +284,28 @@ struct AmbientOcclusionOptions { } ssct; }; +/** + * Options for Temporal Multi-Sample Anti-aliasing (MSAA) + * @see setMultiSampleAntiAliasingOptions() + */ +struct MultiSampleAntiAliasingOptions { + bool enabled = false; //!< enables or disables msaa + + /** + * sampleCount number of samples to use for multi-sampled anti-aliasing.\n + * 0: treated as 1 + * 1: no anti-aliasing + * n: sample count. Effective sample could be different depending on the + * GPU capabilities. + */ + uint8_t sampleCount = 4; + + /** + * custom resolve improves quality for HDR scenes, but may impact performance. + */ + bool customResolve = false; +}; + /** * Options for Temporal Anti-aliasing (TAA) * @see setTemporalAntiAliasingOptions() @@ -294,6 +316,18 @@ struct TemporalAntiAliasingOptions { bool enabled = false; //!< enables or disables temporal anti-aliasing }; +/** + * Options for Screen-space Reflections. + * @see setScreenSpaceReflectionsOptions() + */ +struct ScreenSpaceReflectionsOptions { + float thickness = 0.5f; //!< ray thickness, in world units + float bias = 0.01f; //!< bias, in world units, to prevent self-intersections + float maxDistance = 3.0f; //!< maximum distance, in world units, to raycast + float stride = 1.0f; //!< stride, in texels, for samples along the ray. + bool enabled = false; +}; + /** * List of available post-processing anti-aliasing techniques. 
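A short sketch of creating and populating the MorphTargetBuffer introduced above: positions use the float3 overload and tangents the packed short4 overload. createMorphTargets is a hypothetical helper and the caller is assumed to supply vertex data sized to vertexCount.

~~~~~~~~~~
#include <filament/MorphTargetBuffer.h>

using namespace filament;

// a minimal sketch: one buffer holding 3 morph targets for `vertexCount` vertices
MorphTargetBuffer* createMorphTargets(Engine& engine, size_t vertexCount,
        math::float3 const* positions, math::short4 const* tangents) {
    MorphTargetBuffer* targets = MorphTargetBuffer::Builder()
            .vertexCount(vertexCount)
            .count(3)
            .build(engine);

    // upload data for target 0; targets 1 and 2 would be filled the same way
    targets->setPositionsAt(engine, 0, positions, vertexCount);
    targets->setTangentsAt(engine, 0, tangents, vertexCount);
    return targets;
}
~~~~~~~~~~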
* @see setAntiAliasing, getAntiAliasing, setSampleCount @@ -317,7 +351,9 @@ enum class Dithering : uint8_t { */ enum class ShadowType : uint8_t { PCF, //!< percentage-closer filtered shadows (default) - VSM //!< variance shadows + VSM, //!< variance shadows + DPCF, //!< PCF with contact hardening simulation + PCSS //!< PCF with soft shadows and contact hardening }; /** @@ -339,13 +375,6 @@ struct VsmShadowOptions { */ bool mipmapping = false; - /** - * EVSM exponent. - * The maximum value permissible is 5.54 for a shadow map in fp16, or 42.0 for a - * shadow map in fp32. Currently the shadow map bit depth is always fp16. - */ - float exponent = 5.54f; - /** * VSM minimum variance scale, must be positive. */ @@ -357,6 +386,27 @@ struct VsmShadowOptions { float lightBleedReduction = 0.15f; }; +/** + * View-level options for DPCF and PCSS Shadowing. + * @see setSoftShadowOptions() + * @warning This API is still experimental and subject to change. + */ +struct SoftShadowOptions { + /** + * Globally scales the penumbra of all DPCF and PCSS shadows + * Acceptable values are greater than 0 + */ + float penumbraScale = 1.0f; + + /** + * Globally scales the computed penumbra ratio of all DPCF and PCSS shadows. + * This effectively controls the strength of contact hardening effect and is useful for + * artistic purposes. Higher values make the shadows become softer faster. + * Acceptable values are equal to or greater than 1. + */ + float penumbraRatioScale = 1.0f; +}; + } // namespace filament #endif //TNT_FILAMENT_OPTIONS_H diff --git a/ios/include/filament/RenderTarget.h b/ios/include/filament/RenderTarget.h index cc4ae38a..950bbb8d 100644 --- a/ios/include/filament/RenderTarget.h +++ b/ios/include/filament/RenderTarget.h @@ -37,6 +37,12 @@ class Texture; * An offscreen render target that can be associated with a View and contains * weak references to a set of attached Texture objects. * + * RenderTarget is intended to be used with the View's post-processing disabled for the most part. + * especially when a DEPTH attachment is also used (see Builder::texture()). + * + * Custom RenderTarget are ultimately intended to render into textures that might be used during + * the main render pass. + * * Clients are responsible for the lifetime of all associated Texture attachments. * * @see View @@ -87,6 +93,14 @@ public: * * All RenderTargets must have a non-null COLOR attachment. * + * When using a DEPTH attachment, it is important to always disable post-processing + * in the View. Failing to do so will cause the DEPTH attachment to be ignored in most + * cases. + * + * When the intention is to keep the content of the DEPTH attachment after rendering, + * Usage::SAMPLEABLE must be set on the DEPTH attachment, otherwise the content of the + * DEPTH buffer may be discarded. + * * @param attachment The attachment point of the texture. * @param texture The associated texture object. * @return A reference to this Builder for chaining calls. diff --git a/ios/include/filament/RenderableManager.h b/ios/include/filament/RenderableManager.h index 0e602c3e..47f4fc56 100644 --- a/ios/include/filament/RenderableManager.h +++ b/ios/include/filament/RenderableManager.h @@ -41,9 +41,11 @@ class Engine; class IndexBuffer; class Material; class MaterialInstance; +class MorphTargetBuffer; class Renderer; class SkinningBuffer; class VertexBuffer; +class Texture; class FEngine; class FRenderPrimitive; @@ -105,8 +107,8 @@ public: * Clients can specify bones either using this quat-vec3 pair, or by using 4x4 matrices. 
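Following the RenderTarget guidance above (disable post-processing when a DEPTH attachment is used, and mark attachments SAMPLEABLE when their contents are needed afterwards), this is a hedged sketch; Texture::Builder, View::setRenderTarget() and View::setPostProcessingEnabled() are standard filament calls assumed here, and createOffscreenTarget is a hypothetical helper.

~~~~~~~~~~
#include <filament/RenderTarget.h>
#include <filament/Texture.h>
#include <filament/View.h>

using namespace filament;

// a minimal sketch: offscreen color + depth target, with post-processing disabled
RenderTarget* createOffscreenTarget(Engine& engine, View& view, uint32_t w, uint32_t h) {
    Texture* color = Texture::Builder()
            .width(w).height(h)
            .format(Texture::InternalFormat::RGBA8)
            .usage(Texture::Usage::COLOR_ATTACHMENT | Texture::Usage::SAMPLEABLE)
            .build(engine);

    Texture* depth = Texture::Builder()
            .width(w).height(h)
            .format(Texture::InternalFormat::DEPTH32F)
            .usage(Texture::Usage::DEPTH_ATTACHMENT | Texture::Usage::SAMPLEABLE)
            .build(engine);

    RenderTarget* target = RenderTarget::Builder()
            .texture(RenderTarget::AttachmentPoint::COLOR, color)
            .texture(RenderTarget::AttachmentPoint::DEPTH, depth)
            .build(engine);

    view.setRenderTarget(target);
    view.setPostProcessingEnabled(false);  // required for the DEPTH attachment to be honored
    return target;
}
~~~~~~~~~~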
*/ struct Bone { - math::quatf unitQuaternion = { 1, 0, 0, 0 }; - math::float3 translation = { 0, 0, 0 }; + math::quatf unitQuaternion = { 1.f, 0.f, 0.f, 0.f }; + math::float3 translation = { 0.f, 0.f, 0.f }; float reserved = 0; }; @@ -300,9 +302,6 @@ public: /** * Controls if the renderable has vertex morphing targets, false by default. * - * This is required to enable GPU morphing for up to 4 attributes. The attached VertexBuffer - * must provide data in the appropriate VertexAttribute slots (\c MORPH_POSITION_0 etc). - * * See also RenderableManager::setMorphWeights(), which can be called on a per-frame basis * to advance the animation. */ @@ -456,12 +455,16 @@ public: /** * Updates the vertex morphing weights on a renderable, all zeroes by default. * - * This is specified using a 4-tuple, one float per morph target. If the renderable has fewer - * than 4 morph targets, then clients should fill the unused components with zeroes. - * * The renderable must be built with morphing enabled, see Builder::morphing(). */ - void setMorphWeights(Instance instance, math::float4 const& weights) noexcept; + void setMorphWeights(Instance instance, float const* weights, size_t count) noexcept; + + + /** + * Associates a MorphTargetBuffer to the given primitive. + */ + void setMorphTargetBufferAt(Instance instance, + size_t primitiveIndex, MorphTargetBuffer* morphTargetBuffer) noexcept; /** * Gets the bounding box used for frustum culling. diff --git a/ios/include/filament/Renderer.h b/ios/include/filament/Renderer.h index bd9c5c23..cbb3aa19 100644 --- a/ios/include/filament/Renderer.h +++ b/ios/include/filament/Renderer.h @@ -99,7 +99,7 @@ public: * headRoomRatio: additional headroom for the GPU as a ratio of the targetFrameTime. * Useful for taking into account constant costs like post-processing or * GPU drivers on different platforms. - * history: History size. higher values, tend to filter more (clamped to 30) + * history: History size. higher values, tend to filter more (clamped to 31) * scaleRate: rate at which the gpu load is adjusted to reach the target frame rate * This value can be computed as 1 / N, where N is the number of frames * needed to reach 64% of the target scale factor. @@ -110,10 +110,10 @@ public: * */ struct FrameRateOptions { - float headRoomRatio = 0.0f; //!< additional headroom for the GPU - float scaleRate = 0.125f; //!< rate at which the system reacts to load changes - uint8_t history = 3; //!< history size - uint8_t interval = 1; //!< desired frame interval in unit of 1.0 / DisplayInfo::refreshRate + float headRoomRatio = 0.0f; //!< additional headroom for the GPU + float scaleRate = 1.0f / 8.0f; //!< rate at which the system reacts to load changes + uint8_t history = 15; //!< history size + uint8_t interval = 1; //!< desired frame interval in unit of 1.0 / DisplayInfo::refreshRate }; /** @@ -252,7 +252,7 @@ public: * * render() generates commands for each of the following stages: * - * 1. Shadow map pass, if needed (currently only a single shadow map is supported). + * 1. Shadow map passes, if needed. * 2. Depth pre-pass. * 3. Color pass. * 4. Post-processing pass. @@ -341,7 +341,7 @@ public: * * Framebuffer as seen on User buffer (PixelBufferDescriptor&) * screen - * + * * +--------------------+ * | | .stride .alignment * | | ----------------------->--> @@ -359,7 +359,8 @@ public: * O------------+-------+ * * - * Typically readPixels() will be called after render() and before endFrame(). 
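With the API above, per-primitive morph data lives in a MorphTargetBuffer and per-frame weights are passed as an array instead of a float4. A small sketch, assuming the renderable was built with Builder::morphing() enabled and that the buffer holds three targets; setupMorphing is a hypothetical helper.

~~~~~~~~~~
#include <filament/RenderableManager.h>

using namespace filament;

// a minimal sketch: attach morph targets and set their weights
void setupMorphing(RenderableManager& rm, utils::Entity renderable,
        MorphTargetBuffer* morphTargets) {
    RenderableManager::Instance instance = rm.getInstance(renderable);

    // bind the buffer to primitive 0 (repeat for other primitives as needed)
    rm.setMorphTargetBufferAt(instance, 0, morphTargets);

    // one weight per morph target held by the buffer
    const float weights[3] = { 0.25f, 0.5f, 0.0f };
    rm.setMorphWeights(instance, weights, 3);
}
~~~~~~~~~~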
+ * readPixels() must be called within a frame, meaning after beginFrame() and before endFrame(). + * Typically, readPixels() will be called after render(). * * After issuing this method, the callback associated with `buffer` will be invoked on the * main thread, indicating that the read-back has completed. Typically, this will happen diff --git a/ios/include/filament/Stream.h b/ios/include/filament/Stream.h index af675ee7..ab877ac7 100644 --- a/ios/include/filament/Stream.h +++ b/ios/include/filament/Stream.h @@ -22,6 +22,7 @@ #include #include +#include #include @@ -199,6 +200,18 @@ public: */ void setAcquiredImage(void* image, Callback callback, void* userdata) noexcept; + /** + * @see setAcquiredImage(void*, Callback, void*) + * + * @param image Pointer to AHardwareBuffer, casted to void* since this is a public header. + * @param handler Handler to dispatch the AcquiredImage or nullptr for the default handler. + * @param callback This is triggered by Filament when it wishes to release the image. + * It callback tales two arguments: the AHardwareBuffer and the userdata. + * @param userdata Optional closure data. Filament will pass this into the callback when it + * releases the image. + */ + void setAcquiredImage(void* image, backend::CallbackHandler* handler, Callback callback, void* userdata) noexcept; + /** * Updates the size of the incoming stream. Whether this value is used is * stream dependent. On Android, it must be set when using diff --git a/ios/include/filament/ToneMapper.h b/ios/include/filament/ToneMapper.h index b961e952..0bbe06fe 100644 --- a/ios/include/filament/ToneMapper.h +++ b/ios/include/filament/ToneMapper.h @@ -74,7 +74,7 @@ struct UTILS_PUBLIC LinearToneMapper final : public ToneMapper { LinearToneMapper() noexcept; ~LinearToneMapper() noexcept final; - math::float3 operator()(math::float3 c) const noexcept; + math::float3 operator()(math::float3 c) const noexcept override; }; /** @@ -86,7 +86,7 @@ struct UTILS_PUBLIC ACESToneMapper final : public ToneMapper { ACESToneMapper() noexcept; ~ACESToneMapper() noexcept final; - math::float3 operator()(math::float3 c) const noexcept; + math::float3 operator()(math::float3 c) const noexcept override; }; /** @@ -99,7 +99,7 @@ struct UTILS_PUBLIC ACESLegacyToneMapper final : public ToneMapper { ACESLegacyToneMapper() noexcept; ~ACESLegacyToneMapper() noexcept final; - math::float3 operator()(math::float3 c) const noexcept; + math::float3 operator()(math::float3 c) const noexcept override; }; /** @@ -112,7 +112,7 @@ struct UTILS_PUBLIC FilmicToneMapper final : public ToneMapper { FilmicToneMapper() noexcept; ~FilmicToneMapper() noexcept final; - math::float3 operator()(math::float3 x) const noexcept; + math::float3 operator()(math::float3 x) const noexcept override; }; /** @@ -123,8 +123,6 @@ struct UTILS_PUBLIC FilmicToneMapper final : public ToneMapper { * * The tone mapping curve is defined by 5 parameters: * - contrast: controls the contrast of the curve - * - shoulder: controls the shoulder of the curve, i.e. how quickly scene - * referred values map to output white * - midGrayIn: sets the input middle gray * - midGrayOut: sets the output middle gray * - hdrMax: defines the maximum input value that will be mapped to @@ -138,18 +136,15 @@ struct UTILS_PUBLIC GenericToneMapper final : public ToneMapper { * * @param contrast: controls the contrast of the curve, must be > 0.0, values * in the range 0.5..2.0 are recommended. - * @param shoulder: controls the shoulder of the curve, i.e. 
how quickly scene - * referred values map to output white, between 0.0 and 1.0. * @param midGrayIn: sets the input middle gray, between 0.0 and 1.0. * @param midGrayOut: sets the output middle gray, between 0.0 and 1.0. * @param hdrMax: defines the maximum input value that will be mapped to * output white. Must be >= 1.0. */ - GenericToneMapper( - float contrast = 1.585f, - float shoulder = 0.5f, + explicit GenericToneMapper( + float contrast = 1.55f, float midGrayIn = 0.18f, - float midGrayOut = 0.268f, + float midGrayOut = 0.215f, float hdrMax = 10.0f ) noexcept; ~GenericToneMapper() noexcept final; @@ -157,9 +152,9 @@ struct UTILS_PUBLIC GenericToneMapper final : public ToneMapper { GenericToneMapper(GenericToneMapper const&) = delete; GenericToneMapper& operator=(GenericToneMapper const&) = delete; GenericToneMapper(GenericToneMapper&& rhs) noexcept; - GenericToneMapper& operator=(GenericToneMapper& rhs) noexcept; + GenericToneMapper& operator=(GenericToneMapper&& rhs) noexcept; - math::float3 operator()(math::float3 x) const noexcept; + math::float3 operator()(math::float3 x) const noexcept override; /** Returns the contrast of the curve as a strictly positive value. */ float getContrast() const noexcept; @@ -179,9 +174,6 @@ struct UTILS_PUBLIC GenericToneMapper final : public ToneMapper { /** Sets the contrast of the curve, must be > 0.0, values in the range 0.5..2.0 are recommended. */ void setContrast(float contrast) noexcept; - /** Sets how quickly scene referred values map to output white, between 0.0 and 1.0. */ - void setShoulder(float shoulder) noexcept; - /** Sets the input middle gray, between 0.0 and 1.0. */ void setMidGrayIn(float midGrayIn) noexcept; @@ -225,9 +217,9 @@ private: */ struct UTILS_PUBLIC DisplayRangeToneMapper final : public ToneMapper { DisplayRangeToneMapper() noexcept; - ~DisplayRangeToneMapper() noexcept; + ~DisplayRangeToneMapper() noexcept override; - math::float3 operator()(math::float3 c) const noexcept; + math::float3 operator()(math::float3 c) const noexcept override; }; } // namespace filament diff --git a/ios/include/filament/View.h b/ios/include/filament/View.h index dc692bd5..1083560c 100644 --- a/ios/include/filament/View.h +++ b/ios/include/filament/View.h @@ -32,6 +32,10 @@ namespace filament { +namespace backend { +class CallbackHandler; +} // namespace backend + class Camera; class ColorGrading; class MaterialInstance; @@ -76,7 +80,10 @@ public: using RenderQuality = RenderQuality; using AmbientOcclusionOptions = AmbientOcclusionOptions; using TemporalAntiAliasingOptions = TemporalAntiAliasingOptions; + using MultiSampleAntiAliasingOptions = MultiSampleAntiAliasingOptions; using VsmShadowOptions = VsmShadowOptions; + using SoftShadowOptions = SoftShadowOptions; + using ScreenSpaceReflectionsOptions = ScreenSpaceReflectionsOptions; /** * Sets the View's name. Only useful for debugging. @@ -275,7 +282,9 @@ public: * cost. See setAntialiasing. * * @see setAntialiasing + * @deprecated use setMultiSampleAntiAliasingOptions instead */ + UTILS_DEPRECATED void setSampleCount(uint8_t count = 1) noexcept; /** @@ -283,7 +292,9 @@ public: * A value of 0 or 1 means MSAA is disabled. * * @return value set by setSampleCount(). + * @deprecated use getMultiSampleAntiAliasingOptions instead */ + UTILS_DEPRECATED uint8_t getSampleCount() const noexcept; /** @@ -320,6 +331,34 @@ public: */ TemporalAntiAliasingOptions const& getTemporalAntiAliasingOptions() const noexcept; + /** + * Enables or disable screen-space reflections. Disabled by default. 
+ * + * @param options screen-space reflections options + */ + void setScreenSpaceReflectionsOptions(ScreenSpaceReflectionsOptions options) noexcept; + + /** + * Returns screen-space reflections options. + * + * @return screen-space reflections options + */ + ScreenSpaceReflectionsOptions const& getScreenSpaceReflectionsOptions() const noexcept; + + /** + * Enables or disable multi-sample anti-aliasing (MSAA). Disabled by default. + * + * @param options multi-sample anti-aliasing options + */ + void setMultiSampleAntiAliasingOptions(MultiSampleAntiAliasingOptions options) noexcept; + + /** + * Returns multi-sample anti-aliasing options. + * + * @return multi-sample anti-aliasing options + */ + MultiSampleAntiAliasingOptions const& getMultiSampleAntiAliasingOptions() const noexcept; + /** * Sets this View's color grading transforms. * @@ -514,19 +553,44 @@ public: */ VsmShadowOptions getVsmShadowOptions() const noexcept; + /** + * Sets soft shadowing options that apply across the entire View. + * + * Additional light-specific soft shadow parameters can be set with LightManager::setShadowOptions. + * + * Only applicable when shadow type is set to ShadowType::DPCF or ShadowType::PCSS. + * + * @param options Options for shadowing. + * + * @see setShadowType + * + * @warning This API is still experimental and subject to change. + */ + void setSoftShadowOptions(SoftShadowOptions const& options) noexcept; + + /** + * Returns the soft shadowing options associated with this View. + * + * @return value set by setSoftShadowOptions(). + */ + SoftShadowOptions getSoftShadowOptions() const noexcept; + /** * Enables or disables post processing. Enabled by default. * * Post-processing includes: + * - Depth-of-field * - Bloom - * - Tone-mapping & gamma encoding + * - Vignetting + * - Temporal Anti-aliasing (TAA) + * - Color grading & gamma encoding * - Dithering - * - MSAA * - FXAA * - Dynamic scaling * - * Disabling post-processing forgoes color correctness as well as anti-aliasing and - * should only be used experimentally (e.g., for UI overlays). + * Disabling post-processing forgoes color correctness as well as some anti-aliasing techniques + * and should only be used for debugging, UI overlays or when using custom render targets + * (see RenderTarget). * * @param enabled true enables post processing, false disables it. * @@ -611,9 +675,10 @@ public: * @param x Horizontal coordinate to query in the viewport with origin on the left. * @param y Vertical coordinate to query on the viewport with origin at the bottom. * @param data A pointer to an instance of T + * @param handler Handler to dispatch the callback or nullptr for the default handler. */ template - void pick(uint32_t x, uint32_t y, T* instance) noexcept { + void pick(uint32_t x, uint32_t y, T* instance, backend::CallbackHandler* handler = nullptr) noexcept { PickingQuery& query = pick(x, y, [](PickingQueryResult const& result, PickingQuery* pq) { void* user = pq->storage; (*static_cast(user)->*method)(result); @@ -630,9 +695,10 @@ public: * @param x Horizontal coordinate to query in the viewport with origin on the left. * @param y Vertical coordinate to query on the viewport with origin at the bottom. * @param data An instance of T + * @param handler Handler to dispatch the callback or nullptr for the default handler. 
*/ template - void pick(uint32_t x, uint32_t y, T instance) noexcept { + void pick(uint32_t x, uint32_t y, T instance, backend::CallbackHandler* handler = nullptr) noexcept { static_assert(sizeof(instance) <= sizeof(PickingQuery::storage), "user data too large"); PickingQuery& query = pick(x, y, [](PickingQueryResult const& result, PickingQuery* pq) { void* user = pq->storage; @@ -650,11 +716,12 @@ public: * @param x Horizontal coordinate to query in the viewport with origin on the left. * @param y Vertical coordinate to query on the viewport with origin at the bottom. * @param functor A functor, typically a lambda function. + * @param handler Handler to dispatch the callback or nullptr for the default handler. */ template - void pick(uint32_t x, uint32_t y, T functor) noexcept { + void pick(uint32_t x, uint32_t y, T functor, backend::CallbackHandler* handler = nullptr) noexcept { static_assert(sizeof(functor) <= sizeof(PickingQuery::storage), "functor too large"); - PickingQuery& query = pick(x, y, + PickingQuery& query = pick(x, y, handler, (PickingQueryResultCallback)[](PickingQueryResult const& result, PickingQuery* pq) { void* user = pq->storage; T& that = *static_cast(user); @@ -674,11 +741,12 @@ public: * @param x Horizontal coordinate to query in the viewport with origin on the left. * @param y Vertical coordinate to query on the viewport with origin at the bottom. * @param callback User callback, called when the picking query result is available. + * @param handler Handler to dispatch the callback or nullptr for the default handler. * @return A reference to a PickingQuery structure, which can be used to store up to * 8*sizeof(void*) bytes of user data. This user data is later accessible * in the PickingQueryResultCallback callback 3rd parameter. */ - PickingQuery& pick(uint32_t x, uint32_t y, + PickingQuery& pick(uint32_t x, uint32_t y, backend::CallbackHandler* handler, PickingQueryResultCallback callback) noexcept; diff --git a/ios/include/filameshio/MeshReader.h b/ios/include/filameshio/MeshReader.h deleted file mode 100644 index f9da44bd..00000000 --- a/ios/include/filameshio/MeshReader.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_FILAMENT_FILAMESHIO_MESHREADER_H -#define TNT_FILAMENT_FILAMESHIO_MESHREADER_H - -#include -#include -#include - -namespace filament { - class Engine; - class VertexBuffer; - class IndexBuffer; - class MaterialInstance; -} - -namespace utils { - class Path; -} - -namespace filamesh { - - -/** - * This API can be used to read meshes stored in the "filamesh" format produced - * by the command line tool of the same name. This file format is documented in - * "docs/filamesh.md" in the Filament distribution. 
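Stepping back to the View changes above: MSAA, screen-space reflections and soft shadows now have dedicated option setters, and every pick() overload accepts an optional backend::CallbackHandler. A rough usage sketch; view is an existing filament::View*, and the option fields shown (enabled, sampleCount) come from the current Options.h and are used here only as examples:

    // Illustrative sketch; values are examples only.
    filament::View::MultiSampleAntiAliasingOptions msaa;
    msaa.enabled = true;
    msaa.sampleCount = 4;                          // replaces the deprecated setSampleCount()
    view->setMultiSampleAntiAliasingOptions(msaa);

    filament::View::ScreenSpaceReflectionsOptions ssr;
    ssr.enabled = true;
    view->setScreenSpaceReflectionsOptions(ssr);

    // Picking with an explicit handler; nullptr keeps the default dispatch.
    view->pick(120, 80, [](filament::View::PickingQueryResult const& result) {
        // result.renderable is the picked entity
    }, /* handler = */ nullptr);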
- */ -class UTILS_PUBLIC MeshReader { -public: - using Callback = void(*)(void* buffer, size_t size, void* user); - - // Class to track material instances - class MaterialRegistry { - public: - MaterialRegistry(); - MaterialRegistry(const MaterialRegistry& rhs); - MaterialRegistry& operator=(const MaterialRegistry& rhs); - ~MaterialRegistry(); - MaterialRegistry(MaterialRegistry&&); - MaterialRegistry& operator=(MaterialRegistry&&); - - filament::MaterialInstance* getMaterialInstance(const utils::CString& name); - - void registerMaterialInstance(const utils::CString& name, - filament::MaterialInstance* materialInstance); - - void unregisterMaterialInstance(const utils::CString& name); - - void unregisterAll(); - - std::size_t numRegistered() const noexcept; - - void getRegisteredMaterials(filament::MaterialInstance** materialList, - utils::CString* materialNameList) const; - - void getRegisteredMaterials(filament::MaterialInstance** materialList) const; - - void getRegisteredMaterialNames(utils::CString* materialNameList) const; - - private: - struct MaterialRegistryImpl; - MaterialRegistryImpl* mImpl; - }; - - struct Mesh { - utils::Entity renderable; - filament::VertexBuffer* vertexBuffer = nullptr; - filament::IndexBuffer* indexBuffer = nullptr; - }; - - /** - * Loads a filamesh renderable from the specified file. The material registry - * can be used to provide named materials. If a material found in the filamesh - * file cannot be matched to a material in the registry, a default material is - * used instead. The default material can be overridden by adding a material - * named "DefaultMaterial" to the registry. - */ - static Mesh loadMeshFromFile(filament::Engine* engine, - const utils::Path& path, - MaterialRegistry& materials); - - /** - * Loads a filamesh renderable from an in-memory buffer. The material registry - * can be used to provide named materials. If a material found in the filamesh - * file cannot be matched to a material in the registry, a default material is - * used instead. The default material can be overridden by adding a material - * named "DefaultMaterial" to the registry. - */ - static Mesh loadMeshFromBuffer(filament::Engine* engine, - void const* data, Callback destructor, void* user, - MaterialRegistry& materials); - - /** - * Loads a filamesh renderable from an in-memory buffer. The material registry - * can be used to provide named materials. All the primitives of the decoded - * renderable are assigned the specified default material. - */ - static Mesh loadMeshFromBuffer(filament::Engine* engine, - void const* data, Callback destructor, void* user, - filament::MaterialInstance* defaultMaterial); -}; - -} // namespace filamesh - -#endif // TNT_FILAMENT_FILAMESHIO_MESHREADER_H diff --git a/ios/include/geometry/SurfaceOrientation.h b/ios/include/geometry/SurfaceOrientation.h deleted file mode 100644 index e9ad75bb..00000000 --- a/ios/include/geometry/SurfaceOrientation.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_GEOMETRY_SURFACEORIENTATION_H -#define TNT_GEOMETRY_SURFACEORIENTATION_H - -#include -#include -#include - -#include - -namespace filament { - -/** - * Mesh-related utilities. - */ -namespace geometry { - -struct OrientationBuilderImpl; -struct OrientationImpl; - -/** - * The surface orientation helper can be used to populate Filament-style TANGENTS buffers. - */ -class UTILS_PUBLIC SurfaceOrientation { -public: - - /** - * The Builder is used to construct an immutable surface orientation helper. - * - * Clients provide pointers into their own data, which is synchronously consumed during build(). - * At a minimum, clients must supply a vertex count. They can supply data in any of the - * following combinations: - * - * 1. normals only ........................... not recommended, selects arbitrary orientation - * 2. normals + tangents ..................... sign of W determines bitangent orientation - * 3. normals + uvs + positions + indices .... selects Lengyel’s Method - * 4. positions + indices .................... generates normals for flat shading only - * - * Additionally, the client-side data has the following type constraints: - * - * - Normals must be float3 - * - Tangents must be float4 - * - UVs must be float2 - * - Positions must be float3 - * - Triangles must be uint3 or ushort3 - * - * Currently, mikktspace is not supported because it requires re-indexing the mesh. Instead - * we use the method described by Eric Lengyel in "Foundations of Game Engine Development" - * (Volume 2, Chapter 7). - */ - class Builder { - public: - Builder() noexcept; - ~Builder() noexcept; - Builder(Builder&& that) noexcept; - Builder& operator=(Builder&& that) noexcept; - - /** - * This attribute is required. - */ - Builder& vertexCount(size_t vertexCount) noexcept; - - Builder& normals(const filament::math::float3*, size_t stride = 0) noexcept; - Builder& tangents(const filament::math::float4*, size_t stride = 0) noexcept; - Builder& uvs(const filament::math::float2*, size_t stride = 0) noexcept; - Builder& positions(const filament::math::float3*, size_t stride = 0) noexcept; - - Builder& triangleCount(size_t triangleCount) noexcept; - Builder& triangles(const filament::math::uint3*) noexcept; - Builder& triangles(const filament::math::ushort3*) noexcept; - - /** - * Generates quats or returns null if the submitted data is an incomplete combination. - */ - SurfaceOrientation* build(); - - private: - OrientationBuilderImpl* mImpl; - Builder(const Builder&) = delete; - Builder& operator=(const Builder&) = delete; - }; - - ~SurfaceOrientation() noexcept; - SurfaceOrientation(SurfaceOrientation&& that) noexcept; - SurfaceOrientation& operator=(SurfaceOrientation&& that) noexcept; - - /** - * Returns the vertex count. - */ - size_t getVertexCount() const noexcept; - - /** - * Converts quaternions into the desired output format and writes up to "quatCount" - * to the given output pointer. Normally quatCount should be equal to the vertex count. - * The optional stride is the desired quat-to-quat stride in bytes. 
- * @{ - */ - void getQuats(filament::math::quatf* out, size_t quatCount, size_t stride = 0) const noexcept; - void getQuats(filament::math::short4* out, size_t quatCount, size_t stride = 0) const noexcept; - void getQuats(filament::math::quath* out, size_t quatCount, size_t stride = 0) const noexcept; - /** - * @} - */ - -private: - SurfaceOrientation(OrientationImpl*) noexcept; - SurfaceOrientation(const SurfaceOrientation&) = delete; - SurfaceOrientation& operator=(const SurfaceOrientation&) = delete; - OrientationImpl* mImpl; - friend struct OrientationBuilderImpl; -}; - -} // namespace geometry -} // namespace filament - -#endif // TNT_GEOMETRY_SURFACEORIENTATION_H diff --git a/ios/include/geometry/Transcoder.h b/ios/include/geometry/Transcoder.h deleted file mode 100644 index 0c77c941..00000000 --- a/ios/include/geometry/Transcoder.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_GEOMETRY_TRANSCODER_H -#define TNT_GEOMETRY_TRANSCODER_H - -#include - -#include -#include - -namespace filament { -namespace geometry { - -enum class ComponentType { - BYTE, //!< If normalization is enabled, this maps from [-127,127] to [-1,+1] - UBYTE, //!< If normalization is enabled, this maps from [0,255] to [0, +1] - SHORT, //!< If normalization is enabled, this maps from [-32767,32767] to [-1,+1] - USHORT, //!< If normalization is enabled, this maps from [0,65535] to [0, +1] - HALF, //!< 1 sign bit, 5 exponent bits, and 5 mantissa bits. -}; - -/** - * Creates a function object that can convert vertex attribute data into tightly packed floats. - * - * This is especially useful for 3-component formats which are not supported by all backends. - * e.g. The Vulkan minspec includes float3 but not short3. - * - * Usage Example: - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * using filament::geometry::Transcoder; - * using filament::geometry::ComponentType; - * - * Transcoder transcode({ - * .componentType = ComponentType::BYTE, - * .normalized = true, - * .componentCount = 3, - * .inputStrideBytes = 0 - * }); - * - * transcode(outputPtr, inputPtr, count); - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The interpretation of signed normalized data is consistent with Vulkan and OpenGL ES 3.0+. - * Note that this slightly differs from earlier versions of OpenGL ES. For example, a signed byte - * value of -127 maps exactly to -1.0f under ES3 and VK rules, but not ES2. - */ -class UTILS_PUBLIC Transcoder { -public: - /** - * Describes the format of all input data that get passed to this transcoder object. - */ - struct Config { - ComponentType componentType; - bool normalized; - uint32_t componentCount; - uint32_t inputStrideBytes = 0; //!< If stride is 0, the transcoder assumes tight packing. - }; - - /** - * Creates an immutable function object with the specified configuration. 
- * - * The config is not passed by const reference to allow for type inference at the call site. - */ - Transcoder(Config config) noexcept : mConfig(config) {} - - /** - * Converts arbitrary data into tightly packed 32-bit floating point values. - * - * If target is non-null, writes up to "count" items into target and returns the number of bytes - * actually written. - * - * If target is null, returns the number of bytes required. - * - * @param target Client owned area to write into, or null for a size query - * @param source Pointer to the data to read from (does not get retained) - * @param count The maximum number of items to write (i.e. number of float3 values, not bytes) - * @return Number of bytes required to contain "count" items after conversion to packed floats - * - */ - size_t operator()(float* UTILS_RESTRICT target, void const* UTILS_RESTRICT source, - size_t count) const noexcept; - -private: - const Config mConfig; -}; - -} // namespace geometry -} // namespace filament - -#endif // TNT_GEOMETRY_TRANSCODER_H diff --git a/ios/include/gltfio/FilamentAsset.h b/ios/include/gltfio/FilamentAsset.h index 87daa3c3..2b0a6586 100644 --- a/ios/include/gltfio/FilamentAsset.h +++ b/ios/include/gltfio/FilamentAsset.h @@ -203,6 +203,16 @@ public: */ Animator* getAnimator() noexcept; + /** + * Updates the morphing weights in the given entity. + */ + void setMorphWeights(utils::Entity entity, const float* weights, size_t count); + + /** + * Gets the number of morph targets in the given entity. + */ + int getMorphTargetCount(utils::Entity entity) noexcept; + /** * Lazily creates a single LINES renderable that draws the transformed bounding-box hierarchy * for diagnostic purposes. The wireframe is owned by the asset so clients should not delete it. diff --git a/ios/include/gltfio/Image.h b/ios/include/gltfio/Image.h index c2bc2023..c7047311 100644 --- a/ios/include/gltfio/Image.h +++ b/ios/include/gltfio/Image.h @@ -25,7 +25,7 @@ // For emscripten and Android builds, we never load from the file // system, so we-opt out of the stdio functionality in stb. -#if defined(__EMSCRIPTEN__) || defined(ANDROID) +#if defined(__EMSCRIPTEN__) || defined(__ANDROID__) #define STBI_NO_STDIO #endif diff --git a/ios/include/gltfio/ResourceLoader.h b/ios/include/gltfio/ResourceLoader.h index b69a16e5..a92a4f85 100644 --- a/ios/include/gltfio/ResourceLoader.h +++ b/ios/include/gltfio/ResourceLoader.h @@ -52,6 +52,10 @@ struct ResourceConfiguration { //! If true, computes the bounding boxes of all \c POSITION attibutes. Well formed glTF files //! do not need this, but it is useful for robustness. bool recomputeBoundingBoxes; + + //! If true, ignores the bind transform of skinned primitives when computing bounding boxes. Implicitly true + //! for instanced assets.
Only applicable when recomputeBoundingBoxes is set to true + bool ignoreBindTransform; }; /** diff --git a/ios/include/gltfio/resources/gltfresources.h b/ios/include/gltfio/resources/gltfresources.h deleted file mode 100644 index a820383e..00000000 --- a/ios/include/gltfio/resources/gltfresources.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef GLTFRESOURCES_H_ -#define GLTFRESOURCES_H_ - -#include - -extern "C" { - extern const uint8_t GLTFRESOURCES_PACKAGE[]; - extern int GLTFRESOURCES_LIT_FADE_OFFSET; - extern int GLTFRESOURCES_LIT_FADE_SIZE; - extern int GLTFRESOURCES_LIT_OPAQUE_OFFSET; - extern int GLTFRESOURCES_LIT_OPAQUE_SIZE; - extern int GLTFRESOURCES_LIT_MASKED_OFFSET; - extern int GLTFRESOURCES_LIT_MASKED_SIZE; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_FADE_OFFSET; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_FADE_SIZE; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_OFFSET; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_SIZE; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_OFFSET; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_SIZE; - extern int GLTFRESOURCES_UNLIT_FADE_OFFSET; - extern int GLTFRESOURCES_UNLIT_FADE_SIZE; - extern int GLTFRESOURCES_UNLIT_OPAQUE_OFFSET; - extern int GLTFRESOURCES_UNLIT_OPAQUE_SIZE; - extern int GLTFRESOURCES_UNLIT_MASKED_OFFSET; - extern int GLTFRESOURCES_UNLIT_MASKED_SIZE; - extern int GLTFRESOURCES_LIT_VOLUME_OFFSET; - extern int GLTFRESOURCES_LIT_VOLUME_SIZE; - extern int GLTFRESOURCES_LIT_TRANSMISSION_OFFSET; - extern int GLTFRESOURCES_LIT_TRANSMISSION_SIZE; - extern int GLTFRESOURCES_LIT_SHEEN_OFFSET; - extern int GLTFRESOURCES_LIT_SHEEN_SIZE; -} -#define GLTFRESOURCES_LIT_FADE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_FADE_OFFSET) -#define GLTFRESOURCES_LIT_OPAQUE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_OPAQUE_OFFSET) -#define GLTFRESOURCES_LIT_MASKED_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_MASKED_OFFSET) -#define GLTFRESOURCES_SPECULARGLOSSINESS_FADE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_SPECULARGLOSSINESS_FADE_OFFSET) -#define GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_OFFSET) -#define GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_OFFSET) -#define GLTFRESOURCES_UNLIT_FADE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_UNLIT_FADE_OFFSET) -#define GLTFRESOURCES_UNLIT_OPAQUE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_UNLIT_OPAQUE_OFFSET) -#define GLTFRESOURCES_UNLIT_MASKED_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_UNLIT_MASKED_OFFSET) -#define GLTFRESOURCES_LIT_VOLUME_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_VOLUME_OFFSET) -#define GLTFRESOURCES_LIT_TRANSMISSION_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_TRANSMISSION_OFFSET) -#define GLTFRESOURCES_LIT_SHEEN_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_SHEEN_OFFSET) - -#endif diff --git a/ios/include/gltfio/resources/gltfresources_lite.h b/ios/include/gltfio/resources/gltfresources_lite.h deleted file mode 100644 index 8bef16f4..00000000 --- a/ios/include/gltfio/resources/gltfresources_lite.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef GLTFRESOURCES_LITE_H_ -#define GLTFRESOURCES_LITE_H_ - -#include - -extern "C" { - extern const uint8_t GLTFRESOURCES_LITE_PACKAGE[]; - extern int GLTFRESOURCES_LITE_LIT_OPAQUE_OFFSET; - extern int GLTFRESOURCES_LITE_LIT_OPAQUE_SIZE; - extern int GLTFRESOURCES_LITE_LIT_FADE_OFFSET; - extern int GLTFRESOURCES_LITE_LIT_FADE_SIZE; -} -#define GLTFRESOURCES_LITE_LIT_OPAQUE_DATA 
(GLTFRESOURCES_LITE_PACKAGE + GLTFRESOURCES_LITE_LIT_OPAQUE_OFFSET) -#define GLTFRESOURCES_LITE_LIT_FADE_DATA (GLTFRESOURCES_LITE_PACKAGE + GLTFRESOURCES_LITE_LIT_FADE_OFFSET) - -#endif diff --git a/ios/include/ibl/Cubemap.h b/ios/include/ibl/Cubemap.h deleted file mode 100644 index 2186bdb0..00000000 --- a/ios/include/ibl/Cubemap.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef IBL_CUBEMAP_H -#define IBL_CUBEMAP_H - -#include - -#include - -#include -#include -#include - -#include - -namespace filament { -namespace ibl { - -/** - * Generic cubemap class. It handles writing / reading into the 6 faces of a cubemap. - * - * Seamless trilinear filtering is handled. - * - * This class doesn't own the face data, it's just a "view" on the 6 images. - * - * @see CubemapUtils - * - */ -class UTILS_PUBLIC Cubemap { -public: - - /** - * Initialize the cubemap with a given size, but no face is set and no memory is allocated. - * - * Usually Cubemaps are created using CubemapUtils. - * - * @see CubemapUtils - */ - explicit Cubemap(size_t dim); - - Cubemap(Cubemap&&) = default; - Cubemap& operator=(Cubemap&&) = default; - - ~Cubemap(); - - - enum class Face : uint8_t { - PX = 0, // left +----+ - NX, // right | PY | - PY, // bottom +----+----+----+----+ - NY, // top | NX | PZ | PX | NZ | - PZ, // back +----+----+----+----+ - NZ // front | NY | - // +----+ - }; - - using Texel = filament::math::float3; - - - //! releases all images and reset the cubemap size - void resetDimensions(size_t dim); - - //! assigns an image to a face. - void setImageForFace(Face face, const Image& image); - - //! retrieves the image attached to a face - inline const Image& getImageForFace(Face face) const; - - //! retrieves the image attached to a face - inline Image& getImageForFace(Face face); - - //! computes the center of a pixel at coordinate x, y - static inline filament::math::float2 center(size_t x, size_t y); - - //! computes a direction vector from a face and a location of the center of pixel in an Image - inline filament::math::float3 getDirectionFor(Face face, size_t x, size_t y) const; - - //! computes a direction vector from a face and a location in pixel in an Image - inline filament::math::float3 getDirectionFor(Face face, float x, float y) const; - - //! samples the cubemap at the given direction using nearest neighbor filtering - inline Texel const& sampleAt(const filament::math::float3& direction) const; - - //! samples the cubemap at the given direction using bilinear filtering - inline Texel filterAt(const filament::math::float3& direction) const; - - //! samples an image at the given location in pixel using bilinear filtering - static Texel filterAt(const Image& image, float x, float y); - static Texel filterAtCenter(const Image& image, size_t x, size_t y); - - //! 
samples two cubemaps in a given direction and lerps the result by a given lerp factor - static Texel trilinearFilterAt(const Cubemap& c0, const Cubemap& c1, float lerp, - const filament::math::float3& direction); - - //! reads a texel at a given address - inline static const Texel& sampleAt(void const* data) { - return *static_cast(data); - } - - //! writes a texel at a given address - inline static void writeAt(void* data, const Texel& texel) { - *static_cast(data) = texel; - } - - //! returns the size of the cubemap in pixels - size_t getDimensions() const; - - /** - * Prepares a cubemap for seamless access to its faces. - * - * @warning All faces of the cubemap must be backed-up by the same Image, and must already - * be spaced by 2 lines/rows. - */ - void makeSeamless(); - - struct Address { - Face face; - float s = 0; - float t = 0; - }; - - //! returns the face and texture coordinates of the given direction - static Address getAddressFor(const filament::math::float3& direction); - -private: - size_t mDimensions = 0; - float mScale = 1; - float mUpperBound = 0; - Image mFaces[6]; -}; - -// ------------------------------------------------------------------------------------------------ - -inline const Image& Cubemap::getImageForFace(Face face) const { - return mFaces[int(face)]; -} - -inline Image& Cubemap::getImageForFace(Face face) { - return mFaces[int(face)]; -} - -inline filament::math::float2 Cubemap::center(size_t x, size_t y) { - return { x + 0.5f, y + 0.5f }; -} - -inline filament::math::float3 Cubemap::getDirectionFor(Face face, size_t x, size_t y) const { - return getDirectionFor(face, x + 0.5f, y + 0.5f); -} - -inline filament::math::float3 Cubemap::getDirectionFor(Face face, float x, float y) const { - // map [0, dim] to [-1,1] with (-1,-1) at bottom left - float cx = (x * mScale) - 1; - float cy = 1 - (y * mScale); - - filament::math::float3 dir; - const float l = std::sqrt(cx * cx + cy * cy + 1); - switch (face) { - case Face::PX: dir = { 1, cy, -cx }; break; - case Face::NX: dir = { -1, cy, cx }; break; - case Face::PY: dir = { cx, 1, -cy }; break; - case Face::NY: dir = { cx, -1, cy }; break; - case Face::PZ: dir = { cx, cy, 1 }; break; - case Face::NZ: dir = { -cx, cy, -1 }; break; - } - return dir * (1 / l); -} - -inline Cubemap::Texel const& Cubemap::sampleAt(const filament::math::float3& direction) const { - Cubemap::Address addr(getAddressFor(direction)); - const size_t x = std::min(size_t(addr.s * mDimensions), mDimensions - 1); - const size_t y = std::min(size_t(addr.t * mDimensions), mDimensions - 1); - return sampleAt(getImageForFace(addr.face).getPixelRef(x, y)); -} - -inline Cubemap::Texel Cubemap::filterAt(const filament::math::float3& direction) const { - Cubemap::Address addr(getAddressFor(direction)); - addr.s = std::min(addr.s * mDimensions, mUpperBound); - addr.t = std::min(addr.t * mDimensions, mUpperBound); - return filterAt(getImageForFace(addr.face), addr.s, addr.t); -} - -} // namespace ibl -} // namespace filament - -#endif /* IBL_CUBEMAP_H */ diff --git a/ios/include/ibl/CubemapIBL.h b/ios/include/ibl/CubemapIBL.h deleted file mode 100644 index 925e27c0..00000000 --- a/ios/include/ibl/CubemapIBL.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef IBL_CUBEMAPIBL_H -#define IBL_CUBEMAPIBL_H - -#include - -#include -#include - -#include - -#include -#include - -namespace utils { -class JobSystem; -} // namespace utils - -namespace filament { -namespace ibl { - -class Cubemap; -class Image; - -/** - * Generates cubemaps for the IBL. - */ -class UTILS_PUBLIC CubemapIBL { -public: - typedef void (*Progress)(size_t, float, void*); - - /** - * Computes a roughness LOD using prefiltered importance sampling GGX - * - * @param dst the destination cubemap - * @param levels a list of prefiltered lods of the source environment - * @param linearRoughness roughness - * @param maxNumSamples number of samples for importance sampling - * @param updater a callback for the caller to track progress - */ - static void roughnessFilter( - utils::JobSystem& js, Cubemap& dst, const utils::Slice& levels, - float linearRoughness, size_t maxNumSamples, math::float3 mirror, bool prefilter, - Progress updater = nullptr, void* userdata = nullptr); - - static void roughnessFilter( - utils::JobSystem& js, Cubemap& dst, const std::vector& levels, - float linearRoughness, size_t maxNumSamples, math::float3 mirror, bool prefilter, - Progress updater = nullptr, void* userdata = nullptr); - - //! Computes the "DFG" term of the "split-sum" approximation and stores it in a 2D image - static void DFG(utils::JobSystem& js, Image& dst, bool multiscatter, bool cloth); - - /** - * Computes the diffuse irradiance using prefiltered importance sampling GGX - * - * @note Usually this is done using spherical harmonics instead. - * - * @param dst the destination cubemap - * @param levels a list of prefiltered lods of the source environment - * @param maxNumSamples number of samples for importance sampling - * @param updater a callback for the caller to track progress - * - * @see CubemapSH - */ - static void diffuseIrradiance(utils::JobSystem& js, Cubemap& dst, const std::vector& levels, - size_t maxNumSamples = 1024, Progress updater = nullptr, void* userdata = nullptr); - - // for debugging. ignore. - static void brdf(utils::JobSystem& js, Cubemap& dst, float linearRoughness); -}; - -} // namespace ibl -} // namespace filament - -#endif /* IBL_CUBEMAPIBL_H */ diff --git a/ios/include/ibl/CubemapSH.h b/ios/include/ibl/CubemapSH.h deleted file mode 100644 index b9297c74..00000000 --- a/ios/include/ibl/CubemapSH.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef IBL_CUBEMAPSH_H -#define IBL_CUBEMAPSH_H - - -#include - -#include -#include - -#include -#include - -namespace utils { -class JobSystem; -} // namespace utils - -namespace filament { -namespace ibl { - -class Cubemap; - -/** - * Computes spherical harmonics - */ -class UTILS_PUBLIC CubemapSH { -public: - /** - * Spherical Harmonics decomposition of the given cubemap - * Optionally calculates irradiance by convolving with truncated cos. - */ - static std::unique_ptr computeSH( - utils::JobSystem& js, const Cubemap& cm, size_t numBands, bool irradiance); - - /** - * Render given spherical harmonics into a cubemap - */ - static void renderSH(utils::JobSystem& js, Cubemap& cm, - const std::unique_ptr& sh, size_t numBands); - - static void windowSH(std::unique_ptr& sh, size_t numBands, float cutoff); - - /** - * Compute spherical harmonics of the irradiance of the given cubemap. - * The SH basis are pre-scaled for easier rendering by the shader. The resulting coefficients - * are not spherical harmonics (as they're scalled by various factors). In particular they - * cannot be rendered with renderSH() above. Instead use renderPreScaledSH3Bands() which - * is exactly the code ran by our shader. - */ - static void preprocessSHForShader(std::unique_ptr& sh); - - /** - * Render pre-scaled irrandiance SH - */ - static void renderPreScaledSH3Bands(utils::JobSystem& js, Cubemap& cm, - const std::unique_ptr& sh); - - static constexpr size_t getShIndex(ssize_t m, size_t l) { - return SHindex(m, l); - } - -private: - class float5 { - float v[5]; - public: - float5() = default; - constexpr float5(float a, float b, float c, float d, float e) : v{ a, b, c, d, e } {} - constexpr float operator[](size_t i) const { return v[i]; } - float& operator[](size_t i) { return v[i]; } - }; - - static inline const float5 multiply(const float5 M[5], float5 x) noexcept { - return float5{ - M[0][0] * x[0] + M[1][0] * x[1] + M[2][0] * x[2] + M[3][0] * x[3] + M[4][0] * x[4], - M[0][1] * x[0] + M[1][1] * x[1] + M[2][1] * x[2] + M[3][1] * x[3] + M[4][1] * x[4], - M[0][2] * x[0] + M[1][2] * x[1] + M[2][2] * x[2] + M[3][2] * x[3] + M[4][2] * x[4], - M[0][3] * x[0] + M[1][3] * x[1] + M[2][3] * x[2] + M[3][3] * x[3] + M[4][3] * x[4], - M[0][4] * x[0] + M[1][4] * x[1] + M[2][4] * x[2] + M[3][4] * x[3] + M[4][4] * x[4] - }; - }; - - - static inline constexpr size_t SHindex(ssize_t m, size_t l) { - return l * (l + 1) + m; - } - - static void computeShBasis(float* SHb, size_t numBands, const math::float3& s); - - static float Kml(ssize_t m, size_t l); - - static std::vector Ki(size_t numBands); - - static constexpr float computeTruncatedCosSh(size_t l); - - static float sincWindow(size_t l, float w); - - static math::float3 rotateShericalHarmonicBand1(math::float3 band1, math::mat3f const& M); - - static float5 rotateShericalHarmonicBand2(float5 const& band2, math::mat3f const& M); - - // debugging only... 
- static float Legendre(ssize_t l, ssize_t m, float x); - static float TSH(int l, int m, const math::float3& d); - static void printShBase(std::ostream& out, int l, int m); -}; - -} // namespace ibl -} // namespace filament - -#endif /* IBL_CUBEMAPSH_H */ diff --git a/ios/include/ibl/CubemapUtils.h b/ios/include/ibl/CubemapUtils.h deleted file mode 100644 index 3b4a3154..00000000 --- a/ios/include/ibl/CubemapUtils.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef IBL_CUBEMAP_UTILS_H -#define IBL_CUBEMAP_UTILS_H - -#include -#include - -#include - -#include - -namespace utils { -class JobSystem; -} // namespace utils - -namespace filament { -namespace ibl { - -class CubemapIBL; - -/** - * Create and convert Cubemap formats - */ -class UTILS_PUBLIC CubemapUtils { -public: - //! Creates a cubemap object and its backing Image - static Cubemap create(Image& image, size_t dim, bool horizontal = true); - - struct EmptyState { - }; - - template - using ScanlineProc = std::function< - void(STATE& state, size_t y, Cubemap::Face f, Cubemap::Texel* data, size_t width)>; - - template - using ReduceProc = std::function; - - //! process the cubemap using multithreading - template - static void process(Cubemap& cm, - utils::JobSystem& js, - ScanlineProc proc, - ReduceProc reduce = [](STATE&) {}, - const STATE& prototype = STATE()); - - //! process the cubemap - template - static void processSingleThreaded(Cubemap& cm, - utils::JobSystem& js, - ScanlineProc proc, - ReduceProc reduce = [](STATE&) {}, - const STATE& prototype = STATE()); - - //! clamps image to acceptable range - static void clamp(Image& src); - - static void highlight(Image& src); - - //! Downsamples a cubemap by helf in x and y using a box filter - static void downsampleCubemapLevelBoxFilter(utils::JobSystem& js, Cubemap& dst, const Cubemap& src); - - //! Return the name of a face (suitable for a file name) - static const char* getFaceName(Cubemap::Face face); - - //! computes the solid angle of a pixel of a face of a cubemap - static float solidAngle(size_t dim, size_t u, size_t v); - - //! Sets a Cubemap faces from a cross image - static void setAllFacesFromCross(Cubemap& cm, const Image& image); - -private: - - //move these into cmgen? - static void setFaceFromCross(Cubemap& cm, Cubemap::Face face, const Image& image); - static Image createCubemapImage(size_t dim, bool horizontal = true); - -#ifndef FILAMENT_IBL_LITE - -public: - - //! Converts horizontal or vertical cross Image to a Cubemap - static void crossToCubemap(utils::JobSystem& js, Cubemap& dst, const Image& src); - - //! Converts equirectangular Image to a Cubemap - static void equirectangularToCubemap(utils::JobSystem& js, Cubemap& dst, const Image& src); - - //! Converts a Cubemap to an equirectangular Image - static void cubemapToEquirectangular(utils::JobSystem& js, Image& dst, const Cubemap& src); - - //! 
Converts a Cubemap to an octahedron - static void cubemapToOctahedron(utils::JobSystem& js, Image& dst, const Cubemap& src); - - //! mirror the cubemap in the horizontal direction - static void mirrorCubemap(utils::JobSystem& js, Cubemap& dst, const Cubemap& src); - - //! generates a UV grid in the cubemap -- useful for debugging. - static void generateUVGrid(utils::JobSystem& js, Cubemap& cml, size_t gridFrequencyX, size_t gridFrequencyY); - -#endif - - friend class CubemapIBL; -}; - - -} // namespace ibl -} // namespace filament - -#endif /* IBL_CUBEMAP_UTILS_H */ diff --git a/ios/include/ibl/Image.h b/ios/include/ibl/Image.h deleted file mode 100644 index 1ebaa62b..00000000 --- a/ios/include/ibl/Image.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef IBL_IMAGE_H -#define IBL_IMAGE_H - -#include -#include -#include - -#include - -#include - -namespace filament { -namespace ibl { - -class UTILS_PUBLIC Image { -public: - Image(); - Image(size_t w, size_t h, size_t stride = 0); - - void reset(); - - void set(Image const& image); - - void subset(Image const& image, size_t x, size_t y, size_t w, size_t h); - - bool isValid() const { return mData != nullptr; } - - size_t getWidth() const { return mWidth; } - - size_t getStride() const { return mBpr / getBytesPerPixel(); } - - size_t getHeight() const { return mHeight; } - - size_t getBytesPerRow() const { return mBpr; } - - size_t getBytesPerPixel() const { return sizeof(math::float3); } - - void* getData() const { return mData; } - - size_t getSize() const { return mBpr * mHeight; } - - void* getPixelRef(size_t x, size_t y) const; - - std::unique_ptr detach() { return std::move(mOwnedData); } - -private: - size_t mBpr = 0; - size_t mWidth = 0; - size_t mHeight = 0; - std::unique_ptr mOwnedData; - void* mData = nullptr; -}; - -inline void* Image::getPixelRef(size_t x, size_t y) const { - return static_cast(mData) + y * getBytesPerRow() + x * getBytesPerPixel(); -} - -} // namespace ibl -} // namespace filament - -#endif /* IBL_IMAGE_H */ diff --git a/ios/include/ibl/utilities.h b/ios/include/ibl/utilities.h deleted file mode 100644 index 6d40cc03..00000000 --- a/ios/include/ibl/utilities.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef IBL_UTILITIES_H -#define IBL_UTILITIES_H - -#include - -#include -#include - -namespace filament { -namespace ibl { - -template -static inline constexpr T sq(T x) { - return x * x; -} - -template -static inline constexpr T log4(T x) { - // log2(x)/log2(4) - // log2(x)/2 - return std::log2(x) * T(0.5); -} - -inline bool isPOT(size_t x) { - return !(x & (x - 1)); -} - -inline filament::math::float2 hammersley(uint32_t i, float iN) { - constexpr float tof = 0.5f / 0x80000000U; - uint32_t bits = i; - bits = (bits << 16u) | (bits >> 16u); - bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u); - bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u); - bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u); - bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u); - return { i * iN, bits * tof }; -} - -} // namespace ibl -} // namespace filament -#endif /* IBL_UTILITIES_H */ diff --git a/ios/include/imageio/BlockCompression.h b/ios/include/imageio/BlockCompression.h new file mode 100644 index 00000000..1b742590 --- /dev/null +++ b/ios/include/imageio/BlockCompression.h @@ -0,0 +1,194 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! \file Functions and types related to block-compressed texture formats. + +#ifndef IMAGEIO_BLOCKCOMPRESSION_H_ +#define IMAGEIO_BLOCKCOMPRESSION_H_ + +#include + +#include +#include + +#include + +#include + +#include + +namespace image { + +enum class CompressedFormat { + INVALID = 0, + + R11_EAC = 0x9270, + SIGNED_R11_EAC = 0x9271, + RG11_EAC = 0x9272, + SIGNED_RG11_EAC = 0x9273, + RGB8_ETC2 = 0x9274, + SRGB8_ETC2 = 0x9275, + RGB8_ALPHA1_ETC2 = 0x9276, + SRGB8_ALPHA1_ETC = 0x9277, + RGBA8_ETC2_EAC = 0x9278, + SRGB8_ALPHA8_ETC2_EAC = 0x9279, + + RGB_S3TC_DXT1 = 0x83F0, + RGBA_S3TC_DXT1 = 0x83F1, + RGBA_S3TC_DXT3 = 0x83F2, + RGBA_S3TC_DXT5 = 0x83F3, + SRGB_S3TC_DXT1 = 0x8C4C, + SRGB_ALPHA_S3TC_DXT1 = 0x8C4D, + SRGB_ALPHA_S3TC_DXT3 = 0x8C4E, + SRGB_ALPHA_S3TC_DXT5 = 0x8C4F, + + RGBA_ASTC_4x4 = 0x93B0, + RGBA_ASTC_5x4 = 0x93B1, + RGBA_ASTC_5x5 = 0x93B2, + RGBA_ASTC_6x5 = 0x93B3, + RGBA_ASTC_6x6 = 0x93B4, + RGBA_ASTC_8x5 = 0x93B5, + RGBA_ASTC_8x6 = 0x93B6, + RGBA_ASTC_8x8 = 0x93B7, + RGBA_ASTC_10x5 = 0x93B8, + RGBA_ASTC_10x6 = 0x93B9, + RGBA_ASTC_10x8 = 0x93BA, + RGBA_ASTC_10x10 = 0x93BB, + RGBA_ASTC_12x10 = 0x93BC, + RGBA_ASTC_12x12 = 0x93BD, + SRGB8_ALPHA8_ASTC_4x4 = 0x93D0, + SRGB8_ALPHA8_ASTC_5x4 = 0x93D1, + SRGB8_ALPHA8_ASTC_5x5 = 0x93D2, + SRGB8_ALPHA8_ASTC_6x5 = 0x93D3, + SRGB8_ALPHA8_ASTC_6x6 = 0x93D4, + SRGB8_ALPHA8_ASTC_8x5 = 0x93D5, + SRGB8_ALPHA8_ASTC_8x6 = 0x93D6, + SRGB8_ALPHA8_ASTC_8x8 = 0x93D7, + SRGB8_ALPHA8_ASTC_10x5 = 0x93D8, + SRGB8_ALPHA8_ASTC_10x6 = 0x93D9, + SRGB8_ALPHA8_ASTC_10x8 = 0x93DA, + SRGB8_ALPHA8_ASTC_10x10 = 0x93DB, + SRGB8_ALPHA8_ASTC_12x10 = 0x93DC, + SRGB8_ALPHA8_ASTC_12x12 = 0x93DD, +}; + +// Represents the opaque result of compression and the chosen texture format. 
+struct CompressedTexture { + const CompressedFormat format; + const uint32_t size; + std::unique_ptr data; +}; + +// ASTC //////////////////////////////////////////////////////////////////////////////////////////// + +// Controls how fast compression occurs at the cost of quality in the resulting image. +enum class AstcPreset { + VERYFAST, + FAST, + MEDIUM, + THOROUGH, + EXHAUSTIVE, +}; + +// Informs the encoder what texels represent; this is especially crucial for normal maps. +enum class AstcSemantic { + COLORS_LDR, + COLORS_HDR, + NORMALS, +}; + +// The encoder configuration controls the quality and speed of compression, as well as the resulting +// format. The specified block size must be one of the 14 block sizes that can be consumed by ES 3.2 +// as per https://www.khronos.org/registry/OpenGL-Refpages/es3/html/glCompressedTexImage2D.xhtml +struct AstcConfig { + AstcPreset quality; + AstcSemantic semantic; + filament::math::ushort2 blocksize; + bool srgb; +}; + +// Uses the CPU to compress a linear image (1 to 4 channels) into an ASTC texture. The 16-byte +// header block that ARM uses in their file format is not included. +CompressedTexture astcCompress(const LinearImage& source, AstcConfig config); + +// Parses a simple underscore-delimited string to produce an ASTC compression configuration. This +// makes it easy to incorporate the compression API into command-line tools. If the string is +// malformed, this returns a config with a 0x0 blocksize. Example strings: fast_ldr_4x4, +// thorough_normals_6x6, veryfast_hdr_12x10 +AstcConfig astcParseOptionString(const std::string& options); + +// ETC ///////////////////////////////////////////////////////////////////////////////////////////// + +enum class EtcErrorMetric { + RGBA, + RGBX, + REC709, + NUMERIC, + NORMALXYZ, +}; + +// Informs the ETC encoder of the desired output. Effort sets the quality / speed tradeoff with +// a number between 0 and 100. +struct EtcConfig { + CompressedFormat format; + EtcErrorMetric metric; + int effort; +}; + +// Uses the CPU to compress a linear image (1 to 4 channels) into an ETC texture. +CompressedTexture etcCompress(const LinearImage& source, EtcConfig config); + +// Converts a string into an ETC compression configuration where the string has the form +// FORMAT_METRIC_EFFORT where: +// - FORMAT is one of: r11, signed_r11, rg11, signed_rg11, rgb8, srgb8, rgb8_alpha, +// srgb8_alpha, rgba8, and srgb8_alpha8 +// - METRIC is one of: rgba, rgbx, rec709, numeric, and normalxyz +// - EFFORT is an integer between 0 and 100 +EtcConfig etcParseOptionString(const std::string& options); + +// S3TC //////////////////////////////////////////////////////////////////////////////////////////// + +// Informs the S3TC encoder of the desired output. +struct S3tcConfig { + CompressedFormat format; + bool srgb; +}; + +// Uses the CPU to compress a linear image (1 to 4 channels) into an S3TC texture. +CompressedTexture s3tcCompress(const LinearImage& source, S3tcConfig config); + +// Parses an underscore-delimited string to produce an S3TC compression configuration. Currently +// this only accepts "rgb_dxt1" and "rgba_dxt5". If the string is malformed, this returns a config +// with an invalid format. 
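As a brief aside on the ASTC entry points declared above, the intended flow is to build an AstcConfig (often from an option string) and feed it to astcCompress(); a hedged sketch, where the option string and the source image are illustrative only:

    // Illustrative sketch; `source` is an existing image::LinearImage (1 to 4 channels).
    image::AstcConfig config = image::astcParseOptionString("thorough_normals_6x6");
    if (config.blocksize.x != 0) {   // a 0x0 block size signals a malformed option string
        image::CompressedTexture tex = image::astcCompress(source, config);
        // tex.format, tex.size and tex.data describe the compressed payload.
    }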
+S3tcConfig s3tcParseOptionString(const std::string& options); + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +struct CompressionConfig { + enum { INVALID, ASTC, S3TC, ETC } type; + AstcConfig astc; + S3tcConfig s3tc; + EtcConfig etc; +}; + +bool parseOptionString(const std::string& options, CompressionConfig* config); + +UTILS_PUBLIC +CompressedTexture compressTexture(const CompressionConfig& config, const LinearImage& image); + +} // namespace image + +#endif /* IMAGEIO_BLOCKCOMPRESSION_H_ */ diff --git a/ios/include/imageio/HDRDecoder.h b/ios/include/imageio/HDRDecoder.h new file mode 100644 index 00000000..bb7519ee --- /dev/null +++ b/ios/include/imageio/HDRDecoder.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef IMAGE_HDRDECODER_H_ +#define IMAGE_HDRDECODER_H_ + +#include + +namespace image { + +class HDRDecoder : public ImageDecoder::Decoder { +public: + static HDRDecoder* create(std::istream& stream); + static bool checkSignature(char const* buf); + + HDRDecoder(const HDRDecoder&) = delete; + HDRDecoder& operator=(const HDRDecoder&) = delete; + +private: + explicit HDRDecoder(std::istream& stream); + ~HDRDecoder() override; + + // ImageDecoder::Decoder interface + LinearImage decode() override; + + static const char sigRadiance[]; + static const char sigRGBE[]; + std::istream& mStream; + std::streampos mStreamStartPos; +}; + +} // namespace image + +#endif /* IMAGE_IMAGEDECODER_H_ */ diff --git a/ios/include/imageio/ImageDecoder.h b/ios/include/imageio/ImageDecoder.h new file mode 100644 index 00000000..cd341c49 --- /dev/null +++ b/ios/include/imageio/ImageDecoder.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef IMAGE_IMAGEDECODER_H_ +#define IMAGE_IMAGEDECODER_H_ + +#include +#include + +#include + +#include + +namespace image { + +class UTILS_PUBLIC ImageDecoder { +public: + enum class ColorSpace { + LINEAR, + SRGB + }; + + // Returns linear floating-point data, or a non-valid image if an error occured. 
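A short sketch of how the decode() entry point declared just below is typically driven from a file stream; the path, source name and color space are examples rather than part of this header:

    // Illustrative sketch; needs <fstream>. "albedo.png" is a placeholder path.
    std::ifstream in("albedo.png", std::ios::binary);
    image::LinearImage img = image::ImageDecoder::decode(
            in, "albedo.png", image::ImageDecoder::ColorSpace::SRGB);
    // On failure, decode() returns a non-valid (empty) image, as noted above.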
+ static LinearImage decode(std::istream& stream, const std::string& sourceName, + ColorSpace sourceSpace = ColorSpace::SRGB); + + class Decoder { + public: + virtual LinearImage decode() = 0; + virtual ~Decoder() = default; + + ColorSpace getColorSpace() const noexcept { + return mColorSpace; + } + + void setColorSpace(ColorSpace colorSpace) noexcept { + mColorSpace = colorSpace; + } + + private: + ColorSpace mColorSpace = ColorSpace::SRGB; + }; + +private: + enum class Format { + NONE, + PNG, + HDR, + PSD, + EXR + }; +}; + +} // namespace image + +#endif /* IMAGE_IMAGEDECODER_H_ */ diff --git a/ios/include/imageio/ImageDiffer.h b/ios/include/imageio/ImageDiffer.h new file mode 100644 index 00000000..c3c752e3 --- /dev/null +++ b/ios/include/imageio/ImageDiffer.h @@ -0,0 +1,34 @@ +/* + * Copyright 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +namespace image { + +enum class ComparisonMode { + SKIP, + COMPARE, + UPDATE, +}; + +// Saves an image to disk or does a load-and-compare, depending on comparison mode. +// This makes it easy for unit tests to have compare / update commands. +// The passed-in image is the "result image" and the expected image is the "golden image". +void updateOrCompare(LinearImage result, const utils::Path& golden, ComparisonMode, float epsilon); + +} // namespace image diff --git a/ios/include/imageio/ImageEncoder.h b/ios/include/imageio/ImageEncoder.h new file mode 100644 index 00000000..7cd4bd8f --- /dev/null +++ b/ios/include/imageio/ImageEncoder.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef IMAGE_IMAGEENCODER_H_ +#define IMAGE_IMAGEENCODER_H_ + +#include +#include + +#include + +#include + +namespace image { + +class UTILS_PUBLIC ImageEncoder { +public: + enum class Format { + PNG, // 8-bit sRGB, 1 or 3 channels + PNG_LINEAR, // 8-bit linear RGB, 1 or 3 channels + HDR, // 8-bit linear RGBE, 3 channels only + RGBM, // 8-bit RGBM, as PNG, 3 channels only + PSD, // 16-bit sRGB or 32-bit linear RGB, 3 channels only + // Default: 16 bit + EXR, // 16-bit linear RGB (half-float), 3 channels only + // Default: PIZ compression + DDS, // 8-bit sRGB, 1, 2 or 3 channels; + // 16-bit or 32-bit linear RGB, 1, 2 or 3 channels + // Default: 16 bit + DDS_LINEAR, // 8-bit, 16-bit or 32-bit linear RGB, 1, 2 or 3 channels + // Default: 16 bit + RGB_10_11_11_REV, // RGBA PNG file, but containing 11_11_10 data + }; + + // Consumes linear floating-point data, returns false if unable to encode. + static bool encode(std::ostream& stream, Format format, const LinearImage& image, + const std::string& compression, const std::string& destName); + + static Format chooseFormat(const std::string& name, bool forceLinear = false); + static std::string chooseExtension(Format format); + + class Encoder { + public: + virtual bool encode(const LinearImage& image) = 0; + virtual ~Encoder() = default; + }; +}; + +} // namespace image + +#endif /* IMAGE_IMAGEENCODER_H_ */ diff --git a/ios/include/math/TMatHelpers.h b/ios/include/math/TMatHelpers.h index f8ff71c9..818bb9fa 100644 --- a/ios/include/math/TMatHelpers.h +++ b/ios/include/math/TMatHelpers.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef MATH_TMATHELPERS_H_ -#define MATH_TMATHELPERS_H_ +#ifndef TNT_MATH_TMATHELPERS_H +#define TNT_MATH_TMATHELPERS_H #include #include @@ -804,4 +804,4 @@ public: } // namespace math } // namespace filament -#endif // MATH_TMATHELPERS_H_ +#endif // TNT_MATH_TMATHELPERS_H diff --git a/ios/include/math/TQuatHelpers.h b/ios/include/math/TQuatHelpers.h index 184420b4..81e06a09 100644 --- a/ios/include/math/TQuatHelpers.h +++ b/ios/include/math/TQuatHelpers.h @@ -14,18 +14,17 @@ * limitations under the License. */ - -#ifndef MATH_TQUATHELPERS_H_ -#define MATH_TQUATHELPERS_H_ - -#include -#include -#include +#ifndef TNT_MATH_TQUATHELPERS_H +#define TNT_MATH_TQUATHELPERS_H #include #include #include +#include +#include +#include + namespace filament { namespace math { namespace details { @@ -289,4 +288,4 @@ public: } // namespace math } // namespace filament -#endif // MATH_TQUATHELPERS_H_ +#endif // TNT_MATH_TQUATHELPERS_H diff --git a/ios/include/math/TVecHelpers.h b/ios/include/math/TVecHelpers.h index ce66ec1a..f213dc71 100644 --- a/ios/include/math/TVecHelpers.h +++ b/ios/include/math/TVecHelpers.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MATH_TVECHELPERS_H_ -#define MATH_TVECHELPERS_H_ +#ifndef TNT_MATH_TVECHELPERS_H +#define TNT_MATH_TVECHELPERS_H #include @@ -477,6 +477,13 @@ private: return v; } + friend inline VECTOR MATH_PURE sign(VECTOR v) { + for (size_t i = 0; i < v.size(); i++) { + v[i] = std::copysign(T(1), v[i]); + } + return v; + } + friend inline VECTOR MATH_PURE pow(VECTOR v, T p) { for (size_t i = 0; i < v.size(); i++) { v[i] = std::pow(v[i], p); @@ -622,4 +629,4 @@ private: } // namespace math } // namespace filament -#endif // MATH_TVECHELPERS_H_ +#endif // TNT_MATH_TVECHELPERS_H diff --git a/ios/include/math/compiler.h b/ios/include/math/compiler.h index 20934fae..634e2077 100644 --- a/ios/include/math/compiler.h +++ b/ios/include/math/compiler.h @@ -14,7 +14,8 @@ * limitations under the License. */ -#pragma once +#ifndef TNT_MATH_COMPILER_H +#define TNT_MATH_COMPILER_H #include @@ -111,8 +112,7 @@ #endif // _MSC_VER -namespace filament { -namespace math { +namespace filament::math { // MSVC 2019 16.4 doesn't seem to like it when we specialize std::is_arithmetic for // filament::math::half, so we're forced to create our own is_arithmetic here and specialize it @@ -122,5 +122,6 @@ struct is_arithmetic : std::integral_constant::value || std::is_floating_point::value> { }; -} -} +} // filament::math + +#endif // TNT_MATH_COMPILER_H diff --git a/ios/include/math/fast.h b/ios/include/math/fast.h index 7e1e55b0..85b990d2 100644 --- a/ios/include/math/fast.h +++ b/ios/include/math/fast.h @@ -17,13 +17,14 @@ #ifndef TNT_MATH_FAST_H #define TNT_MATH_FAST_H -#include -#include -#include - #include #include +#include +#include + +#include + #ifdef __ARM_NEON #include #endif diff --git a/ios/include/math/half.h b/ios/include/math/half.h index 72a43be7..779bdbe4 100644 --- a/ios/include/math/half.h +++ b/ios/include/math/half.h @@ -17,14 +17,14 @@ #ifndef TNT_MATH_HALF_H #define TNT_MATH_HALF_H +#include + #include #include #include #include -#include - namespace filament { namespace math { diff --git a/ios/include/math/mat2.h b/ios/include/math/mat2.h index b4857cef..957588a2 100644 --- a/ios/include/math/mat2.h +++ b/ios/include/math/mat2.h @@ -14,12 +14,13 @@ * limitations under the License. */ -#ifndef MATH_MAT2_H_ -#define MATH_MAT2_H_ +#ifndef TNT_MATH_MAT2_H +#define TNT_MATH_MAT2_H #include -#include #include +#include + #include #include @@ -358,4 +359,4 @@ constexpr void swap(filament::math::details::TMat22& lhs, } } -#endif // MATH_MAT2_H_ +#endif // TNT_MATH_MAT2_H diff --git a/ios/include/math/mat3.h b/ios/include/math/mat3.h index b854b099..d7b673a3 100644 --- a/ios/include/math/mat3.h +++ b/ios/include/math/mat3.h @@ -14,13 +14,13 @@ * limitations under the License. */ -#ifndef MATH_MAT3_H_ -#define MATH_MAT3_H_ +#ifndef TNT_MATH_MAT3_H +#define TNT_MATH_MAT3_H -#include #include -#include #include +#include +#include #include #include @@ -487,4 +487,4 @@ constexpr void swap(filament::math::details::TMat33& lhs, } } -#endif // MATH_MAT3_H_ +#endif // TNT_MATH_MAT3_H diff --git a/ios/include/math/mat4.h b/ios/include/math/mat4.h index d3c52da9..f97c5e52 100644 --- a/ios/include/math/mat4.h +++ b/ios/include/math/mat4.h @@ -14,14 +14,14 @@ * limitations under the License. 
*/ -#ifndef MATH_MAT4_H_ -#define MATH_MAT4_H_ +#ifndef TNT_MATH_MAT4_H +#define TNT_MATH_MAT4_H +#include #include #include #include #include -#include #include #include @@ -557,6 +557,26 @@ constexpr typename TMat44::col_type MATH_PURE operator*(const TMat44& lhs, typedef details::TMat44 mat4; typedef details::TMat44 mat4f; +// mat4 * float4, with double precision intermediates +constexpr float4 highPrecisionMultiply(mat4f const& lhs, float4 const& rhs) noexcept { + double4 result{}; + result += lhs[0] * rhs[0]; + result += lhs[1] * rhs[1]; + result += lhs[2] * rhs[2]; + result += lhs[3] * rhs[3]; + return float4{ result }; +} + +// mat4 * mat4, with double precision intermediates +constexpr mat4f highPrecisionMultiply(mat4f const& lhs, mat4f const& rhs) noexcept { + return { + highPrecisionMultiply(lhs, rhs[0]), + highPrecisionMultiply(lhs, rhs[1]), + highPrecisionMultiply(lhs, rhs[2]), + highPrecisionMultiply(lhs, rhs[3]) + }; +} + // ---------------------------------------------------------------------------------------- } // namespace math } // namespace filament @@ -628,4 +648,4 @@ constexpr void swap(filament::math::details::TMat44& lhs, } } -#endif // MATH_MAT4_H_ +#endif // TNT_MATH_MAT4_H diff --git a/ios/include/math/mathfwd.h b/ios/include/math/mathfwd.h index d37ce335..c5eecfab 100644 --- a/ios/include/math/mathfwd.h +++ b/ios/include/math/mathfwd.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef MATH_MATHFWD_H_ -#define MATH_MATHFWD_H_ +#ifndef TNT_MATH_MATHFWD_H +#define TNT_MATH_MATHFWD_H #ifdef _MSC_VER @@ -91,4 +91,4 @@ using mat4f = details::TMat44; #endif // _MSC_VER -#endif // MATH_MATHFWD_H_ +#endif // TNT_MATH_MATHFWD_H diff --git a/ios/include/math/quat.h b/ios/include/math/quat.h index 31c97c4f..be00f1ec 100644 --- a/ios/include/math/quat.h +++ b/ios/include/math/quat.h @@ -14,14 +14,14 @@ * limitations under the License. */ -#ifndef MATH_QUAT_H_ -#define MATH_QUAT_H_ +#ifndef TNT_MATH_QUAT_H +#define TNT_MATH_QUAT_H -#include #include +#include +#include #include #include -#include #include #include @@ -164,4 +164,4 @@ constexpr inline quat operator "" _k(unsigned long long v) { } // namespace math } // namespace filament -#endif // MATH_QUAT_H_ +#endif // TNT_MATH_QUAT_H diff --git a/ios/include/math/vec2.h b/ios/include/math/vec2.h index 64e091cd..16c11858 100644 --- a/ios/include/math/vec2.h +++ b/ios/include/math/vec2.h @@ -14,16 +14,17 @@ * limitations under the License. */ -#ifndef MATH_VEC2_H_ -#define MATH_VEC2_H_ +#ifndef TNT_MATH_VEC2_H +#define TNT_MATH_VEC2_H #include #include + +#include + #include #include #include -#include - namespace filament { namespace math { @@ -110,4 +111,4 @@ using bool2 = vec2; } // namespace math } // namespace filament -#endif // MATH_VEC2_H_ +#endif // TNT_MATH_VEC2_H diff --git a/ios/include/math/vec3.h b/ios/include/math/vec3.h index 909111fb..fc856ede 100644 --- a/ios/include/math/vec3.h +++ b/ios/include/math/vec3.h @@ -14,15 +14,15 @@ * limitations under the License. 
*/ -#ifndef MATH_VEC3_H_ -#define MATH_VEC3_H_ +#ifndef TNT_MATH_VEC3_H +#define TNT_MATH_VEC3_H -#include #include +#include + #include #include - namespace filament { namespace math { // ------------------------------------------------------------------------------------- @@ -130,4 +130,4 @@ using bool3 = vec3; } // namespace math } // namespace filament -#endif // MATH_VEC3_H_ +#endif // TNT_MATH_VEC3_H diff --git a/ios/include/math/vec4.h b/ios/include/math/vec4.h index da11b9e4..77877d5d 100644 --- a/ios/include/math/vec4.h +++ b/ios/include/math/vec4.h @@ -14,11 +14,12 @@ * limitations under the License. */ -#ifndef MATH_VEC4_H_ -#define MATH_VEC4_H_ +#ifndef TNT_MATH_VEC4_H +#define TNT_MATH_VEC4_H -#include #include +#include + #include #include @@ -129,4 +130,4 @@ using bool4 = vec4; } // namespace math } // namespace filament -#endif // MATH_VEC4_H_ +#endif // TNT_MATH_VEC4_H diff --git a/ios/include/mathio/ostream.h b/ios/include/mathio/ostream.h deleted file mode 100644 index 5f29c879..00000000 --- a/ios/include/mathio/ostream.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#if __has_attribute(visibility) -# define MATHIO_PUBLIC __attribute__((visibility("default"))) -#else -# define MATHIO_PUBLIC -#endif - -namespace filament { -namespace math { - -namespace details { template class TQuaternion; } - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TVec2& v) noexcept; - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TVec3& v) noexcept; - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TVec4& v) noexcept; - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TMat22& v) noexcept; - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TMat33& v) noexcept; - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TMat44& v) noexcept; - -template -MATHIO_PUBLIC -std::ostream& operator<<(std::ostream& out, const details::TQuaternion& v) noexcept; - -} // namespace math -} // namespace filament diff --git a/ios/include/src/Bookmark.cpp b/ios/include/src/Bookmark.cpp new file mode 100644 index 00000000..9c16177b --- /dev/null +++ b/ios/include/src/Bookmark.cpp @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include + +using namespace filament::math; + +namespace filament { +namespace camutils { + +template +Bookmark Bookmark::interpolate(Bookmark a, Bookmark b, double t) { + Bookmark result; + using float3 = filament::math::vec3; + + if (a.mode == Mode::MAP) { + assert(b.mode == Mode::MAP); + const double rho = sqrt(2.0); + const double rho2 = 2, rho4 = 4; + const double ux0 = a.map.center.x, uy0 = a.map.center.y, w0 = a.map.extent; + const double ux1 = b.map.center.x, uy1 = b.map.center.y, w1 = b.map.extent; + const double dx = ux1 - ux0, dy = uy1 - uy0, d2 = dx * dx + dy * dy, d1 = sqrt(d2); + const double b0 = (w1 * w1 - w0 * w0 + rho4 * d2) / (2.0 * w0 * rho2 * d1); + const double b1 = (w1 * w1 - w0 * w0 - rho4 * d2) / (2.0 * w1 * rho2 * d1); + const double r0 = log(sqrt(b0 * b0 + 1.0) - b0); + const double r1 = log(sqrt(b1 * b1 + 1) - b1); + const double dr = r1 - r0; + const int valid = !std::isnan(dr) && dr != 0; + const double S = (valid ? dr : log(w1 / w0)) / rho; + const double s = t * S; + + // This performs Van Wijk interpolation to animate between two waypoints on a map. + if (valid) { + const double coshr0 = cosh(r0); + const double u = w0 / (rho2 * d1) * (coshr0 * tanh(rho * s + r0) - sinh(r0)); + Bookmark result; + result.map.center.x = ux0 + u * dx; + result.map.center.y = uy0 + u * dy; + result.map.extent = w0 * coshr0 / cosh(rho * s + r0); + return result; + } + + // For degenerate cases, fall back to a simplified interpolation method. + result.map.center.x = ux0 + t * dx; + result.map.center.y = uy0 + t * dy; + result.map.extent = w0 * exp(rho * s); + return result; +} + + assert(b.mode == Mode::ORBIT); + result.orbit.phi = lerp(a.orbit.phi, b.orbit.phi, FLOAT(t)); + result.orbit.theta = lerp(a.orbit.theta, b.orbit.theta, FLOAT(t)); + result.orbit.distance = lerp(a.orbit.distance, b.orbit.distance, FLOAT(t)); + result.orbit.pivot = lerp(a.orbit.pivot, b.orbit.pivot, float3(t)); + return result; +} + +// Uses the Van Wijk method to suggest a duration for animating between two waypoints on a map. +// This does not have units, so just use it as a multiplier. +template +double Bookmark::duration(Bookmark a, Bookmark b) { + assert(a.mode == Mode::ORBIT && b.mode == Mode::ORBIT); + const double rho = sqrt(2.0); + const double rho2 = 2, rho4 = 4; + const double ux0 = a.map.center.x, uy0 = a.map.center.y, w0 = a.map.extent; + const double ux1 = b.map.center.x, uy1 = b.map.center.y, w1 = b.map.extent; + const double dx = ux1 - ux0, dy = uy1 - uy0, d2 = dx * dx + dy * dy, d1 = sqrt(d2); + const double b0 = (w1 * w1 - w0 * w0 + rho4 * d2) / (2.0 * w0 * rho2 * d1); + const double b1 = (w1 * w1 - w0 * w0 - rho4 * d2) / (2.0 * w1 * rho2 * d1); + const double r0 = log(sqrt(b0 * b0 + 1.0) - b0); + const double r1 = log(sqrt(b1 * b1 + 1) - b1); + const double dr = r1 - r0; + const int valid = !std::isnan(dr) && dr != 0; + const double S = (valid ? 
dr : log(w1 / w0)) / rho; + return fabs(S); +} + +template class Bookmark; + +} // namespace camutils +} // namespace filament diff --git a/ios/include/src/FreeFlightManipulator.h b/ios/include/src/FreeFlightManipulator.h new file mode 100644 index 00000000..07f5b215 --- /dev/null +++ b/ios/include/src/FreeFlightManipulator.h @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef CAMUTILS_FREEFLIGHT_MANIPULATOR_H +#define CAMUTILS_FREEFLIGHT_MANIPULATOR_H + +#include + +#include +#include +#include +#include + +#include + +namespace filament { +namespace camutils { + +using namespace filament::math; + +template +class FreeFlightManipulator : public Manipulator { +public: + using vec2 = filament::math::vec2; + using vec3 = filament::math::vec3; + using vec4 = filament::math::vec4; + using Bookmark = filament::camutils::Bookmark; + using Base = Manipulator; + using Config = typename Base::Config; + + FreeFlightManipulator(Mode mode, const Config& props) : Base(mode, props) { + setProperties(props); + Base::mEye = Base::mProps.flightStartPosition; + const auto pitch = Base::mProps.flightStartPitch; + const auto yaw = Base::mProps.flightStartYaw; + mTargetEuler = {pitch, yaw}; + updateTarget(pitch, yaw); + } + + void setProperties(const Config& props) override { + Config resolved = props; + + if (resolved.flightPanSpeed == vec2(0, 0)) { + resolved.flightPanSpeed = vec2(0.01, 0.01); + } + if (resolved.flightMaxSpeed == 0.0) { + resolved.flightMaxSpeed = 10.0; + } + if (resolved.flightSpeedSteps == 0) { + resolved.flightSpeedSteps = 80; + } + + Base::setProperties(resolved); + } + + void updateTarget(FLOAT pitch, FLOAT yaw) { + Base::mTarget = Base::mEye + (mat3::eulerZYX(0, yaw, pitch) * vec3(0.0, 0.0, -1.0)); + } + + void grabBegin(int x, int y, bool strafe) override { + mGrabWin = {x, y}; + mGrabbing = true; + mGrabEuler = mTargetEuler; + } + + void grabUpdate(int x, int y) override { + if (!mGrabbing) { + return; + } + + const vec2 del = mGrabWin - vec2{x, y}; + + const auto& grabPitch = mGrabEuler.x; + const auto& grabYaw = mGrabEuler.y; + auto& pitch = mTargetEuler.x; + auto& yaw = mTargetEuler.y; + + constexpr double EPSILON = 0.001; + + auto panSpeed = Base::mProps.flightPanSpeed; + constexpr FLOAT minPitch = (-F_PI_2 + EPSILON); + constexpr FLOAT maxPitch = ( F_PI_2 - EPSILON); + pitch = clamp(grabPitch + del.y * -panSpeed.y, minPitch, maxPitch); + yaw = fmod(grabYaw + del.x * panSpeed.x, 2.0 * F_PI); + + updateTarget(pitch, yaw); + } + + void grabEnd() override { + mGrabbing = false; + } + + void keyDown(typename Base::Key key) override { + mKeyDown[(int) key] = true; + } + + void keyUp(typename Base::Key key) override { + mKeyDown[(int) key] = false; + } + + void scroll(int x, int y, FLOAT scrolldelta) override { + const FLOAT halfSpeedSteps = Base::mProps.flightSpeedSteps / 2; + mScrollWheel = clamp(mScrollWheel + scrolldelta, -halfSpeedSteps, halfSpeedSteps); + // Normalize the scroll 
position from -1 to 1 and calculate the move speed, in world + // units per second. + mScrollPositionNormalized = (mScrollWheel + halfSpeedSteps) / halfSpeedSteps - 1.0; + mMoveSpeed = pow(Base::mProps.flightMaxSpeed, mScrollPositionNormalized); + } + + void update(FLOAT deltaTime) override { + vec3 forceLocal { 0.0, 0.0, 0.0 }; + + if (mKeyDown[(int) Base::Key::FORWARD]) { + forceLocal += vec3{ 0.0, 0.0, -1.0 }; + } + if (mKeyDown[(int) Base::Key::LEFT]) { + forceLocal += vec3{ -1.0, 0.0, 0.0 }; + } + if (mKeyDown[(int) Base::Key::BACKWARD]) { + forceLocal += vec3{ 0.0, 0.0, 1.0 }; + } + if (mKeyDown[(int) Base::Key::RIGHT]) { + forceLocal += vec3{ 1.0, 0.0, 0.0 }; + } + + const mat4 orientation = mat4::lookAt(Base::mEye, Base::mTarget, Base::mProps.upVector); + vec3 forceWorld = (orientation * vec4{ forceLocal, 0.0f }).xyz; + + if (mKeyDown[(int) Base::Key::UP]) { + forceWorld += vec3{ 0.0, 1.0, 0.0 }; + } + if (mKeyDown[(int) Base::Key::DOWN]) { + forceWorld += vec3{ 0.0, -1.0, 0.0 }; + } + + forceWorld *= mMoveSpeed; + + const auto dampingFactor = Base::mProps.flightMoveDamping; + if (dampingFactor == 0.0) { + // Without damping, we simply treat the force as our velocity. + mEyeVelocity = forceWorld; + } else { + // The dampingFactor acts as "friction", which acts upon the camera in the direction + // opposite its velocity. + // Force is also multiplied by the dampingFactor, to "make up" for the friction. + // This ensures that the max velocity still approaches mMoveSpeed; + vec3 velocityDelta = (forceWorld - mEyeVelocity) * dampingFactor; + mEyeVelocity += velocityDelta * deltaTime; + } + + const vec3 positionDelta = mEyeVelocity * deltaTime; + + Base::mEye += positionDelta; + Base::mTarget += positionDelta; + } + + Bookmark getCurrentBookmark() const override { + Bookmark bookmark; + bookmark.flight.position = Base::mEye; + bookmark.flight.pitch = mTargetEuler.x; + bookmark.flight.yaw = mTargetEuler.y; + return bookmark; + } + + Bookmark getHomeBookmark() const override { + Bookmark bookmark; + bookmark.flight.position = Base::mProps.flightStartPosition;; + bookmark.flight.pitch = Base::mProps.flightStartPitch; + bookmark.flight.yaw = Base::mProps.flightStartYaw; + return bookmark; + } + + void jumpToBookmark(const Bookmark& bookmark) override { + Base::mEye = bookmark.flight.position; + updateTarget(bookmark.flight.pitch, bookmark.flight.yaw); + } + +private: + vec2 mGrabWin; + vec2 mTargetEuler; // (pitch, yaw) + vec2 mGrabEuler; // (pitch, yaw) + bool mKeyDown[(int) Base::Key::COUNT] = {false}; + bool mGrabbing = false; + FLOAT mScrollWheel = 0.0f; + FLOAT mScrollPositionNormalized = 0.0f; + FLOAT mMoveSpeed = 1.0f; + vec3 mEyeVelocity; +}; + +} // namespace camutils +} // namespace filament + +#endif /* CAMUTILS_FREEFLIGHT_MANIPULATOR_H */ diff --git a/ios/include/src/Manipulator.cpp b/ios/include/src/Manipulator.cpp new file mode 100644 index 00000000..d44528cd --- /dev/null +++ b/ios/include/src/Manipulator.cpp @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include "FreeFlightManipulator.h" +#include "MapManipulator.h" +#include "OrbitManipulator.h" + +using namespace filament::math; + +namespace filament { +namespace camutils { + +template typename +Manipulator::Builder& Manipulator::Builder::viewport(int width, int height) { + details.viewport[0] = width; + details.viewport[1] = height; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::targetPosition(FLOAT x, FLOAT y, FLOAT z) { + details.targetPosition = {x, y, z}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::upVector(FLOAT x, FLOAT y, FLOAT z) { + details.upVector = {x, y, z}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::zoomSpeed(FLOAT val) { + details.zoomSpeed = val; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::orbitHomePosition(FLOAT x, FLOAT y, FLOAT z) { + details.orbitHomePosition = {x, y, z}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::orbitSpeed(FLOAT x, FLOAT y) { + details.orbitSpeed = {x, y}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::fovDirection(Fov fov) { + details.fovDirection = fov; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::fovDegrees(FLOAT degrees) { + details.fovDegrees = degrees; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::farPlane(FLOAT distance) { + details.farPlane = distance; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::mapExtent(FLOAT worldWidth, FLOAT worldHeight) { + details.mapExtent = {worldWidth, worldHeight}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::mapMinDistance(FLOAT mindist) { + details.mapMinDistance = mindist; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::flightStartPosition(FLOAT x, FLOAT y, FLOAT z) { + details.flightStartPosition = {x, y, z}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::flightStartOrientation(FLOAT pitch, FLOAT yaw) { + details.flightStartPitch = pitch; + details.flightStartYaw = yaw; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::flightMaxMoveSpeed(FLOAT maxSpeed) { + details.flightMaxSpeed = maxSpeed; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::flightSpeedSteps(int steps) { + details.flightSpeedSteps = steps; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::flightPanSpeed(FLOAT x, FLOAT y) { + details.flightPanSpeed = {x, y}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::flightMoveDamping(FLOAT damping) { + details.flightMoveDamping = damping; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::groundPlane(FLOAT a, FLOAT b, FLOAT c, FLOAT d) { + details.groundPlane = {a, b, c, d}; + return *this; +} + +template typename +Manipulator::Builder& Manipulator::Builder::raycastCallback(RayCallback cb, void* userdata) { + details.raycastCallback = cb; + details.raycastUserdata = userdata; + return *this; +} + +template +Manipulator* Manipulator::Builder::build(Mode mode) { + switch (mode) { + case Mode::FREE_FLIGHT: + return new 
FreeFlightManipulator(mode, details); + case Mode::MAP: + return new MapManipulator(mode, details); + case Mode::ORBIT: + return new OrbitManipulator(mode, details); + } +} + +template +Manipulator::Manipulator(Mode mode, const Config& props) : mMode(mode) { + setProperties(props); +} + +template +void Manipulator::setProperties(const Config& props) { + mProps = props; + + if (mProps.zoomSpeed == FLOAT(0)) { + mProps.zoomSpeed = 0.01; + } + + if (mProps.upVector == vec3(0)) { + mProps.upVector = vec3(0, 1, 0); + } + + if (mProps.fovDegrees == FLOAT(0)) { + mProps.fovDegrees = 33; + } + + if (mProps.farPlane == FLOAT(0)) { + mProps.farPlane = 5000; + } + + if (mProps.mapExtent == vec2(0)) { + mProps.mapExtent = vec2(512); + } +} + +template +void Manipulator::setViewport(int width, int height) { + Config props = mProps; + props.viewport[0] = width; + props.viewport[1] = height; + setProperties(props); +} + +template +void Manipulator::getLookAt(vec3* eyePosition, vec3* targetPosition, vec3* upward) const { + *targetPosition = mTarget; + *eyePosition = mEye; + const vec3 gaze = normalize(mTarget - mEye); + const vec3 right = cross(gaze, mProps.upVector); + *upward = cross(right, gaze); +} + +template +static bool raycastPlane(const filament::math::vec3& origin, + const filament::math::vec3& dir, FLOAT* t, void* userdata) { + using vec3 = filament::math::vec3; + using vec4 = filament::math::vec4; + auto props = (const typename Manipulator::Config*) userdata; + const vec4 plane = props->groundPlane; + const vec3 n = vec3(plane[0], plane[1], plane[2]); + const vec3 p0 = n * plane[3]; + const FLOAT denom = -dot(n, dir); + if (denom > 1e-6) { + const vec3 p0l0 = p0 - origin; + *t = dot(p0l0, n) / -denom; + return *t >= 0; + } + return false; +} + +template +void Manipulator::getRay(int x, int y, vec3* porigin, vec3* pdir) const { + const vec3 gaze = normalize(mTarget - mEye); + const vec3 right = normalize(cross(gaze, mProps.upVector)); + const vec3 upward = cross(right, gaze); + const FLOAT width = mProps.viewport[0]; + const FLOAT height = mProps.viewport[1]; + const FLOAT fov = mProps.fovDegrees * F_PI / 180.0; + + // Remap the grid coordinate into [-1, +1] and shift it to the pixel center. + const FLOAT u = 2.0 * (0.5 + x) / width - 1.0; + const FLOAT v = 2.0 * (0.5 + y) / height - 1.0; + + // Compute the tangent of the field-of-view angle as well as the aspect ratio. + const FLOAT tangent = tan(fov / 2.0); + const FLOAT aspect = width / height; + + // Adjust the gaze so it goes through the pixel of interest rather than the grid center. + vec3 dir = gaze; + if (mProps.fovDirection == Fov::VERTICAL) { + dir += right * tangent * u * aspect; + dir += upward * tangent * v; + } else { + dir += right * tangent * u; + dir += upward * tangent * v / aspect; + } + dir = normalize(dir); + + *porigin = mEye; + *pdir = dir; +} + +template +bool Manipulator::raycast(int x, int y, vec3* result) const { + vec3 origin, dir; + getRay(x, y, &origin, &dir); + + // Choose either the user's callback function or the plane intersector. + auto callback = mProps.raycastCallback; + auto fallback = raycastPlane; + void* userdata = mProps.raycastUserdata; + if (!callback) { + callback = fallback; + userdata = (void*) &mProps; + } + + // If the ray misses, then try the fallback function. 
+ FLOAT t; + if (!callback(mEye, dir, &t, userdata)) { + if (callback == fallback || !fallback(mEye, dir, &t, (void*) &mProps)) { + return false; + } + } + + *result = mEye + dir * t; + return true; +} + +template +filament::math::vec3 Manipulator::raycastFarPlane(int x, int y) const { + const filament::math::vec3 gaze = normalize(mTarget - mEye); + const vec3 right = cross(gaze, mProps.upVector); + const vec3 upward = cross(right, gaze); + const FLOAT width = mProps.viewport[0]; + const FLOAT height = mProps.viewport[1]; + const FLOAT fov = mProps.fovDegrees * math::F_PI / 180.0; + + // Remap the grid coordinate into [-1, +1] and shift it to the pixel center. + const FLOAT u = 2.0 * (0.5 + x) / width - 1.0; + const FLOAT v = 2.0 * (0.5 + y) / height - 1.0; + + // Compute the tangent of the field-of-view angle as well as the aspect ratio. + const FLOAT tangent = tan(fov / 2.0); + const FLOAT aspect = width / height; + + // Adjust the gaze so it goes through the pixel of interest rather than the grid center. + vec3 dir = gaze; + if (mProps.fovDirection == Fov::VERTICAL) { + dir += right * tangent * u * aspect; + dir += upward * tangent * v; + } else { + dir += right * tangent * u; + dir += upward * tangent * v / aspect; + } + return mEye + dir * mProps.farPlane; +} + +template +void Manipulator::keyDown(Manipulator::Key key) { } + +template +void Manipulator::keyUp(Manipulator::Key key) { } + +template +void Manipulator::update(FLOAT deltaTime) { } + +template class Manipulator; + +} // namespace camutils +} // namespace filament diff --git a/ios/include/src/MapManipulator.h b/ios/include/src/MapManipulator.h new file mode 100644 index 00000000..6df5b4c5 --- /dev/null +++ b/ios/include/src/MapManipulator.h @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef CAMUTILS_MAP_MANIPULATOR_H +#define CAMUTILS_MAP_MANIPULATOR_H + +#include + +#include + +namespace filament { +namespace camutils { + +template +class MapManipulator : public Manipulator { +public: + using vec2 = math::vec2; + using vec3 = math::vec3; + using vec4 = math::vec4; + using Bookmark = filament::camutils::Bookmark; + using Base = Manipulator; + using Config = typename Manipulator::Config; + + MapManipulator(Mode mode, const Config& props) : Manipulator(mode, props) { + const FLOAT width = Base::mProps.mapExtent.x; + const FLOAT height = Base::mProps.mapExtent.y; + const bool horiz = Base::mProps.fovDirection == Fov::HORIZONTAL; + const vec3 targetToEye = Base::mProps.groundPlane.xyz; + const FLOAT halfExtent = (horiz ? 
width : height) / 2.0; + const FLOAT fov = Base::mProps.fovDegrees * math::F_PI / 180.0; + const FLOAT distance = halfExtent / tan(fov / 2.0); + Base::mTarget = Base::mProps.targetPosition; + Base::mEye = Base::mTarget + distance * targetToEye; + } + + void grabBegin(int x, int y, bool strafe) override { + if (strafe || !Base::raycast(x, y, &mGrabScene)) { + return; + } + mGrabFar = Base::raycastFarPlane(x, y); + mGrabEye = Base::mEye; + mGrabTarget = Base::mTarget; + mGrabbing = true; + } + + void grabUpdate(int x, int y) override { + if (mGrabbing) { + const FLOAT ulen = distance(mGrabScene, mGrabEye); + const FLOAT vlen = distance(mGrabFar, mGrabScene); + const vec3 translation = (mGrabFar - Base::raycastFarPlane(x, y)) * ulen / vlen; + const vec3 eyePosition = mGrabEye + translation; + const vec3 targetPosition = mGrabTarget + translation; + moveWithConstraints(eyePosition, targetPosition); + } + } + + void grabEnd() override { + mGrabbing = false; + } + + void scroll(int x, int y, FLOAT scrolldelta) override { + vec3 grabScene; + if (!Base::raycast(x, y, &grabScene)) { + return; + } + + // Find the direction of travel for the dolly. We do not normalize since it + // is desirable to move faster when further away from the targetPosition. + vec3 u = grabScene - Base::mEye; + + // Prevent getting stuck when zooming in. + if (scrolldelta < 0) { + const FLOAT distanceToSurface = length(u); + if (distanceToSurface < Base::mProps.zoomSpeed) { + return; + } + } + + u *= -scrolldelta * Base::mProps.zoomSpeed; + + const vec3 eyePosition = Base::mEye + u; + const vec3 targetPosition = Base::mTarget + u; + moveWithConstraints(eyePosition, targetPosition); + } + + Bookmark getCurrentBookmark() const override { + const vec3 dir = normalize(Base::mTarget - Base::mEye); + + FLOAT distance; + raycastPlane(Base::mEye, dir, &distance); + + const FLOAT fov = Base::mProps.fovDegrees * math::F_PI / 180.0; + const FLOAT halfExtent = distance * tan(fov / 2.0); + + vec3 targetPosition = Base::mEye + dir * distance; + + const vec3 targetToEye = Base::mProps.groundPlane.xyz; + const vec3 uvec = cross(Base::mProps.upVector, targetToEye); + const vec3 vvec = cross(targetToEye, uvec); + const vec3 centerToTarget = targetPosition - Base::mProps.targetPosition; + + Bookmark bookmark; + bookmark.mode = Mode::MAP; + bookmark.map.extent = halfExtent * 2.0; + bookmark.map.center.x = dot(uvec, centerToTarget); + bookmark.map.center.y = dot(vvec, centerToTarget); + + bookmark.orbit.theta = 0; + bookmark.orbit.phi = 0; + bookmark.orbit.pivot = Base::mProps.targetPosition + + uvec * bookmark.map.center.x + + vvec * bookmark.map.center.y; + bookmark.orbit.distance = halfExtent / tan(fov / 2.0); + + return bookmark; + } + + Bookmark getHomeBookmark() const override { + const FLOAT fov = Base::mProps.fovDegrees * math::F_PI / 180.0; + const FLOAT width = Base::mProps.mapExtent.x; + const FLOAT height = Base::mProps.mapExtent.y; + const bool horiz = Base::mProps.fovDirection == Fov::HORIZONTAL; + + Bookmark bookmark; + bookmark.mode = Mode::MAP; + bookmark.map.extent = horiz ? width : height; + bookmark.map.center.x = 0; + bookmark.map.center.y = 0; + + bookmark.orbit.theta = 0; + bookmark.orbit.phi = 0; + bookmark.orbit.pivot = Base::mTarget; + bookmark.orbit.distance = 0.5 * bookmark.map.extent / tan(fov / 2.0); + + // TODO: Add optional boundary constraints here. 
+ + return bookmark; + } + + void jumpToBookmark(const Bookmark& bookmark) override { + const vec3 targetToEye = Base::mProps.groundPlane.xyz; + const FLOAT halfExtent = bookmark.map.extent / 2.0; + const FLOAT fov = Base::mProps.fovDegrees * math::F_PI / 180.0; + const FLOAT distance = halfExtent / tan(fov / 2.0); + vec3 uvec = cross(Base::mProps.upVector, targetToEye); + vec3 vvec = cross(targetToEye, uvec); + uvec = normalize(uvec) * bookmark.map.center.x; + vvec = normalize(vvec) * bookmark.map.center.y; + Base::mTarget = Base::mProps.targetPosition + uvec + vvec; + Base::mEye = Base::mTarget + distance * targetToEye; + } + +private: + bool raycastPlane(const vec3& origin, const vec3& dir, FLOAT* t) const { + const vec4 plane = Base::mProps.groundPlane; + const vec3 n = vec3(plane[0], plane[1], plane[2]); + const vec3 p0 = n * plane[3]; + const FLOAT denom = -dot(n, dir); + if (denom > 1e-6) { + const vec3 p0l0 = p0 - origin; + *t = dot(p0l0, n) / -denom; + return *t >= 0; + } + return false; + } + + void moveWithConstraints(vec3 eye, vec3 targetPosition) { + Base::mEye = eye; + Base::mTarget = targetPosition; + // TODO: Add optional boundary constraints here. + } + +private: + bool mGrabbing = false; + vec3 mGrabScene; + vec3 mGrabFar; + vec3 mGrabEye; + vec3 mGrabTarget; +}; + +} // namespace camutils +} // namespace filament + +#endif /* CAMUTILS_MAP_MANIPULATOR_H */ diff --git a/ios/include/src/OrbitManipulator.h b/ios/include/src/OrbitManipulator.h new file mode 100644 index 00000000..54e325cc --- /dev/null +++ b/ios/include/src/OrbitManipulator.h @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef CAMUTILS_ORBIT_MANIPULATOR_H +#define CAMUTILS_ORBIT_MANIPULATOR_H + +#include + +#include + +#define MAX_PHI (F_PI / 2.0 - 0.001) + +namespace filament { +namespace camutils { + +using namespace filament::math; + +template +class OrbitManipulator : public Manipulator { +public: + using vec2 = filament::math::vec2; + using vec3 = filament::math::vec3; + using vec4 = filament::math::vec4; + using Bookmark = filament::camutils::Bookmark; + using Base = Manipulator; + using Config = typename Base::Config; + + enum GrabState { INACTIVE, ORBITING, PANNING }; + + OrbitManipulator(Mode mode, const Config& props) : Base(mode, props) { + setProperties(props); + Base::mEye = Base::mProps.orbitHomePosition; + mPivot = Base::mTarget = Base::mProps.targetPosition; + } + + void setProperties(const Config& props) override { + Config resolved = props; + + if (resolved.orbitHomePosition == vec3(0)) { + resolved.orbitHomePosition = vec3(0, 0, 1); + } + + if (resolved.orbitSpeed == vec2(0)) { + resolved.orbitSpeed = vec2(0.01); + } + + // By default, place the ground plane so that it aligns with the targetPosition position. + // This is used only when PANNING. 
+ if (resolved.groundPlane == vec4(0)) { + const FLOAT d = length(resolved.targetPosition); + const vec3 n = normalize(resolved.orbitHomePosition - resolved.targetPosition); + resolved.groundPlane = vec4(n, -d); + } + + Base::setProperties(resolved); + } + + void grabBegin(int x, int y, bool strafe) override { + mGrabState = strafe ? PANNING : ORBITING; + mGrabPivot = mPivot; + mGrabEye = Base::mEye; + mGrabTarget = Base::mTarget; + mGrabBookmark = getCurrentBookmark(); + mGrabWinX = x; + mGrabWinY = y; + mGrabFar = Base::raycastFarPlane(x, y); + Base::raycast(x, y, &mGrabScene); + } + + void grabUpdate(int x, int y) override { + const int delx = mGrabWinX - x; + const int dely = mGrabWinY - y; + + if (mGrabState == ORBITING) { + Bookmark bookmark = getCurrentBookmark(); + + const FLOAT theta = delx * Base::mProps.orbitSpeed.x; + const FLOAT phi = dely * Base::mProps.orbitSpeed.y; + const FLOAT maxPhi = MAX_PHI; + + bookmark.orbit.phi = clamp(mGrabBookmark.orbit.phi + phi, -maxPhi, +maxPhi); + bookmark.orbit.theta = mGrabBookmark.orbit.theta + theta; + + jumpToBookmark(bookmark); + } + + if (mGrabState == PANNING) { + const FLOAT ulen = distance(mGrabScene, mGrabEye); + const FLOAT vlen = distance(mGrabFar, mGrabScene); + const vec3 translation = (mGrabFar - Base::raycastFarPlane(x, y)) * ulen / vlen; + mPivot = mGrabPivot + translation; + Base::mEye = mGrabEye + translation; + Base::mTarget = mGrabTarget + translation; + } + } + + void grabEnd() override { + mGrabState = INACTIVE; + } + + void scroll(int x, int y, FLOAT scrolldelta) override { + const vec3 gaze = normalize(Base::mTarget - Base::mEye); + const vec3 movement = gaze * Base::mProps.zoomSpeed * -scrolldelta; + const vec3 v0 = mPivot - Base::mEye; + Base::mEye += movement; + Base::mTarget += movement; + const vec3 v1 = mPivot - Base::mEye; + + // Check if the camera has moved past the point of interest. + if (dot(v0, v1) < 0) { + mFlipped = !mFlipped; + } + } + + Bookmark getCurrentBookmark() const override { + Bookmark bookmark; + bookmark.mode = Mode::ORBIT; + const vec3 pivotToEye = Base::mEye - mPivot; + const FLOAT d = length(pivotToEye); + const FLOAT x = pivotToEye.x / d; + const FLOAT y = pivotToEye.y / d; + const FLOAT z = pivotToEye.z / d; + + bookmark.orbit.phi = asin(y); + bookmark.orbit.theta = atan2(x, z); + bookmark.orbit.distance = mFlipped ? 
-d : d; + bookmark.orbit.pivot = mPivot; + + const FLOAT fov = Base::mProps.fovDegrees * math::F_PI / 180.0; + const FLOAT halfExtent = d * tan(fov / 2.0); + const vec3 targetToEye = Base::mProps.groundPlane.xyz; + const vec3 uvec = cross(Base::mProps.upVector, targetToEye); + const vec3 vvec = cross(targetToEye, uvec); + const vec3 centerToTarget = mPivot - Base::mProps.targetPosition; + + bookmark.map.extent = halfExtent * 2; + bookmark.map.center.x = dot(uvec, centerToTarget); + bookmark.map.center.y = dot(vvec, centerToTarget); + + return bookmark; + } + + Bookmark getHomeBookmark() const override { + Bookmark bookmark; + bookmark.mode = Mode::ORBIT; + bookmark.orbit.phi = FLOAT(0); + bookmark.orbit.theta = FLOAT(0); + bookmark.orbit.pivot = Base::mProps.targetPosition; + bookmark.orbit.distance = distance(Base::mProps.targetPosition, Base::mProps.orbitHomePosition); + + const FLOAT fov = Base::mProps.fovDegrees * math::F_PI / 180.0; + const FLOAT halfExtent = bookmark.orbit.distance * tan(fov / 2.0); + + bookmark.map.extent = halfExtent * 2; + bookmark.map.center.x = 0; + bookmark.map.center.y = 0; + + return bookmark; + } + + void jumpToBookmark(const Bookmark& bookmark) override { + mPivot = bookmark.orbit.pivot; + const FLOAT x = sin(bookmark.orbit.theta) * cos(bookmark.orbit.phi); + const FLOAT y = sin(bookmark.orbit.phi); + const FLOAT z = cos(bookmark.orbit.theta) * cos(bookmark.orbit.phi); + Base::mEye = mPivot + vec3(x, y, z) * abs(bookmark.orbit.distance); + mFlipped = bookmark.orbit.distance < 0; + Base::mTarget = Base::mEye + vec3(x, y, z) * (mFlipped ? 1.0 : -1.0); + } + +private: + GrabState mGrabState = INACTIVE; + bool mFlipped = false; + vec3 mGrabPivot; + vec3 mGrabScene; + vec3 mGrabFar; + vec3 mGrabEye; + vec3 mGrabTarget; + Bookmark mGrabBookmark; + int mGrabWinX; + int mGrabWinY; + vec3 mPivot; +}; + +} // namespace camutils +} // namespace filament + +#endif /* CAMUTILS_ORBIT_MANIPULATOR_H */ diff --git a/ios/include/tests/test_camutils.cpp b/ios/include/tests/test_camutils.cpp new file mode 100644 index 00000000..ac2314d6 --- /dev/null +++ b/ios/include/tests/test_camutils.cpp @@ -0,0 +1,85 @@ +/* + * Copyright 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include + +using namespace filament::math; + +namespace camutils = filament::camutils; + +using CamManipulator = camutils::Manipulator; + +class CamUtilsTest : public testing::Test {}; + +#define EXPECT_VEC_EQ(a, x, y, z) expectVecEq(a, {x, y, z}, __LINE__) + +static void expectVecEq(float3 a, float3 b, int line) { + EXPECT_FLOAT_EQ(a.x, b.x); + EXPECT_FLOAT_EQ(a.y, b.y); + EXPECT_FLOAT_EQ(a.z, b.z); +} + +TEST_F(CamUtilsTest, Orbit) { + + float3 eye, targetPosition, up; + + CamManipulator* orbit = CamManipulator::Builder() + .viewport(256, 256) + .targetPosition(0, 0, 0) + .upVector(0, 1, 0) + .zoomSpeed(0.01) + .orbitHomePosition(0, 0, 4) + .orbitSpeed(1, 1) + .build(camutils::Mode::ORBIT); + + orbit->getLookAt(&eye, &targetPosition, &up); + EXPECT_VEC_EQ(eye, 0, 0, 4); + EXPECT_VEC_EQ(targetPosition, 0, 0, 0); + EXPECT_VEC_EQ(up, 0, 1, 0); + + orbit->grabBegin(100, 100, false); + orbit->grabUpdate(200, 100); + orbit->grabEnd(); + + orbit->getLookAt(&eye, &targetPosition, &up); + EXPECT_VEC_EQ(eye, 2.0254626, 0, 3.4492755); + EXPECT_VEC_EQ(targetPosition, 1.519097, 0, 2.5869565); + EXPECT_VEC_EQ(up, 0, 1, 0); + + delete orbit; +} + +TEST_F(CamUtilsTest, Map) { + + float3 eye, targetPosition, up; + + CamManipulator* map = CamManipulator::Builder() + .viewport(256, 256) + .targetPosition(0, 0, 0) + .zoomSpeed(0.01) + .orbitHomePosition(0, 0, 4) + .build(camutils::Mode::MAP); + + delete map; +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/ios/include/tsl/array-hash/array_growth_policy.h b/ios/include/tsl/array-hash/array_growth_policy.h new file mode 100644 index 00000000..641e0cb7 --- /dev/null +++ b/ios/include/tsl/array-hash/array_growth_policy.h @@ -0,0 +1,307 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ARRAY_GROWTH_POLICY_H +#define TSL_ARRAY_GROWTH_POLICY_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef __EXCEPTIONS +# define THROW(_e, _m) throw _e(_m) +#else +# include +# ifndef NDEBUG +# define THROW(_e, _m) do { fprintf(stderr, _m); std::terminate(); } while(0) +# else +# define THROW(_e, _m) std::terminate() +# endif +#endif + + +namespace tsl { +namespace ah { + +/** + * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a power of two. 
It allows + * the table to use a mask operation instead of a modulo operation to map a hash to a bucket. + * + * GrowthFactor must be a power of two >= 2. + */ +template +class power_of_two_growth_policy { +public: + /** + * Called on the hash table creation and on rehash. The number of buckets for the table is passed in parameter. + * This number is a minimum, the policy may update this value with a higher value if needed (but not lower). + * + * If 0 is given, min_bucket_count_in_out must still be 0 after the policy creation and + * bucket_for_hash must always return 0 in this case. + */ + explicit power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) { + if(min_bucket_count_in_out > max_bucket_count()) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + if(min_bucket_count_in_out > 0) { + min_bucket_count_in_out = round_up_to_power_of_two(min_bucket_count_in_out); + m_mask = min_bucket_count_in_out - 1; + } + else { + m_mask = 0; + } + } + + /** + * Return the bucket [0, bucket_count()) to which the hash belongs. + * If bucket_count() is 0, it must always return 0. + */ + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash & m_mask; + } + + /** + * Return the number of buckets that should be used on next growth. + */ + std::size_t next_bucket_count() const { + if((m_mask + 1) > max_bucket_count() / GrowthFactor) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + return (m_mask + 1) * GrowthFactor; + } + + /** + * Return the maximum number of buckets supported by the policy. + */ + std::size_t max_bucket_count() const { + // Largest power of two. + return (std::numeric_limits::max() / 2) + 1; + } + + /** + * Reset the growth policy as if it was created with a bucket count of 0. + * After a clear, the policy must always return 0 when bucket_for_hash is called. + */ + void clear() noexcept { + m_mask = 0; + } + +private: + static std::size_t round_up_to_power_of_two(std::size_t value) { + if(is_power_of_two(value)) { + return value; + } + + if(value == 0) { + return 1; + } + + --value; + for(std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) { + value |= value >> i; + } + + return value + 1; + } + + static constexpr bool is_power_of_two(std::size_t value) { + return value != 0 && (value & (value - 1)) == 0; + } + +protected: + static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2, "GrowthFactor must be a power of two >= 2."); + + std::size_t m_mask; +}; + + +/** + * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo to map a hash + * to a bucket. Slower but it can be useful if you want a slower growth. 
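A small sketch of how the power-of-two policy above behaves (assuming its stripped template parameter is the usual std::size_t GrowthFactor): the constructor rounds the requested bucket count up to a power of two so that bucket_for_hash() can use a mask instead of a modulo.

// Sketch only: exercises tsl::ah::power_of_two_growth_policy as declared above.
#include <cstddef>
#include <iostream>

void growthPolicyExample() {
    std::size_t bucket_count = 100;                                  // requested minimum
    tsl::ah::power_of_two_growth_policy<2> policy(bucket_count);     // rounds 100 up to 128
    std::cout << "buckets: " << bucket_count << '\n';                // 128
    std::cout << "bucket for hash 0x1234: "
              << policy.bucket_for_hash(0x1234) << '\n';             // 0x1234 & 127 == 52
    std::cout << "next growth: " << policy.next_bucket_count() << '\n';  // 128 * 2 == 256
}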
+ */ +template> +class mod_growth_policy { +public: + explicit mod_growth_policy(std::size_t& min_bucket_count_in_out) { + if(min_bucket_count_in_out > max_bucket_count()) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + if(min_bucket_count_in_out > 0) { + m_mod = min_bucket_count_in_out; + } + else { + m_mod = 1; + } + } + + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash % m_mod; + } + + std::size_t next_bucket_count() const { + if(m_mod == max_bucket_count()) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + const double next_bucket_count = std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR); + if(!std::isnormal(next_bucket_count)) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + if(next_bucket_count > double(max_bucket_count())) { + return max_bucket_count(); + } + else { + return std::size_t(next_bucket_count); + } + } + + std::size_t max_bucket_count() const { + return MAX_BUCKET_COUNT; + } + + void clear() noexcept { + m_mod = 1; + } + +private: + static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR = 1.0 * GrowthFactor::num / GrowthFactor::den; + static const std::size_t MAX_BUCKET_COUNT = + std::size_t(double( + std::numeric_limits::max() / REHASH_SIZE_MULTIPLICATION_FACTOR + )); + + static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, "Growth factor should be >= 1.1."); + + std::size_t m_mod; +}; + + + +namespace detail { + +static constexpr const std::array PRIMES = {{ + 1ul, 5ul, 17ul, 29ul, 37ul, 53ul, 67ul, 79ul, 97ul, 131ul, 193ul, 257ul, 389ul, 521ul, 769ul, 1031ul, + 1543ul, 2053ul, 3079ul, 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, 393241ul, 786433ul, + 1572869ul, 3145739ul, 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, + 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, 4294967291ul +}}; + +template +static constexpr std::size_t mod(std::size_t hash) { return hash % PRIMES[IPrime]; } + +// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for faster modulo as the +// compiler can optimize the modulo code better with a constant known at the compilation. +static constexpr const std::array MOD_PRIME = {{ + &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>, &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, + &mod<11>, &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>, &mod<18>, &mod<19>, &mod<20>, + &mod<21>, &mod<22>, &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>, &mod<29>, &mod<30>, + &mod<31>, &mod<32>, &mod<33>, &mod<34>, &mod<35>, &mod<36>, &mod<37> , &mod<38>, &mod<39> +}}; + +} + +/** + * Grow the hash table by using prime numbers as bucket count. Slower than tsl::ah::power_of_two_growth_policy in + * general but will probably distribute the values around better in the buckets with a poor hash function. + * + * To allow the compiler to optimize the modulo operation, a lookup table is used with constant primes numbers. + * + * With a switch the code would look like: + * \code + * switch(iprime) { // iprime is the current prime of the hash table + * case 0: hash % 5ul; + * break; + * case 1: hash % 17ul; + * break; + * case 2: hash % 29ul; + * break; + * ... + * } + * \endcode + * + * Due to the constant variable in the modulo the compiler is able to optimize the operation + * by a series of multiplications, substractions and shifts. 
+ * + * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) * 5' in a 64 bits environment. + */ +class prime_growth_policy { +public: + explicit prime_growth_policy(std::size_t& min_bucket_count_in_out) { + auto it_prime = std::lower_bound(detail::PRIMES.begin(), + detail::PRIMES.end(), min_bucket_count_in_out); + if(it_prime == detail::PRIMES.end()) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + m_iprime = static_cast(std::distance(detail::PRIMES.begin(), it_prime)); + if(min_bucket_count_in_out > 0) { + min_bucket_count_in_out = *it_prime; + } + else { + min_bucket_count_in_out = 0; + } + } + + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return detail::MOD_PRIME[m_iprime](hash); + } + + std::size_t next_bucket_count() const { + if(m_iprime + 1 >= detail::PRIMES.size()) { + THROW(std::length_error, "The hash table exceeds its maximum size."); + } + + return detail::PRIMES[m_iprime + 1]; + } + + std::size_t max_bucket_count() const { + return detail::PRIMES.back(); + } + + void clear() noexcept { + m_iprime = 0; + } + +private: + unsigned int m_iprime; + + static_assert(std::numeric_limits::max() >= detail::PRIMES.size(), + "The type of m_iprime is not big enough."); +}; + +} +} + +#endif diff --git a/ios/include/tsl/array-hash/array_hash.h b/ios/include/tsl/array-hash/array_hash.h new file mode 100644 index 00000000..ccb204ca --- /dev/null +++ b/ios/include/tsl/array-hash/array_hash.h @@ -0,0 +1,1766 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ARRAY_HASH_H +#define TSL_ARRAY_HASH_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "array_growth_policy.h" + + +/* + * __has_include is a bit useless (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79433), + * check also __cplusplus version. + */ +#ifdef __has_include +# if __has_include() && __cplusplus >= 201703L +# define TSL_AH_HAS_STRING_VIEW +# endif +#endif + + +#ifdef TSL_AH_HAS_STRING_VIEW +# include +#endif + + +#ifdef TSL_DEBUG +# define tsl_ah_assert(expr) assert(expr) +#else +# define tsl_ah_assert(expr) (static_cast(0)) +#endif + + + +/** + * Implementation of the array hash structure described in the + * "Cache-conscious collision resolution in string hash tables." (Askitis Nikolas and Justin Zobel, 2005) paper. 
+ */ +namespace tsl { + +namespace ah { + +template +struct str_hash { +#ifdef TSL_AH_HAS_STRING_VIEW + std::size_t operator()(const CharT* key, std::size_t key_size) const { + return std::hash>()(std::basic_string_view(key, key_size)); + } +#else + /** + * FNV-1a hash + */ + std::size_t operator()(const CharT* key, std::size_t key_size) const { + static const std::size_t init = std::size_t((sizeof(std::size_t) == 8)?0xcbf29ce484222325:0x811c9dc5); + static const std::size_t multiplier = std::size_t((sizeof(std::size_t) == 8)?0x100000001b3:0x1000193); + + std::size_t hash = init; + for (std::size_t i = 0; i < key_size; ++i) { + hash ^= key[i]; + hash *= multiplier; + } + + return hash; + } +#endif +}; + +template +struct str_equal { + bool operator()(const CharT* key_lhs, std::size_t key_size_lhs, + const CharT* key_rhs, std::size_t key_size_rhs) const + { + if(key_size_lhs != key_size_rhs) { + return false; + } + else { + return std::memcmp(key_lhs, key_rhs, key_size_lhs * sizeof(CharT)) == 0; + } + } +}; +} + + +namespace detail_array_hash { + +template +struct is_iterator: std::false_type { +}; + +template +struct is_iterator::iterator_category, void>::value>::type>: std::true_type { +}; + +static constexpr bool is_power_of_two(std::size_t value) { + return value != 0 && (value & (value - 1)) == 0; +} + +template +static T numeric_cast(U value, const char* error_message = "numeric_cast() failed.") { + T ret = static_cast(value); + if(static_cast(ret) != value) { + THROW(std::runtime_error, error_message); + } + + const bool is_same_signedness = (std::is_unsigned::value && std::is_unsigned::value) || + (std::is_signed::value && std::is_signed::value); + if(!is_same_signedness && (ret < T{}) != (value < U{})) { + THROW(std::runtime_error, error_message); + } + + return ret; +} + + + +/** + * Fixed size type used to represent size_type values on serialization. Need to be big enough + * to represent a std::size_t on 32 and 64 bits platforms, and must be the same size on both platforms. + */ +using slz_size_type = std::uint64_t; + +template +static T deserialize_value(Deserializer& deserializer) { + // MSVC < 2017 is not conformant, circumvent the problem by removing the template keyword +#if defined (_MSC_VER) && _MSC_VER < 1910 + return deserializer.Deserializer::operator()(); +#else + return deserializer.Deserializer::template operator()(); +#endif +} + +/** + * For each string in the bucket, store the size of the string, the chars of the string + * and T, if it's not void. T should be either void or an unsigned type. + * + * End the buffer with END_OF_BUCKET flag. END_OF_BUCKET has the same type as the string size variable. + * + * m_buffer (CharT*): + * | size of str1 (KeySizeT) | str1 (const CharT*) | value (T if T != void) | ... | + * | size of strN (KeySizeT) | strN (const CharT*) | value (T if T != void) | END_OF_BUCKET (KeySizeT) | + * + * m_buffer is null if there is no string in the bucket. + * + * KeySizeT and T are extended to be a multiple of CharT when stored in the buffer. + * + * Use std::malloc and std::free instead of new and delete so we can have access to std::realloc. 
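+ *
+ * Worked illustration (the concrete types are assumptions for this example only:
+ * CharT = char, KeySizeT = std::uint16_t, T = std::uint32_t, StoreNullTerminator = true,
+ * little-endian platform). A bucket holding the single entry "cat" -> 7 contains:
+ *
+ * | 03 00 | 'c' 'a' 't' '\0' | 07 00 00 00 | FF FF |
+ *
+ * that is 2 bytes of key size, the characters plus the extra null-terminator, 4 bytes
+ * for the stored value and finally END_OF_BUCKET (the maximum value of KeySizeT),
+ * for 12 bytes in total.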
+ */ +template +class array_bucket { + template + using has_mapped_type = typename std::integral_constant::value>; + + static_assert(!has_mapped_type::value || std::is_unsigned::value, + "T should be either void or an unsigned type."); + + static_assert(std::is_unsigned::value, "KeySizeT should be an unsigned type."); + +public: + template + class array_bucket_iterator; + + using char_type = CharT; + using key_size_type = KeySizeT; + using mapped_type = T; + using size_type = std::size_t; + using key_equal = KeyEqual; + using iterator = array_bucket_iterator; + using const_iterator = array_bucket_iterator; + + static_assert(sizeof(KeySizeT) <= sizeof(size_type), "sizeof(KeySizeT) should be <= sizeof(std::size_t;)"); + static_assert(std::is_unsigned::value, ""); + +private: + /** + * Return how much space in bytes the type U will take when stored in the buffer. + * As the buffer is of type CharT, U may take more space than sizeof(U). + * + * Example: sizeof(CharT) = 4, sizeof(U) = 2 => U will take 4 bytes in the buffer instead of 2. + */ + template + static constexpr size_type sizeof_in_buff() noexcept { + static_assert(is_power_of_two(sizeof(U)), "sizeof(U) should be a power of two."); + static_assert(is_power_of_two(sizeof(CharT)), "sizeof(CharT) should be a power of two."); + + return std::max(sizeof(U), sizeof(CharT)); + } + + /** + * Same as sizeof_in_buff, but instead of returning the size in bytes return it in term of sizeof(CharT). + */ + template + static constexpr size_type size_as_char_t() noexcept { + return sizeof_in_buff() / sizeof(CharT); + } + + static key_size_type read_key_size(const CharT* buffer) noexcept { + key_size_type key_size; + std::memcpy(&key_size, buffer, sizeof(key_size)); + + return key_size; + } + + static mapped_type read_value(const CharT* buffer) noexcept { + mapped_type value; + std::memcpy(&value, buffer, sizeof(value)); + + return value; + } + + static bool is_end_of_bucket(const CharT* buffer) noexcept { + return read_key_size(buffer) == END_OF_BUCKET; + } + +public: + /** + * Return the size required for an entry with a key of size 'key_size'. + */ + template::value>::type* = nullptr> + static size_type entry_required_bytes(size_type key_size) noexcept { + return sizeof_in_buff() + (key_size + KEY_EXTRA_SIZE) * sizeof(CharT); + } + + template::value>::type* = nullptr> + static size_type entry_required_bytes(size_type key_size) noexcept { + return sizeof_in_buff() + (key_size + KEY_EXTRA_SIZE) * sizeof(CharT) + + sizeof_in_buff(); + } + +private: + /** + * Return the size of the current entry in buffer. 
+ */ + static size_type entry_size_bytes(const CharT* buffer) noexcept { + return entry_required_bytes(read_key_size(buffer)); + } + +public: + template + class array_bucket_iterator { + friend class array_bucket; + + using buffer_type = typename std::conditional::type; + + explicit array_bucket_iterator(buffer_type* position) noexcept: m_position(position) { + } + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = void; + using difference_type = std::ptrdiff_t; + using reference = void; + using pointer = void; + + public: + array_bucket_iterator() noexcept: m_position(nullptr) { + } + + const CharT* key() const { + return m_position + size_as_char_t(); + } + + size_type key_size() const { + return read_key_size(m_position); + } + + template::value>::type* = nullptr> + U value() const { + return read_value(m_position + size_as_char_t() + key_size() + KEY_EXTRA_SIZE); + } + + + template::value && !IsConst && std::is_same::value>::type* = nullptr> + void set_value(U value) noexcept { + std::memcpy(m_position + size_as_char_t() + key_size() + KEY_EXTRA_SIZE, + &value, sizeof(value)); + } + + array_bucket_iterator& operator++() { + m_position += entry_size_bytes(m_position)/sizeof(CharT); + if(is_end_of_bucket(m_position)) { + m_position = nullptr; + } + + return *this; + } + + array_bucket_iterator operator++(int) { + array_bucket_iterator tmp(*this); + ++*this; + + return tmp; + } + + friend bool operator==(const array_bucket_iterator& lhs, const array_bucket_iterator& rhs) { + return lhs.m_position == rhs.m_position; + } + + friend bool operator!=(const array_bucket_iterator& lhs, const array_bucket_iterator& rhs) { + return !(lhs == rhs); + } + + private: + buffer_type* m_position; + }; + + + + static iterator end_it() noexcept { + return iterator(nullptr); + } + + static const_iterator cend_it() noexcept { + return const_iterator(nullptr); + } + +public: + array_bucket(): m_buffer(nullptr) { + } + + /** + * Reserve 'size' in the buffer of the bucket. The created bucket is empty. 
+ */ + array_bucket(std::size_t size): m_buffer(nullptr) { + if(size == 0) { + return; + } + + m_buffer = static_cast(std::malloc(size*sizeof(CharT) + sizeof_in_buff())); + if(m_buffer == nullptr) { + THROW(std::runtime_error, "Out of memory"); + } + + const auto end_of_bucket = END_OF_BUCKET; + std::memcpy(m_buffer, &end_of_bucket, sizeof(end_of_bucket)); + } + + ~array_bucket() { + clear(); + } + + array_bucket(const array_bucket& other) { + if(other.m_buffer == nullptr) { + m_buffer = nullptr; + return; + } + + const size_type other_buffer_size = other.size(); + m_buffer = static_cast(std::malloc(other_buffer_size*sizeof(CharT) + sizeof_in_buff())); + if(m_buffer == nullptr) { + THROW(std::runtime_error, "Out of memory"); + } + + std::memcpy(m_buffer, other.m_buffer, other_buffer_size*sizeof(CharT)); + + const auto end_of_bucket = END_OF_BUCKET; + std::memcpy(m_buffer + other_buffer_size, &end_of_bucket, sizeof(end_of_bucket)); + } + + array_bucket(array_bucket&& other) noexcept: m_buffer(other.m_buffer) { + other.m_buffer = nullptr; + } + + array_bucket& operator=(array_bucket other) noexcept { + other.swap(*this); + + return *this; + } + + void swap(array_bucket& other) noexcept { + std::swap(m_buffer, other.m_buffer); + } + + iterator begin() noexcept { return iterator(m_buffer); } + iterator end() noexcept { return iterator(nullptr); } + const_iterator begin() const noexcept { return cbegin(); } + const_iterator end() const noexcept { return cend(); } + const_iterator cbegin() const noexcept { return const_iterator(m_buffer); } + const_iterator cend() const noexcept { return const_iterator(nullptr); } + + /** + * Return an iterator pointing to the key entry if presents or, if not there, to the position + * past the last element of the bucket. Return end() if the bucket has not be initialized yet. + * + * The boolean of the pair is set to true if the key is there, false otherwise. + */ + std::pair find_or_end_of_bucket(const CharT* key, size_type key_size) const noexcept { + if(m_buffer == nullptr) { + return std::make_pair(cend(), false); + } + + const CharT* buffer_ptr_in_out = m_buffer; + const bool found = find_or_end_of_bucket_impl(key, key_size, buffer_ptr_in_out); + + return std::make_pair(const_iterator(buffer_ptr_in_out), found); + } + + /** + * Append the element 'key' with its potential value at the end of the bucket. + * 'end_of_bucket' should point past the end of the last element in the bucket, end() if the bucket + * was not initialized yet. You usually get this value from find_or_end_of_bucket. + * + * Return the position where the element was actually inserted. + */ + template + const_iterator append(const_iterator end_of_bucket, const CharT* key, size_type key_size, + ValueArgs&&... 
value) + { + const key_size_type key_sz = as_key_size_type(key_size); + + if(end_of_bucket == cend()) { + tsl_ah_assert(m_buffer == nullptr); + + const size_type buffer_size = entry_required_bytes(key_sz) + sizeof_in_buff(); + + m_buffer = static_cast(std::malloc(buffer_size)); + if(m_buffer == nullptr) { + THROW(std::runtime_error, "Out of memory"); + } + + append_impl(key, key_sz, m_buffer, std::forward(value)...); + + return const_iterator(m_buffer); + } + else { + tsl_ah_assert(is_end_of_bucket(end_of_bucket.m_position)); + + const size_type current_size = ((end_of_bucket.m_position + size_as_char_t()) - + m_buffer) * sizeof(CharT); + const size_type new_size = current_size + entry_required_bytes(key_sz); + + + CharT* new_buffer = static_cast(std::realloc(m_buffer, new_size)); + if(new_buffer == nullptr) { + THROW(std::runtime_error, "Out of memory"); + } + m_buffer = new_buffer; + + + CharT* buffer_append_pos = m_buffer + current_size / sizeof(CharT) - + size_as_char_t(); + append_impl(key, key_sz, buffer_append_pos, std::forward(value)...); + + return const_iterator(buffer_append_pos); + } + + } + + const_iterator erase(const_iterator position) noexcept { + tsl_ah_assert(position.m_position != nullptr && !is_end_of_bucket(position.m_position)); + + // get mutable pointers + CharT* start_entry = m_buffer + (position.m_position - m_buffer); + CharT* start_next_entry = start_entry + entry_size_bytes(start_entry) / sizeof(CharT); + + + CharT* end_buffer_ptr = start_next_entry; + while(!is_end_of_bucket(end_buffer_ptr)) { + end_buffer_ptr += entry_size_bytes(end_buffer_ptr) / sizeof(CharT); + } + end_buffer_ptr += size_as_char_t(); + + + const size_type size_to_move = (end_buffer_ptr - start_next_entry) * sizeof(CharT); + std::memmove(start_entry, start_next_entry, size_to_move); + + + if(is_end_of_bucket(m_buffer)) { + clear(); + return cend(); + } + else if(is_end_of_bucket(start_entry)) { + return cend(); + } + else { + return const_iterator(start_entry); + } + } + + /** + * Return true if an element has been erased + */ + bool erase(const CharT* key, size_type key_size) noexcept { + if(m_buffer == nullptr) { + return false; + } + + const CharT* entry_buffer_ptr_in_out = m_buffer; + bool found = find_or_end_of_bucket_impl(key, key_size, entry_buffer_ptr_in_out); + if(found) { + erase(const_iterator(entry_buffer_ptr_in_out)); + + return true; + } + else { + return false; + } + } + + /** + * Bucket should be big enough and there is no check to see if the key already exists. + * No check on key_size. + */ + template + void append_in_reserved_bucket_no_check(const CharT* key, size_type key_size, ValueArgs&&... 
value) noexcept { + CharT* buffer_ptr = m_buffer; + while(!is_end_of_bucket(buffer_ptr)) { + buffer_ptr += entry_size_bytes(buffer_ptr)/sizeof(CharT); + } + + append_impl(key, key_size_type(key_size), buffer_ptr, std::forward(value)...); + } + + bool empty() const noexcept { + return m_buffer == nullptr || is_end_of_bucket(m_buffer); + } + + void clear() noexcept { + std::free(m_buffer); + m_buffer = nullptr; + } + + iterator mutable_iterator(const_iterator pos) noexcept { + return iterator(m_buffer + (pos.m_position - m_buffer)); + } + + template + void serialize(Serializer& serializer) const { + const slz_size_type bucket_size = size(); + tsl_ah_assert(m_buffer != nullptr || bucket_size == 0); + + serializer(bucket_size); + serializer(m_buffer, bucket_size); + } + + template + static array_bucket deserialize(Deserializer& deserializer) { + array_bucket bucket; + const slz_size_type bucket_size_ds = deserialize_value(deserializer); + + if(bucket_size_ds == 0) { + return bucket; + } + + const std::size_t bucket_size = numeric_cast(bucket_size_ds, "Deserialized bucket_size is too big."); + bucket.m_buffer = static_cast(std::malloc(bucket_size*sizeof(CharT) + sizeof_in_buff())); + if(bucket.m_buffer == nullptr) { + THROW(std::runtime_error, "Out of memory"); + } + + + deserializer(bucket.m_buffer, bucket_size); + + const auto end_of_bucket = END_OF_BUCKET; + std::memcpy(bucket.m_buffer + bucket_size, &end_of_bucket, sizeof(end_of_bucket)); + + + tsl_ah_assert(bucket.size() == bucket_size); + return bucket; + } + +private: + key_size_type as_key_size_type(size_type key_size) const { + if(key_size > MAX_KEY_SIZE) { + THROW(std::length_error, "Key is too long."); + } + + return key_size_type(key_size); + } + + /* + * Return true if found, false otherwise. + * If true, buffer_ptr_in_out points to the start of the entry matching 'key'. + * If false, buffer_ptr_in_out points to where the 'key' should be inserted. + * + * Start search from buffer_ptr_in_out. 
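+     *
+     * This is the contract behind the public find_or_end_of_bucket()/append() pair.
+     * A sketch of how a caller typically combines them (mirroring what
+     * array_hash::emplace_impl does further below):
+     * \code
+     * auto res = bucket.find_or_end_of_bucket(key, key_size);
+     * if(!res.second) {
+     *     // Not found: res.first points past the last entry, append the new entry there.
+     *     bucket.append(res.first, key, key_size, value);
+     * }
+     * \endcode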
+ */ + bool find_or_end_of_bucket_impl(const CharT* key, size_type key_size, + const CharT* & buffer_ptr_in_out) const noexcept + { + while(!is_end_of_bucket(buffer_ptr_in_out)) { + const key_size_type buffer_key_size = read_key_size(buffer_ptr_in_out); + const CharT* buffer_str = buffer_ptr_in_out + size_as_char_t(); + if(KeyEqual()(buffer_str, buffer_key_size, key, key_size)) { + return true; + } + + buffer_ptr_in_out += entry_size_bytes(buffer_ptr_in_out)/sizeof(CharT); + } + + return false; + } + + template::value>::type* = nullptr> + void append_impl(const CharT* key, key_size_type key_size, CharT* buffer_append_pos) noexcept { + std::memcpy(buffer_append_pos, &key_size, sizeof(key_size)); + buffer_append_pos += size_as_char_t(); + + std::memcpy(buffer_append_pos, key, key_size * sizeof(CharT)); + buffer_append_pos += key_size; + + const CharT zero = 0; + std::memcpy(buffer_append_pos, &zero, KEY_EXTRA_SIZE * sizeof(CharT)); + buffer_append_pos += KEY_EXTRA_SIZE; + + const auto end_of_bucket = END_OF_BUCKET; + std::memcpy(buffer_append_pos, &end_of_bucket, sizeof(end_of_bucket)); + } + + template::value>::type* = nullptr> + void append_impl(const CharT* key, key_size_type key_size, CharT* buffer_append_pos, + typename array_bucket::mapped_type value) noexcept + { + std::memcpy(buffer_append_pos, &key_size, sizeof(key_size)); + buffer_append_pos += size_as_char_t(); + + std::memcpy(buffer_append_pos, key, key_size * sizeof(CharT)); + buffer_append_pos += key_size; + + const CharT zero = 0; + std::memcpy(buffer_append_pos, &zero, KEY_EXTRA_SIZE * sizeof(CharT)); + buffer_append_pos += KEY_EXTRA_SIZE; + + std::memcpy(buffer_append_pos, &value, sizeof(value)); + buffer_append_pos += size_as_char_t(); + + const auto end_of_bucket = END_OF_BUCKET; + std::memcpy(buffer_append_pos, &end_of_bucket, sizeof(end_of_bucket)); + } + + /** + * Return the number of CharT in m_buffer. As the size of the buffer is not stored to gain some space, + * the method need to find the EOF marker and is thus in O(n). + */ + size_type size() const noexcept { + if(m_buffer == nullptr) { + return 0; + } + + CharT* buffer_ptr = m_buffer; + while(!is_end_of_bucket(buffer_ptr)) { + buffer_ptr += entry_size_bytes(buffer_ptr)/sizeof(CharT); + } + + return buffer_ptr - m_buffer; + } + +private: + static const key_size_type END_OF_BUCKET = std::numeric_limits::max(); + static const key_size_type KEY_EXTRA_SIZE = StoreNullTerminator?1:0; + + CharT* m_buffer; + +public: + static const key_size_type MAX_KEY_SIZE = + // -1 for END_OF_BUCKET + key_size_type(std::numeric_limits::max() - KEY_EXTRA_SIZE - 1); +}; + + +template +class value_container { +public: + void clear() noexcept { + m_values.clear(); + } + + void reserve(std::size_t new_cap) { + m_values.reserve(new_cap); + } + + void shrink_to_fit() { + m_values.shrink_to_fit(); + } + + friend void swap(value_container& lhs, value_container& rhs) { + lhs.m_values.swap(rhs.m_values); + } + +protected: + static constexpr float VECTOR_GROWTH_RATE = 1.5f; + + // TODO use a sparse array? or a std::deque + std::vector m_values; +}; + +template<> +class value_container { +public: + void clear() noexcept { + } + + void shrink_to_fit() { + } + + void reserve(std::size_t /*new_cap*/) { + } +}; + + + +/** + * If there is no value in the array_hash (in the case of a set for example), T should be void. + * + * The size of a key string is limited to std::numeric_limits::max() - 1. + * + * The number of elements in the map is limited to std::numeric_limits::max(). 
+ */ +template +class array_hash: private value_container, private Hash, private GrowthPolicy { +private: + template + using has_mapped_type = typename std::integral_constant::value>; + + /** + * If there is a mapped type in array_hash, we store the values in m_values of value_container class + * and we store an index to m_values in the bucket. The index is of type IndexSizeT. + */ + using array_bucket = tsl::detail_array_hash::array_bucket::value, + IndexSizeT, + void>::type, + KeyEqual, KeySizeT, StoreNullTerminator>; + +public: + template + class array_hash_iterator; + + using char_type = CharT; + using key_size_type = KeySizeT; + using index_size_type = IndexSizeT; + using size_type = std::size_t; + using hasher = Hash; + using key_equal = KeyEqual; + using iterator = array_hash_iterator; + using const_iterator = array_hash_iterator; + + +/* + * Iterator classes + */ +public: + template + class array_hash_iterator { + friend class array_hash; + + private: + using iterator_array_bucket = typename array_bucket::const_iterator; + + using iterator_buckets = typename std::conditional::const_iterator, + typename std::vector::iterator>::type; + + using array_hash_ptr = typename std::conditional::type; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename std::conditional::value, T, void>::type; + using difference_type = std::ptrdiff_t; + using reference = typename std::conditional::value, + typename std::conditional< + IsConst, + typename std::add_lvalue_reference::type, + typename std::add_lvalue_reference::type>::type, + void>::type; + using pointer = typename std::conditional::value, + typename std::conditional::type, + void>::type; + + + private: + array_hash_iterator(iterator_buckets buckets_iterator, iterator_array_bucket array_bucket_iterator, + array_hash_ptr array_hash_p) noexcept: + m_buckets_iterator(buckets_iterator), + m_array_bucket_iterator(array_bucket_iterator), + m_array_hash(array_hash_p) + { + tsl_ah_assert(m_array_hash != nullptr); + } + + public: + array_hash_iterator() noexcept: m_array_hash(nullptr) { + } + + template::type* = nullptr> + array_hash_iterator(const array_hash_iterator& other) noexcept : + m_buckets_iterator(other.m_buckets_iterator), + m_array_bucket_iterator(other.m_array_bucket_iterator), + m_array_hash(other.m_array_hash) + { + } + + array_hash_iterator(const array_hash_iterator& other) = default; + array_hash_iterator(array_hash_iterator&& other) = default; + array_hash_iterator& operator=(const array_hash_iterator& other) = default; + array_hash_iterator& operator=(array_hash_iterator&& other) = default; + + const CharT* key() const { + return m_array_bucket_iterator.key(); + } + + size_type key_size() const { + return m_array_bucket_iterator.key_size(); + } + +#ifdef TSL_AH_HAS_STRING_VIEW + std::basic_string_view key_sv() const { + return std::basic_string_view(key(), key_size()); + } +#endif + + template::value>::type* = nullptr> + reference value() const { + return this->m_array_hash->m_values[value_position()]; + } + + template::value>::type* = nullptr> + reference operator*() const { + return value(); + } + + template::value>::type* = nullptr> + pointer operator->() const { + return std::addressof(value()); + } + + array_hash_iterator& operator++() { + tsl_ah_assert(m_buckets_iterator != m_array_hash->m_buckets_data.end()); + tsl_ah_assert(m_array_bucket_iterator != m_buckets_iterator->cend()); + + ++m_array_bucket_iterator; + if(m_array_bucket_iterator == m_buckets_iterator->cend()) { + do { + 
++m_buckets_iterator; + } while(m_buckets_iterator != m_array_hash->m_buckets_data.end() && + m_buckets_iterator->empty()); + + if(m_buckets_iterator != m_array_hash->m_buckets_data.end()) { + m_array_bucket_iterator = m_buckets_iterator->cbegin(); + } + } + + return *this; + } + + array_hash_iterator operator++(int) { + array_hash_iterator tmp(*this); + ++*this; + + return tmp; + } + + friend bool operator==(const array_hash_iterator& lhs, const array_hash_iterator& rhs) { + return lhs.m_buckets_iterator == rhs.m_buckets_iterator && + lhs.m_array_bucket_iterator == rhs.m_array_bucket_iterator && + lhs.m_array_hash == rhs.m_array_hash; + } + + friend bool operator!=(const array_hash_iterator& lhs, const array_hash_iterator& rhs) { + return !(lhs == rhs); + } + + private: + template::value>::type* = nullptr> + IndexSizeT value_position() const { + return this->m_array_bucket_iterator.value(); + } + + private: + iterator_buckets m_buckets_iterator; + iterator_array_bucket m_array_bucket_iterator; + + array_hash_ptr m_array_hash; + }; + + + +public: + array_hash(size_type bucket_count, + const Hash& hash, + float max_load_factor): value_container(), + Hash(hash), + GrowthPolicy(bucket_count), + m_buckets_data(bucket_count > max_bucket_count()? + max_bucket_count(): + bucket_count), + m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():m_buckets_data.data()), + m_nb_elements(0) + { + this->max_load_factor(max_load_factor); + } + + array_hash(const array_hash& other): value_container(other), + Hash(other), + GrowthPolicy(other), + m_buckets_data(other.m_buckets_data), + m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():m_buckets_data.data()), + m_nb_elements(other.m_nb_elements), + m_max_load_factor(other.m_max_load_factor), + m_load_threshold(other.m_load_threshold) + { + } + + array_hash(array_hash&& other) noexcept(std::is_nothrow_move_constructible>::value && + std::is_nothrow_move_constructible::value && + std::is_nothrow_move_constructible::value && + std::is_nothrow_move_constructible>::value) + : value_container(std::move(other)), + Hash(std::move(other)), + GrowthPolicy(std::move(other)), + m_buckets_data(std::move(other.m_buckets_data)), + m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():m_buckets_data.data()), + m_nb_elements(other.m_nb_elements), + m_max_load_factor(other.m_max_load_factor), + m_load_threshold(other.m_load_threshold) + { + other.value_container::clear(); + other.GrowthPolicy::clear(); + other.m_buckets_data.clear(); + other.m_buckets = static_empty_bucket_ptr(); + other.m_nb_elements = 0; + other.m_load_threshold = 0; + } + + array_hash& operator=(const array_hash& other) { + if(&other != this) { + value_container::operator=(other); + Hash::operator=(other); + GrowthPolicy::operator=(other); + + m_buckets_data = other.m_buckets_data; + m_buckets = m_buckets_data.empty()?static_empty_bucket_ptr(): + m_buckets_data.data(); + m_nb_elements = other.m_nb_elements; + m_max_load_factor = other.m_max_load_factor; + m_load_threshold = other.m_load_threshold; + } + + return *this; + } + + array_hash& operator=(array_hash&& other) { + other.swap(*this); + other.clear(); + + return *this; + } + + + /* + * Iterators + */ + iterator begin() noexcept { + auto begin = m_buckets_data.begin(); + while(begin != m_buckets_data.end() && begin->empty()) { + ++begin; + } + + return (begin != m_buckets_data.end())?iterator(begin, begin->cbegin(), this):end(); + } + + const_iterator begin() const noexcept { + return cbegin(); + } + + const_iterator cbegin() const 
noexcept { + auto begin = m_buckets_data.cbegin(); + while(begin != m_buckets_data.cend() && begin->empty()) { + ++begin; + } + + return (begin != m_buckets_data.cend())?const_iterator(begin, begin->cbegin(), this):cend(); + } + + iterator end() noexcept { + return iterator(m_buckets_data.end(), array_bucket::cend_it(), this); + } + + const_iterator end() const noexcept { + return cend(); + } + + const_iterator cend() const noexcept { + return const_iterator(m_buckets_data.end(), array_bucket::cend_it(), this); + } + + + /* + * Capacity + */ + bool empty() const noexcept { + return m_nb_elements == 0; + } + + size_type size() const noexcept { + return m_nb_elements; + } + + size_type max_size() const noexcept { + return std::numeric_limits::max(); + } + + size_type max_key_size() const noexcept { + return MAX_KEY_SIZE; + } + + void shrink_to_fit() { + clear_old_erased_values(); + value_container::shrink_to_fit(); + + rehash_impl(size_type(std::ceil(float(size())/max_load_factor()))); + } + + /* + * Modifiers + */ + void clear() noexcept { + value_container::clear(); + + for(auto& bucket: m_buckets_data) { + bucket.clear(); + } + + m_nb_elements = 0; + } + + + + template + std::pair emplace(const CharT* key, size_type key_size, ValueArgs&&... value_args) { + const std::size_t hash = hash_key(key, key_size); + std::size_t ibucket = bucket_for_hash(hash); + + auto it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + if(it_find.second) { + return std::make_pair(iterator(m_buckets_data.begin() + ibucket, it_find.first, this), false); + } + + if(grow_on_high_load()) { + ibucket = bucket_for_hash(hash); + it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + } + + return emplace_impl(ibucket, it_find.first, key, key_size, std::forward(value_args)...); + } + + template + std::pair insert_or_assign(const CharT* key, size_type key_size, M&& obj) { + auto it = emplace(key, key_size, std::forward(obj)); + if(!it.second) { + it.first.value() = std::forward(obj); + } + + return it; + } + + + + iterator erase(const_iterator pos) { + if(should_clear_old_erased_values()) { + clear_old_erased_values(); + } + + return erase_from_bucket(mutable_iterator(pos)); + } + + iterator erase(const_iterator first, const_iterator last) { + if(first == last) { + return mutable_iterator(first); + } + + /** + * When erasing an element from a bucket with erase_from_bucket, it invalidates all the iterators + * in the array bucket of the element (m_array_bucket_iterator) but not the iterators of the buckets + * itself (m_buckets_iterator). + * + * So first erase all the values between first and last which are not part of the bucket of last, + * and then erase carefully the values in last's bucket. 
+ */ + auto to_delete = mutable_iterator(first); + while(to_delete.m_buckets_iterator != last.m_buckets_iterator) { + to_delete = erase_from_bucket(to_delete); + } + + std::size_t nb_elements_until_last = std::distance(to_delete.m_array_bucket_iterator, + last.m_array_bucket_iterator); + while(nb_elements_until_last > 0) { + to_delete = erase_from_bucket(to_delete); + nb_elements_until_last--; + } + + if(should_clear_old_erased_values()) { + clear_old_erased_values(); + } + + return to_delete; + } + + + + size_type erase(const CharT* key, size_type key_size) { + return erase(key, key_size, hash_key(key, key_size)); + } + + size_type erase(const CharT* key, size_type key_size, std::size_t hash) { + if(should_clear_old_erased_values()) { + clear_old_erased_values(); + } + + const std::size_t ibucket = bucket_for_hash(hash); + if(m_buckets[ibucket].erase(key, key_size)) { + m_nb_elements--; + return 1; + } + else { + return 0; + } + } + + + + void swap(array_hash& other) { + using std::swap; + + swap(static_cast&>(*this), static_cast&>(other)); + swap(static_cast(*this), static_cast(other)); + swap(static_cast(*this), static_cast(other)); + swap(m_buckets_data, other.m_buckets_data); + swap(m_buckets, other.m_buckets); + swap(m_nb_elements, other.m_nb_elements); + swap(m_max_load_factor, other.m_max_load_factor); + swap(m_load_threshold, other.m_load_threshold); + } + + /* + * Lookup + */ + template::value>::type* = nullptr> + U& at(const CharT* key, size_type key_size) { + return at(key, key_size, hash_key(key, key_size)); + } + + template::value>::type* = nullptr> + const U& at(const CharT* key, size_type key_size) const { + return at(key, key_size, hash_key(key, key_size)); + } + + template::value>::type* = nullptr> + U& at(const CharT* key, size_type key_size, std::size_t hash) { + return const_cast(static_cast(this)->at(key, key_size, hash)); + } + + template::value>::type* = nullptr> + const U& at(const CharT* key, size_type key_size, std::size_t hash) const { + const std::size_t ibucket = bucket_for_hash(hash); + + auto it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + if(it_find.second) { + return this->m_values[it_find.first.value()]; + } + else { + THROW(std::out_of_range, "Couldn't find key."); + } + } + + + + template::value>::type* = nullptr> + U& access_operator(const CharT* key, size_type key_size) { + const std::size_t hash = hash_key(key, key_size); + std::size_t ibucket = bucket_for_hash(hash); + + auto it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + if(it_find.second) { + return this->m_values[it_find.first.value()]; + } + else { + if(grow_on_high_load()) { + ibucket = bucket_for_hash(hash); + it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + } + + return emplace_impl(ibucket, it_find.first, key, key_size, U{}).first.value(); + } + } + + + + size_type count(const CharT* key, size_type key_size) const { + return count(key, key_size, hash_key(key, key_size)); + } + + size_type count(const CharT* key, size_type key_size, std::size_t hash) const { + const std::size_t ibucket = bucket_for_hash(hash); + + auto it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + if(it_find.second) { + return 1; + } + else { + return 0; + } + } + + + + iterator find(const CharT* key, size_type key_size) { + return find(key, key_size, hash_key(key, key_size)); + } + + const_iterator find(const CharT* key, size_type key_size) const { + return find(key, key_size, hash_key(key, key_size)); + } + + iterator find(const CharT* 
key, size_type key_size, std::size_t hash) { + const std::size_t ibucket = bucket_for_hash(hash); + + auto it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + if(it_find.second) { + return iterator(m_buckets_data.begin() + ibucket, it_find.first, this); + } + else { + return end(); + } + } + + const_iterator find(const CharT* key, size_type key_size, std::size_t hash) const { + const std::size_t ibucket = bucket_for_hash(hash); + + auto it_find = m_buckets[ibucket].find_or_end_of_bucket(key, key_size); + if(it_find.second) { + return const_iterator(m_buckets_data.cbegin() + ibucket, it_find.first, this); + } + else { + return cend(); + } + } + + + + std::pair equal_range(const CharT* key, size_type key_size) { + return equal_range(key, key_size, hash_key(key, key_size)); + } + + std::pair equal_range(const CharT* key, size_type key_size) const { + return equal_range(key, key_size, hash_key(key, key_size)); + } + + std::pair equal_range(const CharT* key, size_type key_size, std::size_t hash) { + iterator it = find(key, key_size, hash); + return std::make_pair(it, (it == end())?it:std::next(it)); + } + + std::pair equal_range(const CharT* key, size_type key_size, + std::size_t hash) const + { + const_iterator it = find(key, key_size, hash); + return std::make_pair(it, (it == cend())?it:std::next(it)); + } + + /* + * Bucket interface + */ + size_type bucket_count() const { + return m_buckets_data.size(); + } + + size_type max_bucket_count() const { + return std::min(GrowthPolicy::max_bucket_count(), m_buckets_data.max_size()); + } + + + /* + * Hash policy + */ + float load_factor() const { + if(bucket_count() == 0) { + return 0; + } + + return float(m_nb_elements) / float(bucket_count()); + } + + float max_load_factor() const { + return m_max_load_factor; + } + + void max_load_factor(float ml) { + m_max_load_factor = std::max(0.1f, ml); + m_load_threshold = size_type(float(bucket_count())*m_max_load_factor); + } + + void rehash(size_type count) { + count = std::max(count, size_type(std::ceil(float(size())/max_load_factor()))); + rehash_impl(count); + } + + void reserve(size_type count) { + rehash(size_type(std::ceil(float(count)/max_load_factor()))); + } + + /* + * Observers + */ + hasher hash_function() const { + return static_cast(*this); + } + + // TODO add support for statefull KeyEqual + key_equal key_eq() const { + return KeyEqual(); + } + + /* + * Other + */ + iterator mutable_iterator(const_iterator it) noexcept { + auto it_bucket = m_buckets_data.begin() + std::distance(m_buckets_data.cbegin(), it.m_buckets_iterator); + return iterator(it_bucket, it.m_array_bucket_iterator, this); + } + + template + void serialize(Serializer& serializer) const { + serialize_impl(serializer); + } + + template + void deserialize(Deserializer& deserializer, bool hash_compatible) { + deserialize_impl(deserializer, hash_compatible); + } + +private: + std::size_t hash_key(const CharT* key, size_type key_size) const { + return Hash::operator()(key, key_size); + } + + std::size_t bucket_for_hash(std::size_t hash) const { + return GrowthPolicy::bucket_for_hash(hash); + } + + /** + * If there is a mapped_type, the mapped value in m_values is not erased now. + * It will be erased when the ratio between the size of the map and + * the size of the map + the number of deleted values still stored is low enough (see clear_old_erased_values). 
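+     *
+     * Numeric example (using the DEFAULT_CLEAR_OLD_ERASED_VALUE_THRESHOLD of 0.6
+     * defined at the end of the class): if the map holds 40 live elements while
+     * m_values still stores 100 slots, the ratio is 40/100 = 0.4 < 0.6, so the next
+     * erase compacts m_values through clear_old_erased_values(); at 70 live elements
+     * out of 100 the ratio is 0.7 and the stale values are kept for now.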
+ */ + iterator erase_from_bucket(iterator pos) noexcept { + auto array_bucket_next_it = pos.m_buckets_iterator->erase(pos.m_array_bucket_iterator); + m_nb_elements--; + + if(array_bucket_next_it != pos.m_buckets_iterator->cend()) { + return iterator(pos.m_buckets_iterator, array_bucket_next_it, this); + } + else { + do { + ++pos.m_buckets_iterator; + } while(pos.m_buckets_iterator != m_buckets_data.end() && pos.m_buckets_iterator->empty()); + + if(pos.m_buckets_iterator != m_buckets_data.end()) { + return iterator(pos.m_buckets_iterator, pos.m_buckets_iterator->cbegin(), this); + } + else { + return end(); + } + } + } + + + template::value>::type* = nullptr> + bool should_clear_old_erased_values(float /*threshold*/ = DEFAULT_CLEAR_OLD_ERASED_VALUE_THRESHOLD) const { + return false; + } + + template::value>::type* = nullptr> + bool should_clear_old_erased_values(float threshold = DEFAULT_CLEAR_OLD_ERASED_VALUE_THRESHOLD) const { + if(this->m_values.size() == 0) { + return false; + } + + return float(m_nb_elements)/float(this->m_values.size()) < threshold; + } + + template::value>::type* = nullptr> + void clear_old_erased_values() { + } + + template::value>::type* = nullptr> + void clear_old_erased_values() { + static_assert(std::is_nothrow_move_constructible::value || + std::is_copy_constructible::value, + "mapped_value must be either copy constructible or nothrow move constructible."); + + if(m_nb_elements == this->m_values.size()) { + return; + } + + std::vector new_values; + new_values.reserve(size()); + + for(auto it = begin(); it != end(); ++it) { + new_values.push_back(std::move_if_noexcept(it.value())); + } + + + IndexSizeT ivalue = 0; + for(auto it = begin(); it != end(); ++it) { + auto it_array_bucket = it.m_buckets_iterator->mutable_iterator(it.m_array_bucket_iterator); + it_array_bucket.set_value(ivalue); + ivalue++; + } + + new_values.swap(this->m_values); + tsl_ah_assert(m_nb_elements == this->m_values.size()); + } + + /** + * Return true if a rehash occurred. + */ + bool grow_on_high_load() { + if(size() >= m_load_threshold) { + rehash_impl(GrowthPolicy::next_bucket_count()); + return true; + } + + return false; + } + + template::value>::type* = nullptr> + std::pair emplace_impl(std::size_t ibucket, typename array_bucket::const_iterator end_of_bucket, + const CharT* key, size_type key_size, ValueArgs&&... value_args) + { + if(this->m_values.size() >= max_size()) { + // Try to clear old erased values lingering in m_values. Throw if it doesn't change anything. 
+ clear_old_erased_values(); + if(this->m_values.size() >= max_size()) { + THROW(std::length_error, "Can't insert value, too much values in the map."); + } + } + + if(this->m_values.size() == this->m_values.capacity()) { + this->m_values.reserve(std::size_t(float(this->m_values.size()) * value_container::VECTOR_GROWTH_RATE)); + } + + + this->m_values.emplace_back(std::forward(value_args)...); + + auto it = m_buckets[ibucket].append(end_of_bucket, key, key_size, IndexSizeT(this->m_values.size() - 1)); + m_nb_elements++; + + return std::make_pair(iterator(m_buckets_data.begin() + ibucket, it, this), true); + } + + template::value>::type* = nullptr> + std::pair emplace_impl(std::size_t ibucket, typename array_bucket::const_iterator end_of_bucket, + const CharT* key, size_type key_size) + { + if(m_nb_elements >= max_size()) { + THROW(std::length_error, "Can't insert value, too much values in the map."); + } + + auto it = m_buckets[ibucket].append(end_of_bucket, key, key_size); + m_nb_elements++; + + return std::make_pair(iterator(m_buckets_data.begin() + ibucket, it, this), true); + } + + void rehash_impl(size_type bucket_count) { + GrowthPolicy new_growth_policy(bucket_count); + if(bucket_count == this->bucket_count()) { + return; + } + + + if(should_clear_old_erased_values(REHASH_CLEAR_OLD_ERASED_VALUE_THRESHOLD)) { + clear_old_erased_values(); + } + + + std::vector required_size_for_bucket(bucket_count, 0); + std::vector bucket_for_ivalue(size(), 0); + + std::size_t ivalue = 0; + for(auto it = begin(); it != end(); ++it) { + const std::size_t hash = hash_key(it.key(), it.key_size()); + const std::size_t ibucket = new_growth_policy.bucket_for_hash(hash); + + bucket_for_ivalue[ivalue] = ibucket; + required_size_for_bucket[ibucket] += array_bucket::entry_required_bytes(it.key_size()); + ivalue++; + } + + + + + std::vector new_buckets; + new_buckets.reserve(bucket_count); + for(std::size_t ibucket = 0; ibucket < bucket_count; ibucket++) { + new_buckets.emplace_back(required_size_for_bucket[ibucket]); + } + + + ivalue = 0; + for(auto it = begin(); it != end(); ++it) { + const std::size_t ibucket = bucket_for_ivalue[ivalue]; + append_iterator_in_reserved_bucket_no_check(new_buckets[ibucket], it); + + ivalue++; + } + + + using std::swap; + swap(static_cast(*this), new_growth_policy); + + m_buckets_data.swap(new_buckets); + m_buckets = !m_buckets_data.empty()?m_buckets_data.data(): + static_empty_bucket_ptr(); + + // Call max_load_factor to change m_load_threshold + max_load_factor(m_max_load_factor); + } + + template::value>::type* = nullptr> + void append_iterator_in_reserved_bucket_no_check(array_bucket& bucket, iterator it) { + bucket.append_in_reserved_bucket_no_check(it.key(), it.key_size()); + } + + template::value>::type* = nullptr> + void append_iterator_in_reserved_bucket_no_check(array_bucket& bucket, iterator it) { + bucket.append_in_reserved_bucket_no_check(it.key(), it.key_size(), it.value_position()); + } + + + + /** + * On serialization the values of each bucket (if has_mapped_type is true) are serialized + * next to the bucket. The potential old erased values in value_container are thus not serialized. + * + * On deserialization, when hash_compatible is true, we reaffect the value index (IndexSizeT) of each + * bucket with set_value as the position of each value is no more the same in value_container compared + * to when they were serialized. 
+ * + * It's done this way as we can't call clear_old_erased_values() because we want the serialize + * method to remain const and we don't want to serialize/deserialize old erased values. As we may + * not serialize all the values in value_container, the values we keep can change of index. + * We thus have to modify the value indexes in the buckets. + */ + template + void serialize_impl(Serializer& serializer) const { + const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION; + serializer(version); + + const slz_size_type bucket_count = m_buckets_data.size(); + serializer(bucket_count); + + const slz_size_type nb_elements = m_nb_elements; + serializer(nb_elements); + + const float max_load_factor = m_max_load_factor; + serializer(max_load_factor); + + for(const array_bucket& bucket: m_buckets_data) { + bucket.serialize(serializer); + serialize_bucket_values(serializer, bucket); + } + } + + template::value>::type* = nullptr> + void serialize_bucket_values(Serializer& /*serializer*/, const array_bucket& /*bucket*/) const { + } + + template::value>::type* = nullptr> + void serialize_bucket_values(Serializer& serializer, const array_bucket& bucket) const { + for(auto it = bucket.begin(); it != bucket.end(); ++it) { + serializer(this->m_values[it.value()]); + } + } + + template + void deserialize_impl(Deserializer& deserializer, bool hash_compatible) { + tsl_ah_assert(m_buckets_data.empty()); // Current hash table must be empty + + const slz_size_type version = deserialize_value(deserializer); + // For now we only have one version of the serialization protocol. + // If it doesn't match there is a problem with the file. + if(version != SERIALIZATION_PROTOCOL_VERSION) { + THROW(std::runtime_error, "Can't deserialize the array_map/set. The protocol version header is invalid."); + } + + const slz_size_type bucket_count_ds = deserialize_value(deserializer); + const slz_size_type nb_elements = deserialize_value(deserializer); + const float max_load_factor = deserialize_value(deserializer); + + + m_nb_elements = numeric_cast(nb_elements, "Deserialized nb_elements is too big."); + + size_type bucket_count = numeric_cast(bucket_count_ds, "Deserialized bucket_count is too big."); + GrowthPolicy::operator=(GrowthPolicy(bucket_count)); + + + this->max_load_factor(max_load_factor); + value_container::reserve(m_nb_elements); + + + if(hash_compatible) { + if(bucket_count != bucket_count_ds) { + THROW(std::runtime_error, "The GrowthPolicy is not the same even though hash_compatible is true."); + } + + m_buckets_data.reserve(bucket_count); + for(size_type i = 0; i < bucket_count; i++) { + m_buckets_data.push_back(array_bucket::deserialize(deserializer)); + deserialize_bucket_values(deserializer, m_buckets_data.back()); + } + } + else { + m_buckets_data.resize(bucket_count); + for(size_type i = 0; i < bucket_count; i++) { + // TODO use buffer to avoid reallocation on each deserialization. 
+ array_bucket bucket = array_bucket::deserialize(deserializer); + deserialize_bucket_values(deserializer, bucket); + + for(auto it_val = bucket.cbegin(); it_val != bucket.cend(); ++it_val) { + const std::size_t ibucket = bucket_for_hash(hash_key(it_val.key(), it_val.key_size())); + + auto it_find = m_buckets_data[ibucket].find_or_end_of_bucket(it_val.key(), it_val.key_size()); + if(it_find.second) { + THROW(std::runtime_error, "Error on deserialization, the same key is presents multiple times."); + } + + append_array_bucket_iterator_in_bucket(m_buckets_data[ibucket], it_find.first, it_val); + } + } + } + + m_buckets = m_buckets_data.data(); + + + if(load_factor() > this->max_load_factor()) { + THROW(std::runtime_error, "Invalid max_load_factor. Check that the serializer and deserializer support " + "floats correctly as they can be converted implicitely to ints."); + } + } + + template::value>::type* = nullptr> + void deserialize_bucket_values(Deserializer& /*deserializer*/, array_bucket& /*bucket*/) { + } + + template::value>::type* = nullptr> + void deserialize_bucket_values(Deserializer& deserializer, array_bucket& bucket) { + for(auto it = bucket.begin(); it != bucket.end(); ++it) { + this->m_values.emplace_back(deserialize_value(deserializer)); + + tsl_ah_assert(this->m_values.size() - 1 <= std::numeric_limits::max()); + it.set_value(static_cast(this->m_values.size() - 1)); + } + } + + template::value>::type* = nullptr> + void append_array_bucket_iterator_in_bucket(array_bucket& bucket, + typename array_bucket::const_iterator end_of_bucket, + typename array_bucket::const_iterator it_val) + { + bucket.append(end_of_bucket, it_val.key(), it_val.key_size()); + } + + template::value>::type* = nullptr> + void append_array_bucket_iterator_in_bucket(array_bucket& bucket, + typename array_bucket::const_iterator end_of_bucket, + typename array_bucket::const_iterator it_val) + { + bucket.append(end_of_bucket, it_val.key(), it_val.key_size(), it_val.value()); + } + +public: + static const size_type DEFAULT_INIT_BUCKET_COUNT = 0; + static constexpr float DEFAULT_MAX_LOAD_FACTOR = 2.0f; + static const size_type MAX_KEY_SIZE = array_bucket::MAX_KEY_SIZE; + +private: + /** + * Protocol version currenlty used for serialization. + */ + static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1; + + + static constexpr float DEFAULT_CLEAR_OLD_ERASED_VALUE_THRESHOLD = 0.6f; + static constexpr float REHASH_CLEAR_OLD_ERASED_VALUE_THRESHOLD = 0.9f; + + + /** + * Return an always valid pointer to a static empty array_bucket. + */ + array_bucket* static_empty_bucket_ptr() { + static array_bucket empty_bucket; + return &empty_bucket; + } + +private: + std::vector m_buckets_data; + + /** + * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points to static_empty_bucket_ptr. + * This variable is useful to avoid the cost of checking if m_buckets_data is empty when trying + * to find an element. + * + * TODO Remove m_buckets_data and only use a pointer+size instead of a pointer+vector to save some space in the array_hash object. 
+ */ + array_bucket* m_buckets; + + IndexSizeT m_nb_elements; + float m_max_load_factor; + size_type m_load_threshold; +}; + +} // end namespace detail_array_hash +} //end namespace tsl + +#endif diff --git a/ios/include/tsl/array-hash/array_map.h b/ios/include/tsl/array-hash/array_map.h new file mode 100644 index 00000000..bc534bf2 --- /dev/null +++ b/ios/include/tsl/array-hash/array_map.h @@ -0,0 +1,863 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ARRAY_MAP_H +#define TSL_ARRAY_MAP_H + +#include +#include +#include +#include +#include +#include +#include +#include "array_hash.h" + +namespace tsl { + + +/** + * Implementation of a cache-conscious string hash map. + * + * The map stores the strings as `const CharT*`. If `StoreNullTerminator` is true, + * the strings are stored with the a null-terminator (the `key()` method of the iterators + * will return a pointer to this null-terminated string). Otherwise the null character + * is not stored (which allow an economy of 1 byte per string). + * + * The value `T` must be either nothrow move-constructible, copy-constructible or both. + * + * The size of a key string is limited to `std::numeric_limits::max() - 1`. + * That is 65 535 characters by default, but can be raised with the `KeySizeT` template parameter. + * See `max_key_size()` for an easy access to this limit. + * + * The number of elements in the map is limited to `std::numeric_limits::max()`. + * That is 4 294 967 296 elements, but can be raised with the `IndexSizeT` template parameter. + * See `max_size()` for an easy access to this limit. + * + * Iterators invalidation: + * - clear, operator=: always invalidate the iterators. + * - insert, emplace, operator[]: always invalidate the iterators. + * - erase: always invalidate the iterators. + * - shrink_to_fit: always invalidate the iterators. 
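+ *
+ * A minimal usage sketch (illustrative only, with the default template parameters;
+ * the include path and the printing code are assumptions of the example):
+ * \code
+ * #include <cstdio>
+ * #include <string>
+ * #include "tsl/array-hash/array_map.h"
+ *
+ * int main() {
+ *     tsl::array_map<char, int> map;
+ *     map.insert("apple", 1);
+ *     map.insert(std::string("banana"), 2);
+ *
+ *     for(auto it = map.begin(); it != map.end(); ++it) {
+ *         // key() is null-terminated because StoreNullTerminator is true by default.
+ *         std::printf("%s -> %d\n", it.key(), it.value());
+ *     }
+ *
+ *     map.erase("apple");   // the key is removed now, its value is reclaimed lazily
+ *     map.shrink_to_fit();  // forces the deferred values to be dropped
+ * }
+ * \endcode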
+ */ +template, + class KeyEqual = tsl::ah::str_equal, + bool StoreNullTerminator = true, + class KeySizeT = std::uint16_t, + class IndexSizeT = std::uint32_t, + class GrowthPolicy = tsl::ah::power_of_two_growth_policy<2>> +class array_map { +private: + template + using is_iterator = tsl::detail_array_hash::is_iterator; + + using ht = tsl::detail_array_hash::array_hash; + +public: + using char_type = typename ht::char_type; + using mapped_type = T; + using key_size_type = typename ht::key_size_type; + using index_size_type = typename ht::index_size_type; + using size_type = typename ht::size_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + +public: + array_map(): array_map(ht::DEFAULT_INIT_BUCKET_COUNT) { + } + + explicit array_map(size_type bucket_count, + const Hash& hash = Hash()): m_ht(bucket_count, hash, ht::DEFAULT_MAX_LOAD_FACTOR) + { + } + + template::value>::type* = nullptr> + array_map(InputIt first, InputIt last, + size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, + const Hash& hash = Hash()): array_map(bucket_count, hash) + { + insert(first, last); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + array_map(std::initializer_list, T>> init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, + const Hash& hash = Hash()): array_map(bucket_count, hash) + { + insert(init); + } +#else + array_map(std::initializer_list> init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, + const Hash& hash = Hash()): array_map(bucket_count, hash) + { + insert(init); + } +#endif + + + +#ifdef TSL_AH_HAS_STRING_VIEW + array_map& operator=(std::initializer_list, T>> ilist) { + clear(); + + reserve(ilist.size()); + insert(ilist); + + return *this; + } +#else + array_map& operator=(std::initializer_list> ilist) { + clear(); + + reserve(ilist.size()); + insert(ilist); + + return *this; + } +#endif + + + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + size_type max_key_size() const noexcept { return m_ht.max_key_size(); } + void shrink_to_fit() { m_ht.shrink_to_fit(); } + + + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + std::pair insert(const std::basic_string_view& key, const T& value) { + return m_ht.emplace(key.data(), key.size(), value); + } +#else + std::pair insert(const CharT* key, const T& value) { + return m_ht.emplace(key, std::char_traits::length(key), value); + } + + std::pair insert(const std::basic_string& key, const T& value) { + return m_ht.emplace(key.data(), key.size(), value); + } +#endif + std::pair insert_ks(const CharT* key, size_type key_size, const T& value) { + return m_ht.emplace(key, key_size, value); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + std::pair insert(const std::basic_string_view& key, T&& value) { + return m_ht.emplace(key.data(), key.size(), std::move(value)); + } +#else + std::pair insert(const CharT* key, T&& value) { + 
return m_ht.emplace(key, std::char_traits::length(key), std::move(value)); + } + + std::pair insert(const std::basic_string& key, T&& value) { + return m_ht.emplace(key.data(), key.size(), std::move(value)); + } +#endif + std::pair insert_ks(const CharT* key, size_type key_size, T&& value) { + return m_ht.emplace(key, key_size, std::move(value)); + } + + + + template::value>::type* = nullptr> + void insert(InputIt first, InputIt last) { + if(std::is_base_of::iterator_category>::value) + { + const auto nb_elements_insert = std::distance(first, last); + const std::size_t nb_free_buckets = std::size_t(float(bucket_count())*max_load_factor()) - size(); + + if(nb_elements_insert > 0 && nb_free_buckets < std::size_t(nb_elements_insert)) { + reserve(size() + std::size_t(nb_elements_insert)); + } + } + + for(auto it = first; it != last; ++it) { + insert_pair(*it); + } + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + void insert(std::initializer_list, T>> ilist) { + insert(ilist.begin(), ilist.end()); + } +#else + void insert(std::initializer_list> ilist) { + insert(ilist.begin(), ilist.end()); + } +#endif + + + +#ifdef TSL_AH_HAS_STRING_VIEW + template + std::pair insert_or_assign(const std::basic_string_view& key, M&& obj) { + return m_ht.insert_or_assign(key.data(), key.size(), std::forward(obj)); + } +#else + template + std::pair insert_or_assign(const CharT* key, M&& obj) { + return m_ht.insert_or_assign(key, std::char_traits::length(key), std::forward(obj)); + } + + template + std::pair insert_or_assign(const std::basic_string& key, M&& obj) { + return m_ht.insert_or_assign(key.data(), key.size(), std::forward(obj)); + } +#endif + template + std::pair insert_or_assign_ks(const CharT* key, size_type key_size, M&& obj) { + return m_ht.insert_or_assign(key, key_size, std::forward(obj)); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + template + std::pair emplace(const std::basic_string_view& key, Args&&... args) { + return m_ht.emplace(key.data(), key.size(), std::forward(args)...); + } +#else + template + std::pair emplace(const CharT* key, Args&&... args) { + return m_ht.emplace(key, std::char_traits::length(key), std::forward(args)...); + } + + template + std::pair emplace(const std::basic_string& key, Args&&... args) { + return m_ht.emplace(key.data(), key.size(), std::forward(args)...); + } +#endif + template + std::pair emplace_ks(const CharT* key, size_type key_size, Args&&... args) { + return m_ht.emplace(key, key_size, std::forward(args)...); + } + + + + /** + * Erase has an amortized O(1) runtime complexity, but even if it removes the key immediately, + * it doesn't do the same for the associated value T. + * + * T will only be removed when the ratio between the size of the map and + * the size of the map + the number of deleted values still stored is low enough. + * + * To force the deletion you can call shrink_to_fit. 
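+ *
+ * For example (an illustrative sketch of the behaviour described above):
+ *
+ *     map.erase("four");       // the key is unreachable from this point on
+ *     map.shrink_to_fit();     // forces the deletion of values still kept in storage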
+ */ + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + + /** + * @copydoc erase(const_iterator pos) + */ + iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc erase(const_iterator pos) + */ + size_type erase(const std::basic_string_view& key) { + return m_ht.erase(key.data(), key.size()); + } +#else + /** + * @copydoc erase(const_iterator pos) + */ + size_type erase(const CharT* key) { + return m_ht.erase(key, std::char_traits::length(key)); + } + + /** + * @copydoc erase(const_iterator pos) + */ + size_type erase(const std::basic_string& key) { + return m_ht.erase(key.data(), key.size()); + } +#endif + /** + * @copydoc erase(const_iterator pos) + */ + size_type erase_ks(const CharT* key, size_type key_size) { + return m_ht.erase(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + size_type erase(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.erase(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + size_type erase(const CharT* key, std::size_t precalculated_hash) { + return m_ht.erase(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + size_type erase(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.erase(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * @copydoc erase(const_iterator pos) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. 
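+ *
+ * For example (an illustrative sketch; this assumes the default `tsl::ah::str_hash`
+ * hasher, which is invoked with both the key and its size):
+ *
+ *     const char* key = "two";
+ *     const std::size_t key_size = std::char_traits<char>::length(key);
+ *     const std::size_t hash = map.hash_function()(key, key_size);
+ *
+ *     auto it = map.find_ks(key, key_size, hash);   // lookup without re-hashing
+ *     map.erase_ks(key, key_size, hash);            // erase without re-hashing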
+ */ + size_type erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.erase(key, key_size, precalculated_hash); + } + + + + void swap(array_map& other) { other.m_ht.swap(m_ht); } + + + + /* + * Lookup + */ +#ifdef TSL_AH_HAS_STRING_VIEW + T& at(const std::basic_string_view& key) { + return m_ht.at(key.data(), key.size()); + } + + const T& at(const std::basic_string_view& key) const { + return m_ht.at(key.data(), key.size()); + } +#else + T& at(const CharT* key) { + return m_ht.at(key, std::char_traits::length(key)); + } + + const T& at(const CharT* key) const { + return m_ht.at(key, std::char_traits::length(key)); + } + + T& at(const std::basic_string& key) { + return m_ht.at(key.data(), key.size()); + } + + const T& at(const std::basic_string& key) const { + return m_ht.at(key.data(), key.size()); + } +#endif + T& at_ks(const CharT* key, size_type key_size) { + return m_ht.at(key, key_size); + } + + const T& at_ks(const CharT* key, size_type key_size) const { + return m_ht.at(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + T& at(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.at(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const T& at(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.at(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + T& at(const CharT* key, std::size_t precalculated_hash) { + return m_ht.at(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const T& at(const CharT* key, std::size_t precalculated_hash) const { + return m_ht.at(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + T& at(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.at(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const T& at(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.at(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. 
+ */ + T& at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.at(key, key_size, precalculated_hash); + } + + /** + * @copydoc at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const T& at_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.at(key, key_size, precalculated_hash); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + T& operator[](const std::basic_string_view& key) { return m_ht.access_operator(key.data(), key.size()); } +#else + T& operator[](const CharT* key) { return m_ht.access_operator(key, std::char_traits::length(key)); } + T& operator[](const std::basic_string& key) { return m_ht.access_operator(key.data(), key.size()); } +#endif + + + +#ifdef TSL_AH_HAS_STRING_VIEW + size_type count(const std::basic_string_view& key) const { + return m_ht.count(key.data(), key.size()); + } +#else + size_type count(const CharT* key) const { + return m_ht.count(key, std::char_traits::length(key)); + } + + size_type count(const std::basic_string& key) const { + return m_ht.count(key.data(), key.size()); + } +#endif + size_type count_ks(const CharT* key, size_type key_size) const { + return m_ht.count(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const + */ + size_type count(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.count(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const + */ + size_type count(const CharT* key, std::size_t precalculated_hash) const { + return m_ht.count(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const + */ + size_type count(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.count(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. 
+ */ + size_type count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.count(key, key_size, precalculated_hash); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + iterator find(const std::basic_string_view& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string_view& key) const { + return m_ht.find(key.data(), key.size()); + } +#else + iterator find(const CharT* key) { + return m_ht.find(key, std::char_traits::length(key)); + } + + const_iterator find(const CharT* key) const { + return m_ht.find(key, std::char_traits::length(key)); + } + + iterator find(const std::basic_string& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string& key) const { + return m_ht.find(key.data(), key.size()); + } +#endif + iterator find_ks(const CharT* key, size_type key_size) { + return m_ht.find(key, key_size); + } + + const_iterator find_ks(const CharT* key, size_type key_size) const { + return m_ht.find(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + iterator find(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + iterator find(const CharT* key, std::size_t precalculated_hash) { + return m_ht.find(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find(const CharT* key, std::size_t precalculated_hash) const { + return m_ht.find(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + iterator find(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. 
+ */ + iterator find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.find(key, key_size, precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.find(key, key_size, precalculated_hash); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + std::pair equal_range(const std::basic_string_view& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string_view& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#else + std::pair equal_range(const CharT* key) { + return m_ht.equal_range(key, std::char_traits::length(key)); + } + + std::pair equal_range(const CharT* key) const { + return m_ht.equal_range(key, std::char_traits::length(key)); + } + + std::pair equal_range(const std::basic_string& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#endif + std::pair equal_range_ks(const CharT* key, size_type key_size) { + return m_ht.equal_range(key, key_size); + } + + std::pair equal_range_ks(const CharT* key, size_type key_size) const { + return m_ht.equal_range(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const CharT* key, std::size_t precalculated_hash) { + return m_ht.equal_range(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const CharT* key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. 
+ */ + std::pair equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.equal_range(key, key_size, precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, key_size, precalculated_hash); + } + + + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_ht.bucket_count(); } + size_type max_bucket_count() const { return m_ht.max_bucket_count(); } + + + /* + * Hash policy + */ + float load_factor() const { return m_ht.load_factor(); } + float max_load_factor() const { return m_ht.max_load_factor(); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + void rehash(size_type count) { m_ht.rehash(count); } + void reserve(size_type count) { m_ht.reserve(count); } + + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + key_equal key_eq() const { return m_ht.key_eq(); } + + + /* + * Other + */ + /** + * Return the `const_iterator it` as an `iterator`. + */ + iterator mutable_iterator(const_iterator it) noexcept { return m_ht.mutable_iterator(it); } + + /** + * Serialize the map through the `serializer` parameter. + * + * The `serializer` parameter must be a function object that supports the following calls: + * - `template void operator()(const U& value);` where the types `std::uint64_t`, `float` and `T` must be supported for U. + * - `void operator()(const CharT* value, std::size_t value_size);` + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, ...) of the types it serializes + * in the hands of the `Serializer` function object if compatibility is required. + */ + template + void serialize(Serializer& serializer) const { + m_ht.serialize(serializer); + } + + /** + * Deserialize a previously serialized map through the `deserializer` parameter. + * + * The `deserializer` parameter must be a function object that supports the following calls: + * - `template U operator()();` where the types `std::uint64_t`, `float` and `T` must be supported for U. + * - `void operator()(CharT* value_out, std::size_t value_size);` + * + * If the deserialized hash map type is hash compatible with the serialized map, the deserialization process can be + * sped up by setting `hash_compatible` to true. To be hash compatible, the Hash (take care of the 32-bits vs 64 bits), + * KeyEqual, GrowthPolicy, StoreNullTerminator, KeySizeT and IndexSizeT must behave the same than the ones used on the + * serialized map. Otherwise the behaviour is undefined with `hash_compatible` sets to true. + * + * The behaviour is undefined if the type `CharT` and `T` of the `array_map` are not the same as the + * types used during serialization. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, size of int, ...) of the types it + * deserializes in the hands of the `Deserializer` function object if compatibility is required. 
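+ *
+ * A minimal sketch of a matching serializer/deserializer pair, assuming trivially
+ * copyable value types and std::ofstream/std::ifstream streams (the struct names
+ * are illustrative, not part of the library):
+ *
+ *     struct stream_serializer {
+ *         std::ofstream& out;
+ *
+ *         template<class U>
+ *         void operator()(const U& value) {
+ *             out.write(reinterpret_cast<const char*>(&value), sizeof(U));
+ *         }
+ *
+ *         void operator()(const char* value, std::size_t value_size) {
+ *             out.write(value, static_cast<std::streamsize>(value_size));
+ *         }
+ *     };
+ *
+ *     struct stream_deserializer {
+ *         std::ifstream& in;
+ *
+ *         template<class U>
+ *         U operator()() {
+ *             U value;
+ *             in.read(reinterpret_cast<char*>(&value), sizeof(U));
+ *             return value;
+ *         }
+ *
+ *         void operator()(char* value_out, std::size_t value_size) {
+ *             in.read(value_out, static_cast<std::streamsize>(value_size));
+ *         }
+ *     };
+ *
+ *     // stream_serializer ser{out_file};  map.serialize(ser);
+ *     // stream_deserializer des{in_file}; auto map2 = tsl::array_map<char, int>::deserialize(des);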
+ */ + template + static array_map deserialize(Deserializer& deserializer, bool hash_compatible = false) { + array_map map(0); + map.m_ht.deserialize(deserializer, hash_compatible); + + return map; + } + + friend bool operator==(const array_map& lhs, const array_map& rhs) { + if(lhs.size() != rhs.size()) { + return false; + } + + for(auto it = lhs.cbegin(); it != lhs.cend(); ++it) { + const auto it_element_rhs = rhs.find_ks(it.key(), it.key_size()); + if(it_element_rhs == rhs.cend() || it.value() != it_element_rhs.value()) { + return false; + } + } + + return true; + } + + friend bool operator!=(const array_map& lhs, const array_map& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(array_map& lhs, array_map& rhs) { + lhs.swap(rhs); + } + +private: + template + void insert_pair(const std::pair& value) { + insert(value.first, value.second); + } + + template + void insert_pair(std::pair&& value) { + insert(value.first, std::move(value.second)); + } + +public: + static const size_type MAX_KEY_SIZE = ht::MAX_KEY_SIZE; + +private: + ht m_ht; +}; + + +/** + * Same as + * `tsl::array_map`. + */ +template, + class KeyEqual = tsl::ah::str_equal, + bool StoreNullTerminator = true, + class KeySizeT = std::uint16_t, + class IndexSizeT = std::uint32_t> +using array_pg_map = array_map; + +} //end namespace tsl + +#endif diff --git a/ios/include/tsl/array-hash/array_set.h b/ios/include/tsl/array-hash/array_set.h new file mode 100644 index 00000000..0322bcd0 --- /dev/null +++ b/ios/include/tsl/array-hash/array_set.h @@ -0,0 +1,664 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ARRAY_SET_H +#define TSL_ARRAY_SET_H + +#include +#include +#include +#include +#include +#include +#include +#include "array_hash.h" + +namespace tsl { + +/** + * Implementation of a cache-conscious string hash set. + * + * The set stores the strings as `const CharT*`. If `StoreNullTerminator` is true, + * the strings are stored with the a null-terminator (the `key()` method of the iterators + * will return a pointer to this null-terminated string). Otherwise the null character + * is not stored (which allow an economy of 1 byte per string). + * + * The size of a key string is limited to `std::numeric_limits::max() - 1`. + * That is 65 535 characters by default, but can be raised with the `KeySizeT` template parameter. + * See `max_key_size()` for an easy access to this limit. 
+ * + * The number of elements in the set is limited to `std::numeric_limits::max()`. + * That is 4 294 967 296 elements, but can be raised with the `IndexSizeT` template parameter. + * See `max_size()` for an easy access to this limit. + * + * Iterators invalidation: + * - clear, operator=: always invalidate the iterators. + * - insert, emplace, operator[]: always invalidate the iterators. + * - erase: always invalidate the iterators. + * - shrink_to_fit: always invalidate the iterators. + */ +template, + class KeyEqual = tsl::ah::str_equal, + bool StoreNullTerminator = true, + class KeySizeT = std::uint16_t, + class IndexSizeT = std::uint32_t, + class GrowthPolicy = tsl::ah::power_of_two_growth_policy<2>> +class array_set { +private: + template + using is_iterator = tsl::detail_array_hash::is_iterator; + + using ht = tsl::detail_array_hash::array_hash; + +public: + using char_type = typename ht::char_type; + using key_size_type = typename ht::key_size_type; + using index_size_type = typename ht::index_size_type; + using size_type = typename ht::size_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + + array_set(): array_set(ht::DEFAULT_INIT_BUCKET_COUNT) { + } + + explicit array_set(size_type bucket_count, + const Hash& hash = Hash()): m_ht(bucket_count, hash, ht::DEFAULT_MAX_LOAD_FACTOR) + { + } + + template::value>::type* = nullptr> + array_set(InputIt first, InputIt last, + size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, + const Hash& hash = Hash()): array_set(bucket_count, hash) + { + insert(first, last); + } + + +#ifdef TSL_AH_HAS_STRING_VIEW + array_set(std::initializer_list> init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, + const Hash& hash = Hash()): array_set(bucket_count, hash) + { + insert(init); + } +#else + array_set(std::initializer_list init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, + const Hash& hash = Hash()): array_set(bucket_count, hash) + { + insert(init); + } +#endif + + + +#ifdef TSL_AH_HAS_STRING_VIEW + array_set& operator=(std::initializer_list> ilist) { + clear(); + + reserve(ilist.size()); + insert(ilist); + + return *this; + } +#else + array_set& operator=(std::initializer_list ilist) { + clear(); + + reserve(ilist.size()); + insert(ilist); + + return *this; + } +#endif + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + size_type max_key_size() const noexcept { return m_ht.max_key_size(); } + void shrink_to_fit() { m_ht.shrink_to_fit(); } + + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + std::pair insert(const std::basic_string_view& key) { + return m_ht.emplace(key.data(), key.size()); + } +#else + std::pair insert(const CharT* key) { + return m_ht.emplace(key, std::char_traits::length(key)); + } + + std::pair insert(const std::basic_string& key) { + return m_ht.emplace(key.data(), key.size()); + 
} +#endif + std::pair insert_ks(const CharT* key, size_type key_size) { + return m_ht.emplace(key, key_size); + } + + + + template::value>::type* = nullptr> + void insert(InputIt first, InputIt last) { + if(std::is_base_of::iterator_category>::value) + { + const auto nb_elements_insert = std::distance(first, last); + const std::size_t nb_free_buckets = std::size_t(float(bucket_count())*max_load_factor()) - size(); + + if(nb_elements_insert > 0 && nb_free_buckets < std::size_t(nb_elements_insert)) { + reserve(size() + std::size_t(nb_elements_insert)); + } + } + + for(auto it = first; it != last; ++it) { + insert(*it); + } + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + void insert(std::initializer_list> ilist) { + insert(ilist.begin(), ilist.end()); + } +#else + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } +#endif + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc emplace_ks(const CharT* key, size_type key_size) + */ + std::pair emplace(const std::basic_string_view& key) { + return m_ht.emplace(key.data(), key.size()); + } +#else + /** + * @copydoc emplace_ks(const CharT* key, size_type key_size) + */ + std::pair emplace(const CharT* key) { + return m_ht.emplace(key, std::char_traits::length(key)); + } + + /** + * @copydoc emplace_ks(const CharT* key, size_type key_size) + */ + std::pair emplace(const std::basic_string& key) { + return m_ht.emplace(key.data(), key.size()); + } +#endif + /** + * No difference compared to the insert method. Mainly here for coherence with array_map. + */ + std::pair emplace_ks(const CharT* key, size_type key_size) { + return m_ht.emplace(key, key_size); + } + + + + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } + +#ifdef TSL_AH_HAS_STRING_VIEW + size_type erase(const std::basic_string_view& key) { + return m_ht.erase(key.data(), key.size()); + } +#else + size_type erase(const CharT* key) { + return m_ht.erase(key, std::char_traits::length(key)); + } + + size_type erase(const std::basic_string& key) { + return m_ht.erase(key.data(), key.size()); + } +#endif + size_type erase_ks(const CharT* key, size_type key_size) { + return m_ht.erase(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + size_type erase(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.erase(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + size_type erase(const CharT* key, std::size_t precalculated_hash) { + return m_ht.erase(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + size_type erase(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.erase(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. 
+ */ + size_type erase_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.erase(key, key_size, precalculated_hash); + } + + + + void swap(array_set& other) { other.m_ht.swap(m_ht); } + + + + /* + * Lookup + */ +#ifdef TSL_AH_HAS_STRING_VIEW + size_type count(const std::basic_string_view& key) const { return m_ht.count(key.data(), key.size()); } +#else + size_type count(const CharT* key) const { return m_ht.count(key, std::char_traits::length(key)); } + size_type count(const std::basic_string& key) const { return m_ht.count(key.data(), key.size()); } +#endif + size_type count_ks(const CharT* key, size_type key_size) const { return m_ht.count(key, key_size); } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const + */ + size_type count(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.count(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const + */ + size_type count(const CharT* key, std::size_t precalculated_hash) const { + return m_ht.count(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const + */ + size_type count(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.count(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. + */ + size_type count_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.count(key, key_size, precalculated_hash); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + iterator find(const std::basic_string_view& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string_view& key) const { + return m_ht.find(key.data(), key.size()); + } +#else + iterator find(const CharT* key) { + return m_ht.find(key, std::char_traits::length(key)); + } + + const_iterator find(const CharT* key) const { + return m_ht.find(key, std::char_traits::length(key)); + } + + iterator find(const std::basic_string& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string& key) const { + return m_ht.find(key.data(), key.size()); + } +#endif + iterator find_ks(const CharT* key, size_type key_size) { + return m_ht.find(key, key_size); + } + + const_iterator find_ks(const CharT* key, size_type key_size) const { + return m_ht.find(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + iterator find(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + iterator find(const CharT* key, std::size_t 
precalculated_hash) { + return m_ht.find(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find(const CharT* key, std::size_t precalculated_hash) const { + return m_ht.find(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + iterator find(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.find(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. + */ + iterator find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.find(key, key_size, precalculated_hash); + } + + /** + * @copydoc find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + const_iterator find_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.find(key, key_size, precalculated_hash); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + std::pair equal_range(const std::basic_string_view& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string_view& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#else + std::pair equal_range(const CharT* key) { + return m_ht.equal_range(key, std::char_traits::length(key)); + } + + std::pair equal_range(const CharT* key) const { + return m_ht.equal_range(key, std::char_traits::length(key)); + } + + std::pair equal_range(const std::basic_string& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#endif + std::pair equal_range_ks(const CharT* key, size_type key_size) { + return m_ht.equal_range(key, key_size); + } + + std::pair equal_range_ks(const CharT* key, size_type key_size) const { + return m_ht.equal_range(key, key_size); + } + + + +#ifdef TSL_AH_HAS_STRING_VIEW + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string_view& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string_view& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } +#else + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const CharT* key, std::size_t precalculated_hash) { + return m_ht.equal_range(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const CharT* key, 
std::size_t precalculated_hash) const { + return m_ht.equal_range(key, std::char_traits::length(key), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range(const std::basic_string& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key.data(), key.size(), precalculated_hash); + } +#endif + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash. + */ + std::pair equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) { + return m_ht.equal_range(key, key_size, precalculated_hash); + } + + /** + * @copydoc equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) + */ + std::pair equal_range_ks(const CharT* key, size_type key_size, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, key_size, precalculated_hash); + } + + + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_ht.bucket_count(); } + size_type max_bucket_count() const { return m_ht.max_bucket_count(); } + + + /* + * Hash policy + */ + float load_factor() const { return m_ht.load_factor(); } + float max_load_factor() const { return m_ht.max_load_factor(); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + void rehash(size_type count) { m_ht.rehash(count); } + void reserve(size_type count) { m_ht.reserve(count); } + + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + key_equal key_eq() const { return m_ht.key_eq(); } + + + /* + * Other + */ + /** + * Return the `const_iterator it` as an `iterator`. + */ + iterator mutable_iterator(const_iterator it) noexcept { return m_ht.mutable_iterator(it); } + + /** + * Serialize the set through the `serializer` parameter. + * + * The `serializer` parameter must be a function object that supports the following calls: + * - `template void operator()(const U& value);` where the types `std::uint64_t` and `float` must be supported for U. + * - `void operator()(const CharT* value, std::size_t value_size);` + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, ...) of the types it serializes + * in the hands of the `Serializer` function object if compatibility is required. + */ + template + void serialize(Serializer& serializer) const { + m_ht.serialize(serializer); + } + + /** + * Deserialize a previously serialized set through the `deserializer` parameter. + * + * The `deserializer` parameter must be a function object that supports the following calls: + * - `template U operator()();` where the types `std::uint64_t` and `float` must be supported for U. + * - `void operator()(CharT* value_out, std::size_t value_size);` + * + * If the deserialized hash set type is hash compatible with the serialized set, the deserialization process can be + * sped up by setting `hash_compatible` to true. 
To be hash compatible, the Hash (take care of the 32-bits vs 64 bits), + * KeyEqual, GrowthPolicy, StoreNullTerminator, KeySizeT and IndexSizeT must behave the same than the ones used on the + * serialized set. Otherwise the behaviour is undefined with `hash_compatible` sets to true. + * + * The behaviour is undefined if the type `CharT` of the `array_set` is not the same as the + * type used during serialization. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, size of int, ...) of the types it + * deserializes in the hands of the `Deserializer` function object if compatibility is required. + */ + template + static array_set deserialize(Deserializer& deserializer, bool hash_compatible = false) { + array_set set(0); + set.m_ht.deserialize(deserializer, hash_compatible); + + return set; + } + + friend bool operator==(const array_set& lhs, const array_set& rhs) { + if(lhs.size() != rhs.size()) { + return false; + } + + for(auto it = lhs.cbegin(); it != lhs.cend(); ++it) { + const auto it_element_rhs = rhs.find_ks(it.key(), it.key_size()); + if(it_element_rhs == rhs.cend()) { + return false; + } + } + + return true; + } + + friend bool operator!=(const array_set& lhs, const array_set& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(array_set& lhs, array_set& rhs) { + lhs.swap(rhs); + } + +public: + static const size_type MAX_KEY_SIZE = ht::MAX_KEY_SIZE; + +private: + ht m_ht; +}; + + +/** + * Same as + * `tsl::array_set`. + */ +template, + class KeyEqual = tsl::ah::str_equal, + bool StoreNullTerminator = true, + class KeySizeT = std::uint16_t, + class IndexSizeT = std::uint32_t> +using array_pg_set = array_set; + +} //end namespace tsl + +#endif diff --git a/ios/include/tsl/htrie_hash.h b/ios/include/tsl/htrie_hash.h new file mode 100644 index 00000000..99ee0724 --- /dev/null +++ b/ios/include/tsl/htrie_hash.h @@ -0,0 +1,2090 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_HTRIE_HASH_H +#define TSL_HTRIE_HASH_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "array-hash/array_map.h" +#include "array-hash/array_set.h" + + +/* + * __has_include is a bit useless (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79433), + * check also __cplusplus version. 
+ */ +#ifdef __has_include +# if __has_include() && __cplusplus >= 201703L +# define TSL_HT_HAS_STRING_VIEW +# endif +#endif + + +#ifdef TSL_HT_HAS_STRING_VIEW +# include +#endif + + +#ifdef TSL_DEBUG +# define tsl_ht_assert(expr) assert(expr) +#else +# define tsl_ht_assert(expr) (static_cast(0)) +#endif + + +namespace tsl { + +namespace detail_htrie_hash { + +template +struct is_iterator: std::false_type { +}; + +template +struct is_iterator::iterator_category, void>::value>::type>: std::true_type { +}; + +template +struct is_related: std::false_type {}; + +template +struct is_related: std::is_same::type>::type, + typename std::remove_cv::type>::type> {}; + +template +static T numeric_cast(U value, const char* error_message = "numeric_cast() failed.") { + T ret = static_cast(value); + if(static_cast(ret) != value) { + THROW(std::runtime_error, error_message); + } + + const bool is_same_signedness = (std::is_unsigned::value && std::is_unsigned::value) || + (std::is_signed::value && std::is_signed::value); + if(!is_same_signedness && (ret < T{}) != (value < U{})) { + THROW(std::runtime_error, error_message); + } + + return ret; +} + + +template +struct value_node { + /* + * Avoid conflict with copy constructor 'value_node(const value_node&)'. If we call the copy constructor + * with a mutable reference 'value_node(value_node&)', we don't want the forward constructor to be called. + */ + template::value>::type* = nullptr> + value_node(Args&&... args): m_value(std::forward(args)...) { + } + + T m_value; +}; + +template<> +struct value_node { +}; + + +/** + * T should be void if there is no value associated to a key (in a set for example). + */ +template +class htrie_hash { +private: + template + using has_value = typename std::integral_constant::value>; + + static_assert(std::is_same::value, "char is the only supported CharT type for now."); + + static const std::size_t ALPHABET_SIZE = + std::numeric_limits::type>::max() + 1; + + +public: + template + class htrie_hash_iterator; + + + using char_type = CharT; + using key_size_type = KeySizeT; + using size_type = std::size_t; + using hasher = Hash; + using iterator = htrie_hash_iterator; + using const_iterator = htrie_hash_iterator; + using prefix_iterator = htrie_hash_iterator; + using const_prefix_iterator = htrie_hash_iterator; + + +private: + using array_hash_type = + typename std::conditional< + has_value::value, + tsl::array_map, false, + KeySizeT, std::uint16_t, tsl::ah::power_of_two_growth_policy<4>>, + tsl::array_set, false, + KeySizeT, std::uint16_t, tsl::ah::power_of_two_growth_policy<4>>>::type; + + +private: + /* + * The tree is mainly composed of two nodes types: trie_node and hash_node which both have anode as base class. + * Each child is either a hash_node or a trie_node. + * + * A hash_node is always a leaf node, it doesn't have any child. + * + * Example: + * | ... | a |.. ..................... | f | ... | trie_node_1 + * \ \ + * hash_node_1 |array_hash = {"dd"}| |...| a | ... | trie_node_2 + * / + * |array_hash = {"ble", "bric", "lse"}| hash_node_2 + * + * + * Each trie_node may also have a value node, which contains a value T, if the trie_node marks + * the end of a string value. + * + * A trie node should at least have one child or a value node. There can't be a trie node without + * any child and no value node. + */ + + using value_node = tsl::detail_htrie_hash::value_node; + + + class trie_node; + class hash_node; + + // TODO better encapsulate operations modifying the tree. 
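+
+    /*
+     * The burst-trie layout described above is normally driven through the public
+     * wrappers tsl::htrie_map / tsl::htrie_set. An illustrative sketch, assuming
+     * their documented API:
+     *
+     *     tsl::htrie_map<char, int> map;
+     *     map.insert("apple", 1);
+     *     map.insert_ks("apply", 5, 2);        // explicit key size, no null terminator required
+     *
+     *     auto prefix_range = map.equal_prefix_range("app");
+     *     for(auto it = prefix_range.first; it != prefix_range.second; ++it) {
+     *         // it.key() rebuilds the full key, it.value() gives the mapped value
+     *     }
+     */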
+ class anode { + friend class trie_node; + + public: + /* + * TODO Avoid the virtual to economize 8 bytes. We could use a custom deleter in the std::unique_ptr + * we use (as we know if an anode is a trie_node or hash_node). + */ + virtual ~anode() = default; + + bool is_trie_node() const noexcept { + return m_node_type == node_type::TRIE_NODE; + } + + bool is_hash_node() const noexcept { + return m_node_type == node_type::HASH_NODE; + } + + trie_node& as_trie_node() noexcept { + tsl_ht_assert(is_trie_node()); + return static_cast(*this); + } + + hash_node& as_hash_node() noexcept { + tsl_ht_assert(is_hash_node()); + return static_cast(*this); + } + + const trie_node& as_trie_node() const noexcept { + tsl_ht_assert(is_trie_node()); + return static_cast(*this); + } + + const hash_node& as_hash_node() const noexcept { + tsl_ht_assert(is_hash_node()); + return static_cast(*this); + } + + /** + * @see m_child_of_char + */ + CharT child_of_char() const noexcept { + tsl_ht_assert(parent() != nullptr); + return m_child_of_char; + } + + /** + * Return nullptr if none. + */ + trie_node* parent() noexcept { + return m_parent_node; + } + + const trie_node* parent() const noexcept { + return m_parent_node; + } + + protected: + enum class node_type: unsigned char { + HASH_NODE, + TRIE_NODE + }; + + anode(node_type node_type_): m_node_type(node_type_), m_child_of_char(0), + m_parent_node(nullptr) + { + } + + anode(node_type node_type_, CharT child_of_char): m_node_type(node_type_), + m_child_of_char(child_of_char), + m_parent_node(nullptr) + { + } + + + protected: + node_type m_node_type; + + /** + * If the node has a parent, then it's a descendant of some char. + * + * Example: + * | ... | a | b | ... | trie_node_1 + * \ + * |...| a | ... | trie_node_2 + * / + * |array_hash| hash_node_1 + * + * trie_node_2 is a child of trie_node_1 through 'b', it will have 'b' as m_child_of_char. + * hash_node_1 is a child of trie_node_2 through 'a', it will have 'a' as m_child_of_char. + * + * trie_node_1 has no parent, its m_child_of_char is undefined. + */ + CharT m_child_of_char; + trie_node* m_parent_node; + }; + + // Give the position in trie_node::m_children corresponding to the character c + static std::size_t as_position(CharT c) noexcept { + return static_cast(static_cast::type>(c)); + } + + class trie_node: public anode { + public: + trie_node(): anode(anode::node_type::TRIE_NODE), + m_value_node(nullptr), m_children() + { + } + + trie_node(const trie_node& other): anode(anode::node_type::TRIE_NODE, other.m_child_of_char), + m_value_node(nullptr), m_children() + { + if(other.m_value_node != nullptr) { + m_value_node = make_unique(*other.m_value_node); + } + + // TODO avoid recursion + for(std::size_t ichild = 0; ichild < other.m_children.size(); ichild++) { + if(other.m_children[ichild] != nullptr) { + if(other.m_children[ichild]->is_hash_node()) { + m_children[ichild] = make_unique(other.m_children[ichild]->as_hash_node()); + } + else { + m_children[ichild] = make_unique(other.m_children[ichild]->as_trie_node()); + } + + m_children[ichild]->m_parent_node = this; + } + } + } + + trie_node(trie_node&& other) = delete; + trie_node& operator=(const trie_node& other) = delete; + trie_node& operator=(trie_node&& other) = delete; + + /** + * Return nullptr if none. 
+ */ + anode* first_child() noexcept { + return const_cast(static_cast(this)->first_child()); + } + + const anode* first_child() const noexcept { + for(std::size_t ichild = 0; ichild < m_children.size(); ichild++) { + if(m_children[ichild] != nullptr) { + return m_children[ichild].get(); + } + } + + return nullptr; + } + + + /** + * Get the next_child that come after current_child. Return nullptr if no next child. + */ + anode* next_child(const anode& current_child) noexcept { + return const_cast(static_cast(this)->next_child(current_child)); + } + + const anode* next_child(const anode& current_child) const noexcept { + tsl_ht_assert(current_child.parent() == this); + + for(std::size_t ichild = as_position(current_child.child_of_char()) + 1; + ichild < m_children.size(); + ichild++) + { + if(m_children[ichild] != nullptr) { + return m_children[ichild].get(); + } + } + + return nullptr; + } + + + /** + * Return the first left-descendant trie node with an m_value_node. If none return the most left trie node. + */ + trie_node& most_left_descendant_value_trie_node() noexcept { + return const_cast(static_cast(this)->most_left_descendant_value_trie_node()); + } + + const trie_node& most_left_descendant_value_trie_node() const noexcept { + const trie_node* current_node = this; + while(true) { + if(current_node->m_value_node != nullptr) { + return *current_node; + } + + const anode* first_child = current_node->first_child(); + tsl_ht_assert(first_child != nullptr); // a trie_node must either have a value_node or at least one child. + if(first_child->is_hash_node()) { + return *current_node; + } + + current_node = &first_child->as_trie_node(); + } + } + + + + size_type nb_children() const noexcept { + return std::count_if(m_children.cbegin(), m_children.cend(), + [](const std::unique_ptr& n) { return n != nullptr; }); + } + + bool empty() const noexcept { + return std::all_of(m_children.cbegin(), m_children.cend(), + [](const std::unique_ptr& n) { return n == nullptr; }); + } + + std::unique_ptr& child(CharT for_char) noexcept { + return m_children[as_position(for_char)]; + } + + const std::unique_ptr& child(CharT for_char) const noexcept { + return m_children[as_position(for_char)]; + } + + typename std::array, ALPHABET_SIZE>::iterator begin() noexcept { + return m_children.begin(); + } + + typename std::array, ALPHABET_SIZE>::iterator end() noexcept { + return m_children.end(); + } + + void set_child(CharT for_char, std::unique_ptr child) noexcept { + if(child != nullptr) { + child->m_child_of_char = for_char; + child->m_parent_node = this; + } + + m_children[as_position(for_char)] = std::move(child); + } + + std::unique_ptr& val_node() noexcept { + return m_value_node; + } + + const std::unique_ptr& val_node() const noexcept { + return m_value_node; + } + + private: + // TODO Avoid storing a value_node when has_value::value is false + std::unique_ptr m_value_node; + + /** + * Each character CharT corresponds to one position in the array. To convert a character + * to a position use the as_position method. + * + * TODO Try to reduce the size of m_children with a hash map, linear/binary search on array, ... + * TODO Store number of non-null values in m_children. Check if we can store this value in the alignment + * space as we don't want the node to get bigger (empty() and nb_children() are rarely used so it is + * not an important variable). 
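+         *
+         * For example (illustrative, assuming an 8-bit char and ASCII), the double cast
+         * in as_position() keeps the returned index within [0, 255] even for a signed char:
+         *
+         *     as_position('a')                   == 97
+         *     as_position(static_cast<char>(-1)) == 255    // not a negative index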
+ */ + std::array, ALPHABET_SIZE> m_children; + }; + + + class hash_node: public anode { + public: + hash_node(const Hash& hash, float max_load_factor): + hash_node(HASH_NODE_DEFAULT_INIT_BUCKETS_COUNT, hash, max_load_factor) + { + } + + hash_node(size_type bucket_count, const Hash& hash, float max_load_factor): + anode(anode::node_type::HASH_NODE), m_array_hash(bucket_count, hash) + { + m_array_hash.max_load_factor(max_load_factor); + } + + hash_node(array_hash_type&& array_hash) noexcept(std::is_nothrow_move_constructible::value): + anode(anode::node_type::HASH_NODE), m_array_hash(std::move(array_hash)) + { + } + + hash_node(const hash_node& other) = default; + + hash_node(hash_node&& other) = delete; + hash_node& operator=(const hash_node& other) = delete; + hash_node& operator=(hash_node&& other) = delete; + + + array_hash_type& array_hash() noexcept { + return m_array_hash; + } + + const array_hash_type& array_hash() const noexcept { + return m_array_hash; + } + + private: + array_hash_type m_array_hash; + }; + + + +public: + template + class htrie_hash_iterator { + friend class htrie_hash; + + private: + using anode_type = typename std::conditional::type; + using trie_node_type = typename std::conditional::type; + using hash_node_type = typename std::conditional::type; + + using array_hash_iterator_type = + typename std::conditional::type; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename std::conditional::value, T, void>::type; + using difference_type = std::ptrdiff_t; + using reference = typename std::conditional< + has_value::value, + typename std::conditional::type, + typename std::add_lvalue_reference::type>::type, + void>::type; + using pointer = typename std::conditional< + has_value::value, + typename std::conditional::type, + void>::type; + + private: + /** + * Start reading from start_hash_node->array_hash().begin(). + */ + htrie_hash_iterator(hash_node_type& start_hash_node) noexcept: + htrie_hash_iterator(start_hash_node, start_hash_node.array_hash().begin()) + { + } + + /** + * Start reading from iterator begin in start_hash_node->array_hash(). + */ + htrie_hash_iterator(hash_node_type& start_hash_node, array_hash_iterator_type begin) noexcept: + m_current_trie_node(start_hash_node.parent()), m_current_hash_node(&start_hash_node), + m_array_hash_iterator(begin), + m_array_hash_end_iterator(start_hash_node.array_hash().end()), + m_read_trie_node_value(false) + { + tsl_ht_assert(!m_current_hash_node->array_hash().empty()); + } + + /** + * Start reading from the value in start_trie_node. start_trie_node->val_node() should be non-null. 
+ */ + htrie_hash_iterator(trie_node_type& start_trie_node) noexcept: + m_current_trie_node(&start_trie_node), m_current_hash_node(nullptr), + m_read_trie_node_value(true) + { + tsl_ht_assert(m_current_trie_node->val_node() != nullptr); + } + + template::type* = nullptr> + htrie_hash_iterator(trie_node_type* tnode, hash_node_type* hnode, + array_hash_iterator_type begin, array_hash_iterator_type end, + bool read_trie_node_value) noexcept: + m_current_trie_node(tnode), m_current_hash_node(hnode), + m_array_hash_iterator(begin), m_array_hash_end_iterator(end), + m_read_trie_node_value(read_trie_node_value) + { + } + + template::type* = nullptr> + htrie_hash_iterator(trie_node_type* tnode, hash_node_type* hnode, + array_hash_iterator_type begin, array_hash_iterator_type end, + bool read_trie_node_value, std::basic_string prefix_filter) noexcept: + m_current_trie_node(tnode), m_current_hash_node(hnode), + m_array_hash_iterator(begin), m_array_hash_end_iterator(end), + m_read_trie_node_value(read_trie_node_value), m_prefix_filter(std::move(prefix_filter)) + { + } + + public: + htrie_hash_iterator() noexcept { + } + + // Copy constructor from iterator to const_iterator. + template::type* = nullptr> + htrie_hash_iterator(const htrie_hash_iterator& other) noexcept: + m_current_trie_node(other.m_current_trie_node), m_current_hash_node(other.m_current_hash_node), + m_array_hash_iterator(other.m_array_hash_iterator), + m_array_hash_end_iterator(other.m_array_hash_end_iterator), + m_read_trie_node_value(other.m_read_trie_node_value) + { + } + + // Copy constructor from iterator to const_iterator. + template::type* = nullptr> + htrie_hash_iterator(const htrie_hash_iterator& other) noexcept: + m_current_trie_node(other.m_current_trie_node), m_current_hash_node(other.m_current_hash_node), + m_array_hash_iterator(other.m_array_hash_iterator), + m_array_hash_end_iterator(other.m_array_hash_end_iterator), + m_read_trie_node_value(other.m_read_trie_node_value), m_prefix_filter(other.m_prefix_filter) + { + } + + htrie_hash_iterator(const htrie_hash_iterator& other) = default; + htrie_hash_iterator(htrie_hash_iterator&& other) = default; + htrie_hash_iterator& operator=(const htrie_hash_iterator& other) = default; + htrie_hash_iterator& operator=(htrie_hash_iterator&& other) = default; + + void key(std::basic_string& key_buffer_out) const { + key_buffer_out.clear(); + + trie_node_type* tnode = m_current_trie_node; + while(tnode != nullptr && tnode->parent() != nullptr) { + key_buffer_out.push_back(tnode->child_of_char()); + tnode = tnode->parent(); + } + + std::reverse(key_buffer_out.begin(), key_buffer_out.end()); + + if(!m_read_trie_node_value) { + tsl_ht_assert(m_current_hash_node != nullptr); + if(m_current_hash_node->parent() != nullptr) { + key_buffer_out.push_back(m_current_hash_node->child_of_char()); + } + + key_buffer_out.append(m_array_hash_iterator.key(), m_array_hash_iterator.key_size()); + } + } + + std::basic_string key() const { + std::basic_string key_buffer; + key(key_buffer); + + return key_buffer; + } + + template::value>::type* = nullptr> + reference value() const { + if(this->m_read_trie_node_value) { + tsl_ht_assert(this->m_current_trie_node != nullptr); + tsl_ht_assert(this->m_current_trie_node->val_node() != nullptr); + + return this->m_current_trie_node->val_node()->m_value; + } + else { + return this->m_array_hash_iterator.value(); + } + } + + template::value>::type* = nullptr> + reference operator*() const { + return value(); + } + + template::value>::type* = nullptr> + pointer 
operator->() const { + return std::addressof(value()); + } + + htrie_hash_iterator& operator++() { + if(m_read_trie_node_value) { + tsl_ht_assert(m_current_trie_node != nullptr); + + m_read_trie_node_value = false; + + anode_type* child = m_current_trie_node->first_child(); + if(child != nullptr) { + set_most_left_descendant_as_next_node(*child); + } + else if(m_current_trie_node->parent() != nullptr) { + trie_node_type* current_node_child = m_current_trie_node; + m_current_trie_node = m_current_trie_node->parent(); + + set_next_node_ascending(*current_node_child); + } + else { + set_as_end_iterator(); + } + } + else { + ++m_array_hash_iterator; + if(m_array_hash_iterator != m_array_hash_end_iterator) { + filter_prefix(); + } + // End of the road, set the iterator as an end node. + else if(m_current_trie_node == nullptr) { + set_as_end_iterator(); + } + else { + tsl_ht_assert(m_current_hash_node != nullptr); + set_next_node_ascending(*m_current_hash_node); + } + } + + + return *this; + } + + htrie_hash_iterator operator++(int) { + htrie_hash_iterator tmp(*this); + ++*this; + + return tmp; + } + + friend bool operator==(const htrie_hash_iterator& lhs, const htrie_hash_iterator& rhs) { + if(lhs.m_current_trie_node != rhs.m_current_trie_node || + lhs.m_read_trie_node_value != rhs.m_read_trie_node_value) + { + return false; + } + else if(lhs.m_read_trie_node_value) { + return true; + } + else { + if(lhs.m_current_hash_node != rhs.m_current_hash_node) { + return false; + } + else if(lhs.m_current_hash_node == nullptr) { + return true; + } + else { + return lhs.m_array_hash_iterator == rhs.m_array_hash_iterator && + lhs.m_array_hash_end_iterator == rhs.m_array_hash_end_iterator; + } + } + } + + friend bool operator!=(const htrie_hash_iterator& lhs, const htrie_hash_iterator& rhs) { + return !(lhs == rhs); + } + + private: + void hash_node_prefix(std::basic_string& key_buffer_out) const { + tsl_ht_assert(!m_read_trie_node_value); + key_buffer_out.clear(); + + trie_node_type* tnode = m_current_trie_node; + while(tnode != nullptr && tnode->parent() != nullptr) { + key_buffer_out.push_back(tnode->child_of_char()); + tnode = tnode->parent(); + } + + std::reverse(key_buffer_out.begin(), key_buffer_out.end()); + + tsl_ht_assert(m_current_hash_node != nullptr); + if(m_current_hash_node->parent() != nullptr) { + key_buffer_out.push_back(m_current_hash_node->child_of_char()); + } + } + + template::type* = nullptr> + void filter_prefix() { + } + + template::type* = nullptr> + void filter_prefix() { + tsl_ht_assert(m_array_hash_iterator != m_array_hash_end_iterator); + tsl_ht_assert(!m_read_trie_node_value && m_current_hash_node != nullptr); + + if(m_prefix_filter.empty()) { + return; + } + + while((m_prefix_filter.size() > m_array_hash_iterator.key_size() || + m_prefix_filter.compare(0, m_prefix_filter.size(), + m_array_hash_iterator.key(), m_prefix_filter.size()) != 0)) + { + ++m_array_hash_iterator; + if(m_array_hash_iterator == m_array_hash_end_iterator) { + if(m_current_trie_node == nullptr) { + set_as_end_iterator(); + } + else { + tsl_ht_assert(m_current_hash_node != nullptr); + set_next_node_ascending(*m_current_hash_node); + } + + return; + } + } + } + + /** + * Go back up in the tree to get the current_trie_node_child sibling. + * If none, try to go back up more in the tree to check the siblings of the ancestors. 
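+ *
+ * Illustrative behaviour (hypothetical tree shape): once the subtree rooted at
+ * current_trie_node_child is exhausted, the iterator climbs towards the root until it
+ * finds an ancestor with a following child, then descends to the left-most value of
+ * that sibling subtree; if no such sibling exists, the iterator becomes the end iterator.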
+ */ + void set_next_node_ascending(anode_type& current_trie_node_child) { + tsl_ht_assert(m_current_trie_node != nullptr); + tsl_ht_assert(current_trie_node_child.parent() == m_current_trie_node); + + anode_type* next_node = m_current_trie_node->next_child(current_trie_node_child); + while(next_node == nullptr && m_current_trie_node->parent() != nullptr) { + anode_type* current_child = m_current_trie_node; + m_current_trie_node = m_current_trie_node->parent(); + next_node = m_current_trie_node->next_child(*current_child); + } + + // End of the road, set the iterator as an end node. + if(next_node == nullptr) { + set_as_end_iterator(); + } + else { + set_most_left_descendant_as_next_node(*next_node); + } + } + + void set_most_left_descendant_as_next_node(anode_type& search_start) { + if(search_start.is_hash_node()) { + set_current_hash_node(search_start.as_hash_node()); + } + else { + m_current_trie_node = &search_start.as_trie_node().most_left_descendant_value_trie_node(); + if(m_current_trie_node->val_node() != nullptr) { + m_read_trie_node_value = true; + } + else { + anode_type* first_child = m_current_trie_node->first_child(); + // a trie_node must either have a value_node or at least one child. + tsl_ht_assert(first_child != nullptr); + + set_current_hash_node(first_child->as_hash_node()); + } + } + } + + void set_current_hash_node(hash_node_type& hnode) { + tsl_ht_assert(!hnode.array_hash().empty()); + + m_current_hash_node = &hnode; + m_array_hash_iterator = m_current_hash_node->array_hash().begin(); + m_array_hash_end_iterator = m_current_hash_node->array_hash().end(); + } + + void set_as_end_iterator() { + m_current_trie_node = nullptr; + m_current_hash_node = nullptr; + m_read_trie_node_value = false; + } + + void skip_hash_node() { + tsl_ht_assert(!m_read_trie_node_value && m_current_hash_node != nullptr); + if(m_current_trie_node == nullptr) { + set_as_end_iterator(); + } + else { + tsl_ht_assert(m_current_hash_node != nullptr); + set_next_node_ascending(*m_current_hash_node); + } + } + + private: + trie_node_type* m_current_trie_node; + hash_node_type* m_current_hash_node; + + array_hash_iterator_type m_array_hash_iterator; + array_hash_iterator_type m_array_hash_end_iterator; + + bool m_read_trie_node_value; + // TODO can't have void if !IsPrefixIterator, use inheritance + typename std::conditional, bool>::type m_prefix_filter; + }; + + + +public: + htrie_hash(const Hash& hash, float max_load_factor, size_type burst_threshold): + m_root(nullptr), m_nb_elements(0), + m_hash(hash), m_max_load_factor(max_load_factor) + { + this->burst_threshold(burst_threshold); + } + + htrie_hash(const htrie_hash& other): m_root(nullptr), m_nb_elements(other.m_nb_elements), + m_hash(other.m_hash), m_max_load_factor(other.m_max_load_factor), + m_burst_threshold(other.m_burst_threshold) + { + if(other.m_root != nullptr) { + if(other.m_root->is_hash_node()) { + m_root = make_unique(other.m_root->as_hash_node()); + } + else { + m_root = make_unique(other.m_root->as_trie_node()); + } + } + } + + htrie_hash(htrie_hash&& other) noexcept(std::is_nothrow_move_constructible::value) + : m_root(std::move(other.m_root)), + m_nb_elements(other.m_nb_elements), + m_hash(std::move(other.m_hash)), + m_max_load_factor(other.m_max_load_factor), + m_burst_threshold(other.m_burst_threshold) + { + other.clear(); + } + + htrie_hash& operator=(const htrie_hash& other) { + if(&other != this) { + std::unique_ptr new_root = nullptr; + if(other.m_root != nullptr) { + if(other.m_root->is_hash_node()) { + new_root = 
make_unique(other.m_root->as_hash_node()); + } + else { + new_root = make_unique(other.m_root->as_trie_node()); + } + } + + m_hash = other.m_hash; + m_root = std::move(new_root); + m_nb_elements = other.m_nb_elements; + m_max_load_factor = other.m_max_load_factor; + m_burst_threshold = other.m_burst_threshold; + } + + return *this; + } + + htrie_hash& operator=(htrie_hash&& other) { + other.swap(*this); + other.clear(); + + return *this; + } + + /* + * Iterators + */ + iterator begin() noexcept { + return mutable_iterator(cbegin()); + } + + const_iterator begin() const noexcept { + return cbegin(); + } + + const_iterator cbegin() const noexcept { + if(empty()) { + return cend(); + } + + return cbegin(*m_root); + } + + iterator end() noexcept { + iterator it; + it.set_as_end_iterator(); + + return it; + } + + const_iterator end() const noexcept { + return cend(); + } + + const_iterator cend() const noexcept { + const_iterator it; + it.set_as_end_iterator(); + + return it; + } + + + /* + * Capacity + */ + bool empty() const noexcept { + return m_nb_elements == 0; + } + + size_type size() const noexcept { + return m_nb_elements; + } + + size_type max_size() const noexcept { + return std::numeric_limits::max(); + } + + size_type max_key_size() const noexcept { + return array_hash_type::MAX_KEY_SIZE; + } + + void shrink_to_fit() { + auto first = begin(); + auto last = end(); + + while(first != last) { + if(first.m_read_trie_node_value) { + ++first; + } + else { + /* + * shrink_to_fit on array_hash will invalidate the iterators of array_hash. + * Save pointer to array_hash, skip the array_hash_node and then call + * shrink_to_fit on the saved pointer. + */ + hash_node* hnode = first.m_current_hash_node; + first.skip_hash_node(); + + tsl_ht_assert(hnode != nullptr); + hnode->array_hash().shrink_to_fit(); + } + } + } + + + /* + * Modifiers + */ + void clear() noexcept { + m_root.reset(nullptr); + m_nb_elements = 0; + } + + template + std::pair insert(const CharT* key, size_type key_size, ValueArgs&&... 
value_args) { + if(key_size > max_key_size()) { + THROW(std::length_error, "Key is too long."); + } + + if(m_root == nullptr) { + m_root = make_unique(m_hash, m_max_load_factor); + } + + return insert_impl(*m_root, key, key_size, std::forward(value_args)...); + } + + iterator erase(const_iterator pos) { + return erase(mutable_iterator(pos)); + } + + iterator erase(const_iterator first, const_iterator last) { + // TODO Optimize, could avoid the call to std::distance + const std::size_t nb_to_erase = std::size_t(std::distance(first, last)); + auto to_delete = mutable_iterator(first); + for(std::size_t i = 0; i < nb_to_erase; i++) { + to_delete = erase(to_delete); + } + + return to_delete; + } + + size_type erase(const CharT* key, size_type key_size) { + auto it = find(key, key_size); + if(it != end()) { + erase(it); + return 1; + } + else { + return 0; + } + + } + + size_type erase_prefix(const CharT* prefix, size_type prefix_size) { + if(m_root == nullptr) { + return 0; + } + + anode* current_node = m_root.get(); + for(size_type iprefix = 0; iprefix < prefix_size; iprefix++) { + if(current_node->is_trie_node()) { + trie_node* tnode = ¤t_node->as_trie_node(); + + if(tnode->child(prefix[iprefix]) == nullptr) { + return 0; + } + else { + current_node = tnode->child(prefix[iprefix]).get(); + } + } + else { + hash_node& hnode = current_node->as_hash_node(); + return erase_prefix_hash_node(hnode, prefix + iprefix, prefix_size - iprefix); + } + } + + + if(current_node->is_trie_node()) { + trie_node* parent = current_node->parent(); + + if(parent != nullptr) { + const size_type nb_erased = size_descendants(current_node->as_trie_node()); + + parent->set_child(current_node->child_of_char(), nullptr); + m_nb_elements -= nb_erased; + + if(parent->empty()) { + clear_empty_nodes(*parent); + } + + return nb_erased; + } + else { + const size_type nb_erased = m_nb_elements; + m_root.reset(nullptr); + m_nb_elements = 0; + + return nb_erased; + } + } + else { + const size_type nb_erased = current_node->as_hash_node().array_hash().size(); + + current_node->as_hash_node().array_hash().clear(); + m_nb_elements -= nb_erased; + + clear_empty_nodes(current_node->as_hash_node()); + + return nb_erased; + } + } + + void swap(htrie_hash& other) { + using std::swap; + + swap(m_hash, other.m_hash); + swap(m_root, other.m_root); + swap(m_nb_elements, other.m_nb_elements); + swap(m_max_load_factor, other.m_max_load_factor); + swap(m_burst_threshold, other.m_burst_threshold); + } + + /* + * Lookup + */ + template::value>::type* = nullptr> + U& at(const CharT* key, size_type key_size) { + return const_cast(static_cast(this)->at(key, key_size)); + } + + template::value>::type* = nullptr> + const U& at(const CharT* key, size_type key_size) const { + auto it_find = find(key, key_size); + if(it_find != cend()) { + return it_find.value(); + } + else { + THROW(std::out_of_range, "Couldn't find key."); + } + } + + //TODO optimize + template::value>::type* = nullptr> + U& access_operator(const CharT* key, size_type key_size) { + auto it_find = find(key, key_size); + if(it_find != cend()) { + return it_find.value(); + } + else { + return insert(key, key_size, U{}).first.value(); + } + } + + size_type count(const CharT* key, size_type key_size) const { + if(find(key, key_size) != cend()) { + return 1; + } + else { + return 0; + } + } + + iterator find(const CharT* key, size_type key_size) { + if(m_root == nullptr) { + return end(); + } + + return find_impl(*m_root, key, key_size); + } + + const_iterator find(const CharT* key, 
size_type key_size) const { + if(m_root == nullptr) { + return cend(); + } + + return find_impl(*m_root, key, key_size); + } + + std::pair equal_range(const CharT* key, size_type key_size) { + iterator it = find(key, key_size); + return std::make_pair(it, (it == end())?it:std::next(it)); + } + + std::pair equal_range(const CharT* key, size_type key_size) const { + const_iterator it = find(key, key_size); + return std::make_pair(it, (it == cend())?it:std::next(it)); + } + + std::pair equal_prefix_range(const CharT* prefix, size_type prefix_size) { + if(m_root == nullptr) { + return std::make_pair(prefix_end(), prefix_end()); + } + + return equal_prefix_range_impl(*m_root, prefix, prefix_size); + } + + std::pair equal_prefix_range(const CharT* prefix, + size_type prefix_size) const + { + if(m_root == nullptr) { + return std::make_pair(prefix_cend(), prefix_cend()); + } + + return equal_prefix_range_impl(*m_root, prefix, prefix_size); + } + + iterator longest_prefix(const CharT* key, size_type key_size) { + if(m_root == nullptr) { + return end(); + } + + return longest_prefix_impl(*m_root, key, key_size); + } + + const_iterator longest_prefix(const CharT* key, size_type key_size) const { + if(m_root == nullptr) { + return cend(); + } + + return longest_prefix_impl(*m_root, key, key_size); + } + + + /* + * Hash policy + */ + float max_load_factor() const { + return m_max_load_factor; + } + + void max_load_factor(float ml) { + m_max_load_factor = ml; + } + + /* + * Burst policy + */ + size_type burst_threshold() const { + return m_burst_threshold; + } + + void burst_threshold(size_type threshold) { + const size_type min_burst_threshold = MIN_BURST_THRESHOLD; + m_burst_threshold = std::max(min_burst_threshold, threshold); + } + + /* + * Observers + */ + hasher hash_function() const { + return m_hash; + } + + /* + * Other + */ + template + void serialize(Serializer& serializer) const { + serialize_impl(serializer); + } + + template + void deserialize(Deserializer& deserializer, bool hash_compatible) { + deserialize_impl(deserializer, hash_compatible); + } + +private: + /** + * Get the begin iterator by searching for the most left descendant node starting at search_start_node. + */ + template + Iterator cbegin(const anode& search_start_node) const noexcept { + if(search_start_node.is_hash_node()) { + return Iterator(search_start_node.as_hash_node()); + } + + const trie_node& tnode = search_start_node.as_trie_node().most_left_descendant_value_trie_node(); + if(tnode.val_node() != nullptr) { + return Iterator(tnode); + } + else { + const anode* first_child = tnode.first_child(); + tsl_ht_assert(first_child != nullptr); + + return Iterator(first_child->as_hash_node()); + } + } + + /** + * Get an iterator to the node that come just after the last descendant of search_start_node. 
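+ *
+ * In other words, the returned iterator is the begin iterator of the next sibling
+ * subtree encountered while walking back up from search_start_node, or the end
+ * iterator when search_start_node lies on the right-most branch of the trie.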
+ */ + template + Iterator cend(const anode& search_start_node) const noexcept { + if(search_start_node.parent() == nullptr) { + Iterator it; + it.set_as_end_iterator(); + + return it; + } + + const trie_node* current_trie_node = search_start_node.parent(); + const anode* next_node = current_trie_node->next_child(search_start_node); + + while(next_node == nullptr && current_trie_node->parent() != nullptr) { + const anode* current_child = current_trie_node; + current_trie_node = current_trie_node->parent(); + next_node = current_trie_node->next_child(*current_child); + } + + if(next_node == nullptr) { + Iterator it; + it.set_as_end_iterator(); + + return it; + } + else { + return cbegin(*next_node); + } + } + + prefix_iterator prefix_end() noexcept { + prefix_iterator it; + it.set_as_end_iterator(); + + return it; + } + + const_prefix_iterator prefix_cend() const noexcept { + const_prefix_iterator it; + it.set_as_end_iterator(); + + return it; + } + + size_type size_descendants(const anode& start_node) const { + auto first = cbegin(start_node); + auto last = cend(start_node); + + size_type nb_elements = 0; + while(first != last) { + if(first.m_read_trie_node_value) { + nb_elements++; + ++first; + } + else { + nb_elements += first.m_current_hash_node->array_hash().size(); + first.skip_hash_node(); + } + } + + return nb_elements; + } + + template + std::pair insert_impl(anode& search_start_node, + const CharT* key, size_type key_size, ValueArgs&&... value_args) + { + anode* current_node = &search_start_node; + + for(size_type ikey = 0; ikey < key_size; ikey++) { + if(current_node->is_trie_node()) { + trie_node& tnode = current_node->as_trie_node(); + + if(tnode.child(key[ikey]) != nullptr) { + current_node = tnode.child(key[ikey]).get(); + } + else { + auto hnode = make_unique(m_hash, m_max_load_factor); + auto insert_it = hnode->array_hash().emplace_ks(key + ikey + 1, key_size - ikey - 1, + std::forward(value_args)...); + + tnode.set_child(key[ikey], std::move(hnode)); + m_nb_elements++; + + + return std::make_pair(iterator(tnode.child(key[ikey])->as_hash_node(), + insert_it.first), true); + } + } + else { + return insert_in_hash_node(current_node->as_hash_node(), + key + ikey, key_size - ikey, std::forward(value_args)...); + } + } + + + if(current_node->is_trie_node()) { + trie_node& tnode = current_node->as_trie_node(); + if(tnode.val_node() != nullptr) { + return std::make_pair(iterator(tnode), false); + } + else { + tnode.val_node() = make_unique(std::forward(value_args)...); + m_nb_elements++; + + return std::make_pair(iterator(tnode), true); + } + } + else { + return insert_in_hash_node(current_node->as_hash_node(), + "", 0, std::forward(value_args)...); + } + } + + template + std::pair insert_in_hash_node(hash_node& hnode, + const CharT* key, size_type key_size, ValueArgs&&... 
value_args) + { + if(need_burst(hnode)) { + std::unique_ptr new_node = burst(hnode); + if(hnode.parent() == nullptr) { + tsl_ht_assert(m_root.get() == &hnode); + + m_root = std::move(new_node); + return insert_impl(*m_root, key, key_size, std::forward(value_args)...); + } + else { + trie_node* parent = hnode.parent(); + const CharT child_of_char = hnode.child_of_char(); + + parent->set_child(child_of_char, std::move(new_node)); + + return insert_impl(*parent->child(child_of_char), + key, key_size, std::forward(value_args)...); + } + } + else { + auto it_insert = hnode.array_hash().emplace_ks(key, key_size, + std::forward(value_args)...); + if(it_insert.second) { + m_nb_elements++; + } + + return std::make_pair(iterator(hnode, it_insert.first), it_insert.second); + } + } + + + iterator erase(iterator pos) { + iterator next_pos = std::next(pos); + + if(pos.m_read_trie_node_value) { + tsl_ht_assert(pos.m_current_trie_node != nullptr && pos.m_current_trie_node->val_node() != nullptr); + + pos.m_current_trie_node->val_node().reset(nullptr); + m_nb_elements--; + + if(pos.m_current_trie_node->empty()) { + clear_empty_nodes(*pos.m_current_trie_node); + } + + return next_pos; + } + else { + tsl_ht_assert(pos.m_current_hash_node != nullptr); + auto next_array_hash_it = pos.m_current_hash_node->array_hash().erase(pos.m_array_hash_iterator); + m_nb_elements--; + + if(next_array_hash_it != pos.m_current_hash_node->array_hash().end()) { + // The erase on array_hash invalidated the next_pos iterator, return the right one. + return iterator(*pos.m_current_hash_node, next_array_hash_it); + } + else { + if(pos.m_current_hash_node->array_hash().empty()) { + clear_empty_nodes(*pos.m_current_hash_node); + } + + return next_pos; + } + } + } + + /** + * Clear all the empty nodes from the tree starting from empty_node (empty for a hash_node means that + * the array hash is empty, for a trie_node it means the node doesn't have any child or value_node + * associated to it). + */ + void clear_empty_nodes(anode& empty_node) noexcept { + tsl_ht_assert(!empty_node.is_trie_node() || + (empty_node.as_trie_node().empty() && empty_node.as_trie_node().val_node() == nullptr)); + tsl_ht_assert(!empty_node.is_hash_node() || empty_node.as_hash_node().array_hash().empty()); + + + trie_node* parent = empty_node.parent(); + if(parent == nullptr) { + tsl_ht_assert(m_root.get() == &empty_node); + tsl_ht_assert(m_nb_elements == 0); + m_root.reset(nullptr); + } + else if(parent->val_node() != nullptr || parent->nb_children() > 1) { + parent->child(empty_node.child_of_char()).reset(nullptr); + } + else if(parent->parent() == nullptr) { + tsl_ht_assert(m_root.get() == empty_node.parent()); + tsl_ht_assert(m_nb_elements == 0); + m_root.reset(nullptr); + } + else { + /** + * Parent is empty if we remove its empty_node child. + * Put empty_node as new child of the grand parent instead of parent (move hnode up, + * and delete the parent). And recurse. + * + * We can't just set grand_parent->child(parent->child_of_char()) to nullptr as + * the grand_parent may also become empty. We don't want empty trie_node with no value_node + * in the tree. 
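+ *
+ * Illustrative (hypothetical shape): if grand_parent holds parent under 'x' and
+ * parent's only content is the empty child, the empty child is re-attached under
+ * grand_parent at 'x' (destroying parent) and clear_empty_nodes is re-run on it,
+ * this time with grand_parent as its parent.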
+ */ + trie_node* grand_parent = parent->parent(); + grand_parent->set_child(parent->child_of_char(), + std::move(parent->child(empty_node.child_of_char()))); + + + clear_empty_nodes(empty_node); + } + } + + + + + iterator find_impl(const anode& search_start_node, const CharT* key, size_type key_size) { + return mutable_iterator(static_cast(this)->find_impl(search_start_node, key, key_size)); + } + + const_iterator find_impl(const anode& search_start_node, const CharT* key, size_type key_size) const { + const anode* current_node = &search_start_node; + + for(size_type ikey = 0; ikey < key_size; ikey++) { + if(current_node->is_trie_node()) { + const trie_node* tnode = ¤t_node->as_trie_node(); + + if(tnode->child(key[ikey]) == nullptr) { + return cend(); + } + else { + current_node = tnode->child(key[ikey]).get(); + } + } + else { + return find_in_hash_node(current_node->as_hash_node(), + key + ikey, key_size - ikey); + } + } + + + if(current_node->is_trie_node()) { + const trie_node& tnode = current_node->as_trie_node(); + return (tnode.val_node() != nullptr)?const_iterator(tnode):cend(); + } + else { + return find_in_hash_node(current_node->as_hash_node(), "", 0); + } + } + + const_iterator find_in_hash_node(const hash_node& hnode, + const CharT* key, size_type key_size) const + { + auto it = hnode.array_hash().find_ks(key, key_size); + if(it != hnode.array_hash().end()) { + return const_iterator(hnode, it); + } + else { + return cend(); + } + } + + + iterator longest_prefix_impl(const anode& search_start_node, + const CharT* value, size_type value_size) + { + return mutable_iterator(static_cast(this)->longest_prefix_impl(search_start_node, + value, value_size)); + } + + const_iterator longest_prefix_impl(const anode& search_start_node, + const CharT* value, size_type value_size) const + { + const anode* current_node = &search_start_node; + const_iterator longest_found_prefix = cend(); + + for(size_type ivalue = 0; ivalue < value_size; ivalue++) { + if(current_node->is_trie_node()) { + const trie_node& tnode = current_node->as_trie_node(); + + if(tnode.val_node() != nullptr) { + longest_found_prefix = const_iterator(tnode); + } + + if(tnode.child(value[ivalue]) == nullptr) { + return longest_found_prefix; + } + else { + current_node = tnode.child(value[ivalue]).get(); + } + } + else { + const hash_node& hnode = current_node->as_hash_node(); + + /** + * Test the presence in the hash node of each substring from the + * remaining [ivalue, value_size) string starting from the longest. + * Also test the empty string. 
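+ *
+ * Illustrative (hypothetical input): with value "romane" and ivalue == 3, the hash
+ * node is probed with "ane", then "an", then "a" and finally the empty string, and
+ * the first match found is returned.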
+ */ + for(std::size_t i = ivalue; i <= value_size; i++) { + auto it = hnode.array_hash().find_ks(value + ivalue, (value_size - i)); + if(it != hnode.array_hash().end()) { + return const_iterator(hnode, it); + } + } + + return longest_found_prefix; + } + } + + if(current_node->is_trie_node()) { + const trie_node& tnode = current_node->as_trie_node(); + + if(tnode.val_node() != nullptr) { + longest_found_prefix = const_iterator(tnode); + } + } + else { + const hash_node& hnode = current_node->as_hash_node(); + + auto it = hnode.array_hash().find_ks("", 0); + if(it != hnode.array_hash().end()) { + longest_found_prefix = const_iterator(hnode, it); + } + } + + return longest_found_prefix; + } + + + std::pair equal_prefix_range_impl( + anode& search_start_node, + const CharT* prefix, size_type prefix_size) + { + auto range = static_cast(this)->equal_prefix_range_impl(search_start_node, + prefix, prefix_size); + return std::make_pair(mutable_iterator(range.first), mutable_iterator(range.second)); + } + + std::pair equal_prefix_range_impl( + const anode& search_start_node, + const CharT* prefix, size_type prefix_size) const + { + const anode* current_node = &search_start_node; + + for(size_type iprefix = 0; iprefix < prefix_size; iprefix++) { + if(current_node->is_trie_node()) { + const trie_node* tnode = ¤t_node->as_trie_node(); + + if(tnode->child(prefix[iprefix]) == nullptr) { + return std::make_pair(prefix_cend(), prefix_cend()); + } + else { + current_node = tnode->child(prefix[iprefix]).get(); + } + } + else { + const hash_node& hnode = current_node->as_hash_node(); + const_prefix_iterator begin(hnode.parent(), &hnode, + hnode.array_hash().begin(), hnode.array_hash().end(), + false, std::basic_string(prefix + iprefix, prefix_size - iprefix)); + begin.filter_prefix(); + + const_prefix_iterator end = cend(*current_node); + + return std::make_pair(begin, end); + } + } + + + const_prefix_iterator begin = cbegin(*current_node); + const_prefix_iterator end = cend(*current_node); + + return std::make_pair(begin, end); + } + + size_type erase_prefix_hash_node(hash_node& hnode, const CharT* prefix, size_type prefix_size) { + size_type nb_erased = 0; + + auto it = hnode.array_hash().begin(); + while(it != hnode.array_hash().end()) { + if(it.key_size() >= prefix_size && + std::memcmp(prefix, it.key(), prefix_size * sizeof(CharT)) == 0) + { + it = hnode.array_hash().erase(it); + ++nb_erased; + --m_nb_elements; + } + else { + ++it; + } + } + + return nb_erased; + } + + + /* + * Burst + */ + bool need_burst(hash_node& node) const { + return node.array_hash().size() >= m_burst_threshold; + } + + + /** + * Burst the node and use the copy constructor instead of move constructor for the values. + * Also use this method for trivial value types like int, int*, ... as it requires + * less book-keeping (thus faster) than the burst using move constructors. 
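+ *
+ * Illustrative burst (hypothetical keys): a hash node holding {"at", "axe", "bed"}
+ * becomes a trie node with a child hash node {"t", "xe"} under 'a' and a child hash
+ * node {"ed"} under 'b'; an empty key, if present, moves into the new trie node's
+ * value_node.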
+ */ + template::value && + std::is_copy_constructible::value && + (!std::is_nothrow_move_constructible::value || + !std::is_nothrow_move_assignable::value || + std::is_arithmetic::value || + std::is_pointer::value)>::type* = nullptr> + std::unique_ptr burst(hash_node& node) { + const std::array first_char_count = + get_first_char_count(node.array_hash().cbegin(), + node.array_hash().cend()); + + + auto new_node = make_unique(); + for(auto it = node.array_hash().cbegin(); it != node.array_hash().cend(); ++it) { + if(it.key_size() == 0) { + new_node->val_node() = make_unique(it.value()); + } + else { + hash_node& hnode = get_hash_node_for_char(first_char_count, *new_node, it.key()[0]); + hnode.array_hash().insert_ks(it.key() + 1, it.key_size() - 1, it.value()); + } + } + + + tsl_ht_assert(new_node->val_node() != nullptr || !new_node->empty()); + return new_node; + } + + /** + * Burst the node and use the move constructor and move assign operator + */ + template::value && + std::is_nothrow_move_constructible::value && + std::is_nothrow_move_assignable::value && + !std::is_arithmetic::value && + !std::is_pointer::value>::type* = nullptr> + std::unique_ptr burst(hash_node& node) { + /** + * We burst the node->array_hash() into multiple arrays hash. While doing so, we move each value in + * the node->array_hash() into the new arrays hash. After each move, we save a pointer to where the value + * has been moved. In case of exception, we rollback these values into the original node->array_hash(). + */ + std::vector moved_values_rollback; + moved_values_rollback.reserve(node.array_hash().size()); + + const std::array first_char_count = + get_first_char_count(node.array_hash().cbegin(), node.array_hash().cend()); + + + auto new_node = make_unique(); + for(auto it = node.array_hash().begin(); it != node.array_hash().end(); ++it) { + if(it.key_size() == 0) { + new_node->val_node() = make_unique(std::move(it.value())); + moved_values_rollback.push_back(std::addressof(new_node->val_node()->m_value)); + } + else { + hash_node& hnode = get_hash_node_for_char(first_char_count, *new_node, it.key()[0]); + auto it_insert = hnode.array_hash().insert_ks(it.key() + 1, it.key_size() - 1, + std::move(it.value())); + moved_values_rollback.push_back(std::addressof(it_insert.first.value())); + } + } + + + tsl_ht_assert(new_node->val_node() != nullptr || !new_node->empty()); + return new_node; + } + + template::value>::type* = nullptr> + std::unique_ptr burst(hash_node& node) { + const std::array first_char_count = + get_first_char_count(node.array_hash().begin(), node.array_hash().end()); + + + auto new_node = make_unique(); + for(auto it = node.array_hash().cbegin(); it != node.array_hash().cend(); ++it) { + if(it.key_size() == 0) { + new_node->val_node() = make_unique(); + } + else { + hash_node& hnode = get_hash_node_for_char(first_char_count, *new_node, it.key()[0]); + hnode.array_hash().insert_ks(it.key() + 1, it.key_size() - 1); + } + } + + + tsl_ht_assert(new_node->val_node() != nullptr || !new_node->empty()); + return new_node; + } + + std::array get_first_char_count(typename array_hash_type::const_iterator begin, + typename array_hash_type::const_iterator end) const + { + std::array count{{}}; + for(auto it = begin; it != end; ++it) { + if(it.key_size() == 0) { + continue; + } + + count[as_position(it.key()[0])]++; + } + + return count; + } + + + hash_node& get_hash_node_for_char(const std::array& first_char_count, + trie_node& tnode, CharT for_char) + { + if(tnode.child(for_char) == nullptr) { + const 
size_type nb_buckets = + size_type( + std::ceil(float(first_char_count[as_position(for_char)] + + HASH_NODE_DEFAULT_INIT_BUCKETS_COUNT/2) + / m_max_load_factor + )); + + tnode.set_child(for_char, + make_unique(nb_buckets, m_hash, m_max_load_factor)); + } + + return tnode.child(for_char)->as_hash_node(); + } + + iterator mutable_iterator(const_iterator it) noexcept { + // end iterator or reading from a trie node value + if(it.m_current_hash_node == nullptr || it.m_read_trie_node_value) { + typename array_hash_type::iterator default_it; + + return iterator(const_cast(it.m_current_trie_node), nullptr, + default_it, default_it, it.m_read_trie_node_value); + } + else { + hash_node* hnode = const_cast(it.m_current_hash_node); + return iterator(const_cast(it.m_current_trie_node), hnode, + hnode->array_hash().mutable_iterator(it.m_array_hash_iterator), + hnode->array_hash().mutable_iterator(it.m_array_hash_end_iterator), + it.m_read_trie_node_value); + } + } + + prefix_iterator mutable_iterator(const_prefix_iterator it) noexcept { + // end iterator or reading from a trie node value + if(it.m_current_hash_node == nullptr || it.m_read_trie_node_value) { + typename array_hash_type::iterator default_it; + + return prefix_iterator(const_cast(it.m_current_trie_node), nullptr, + default_it, default_it, it.m_read_trie_node_value, ""); + } + else { + hash_node* hnode = const_cast(it.m_current_hash_node); + return prefix_iterator(const_cast(it.m_current_trie_node), hnode, + hnode->array_hash().mutable_iterator(it.m_array_hash_iterator), + hnode->array_hash().mutable_iterator(it.m_array_hash_end_iterator), + it.m_read_trie_node_value, it.m_prefix_filter); + } + } + + template + void serialize_impl(Serializer& serializer) const { + const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION; + serializer(version); + + const slz_size_type nb_elements = m_nb_elements; + serializer(nb_elements); + + const float max_load_factor = m_max_load_factor; + serializer(max_load_factor); + + const slz_size_type burst_threshold = m_burst_threshold; + serializer(burst_threshold); + + + std::basic_string str_buffer; + + auto it = begin(); + auto last = end(); + + while(it != last) { + // Serialize trie node value + if(it.m_read_trie_node_value) { + const CharT node_type = static_cast::type>(slz_node_type::TRIE_NODE); + serializer(&node_type, 1); + + it.key(str_buffer); + + const slz_size_type str_size = str_buffer.size(); + serializer(str_size); + serializer(str_buffer.data(), str_buffer.size()); + serialize_value(serializer, it); + + + ++it; + } + // Serialize hash node values + else { + const CharT node_type = static_cast::type>(slz_node_type::HASH_NODE); + serializer(&node_type, 1); + + it.hash_node_prefix(str_buffer); + + const slz_size_type str_size = str_buffer.size(); + serializer(str_size); + serializer(str_buffer.data(), str_buffer.size()); + + const hash_node* hnode = it.m_current_hash_node; + tsl_ht_assert(hnode != nullptr); + hnode->array_hash().serialize(serializer); + + + it.skip_hash_node(); + } + } + } + + template::value>::type* = nullptr> + void serialize_value(Serializer& /*serializer*/, const_iterator /*it*/) const { + } + + template::value>::type* = nullptr> + void serialize_value(Serializer& serializer, const_iterator it) const { + serializer(it.value()); + } + + template + void deserialize_impl(Deserializer& deserializer, bool hash_compatible) { + tsl_ht_assert(m_nb_elements == 0 && m_root == nullptr); // Current trie must be empty + + const slz_size_type version = deserialize_value(deserializer); + 
// For now we only have one version of the serialization protocol. + // If it doesn't match there is a problem with the file. + if(version != SERIALIZATION_PROTOCOL_VERSION) { + THROW(std::runtime_error, "Can't deserialize the htrie_map/set. The protocol version header is invalid."); + } + + + const slz_size_type nb_elements = deserialize_value(deserializer); + const float max_load_factor = deserialize_value(deserializer); + const slz_size_type burst_threshold = deserialize_value(deserializer); + + this->burst_threshold(numeric_cast(burst_threshold, "Deserialized burst_threshold is too big.")); + this->max_load_factor(max_load_factor); + + + std::vector str_buffer; + while(m_nb_elements < nb_elements) { + CharT node_type_marker; + deserializer(&node_type_marker, 1); + + static_assert(std::is_same::type>::value, ""); + const slz_node_type node_type = static_cast(node_type_marker); + if(node_type == slz_node_type::TRIE_NODE) { + const std::size_t str_size = numeric_cast(deserialize_value(deserializer), + "Deserialized str_size is too big."); + + str_buffer.resize(str_size); + deserializer(str_buffer.data(), str_size); + + + trie_node* current_node = insert_prefix_trie_nodes(str_buffer.data(), str_size); + deserialize_value_node(deserializer, current_node); + m_nb_elements++; + } + else if(node_type == slz_node_type::HASH_NODE) { + const std::size_t str_size = numeric_cast(deserialize_value(deserializer), + "Deserialized str_size is too big."); + + if(str_size == 0) { + tsl_ht_assert(m_nb_elements == 0 && !m_root); + + m_root = make_unique(array_hash_type::deserialize(deserializer, hash_compatible)); + m_nb_elements += m_root->as_hash_node().array_hash().size(); + + tsl_ht_assert(m_nb_elements == nb_elements); + } + else { + str_buffer.resize(str_size); + deserializer(str_buffer.data(), str_size); + + + auto hnode = make_unique(array_hash_type::deserialize(deserializer, hash_compatible)); + m_nb_elements += hnode->array_hash().size(); + + trie_node* current_node = insert_prefix_trie_nodes(str_buffer.data(), str_size - 1); + current_node->set_child(str_buffer[str_size - 1], std::move(hnode)); + } + } + else { + THROW(std::runtime_error, "Unknown deserialized node type."); + } + } + + tsl_ht_assert(m_nb_elements == nb_elements); + } + + trie_node* insert_prefix_trie_nodes(const CharT* prefix, std::size_t prefix_size) { + if(m_root == nullptr) { + m_root = make_unique(); + } + + trie_node* current_node = &m_root->as_trie_node(); + for(std::size_t iprefix = 0; iprefix < prefix_size; iprefix++) { + if(current_node->child(prefix[iprefix]) == nullptr) { + current_node->set_child(prefix[iprefix], make_unique()); + } + + current_node = ¤t_node->child(prefix[iprefix])->as_trie_node(); + } + + return current_node; + } + + template::value>::type* = nullptr> + void deserialize_value_node(Deserializer& /*deserializer*/, trie_node* current_node) { + tsl_ht_assert(!current_node->val_node()); + current_node->val_node() = make_unique(); + } + + template::value>::type* = nullptr> + void deserialize_value_node(Deserializer& deserializer, trie_node* current_node) { + tsl_ht_assert(!current_node->val_node()); + current_node->val_node() = make_unique(deserialize_value(deserializer)); + } + + template + static U deserialize_value(Deserializer& deserializer) { + // MSVC < 2017 is not conformant, circumvent the problem by removing the template keyword + #if defined (_MSC_VER) && _MSC_VER < 1910 + return deserializer.Deserializer::operator()(); + #else + return deserializer.Deserializer::template operator()(); + #endif 
+ } + + // Same as std::make_unique for non-array types which is only available in C++14 (we need to support C++11). + template + static std::unique_ptr make_unique(Args&&... args) { + return std::unique_ptr(new U(std::forward(args)...)); + } + +public: + static constexpr float HASH_NODE_DEFAULT_MAX_LOAD_FACTOR = 8.0f; + static const size_type DEFAULT_BURST_THRESHOLD = 16384; + +private: + + /** + * Fixed size type used to represent size_type values on serialization. Need to be big enough + * to represent a std::size_t on 32 and 64 bits platforms, and must be the same size on both platforms. + */ + using slz_size_type = std::uint64_t; + enum class slz_node_type: CharT { TRIE_NODE = 0, HASH_NODE = 1 }; + + /** + * Protocol version currenlty used for serialization. + */ + static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1; + + static const size_type HASH_NODE_DEFAULT_INIT_BUCKETS_COUNT = 32; + static const size_type MIN_BURST_THRESHOLD = 4; + + std::unique_ptr m_root; + size_type m_nb_elements; + Hash m_hash; + float m_max_load_factor; + size_type m_burst_threshold; + +}; + +} // end namespace detail_htrie_hash +} // end namespace tsl + +#endif diff --git a/ios/include/tsl/htrie_map.h b/ios/include/tsl/htrie_map.h new file mode 100644 index 00000000..59712c5e --- /dev/null +++ b/ios/include/tsl/htrie_map.h @@ -0,0 +1,647 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_HTRIE_MAP_H +#define TSL_HTRIE_MAP_H + +#include +#include +#include +#include +#include +#include "htrie_hash.h" + +namespace tsl { + +/** + * Implementation of a hat-trie map. + * + * The value T must be either nothrow move-constructible/assignable, copy-constructible or both. + * + * The size of a key string is limited to std::numeric_limits::max() - 1. + * That is 65 535 characters by default, but can be raised with the KeySizeT template parameter. + * See max_key_size() for an easy access to this limit. + * + * Iterators invalidation: + * - clear, operator=: always invalidate the iterators. + * - insert, emplace, operator[]: always invalidate the iterators. + * - erase: always invalidate the iterators. 
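+ *
+ * A minimal usage sketch (the keys and values below are purely illustrative):
+ *
+ *     tsl::htrie_map<char, int> map = {{"one", 1}, {"two", 2}};
+ *     map["three"] = 3;                 // insertion through operator[]
+ *     map.insert("four", 4);
+ *
+ *     auto it = map.find("three");
+ *     if(it != map.end()) {
+ *         it.value() += 10;             // mapped values are reached through the iterator
+ *     }
+ *
+ *     auto prefix_range = map.equal_prefix_range("t");
+ *     for(auto pit = prefix_range.first; pit != prefix_range.second; ++pit) {
+ *         // visits "two" and "three"; pit.key() rebuilds the full key
+ *     }
+ *
+ *     map.erase("two");                 // like every modifier, invalidates iterators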
+ */ +template, + class KeySizeT = std::uint16_t> +class htrie_map { +private: + template + using is_iterator = tsl::detail_array_hash::is_iterator; + + using ht = tsl::detail_htrie_hash::htrie_hash; + +public: + using char_type = typename ht::char_type; + using mapped_type = T; + using key_size_type = typename ht::key_size_type; + using size_type = typename ht::size_type; + using hasher = typename ht::hasher; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + using prefix_iterator = typename ht::prefix_iterator; + using const_prefix_iterator = typename ht::const_prefix_iterator; + +public: + explicit htrie_map(const Hash& hash = Hash()): m_ht(hash, ht::HASH_NODE_DEFAULT_MAX_LOAD_FACTOR, + ht::DEFAULT_BURST_THRESHOLD) + { + } + + explicit htrie_map(size_type burst_threshold, + const Hash& hash = Hash()): m_ht(hash, ht::HASH_NODE_DEFAULT_MAX_LOAD_FACTOR, + burst_threshold) + { + } + + template::value>::type* = nullptr> + htrie_map(InputIt first, InputIt last, + const Hash& hash = Hash()): htrie_map(hash) + { + insert(first, last); + } + + + +#ifdef TSL_HT_HAS_STRING_VIEW + htrie_map(std::initializer_list, T>> init, + const Hash& hash = Hash()): htrie_map(hash) + { + insert(init); + } +#else + htrie_map(std::initializer_list> init, + const Hash& hash = Hash()): htrie_map(hash) + { + insert(init); + } +#endif + + + +#ifdef TSL_HT_HAS_STRING_VIEW + htrie_map& operator=(std::initializer_list, T>> ilist) { + clear(); + insert(ilist); + + return *this; + } +#else + htrie_map& operator=(std::initializer_list> ilist) { + clear(); + insert(ilist); + + return *this; + } +#endif + + + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + size_type max_key_size() const noexcept { return m_ht.max_key_size(); } + + /** + * Call shrink_to_fit() on each hash node of the hat-trie to reduce its size. 
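+ *
+ * Illustrative use (hypothetical map contents), typically after many erasures:
+ *
+ *     map.erase_prefix("sessions/expired/");
+ *     map.shrink_to_fit(); // reduce the capacity of the underlying array hash tables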
+ */ + void shrink_to_fit() { m_ht.shrink_to_fit(); } + + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + + + std::pair insert_ks(const CharT* key, size_type key_size, const T& value) { + return m_ht.insert(key, key_size, value); + } +#ifdef TSL_HT_HAS_STRING_VIEW + std::pair insert(const std::basic_string_view& key, const T& value) { + return m_ht.insert(key.data(), key.size(), value); + } +#else + std::pair insert(const CharT* key, const T& value) { + return m_ht.insert(key, std::strlen(key), value); + } + + std::pair insert(const std::basic_string& key, const T& value) { + return m_ht.insert(key.data(), key.size(), value); + } +#endif + + + + std::pair insert_ks(const CharT* key, size_type key_size, T&& value) { + return m_ht.insert(key, key_size, std::move(value)); + } +#ifdef TSL_HT_HAS_STRING_VIEW + std::pair insert(const std::basic_string_view& key, T&& value) { + return m_ht.insert(key.data(), key.size(), std::move(value)); + } +#else + std::pair insert(const CharT* key, T&& value) { + return m_ht.insert(key, std::strlen(key), std::move(value)); + } + + std::pair insert(const std::basic_string& key, T&& value) { + return m_ht.insert(key.data(), key.size(), std::move(value)); + } +#endif + + + + template::value>::type* = nullptr> + void insert(InputIt first, InputIt last) { + for(auto it = first; it != last; ++it) { + insert_pair(*it); + } + } + + + +#ifdef TSL_HT_HAS_STRING_VIEW + void insert(std::initializer_list, T>> ilist) { + insert(ilist.begin(), ilist.end()); + } +#else + void insert(std::initializer_list> ilist) { + insert(ilist.begin(), ilist.end()); + } +#endif + + + + template + std::pair emplace_ks(const CharT* key, size_type key_size, Args&&... args) { + return m_ht.insert(key, key_size, std::forward(args)...); + } +#ifdef TSL_HT_HAS_STRING_VIEW + template + std::pair emplace(const std::basic_string_view& key, Args&&... args) { + return m_ht.insert(key.data(), key.size(), std::forward(args)...); + } +#else + template + std::pair emplace(const CharT* key, Args&&... args) { + return m_ht.insert(key, std::strlen(key), std::forward(args)...); + } + + template + std::pair emplace(const std::basic_string& key, Args&&... args) { + return m_ht.insert(key.data(), key.size(), std::forward(args)...); + } +#endif + + + + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } + + + + size_type erase_ks(const CharT* key, size_type key_size) { + return m_ht.erase(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + size_type erase(const std::basic_string_view& key) { + return m_ht.erase(key.data(), key.size()); + } +#else + size_type erase(const CharT* key) { + return m_ht.erase(key, std::strlen(key)); + } + + size_type erase(const std::basic_string& key) { + return m_ht.erase(key.data(), key.size()); + } +#endif + + + + /** + * Erase all the elements which have 'prefix' as prefix. Return the number of erase elements. 
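+ *
+ * Example (hypothetical contents):
+ *
+ *     tsl::htrie_map<char, int> map = {{"apple", 1}, {"apricot", 2}, {"banana", 3}};
+ *     map.erase_prefix("ap"); // erases "apple" and "apricot", returns 2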
+ */ + size_type erase_prefix_ks(const CharT* prefix, size_type prefix_size) { + return m_ht.erase_prefix(prefix, prefix_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + /** + * @copydoc erase_prefix_ks(const CharT* prefix, size_type prefix_size) + */ + size_type erase_prefix(const std::basic_string_view& prefix) { + return m_ht.erase_prefix(prefix.data(), prefix.size()); + } +#else + /** + * @copydoc erase_prefix_ks(const CharT* prefix, size_type prefix_size) + */ + size_type erase_prefix(const CharT* prefix) { + return m_ht.erase_prefix(prefix, std::strlen(prefix)); + } + + /** + * @copydoc erase_prefix_ks(const CharT* prefix, size_type prefix_size) + */ + size_type erase_prefix(const std::basic_string& prefix) { + return m_ht.erase_prefix(prefix.data(), prefix.size()); + } +#endif + + + + void swap(htrie_map& other) { other.m_ht.swap(m_ht); } + + /* + * Lookup + */ + T& at_ks(const CharT* key, size_type key_size) { return m_ht.at(key, key_size); } + const T& at_ks(const CharT* key, size_type key_size) const { return m_ht.at(key, key_size); } + +#ifdef TSL_HT_HAS_STRING_VIEW + T& at(const std::basic_string_view& key) { return m_ht.at(key.data(), key.size()); } + const T& at(const std::basic_string_view& key) const { return m_ht.at(key.data(), key.size()); } +#else + T& at(const CharT* key) { return m_ht.at(key, std::strlen(key)); } + const T& at(const CharT* key) const { return m_ht.at(key, std::strlen(key)); } + + T& at(const std::basic_string& key) { return m_ht.at(key.data(), key.size()); } + const T& at(const std::basic_string& key) const { return m_ht.at(key.data(), key.size()); } +#endif + + + +#ifdef TSL_HT_HAS_STRING_VIEW + T& operator[](const std::basic_string_view& key) { return m_ht.access_operator(key.data(), key.size()); } +#else + T& operator[](const CharT* key) { return m_ht.access_operator(key, std::strlen(key)); } + T& operator[](const std::basic_string& key) { return m_ht.access_operator(key.data(), key.size()); } +#endif + + + + size_type count_ks(const CharT* key, size_type key_size) const { return m_ht.count(key, key_size); } +#ifdef TSL_HT_HAS_STRING_VIEW + size_type count(const std::basic_string_view& key) const { return m_ht.count(key.data(), key.size()); } +#else + size_type count(const CharT* key) const { return m_ht.count(key, std::strlen(key)); } + size_type count(const std::basic_string& key) const { return m_ht.count(key.data(), key.size()); } +#endif + + + + iterator find_ks(const CharT* key, size_type key_size) { + return m_ht.find(key, key_size); + } + + const_iterator find_ks(const CharT* key, size_type key_size) const { + return m_ht.find(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + iterator find(const std::basic_string_view& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string_view& key) const { + return m_ht.find(key.data(), key.size()); + } +#else + iterator find(const CharT* key) { + return m_ht.find(key, std::strlen(key)); + } + + const_iterator find(const CharT* key) const { + return m_ht.find(key, std::strlen(key)); + } + + iterator find(const std::basic_string& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string& key) const { + return m_ht.find(key.data(), key.size()); + } +#endif + + + + std::pair equal_range_ks(const CharT* key, size_type key_size) { + return m_ht.equal_range(key, key_size); + } + + std::pair equal_range_ks(const CharT* key, size_type key_size) const { + return m_ht.equal_range(key, key_size); + } +#ifdef 
TSL_HT_HAS_STRING_VIEW + std::pair equal_range(const std::basic_string_view& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string_view& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#else + std::pair equal_range(const CharT* key) { + return m_ht.equal_range(key, std::strlen(key)); + } + + std::pair equal_range(const CharT* key) const { + return m_ht.equal_range(key, std::strlen(key)); + } + + std::pair equal_range(const std::basic_string& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#endif + + + /** + * Return a range containing all the elements which have 'prefix' as prefix. The range is defined by a pair + * of iterator, the first being the begin iterator and the second being the end iterator. + */ + std::pair equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) { + return m_ht.equal_prefix_range(prefix, prefix_size); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) const { + return m_ht.equal_prefix_range(prefix, prefix_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string_view& prefix) { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string_view& prefix) const { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } +#else + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const CharT* prefix) { + return m_ht.equal_prefix_range(prefix, std::strlen(prefix)); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const CharT* prefix) const { + return m_ht.equal_prefix_range(prefix, std::strlen(prefix)); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string& prefix) { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string& prefix) const { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } +#endif + + + + /** + * Return the element in the trie which is the longest prefix of `key`. If no + * element in the trie is a prefix of `key`, the end iterator is returned. 
+ * + * Example: + * + * tsl::htrie_map map = {{"/foo", 1}, {"/foo/bar", 1}}; + * + * map.longest_prefix("/foo"); // returns {"/foo", 1} + * map.longest_prefix("/foo/baz"); // returns {"/foo", 1} + * map.longest_prefix("/foo/bar/baz"); // returns {"/foo/bar", 1} + * map.longest_prefix("/foo/bar/"); // returns {"/foo/bar", 1} + * map.longest_prefix("/bar"); // returns end() + * map.longest_prefix(""); // returns end() + */ + iterator longest_prefix_ks(const CharT* key, size_type key_size) { + return m_ht.longest_prefix(key, key_size); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix_ks(const CharT* key, size_type key_size) const { + return m_ht.longest_prefix(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + iterator longest_prefix(const std::basic_string_view& key) { + return m_ht.longest_prefix(key.data(), key.size()); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix(const std::basic_string_view& key) const { + return m_ht.longest_prefix(key.data(), key.size()); + } +#else + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + iterator longest_prefix(const CharT* key) { + return m_ht.longest_prefix(key, std::strlen(key)); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix(const CharT* key) const { + return m_ht.longest_prefix(key, std::strlen(key)); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + iterator longest_prefix(const std::basic_string& key) { + return m_ht.longest_prefix(key.data(), key.size()); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix(const std::basic_string& key) const { + return m_ht.longest_prefix(key.data(), key.size()); + } +#endif + + + + /* + * Hash policy + */ + float max_load_factor() const { return m_ht.max_load_factor(); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + + /* + * Burst policy + */ + size_type burst_threshold() const { return m_ht.burst_threshold(); } + void burst_threshold(size_type threshold) { m_ht.burst_threshold(threshold); } + + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + + + + /* + * Other + */ + + /** + * Serialize the map through the `serializer` parameter. + * + * The `serializer` parameter must be a function object that supports the following calls: + * - `void operator()(const U& value);` where the types `std::uint64_t`, `float` and `T` must be supported for U. + * - `void operator()(const CharT* value, std::size_t value_size);` + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, ...) of the types it serializes + * in the hands of the `Serializer` function object if compatibility is required. + */ + template + void serialize(Serializer& serializer) const { + m_ht.serialize(serializer); + } + + + /** + * Deserialize a previously serialized map through the `deserializer` parameter. + * + * The `deserializer` parameter must be a function object that supports the following calls: + * - `template U operator()();` where the types `std::uint64_t`, `float` and `T` must be supported for U. 
+ * - `void operator()(CharT* value_out, std::size_t value_size);` + * + * If the deserialized hash map part of the hat-trie is hash compatible with the serialized map, the deserialization process + * can be sped up by setting `hash_compatible` to true. To be hash compatible, the Hash (take care of the 32-bits vs 64 bits), + * and KeySizeT must behave the same than the ones used in the serialized map. Otherwise the behaviour is undefined + * with `hash_compatible` sets to true. + * + * The behaviour is undefined if the type `CharT` and `T` of the `htrie_map` are not the same as the + * types used during serialization. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, size of int, ...) of the types it + * deserializes in the hands of the `Deserializer` function object if compatibility is required. + */ + template + static htrie_map deserialize(Deserializer& deserializer, bool hash_compatible = false) { + htrie_map map; + map.m_ht.deserialize(deserializer, hash_compatible); + + return map; + } + + friend bool operator==(const htrie_map& lhs, const htrie_map& rhs) { + if(lhs.size() != rhs.size()) { + return false; + } + + std::string key_buffer; + for(auto it = lhs.cbegin(); it != lhs.cend(); ++it) { + it.key(key_buffer); + + const auto it_element_rhs = rhs.find(key_buffer); + if(it_element_rhs == rhs.cend() || it.value() != it_element_rhs.value()) { + return false; + } + } + + return true; + } + + friend bool operator!=(const htrie_map& lhs, const htrie_map& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(htrie_map& lhs, htrie_map& rhs) { + lhs.swap(rhs); + } + +private: + template + void insert_pair(const std::pair& value) { + insert(value.first, value.second); + } + + template + void insert_pair(std::pair&& value) { + insert(value.first, std::move(value.second)); + } + +private: + ht m_ht; +}; + +} // end namespace tsl + +#endif diff --git a/ios/include/tsl/htrie_set.h b/ios/include/tsl/htrie_set.h new file mode 100644 index 00000000..e2f40adc --- /dev/null +++ b/ios/include/tsl/htrie_set.h @@ -0,0 +1,586 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_HTRIE_SET_H +#define TSL_HTRIE_SET_H + +#include +#include +#include +#include +#include +#include "htrie_hash.h" + +namespace tsl { + +/** + * Implementation of a hat-trie set. + * + * The size of a key string is limited to std::numeric_limits::max() - 1. 
+ * That is 65 535 characters by default, but can be raised with the KeySizeT template parameter. + * See max_key_size() for an easy access to this limit. + * + * Iterators invalidation: + * - clear, operator=: always invalidate the iterators. + * - insert: always invalidate the iterators. + * - erase: always invalidate the iterators. + */ +template, + class KeySizeT = std::uint16_t> +class htrie_set { +private: + template + using is_iterator = tsl::detail_array_hash::is_iterator; + + using ht = tsl::detail_htrie_hash::htrie_hash; + +public: + using char_type = typename ht::char_type; + using key_size_type = typename ht::key_size_type; + using size_type = typename ht::size_type; + using hasher = typename ht::hasher; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + using prefix_iterator = typename ht::prefix_iterator; + using const_prefix_iterator = typename ht::const_prefix_iterator; + +public: + explicit htrie_set(const Hash& hash = Hash()): m_ht(hash, ht::HASH_NODE_DEFAULT_MAX_LOAD_FACTOR, + ht::DEFAULT_BURST_THRESHOLD) + { + } + + explicit htrie_set(size_type burst_threshold, + const Hash& hash = Hash()): m_ht(hash, ht::HASH_NODE_DEFAULT_MAX_LOAD_FACTOR, + burst_threshold) + { + } + + template::value>::type* = nullptr> + htrie_set(InputIt first, InputIt last, + const Hash& hash = Hash()): htrie_set(hash) + { + insert(first, last); + } + + + +#ifdef TSL_HT_HAS_STRING_VIEW + htrie_set(std::initializer_list> init, + const Hash& hash = Hash()): htrie_set(hash) + { + insert(init); + } +#else + htrie_set(std::initializer_list init, + const Hash& hash = Hash()): htrie_set(hash) + { + insert(init); + } +#endif + + + +#ifdef TSL_HT_HAS_STRING_VIEW + htrie_set& operator=(std::initializer_list> ilist) { + clear(); + insert(ilist); + + return *this; + } +#else + htrie_set& operator=(std::initializer_list ilist) { + clear(); + insert(ilist); + + return *this; + } +#endif + + + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + size_type max_key_size() const noexcept { return m_ht.max_key_size(); } + + /** + * Call shrink_to_fit() on each hash node of the hat-trie to reduce its size. 
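For reference, a brief usage sketch of the htrie_set constructors and capacity members above; the keys, the burst threshold of 1024 and the include path are illustrative choices only.

    #include <iostream>
    #include <string>
    #include <vector>
    #include "tsl/htrie_set.h"   // assumed include path

    int main() {
        // Construct directly from an initializer list of keys.
        tsl::htrie_set<char> set = {"/foo", "/foo/bar", "/bar"};

        // A custom burst threshold can be supplied at construction.
        std::vector<std::string> keys = {"one", "two", "three"};
        tsl::htrie_set<char> tuned(1024);
        tuned.insert(keys.begin(), keys.end());

        std::cout << set.size() << '\n';          // 3
        std::cout << set.max_key_size() << '\n';  // limit derived from KeySizeT (std::uint16_t by default)
        set.shrink_to_fit();                      // compact every hash node of the trie
    }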
+ */ + void shrink_to_fit() { m_ht.shrink_to_fit(); } + + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + + + std::pair insert_ks(const CharT* key, size_type key_size) { + return m_ht.insert(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + std::pair insert(const std::basic_string_view& key) { + return m_ht.insert(key.data(), key.size()); + } +#else + std::pair insert(const CharT* key) { + return m_ht.insert(key, std::strlen(key)); + } + + std::pair insert(const std::basic_string& key) { + return m_ht.insert(key.data(), key.size()); + } +#endif + + + + template::value>::type* = nullptr> + void insert(InputIt first, InputIt last) { + for(auto it = first; it != last; ++it) { + insert(*it); + } + } + + + +#ifdef TSL_HT_HAS_STRING_VIEW + void insert(std::initializer_list> ilist) { + insert(ilist.begin(), ilist.end()); + } +#else + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } +#endif + + + + std::pair emplace_ks(const CharT* key, size_type key_size) { + return m_ht.insert(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + std::pair emplace(const std::basic_string_view& key) { + return m_ht.insert(key.data(), key.size()); + } +#else + std::pair emplace(const CharT* key) { + return m_ht.insert(key, std::strlen(key)); + } + + std::pair emplace(const std::basic_string& key) { + return m_ht.insert(key.data(), key.size()); + } +#endif + + + + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } + + + + size_type erase_ks(const CharT* key, size_type key_size) { + return m_ht.erase(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + size_type erase(const std::basic_string_view& key) { + return m_ht.erase(key.data(), key.size()); + } +#else + size_type erase(const CharT* key) { + return m_ht.erase(key, std::strlen(key)); + } + + size_type erase(const std::basic_string& key) { + return m_ht.erase(key.data(), key.size()); + } +#endif + + + + /** + * Erase all the elements which have 'prefix' as prefix. Return the number of erase elements. 
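As a concrete illustration of the erase members above, including the prefix erase just documented (routes, sizes and the include path are illustrative):

    #include "tsl/htrie_set.h"   // assumed include path

    void prune_routes() {
        tsl::htrie_set<char> routes = {"/api/v1/users", "/api/v1/posts", "/static/css"};

        // Length-explicit insert for keys that are not null-terminated.
        const char raw[] = {'/', 'a', 'p', 'i', '/', 'v', '2'};
        routes.insert_ks(raw, sizeof(raw));

        routes.erase("/static/css");       // erase a single key
        routes.erase_prefix("/api/v1/");   // erase every key that starts with "/api/v1/"
        // routes now holds only "/api/v2".
    }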
+ */ + size_type erase_prefix_ks(const CharT* prefix, size_type prefix_size) { + return m_ht.erase_prefix(prefix, prefix_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + /** + * @copydoc erase_prefix_ks(const CharT* prefix, size_type prefix_size) + */ + size_type erase_prefix(const std::basic_string_view& prefix) { + return m_ht.erase_prefix(prefix.data(), prefix.size()); + } +#else + /** + * @copydoc erase_prefix_ks(const CharT* prefix, size_type prefix_size) + */ + size_type erase_prefix(const CharT* prefix) { + return m_ht.erase_prefix(prefix, std::strlen(prefix)); + } + + /** + * @copydoc erase_prefix_ks(const CharT* prefix, size_type prefix_size) + */ + size_type erase_prefix(const std::basic_string& prefix) { + return m_ht.erase_prefix(prefix.data(), prefix.size()); + } +#endif + + + + void swap(htrie_set& other) { other.m_ht.swap(m_ht); } + + + /* + * Lookup + */ + size_type count_ks(const CharT* key, size_type key_size) const { return m_ht.count(key, key_size); } +#ifdef TSL_HT_HAS_STRING_VIEW + size_type count(const std::basic_string_view& key) const { return m_ht.count(key.data(), key.size()); } +#else + size_type count(const CharT* key) const { return m_ht.count(key, std::strlen(key)); } + size_type count(const std::basic_string& key) const { return m_ht.count(key.data(), key.size()); } +#endif + + + + iterator find_ks(const CharT* key, size_type key_size) { + return m_ht.find(key, key_size); + } + + const_iterator find_ks(const CharT* key, size_type key_size) const { + return m_ht.find(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + iterator find(const std::basic_string_view& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string_view& key) const { + return m_ht.find(key.data(), key.size()); + } +#else + iterator find(const CharT* key) { + return m_ht.find(key, std::strlen(key)); + } + + const_iterator find(const CharT* key) const { + return m_ht.find(key, std::strlen(key)); + } + + iterator find(const std::basic_string& key) { + return m_ht.find(key.data(), key.size()); + } + + const_iterator find(const std::basic_string& key) const { + return m_ht.find(key.data(), key.size()); + } +#endif + + + + std::pair equal_range_ks(const CharT* key, size_type key_size) { + return m_ht.equal_range(key, key_size); + } + + std::pair equal_range_ks(const CharT* key, size_type key_size) const { + return m_ht.equal_range(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + std::pair equal_range(const std::basic_string_view& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string_view& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#else + std::pair equal_range(const CharT* key) { + return m_ht.equal_range(key, std::strlen(key)); + } + + std::pair equal_range(const CharT* key) const { + return m_ht.equal_range(key, std::strlen(key)); + } + + std::pair equal_range(const std::basic_string& key) { + return m_ht.equal_range(key.data(), key.size()); + } + + std::pair equal_range(const std::basic_string& key) const { + return m_ht.equal_range(key.data(), key.size()); + } +#endif + + + + /** + * Return a range containing all the elements which have 'prefix' as prefix. The range is defined by a pair + * of iterator, the first being the begin iterator and the second being the end iterator. 
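The equal_prefix_range lookup documented above pairs naturally with completion-style queries. A small sketch (data and include path are illustrative; it.key() assumes the key accessor the hat-trie iterators expose, since keys are not stored as contiguous strings):

    #include <iostream>
    #include <string>
    #include "tsl/htrie_set.h"   // assumed include path

    void complete(const tsl::htrie_set<char>& dictionary, const std::string& typed) {
        // first is the begin iterator of the matching range, second its end.
        auto range = dictionary.equal_prefix_range(typed);

        for(auto it = range.first; it != range.second; ++it) {
            std::cout << it.key() << '\n';   // rebuilds the full key
        }
    }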
+ */ + std::pair equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) { + return m_ht.equal_prefix_range(prefix, prefix_size); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) const { + return m_ht.equal_prefix_range(prefix, prefix_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string_view& prefix) { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string_view& prefix) const { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } +#else + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const CharT* prefix) { + return m_ht.equal_prefix_range(prefix, std::strlen(prefix)); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const CharT* prefix) const { + return m_ht.equal_prefix_range(prefix, std::strlen(prefix)); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string& prefix) { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } + + /** + * @copydoc equal_prefix_range_ks(const CharT* prefix, size_type prefix_size) + */ + std::pair equal_prefix_range(const std::basic_string& prefix) const { + return m_ht.equal_prefix_range(prefix.data(), prefix.size()); + } +#endif + + + + /** + * Return the element in the trie which is the longest prefix of `key`. If no + * element in the trie is a prefix of `key`, the end iterator is returned. 
+ * + * Example: + * + * tsl::htrie_set set = {"/foo", "/foo/bar"}; + * + * set.longest_prefix("/foo"); // returns "/foo" + * set.longest_prefix("/foo/baz"); // returns "/foo" + * set.longest_prefix("/foo/bar/baz"); // returns "/foo/bar" + * set.longest_prefix("/foo/bar/"); // returns "/foo/bar" + * set.longest_prefix("/bar"); // returns end() + * set.longest_prefix(""); // returns end() + */ + iterator longest_prefix_ks(const CharT* key, size_type key_size) { + return m_ht.longest_prefix(key, key_size); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix_ks(const CharT* key, size_type key_size) const { + return m_ht.longest_prefix(key, key_size); + } +#ifdef TSL_HT_HAS_STRING_VIEW + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + iterator longest_prefix(const std::basic_string_view& key) { + return m_ht.longest_prefix(key.data(), key.size()); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix(const std::basic_string_view& key) const { + return m_ht.longest_prefix(key.data(), key.size()); + } +#else + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + iterator longest_prefix(const CharT* key) { + return m_ht.longest_prefix(key, std::strlen(key)); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix(const CharT* key) const { + return m_ht.longest_prefix(key, std::strlen(key)); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + iterator longest_prefix(const std::basic_string& key) { + return m_ht.longest_prefix(key.data(), key.size()); + } + + /** + * @copydoc longest_prefix_ks(const CharT* key, size_type key_size) + */ + const_iterator longest_prefix(const std::basic_string& key) const { + return m_ht.longest_prefix(key.data(), key.size()); + } +#endif + + + + /* + * Hash policy + */ + float max_load_factor() const { return m_ht.max_load_factor(); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + + /* + * Burst policy + */ + size_type burst_threshold() const { return m_ht.burst_threshold(); } + void burst_threshold(size_type threshold) { m_ht.burst_threshold(threshold); } + + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + + + + /* + * Other + */ + + /** + * Serialize the set through the `serializer` parameter. + * + * The `serializer` parameter must be a function object that supports the following calls: + * - `void operator()(const U& value);` where the types `std::uint64_t` and `float` must be supported for U. + * - `void operator()(const CharT* value, std::size_t value_size);` + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, ...) of the types it serializes + * in the hands of the `Serializer` function object if compatibility is required. + */ + template + void serialize(Serializer& serializer) const { + m_ht.serialize(serializer); + } + + + /** + * Deserialize a previously serialized set through the `deserializer` parameter. + * + * The `deserializer` parameter must be a function object that supports the following calls: + * - `template U operator()();` where the types `std::uint64_t` and `float` must be supported for U. 
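The longest_prefix lookup above maps directly onto most-specific-match dispatch, mirroring the table in its comment. A sketch (mount points and include path are illustrative; the key is rebuilt through the iterator's key accessor):

    #include <iostream>
    #include <string>
    #include "tsl/htrie_set.h"   // assumed include path

    void match(const tsl::htrie_set<char>& mounted, const std::string& path) {
        // Most specific element of `mounted` that is a prefix of `path`, if any.
        auto it = mounted.longest_prefix(path);
        if(it != mounted.end()) {
            std::cout << path << " is covered by " << it.key() << '\n';
        } else {
            std::cout << "no entry is a prefix of " << path << '\n';
        }
    }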
+ * - `void operator()(CharT* value_out, std::size_t value_size);` + * + * If the deserialized hash set part of the hat-trie is hash compatible with the serialized set, the deserialization process + * can be sped up by setting `hash_compatible` to true. To be hash compatible, the Hash (take care of the 32-bits vs 64 bits), + * and KeySizeT must behave the same than the ones used in the serialized set. Otherwise the behaviour is undefined + * with `hash_compatible` sets to true. + * + * The behaviour is undefined if the type `CharT` of the `htrie_set` is not the same as the + * type used during serialization. + * + * The implementation leaves binary compatibility (endianness, IEEE 754 for floats, size of int, ...) of the types it + * deserializes in the hands of the `Deserializer` function object if compatibility is required. + */ + template + static htrie_set deserialize(Deserializer& deserializer, bool hash_compatible = false) { + htrie_set set; + set.m_ht.deserialize(deserializer, hash_compatible); + + return set; + } + + friend bool operator==(const htrie_set& lhs, const htrie_set& rhs) { + if(lhs.size() != rhs.size()) { + return false; + } + + std::string key_buffer; + for(auto it = lhs.cbegin(); it != lhs.cend(); ++it) { + it.key(key_buffer); + + const auto it_element_rhs = rhs.find(key_buffer); + if(it_element_rhs == rhs.cend()) { + return false; + } + } + + return true; + } + + friend bool operator!=(const htrie_set& lhs, const htrie_set& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(htrie_set& lhs, htrie_set& rhs) { + lhs.swap(rhs); + } + +private: + ht m_ht; +}; + +} // end namespace tsl + +#endif diff --git a/ios/include/tsl/robin_growth_policy.h b/ios/include/tsl/robin_growth_policy.h index 61595f0a..daf6bf56 100644 --- a/ios/include/tsl/robin_growth_policy.h +++ b/ios/include/tsl/robin_growth_policy.h @@ -1,290 +1,290 @@ -/** - * MIT License - * - * Copyright (c) 2017 Tessil - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef TSL_ROBIN_GROWTH_POLICY_H -#define TSL_ROBIN_GROWTH_POLICY_H - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __EXCEPTIONS -# define THROW(_e, _m) throw _e(_m) -#else -# include -# ifndef NDEBUG -# define THROW(_e, _m) do { fprintf(stderr, _m); std::terminate(); } while(0) -# else -# define THROW(_e, _m) std::terminate() -# endif -#endif - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - -#if __has_builtin(__builtin_expect) -# define TSL_LIKELY( exp ) (__builtin_expect( !!(exp), true )) -#else -# define TSL_LIKELY( exp ) (exp) -#endif - -namespace tsl { -namespace rh { - -/** - * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a power of two. It allows - * the table to use a mask operation instead of a modulo operation to map a hash to a bucket. - * - * GrowthFactor must be a power of two >= 2. - */ -template -class power_of_two_growth_policy { -public: - /** - * Called on the hash table creation and on rehash. The number of buckets for the table is passed in parameter. - * This number is a minimum, the policy may update this value with a higher value if needed (but not lower). - */ - power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) { - if(min_bucket_count_in_out > max_bucket_count()) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - static_assert(MIN_BUCKETS_SIZE > 0, "MIN_BUCKETS_SIZE must be > 0."); - const std::size_t min_bucket_count = MIN_BUCKETS_SIZE; - - min_bucket_count_in_out = std::max(min_bucket_count, min_bucket_count_in_out); - min_bucket_count_in_out = round_up_to_power_of_two(min_bucket_count_in_out); - m_mask = min_bucket_count_in_out - 1; - } - - /** - * Return the bucket [0, bucket_count()) to which the hash belongs. - */ - std::size_t bucket_for_hash(std::size_t hash) const noexcept { - return hash & m_mask; - } - - /** - * Return the bucket count to use when the bucket array grows on rehash. - */ - std::size_t next_bucket_count() const { - if((m_mask + 1) > max_bucket_count() / GrowthFactor) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - return (m_mask + 1) * GrowthFactor; - } - - /** - * Return the maximum number of buckets supported by the policy. - */ - std::size_t max_bucket_count() const { - // Largest power of two. - return (std::numeric_limits::max() / 2) + 1; - } - -private: - static std::size_t round_up_to_power_of_two(std::size_t value) { - if(is_power_of_two(value)) { - return value; - } - - if(value == 0) { - return 1; - } - - --value; - for(std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) { - value |= value >> i; - } - - return value + 1; - } - - static constexpr bool is_power_of_two(std::size_t value) { - return value != 0 && (value & (value - 1)) == 0; - } - -protected: - static const std::size_t MIN_BUCKETS_SIZE = 2; - static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2, "GrowthFactor must be a power of two >= 2."); - - std::size_t m_mask; -}; - - -/** - * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo to map a hash - * to a bucket. Slower but it can be usefull if you want a slower growth. 
- */ -template> -class mod_growth_policy { -public: - mod_growth_policy(std::size_t& min_bucket_count_in_out) { - if(min_bucket_count_in_out > max_bucket_count()) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - static_assert(MIN_BUCKETS_SIZE > 0, "MIN_BUCKETS_SIZE must be > 0."); - const std::size_t min_bucket_count = MIN_BUCKETS_SIZE; - - min_bucket_count_in_out = std::max(min_bucket_count, min_bucket_count_in_out); - m_bucket_count = min_bucket_count_in_out; - } - - std::size_t bucket_for_hash(std::size_t hash) const noexcept { - return hash % m_bucket_count; - } - - std::size_t next_bucket_count() const { - if(m_bucket_count == max_bucket_count()) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - const double next_bucket_count = std::ceil(double(m_bucket_count) * REHASH_SIZE_MULTIPLICATION_FACTOR); - if(!std::isnormal(next_bucket_count)) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - if(next_bucket_count > double(max_bucket_count())) { - return max_bucket_count(); - } - else { - return std::size_t(next_bucket_count); - } - } - - std::size_t max_bucket_count() const { - return MAX_BUCKET_COUNT; - } - -private: - static const std::size_t MIN_BUCKETS_SIZE = 2; - static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR = 1.0 * GrowthFactor::num / GrowthFactor::den; - static const std::size_t MAX_BUCKET_COUNT = - std::size_t(double( - std::numeric_limits::max() / REHASH_SIZE_MULTIPLICATION_FACTOR - )); - - static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, "Growth factor should be >= 1.1."); - - std::size_t m_bucket_count; -}; - - - -namespace detail { - -static constexpr const std::array PRIMES = {{ - 5ul, 17ul, 29ul, 37ul, 53ul, 67ul, 79ul, 97ul, 131ul, 193ul, 257ul, 389ul, 521ul, 769ul, 1031ul, 1543ul, 2053ul, - 3079ul, 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, - 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, 402653189ul, 805306457ul, - 1610612741ul, 3221225473ul, 4294967291ul -}}; - -template -static constexpr std::size_t mod(std::size_t hash) { return hash % PRIMES[IPrime]; } - -// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for faster modulo as the -// compiler can optimize the modulo code better with a constant known at the compilation. -static constexpr const std::array MOD_PRIME = {{ - &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>, &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, - &mod<11>, &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>, &mod<18>, &mod<19>, &mod<20>, - &mod<21>, &mod<22>, &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>, &mod<29>, &mod<30>, - &mod<31>, &mod<32>, &mod<33>, &mod<34>, &mod<35>, &mod<36>, &mod<37> , &mod<38> -}}; - -} - -/** - * Grow the hash table by using prime numbers as bucket count. Slower than tsl::rh::power_of_two_growth_policy in - * general but will probably distribute the values around better in the buckets with a poor hash function. - * - * To allow the compiler to optimize the modulo operation, a lookup table is used with constant primes numbers. - * - * With a switch the code would look like: - * \code - * switch(iprime) { // iprime is the current prime of the hash table - * case 0: hash % 5ul; - * break; - * case 1: hash % 17ul; - * break; - * case 2: hash % 29ul; - * break; - * ... 
- * } - * \endcode - * - * Due to the constant variable in the modulo the compiler is able to optimize the operation - * by a series of multiplications, substractions and shifts. - * - * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) * 5' in a 64 bits environement. - */ -class prime_growth_policy { -public: - prime_growth_policy(std::size_t& min_bucket_count_in_out) { - auto it_prime = std::lower_bound(detail::PRIMES.begin(), - detail::PRIMES.end(), min_bucket_count_in_out); - if(it_prime == detail::PRIMES.end()) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - m_iprime = static_cast(std::distance(detail::PRIMES.begin(), it_prime)); - min_bucket_count_in_out = *it_prime; - } - - std::size_t bucket_for_hash(std::size_t hash) const noexcept { - return detail::MOD_PRIME[m_iprime](hash); - } - - std::size_t next_bucket_count() const { - if(m_iprime + 1 >= detail::PRIMES.size()) { - THROW(std::length_error, "The hash table exceeds its maxmimum size."); - } - - return detail::PRIMES[m_iprime + 1]; - } - - std::size_t max_bucket_count() const { - return detail::PRIMES.back(); - } - -private: - unsigned int m_iprime; - - static_assert(std::numeric_limits::max() >= detail::PRIMES.size(), - "The type of m_iprime is not big enough."); -}; - -} -} - -#endif +/** + * MIT License + * + * Copyright (c) 2017 Tessil + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_GROWTH_POLICY_H +#define TSL_ROBIN_GROWTH_POLICY_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __EXCEPTIONS +# define THROW(_e, _m) throw _e(_m) +#else +# include +# ifndef NDEBUG +# define THROW(_e, _m) do { fprintf(stderr, _m); std::terminate(); } while(0) +# else +# define THROW(_e, _m) std::terminate() +# endif +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#if __has_builtin(__builtin_expect) +# define TSL_LIKELY( exp ) (__builtin_expect( !!(exp), true )) +#else +# define TSL_LIKELY( exp ) (exp) +#endif + +namespace tsl { +namespace rh { + +/** + * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a power of two. It allows + * the table to use a mask operation instead of a modulo operation to map a hash to a bucket. + * + * GrowthFactor must be a power of two >= 2. + */ +template +class power_of_two_growth_policy { +public: + /** + * Called on the hash table creation and on rehash. 
The number of buckets for the table is passed in parameter. + * This number is a minimum, the policy may update this value with a higher value if needed (but not lower). + */ + power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) { + if(min_bucket_count_in_out > max_bucket_count()) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + static_assert(MIN_BUCKETS_SIZE > 0, "MIN_BUCKETS_SIZE must be > 0."); + const std::size_t min_bucket_count = MIN_BUCKETS_SIZE; + + min_bucket_count_in_out = std::max(min_bucket_count, min_bucket_count_in_out); + min_bucket_count_in_out = round_up_to_power_of_two(min_bucket_count_in_out); + m_mask = min_bucket_count_in_out - 1; + } + + /** + * Return the bucket [0, bucket_count()) to which the hash belongs. + */ + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash & m_mask; + } + + /** + * Return the bucket count to use when the bucket array grows on rehash. + */ + std::size_t next_bucket_count() const { + if((m_mask + 1) > max_bucket_count() / GrowthFactor) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + return (m_mask + 1) * GrowthFactor; + } + + /** + * Return the maximum number of buckets supported by the policy. + */ + std::size_t max_bucket_count() const { + // Largest power of two. + return (std::numeric_limits::max() / 2) + 1; + } + +private: + static std::size_t round_up_to_power_of_two(std::size_t value) { + if(is_power_of_two(value)) { + return value; + } + + if(value == 0) { + return 1; + } + + --value; + for(std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) { + value |= value >> i; + } + + return value + 1; + } + + static constexpr bool is_power_of_two(std::size_t value) { + return value != 0 && (value & (value - 1)) == 0; + } + +protected: + static const std::size_t MIN_BUCKETS_SIZE = 2; + static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2, "GrowthFactor must be a power of two >= 2."); + + std::size_t m_mask; +}; + + +/** + * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo to map a hash + * to a bucket. Slower but it can be usefull if you want a slower growth. 
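The benefit of the power-of-two policy above is that for a power-of-two bucket count n, hash % n and hash & (n - 1) select the same bucket, so the division disappears. A standalone sketch of that equivalence, reusing the bit-smearing rounding trick from round_up_to_power_of_two() (illustrative only):

    #include <cassert>
    #include <climits>
    #include <cstddef>

    // Same smear-and-increment rounding as round_up_to_power_of_two() above.
    std::size_t next_power_of_two(std::size_t value) {
        if(value == 0) return 1;
        --value;
        for(std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) {
            value |= value >> i;
        }
        return value + 1;
    }

    int main() {
        const std::size_t bucket_count = next_power_of_two(100);   // 128
        const std::size_t mask = bucket_count - 1;                 // 0x7f

        for(std::size_t hash = 0; hash < 100000; ++hash) {
            // The mask and the modulo pick the same bucket.
            assert((hash & mask) == (hash % bucket_count));
        }
    }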
+ */ +template> +class mod_growth_policy { +public: + mod_growth_policy(std::size_t& min_bucket_count_in_out) { + if(min_bucket_count_in_out > max_bucket_count()) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + static_assert(MIN_BUCKETS_SIZE > 0, "MIN_BUCKETS_SIZE must be > 0."); + const std::size_t min_bucket_count = MIN_BUCKETS_SIZE; + + min_bucket_count_in_out = std::max(min_bucket_count, min_bucket_count_in_out); + m_bucket_count = min_bucket_count_in_out; + } + + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash % m_bucket_count; + } + + std::size_t next_bucket_count() const { + if(m_bucket_count == max_bucket_count()) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + const double next_bucket_count = std::ceil(double(m_bucket_count) * REHASH_SIZE_MULTIPLICATION_FACTOR); + if(!std::isnormal(next_bucket_count)) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + if(next_bucket_count > double(max_bucket_count())) { + return max_bucket_count(); + } + else { + return std::size_t(next_bucket_count); + } + } + + std::size_t max_bucket_count() const { + return MAX_BUCKET_COUNT; + } + +private: + static const std::size_t MIN_BUCKETS_SIZE = 2; + static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR = 1.0 * GrowthFactor::num / GrowthFactor::den; + static const std::size_t MAX_BUCKET_COUNT = + std::size_t(double( + std::numeric_limits::max() / REHASH_SIZE_MULTIPLICATION_FACTOR + )); + + static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, "Growth factor should be >= 1.1."); + + std::size_t m_bucket_count; +}; + + + +namespace detail { + +static constexpr const std::array PRIMES = {{ + 5ul, 17ul, 29ul, 37ul, 53ul, 67ul, 79ul, 97ul, 131ul, 193ul, 257ul, 389ul, 521ul, 769ul, 1031ul, 1543ul, 2053ul, + 3079ul, 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, + 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, 402653189ul, 805306457ul, + 1610612741ul, 3221225473ul, 4294967291ul +}}; + +template +static constexpr std::size_t mod(std::size_t hash) { return hash % PRIMES[IPrime]; } + +// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for faster modulo as the +// compiler can optimize the modulo code better with a constant known at the compilation. +static constexpr const std::array MOD_PRIME = {{ + &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>, &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, + &mod<11>, &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>, &mod<18>, &mod<19>, &mod<20>, + &mod<21>, &mod<22>, &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>, &mod<29>, &mod<30>, + &mod<31>, &mod<32>, &mod<33>, &mod<34>, &mod<35>, &mod<36>, &mod<37> , &mod<38> +}}; + +} + +/** + * Grow the hash table by using prime numbers as bucket count. Slower than tsl::rh::power_of_two_growth_policy in + * general but will probably distribute the values around better in the buckets with a poor hash function. + * + * To allow the compiler to optimize the modulo operation, a lookup table is used with constant primes numbers. + * + * With a switch the code would look like: + * \code + * switch(iprime) { // iprime is the current prime of the hash table + * case 0: hash % 5ul; + * break; + * case 1: hash % 17ul; + * break; + * case 2: hash % 29ul; + * break; + * ... 
+ * } + * \endcode + * + * Due to the constant variable in the modulo the compiler is able to optimize the operation + * by a series of multiplications, substractions and shifts. + * + * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) * 5' in a 64 bits environement. + */ +class prime_growth_policy { +public: + prime_growth_policy(std::size_t& min_bucket_count_in_out) { + auto it_prime = std::lower_bound(detail::PRIMES.begin(), + detail::PRIMES.end(), min_bucket_count_in_out); + if(it_prime == detail::PRIMES.end()) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + m_iprime = static_cast(std::distance(detail::PRIMES.begin(), it_prime)); + min_bucket_count_in_out = *it_prime; + } + + std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return detail::MOD_PRIME[m_iprime](hash); + } + + std::size_t next_bucket_count() const { + if(m_iprime + 1 >= detail::PRIMES.size()) { + THROW(std::length_error, "The hash table exceeds its maxmimum size."); + } + + return detail::PRIMES[m_iprime + 1]; + } + + std::size_t max_bucket_count() const { + return detail::PRIMES.back(); + } + +private: + unsigned int m_iprime; + + static_assert(std::numeric_limits::max() >= detail::PRIMES.size(), + "The type of m_iprime is not big enough."); +}; + +} +} + +#endif diff --git a/ios/include/tsl/robin_hash.h b/ios/include/tsl/robin_hash.h index 3f2b8fa4..0f94a07a 100644 --- a/ios/include/tsl/robin_hash.h +++ b/ios/include/tsl/robin_hash.h @@ -1,1252 +1,1252 @@ -/** - * MIT License - * - * Copyright (c) 2017 Tessil - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
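The prime policy above gets its speed from dispatching to a function whose modulus is a compile-time constant, which the compiler can strength-reduce to multiplications, subtractions and shifts instead of a division. A reduced sketch of the same dispatch idea, with a three-entry table for brevity (purely illustrative, not the real PRIMES table):

    #include <array>
    #include <cstddef>

    namespace sketch {

    constexpr std::array<std::size_t, 3> PRIMES = {{5, 17, 29}};

    // Each instantiation divides by a constant known at compile time.
    template<unsigned int IPrime>
    std::size_t mod(std::size_t hash) { return hash % PRIMES[IPrime]; }

    // Table of pointers: the indirection is cheap, the constant modulo is fast.
    constexpr std::array<std::size_t (*)(std::size_t), 3> MOD_PRIME = {{
        &mod<0>, &mod<1>, &mod<2>
    }};

    std::size_t bucket_for_hash(unsigned int iprime, std::size_t hash) {
        return MOD_PRIME[iprime](hash);
    }

    } // namespace sketch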
- */ -#ifndef TSL_ROBIN_HASH_H -#define TSL_ROBIN_HASH_H - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "robin_growth_policy.h" - - - -#ifndef tsl_assert - #ifdef TSL_DEBUG - #define tsl_assert(expr) assert(expr) - #else - #define tsl_assert(expr) (static_cast(0)) - #endif -#endif - - - -namespace tsl { - -namespace detail_robin_hash { - -template -struct make_void { - using type = void; -}; - -template -struct has_is_transparent: std::false_type { -}; - -template -struct has_is_transparent::type>: std::true_type { -}; - -template -struct is_power_of_two_policy: std::false_type { -}; - -template -struct is_power_of_two_policy>: std::true_type { -}; - - - -using truncated_hash_type = std::uint_least32_t; - -/** - * Helper class that store a truncated hash if StoreHash is true and nothing otherwise. - */ -template -class bucket_entry_hash { -public: - bool bucket_hash_equal(std::size_t /*hash*/) const noexcept { - return true; - } - - truncated_hash_type truncated_hash() const noexcept { - return 0; - } - -protected: - void set_hash(truncated_hash_type /*hash*/) noexcept { - } -}; - -template<> -class bucket_entry_hash { -public: - bool bucket_hash_equal(std::size_t hash) const noexcept { - return m_hash == truncated_hash_type(hash); - } - - truncated_hash_type truncated_hash() const noexcept { - return m_hash; - } - -protected: - void set_hash(truncated_hash_type hash) noexcept { - m_hash = truncated_hash_type(hash); - } - -private: - truncated_hash_type m_hash; -}; - - -/** - * Each bucket entry has: - * - A value of type `ValueType`. - * - An integer to store how far the value of the bucket, if any, is from its ideal bucket - * (ex: if the current bucket 5 has the value 'foo' and `hash('foo') % nb_buckets` == 3, - * `dist_from_ideal_bucket()` will return 2 as the current value of the bucket is two - * buckets away from its ideal bucket) - * If there is no value in the bucket (i.e. `empty()` is true) `dist_from_ideal_bucket()` will be < 0. - * - A marker which tells us if the bucket is the last bucket of the bucket array (useful for the - * iterator of the hash table). - * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also stored in the bucket. - * If the size of the hash is more than 32 bits, it is truncated. We don't store the full hash - * as storing the hash is a potential opportunity to use the unused space due to the alignement - * of the bucket_entry structure. We can thus potentially store the hash without any extra space - * (which would not be possible with 64 bits of the hash). - */ -template -class bucket_entry: public bucket_entry_hash { - using bucket_hash = bucket_entry_hash; - -public: - using value_type = ValueType; - using distance_type = std::int_least16_t; - - - bucket_entry() noexcept: bucket_hash(), m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), - m_last_bucket(false) - { - tsl_assert(empty()); - } - - bucket_entry(const bucket_entry& other) noexcept(std::is_nothrow_copy_constructible::value): - bucket_hash(other), - m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), - m_last_bucket(other.m_last_bucket) - { - if(!other.empty()) { - ::new (static_cast(std::addressof(m_value))) value_type(other.value()); - m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; - } - } - - /** - * Never really used, but still necessary as we must call resize on an empty `std::vector`. 
- * and we need to support move-only types. See robin_hash constructor for details. - */ - bucket_entry(bucket_entry&& other) noexcept(std::is_nothrow_move_constructible::value): - bucket_hash(std::move(other)), - m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), - m_last_bucket(other.m_last_bucket) - { - if(!other.empty()) { - ::new (static_cast(std::addressof(m_value))) value_type(std::move(other.value())); - m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; - } - } - - bucket_entry& operator=(const bucket_entry& other) - noexcept(std::is_nothrow_copy_constructible::value) - { - if(this != &other) { - clear(); - - bucket_hash::operator=(other); - if(!other.empty()) { - ::new (static_cast(std::addressof(m_value))) value_type(other.value()); - } - - m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; - m_last_bucket = other.m_last_bucket; - } - - return *this; - } - - bucket_entry& operator=(bucket_entry&& ) = delete; - - ~bucket_entry() noexcept { - clear(); - } - - void clear() noexcept { - if(!empty()) { - destroy_value(); - m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; - } - } - - bool empty() const noexcept { - return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; - } - - value_type& value() noexcept { - tsl_assert(!empty()); - return *reinterpret_cast(std::addressof(m_value)); - } - - const value_type& value() const noexcept { - tsl_assert(!empty()); - return *reinterpret_cast(std::addressof(m_value)); - } - - distance_type dist_from_ideal_bucket() const noexcept { - return m_dist_from_ideal_bucket; - } - - bool last_bucket() const noexcept { - return m_last_bucket; - } - - void set_as_last_bucket() noexcept { - m_last_bucket = true; - } - - template - void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket, - truncated_hash_type hash, Args&&... value_type_args) - { - tsl_assert(dist_from_ideal_bucket >= 0); - tsl_assert(empty()); - - ::new (static_cast(std::addressof(m_value))) value_type(std::forward(value_type_args)...); - this->set_hash(hash); - m_dist_from_ideal_bucket = dist_from_ideal_bucket; - - tsl_assert(!empty()); - } - - void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket, - truncated_hash_type& hash, value_type& value) - { - tsl_assert(!empty()); - - using std::swap; - swap(value, this->value()); - swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket); - - // Avoid warning of unused variable if StoreHash is false - (void) hash; - if(StoreHash) { - const truncated_hash_type tmp_hash = this->truncated_hash(); - this->set_hash(hash); - hash = tmp_hash; - } - } - - static truncated_hash_type truncate_hash(std::size_t hash) noexcept { - return truncated_hash_type(hash); - } - -private: - void destroy_value() noexcept { - tsl_assert(!empty()); - value().~value_type(); - } - -private: - using storage = typename std::aligned_storage::type; - - static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1; - - distance_type m_dist_from_ideal_bucket; - bool m_last_bucket; - storage m_value; -}; - - - -/** - * Internal common class used by `robin_map` and `robin_set`. - * - * ValueType is what will be stored by `robin_hash` (usually `std::pair` for map and `Key` for set). - * - * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in parameter and returns a - * reference to the key. - * - * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in parameter and returns a - * reference to the value. 
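To make the KeySelect/ValueSelect requirements above concrete, here is a sketch of the kind of function objects a set-like and a map-like wrapper would plug into robin_hash; the names are made up for illustration and are not the ones used by robin_set/robin_map.

    #include <utility>

    // Set-like wrapper: ValueType is the key itself, ValueSelect is void.
    template<class Key>
    struct set_key_select {
        using key_type = Key;
        const key_type& operator()(const Key& key) const noexcept { return key; }
        key_type& operator()(Key& key) const noexcept { return key; }
    };

    // Map-like wrapper: ValueType is std::pair<Key, T>.
    template<class Key, class T>
    struct map_key_select {
        using key_type = Key;
        const key_type& operator()(const std::pair<Key, T>& p) const noexcept { return p.first; }
        key_type& operator()(std::pair<Key, T>& p) const noexcept { return p.first; }
    };

    template<class Key, class T>
    struct map_value_select {
        using value_type = T;
        const value_type& operator()(const std::pair<Key, T>& p) const noexcept { return p.second; }
        value_type& operator()(std::pair<Key, T>& p) const noexcept { return p.second; }
    };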
`ValueSelect` should be void if there is no value (in a set for example). - * - * The strong exception guarantee only holds if the expression - * `std::is_nothrow_swappable::value && std::is_nothrow_move_constructible::value` is true. - * - * Behaviour is undefined if the destructor of `ValueType` throws. - */ -template -class robin_hash: private Hash, private KeyEqual, private GrowthPolicy { -private: - template - using has_mapped_type = typename std::integral_constant::value>; - - -public: - template - class robin_iterator; - - using key_type = typename KeySelect::key_type; - using value_type = ValueType; - using size_type = std::size_t; - using difference_type = std::ptrdiff_t; - using hasher = Hash; - using key_equal = KeyEqual; - using allocator_type = Allocator; - using reference = value_type&; - using const_reference = const value_type&; - using pointer = value_type*; - using const_pointer = const value_type*; - using iterator = robin_iterator; - using const_iterator = robin_iterator; - - -private: - /** - * Either store the hash because we are asked by the `StoreHash` template parameter - * or store the hash because it doesn't cost us anything in size and can be used to speed up rehash. - */ - static constexpr bool STORE_HASH = StoreHash || - ( - (sizeof(tsl::detail_robin_hash::bucket_entry) == - sizeof(tsl::detail_robin_hash::bucket_entry)) - && - (sizeof(std::size_t) == sizeof(truncated_hash_type) || - is_power_of_two_policy::value) - && - // Don't store the hash for primitive types with default hash. - (!std::is_arithmetic::value || - !std::is_same>::value) - ); - - /** - * Only use the stored hash on lookup if we are explictly asked. We are not sure how slow - * the KeyEqual operation is. An extra comparison may slow things down with a fast KeyEqual. - */ - static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash; - - /** - * We can only use the hash on rehash if the size of the hash type is the same as the stored one or - * if we use a power of two modulo. In the case of the power of two modulo, we just mask - * the least significant bytes, we just have to check that the truncated_hash_type didn't truncated - * more bytes. - */ - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunknown-pragmas" -#pragma clang diagnostic ignored "-Wunknown-warning-option" -#pragma clang diagnostic ignored "-Wtautological-constant-compare" -#endif - - static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) { - (void) bucket_count; - if(STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) { - return true; - } - else if(STORE_HASH && is_power_of_two_policy::value) { - tsl_assert(bucket_count > 0); - return (bucket_count - 1) <= std::numeric_limits::max(); - } - else { - return false; - } - } - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - - using bucket_entry = tsl::detail_robin_hash::bucket_entry; - using distance_type = typename bucket_entry::distance_type; - - using buckets_allocator = typename std::allocator_traits::template rebind_alloc; - using buckets_container_type = std::vector; - - -public: - /** - * The 'operator*()' and 'operator->()' methods return a const reference and const pointer respectively to the - * stored value type. - * - * In case of a map, to get a mutable reference to the value associated to a key (the '.second' in the - * stored pair), you have to call 'value()'. 
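This iterator behaviour is the main user-visible difference from std::unordered_map: the pair seen through operator* / operator-> is const, and mutation of the mapped value goes through value(). A short sketch using tsl::robin_map, the public map built on this robin_hash (include path assumed):

    #include <iostream>
    #include <string>
    #include "tsl/robin_map.h"   // assumed include path

    int main() {
        tsl::robin_map<std::string, int> counts = {{"a", 1}, {"b", 2}};

        auto it = counts.find("a");
        if(it != counts.end()) {
            // it->second = 10;  // would not compile: operator-> yields a const pair
            it.value() = 10;     // mutable access to the mapped value
            std::cout << it->first << " -> " << it->second << '\n';   // reads are fine
        }
    }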
- * - * The main reason for this is that if we returned a `std::pair&` instead - * of a `const std::pair&`, the user may modify the key which will put the map in a undefined state. - */ - template - class robin_iterator { - friend class robin_hash; - - private: - using iterator_bucket = typename std::conditional::type; - - - robin_iterator(iterator_bucket it) noexcept: m_iterator(it) { - } - - public: - using iterator_category = std::forward_iterator_tag; - using value_type = const typename robin_hash::value_type; - using difference_type = std::ptrdiff_t; - using reference = value_type&; - using pointer = value_type*; - - - robin_iterator() noexcept { - } - - robin_iterator(const robin_iterator& other) noexcept: m_iterator(other.m_iterator) { - } - - const typename robin_hash::key_type& key() const { - return KeySelect()(m_iterator->value()); - } - - template::value && IsConst>::type* = nullptr> - const typename U::value_type& value() const { - return U()(m_iterator->value()); - } - - template::value && !IsConst>::type* = nullptr> - typename U::value_type& value() { - return U()(m_iterator->value()); - } - - reference operator*() const { - return m_iterator->value(); - } - - pointer operator->() const { - return std::addressof(m_iterator->value()); - } - - robin_iterator& operator++() { - while(true) { - if(m_iterator->last_bucket()) { - ++m_iterator; - return *this; - } - - ++m_iterator; - if(!m_iterator->empty()) { - return *this; - } - } - } - - robin_iterator operator++(int) { - robin_iterator tmp(*this); - ++*this; - - return tmp; - } - - friend bool operator==(const robin_iterator& lhs, const robin_iterator& rhs) { - return lhs.m_iterator == rhs.m_iterator; - } - - friend bool operator!=(const robin_iterator& lhs, const robin_iterator& rhs) { - return !(lhs == rhs); - } - - private: - iterator_bucket m_iterator; - }; - - -public: - robin_hash(size_type bucket_count, - const Hash& hash, - const KeyEqual& equal, - const Allocator& alloc, - float max_load_factor): Hash(hash), KeyEqual(equal), - // We need a non-zero bucket_count - GrowthPolicy(bucket_count == 0?++bucket_count:bucket_count), - m_buckets(alloc), - m_bucket_count(bucket_count), - m_nb_elements(0), - m_grow_on_next_insert(false) - { - if(bucket_count > max_bucket_count()) { - THROW(std::length_error, "The map exceeds its maxmimum size."); - } - - /* - * We can't use the `vector(size_type count, const Allocator& alloc)` constructor - * as it's only available in C++14 and we need to support C++11. We thus must resize after using - * the `vector(const Allocator& alloc)` constructor. - * - * We can't use `vector(size_type count, const T& value, const Allocator& alloc)` as it requires the - * value T to be copyable. 
- */ - m_buckets.resize(m_bucket_count); - - tsl_assert(!m_buckets.empty()); - m_buckets.back().set_as_last_bucket(); - - - this->max_load_factor(max_load_factor); - } - - robin_hash(const robin_hash& other) = default; - - robin_hash(robin_hash&& other) noexcept(std::is_nothrow_move_constructible::value && - std::is_nothrow_move_constructible::value && - std::is_nothrow_move_constructible::value && - std::is_nothrow_move_constructible::value) - : Hash(std::move(static_cast(other))), - KeyEqual(std::move(static_cast(other))), - GrowthPolicy(std::move(static_cast(other))), - m_buckets(std::move(other.m_buckets)), - m_bucket_count(other.m_bucket_count), - m_nb_elements(other.m_nb_elements), - m_load_threshold(other.m_load_threshold), - m_max_load_factor(other.m_max_load_factor), - m_grow_on_next_insert(other.m_grow_on_next_insert) - { - other.clear(); - } - - robin_hash& operator=(const robin_hash& other) = default; - - robin_hash& operator=(robin_hash&& other) { - other.swap(*this); - other.clear(); - - return *this; - } - - allocator_type get_allocator() const { - return m_buckets.get_allocator(); - } - - - /* - * Iterators - */ - iterator begin() noexcept { - auto begin = m_buckets.begin(); - while(begin != m_buckets.end() && begin->empty()) { - ++begin; - } - - return iterator(begin); - } - - const_iterator begin() const noexcept { - return cbegin(); - } - - const_iterator cbegin() const noexcept { - auto begin = m_buckets.cbegin(); - while(begin != m_buckets.cend() && begin->empty()) { - ++begin; - } - - return const_iterator(begin); - } - - iterator end() noexcept { - return iterator(m_buckets.end()); - } - - const_iterator end() const noexcept { - return cend(); - } - - const_iterator cend() const noexcept { - return const_iterator(m_buckets.cend()); - } - - - /* - * Capacity - */ - bool empty() const noexcept { - return m_nb_elements == 0; - } - - size_type size() const noexcept { - return m_nb_elements; - } - - size_type max_size() const noexcept { - return m_buckets.max_size(); - } - - /* - * Modifiers - */ - void clear() noexcept { - for(auto& bucket: m_buckets) { - bucket.clear(); - } - - m_nb_elements = 0; - m_grow_on_next_insert = false; - } - - - - template - std::pair insert(P&& value) { - return insert_impl(KeySelect()(value), std::forward
<P>
(value)); - } - - template - iterator insert(const_iterator hint, P&& value) { - if(hint != cend() && compare_keys(KeySelect()(*hint), KeySelect()(value))) { - return mutable_iterator(hint); - } - - return insert(std::forward
<P>
(value)).first; - } - - template - void insert(InputIt first, InputIt last) { - if(std::is_base_of::iterator_category>::value) - { - const auto nb_elements_insert = std::distance(first, last); - const size_type nb_free_buckets = m_load_threshold - size(); - tsl_assert(m_load_threshold >= size()); - - if(nb_elements_insert > 0 && nb_free_buckets < size_type(nb_elements_insert)) { - reserve(size() + size_type(nb_elements_insert)); - } - } - - for(; first != last; ++first) { - insert(*first); - } - } - - - - template - std::pair insert_or_assign(K&& key, M&& obj) { - auto it = try_emplace(std::forward(key), std::forward(obj)); - if(!it.second) { - it.first.value() = std::forward(obj); - } - - return it; - } - - template - iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) { - if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { - auto it = mutable_iterator(hint); - it.value() = std::forward(obj); - - return it; - } - - return insert_or_assign(std::forward(key), std::forward(obj)).first; - } - - - template - std::pair emplace(Args&&... args) { - return insert(value_type(std::forward(args)...)); - } - - template - iterator emplace_hint(const_iterator hint, Args&&... args) { - return insert(hint, value_type(std::forward(args)...)); - } - - - - template - std::pair try_emplace(K&& key, Args&&... args) { - return insert_impl(key, std::piecewise_construct, - std::forward_as_tuple(std::forward(key)), - std::forward_as_tuple(std::forward(args)...)); - } - - template - iterator try_emplace(const_iterator hint, K&& key, Args&&... args) { - if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { - return mutable_iterator(hint); - } - - return try_emplace(std::forward(key), std::forward(args)...).first; - } - - /** - * Here to avoid `template size_type erase(const K& key)` being used when - * we use a iterator instead of a const_iterator. - */ - iterator erase(iterator pos) { - erase_from_bucket(pos); - - /** - * Erase bucket used a backward shift after clearing the bucket. - * Check if there is a new value in the bucket, if not get the next non-empty. - */ - if(pos.m_iterator->empty()) { - ++pos; - } - - return pos; - } - - iterator erase(const_iterator pos) { - return erase(mutable_iterator(pos)); - } - - iterator erase(const_iterator first, const_iterator last) { - if(first == last) { - return mutable_iterator(first); - } - - auto first_mutable = mutable_iterator(first); - auto last_mutable = mutable_iterator(last); - for(auto it = first_mutable.m_iterator; it != last_mutable.m_iterator; ++it) { - if(!it->empty()) { - it->clear(); - m_nb_elements--; - } - } - - if(last_mutable == end()) { - return end(); - } - - - /* - * Backward shift on the values which come after the deleted values. - * We try to move the values closer to their ideal bucket. 
- */ - std::size_t icloser_bucket = std::size_t(std::distance(m_buckets.begin(), first_mutable.m_iterator)); - std::size_t ito_move_closer_value = std::size_t(std::distance(m_buckets.begin(), last_mutable.m_iterator)); - tsl_assert(ito_move_closer_value > icloser_bucket); - - const std::size_t ireturn_bucket = ito_move_closer_value - - std::min(ito_move_closer_value - icloser_bucket, - std::size_t(m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); - - while(ito_move_closer_value < m_buckets.size() && m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) { - icloser_bucket = ito_move_closer_value - - std::min(ito_move_closer_value - icloser_bucket, - std::size_t(m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); - - - tsl_assert(m_buckets[icloser_bucket].empty()); - const distance_type new_distance = distance_type(m_buckets[ito_move_closer_value].dist_from_ideal_bucket() - - (ito_move_closer_value - icloser_bucket)); - m_buckets[icloser_bucket].set_value_of_empty_bucket(new_distance, - m_buckets[ito_move_closer_value].truncated_hash(), - std::move(m_buckets[ito_move_closer_value].value())); - m_buckets[ito_move_closer_value].clear(); - - - ++icloser_bucket; - ++ito_move_closer_value; - } - - - return iterator(m_buckets.begin() + ireturn_bucket); - } - - - template - size_type erase(const K& key) { - return erase(key, hash_key(key)); - } - - template - size_type erase(const K& key, std::size_t hash) { - auto it = find(key, hash); - if(it != end()) { - erase_from_bucket(it); - - return 1; - } - else { - return 0; - } - } - - - - - - void swap(robin_hash& other) { - using std::swap; - - swap(static_cast(*this), static_cast(other)); - swap(static_cast(*this), static_cast(other)); - swap(static_cast(*this), static_cast(other)); - swap(m_buckets, other.m_buckets); - swap(m_bucket_count, other.m_bucket_count); - swap(m_nb_elements, other.m_nb_elements); - swap(m_load_threshold, other.m_load_threshold); - swap(m_max_load_factor, other.m_max_load_factor); - swap(m_grow_on_next_insert, other.m_grow_on_next_insert); - } - - - /* - * Lookup - */ - template::value>::type* = nullptr> - typename U::value_type& at(const K& key) { - return at(key, hash_key(key)); - } - - template::value>::type* = nullptr> - typename U::value_type& at(const K& key, std::size_t hash) { - return const_cast(static_cast(this)->at(key, hash)); - } - - - template::value>::type* = nullptr> - const typename U::value_type& at(const K& key) const { - return at(key, hash_key(key)); - } - - template::value>::type* = nullptr> - const typename U::value_type& at(const K& key, std::size_t hash) const { - auto it = find(key, hash); - if(it != cend()) { - return it.value(); - } - else { - THROW(std::out_of_range, "Couldn't find key."); - } - } - - template::value>::type* = nullptr> - typename U::value_type& operator[](K&& key) { - return try_emplace(std::forward(key)).first.value(); - } - - - template - size_type count(const K& key) const { - return count(key, hash_key(key)); - } - - template - size_type count(const K& key, std::size_t hash) const { - if(find(key, hash) != cend()) { - return 1; - } - else { - return 0; - } - } - - - template - iterator find(const K& key) { - return find_impl(key, hash_key(key)); - } - - template - iterator find(const K& key, std::size_t hash) { - return find_impl(key, hash); - } - - - template - const_iterator find(const K& key) const { - return find_impl(key, hash_key(key)); - } - - template - const_iterator find(const K& key, std::size_t hash) const { - return find_impl(key, 
hash); - } - - - template - std::pair equal_range(const K& key) { - return equal_range(key, hash_key(key)); - } - - template - std::pair equal_range(const K& key, std::size_t hash) { - iterator it = find(key, hash); - return std::make_pair(it, (it == end())?it:std::next(it)); - } - - - template - std::pair equal_range(const K& key) const { - return equal_range(key, hash_key(key)); - } - - template - std::pair equal_range(const K& key, std::size_t hash) const { - const_iterator it = find(key, hash); - return std::make_pair(it, (it == cend())?it:std::next(it)); - } - - /* - * Bucket interface - */ - size_type bucket_count() const { - return m_bucket_count; - } - - size_type max_bucket_count() const { - return std::min(GrowthPolicy::max_bucket_count(), m_buckets.max_size()); - } - - /* - * Hash policy - */ - float load_factor() const { - return float(m_nb_elements)/float(bucket_count()); - } - - float max_load_factor() const { - return m_max_load_factor; - } - - void max_load_factor(float ml) { - m_max_load_factor = std::max(0.1f, std::min(ml, 0.95f)); - m_load_threshold = size_type(float(bucket_count())*m_max_load_factor); - } - - void rehash(size_type count) { - count = std::max(count, size_type(std::ceil(float(size())/max_load_factor()))); - rehash_impl(count); - } - - void reserve(size_type count) { - rehash(size_type(std::ceil(float(count)/max_load_factor()))); - } - - /* - * Observers - */ - hasher hash_function() const { - return static_cast(*this); - } - - key_equal key_eq() const { - return static_cast(*this); - } - - - /* - * Other - */ - iterator mutable_iterator(const_iterator pos) { - return iterator(m_buckets.begin() + std::distance(m_buckets.cbegin(), pos.m_iterator)); - } - -private: - template - std::size_t hash_key(const K& key) const { - return Hash::operator()(key); - } - - template - bool compare_keys(const K1& key1, const K2& key2) const { - return KeyEqual::operator()(key1, key2); - } - - std::size_t bucket_for_hash(std::size_t hash) const { - return GrowthPolicy::bucket_for_hash(hash); - } - - template::value>::type* = nullptr> - std::size_t next_bucket(std::size_t index) const noexcept { - tsl_assert(index < bucket_count()); - - return (index + 1) & this->m_mask; - } - - template::value>::type* = nullptr> - std::size_t next_bucket(std::size_t index) const noexcept { - tsl_assert(index < bucket_count()); - - index++; - return (index != bucket_count())?index:0; - } - - - - template - iterator find_impl(const K& key, std::size_t hash) { - return mutable_iterator(static_cast(this)->find(key, hash)); - } - - template - const_iterator find_impl(const K& key, std::size_t hash) const { - std::size_t ibucket = bucket_for_hash(hash); - distance_type dist_from_ideal_bucket = 0; - - while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { - if (TSL_LIKELY((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && - compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) - { - return const_iterator(m_buckets.begin() + ibucket); - } - - ibucket = next_bucket(ibucket); - dist_from_ideal_bucket++; - } - - return cend(); - } - - void erase_from_bucket(iterator pos) { - pos.m_iterator->clear(); - m_nb_elements--; - - /** - * Backward shift, swap the empty bucket, previous_ibucket, with the values on its right, ibucket, - * until we cross another empty bucket or if the other bucket has a distance_from_ideal_bucket == 0. - * - * We try to move the values closer to their ideal bucket. 
- */ - std::size_t previous_ibucket = std::size_t(std::distance(m_buckets.begin(), pos.m_iterator)); - std::size_t ibucket = next_bucket(previous_ibucket); - - while(m_buckets[ibucket].dist_from_ideal_bucket() > 0) { - tsl_assert(m_buckets[previous_ibucket].empty()); - - const distance_type new_distance = distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1); - m_buckets[previous_ibucket].set_value_of_empty_bucket(new_distance, m_buckets[ibucket].truncated_hash(), - std::move(m_buckets[ibucket].value())); - m_buckets[ibucket].clear(); - - previous_ibucket = ibucket; - ibucket = next_bucket(ibucket); - } - } - - template - std::pair insert_impl(const K& key, Args&&... value_type_args) { - const std::size_t hash = hash_key(key); - - std::size_t ibucket = bucket_for_hash(hash); - distance_type dist_from_ideal_bucket = 0; - - while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { - if((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && - compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) - { - return std::make_pair(iterator(m_buckets.begin() + ibucket), false); - } - - ibucket = next_bucket(ibucket); - dist_from_ideal_bucket++; - } - - if(grow_on_high_load()) { - ibucket = bucket_for_hash(hash); - dist_from_ideal_bucket = 0; - - while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { - ibucket = next_bucket(ibucket); - dist_from_ideal_bucket++; - } - } - - - if(m_buckets[ibucket].empty()) { - m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), - std::forward(value_type_args)...); - } - else { - insert_value(ibucket, dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), - std::forward(value_type_args)...); - } - - - m_nb_elements++; - /* - * The value will be inserted in ibucket in any case, either because it was - * empty or by stealing the bucket (robin hood). - */ - return std::make_pair(iterator(m_buckets.begin() + ibucket), true); - } - - - template - void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, - truncated_hash_type hash, Args&&... value_type_args) - { - value_type value(std::forward(value_type_args)...); - insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); - } - - // fix issue #6 (see https://github.com/Tessil/robin-map/commit/965dacd191502d310f053cc00551ea8fc2f6c7f0) - void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, - truncated_hash_type hash, value_type&& value) - { - insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); - } - - /* - * We don't use `value_type&& value` as last argument due to a bug in MSVC when `value_type` is a pointer, - * The compiler is not able to see the difference between `std::string*` and `std::string*&&` resulting in - * compile error. - * - * The `value` will be in a moved state at the end of the function. - */ - void insert_value_impl(std::size_t ibucket, distance_type dist_from_ideal_bucket, - truncated_hash_type hash, value_type& value) - { - m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); - ibucket = next_bucket(ibucket); - dist_from_ideal_bucket++; - - while(!m_buckets[ibucket].empty()) { - if(dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { - if(dist_from_ideal_bucket >= REHASH_ON_HIGH_NB_PROBES__NPROBES && - load_factor() >= REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR) - { - /** - * The number of probes is really high, rehash the map on the next insert. 
- * Difficult to do now as rehash may throw. - */ - m_grow_on_next_insert = true; - } - - m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); - } - - ibucket = next_bucket(ibucket); - dist_from_ideal_bucket++; - } - - m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value)); - } - - - void rehash_impl(size_type count) { - robin_hash new_table(count, static_cast(*this), static_cast(*this), - get_allocator(), m_max_load_factor); - - const bool use_stored_hash = USE_STORED_HASH_ON_REHASH(new_table.bucket_count()); - for(auto& bucket: m_buckets) { - if(bucket.empty()) { - continue; - } - - const std::size_t hash = use_stored_hash?bucket.truncated_hash(): - new_table.hash_key(KeySelect()(bucket.value())); - - new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0, - bucket_entry::truncate_hash(hash), std::move(bucket.value())); - } - - new_table.m_nb_elements = m_nb_elements; - new_table.swap(*this); - } - - void insert_value_on_rehash(std::size_t ibucket, distance_type dist_from_ideal_bucket, - truncated_hash_type hash, value_type&& value) - { - while(true) { - if(dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { - if(m_buckets[ibucket].empty()) { - m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value)); - return; - } - else { - m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); - } - } - - dist_from_ideal_bucket++; - ibucket = next_bucket(ibucket); - } - } - - - - /** - * Return true if the map has been rehashed. - */ - bool grow_on_high_load() { - if(m_grow_on_next_insert || size() >= m_load_threshold) { - rehash_impl(GrowthPolicy::next_bucket_count()); - m_grow_on_next_insert = false; - - return true; - } - - return false; - } - - -public: - static const size_type DEFAULT_INIT_BUCKETS_SIZE = 16; - static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f; - -private: - static const distance_type REHASH_ON_HIGH_NB_PROBES__NPROBES = 128; - static constexpr float REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR = 0.15f; - -private: - buckets_container_type m_buckets; - - /** - * Used a lot in find, avoid the call to m_buckets.size() which is a bit slower. - */ - size_type m_bucket_count; - - size_type m_nb_elements; - - size_type m_load_threshold; - float m_max_load_factor; - - bool m_grow_on_next_insert; -}; - -} - -} - -#endif +/** + * MIT License + * + * Copyright (c) 2017 Tessil + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_HASH_H +#define TSL_ROBIN_HASH_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "robin_growth_policy.h" + + + +#ifndef tsl_assert + #ifdef TSL_DEBUG + #define tsl_assert(expr) assert(expr) + #else + #define tsl_assert(expr) (static_cast(0)) + #endif +#endif + + + +namespace tsl { + +namespace detail_robin_hash { + +template +struct make_void { + using type = void; +}; + +template +struct has_is_transparent: std::false_type { +}; + +template +struct has_is_transparent::type>: std::true_type { +}; + +template +struct is_power_of_two_policy: std::false_type { +}; + +template +struct is_power_of_two_policy>: std::true_type { +}; + + + +using truncated_hash_type = std::uint_least32_t; + +/** + * Helper class that store a truncated hash if StoreHash is true and nothing otherwise. + */ +template +class bucket_entry_hash { +public: + bool bucket_hash_equal(std::size_t /*hash*/) const noexcept { + return true; + } + + truncated_hash_type truncated_hash() const noexcept { + return 0; + } + +protected: + void set_hash(truncated_hash_type /*hash*/) noexcept { + } +}; + +template<> +class bucket_entry_hash { +public: + bool bucket_hash_equal(std::size_t hash) const noexcept { + return m_hash == truncated_hash_type(hash); + } + + truncated_hash_type truncated_hash() const noexcept { + return m_hash; + } + +protected: + void set_hash(truncated_hash_type hash) noexcept { + m_hash = truncated_hash_type(hash); + } + +private: + truncated_hash_type m_hash; +}; + + +/** + * Each bucket entry has: + * - A value of type `ValueType`. + * - An integer to store how far the value of the bucket, if any, is from its ideal bucket + * (ex: if the current bucket 5 has the value 'foo' and `hash('foo') % nb_buckets` == 3, + * `dist_from_ideal_bucket()` will return 2 as the current value of the bucket is two + * buckets away from its ideal bucket) + * If there is no value in the bucket (i.e. `empty()` is true) `dist_from_ideal_bucket()` will be < 0. + * - A marker which tells us if the bucket is the last bucket of the bucket array (useful for the + * iterator of the hash table). + * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also stored in the bucket. + * If the size of the hash is more than 32 bits, it is truncated. We don't store the full hash + * as storing the hash is a potential opportunity to use the unused space due to the alignement + * of the bucket_entry structure. We can thus potentially store the hash without any extra space + * (which would not be possible with 64 bits of the hash). 
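As a rough standalone illustration of the bookkeeping described above (it is not part of this header), the hypothetical helper below computes how far an occupied bucket sits from its ideal bucket, assuming the power-of-two bucket count used by the default growth policy; the function name and the toy values are made up for the example.

    #include <cassert>
    #include <cstddef>

    // Probe distance of a value stored in bucket `current` whose ideal bucket is
    // `hash & (nb_buckets - 1)`, assuming nb_buckets is a power of two.
    inline std::size_t probe_distance(std::size_t hash,
                                      std::size_t current,
                                      std::size_t nb_buckets) {
        assert(nb_buckets != 0 && (nb_buckets & (nb_buckets - 1)) == 0);
        const std::size_t ideal = hash & (nb_buckets - 1);
        // Wraps around the end of the bucket array.
        return (current + nb_buckets - ideal) & (nb_buckets - 1);
    }

    int main() {
        // With 8 buckets, a value whose ideal bucket is 6 but which ended up in
        // bucket 1 is three probes away from home (6 -> 7 -> 0 -> 1).
        assert(probe_distance(/*hash=*/6, /*current=*/1, /*nb_buckets=*/8) == 3);
        return 0;
    }

In the actual bucket_entry the distance is not recomputed like this; it is maintained incrementally by the insertion and backward-shift deletion code further down.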
+ */ +template +class bucket_entry: public bucket_entry_hash { + using bucket_hash = bucket_entry_hash; + +public: + using value_type = ValueType; + using distance_type = std::int_least16_t; + + + bucket_entry() noexcept: bucket_hash(), m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(false) + { + tsl_assert(empty()); + } + + bucket_entry(const bucket_entry& other) noexcept(std::is_nothrow_copy_constructible::value): + bucket_hash(other), + m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(other.m_last_bucket) + { + if(!other.empty()) { + ::new (static_cast(std::addressof(m_value))) value_type(other.value()); + m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; + } + } + + /** + * Never really used, but still necessary as we must call resize on an empty `std::vector`. + * and we need to support move-only types. See robin_hash constructor for details. + */ + bucket_entry(bucket_entry&& other) noexcept(std::is_nothrow_move_constructible::value): + bucket_hash(std::move(other)), + m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), + m_last_bucket(other.m_last_bucket) + { + if(!other.empty()) { + ::new (static_cast(std::addressof(m_value))) value_type(std::move(other.value())); + m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; + } + } + + bucket_entry& operator=(const bucket_entry& other) + noexcept(std::is_nothrow_copy_constructible::value) + { + if(this != &other) { + clear(); + + bucket_hash::operator=(other); + if(!other.empty()) { + ::new (static_cast(std::addressof(m_value))) value_type(other.value()); + } + + m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; + m_last_bucket = other.m_last_bucket; + } + + return *this; + } + + bucket_entry& operator=(bucket_entry&& ) = delete; + + ~bucket_entry() noexcept { + clear(); + } + + void clear() noexcept { + if(!empty()) { + destroy_value(); + m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; + } + } + + bool empty() const noexcept { + return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET; + } + + value_type& value() noexcept { + tsl_assert(!empty()); + return *reinterpret_cast(std::addressof(m_value)); + } + + const value_type& value() const noexcept { + tsl_assert(!empty()); + return *reinterpret_cast(std::addressof(m_value)); + } + + distance_type dist_from_ideal_bucket() const noexcept { + return m_dist_from_ideal_bucket; + } + + bool last_bucket() const noexcept { + return m_last_bucket; + } + + void set_as_last_bucket() noexcept { + m_last_bucket = true; + } + + template + void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket, + truncated_hash_type hash, Args&&... 
value_type_args) + { + tsl_assert(dist_from_ideal_bucket >= 0); + tsl_assert(empty()); + + ::new (static_cast(std::addressof(m_value))) value_type(std::forward(value_type_args)...); + this->set_hash(hash); + m_dist_from_ideal_bucket = dist_from_ideal_bucket; + + tsl_assert(!empty()); + } + + void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket, + truncated_hash_type& hash, value_type& value) + { + tsl_assert(!empty()); + + using std::swap; + swap(value, this->value()); + swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket); + + // Avoid warning of unused variable if StoreHash is false + (void) hash; + if(StoreHash) { + const truncated_hash_type tmp_hash = this->truncated_hash(); + this->set_hash(hash); + hash = tmp_hash; + } + } + + static truncated_hash_type truncate_hash(std::size_t hash) noexcept { + return truncated_hash_type(hash); + } + +private: + void destroy_value() noexcept { + tsl_assert(!empty()); + value().~value_type(); + } + +private: + using storage = typename std::aligned_storage::type; + + static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1; + + distance_type m_dist_from_ideal_bucket; + bool m_last_bucket; + storage m_value; +}; + + + +/** + * Internal common class used by `robin_map` and `robin_set`. + * + * ValueType is what will be stored by `robin_hash` (usually `std::pair` for map and `Key` for set). + * + * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in parameter and returns a + * reference to the key. + * + * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in parameter and returns a + * reference to the value. `ValueSelect` should be void if there is no value (in a set for example). + * + * The strong exception guarantee only holds if the expression + * `std::is_nothrow_swappable::value && std::is_nothrow_move_constructible::value` is true. + * + * Behaviour is undefined if the destructor of `ValueType` throws. + */ +template +class robin_hash: private Hash, private KeyEqual, private GrowthPolicy { +private: + template + using has_mapped_type = typename std::integral_constant::value>; + + +public: + template + class robin_iterator; + + using key_type = typename KeySelect::key_type; + using value_type = ValueType; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using hasher = Hash; + using key_equal = KeyEqual; + using allocator_type = Allocator; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = value_type*; + using const_pointer = const value_type*; + using iterator = robin_iterator; + using const_iterator = robin_iterator; + + +private: + /** + * Either store the hash because we are asked by the `StoreHash` template parameter + * or store the hash because it doesn't cost us anything in size and can be used to speed up rehash. + */ + static constexpr bool STORE_HASH = StoreHash || + ( + (sizeof(tsl::detail_robin_hash::bucket_entry) == + sizeof(tsl::detail_robin_hash::bucket_entry)) + && + (sizeof(std::size_t) == sizeof(truncated_hash_type) || + is_power_of_two_policy::value) + && + // Don't store the hash for primitive types with default hash. + (!std::is_arithmetic::value || + !std::is_same>::value) + ); + + /** + * Only use the stored hash on lookup if we are explictly asked. We are not sure how slow + * the KeyEqual operation is. An extra comparison may slow things down with a fast KeyEqual. 
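From user code, the trade-off described above is controlled through the StoreHash template parameter of tsl::robin_map and tsl::robin_set. A minimal sketch, assuming the headers are reachable as <tsl/robin_map.h>:

    #include <cstdio>
    #include <functional>
    #include <memory>
    #include <string>
    #include <utility>
    #include <tsl/robin_map.h>

    int main() {
        // StoreHash is the sixth template parameter (false by default). Storing
        // 32 bits of the hash lets lookups reject most non-matching buckets
        // before the potentially expensive std::string comparison runs.
        using slow_key_map = tsl::robin_map<std::string, int,
                                            std::hash<std::string>,
                                            std::equal_to<std::string>,
                                            std::allocator<std::pair<std::string, int>>,
                                            /*StoreHash=*/true>;
        slow_key_map m;
        m["a fairly long key that is expensive to compare"] = 1;
        std::printf("%zu\n", m.count("another fairly long key"));  // 0
        return 0;
    }

Even with StoreHash left at false the hash may still be stored when it costs no extra space (the STORE_HASH condition above), but in that case it is only consulted during rehash, never on lookups.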
+ */ + static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash; + + /** + * We can only use the hash on rehash if the size of the hash type is the same as the stored one or + * if we use a power of two modulo. In the case of the power of two modulo, we just mask + * the least significant bytes, we just have to check that the truncated_hash_type didn't truncated + * more bytes. + */ + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunknown-pragmas" +#pragma clang diagnostic ignored "-Wunknown-warning-option" +#pragma clang diagnostic ignored "-Wtautological-constant-compare" +#endif + + static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) { + (void) bucket_count; + if(STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) { + return true; + } + else if(STORE_HASH && is_power_of_two_policy::value) { + tsl_assert(bucket_count > 0); + return (bucket_count - 1) <= std::numeric_limits::max(); + } + else { + return false; + } + } + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + + using bucket_entry = tsl::detail_robin_hash::bucket_entry; + using distance_type = typename bucket_entry::distance_type; + + using buckets_allocator = typename std::allocator_traits::template rebind_alloc; + using buckets_container_type = std::vector; + + +public: + /** + * The 'operator*()' and 'operator->()' methods return a const reference and const pointer respectively to the + * stored value type. + * + * In case of a map, to get a mutable reference to the value associated to a key (the '.second' in the + * stored pair), you have to call 'value()'. + * + * The main reason for this is that if we returned a `std::pair&` instead + * of a `const std::pair&`, the user may modify the key which will put the map in a undefined state. 
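In user code, the constness described above looks like the following minimal sketch (again assuming <tsl/robin_map.h> is on the include path):

    #include <cstdio>
    #include <tsl/robin_map.h>

    int main() {
        tsl::robin_map<int, int> m{{1, 10}, {2, 20}};

        for (auto it = m.begin(); it != m.end(); ++it) {
            // it->second and (*it).second are read-only here: the stored pair is
            // exposed as const so the key cannot be modified behind the map's back.
            it.value() += 1;   // mutable access to the mapped value
        }

        std::printf("%d %d\n", m.at(1), m.at(2));  // 11 21
        return 0;
    }

it.key() is the matching read-only accessor for the key.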
+ */ + template + class robin_iterator { + friend class robin_hash; + + private: + using iterator_bucket = typename std::conditional::type; + + + robin_iterator(iterator_bucket it) noexcept: m_iterator(it) { + } + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = const typename robin_hash::value_type; + using difference_type = std::ptrdiff_t; + using reference = value_type&; + using pointer = value_type*; + + + robin_iterator() noexcept { + } + + robin_iterator(const robin_iterator& other) noexcept: m_iterator(other.m_iterator) { + } + + const typename robin_hash::key_type& key() const { + return KeySelect()(m_iterator->value()); + } + + template::value && IsConst>::type* = nullptr> + const typename U::value_type& value() const { + return U()(m_iterator->value()); + } + + template::value && !IsConst>::type* = nullptr> + typename U::value_type& value() { + return U()(m_iterator->value()); + } + + reference operator*() const { + return m_iterator->value(); + } + + pointer operator->() const { + return std::addressof(m_iterator->value()); + } + + robin_iterator& operator++() { + while(true) { + if(m_iterator->last_bucket()) { + ++m_iterator; + return *this; + } + + ++m_iterator; + if(!m_iterator->empty()) { + return *this; + } + } + } + + robin_iterator operator++(int) { + robin_iterator tmp(*this); + ++*this; + + return tmp; + } + + friend bool operator==(const robin_iterator& lhs, const robin_iterator& rhs) { + return lhs.m_iterator == rhs.m_iterator; + } + + friend bool operator!=(const robin_iterator& lhs, const robin_iterator& rhs) { + return !(lhs == rhs); + } + + private: + iterator_bucket m_iterator; + }; + + +public: + robin_hash(size_type bucket_count, + const Hash& hash, + const KeyEqual& equal, + const Allocator& alloc, + float max_load_factor): Hash(hash), KeyEqual(equal), + // We need a non-zero bucket_count + GrowthPolicy(bucket_count == 0?++bucket_count:bucket_count), + m_buckets(alloc), + m_bucket_count(bucket_count), + m_nb_elements(0), + m_grow_on_next_insert(false) + { + if(bucket_count > max_bucket_count()) { + THROW(std::length_error, "The map exceeds its maxmimum size."); + } + + /* + * We can't use the `vector(size_type count, const Allocator& alloc)` constructor + * as it's only available in C++14 and we need to support C++11. We thus must resize after using + * the `vector(const Allocator& alloc)` constructor. + * + * We can't use `vector(size_type count, const T& value, const Allocator& alloc)` as it requires the + * value T to be copyable. 
+ */ + m_buckets.resize(m_bucket_count); + + tsl_assert(!m_buckets.empty()); + m_buckets.back().set_as_last_bucket(); + + + this->max_load_factor(max_load_factor); + } + + robin_hash(const robin_hash& other) = default; + + robin_hash(robin_hash&& other) noexcept(std::is_nothrow_move_constructible::value && + std::is_nothrow_move_constructible::value && + std::is_nothrow_move_constructible::value && + std::is_nothrow_move_constructible::value) + : Hash(std::move(static_cast(other))), + KeyEqual(std::move(static_cast(other))), + GrowthPolicy(std::move(static_cast(other))), + m_buckets(std::move(other.m_buckets)), + m_bucket_count(other.m_bucket_count), + m_nb_elements(other.m_nb_elements), + m_load_threshold(other.m_load_threshold), + m_max_load_factor(other.m_max_load_factor), + m_grow_on_next_insert(other.m_grow_on_next_insert) + { + other.clear(); + } + + robin_hash& operator=(const robin_hash& other) = default; + + robin_hash& operator=(robin_hash&& other) { + other.swap(*this); + other.clear(); + + return *this; + } + + allocator_type get_allocator() const { + return m_buckets.get_allocator(); + } + + + /* + * Iterators + */ + iterator begin() noexcept { + auto begin = m_buckets.begin(); + while(begin != m_buckets.end() && begin->empty()) { + ++begin; + } + + return iterator(begin); + } + + const_iterator begin() const noexcept { + return cbegin(); + } + + const_iterator cbegin() const noexcept { + auto begin = m_buckets.cbegin(); + while(begin != m_buckets.cend() && begin->empty()) { + ++begin; + } + + return const_iterator(begin); + } + + iterator end() noexcept { + return iterator(m_buckets.end()); + } + + const_iterator end() const noexcept { + return cend(); + } + + const_iterator cend() const noexcept { + return const_iterator(m_buckets.cend()); + } + + + /* + * Capacity + */ + bool empty() const noexcept { + return m_nb_elements == 0; + } + + size_type size() const noexcept { + return m_nb_elements; + } + + size_type max_size() const noexcept { + return m_buckets.max_size(); + } + + /* + * Modifiers + */ + void clear() noexcept { + for(auto& bucket: m_buckets) { + bucket.clear(); + } + + m_nb_elements = 0; + m_grow_on_next_insert = false; + } + + + + template + std::pair insert(P&& value) { + return insert_impl(KeySelect()(value), std::forward
(value)); + } + + template + iterator insert(const_iterator hint, P&& value) { + if(hint != cend() && compare_keys(KeySelect()(*hint), KeySelect()(value))) { + return mutable_iterator(hint); + } + + return insert(std::forward
(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + if(std::is_base_of::iterator_category>::value) + { + const auto nb_elements_insert = std::distance(first, last); + const size_type nb_free_buckets = m_load_threshold - size(); + tsl_assert(m_load_threshold >= size()); + + if(nb_elements_insert > 0 && nb_free_buckets < size_type(nb_elements_insert)) { + reserve(size() + size_type(nb_elements_insert)); + } + } + + for(; first != last; ++first) { + insert(*first); + } + } + + + + template + std::pair insert_or_assign(K&& key, M&& obj) { + auto it = try_emplace(std::forward(key), std::forward(obj)); + if(!it.second) { + it.first.value() = std::forward(obj); + } + + return it; + } + + template + iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) { + if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { + auto it = mutable_iterator(hint); + it.value() = std::forward(obj); + + return it; + } + + return insert_or_assign(std::forward(key), std::forward(obj)).first; + } + + + template + std::pair emplace(Args&&... args) { + return insert(value_type(std::forward(args)...)); + } + + template + iterator emplace_hint(const_iterator hint, Args&&... args) { + return insert(hint, value_type(std::forward(args)...)); + } + + + + template + std::pair try_emplace(K&& key, Args&&... args) { + return insert_impl(key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + } + + template + iterator try_emplace(const_iterator hint, K&& key, Args&&... args) { + if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { + return mutable_iterator(hint); + } + + return try_emplace(std::forward(key), std::forward(args)...).first; + } + + /** + * Here to avoid `template size_type erase(const K& key)` being used when + * we use a iterator instead of a const_iterator. + */ + iterator erase(iterator pos) { + erase_from_bucket(pos); + + /** + * Erase bucket used a backward shift after clearing the bucket. + * Check if there is a new value in the bucket, if not get the next non-empty. + */ + if(pos.m_iterator->empty()) { + ++pos; + } + + return pos; + } + + iterator erase(const_iterator pos) { + return erase(mutable_iterator(pos)); + } + + iterator erase(const_iterator first, const_iterator last) { + if(first == last) { + return mutable_iterator(first); + } + + auto first_mutable = mutable_iterator(first); + auto last_mutable = mutable_iterator(last); + for(auto it = first_mutable.m_iterator; it != last_mutable.m_iterator; ++it) { + if(!it->empty()) { + it->clear(); + m_nb_elements--; + } + } + + if(last_mutable == end()) { + return end(); + } + + + /* + * Backward shift on the values which come after the deleted values. + * We try to move the values closer to their ideal bucket. 
+ */ + std::size_t icloser_bucket = std::size_t(std::distance(m_buckets.begin(), first_mutable.m_iterator)); + std::size_t ito_move_closer_value = std::size_t(std::distance(m_buckets.begin(), last_mutable.m_iterator)); + tsl_assert(ito_move_closer_value > icloser_bucket); + + const std::size_t ireturn_bucket = ito_move_closer_value - + std::min(ito_move_closer_value - icloser_bucket, + std::size_t(m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); + + while(ito_move_closer_value < m_buckets.size() && m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) { + icloser_bucket = ito_move_closer_value - + std::min(ito_move_closer_value - icloser_bucket, + std::size_t(m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); + + + tsl_assert(m_buckets[icloser_bucket].empty()); + const distance_type new_distance = distance_type(m_buckets[ito_move_closer_value].dist_from_ideal_bucket() - + (ito_move_closer_value - icloser_bucket)); + m_buckets[icloser_bucket].set_value_of_empty_bucket(new_distance, + m_buckets[ito_move_closer_value].truncated_hash(), + std::move(m_buckets[ito_move_closer_value].value())); + m_buckets[ito_move_closer_value].clear(); + + + ++icloser_bucket; + ++ito_move_closer_value; + } + + + return iterator(m_buckets.begin() + ireturn_bucket); + } + + + template + size_type erase(const K& key) { + return erase(key, hash_key(key)); + } + + template + size_type erase(const K& key, std::size_t hash) { + auto it = find(key, hash); + if(it != end()) { + erase_from_bucket(it); + + return 1; + } + else { + return 0; + } + } + + + + + + void swap(robin_hash& other) { + using std::swap; + + swap(static_cast(*this), static_cast(other)); + swap(static_cast(*this), static_cast(other)); + swap(static_cast(*this), static_cast(other)); + swap(m_buckets, other.m_buckets); + swap(m_bucket_count, other.m_bucket_count); + swap(m_nb_elements, other.m_nb_elements); + swap(m_load_threshold, other.m_load_threshold); + swap(m_max_load_factor, other.m_max_load_factor); + swap(m_grow_on_next_insert, other.m_grow_on_next_insert); + } + + + /* + * Lookup + */ + template::value>::type* = nullptr> + typename U::value_type& at(const K& key) { + return at(key, hash_key(key)); + } + + template::value>::type* = nullptr> + typename U::value_type& at(const K& key, std::size_t hash) { + return const_cast(static_cast(this)->at(key, hash)); + } + + + template::value>::type* = nullptr> + const typename U::value_type& at(const K& key) const { + return at(key, hash_key(key)); + } + + template::value>::type* = nullptr> + const typename U::value_type& at(const K& key, std::size_t hash) const { + auto it = find(key, hash); + if(it != cend()) { + return it.value(); + } + else { + THROW(std::out_of_range, "Couldn't find key."); + } + } + + template::value>::type* = nullptr> + typename U::value_type& operator[](K&& key) { + return try_emplace(std::forward(key)).first.value(); + } + + + template + size_type count(const K& key) const { + return count(key, hash_key(key)); + } + + template + size_type count(const K& key, std::size_t hash) const { + if(find(key, hash) != cend()) { + return 1; + } + else { + return 0; + } + } + + + template + iterator find(const K& key) { + return find_impl(key, hash_key(key)); + } + + template + iterator find(const K& key, std::size_t hash) { + return find_impl(key, hash); + } + + + template + const_iterator find(const K& key) const { + return find_impl(key, hash_key(key)); + } + + template + const_iterator find(const K& key, std::size_t hash) const { + return find_impl(key, 
hash); + } + + + template + std::pair equal_range(const K& key) { + return equal_range(key, hash_key(key)); + } + + template + std::pair equal_range(const K& key, std::size_t hash) { + iterator it = find(key, hash); + return std::make_pair(it, (it == end())?it:std::next(it)); + } + + + template + std::pair equal_range(const K& key) const { + return equal_range(key, hash_key(key)); + } + + template + std::pair equal_range(const K& key, std::size_t hash) const { + const_iterator it = find(key, hash); + return std::make_pair(it, (it == cend())?it:std::next(it)); + } + + /* + * Bucket interface + */ + size_type bucket_count() const { + return m_bucket_count; + } + + size_type max_bucket_count() const { + return std::min(GrowthPolicy::max_bucket_count(), m_buckets.max_size()); + } + + /* + * Hash policy + */ + float load_factor() const { + return float(m_nb_elements)/float(bucket_count()); + } + + float max_load_factor() const { + return m_max_load_factor; + } + + void max_load_factor(float ml) { + m_max_load_factor = std::max(0.1f, std::min(ml, 0.95f)); + m_load_threshold = size_type(float(bucket_count())*m_max_load_factor); + } + + void rehash(size_type count) { + count = std::max(count, size_type(std::ceil(float(size())/max_load_factor()))); + rehash_impl(count); + } + + void reserve(size_type count) { + rehash(size_type(std::ceil(float(count)/max_load_factor()))); + } + + /* + * Observers + */ + hasher hash_function() const { + return static_cast(*this); + } + + key_equal key_eq() const { + return static_cast(*this); + } + + + /* + * Other + */ + iterator mutable_iterator(const_iterator pos) { + return iterator(m_buckets.begin() + std::distance(m_buckets.cbegin(), pos.m_iterator)); + } + +private: + template + std::size_t hash_key(const K& key) const { + return Hash::operator()(key); + } + + template + bool compare_keys(const K1& key1, const K2& key2) const { + return KeyEqual::operator()(key1, key2); + } + + std::size_t bucket_for_hash(std::size_t hash) const { + return GrowthPolicy::bucket_for_hash(hash); + } + + template::value>::type* = nullptr> + std::size_t next_bucket(std::size_t index) const noexcept { + tsl_assert(index < bucket_count()); + + return (index + 1) & this->m_mask; + } + + template::value>::type* = nullptr> + std::size_t next_bucket(std::size_t index) const noexcept { + tsl_assert(index < bucket_count()); + + index++; + return (index != bucket_count())?index:0; + } + + + + template + iterator find_impl(const K& key, std::size_t hash) { + return mutable_iterator(static_cast(this)->find(key, hash)); + } + + template + const_iterator find_impl(const K& key, std::size_t hash) const { + std::size_t ibucket = bucket_for_hash(hash); + distance_type dist_from_ideal_bucket = 0; + + while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { + if (TSL_LIKELY((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && + compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) + { + return const_iterator(m_buckets.begin() + ibucket); + } + + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + + return cend(); + } + + void erase_from_bucket(iterator pos) { + pos.m_iterator->clear(); + m_nb_elements--; + + /** + * Backward shift, swap the empty bucket, previous_ibucket, with the values on its right, ibucket, + * until we cross another empty bucket or if the other bucket has a distance_from_ideal_bucket == 0. + * + * We try to move the values closer to their ideal bucket. 
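The backward shift described above can be pictured on a toy array that stores only probe distances (-1 meaning empty); the hypothetical sketch below mimics the loop in erase_from_bucket:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        // dist[i] is the probe distance of the value in bucket i; -1 means empty.
        // Buckets 2..5 form one probe chain with distances 0, 1, 2, 1.
        std::vector<int> dist = {-1, -1, 0, 1, 2, 1, -1, -1};

        std::size_t erased = 3;          // erase the value in bucket 3
        dist[erased] = -1;

        // Pull each following displaced value one bucket closer to its ideal
        // position until an empty bucket or a distance-0 value is reached.
        std::size_t prev = erased, cur = (erased + 1) % dist.size();
        while (dist[cur] > 0) {
            dist[prev] = dist[cur] - 1;
            dist[cur] = -1;
            prev = cur;
            cur = (cur + 1) % dist.size();
        }

        for (int d : dist) std::printf("%d ", d);   // -1 -1 0 1 0 -1 -1 -1
        std::printf("\n");
        return 0;
    }

Because displaced values are pulled back instead of leaving tombstones, find_impl can stop probing as soon as its own distance exceeds the one stored in the bucket.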
+ */ + std::size_t previous_ibucket = std::size_t(std::distance(m_buckets.begin(), pos.m_iterator)); + std::size_t ibucket = next_bucket(previous_ibucket); + + while(m_buckets[ibucket].dist_from_ideal_bucket() > 0) { + tsl_assert(m_buckets[previous_ibucket].empty()); + + const distance_type new_distance = distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1); + m_buckets[previous_ibucket].set_value_of_empty_bucket(new_distance, m_buckets[ibucket].truncated_hash(), + std::move(m_buckets[ibucket].value())); + m_buckets[ibucket].clear(); + + previous_ibucket = ibucket; + ibucket = next_bucket(ibucket); + } + } + + template + std::pair insert_impl(const K& key, Args&&... value_type_args) { + const std::size_t hash = hash_key(key); + + std::size_t ibucket = bucket_for_hash(hash); + distance_type dist_from_ideal_bucket = 0; + + while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { + if((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && + compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) + { + return std::make_pair(iterator(m_buckets.begin() + ibucket), false); + } + + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + + if(grow_on_high_load()) { + ibucket = bucket_for_hash(hash); + dist_from_ideal_bucket = 0; + + while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + } + + + if(m_buckets[ibucket].empty()) { + m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), + std::forward(value_type_args)...); + } + else { + insert_value(ibucket, dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), + std::forward(value_type_args)...); + } + + + m_nb_elements++; + /* + * The value will be inserted in ibucket in any case, either because it was + * empty or by stealing the bucket (robin hood). + */ + return std::make_pair(iterator(m_buckets.begin() + ibucket), true); + } + + + template + void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, + truncated_hash_type hash, Args&&... value_type_args) + { + value_type value(std::forward(value_type_args)...); + insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); + } + + // fix issue #6 (see https://github.com/Tessil/robin-map/commit/965dacd191502d310f053cc00551ea8fc2f6c7f0) + void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, + truncated_hash_type hash, value_type&& value) + { + insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); + } + + /* + * We don't use `value_type&& value` as last argument due to a bug in MSVC when `value_type` is a pointer, + * The compiler is not able to see the difference between `std::string*` and `std::string*&&` resulting in + * compile error. + * + * The `value` will be in a moved state at the end of the function. + */ + void insert_value_impl(std::size_t ibucket, distance_type dist_from_ideal_bucket, + truncated_hash_type hash, value_type& value) + { + m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + + while(!m_buckets[ibucket].empty()) { + if(dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { + if(dist_from_ideal_bucket >= REHASH_ON_HIGH_NB_PROBES__NPROBES && + load_factor() >= REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR) + { + /** + * The number of probes is really high, rehash the map on the next insert. 
+ * Difficult to do now as rehash may throw. + */ + m_grow_on_next_insert = true; + } + + m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); + } + + ibucket = next_bucket(ibucket); + dist_from_ideal_bucket++; + } + + m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value)); + } + + + void rehash_impl(size_type count) { + robin_hash new_table(count, static_cast(*this), static_cast(*this), + get_allocator(), m_max_load_factor); + + const bool use_stored_hash = USE_STORED_HASH_ON_REHASH(new_table.bucket_count()); + for(auto& bucket: m_buckets) { + if(bucket.empty()) { + continue; + } + + const std::size_t hash = use_stored_hash?bucket.truncated_hash(): + new_table.hash_key(KeySelect()(bucket.value())); + + new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0, + bucket_entry::truncate_hash(hash), std::move(bucket.value())); + } + + new_table.m_nb_elements = m_nb_elements; + new_table.swap(*this); + } + + void insert_value_on_rehash(std::size_t ibucket, distance_type dist_from_ideal_bucket, + truncated_hash_type hash, value_type&& value) + { + while(true) { + if(dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { + if(m_buckets[ibucket].empty()) { + m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value)); + return; + } + else { + m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); + } + } + + dist_from_ideal_bucket++; + ibucket = next_bucket(ibucket); + } + } + + + + /** + * Return true if the map has been rehashed. + */ + bool grow_on_high_load() { + if(m_grow_on_next_insert || size() >= m_load_threshold) { + rehash_impl(GrowthPolicy::next_bucket_count()); + m_grow_on_next_insert = false; + + return true; + } + + return false; + } + + +public: + static const size_type DEFAULT_INIT_BUCKETS_SIZE = 16; + static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f; + +private: + static const distance_type REHASH_ON_HIGH_NB_PROBES__NPROBES = 128; + static constexpr float REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR = 0.15f; + +private: + buckets_container_type m_buckets; + + /** + * Used a lot in find, avoid the call to m_buckets.size() which is a bit slower. + */ + size_type m_bucket_count; + + size_type m_nb_elements; + + size_type m_load_threshold; + float m_max_load_factor; + + bool m_grow_on_next_insert; +}; + +} + +} + +#endif diff --git a/ios/include/tsl/robin_map.h b/ios/include/tsl/robin_map.h index b0cd8748..5958e70f 100644 --- a/ios/include/tsl/robin_map.h +++ b/ios/include/tsl/robin_map.h @@ -1,668 +1,668 @@ -/** - * MIT License - * - * Copyright (c) 2017 Tessil - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef TSL_ROBIN_MAP_H -#define TSL_ROBIN_MAP_H - - -#include -#include -#include -#include -#include -#include -#include "robin_hash.h" - - -namespace tsl { - - -/** - * Implementation of a hash map using open-adressing and the robin hood hashing algorithm with backward shift deletion. - * - * For operations modifying the hash map (insert, erase, rehash, ...), the strong exception guarantee - * is only guaranteed when the expression `std::is_nothrow_swappable>::value && - * std::is_nothrow_move_constructible>::value` is true, otherwise if an exception - * is thrown during the swap or the move, the hash map may end up in a undefined state. Per the standard - * a `Key` or `T` with a noexcept copy constructor and no move constructor also satisfies the - * `std::is_nothrow_move_constructible>::value` criterion (and will thus guarantee the - * strong exception for the map). - * - * When `StoreHash` is true, 32 bits of the hash are stored alongside the values. It can improve - * the performance during lookups if the `KeyEqual` function takes time (if it engenders a cache-miss for example) - * as we then compare the stored hashes before comparing the keys. When `tsl::rh::power_of_two_growth_policy` is used - * as `GrowthPolicy`, it may also speed-up the rehash process as we can avoid to recalculate the hash. - * When it is detected that storing the hash will not incur any memory penality due to alignement (i.e. - * `sizeof(tsl::detail_robin_hash::bucket_entry) == - * sizeof(tsl::detail_robin_hash::bucket_entry)`) and `tsl::rh::power_of_two_growth_policy` is - * used, the hash will be stored even if `StoreHash` is false so that we can speed-up the rehash (but it will - * not be used on lookups unless `StoreHash` is true). - * - * `GrowthPolicy` defines how the map grows and consequently how a hash value is mapped to a bucket. - * By default the map uses `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of buckets - * to a power of two and uses a mask to map the hash to a bucket instead of the slow modulo. - * Other growth policies are available and you may define your own growth policy, - * check `tsl::rh::power_of_two_growth_policy` for the interface. - * - * If the destructor of `Key` or `T` throws an exception, the behaviour of the class is undefined. - * - * Iterators invalidation: - * - clear, operator=, reserve, rehash: always invalidate the iterators. - * - insert, emplace, emplace_hint, operator[]: if there is an effective insert, invalidate the iterators. - * - erase: always invalidate the iterators. 
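For reference, typical use of the map documented above looks like the hedged sketch below (assuming the header is installed as <tsl/robin_map.h>); the iterator-invalidation rules just listed are the main behavioural difference from std::unordered_map to keep in mind:

    #include <cstdio>
    #include <initializer_list>
    #include <string>
    #include <tsl/robin_map.h>

    int main() {
        tsl::robin_map<std::string, int> word_count;

        for (const char* w : {"red", "green", "red", "blue"}) {
            ++word_count[w];   // operator[] value-initialises missing entries to 0
        }

        auto it = word_count.find("red");
        if (it != word_count.end()) {
            std::printf("red -> %d\n", it->second);
        }

        // Inserts and rehashes may invalidate iterators, so reserve ahead of a
        // bulk insert rather than holding iterators across it.
        word_count.reserve(1024);
        return 0;
    }

Unlike std::unordered_map, erase also invalidates iterators here, a direct consequence of the backward-shift deletion.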
- */ -template, - class KeyEqual = std::equal_to, - class Allocator = std::allocator>, - bool StoreHash = false, - class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> -class robin_map { -private: - template - using has_is_transparent = tsl::detail_robin_hash::has_is_transparent; - - class KeySelect { - public: - using key_type = Key; - - const key_type& operator()(const std::pair& key_value) const noexcept { - return key_value.first; - } - - key_type& operator()(std::pair& key_value) noexcept { - return key_value.first; - } - }; - - class ValueSelect { - public: - using value_type = T; - - const value_type& operator()(const std::pair& key_value) const noexcept { - return key_value.second; - } - - value_type& operator()(std::pair& key_value) noexcept { - return key_value.second; - } - }; - - using ht = detail_robin_hash::robin_hash, KeySelect, ValueSelect, - Hash, KeyEqual, Allocator, StoreHash, GrowthPolicy>; - -public: - using key_type = typename ht::key_type; - using mapped_type = T; - using value_type = typename ht::value_type; - using size_type = typename ht::size_type; - using difference_type = typename ht::difference_type; - using hasher = typename ht::hasher; - using key_equal = typename ht::key_equal; - using allocator_type = typename ht::allocator_type; - using reference = typename ht::reference; - using const_reference = typename ht::const_reference; - using pointer = typename ht::pointer; - using const_pointer = typename ht::const_pointer; - using iterator = typename ht::iterator; - using const_iterator = typename ht::const_iterator; - - -public: - /* - * Constructors - */ - robin_map(): robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) { - } - - explicit robin_map(size_type bucket_count, - const Hash& hash = Hash(), - const KeyEqual& equal = KeyEqual(), - const Allocator& alloc = Allocator()): - m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR) - { - } - - robin_map(size_type bucket_count, - const Allocator& alloc): robin_map(bucket_count, Hash(), KeyEqual(), alloc) - { - } - - robin_map(size_type bucket_count, - const Hash& hash, - const Allocator& alloc): robin_map(bucket_count, hash, KeyEqual(), alloc) - { - } - - explicit robin_map(const Allocator& alloc): robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) { - } - - template - robin_map(InputIt first, InputIt last, - size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, - const Hash& hash = Hash(), - const KeyEqual& equal = KeyEqual(), - const Allocator& alloc = Allocator()): robin_map(bucket_count, hash, equal, alloc) - { - insert(first, last); - } - - template - robin_map(InputIt first, InputIt last, - size_type bucket_count, - const Allocator& alloc): robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) - { - } - - template - robin_map(InputIt first, InputIt last, - size_type bucket_count, - const Hash& hash, - const Allocator& alloc): robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) - { - } - - robin_map(std::initializer_list init, - size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, - const Hash& hash = Hash(), - const KeyEqual& equal = KeyEqual(), - const Allocator& alloc = Allocator()): - robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) - { - } - - robin_map(std::initializer_list init, - size_type bucket_count, - const Allocator& alloc): - robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc) - { - } - - robin_map(std::initializer_list init, - size_type bucket_count, - const Hash& hash, - const Allocator& alloc): - 
robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc) - { - } - - robin_map& operator=(std::initializer_list ilist) { - m_ht.clear(); - - m_ht.reserve(ilist.size()); - m_ht.insert(ilist.begin(), ilist.end()); - - return *this; - } - - allocator_type get_allocator() const { return m_ht.get_allocator(); } - - - /* - * Iterators - */ - iterator begin() noexcept { return m_ht.begin(); } - const_iterator begin() const noexcept { return m_ht.begin(); } - const_iterator cbegin() const noexcept { return m_ht.cbegin(); } - - iterator end() noexcept { return m_ht.end(); } - const_iterator end() const noexcept { return m_ht.end(); } - const_iterator cend() const noexcept { return m_ht.cend(); } - - - /* - * Capacity - */ - bool empty() const noexcept { return m_ht.empty(); } - size_type size() const noexcept { return m_ht.size(); } - size_type max_size() const noexcept { return m_ht.max_size(); } - - /* - * Modifiers - */ - void clear() noexcept { m_ht.clear(); } - - - - std::pair insert(const value_type& value) { - return m_ht.insert(value); - } - - template::value>::type* = nullptr> - std::pair insert(P&& value) { - return m_ht.emplace(std::forward
(value)); - } - - std::pair insert(value_type&& value) { - return m_ht.insert(std::move(value)); - } - - - iterator insert(const_iterator hint, const value_type& value) { - return m_ht.insert(hint, value); - } - - template::value>::type* = nullptr> - iterator insert(const_iterator hint, P&& value) { - return m_ht.emplace_hint(hint, std::forward
(value)); - } - - iterator insert(const_iterator hint, value_type&& value) { - return m_ht.insert(hint, std::move(value)); - } - - - template - void insert(InputIt first, InputIt last) { - m_ht.insert(first, last); - } - - void insert(std::initializer_list ilist) { - m_ht.insert(ilist.begin(), ilist.end()); - } - - - - - template - std::pair insert_or_assign(const key_type& k, M&& obj) { - return m_ht.insert_or_assign(k, std::forward(obj)); - } - - template - std::pair insert_or_assign(key_type&& k, M&& obj) { - return m_ht.insert_or_assign(std::move(k), std::forward(obj)); - } - - template - iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) { - return m_ht.insert_or_assign(hint, k, std::forward(obj)); - } - - template - iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) { - return m_ht.insert_or_assign(hint, std::move(k), std::forward(obj)); - } - - - - /** - * Due to the way elements are stored, emplace will need to move or copy the key-value once. - * The method is equivalent to insert(value_type(std::forward(args)...)); - * - * Mainly here for compatibility with the std::unordered_map interface. - */ - template - std::pair emplace(Args&&... args) { - return m_ht.emplace(std::forward(args)...); - } - - - - /** - * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once. - * The method is equivalent to insert(hint, value_type(std::forward(args)...)); - * - * Mainly here for compatibility with the std::unordered_map interface. - */ - template - iterator emplace_hint(const_iterator hint, Args&&... args) { - return m_ht.emplace_hint(hint, std::forward(args)...); - } - - - - - template - std::pair try_emplace(const key_type& k, Args&&... args) { - return m_ht.try_emplace(k, std::forward(args)...); - } - - template - std::pair try_emplace(key_type&& k, Args&&... args) { - return m_ht.try_emplace(std::move(k), std::forward(args)...); - } - - template - iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) { - return m_ht.try_emplace(hint, k, std::forward(args)...); - } - - template - iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) { - return m_ht.try_emplace(hint, std::move(k), std::forward(args)...); - } - - - - - iterator erase(iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } - size_type erase(const key_type& key) { return m_ht.erase(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. - */ - size_type erase(const key_type& key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - size_type erase(const K& key) { return m_ht.erase(key); } - - /** - * @copydoc erase(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. 
- */ - template::value>::type* = nullptr> - size_type erase(const K& key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - - - void swap(robin_map& other) { other.m_ht.swap(m_ht); } - - - - /* - * Lookup - */ - T& at(const Key& key) { return m_ht.at(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - T& at(const Key& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); } - - - const T& at(const Key& key) const { return m_ht.at(key); } - - /** - * @copydoc at(const Key& key, std::size_t precalculated_hash) - */ - const T& at(const Key& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); } - - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - T& at(const K& key) { return m_ht.at(key); } - - /** - * @copydoc at(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - T& at(const K& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); } - - - /** - * @copydoc at(const K& key) - */ - template::value>::type* = nullptr> - const T& at(const K& key) const { return m_ht.at(key); } - - /** - * @copydoc at(const K& key, std::size_t precalculated_hash) - */ - template::value>::type* = nullptr> - const T& at(const K& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); } - - - - - T& operator[](const Key& key) { return m_ht[key]; } - T& operator[](Key&& key) { return m_ht[std::move(key)]; } - - - - - size_type count(const Key& key) const { return m_ht.count(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - size_type count(const Key& key, std::size_t precalculated_hash) const { - return m_ht.count(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - size_type count(const K& key) const { return m_ht.count(key); } - - /** - * @copydoc count(const K& key) const - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - size_type count(const K& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } - - - - - iterator find(const Key& key) { return m_ht.find(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. 
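The precalculated_hash overloads above let a caller hash a key once and reuse the value across several operations; a small sketch, again assuming <tsl/robin_map.h>:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <tsl/robin_map.h>

    int main() {
        tsl::robin_map<std::string, int> m{{"alpha", 1}, {"beta", 2}};

        const std::string key = "alpha";
        // The value passed to the *_hash overloads must equal hash_function()(key).
        const std::size_t h = m.hash_function()(key);

        if (m.count(key, h) != 0) {
            std::printf("%d\n", m.find(key, h)->second);  // 1
        }
        m.erase(key, h);
        return 0;
    }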
- */ - iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } - - const_iterator find(const Key& key) const { return m_ht.find(key); } - - /** - * @copydoc find(const Key& key, std::size_t precalculated_hash) - */ - const_iterator find(const Key& key, std::size_t precalculated_hash) const { - return m_ht.find(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - iterator find(const K& key) { return m_ht.find(key); } - - /** - * @copydoc find(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } - - /** - * @copydoc find(const K& key) - */ - template::value>::type* = nullptr> - const_iterator find(const K& key) const { return m_ht.find(key); } - - /** - * @copydoc find(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - const_iterator find(const K& key, std::size_t precalculated_hash) const { - return m_ht.find(key, precalculated_hash); - } - - - - - std::pair equal_range(const Key& key) { return m_ht.equal_range(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - std::pair equal_range(const Key& key, std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - std::pair equal_range(const Key& key) const { return m_ht.equal_range(key); } - - /** - * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) - */ - std::pair equal_range(const Key& key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key) { return m_ht.equal_range(key); } - - - /** - * @copydoc equal_range(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. 
- */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key, std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * @copydoc equal_range(const K& key) - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key) const { return m_ht.equal_range(key); } - - /** - * @copydoc equal_range(const K& key, std::size_t precalculated_hash) - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - - - - /* - * Bucket interface - */ - size_type bucket_count() const { return m_ht.bucket_count(); } - size_type max_bucket_count() const { return m_ht.max_bucket_count(); } - - - /* - * Hash policy - */ - float load_factor() const { return m_ht.load_factor(); } - float max_load_factor() const { return m_ht.max_load_factor(); } - void max_load_factor(float ml) { m_ht.max_load_factor(ml); } - - void rehash(size_type count) { m_ht.rehash(count); } - void reserve(size_type count) { m_ht.reserve(count); } - - - /* - * Observers - */ - hasher hash_function() const { return m_ht.hash_function(); } - key_equal key_eq() const { return m_ht.key_eq(); } - - /* - * Other - */ - - /** - * Convert a const_iterator to an iterator. - */ - iterator mutable_iterator(const_iterator pos) { - return m_ht.mutable_iterator(pos); - } - - friend bool operator==(const robin_map& lhs, const robin_map& rhs) { - if(lhs.size() != rhs.size()) { - return false; - } - - for(const auto& element_lhs: lhs) { - const auto it_element_rhs = rhs.find(element_lhs.first); - if(it_element_rhs == rhs.cend() || element_lhs.second != it_element_rhs->second) { - return false; - } - } - - return true; - } - - friend bool operator!=(const robin_map& lhs, const robin_map& rhs) { - return !operator==(lhs, rhs); - } - - friend void swap(robin_map& lhs, robin_map& rhs) { - lhs.swap(rhs); - } - -private: - ht m_ht; -}; - - -/** - * Same as `tsl::robin_map`. - */ -template, - class KeyEqual = std::equal_to, - class Allocator = std::allocator>, - bool StoreHash = false> -using robin_pg_map = robin_map; - -} // end namespace tsl - -#endif +/** + * MIT License + * + * Copyright (c) 2017 Tessil + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef TSL_ROBIN_MAP_H +#define TSL_ROBIN_MAP_H + + +#include +#include +#include +#include +#include +#include +#include "robin_hash.h" + + +namespace tsl { + + +/** + * Implementation of a hash map using open-adressing and the robin hood hashing algorithm with backward shift deletion. + * + * For operations modifying the hash map (insert, erase, rehash, ...), the strong exception guarantee + * is only guaranteed when the expression `std::is_nothrow_swappable>::value && + * std::is_nothrow_move_constructible>::value` is true, otherwise if an exception + * is thrown during the swap or the move, the hash map may end up in a undefined state. Per the standard + * a `Key` or `T` with a noexcept copy constructor and no move constructor also satisfies the + * `std::is_nothrow_move_constructible>::value` criterion (and will thus guarantee the + * strong exception for the map). + * + * When `StoreHash` is true, 32 bits of the hash are stored alongside the values. It can improve + * the performance during lookups if the `KeyEqual` function takes time (if it engenders a cache-miss for example) + * as we then compare the stored hashes before comparing the keys. When `tsl::rh::power_of_two_growth_policy` is used + * as `GrowthPolicy`, it may also speed-up the rehash process as we can avoid to recalculate the hash. + * When it is detected that storing the hash will not incur any memory penality due to alignement (i.e. + * `sizeof(tsl::detail_robin_hash::bucket_entry) == + * sizeof(tsl::detail_robin_hash::bucket_entry)`) and `tsl::rh::power_of_two_growth_policy` is + * used, the hash will be stored even if `StoreHash` is false so that we can speed-up the rehash (but it will + * not be used on lookups unless `StoreHash` is true). + * + * `GrowthPolicy` defines how the map grows and consequently how a hash value is mapped to a bucket. + * By default the map uses `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of buckets + * to a power of two and uses a mask to map the hash to a bucket instead of the slow modulo. + * Other growth policies are available and you may define your own growth policy, + * check `tsl::rh::power_of_two_growth_policy` for the interface. + * + * If the destructor of `Key` or `T` throws an exception, the behaviour of the class is undefined. + * + * Iterators invalidation: + * - clear, operator=, reserve, rehash: always invalidate the iterators. + * - insert, emplace, emplace_hint, operator[]: if there is an effective insert, invalidate the iterators. + * - erase: always invalidate the iterators. 
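For reference, a minimal sketch of how the StoreHash and GrowthPolicy parameters described in the class comment above can be spelled out explicitly. The alias names, example keys, and the assumption that ios/include is on the include path are illustrative, not part of this patch:

    #include <functional>
    #include <memory>
    #include <string>
    #include <utility>
    #include <tsl/robin_map.h>

    // The defaults of tsl::robin_map<std::string, int>, but with StoreHash = true
    // so 32 bits of each hash are cached next to the stored pairs.
    using CachedHashMap = tsl::robin_map<
            std::string, int,
            std::hash<std::string>,
            std::equal_to<std::string>,
            std::allocator<std::pair<std::string, int>>,
            true,                                       // StoreHash
            tsl::rh::power_of_two_growth_policy<2>>;    // default GrowthPolicy

    int main() {
        CachedHashMap m;
        m["filament"] = 1;

        // The robin_pg_map alias declared at the end of this header swaps in a
        // prime-number growth policy instead of power-of-two masking.
        tsl::robin_pg_map<std::string, int> pm;
        pm["filament"] = 2;

        return (m.count("filament") == 1 && pm.count("filament") == 1) ? 0 : 1;
    }
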
+ */ +template, + class KeyEqual = std::equal_to, + class Allocator = std::allocator>, + bool StoreHash = false, + class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> +class robin_map { +private: + template + using has_is_transparent = tsl::detail_robin_hash::has_is_transparent; + + class KeySelect { + public: + using key_type = Key; + + const key_type& operator()(const std::pair& key_value) const noexcept { + return key_value.first; + } + + key_type& operator()(std::pair& key_value) noexcept { + return key_value.first; + } + }; + + class ValueSelect { + public: + using value_type = T; + + const value_type& operator()(const std::pair& key_value) const noexcept { + return key_value.second; + } + + value_type& operator()(std::pair& key_value) noexcept { + return key_value.second; + } + }; + + using ht = detail_robin_hash::robin_hash, KeySelect, ValueSelect, + Hash, KeyEqual, Allocator, StoreHash, GrowthPolicy>; + +public: + using key_type = typename ht::key_type; + using mapped_type = T; + using value_type = typename ht::value_type; + using size_type = typename ht::size_type; + using difference_type = typename ht::difference_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using allocator_type = typename ht::allocator_type; + using reference = typename ht::reference; + using const_reference = typename ht::const_reference; + using pointer = typename ht::pointer; + using const_pointer = typename ht::const_pointer; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + + +public: + /* + * Constructors + */ + robin_map(): robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) { + } + + explicit robin_map(size_type bucket_count, + const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()): + m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR) + { + } + + robin_map(size_type bucket_count, + const Allocator& alloc): robin_map(bucket_count, Hash(), KeyEqual(), alloc) + { + } + + robin_map(size_type bucket_count, + const Hash& hash, + const Allocator& alloc): robin_map(bucket_count, hash, KeyEqual(), alloc) + { + } + + explicit robin_map(const Allocator& alloc): robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) { + } + + template + robin_map(InputIt first, InputIt last, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()): robin_map(bucket_count, hash, equal, alloc) + { + insert(first, last); + } + + template + robin_map(InputIt first, InputIt last, + size_type bucket_count, + const Allocator& alloc): robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) + { + } + + template + robin_map(InputIt first, InputIt last, + size_type bucket_count, + const Hash& hash, + const Allocator& alloc): robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) + { + } + + robin_map(std::initializer_list init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()): + robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) + { + } + + robin_map(std::initializer_list init, + size_type bucket_count, + const Allocator& alloc): + robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc) + { + } + + robin_map(std::initializer_list init, + size_type bucket_count, + const Hash& hash, + const Allocator& alloc): + 
robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc) + { + } + + robin_map& operator=(std::initializer_list ilist) { + m_ht.clear(); + + m_ht.reserve(ilist.size()); + m_ht.insert(ilist.begin(), ilist.end()); + + return *this; + } + + allocator_type get_allocator() const { return m_ht.get_allocator(); } + + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + + + std::pair insert(const value_type& value) { + return m_ht.insert(value); + } + + template::value>::type* = nullptr> + std::pair insert(P&& value) { + return m_ht.emplace(std::forward
<P>(value)); + } + + std::pair<iterator, bool> insert(value_type&& value) { + return m_ht.insert(std::move(value)); + } + + + iterator insert(const_iterator hint, const value_type& value) { + return m_ht.insert(hint, value); + } + + template<class P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr> + iterator insert(const_iterator hint, P&& value) { + return m_ht.emplace_hint(hint, std::forward<P>
(value)); + } + + iterator insert(const_iterator hint, value_type&& value) { + return m_ht.insert(hint, std::move(value)); + } + + + template + void insert(InputIt first, InputIt last) { + m_ht.insert(first, last); + } + + void insert(std::initializer_list ilist) { + m_ht.insert(ilist.begin(), ilist.end()); + } + + + + + template + std::pair insert_or_assign(const key_type& k, M&& obj) { + return m_ht.insert_or_assign(k, std::forward(obj)); + } + + template + std::pair insert_or_assign(key_type&& k, M&& obj) { + return m_ht.insert_or_assign(std::move(k), std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) { + return m_ht.insert_or_assign(hint, k, std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) { + return m_ht.insert_or_assign(hint, std::move(k), std::forward(obj)); + } + + + + /** + * Due to the way elements are stored, emplace will need to move or copy the key-value once. + * The method is equivalent to insert(value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + std::pair emplace(Args&&... args) { + return m_ht.emplace(std::forward(args)...); + } + + + + /** + * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once. + * The method is equivalent to insert(hint, value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + iterator emplace_hint(const_iterator hint, Args&&... args) { + return m_ht.emplace_hint(hint, std::forward(args)...); + } + + + + + template + std::pair try_emplace(const key_type& k, Args&&... args) { + return m_ht.try_emplace(k, std::forward(args)...); + } + + template + std::pair try_emplace(key_type&& k, Args&&... args) { + return m_ht.try_emplace(std::move(k), std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) { + return m_ht.try_emplace(hint, k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) { + return m_ht.try_emplace(hint, std::move(k), std::forward(args)...); + } + + + + + iterator erase(iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } + size_type erase(const key_type& key) { return m_ht.erase(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. + */ + size_type erase(const key_type& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + size_type erase(const K& key) { return m_ht.erase(key); } + + /** + * @copydoc erase(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. 
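A small usage sketch of the insert_or_assign and try_emplace overloads above, which mirror the std::unordered_map interface; the map name and string values are illustrative:

    #include <cassert>
    #include <string>
    #include <tsl/robin_map.h>

    int main() {
        tsl::robin_map<std::string, std::string> titles;

        // try_emplace only constructs the mapped value when the key is absent.
        auto [it1, inserted1] = titles.try_emplace("engine", "Filament");
        auto [it2, inserted2] = titles.try_emplace("engine", "ignored");
        assert(inserted1 && !inserted2 && it1 == it2);
        assert(it2->second == "Filament");

        // insert_or_assign overwrites the mapped value when the key already exists.
        titles.insert_or_assign("engine", "Filament Engine");
        assert(titles.at("engine") == "Filament Engine");
        return 0;
    }
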
+ */ + template::value>::type* = nullptr> + size_type erase(const K& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + + + void swap(robin_map& other) { other.m_ht.swap(m_ht); } + + + + /* + * Lookup + */ + T& at(const Key& key) { return m_ht.at(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + T& at(const Key& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); } + + + const T& at(const Key& key) const { return m_ht.at(key); } + + /** + * @copydoc at(const Key& key, std::size_t precalculated_hash) + */ + const T& at(const Key& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); } + + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + T& at(const K& key) { return m_ht.at(key); } + + /** + * @copydoc at(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + T& at(const K& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); } + + + /** + * @copydoc at(const K& key) + */ + template::value>::type* = nullptr> + const T& at(const K& key) const { return m_ht.at(key); } + + /** + * @copydoc at(const K& key, std::size_t precalculated_hash) + */ + template::value>::type* = nullptr> + const T& at(const K& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); } + + + + + T& operator[](const Key& key) { return m_ht[key]; } + T& operator[](Key&& key) { return m_ht[std::move(key)]; } + + + + + size_type count(const Key& key) const { return m_ht.count(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + size_type count(const Key& key, std::size_t precalculated_hash) const { + return m_ht.count(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + size_type count(const K& key) const { return m_ht.count(key); } + + /** + * @copydoc count(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + size_type count(const K& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } + + + + + iterator find(const Key& key) { return m_ht.find(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. 
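The transparent-key overloads above only exist when KeyEqual::is_transparent is defined; they allow lookups without materialising a Key. A hedged sketch with a hand-written hash/equality pair (StringHash and StringEqual are illustrative names, not part of the library):

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <string_view>
    #include <tsl/robin_map.h>

    // Hashes std::string keys and std::string_view probes identically.
    struct StringHash {
        std::size_t operator()(std::string_view sv) const noexcept {
            return std::hash<std::string_view>{}(sv);
        }
    };

    struct StringEqual {
        using is_transparent = void;  // enables the template<class K> overloads
        bool operator()(std::string_view lhs, std::string_view rhs) const noexcept {
            return lhs == rhs;
        }
    };

    int main() {
        tsl::robin_map<std::string, int, StringHash, StringEqual> m;
        m.emplace("roughness", 1);

        std::string_view probe = "roughness";
        // No temporary std::string is built for this lookup.
        return m.find(probe) != m.end() ? 0 : 1;
    }
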
+ */ + iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } + + const_iterator find(const Key& key) const { return m_ht.find(key); } + + /** + * @copydoc find(const Key& key, std::size_t precalculated_hash) + */ + const_iterator find(const Key& key, std::size_t precalculated_hash) const { + return m_ht.find(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + iterator find(const K& key) { return m_ht.find(key); } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } + + /** + * @copydoc find(const K& key) + */ + template::value>::type* = nullptr> + const_iterator find(const K& key) const { return m_ht.find(key); } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + const_iterator find(const K& key, std::size_t precalculated_hash) const { + return m_ht.find(key, precalculated_hash); + } + + + + + std::pair equal_range(const Key& key) { return m_ht.equal_range(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + std::pair equal_range(const Key& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + std::pair equal_range(const Key& key) const { return m_ht.equal_range(key); } + + /** + * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) + */ + std::pair equal_range(const Key& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key) { return m_ht.equal_range(key); } + + + /** + * @copydoc equal_range(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. 
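A sketch of the precalculated_hash overloads documented above: hash the key once via hash_function() and reuse that value for count, at and erase. The map contents are illustrative; passing a hash that does not equal hash_function()(key) is incorrect:

    #include <cstddef>
    #include <string>
    #include <tsl/robin_map.h>

    int main() {
        tsl::robin_map<std::string, int> counts = {{"draws", 10}, {"triangles", 2000}};

        const std::string key = "draws";
        const std::size_t h = counts.hash_function()(key);  // hash once

        if (counts.count(key, h) != 0) {
            counts.at(key, h) += 1;   // same hash reused for the lookup
        }
        counts.erase(key, h);         // and again for the erase
        return counts.size() == 1 ? 0 : 1;
    }
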
+ */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * @copydoc equal_range(const K& key) + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key) const { return m_ht.equal_range(key); } + + /** + * @copydoc equal_range(const K& key, std::size_t precalculated_hash) + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + + + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_ht.bucket_count(); } + size_type max_bucket_count() const { return m_ht.max_bucket_count(); } + + + /* + * Hash policy + */ + float load_factor() const { return m_ht.load_factor(); } + float max_load_factor() const { return m_ht.max_load_factor(); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + void rehash(size_type count) { m_ht.rehash(count); } + void reserve(size_type count) { m_ht.reserve(count); } + + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + key_equal key_eq() const { return m_ht.key_eq(); } + + /* + * Other + */ + + /** + * Convert a const_iterator to an iterator. + */ + iterator mutable_iterator(const_iterator pos) { + return m_ht.mutable_iterator(pos); + } + + friend bool operator==(const robin_map& lhs, const robin_map& rhs) { + if(lhs.size() != rhs.size()) { + return false; + } + + for(const auto& element_lhs: lhs) { + const auto it_element_rhs = rhs.find(element_lhs.first); + if(it_element_rhs == rhs.cend() || element_lhs.second != it_element_rhs->second) { + return false; + } + } + + return true; + } + + friend bool operator!=(const robin_map& lhs, const robin_map& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(robin_map& lhs, robin_map& rhs) { + lhs.swap(rhs); + } + +private: + ht m_ht; +}; + + +/** + * Same as `tsl::robin_map`. + */ +template, + class KeyEqual = std::equal_to, + class Allocator = std::allocator>, + bool StoreHash = false> +using robin_pg_map = robin_map; + +} // end namespace tsl + +#endif diff --git a/ios/include/tsl/robin_set.h b/ios/include/tsl/robin_set.h index da47b293..4e4667e2 100644 --- a/ios/include/tsl/robin_set.h +++ b/ios/include/tsl/robin_set.h @@ -1,535 +1,535 @@ -/** - * MIT License - * - * Copyright (c) 2017 Tessil - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef TSL_ROBIN_SET_H -#define TSL_ROBIN_SET_H - - -#include -#include -#include -#include -#include -#include -#include "robin_hash.h" - - -namespace tsl { - - -/** - * Implementation of a hash set using open-adressing and the robin hood hashing algorithm with backward shift deletion. - * - * For operations modifying the hash set (insert, erase, rehash, ...), the strong exception guarantee - * is only guaranteed when the expression `std::is_nothrow_swappable::value && - * std::is_nothrow_move_constructible::value` is true, otherwise if an exception - * is thrown during the swap or the move, the hash set may end up in a undefined state. Per the standard - * a `Key` with a noexcept copy constructor and no move constructor also satisfies the - * `std::is_nothrow_move_constructible::value` criterion (and will thus guarantee the - * strong exception for the set). - * - * When `StoreHash` is true, 32 bits of the hash are stored alongside the values. It can improve - * the performance during lookups if the `KeyEqual` function takes time (or engenders a cache-miss for example) - * as we then compare the stored hashes before comparing the keys. When `tsl::rh::power_of_two_growth_policy` is used - * as `GrowthPolicy`, it may also speed-up the rehash process as we can avoid to recalculate the hash. - * When it is detected that storing the hash will not incur any memory penality due to alignement (i.e. - * `sizeof(tsl::detail_robin_hash::bucket_entry) == - * sizeof(tsl::detail_robin_hash::bucket_entry)`) and `tsl::rh::power_of_two_growth_policy` is - * used, the hash will be stored even if `StoreHash` is false so that we can speed-up the rehash (but it will - * not be used on lookups unless `StoreHash` is true). - * - * `GrowthPolicy` defines how the set grows and consequently how a hash value is mapped to a bucket. - * By default the set uses `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of buckets - * to a power of two and uses a mask to set the hash to a bucket instead of the slow modulo. - * Other growth policies are available and you may define your own growth policy, - * check `tsl::rh::power_of_two_growth_policy` for the interface. - * - * If the destructor of `Key` throws an exception, the behaviour of the class is undefined. - * - * Iterators invalidation: - * - clear, operator=, reserve, rehash: always invalidate the iterators. - * - insert, emplace, emplace_hint, operator[]: if there is an effective insert, invalidate the iterators. - * - erase: always invalidate the iterators. 
- */ -template, - class KeyEqual = std::equal_to, - class Allocator = std::allocator, - bool StoreHash = false, - class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> -class robin_set { -private: - template - using has_is_transparent = tsl::detail_robin_hash::has_is_transparent; - - class KeySelect { - public: - using key_type = Key; - - const key_type& operator()(const Key& key) const noexcept { - return key; - } - - key_type& operator()(Key& key) noexcept { - return key; - } - }; - - using ht = detail_robin_hash::robin_hash; - -public: - using key_type = typename ht::key_type; - using value_type = typename ht::value_type; - using size_type = typename ht::size_type; - using difference_type = typename ht::difference_type; - using hasher = typename ht::hasher; - using key_equal = typename ht::key_equal; - using allocator_type = typename ht::allocator_type; - using reference = typename ht::reference; - using const_reference = typename ht::const_reference; - using pointer = typename ht::pointer; - using const_pointer = typename ht::const_pointer; - using iterator = typename ht::iterator; - using const_iterator = typename ht::const_iterator; - - - /* - * Constructors - */ - robin_set(): robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE) { - } - - explicit robin_set(size_type bucket_count, - const Hash& hash = Hash(), - const KeyEqual& equal = KeyEqual(), - const Allocator& alloc = Allocator()): - m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR) - { - } - - robin_set(size_type bucket_count, - const Allocator& alloc): robin_set(bucket_count, Hash(), KeyEqual(), alloc) - { - } - - robin_set(size_type bucket_count, - const Hash& hash, - const Allocator& alloc): robin_set(bucket_count, hash, KeyEqual(), alloc) - { - } - - explicit robin_set(const Allocator& alloc): robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) { - } - - template - robin_set(InputIt first, InputIt last, - size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, - const Hash& hash = Hash(), - const KeyEqual& equal = KeyEqual(), - const Allocator& alloc = Allocator()): robin_set(bucket_count, hash, equal, alloc) - { - insert(first, last); - } - - template - robin_set(InputIt first, InputIt last, - size_type bucket_count, - const Allocator& alloc): robin_set(first, last, bucket_count, Hash(), KeyEqual(), alloc) - { - } - - template - robin_set(InputIt first, InputIt last, - size_type bucket_count, - const Hash& hash, - const Allocator& alloc): robin_set(first, last, bucket_count, hash, KeyEqual(), alloc) - { - } - - robin_set(std::initializer_list init, - size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, - const Hash& hash = Hash(), - const KeyEqual& equal = KeyEqual(), - const Allocator& alloc = Allocator()): - robin_set(init.begin(), init.end(), bucket_count, hash, equal, alloc) - { - } - - robin_set(std::initializer_list init, - size_type bucket_count, - const Allocator& alloc): - robin_set(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc) - { - } - - robin_set(std::initializer_list init, - size_type bucket_count, - const Hash& hash, - const Allocator& alloc): - robin_set(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc) - { - } - - - robin_set& operator=(std::initializer_list ilist) { - m_ht.clear(); - - m_ht.reserve(ilist.size()); - m_ht.insert(ilist.begin(), ilist.end()); - - return *this; - } - - allocator_type get_allocator() const { return m_ht.get_allocator(); } - - - /* - * Iterators - */ - iterator begin() noexcept { return m_ht.begin(); } - const_iterator begin() 
const noexcept { return m_ht.begin(); } - const_iterator cbegin() const noexcept { return m_ht.cbegin(); } - - iterator end() noexcept { return m_ht.end(); } - const_iterator end() const noexcept { return m_ht.end(); } - const_iterator cend() const noexcept { return m_ht.cend(); } - - - /* - * Capacity - */ - bool empty() const noexcept { return m_ht.empty(); } - size_type size() const noexcept { return m_ht.size(); } - size_type max_size() const noexcept { return m_ht.max_size(); } - - /* - * Modifiers - */ - void clear() noexcept { m_ht.clear(); } - - - - - std::pair insert(const value_type& value) { - return m_ht.insert(value); - } - - std::pair insert(value_type&& value) { - return m_ht.insert(std::move(value)); - } - - iterator insert(const_iterator hint, const value_type& value) { - return m_ht.insert(hint, value); - } - - iterator insert(const_iterator hint, value_type&& value) { - return m_ht.insert(hint, std::move(value)); - } - - template - void insert(InputIt first, InputIt last) { - m_ht.insert(first, last); - } - - void insert(std::initializer_list ilist) { - m_ht.insert(ilist.begin(), ilist.end()); - } - - - - - /** - * Due to the way elements are stored, emplace will need to move or copy the key-value once. - * The method is equivalent to insert(value_type(std::forward(args)...)); - * - * Mainly here for compatibility with the std::unordered_map interface. - */ - template - std::pair emplace(Args&&... args) { - return m_ht.emplace(std::forward(args)...); - } - - - - /** - * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once. - * The method is equivalent to insert(hint, value_type(std::forward(args)...)); - * - * Mainly here for compatibility with the std::unordered_map interface. - */ - template - iterator emplace_hint(const_iterator hint, Args&&... args) { - return m_ht.emplace_hint(hint, std::forward(args)...); - } - - - - iterator erase(iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } - size_type erase(const key_type& key) { return m_ht.erase(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. - */ - size_type erase(const key_type& key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - size_type erase(const K& key) { return m_ht.erase(key); } - - /** - * @copydoc erase(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. - */ - template::value>::type* = nullptr> - size_type erase(const K& key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - - - void swap(robin_set& other) { other.m_ht.swap(m_ht); } - - - - /* - * Lookup - */ - size_type count(const Key& key) const { return m_ht.count(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash. - */ - size_type count(const Key& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - size_type count(const K& key) const { return m_ht.count(key); } - - /** - * @copydoc count(const K& key) const - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - size_type count(const K& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } - - - - - iterator find(const Key& key) { return m_ht.find(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } - - const_iterator find(const Key& key) const { return m_ht.find(key); } - - /** - * @copydoc find(const Key& key, std::size_t precalculated_hash) - */ - const_iterator find(const Key& key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - iterator find(const K& key) { return m_ht.find(key); } - - /** - * @copydoc find(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } - - /** - * @copydoc find(const K& key) - */ - template::value>::type* = nullptr> - const_iterator find(const K& key) const { return m_ht.find(key); } - - /** - * @copydoc find(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - const_iterator find(const K& key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); } - - - - - std::pair equal_range(const Key& key) { return m_ht.equal_range(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. 
- */ - std::pair equal_range(const Key& key, std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - std::pair equal_range(const Key& key) const { return m_ht.equal_range(key); } - - /** - * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) - */ - std::pair equal_range(const Key& key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. - * If so, K must be hashable and comparable to Key. - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key) { return m_ht.equal_range(key); } - - /** - * @copydoc equal_range(const K& key) - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same - * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key, std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * @copydoc equal_range(const K& key) - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key) const { return m_ht.equal_range(key); } - - /** - * @copydoc equal_range(const K& key, std::size_t precalculated_hash) - */ - template::value>::type* = nullptr> - std::pair equal_range(const K& key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - - - - /* - * Bucket interface - */ - size_type bucket_count() const { return m_ht.bucket_count(); } - size_type max_bucket_count() const { return m_ht.max_bucket_count(); } - - - /* - * Hash policy - */ - float load_factor() const { return m_ht.load_factor(); } - float max_load_factor() const { return m_ht.max_load_factor(); } - void max_load_factor(float ml) { m_ht.max_load_factor(ml); } - - void rehash(size_type count) { m_ht.rehash(count); } - void reserve(size_type count) { m_ht.reserve(count); } - - - /* - * Observers - */ - hasher hash_function() const { return m_ht.hash_function(); } - key_equal key_eq() const { return m_ht.key_eq(); } - - - /* - * Other - */ - - /** - * Convert a const_iterator to an iterator. - */ - iterator mutable_iterator(const_iterator pos) { - return m_ht.mutable_iterator(pos); - } - - friend bool operator==(const robin_set& lhs, const robin_set& rhs) { - if(lhs.size() != rhs.size()) { - return false; - } - - for(const auto& element_lhs: lhs) { - const auto it_element_rhs = rhs.find(element_lhs); - if(it_element_rhs == rhs.cend()) { - return false; - } - } - - return true; - } - - friend bool operator!=(const robin_set& lhs, const robin_set& rhs) { - return !operator==(lhs, rhs); - } - - friend void swap(robin_set& lhs, robin_set& rhs) { - lhs.swap(rhs); - } - -private: - ht m_ht; -}; - - -/** - * Same as `tsl::robin_set`. 
- */ -template, - class KeyEqual = std::equal_to, - class Allocator = std::allocator, - bool StoreHash = false> -using robin_pg_set = robin_set; - -} // end namespace tsl - -#endif - +/** + * MIT License + * + * Copyright (c) 2017 Tessil + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef TSL_ROBIN_SET_H +#define TSL_ROBIN_SET_H + + +#include +#include +#include +#include +#include +#include +#include "robin_hash.h" + + +namespace tsl { + + +/** + * Implementation of a hash set using open-adressing and the robin hood hashing algorithm with backward shift deletion. + * + * For operations modifying the hash set (insert, erase, rehash, ...), the strong exception guarantee + * is only guaranteed when the expression `std::is_nothrow_swappable::value && + * std::is_nothrow_move_constructible::value` is true, otherwise if an exception + * is thrown during the swap or the move, the hash set may end up in a undefined state. Per the standard + * a `Key` with a noexcept copy constructor and no move constructor also satisfies the + * `std::is_nothrow_move_constructible::value` criterion (and will thus guarantee the + * strong exception for the set). + * + * When `StoreHash` is true, 32 bits of the hash are stored alongside the values. It can improve + * the performance during lookups if the `KeyEqual` function takes time (or engenders a cache-miss for example) + * as we then compare the stored hashes before comparing the keys. When `tsl::rh::power_of_two_growth_policy` is used + * as `GrowthPolicy`, it may also speed-up the rehash process as we can avoid to recalculate the hash. + * When it is detected that storing the hash will not incur any memory penality due to alignement (i.e. + * `sizeof(tsl::detail_robin_hash::bucket_entry) == + * sizeof(tsl::detail_robin_hash::bucket_entry)`) and `tsl::rh::power_of_two_growth_policy` is + * used, the hash will be stored even if `StoreHash` is false so that we can speed-up the rehash (but it will + * not be used on lookups unless `StoreHash` is true). + * + * `GrowthPolicy` defines how the set grows and consequently how a hash value is mapped to a bucket. + * By default the set uses `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of buckets + * to a power of two and uses a mask to set the hash to a bucket instead of the slow modulo. + * Other growth policies are available and you may define your own growth policy, + * check `tsl::rh::power_of_two_growth_policy` for the interface. 
+ * + * If the destructor of `Key` throws an exception, the behaviour of the class is undefined. + * + * Iterators invalidation: + * - clear, operator=, reserve, rehash: always invalidate the iterators. + * - insert, emplace, emplace_hint, operator[]: if there is an effective insert, invalidate the iterators. + * - erase: always invalidate the iterators. + */ +template, + class KeyEqual = std::equal_to, + class Allocator = std::allocator, + bool StoreHash = false, + class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> +class robin_set { +private: + template + using has_is_transparent = tsl::detail_robin_hash::has_is_transparent; + + class KeySelect { + public: + using key_type = Key; + + const key_type& operator()(const Key& key) const noexcept { + return key; + } + + key_type& operator()(Key& key) noexcept { + return key; + } + }; + + using ht = detail_robin_hash::robin_hash; + +public: + using key_type = typename ht::key_type; + using value_type = typename ht::value_type; + using size_type = typename ht::size_type; + using difference_type = typename ht::difference_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using allocator_type = typename ht::allocator_type; + using reference = typename ht::reference; + using const_reference = typename ht::const_reference; + using pointer = typename ht::pointer; + using const_pointer = typename ht::const_pointer; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + + + /* + * Constructors + */ + robin_set(): robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE) { + } + + explicit robin_set(size_type bucket_count, + const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()): + m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR) + { + } + + robin_set(size_type bucket_count, + const Allocator& alloc): robin_set(bucket_count, Hash(), KeyEqual(), alloc) + { + } + + robin_set(size_type bucket_count, + const Hash& hash, + const Allocator& alloc): robin_set(bucket_count, hash, KeyEqual(), alloc) + { + } + + explicit robin_set(const Allocator& alloc): robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) { + } + + template + robin_set(InputIt first, InputIt last, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()): robin_set(bucket_count, hash, equal, alloc) + { + insert(first, last); + } + + template + robin_set(InputIt first, InputIt last, + size_type bucket_count, + const Allocator& alloc): robin_set(first, last, bucket_count, Hash(), KeyEqual(), alloc) + { + } + + template + robin_set(InputIt first, InputIt last, + size_type bucket_count, + const Hash& hash, + const Allocator& alloc): robin_set(first, last, bucket_count, hash, KeyEqual(), alloc) + { + } + + robin_set(std::initializer_list init, + size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, + const Hash& hash = Hash(), + const KeyEqual& equal = KeyEqual(), + const Allocator& alloc = Allocator()): + robin_set(init.begin(), init.end(), bucket_count, hash, equal, alloc) + { + } + + robin_set(std::initializer_list init, + size_type bucket_count, + const Allocator& alloc): + robin_set(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc) + { + } + + robin_set(std::initializer_list init, + size_type bucket_count, + const Hash& hash, + const Allocator& alloc): + robin_set(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc) + { + 
} + + + robin_set& operator=(std::initializer_list ilist) { + m_ht.clear(); + + m_ht.reserve(ilist.size()); + m_ht.insert(ilist.begin(), ilist.end()); + + return *this; + } + + allocator_type get_allocator() const { return m_ht.get_allocator(); } + + + /* + * Iterators + */ + iterator begin() noexcept { return m_ht.begin(); } + const_iterator begin() const noexcept { return m_ht.begin(); } + const_iterator cbegin() const noexcept { return m_ht.cbegin(); } + + iterator end() noexcept { return m_ht.end(); } + const_iterator end() const noexcept { return m_ht.end(); } + const_iterator cend() const noexcept { return m_ht.cend(); } + + + /* + * Capacity + */ + bool empty() const noexcept { return m_ht.empty(); } + size_type size() const noexcept { return m_ht.size(); } + size_type max_size() const noexcept { return m_ht.max_size(); } + + /* + * Modifiers + */ + void clear() noexcept { m_ht.clear(); } + + + + + std::pair insert(const value_type& value) { + return m_ht.insert(value); + } + + std::pair insert(value_type&& value) { + return m_ht.insert(std::move(value)); + } + + iterator insert(const_iterator hint, const value_type& value) { + return m_ht.insert(hint, value); + } + + iterator insert(const_iterator hint, value_type&& value) { + return m_ht.insert(hint, std::move(value)); + } + + template + void insert(InputIt first, InputIt last) { + m_ht.insert(first, last); + } + + void insert(std::initializer_list ilist) { + m_ht.insert(ilist.begin(), ilist.end()); + } + + + + + /** + * Due to the way elements are stored, emplace will need to move or copy the key-value once. + * The method is equivalent to insert(value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + std::pair emplace(Args&&... args) { + return m_ht.emplace(std::forward(args)...); + } + + + + /** + * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once. + * The method is equivalent to insert(hint, value_type(std::forward(args)...)); + * + * Mainly here for compatibility with the std::unordered_map interface. + */ + template + iterator emplace_hint(const_iterator hint, Args&&... args) { + return m_ht.emplace_hint(hint, std::forward(args)...); + } + + + + iterator erase(iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator pos) { return m_ht.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } + size_type erase(const key_type& key) { return m_ht.erase(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. + */ + size_type erase(const key_type& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + size_type erase(const K& key) { return m_ht.erase(key); } + + /** + * @copydoc erase(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash. 
+ */ + template::value>::type* = nullptr> + size_type erase(const K& key, std::size_t precalculated_hash) { + return m_ht.erase(key, precalculated_hash); + } + + + + void swap(robin_set& other) { other.m_ht.swap(m_ht); } + + + + /* + * Lookup + */ + size_type count(const Key& key) const { return m_ht.count(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + size_type count(const Key& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + size_type count(const K& key) const { return m_ht.count(key); } + + /** + * @copydoc count(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + size_type count(const K& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } + + + + + iterator find(const Key& key) { return m_ht.find(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } + + const_iterator find(const Key& key) const { return m_ht.find(key); } + + /** + * @copydoc find(const Key& key, std::size_t precalculated_hash) + */ + const_iterator find(const Key& key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + iterator find(const K& key) { return m_ht.find(key); } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } + + /** + * @copydoc find(const K& key) + */ + template::value>::type* = nullptr> + const_iterator find(const K& key) const { return m_ht.find(key); } + + /** + * @copydoc find(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + const_iterator find(const K& key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); } + + + + + std::pair equal_range(const Key& key) { return m_ht.equal_range(key); } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. 
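The set-side interface mirrors the map above; a minimal tsl::robin_set sketch, with illustrative names and strings:

    #include <string>
    #include <tsl/robin_set.h>

    int main() {
        tsl::robin_set<std::string> extensions;
        extensions.reserve(8);  // size the table up front to avoid rehashes
        extensions.insert({"KHR_materials_ior", "KHR_texture_transform"});

        // Re-inserting an existing key is a no-op; .second reports whether it inserted.
        const auto result = extensions.emplace("KHR_materials_ior");
        if (result.second) return 1;

        return extensions.count("KHR_texture_transform") == 1 ? 0 : 1;
    }
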
+ */ + std::pair equal_range(const Key& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + std::pair equal_range(const Key& key) const { return m_ht.equal_range(key); } + + /** + * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) + */ + std::pair equal_range(const Key& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. + * If so, K must be hashable and comparable to Key. + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key) { return m_ht.equal_range(key); } + + /** + * @copydoc equal_range(const K& key) + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same + * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash. + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key, std::size_t precalculated_hash) { + return m_ht.equal_range(key, precalculated_hash); + } + + /** + * @copydoc equal_range(const K& key) + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key) const { return m_ht.equal_range(key); } + + /** + * @copydoc equal_range(const K& key, std::size_t precalculated_hash) + */ + template::value>::type* = nullptr> + std::pair equal_range(const K& key, std::size_t precalculated_hash) const { + return m_ht.equal_range(key, precalculated_hash); + } + + + + + /* + * Bucket interface + */ + size_type bucket_count() const { return m_ht.bucket_count(); } + size_type max_bucket_count() const { return m_ht.max_bucket_count(); } + + + /* + * Hash policy + */ + float load_factor() const { return m_ht.load_factor(); } + float max_load_factor() const { return m_ht.max_load_factor(); } + void max_load_factor(float ml) { m_ht.max_load_factor(ml); } + + void rehash(size_type count) { m_ht.rehash(count); } + void reserve(size_type count) { m_ht.reserve(count); } + + + /* + * Observers + */ + hasher hash_function() const { return m_ht.hash_function(); } + key_equal key_eq() const { return m_ht.key_eq(); } + + + /* + * Other + */ + + /** + * Convert a const_iterator to an iterator. + */ + iterator mutable_iterator(const_iterator pos) { + return m_ht.mutable_iterator(pos); + } + + friend bool operator==(const robin_set& lhs, const robin_set& rhs) { + if(lhs.size() != rhs.size()) { + return false; + } + + for(const auto& element_lhs: lhs) { + const auto it_element_rhs = rhs.find(element_lhs); + if(it_element_rhs == rhs.cend()) { + return false; + } + } + + return true; + } + + friend bool operator!=(const robin_set& lhs, const robin_set& rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(robin_set& lhs, robin_set& rhs) { + lhs.swap(rhs); + } + +private: + ht m_ht; +}; + + +/** + * Same as `tsl::robin_set`. + */ +template, + class KeyEqual = std::equal_to, + class Allocator = std::allocator, + bool StoreHash = false> +using robin_pg_set = robin_set; + +} // end namespace tsl + +#endif + diff --git a/ios/include/utils/Allocator.h b/ios/include/utils/Allocator.h index 23dc7211..5d881c13 100644 --- a/ios/include/utils/Allocator.h +++ b/ios/include/utils/Allocator.h @@ -1,824 +1,827 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_ALLOCATOR_H -#define TNT_UTILS_ALLOCATOR_H - - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -namespace utils { - -namespace pointermath { - -template -static inline P* add(P* a, T b) noexcept { - return (P*)(uintptr_t(a) + uintptr_t(b)); -} - -template -static inline P* align(P* p, size_t alignment) noexcept { - // alignment must be a power-of-two - assert(alignment && !(alignment & alignment-1)); - return (P*)((uintptr_t(p) + alignment - 1) & ~(alignment - 1)); -} - -template -static inline P* align(P* p, size_t alignment, size_t offset) noexcept { - P* const r = align(add(p, offset), alignment); - assert(pointermath::add(r, -offset) >= p); - return r; -} - -} - -/* ------------------------------------------------------------------------------------------------ - * LinearAllocator - * - * + Allocates blocks linearly - * + Cannot free individual blocks - * + Can free top of memory back up to a specified point - * + Doesn't call destructors - * ------------------------------------------------------------------------------------------------ - */ - -class LinearAllocator { -public: - // use memory area provided - LinearAllocator(void* begin, void* end) noexcept; - - template - explicit LinearAllocator(const AREA& area) : LinearAllocator(area.begin(), area.end()) { } - - // Allocators can't be copied - LinearAllocator(const LinearAllocator& rhs) = delete; - LinearAllocator& operator=(const LinearAllocator& rhs) = delete; - - // Allocators can be moved - LinearAllocator(LinearAllocator&& rhs) noexcept; - LinearAllocator& operator=(LinearAllocator&& rhs) noexcept; - - ~LinearAllocator() noexcept = default; - - // our allocator concept - void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) UTILS_RESTRICT { - // branch-less allocation - void* const p = pointermath::align(current(), alignment, extra); - void* const c = pointermath::add(p, size); - bool success = c <= end(); - set_current(success ? c : current()); - return success ? 
p : nullptr; - } - - // API specific to this allocator - - void *getCurrent() UTILS_RESTRICT noexcept { - return current(); - } - - // free memory back to the specified point - void rewind(void* p) UTILS_RESTRICT noexcept { - assert(p>=mBegin && p - explicit HeapAllocator(const AREA&) { } - - // our allocator concept - void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) { - // this allocator doesn't support 'extra' - assert(extra == 0); - return aligned_alloc(size, alignment); - } - - void free(void* p) noexcept { - aligned_free(p); - } - - void free(void* p, size_t) noexcept { - free(p); - } - - ~HeapAllocator() noexcept = default; - - void swap(HeapAllocator& rhs) noexcept { } -}; - -// ------------------------------------------------------------------------------------------------ - -class FreeList { -public: - FreeList() noexcept = default; - FreeList(void* begin, void* end, size_t elementSize, size_t alignment, size_t extra) noexcept; - FreeList(const FreeList& rhs) = delete; - FreeList& operator=(const FreeList& rhs) = delete; - FreeList(FreeList&& rhs) noexcept = default; - FreeList& operator=(FreeList&& rhs) noexcept = default; - - void* pop() noexcept { - Node* const head = mHead; - mHead = head ? head->next : nullptr; - // this could indicate a use after free - assert(!mHead || mHead >= mBegin && mHead < mEnd); - return head; - } - - void push(void* p) noexcept { - assert(p); - assert(p >= mBegin && p < mEnd); - // TODO: assert this is one of our pointer (i.e.: it's address match one of ours) - Node* const head = static_cast(p); - head->next = mHead; - mHead = head; - } - - void *getFirst() noexcept { - return mHead; - } - -private: - struct Node { - Node* next; - }; - - static Node* init(void* begin, void* end, - size_t elementSize, size_t alignment, size_t extra) noexcept; - - Node* mHead = nullptr; - -#ifndef NDEBUG - // These are needed only for debugging... - void* mBegin = nullptr; - void* mEnd = nullptr; -#endif -}; - -class AtomicFreeList { -public: - AtomicFreeList() noexcept = default; - AtomicFreeList(void* begin, void* end, - size_t elementSize, size_t alignment, size_t extra) noexcept; - AtomicFreeList(const FreeList& rhs) = delete; - AtomicFreeList& operator=(const FreeList& rhs) = delete; - - void* pop() noexcept { - Node* const storage = mStorage; - - HeadPtr currentHead = mHead.load(); - while (currentHead.offset >= 0) { - // The value of "next" we load here might already contain application data if another - // thread raced ahead of us. But in that case, the computed "newHead" will be discarded - // since compare_exchange_weak fails. Then this thread will loop with the updated - // value of currentHead, and try again. - Node* const next = storage[currentHead.offset].next.load(std::memory_order_relaxed); - const HeadPtr newHead{ next ? int32_t(next - storage) : -1, currentHead.tag + 1 }; - // In the rare case that the other thread that raced ahead of us already returned the - // same mHead we just loaded, but it now has a different "next" value, the tag field will not - // match, and compare_exchange_weak will fail and prevent that particular race condition. - if (mHead.compare_exchange_weak(currentHead, newHead)) { - // This assert needs to occur after we have validated that there was no race condition - // Otherwise, next might already contain application data, if another thread - // raced ahead of us after we loaded mHead, but before we loaded mHead->next. 
- assert(!next || next >= storage); - break; - } - } - void* p = (currentHead.offset >= 0) ? (storage + currentHead.offset) : nullptr; - assert(!p || p >= storage); - return p; - } - - void push(void* p) noexcept { - Node* const storage = mStorage; - assert(p && p >= storage); - Node* const node = static_cast(p); - HeadPtr currentHead = mHead.load(); - HeadPtr newHead = { int32_t(node - storage), currentHead.tag + 1 }; - do { - newHead.tag = currentHead.tag + 1; - Node* const n = (currentHead.offset >= 0) ? (storage + currentHead.offset) : nullptr; - node->next.store(n, std::memory_order_relaxed); - } while(!mHead.compare_exchange_weak(currentHead, newHead)); - } - - void* getFirst() noexcept { - return mStorage + mHead.load(std::memory_order_relaxed).offset; - } - -private: - struct Node { - // This should be a regular (non-atomic) pointer, but this causes TSAN to complain - // about a data-race that exists but is benin. We always use this atomic<> in - // relaxed mode. - // The data race TSAN complains about is when a pop() is interrupted by a - // pop() + push() just after mHead->next is read -- it appears as though it is written - // without synchronization (by the push), however in that case, the pop's CAS will fail - // and things will auto-correct. - // - // Pop() | - // | | - // read head->next | - // | pop() - // | | - // | read head->next - // | CAS, tag++ - // | | - // | push() - // | | - // [TSAN: data-race here] write head->next - // | CAS, tag++ - // CAS fails - // | - // read head->next - // | - // CAS, tag++ - // - std::atomic next; - }; - - // This struct is using a 32-bit offset into the arena rather than - // a direct pointer, because together with the 32-bit tag, it needs to - // fit into 8 bytes. If it was any larger, it would not be possible to - // access it atomically. - struct alignas(8) HeadPtr { - int32_t offset; - uint32_t tag; - }; - - std::atomic mHead{}; - - Node* mStorage = nullptr; -}; - -// ------------------------------------------------------------------------------------------------ - -template < - size_t ELEMENT_SIZE, - size_t ALIGNMENT = alignof(std::max_align_t), - size_t OFFSET = 0, - typename FREELIST = FreeList> -class PoolAllocator { - static_assert(ELEMENT_SIZE >= sizeof(void*), "ELEMENT_SIZE must accommodate at least a pointer"); -public: - // our allocator concept - void* alloc(size_t size = ELEMENT_SIZE, - size_t alignment = ALIGNMENT, size_t offset = OFFSET) noexcept { - assert(size <= ELEMENT_SIZE); - assert(alignment <= ALIGNMENT); - assert(offset == OFFSET); - return mFreeList.pop(); - } - - void free(void* p, size_t = ELEMENT_SIZE) noexcept { - mFreeList.push(p); - } - - constexpr size_t getSize() const noexcept { return ELEMENT_SIZE; } - - PoolAllocator(void* begin, void* end) noexcept - : mFreeList(begin, end, ELEMENT_SIZE, ALIGNMENT, OFFSET) { - } - - template - explicit PoolAllocator(const AREA& area) noexcept - : PoolAllocator(area.begin(), area.end()) { - } - - // Allocators can't be copied - PoolAllocator(const PoolAllocator& rhs) = delete; - PoolAllocator& operator=(const PoolAllocator& rhs) = delete; - - // Allocators can be moved - PoolAllocator(PoolAllocator&& rhs) = default; - PoolAllocator& operator=(PoolAllocator&& rhs) = default; - - PoolAllocator() noexcept = default; - ~PoolAllocator() noexcept = default; - - // API specific to this allocator - - void *getCurrent() noexcept { - return mFreeList.getFirst(); - } - -private: - FREELIST mFreeList; -}; - -#define UTILS_MAX(a,b) ((a) > (b) ? 
(a) : (b)) - -template -using ObjectPoolAllocator = PoolAllocator; - -template -using ThreadSafeObjectPoolAllocator = PoolAllocator; - - -// ------------------------------------------------------------------------------------------------ -// Areas -// ------------------------------------------------------------------------------------------------ - -namespace AreaPolicy { - -class StaticArea { -public: - StaticArea() noexcept = default; - - StaticArea(void* b, void* e) noexcept - : mBegin(b), mEnd(e) { - } - - ~StaticArea() noexcept = default; - - StaticArea(const StaticArea& rhs) = default; - StaticArea& operator=(const StaticArea& rhs) = default; - StaticArea(StaticArea&& rhs) noexcept = default; - StaticArea& operator=(StaticArea&& rhs) noexcept = default; - - void* data() const noexcept { return mBegin; } - void* begin() const noexcept { return mBegin; } - void* end() const noexcept { return mEnd; } - size_t size() const noexcept { return uintptr_t(mEnd) - uintptr_t(mBegin); } - - friend void swap(StaticArea& lhs, StaticArea& rhs) noexcept { - using std::swap; - swap(lhs.mBegin, rhs.mBegin); - swap(lhs.mEnd, rhs.mEnd); - } - -private: - void* mBegin = nullptr; - void* mEnd = nullptr; -}; - -class HeapArea { -public: - HeapArea() noexcept = default; - - explicit HeapArea(size_t size) { - if (size) { - // TODO: policy committing memory - mBegin = malloc(size); - mEnd = pointermath::add(mBegin, size); - } - } - - ~HeapArea() noexcept { - // TODO: policy for returning memory to system - free(mBegin); - } - - HeapArea(const HeapArea& rhs) = delete; - HeapArea& operator=(const HeapArea& rhs) = delete; - HeapArea(HeapArea&& rhs) noexcept = delete; - HeapArea& operator=(HeapArea&& rhs) noexcept = delete; - - void* data() const noexcept { return mBegin; } - void* begin() const noexcept { return mBegin; } - void* end() const noexcept { return mEnd; } - size_t size() const noexcept { return uintptr_t(mEnd) - uintptr_t(mBegin); } - - friend void swap(HeapArea& lhs, HeapArea& rhs) noexcept { - using std::swap; - swap(lhs.mBegin, rhs.mBegin); - swap(lhs.mEnd, rhs.mEnd); - } - -private: - void* mBegin = nullptr; - void* mEnd = nullptr; -}; - -} // namespace AreaPolicy - -// ------------------------------------------------------------------------------------------------ -// Policies -// ------------------------------------------------------------------------------------------------ - -namespace LockingPolicy { - -struct NoLock { - void lock() noexcept { } - void unlock() noexcept { } -}; - -using SpinLock = utils::SpinLock; -using Mutex = utils::Mutex; - -} // namespace LockingPolicy - - -namespace TrackingPolicy { - -// default no-op tracker -struct Untracked { - Untracked() noexcept = default; - Untracked(const char* name, void* base, size_t size) noexcept { } - void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept { } - void onFree(void* p, size_t = 0) noexcept { } - void onReset() noexcept { } - void onRewind(void* addr) noexcept { } -}; - -// This just track the max memory usage and logs it in the destructor -struct HighWatermark { - HighWatermark() noexcept = default; - HighWatermark(const char* name, void* base, size_t size) noexcept - : mName(name), mBase(base), mSize(uint32_t(size)) { } - ~HighWatermark() noexcept; - void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept; - void onFree(void* p, size_t size) noexcept; - void onReset() noexcept; - void onRewind(void const* addr) noexcept; -protected: - const char* mName = nullptr; - void* mBase = 
nullptr; - uint32_t mSize = 0; - uint32_t mCurrent = 0; - uint32_t mHighWaterMark = 0; -}; - -// This just fills buffers with known values to help catch uninitialized access and use after free. -struct Debug { - Debug() noexcept = default; - Debug(const char* name, void* base, size_t size) noexcept - : mName(name), mBase(base), mSize(uint32_t(size)) { } - void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept; - void onFree(void* p, size_t size) noexcept; - void onReset() noexcept; - void onRewind(void* addr) noexcept; -protected: - const char* mName = nullptr; - void* mBase = nullptr; - uint32_t mSize = 0; -}; - -struct DebugAndHighWatermark : protected HighWatermark, protected Debug { - DebugAndHighWatermark() noexcept = default; - DebugAndHighWatermark(const char* name, void* base, size_t size) noexcept - : HighWatermark(name, base, size), Debug(name, base, size) { } - void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept { - HighWatermark::onAlloc(p, size, alignment, extra); - Debug::onAlloc(p, size, alignment, extra); - } - void onFree(void* p, size_t size) noexcept { - HighWatermark::onFree(p, size); - Debug::onFree(p, size); - } - void onReset() noexcept { - HighWatermark::onReset(); - Debug::onReset(); - } - void onRewind(void* addr) noexcept { - HighWatermark::onRewind(addr); - Debug::onRewind(addr); - } -}; - -} // namespace TrackingPolicy - -// ------------------------------------------------------------------------------------------------ -// Arenas -// ------------------------------------------------------------------------------------------------ - -template -class Arena { -public: - - Arena() = default; - - // construct an arena with a name and forward argument to its allocator - template - Arena(const char* name, size_t size, ARGS&& ... args) - : mArenaName(name), - mArea(size), - mAllocator(mArea, std::forward(args) ... ), - mListener(name, mArea.data(), mArea.size()) { - } - - template - Arena(const char* name, AreaPolicy&& area, ARGS&& ... args) - : mArenaName(name), - mArea(std::forward(area)), - mAllocator(mArea, std::forward(args) ... ), - mListener(name, mArea.data(), mArea.size()) { - } - - // allocate memory from arena with given size and alignment - // (acceptable size/alignment may depend on the allocator provided) - void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) noexcept { - std::lock_guard guard(mLock); - void* p = mAllocator.alloc(size, alignment, extra); - mListener.onAlloc(p, size, alignment, extra); - return p; - } - - // Allocate an array of trivially destructible objects - // for safety, we disable the object-based alloc method if the object type is not - // trivially destructible, since free() won't call the destructor and this is allocating - // an array. 
- template ::value>::type> - T* alloc(size_t count, size_t alignment = alignof(T), size_t extra = 0) noexcept { - return (T*)alloc(count * sizeof(T), alignment, extra); - } - - // return memory pointed by p to the arena - // (actual behaviour may depend on allocator provided) - void free(void* p) noexcept { - if (p) { - std::lock_guard guard(mLock); - mListener.onFree(p); - mAllocator.free(p); - } - } - - // some allocators require the size of the allocation for free - void free(void* p, size_t size) noexcept { - if (p) { - std::lock_guard guard(mLock); - mListener.onFree(p, size); - mAllocator.free(p, size); - } - } - - // some allocators don't have a free() call, but a single reset() or rewind() instead - void reset() noexcept { - std::lock_guard guard(mLock); - mListener.onReset(); - mAllocator.reset(); - } - - void* getCurrent() noexcept { return mAllocator.getCurrent(); } - - void rewind(void *addr) noexcept { - std::lock_guard guard(mLock); - mListener.onRewind(addr); - mAllocator.rewind(addr); - } - - // Allocate and construct an object - template - T* make(ARGS&& ... args) noexcept { - void* const p = this->alloc(sizeof(T), ALIGN); - return p ? new(p) T(std::forward(args)...) : nullptr; - } - - // destroys an object created with make() above, and frees associated memory - template - void destroy(T* p) noexcept { - if (p) { - p->~T(); - this->free((void*)p, sizeof(T)); - } - } - - char const* getName() const noexcept { return mArenaName; } - - AllocatorPolicy& getAllocator() noexcept { return mAllocator; } - AllocatorPolicy const& getAllocator() const noexcept { return mAllocator; } - - TrackingPolicy& getListener() noexcept { return mListener; } - TrackingPolicy const& getListener() const noexcept { return mListener; } - - AreaPolicy& getArea() noexcept { return mArea; } - AreaPolicy const& getArea() const noexcept { return mArea; } - - void setListener(TrackingPolicy listener) noexcept { - std::swap(mListener, listener); - } - - template - void emplaceListener(ARGS&& ... args) noexcept { - mListener.~TrackingPolicy(); - new (&mListener) TrackingPolicy(std::forward(args)...); - } - - // An arena can't be copied - Arena(Arena const& rhs) noexcept = delete; - Arena& operator=(Arena const& rhs) noexcept = delete; - - friend void swap(Arena& lhs, Arena& rhs) noexcept { - using std::swap; - swap(lhs.mArea, rhs.mArea); - swap(lhs.mAllocator, rhs.mAllocator); - swap(lhs.mLock, rhs.mLock); - swap(lhs.mListener, rhs.mListener); - swap(lhs.mArenaName, rhs.mArenaName); - } - -private: - char const* mArenaName = nullptr; - AreaPolicy mArea; - // note: we should use something like compressed_pair for the members below - AllocatorPolicy mAllocator; - LockingPolicy mLock; - TrackingPolicy mListener; -}; - -// ------------------------------------------------------------------------------------------------ - -template -using HeapArena = Arena; - -// ------------------------------------------------------------------------------------------------ - -// This doesn't implement our allocator concept, because it's too risky to use this as an allocator -// in particular, doing ArenaScope. 
-template -class ArenaScope { - - struct Finalizer { - void (*finalizer)(void* p) = nullptr; - Finalizer* next = nullptr; - }; - - template - static void destruct(void* p) noexcept { - static_cast(p)->~T(); - } - -public: - explicit ArenaScope(ARENA& allocator) - : mArena(allocator), mRewind(allocator.getCurrent()) { - } - - ArenaScope& operator=(const ArenaScope& rhs) = delete; - ArenaScope(ArenaScope&& rhs) noexcept = delete; - ArenaScope& operator=(ArenaScope&& rhs) noexcept = delete; - - ~ArenaScope() { - // run the finalizer chain - Finalizer* head = mFinalizerHead; - while (head) { - void* p = pointermath::add(head, sizeof(Finalizer)); - head->finalizer(p); - head = head->next; - } - // ArenaScope works only with Arena that implements rewind() - mArena.rewind(mRewind); - } - - template - T* make(ARGS&& ... args) noexcept { - T* o = nullptr; - if (std::is_trivially_destructible::value) { - o = mArena.template make(std::forward(args)...); - } else { - void* const p = (Finalizer*)mArena.alloc(sizeof(T), ALIGN, sizeof(Finalizer)); - if (p != nullptr) { - Finalizer* const f = static_cast(p) - 1; - // constructor must be called before adding the dtor to the list - // so that the ctor can allocate objects in a nested scope and have the - // finalizers called in reverse order. - o = new(p) T(std::forward(args)...); - f->finalizer = &destruct; - f->next = mFinalizerHead; - mFinalizerHead = f; - } - } - return o; - } - - void* allocate(size_t size, size_t alignment = 1) noexcept { - return mArena.template alloc(size, alignment, 0); - } - - template - T* allocate(size_t size, size_t alignment = alignof(T), size_t extra = 0) noexcept { - return mArena.template alloc(size, alignment, extra); - } - - // use with caution - ARENA& getAllocator() noexcept { return mArena; } - -private: - ARENA& mArena; - void* mRewind = nullptr; - Finalizer* mFinalizerHead = nullptr; -}; - - -template -class STLAllocator { -public: - using value_type = TYPE; - using pointer = TYPE*; - using const_pointer = const TYPE*; - using reference = TYPE&; - using const_reference = const TYPE&; - using size_type = std::size_t; - using difference_type = std::ptrdiff_t; - using propagate_on_container_move_assignment = std::true_type; - using is_always_equal = std::true_type; - - template - struct rebind { using other = STLAllocator; }; - -public: - // we don't make this explicit, so that we can initialize a vector using a STLAllocator - // from an Arena, avoiding to have to repeat the vector type. - STLAllocator(ARENA& arena) : mArena(arena) { } // NOLINT(google-explicit-constructor) - - template - explicit STLAllocator(STLAllocator const& rhs) : mArena(rhs.mArena) { } - - TYPE* allocate(std::size_t n) { - return static_cast(mArena.alloc(n * sizeof(TYPE), alignof(TYPE))); - } - - void deallocate(TYPE* p, std::size_t n) { - mArena.free(p, n * sizeof(TYPE)); - } - - // these should be out-of-class friends, but this doesn't seem to work with some compilers - // which complain about multiple definition each time a STLAllocator<> is instantiated. 
- template - bool operator==(const STLAllocator& rhs) const noexcept { - return std::addressof(mArena) == std::addressof(rhs.mArena); - } - - template - bool operator!=(const STLAllocator& rhs) const noexcept { - return !operator==(rhs); - } - -private: - template - friend class STLAllocator; - - ARENA& mArena; -}; - -} // namespace utils - -#endif // TNT_UTILS_ALLOCATOR_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_ALLOCATOR_H +#define TNT_UTILS_ALLOCATOR_H + + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace utils { + +namespace pointermath { + +template +static inline P* add(P* a, T b) noexcept { + return (P*)(uintptr_t(a) + uintptr_t(b)); +} + +template +static inline P* align(P* p, size_t alignment) noexcept { + // alignment must be a power-of-two + assert(alignment && !(alignment & alignment-1)); + return (P*)((uintptr_t(p) + alignment - 1) & ~(alignment - 1)); +} + +template +static inline P* align(P* p, size_t alignment, size_t offset) noexcept { + P* const r = align(add(p, offset), alignment); + assert(pointermath::add(r, -offset) >= p); + return r; +} + +} + +/* ------------------------------------------------------------------------------------------------ + * LinearAllocator + * + * + Allocates blocks linearly + * + Cannot free individual blocks + * + Can free top of memory back up to a specified point + * + Doesn't call destructors + * ------------------------------------------------------------------------------------------------ + */ + +class LinearAllocator { +public: + // use memory area provided + LinearAllocator(void* begin, void* end) noexcept; + + template + explicit LinearAllocator(const AREA& area) : LinearAllocator(area.begin(), area.end()) { } + + // Allocators can't be copied + LinearAllocator(const LinearAllocator& rhs) = delete; + LinearAllocator& operator=(const LinearAllocator& rhs) = delete; + + // Allocators can be moved + LinearAllocator(LinearAllocator&& rhs) noexcept; + LinearAllocator& operator=(LinearAllocator&& rhs) noexcept; + + ~LinearAllocator() noexcept = default; + + // our allocator concept + void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) UTILS_RESTRICT { + // branch-less allocation + void* const p = pointermath::align(current(), alignment, extra); + void* const c = pointermath::add(p, size); + bool success = c <= end(); + set_current(success ? c : current()); + return success ? 
p : nullptr; + } + + // API specific to this allocator + + void *getCurrent() UTILS_RESTRICT noexcept { + return current(); + } + + // free memory back to the specified point + void rewind(void* p) UTILS_RESTRICT noexcept { + assert(p>=mBegin && p + explicit HeapAllocator(const AREA&) { } + + // our allocator concept + void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) { + // this allocator doesn't support 'extra' + assert(extra == 0); + return aligned_alloc(size, alignment); + } + + void free(void* p) noexcept { + aligned_free(p); + } + + void free(void* p, size_t) noexcept { + free(p); + } + + ~HeapAllocator() noexcept = default; + + void swap(HeapAllocator& rhs) noexcept { } +}; + +// ------------------------------------------------------------------------------------------------ + +class FreeList { +public: + FreeList() noexcept = default; + FreeList(void* begin, void* end, size_t elementSize, size_t alignment, size_t extra) noexcept; + FreeList(const FreeList& rhs) = delete; + FreeList& operator=(const FreeList& rhs) = delete; + FreeList(FreeList&& rhs) noexcept = default; + FreeList& operator=(FreeList&& rhs) noexcept = default; + + void* pop() noexcept { + Node* const head = mHead; + mHead = head ? head->next : nullptr; + // this could indicate a use after free + assert(!mHead || mHead >= mBegin && mHead < mEnd); + return head; + } + + void push(void* p) noexcept { + assert(p); + assert(p >= mBegin && p < mEnd); + // TODO: assert this is one of our pointer (i.e.: it's address match one of ours) + Node* const head = static_cast(p); + head->next = mHead; + mHead = head; + } + + void *getFirst() noexcept { + return mHead; + } + +private: + struct Node { + Node* next; + }; + + static Node* init(void* begin, void* end, + size_t elementSize, size_t alignment, size_t extra) noexcept; + + Node* mHead = nullptr; + +#ifndef NDEBUG + // These are needed only for debugging... + void* mBegin = nullptr; + void* mEnd = nullptr; +#endif +}; + +class AtomicFreeList { +public: + AtomicFreeList() noexcept = default; + AtomicFreeList(void* begin, void* end, + size_t elementSize, size_t alignment, size_t extra) noexcept; + AtomicFreeList(const FreeList& rhs) = delete; + AtomicFreeList& operator=(const FreeList& rhs) = delete; + + void* pop() noexcept { + Node* const storage = mStorage; + + HeadPtr currentHead = mHead.load(); + while (currentHead.offset >= 0) { + // The value of "next" we load here might already contain application data if another + // thread raced ahead of us. But in that case, the computed "newHead" will be discarded + // since compare_exchange_weak fails. Then this thread will loop with the updated + // value of currentHead, and try again. + Node* const next = storage[currentHead.offset].next.load(std::memory_order_relaxed); + const HeadPtr newHead{ next ? int32_t(next - storage) : -1, currentHead.tag + 1 }; + // In the rare case that the other thread that raced ahead of us already returned the + // same mHead we just loaded, but it now has a different "next" value, the tag field will not + // match, and compare_exchange_weak will fail and prevent that particular race condition. + if (mHead.compare_exchange_weak(currentHead, newHead)) { + // This assert needs to occur after we have validated that there was no race condition + // Otherwise, next might already contain application data, if another thread + // raced ahead of us after we loaded mHead, but before we loaded mHead->next. 
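A short sketch of the LinearAllocator alloc/getCurrent/rewind cycle described above (buffer size and alignment values are arbitrary, and ios/include is assumed to be on the include path):

    #include <cstddef>
    #include <utils/Allocator.h>

    static void scratchPass() {
        // A fixed 4 KiB buffer managed linearly; rewind() releases everything
        // allocated after the saved watermark in one step.
        alignas(std::max_align_t) static char buffer[4096];
        utils::LinearAllocator la(buffer, buffer + sizeof(buffer));

        void* a = la.alloc(128);          // default alignment
        void* watermark = la.getCurrent();
        void* b = la.alloc(256, 64);      // 64-byte aligned
        la.rewind(watermark);             // releases b (and anything after it), keeps a
        (void)a; (void)b;
    }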
+ assert(!next || next >= storage); + break; + } + } + void* p = (currentHead.offset >= 0) ? (storage + currentHead.offset) : nullptr; + assert(!p || p >= storage); + return p; + } + + void push(void* p) noexcept { + Node* const storage = mStorage; + assert(p && p >= storage); + Node* const node = static_cast(p); + HeadPtr currentHead = mHead.load(); + HeadPtr newHead = { int32_t(node - storage), currentHead.tag + 1 }; + do { + newHead.tag = currentHead.tag + 1; + Node* const n = (currentHead.offset >= 0) ? (storage + currentHead.offset) : nullptr; + node->next.store(n, std::memory_order_relaxed); + } while(!mHead.compare_exchange_weak(currentHead, newHead)); + } + + void* getFirst() noexcept { + return mStorage + mHead.load(std::memory_order_relaxed).offset; + } + +private: + struct Node { + // This should be a regular (non-atomic) pointer, but this causes TSAN to complain + // about a data-race that exists but is benin. We always use this atomic<> in + // relaxed mode. + // The data race TSAN complains about is when a pop() is interrupted by a + // pop() + push() just after mHead->next is read -- it appears as though it is written + // without synchronization (by the push), however in that case, the pop's CAS will fail + // and things will auto-correct. + // + // Pop() | + // | | + // read head->next | + // | pop() + // | | + // | read head->next + // | CAS, tag++ + // | | + // | push() + // | | + // [TSAN: data-race here] write head->next + // | CAS, tag++ + // CAS fails + // | + // read head->next + // | + // CAS, tag++ + // + std::atomic next; + }; + + // This struct is using a 32-bit offset into the arena rather than + // a direct pointer, because together with the 32-bit tag, it needs to + // fit into 8 bytes. If it was any larger, it would not be possible to + // access it atomically. + struct alignas(8) HeadPtr { + int32_t offset; + uint32_t tag; + }; + + std::atomic mHead{}; + + Node* mStorage = nullptr; +}; + +// ------------------------------------------------------------------------------------------------ + +template < + size_t ELEMENT_SIZE, + size_t ALIGNMENT = alignof(std::max_align_t), + size_t OFFSET = 0, + typename FREELIST = FreeList> +class PoolAllocator { + static_assert(ELEMENT_SIZE >= sizeof(void*), "ELEMENT_SIZE must accommodate at least a pointer"); +public: + // our allocator concept + void* alloc(size_t size = ELEMENT_SIZE, + size_t alignment = ALIGNMENT, size_t offset = OFFSET) noexcept { + assert(size <= ELEMENT_SIZE); + assert(alignment <= ALIGNMENT); + assert(offset == OFFSET); + return mFreeList.pop(); + } + + void free(void* p, size_t = ELEMENT_SIZE) noexcept { + mFreeList.push(p); + } + + constexpr size_t getSize() const noexcept { return ELEMENT_SIZE; } + + PoolAllocator(void* begin, void* end) noexcept + : mFreeList(begin, end, ELEMENT_SIZE, ALIGNMENT, OFFSET) { + } + + template + explicit PoolAllocator(const AREA& area) noexcept + : PoolAllocator(area.begin(), area.end()) { + } + + // Allocators can't be copied + PoolAllocator(const PoolAllocator& rhs) = delete; + PoolAllocator& operator=(const PoolAllocator& rhs) = delete; + + // Allocators can be moved + PoolAllocator(PoolAllocator&& rhs) = default; + PoolAllocator& operator=(PoolAllocator&& rhs) = default; + + PoolAllocator() noexcept = default; + ~PoolAllocator() noexcept = default; + + // API specific to this allocator + + void *getCurrent() noexcept { + return mFreeList.getFirst(); + } + +private: + FREELIST mFreeList; +}; + +#define UTILS_MAX(a,b) ((a) > (b) ? 
(a) : (b)) + +template +using ObjectPoolAllocator = PoolAllocator; + +template +using ThreadSafeObjectPoolAllocator = PoolAllocator; + + +// ------------------------------------------------------------------------------------------------ +// Areas +// ------------------------------------------------------------------------------------------------ + +namespace AreaPolicy { + +class StaticArea { +public: + StaticArea() noexcept = default; + + StaticArea(void* b, void* e) noexcept + : mBegin(b), mEnd(e) { + } + + ~StaticArea() noexcept = default; + + StaticArea(const StaticArea& rhs) = default; + StaticArea& operator=(const StaticArea& rhs) = default; + StaticArea(StaticArea&& rhs) noexcept = default; + StaticArea& operator=(StaticArea&& rhs) noexcept = default; + + void* data() const noexcept { return mBegin; } + void* begin() const noexcept { return mBegin; } + void* end() const noexcept { return mEnd; } + size_t size() const noexcept { return uintptr_t(mEnd) - uintptr_t(mBegin); } + + friend void swap(StaticArea& lhs, StaticArea& rhs) noexcept { + using std::swap; + swap(lhs.mBegin, rhs.mBegin); + swap(lhs.mEnd, rhs.mEnd); + } + +private: + void* mBegin = nullptr; + void* mEnd = nullptr; +}; + +class HeapArea { +public: + HeapArea() noexcept = default; + + explicit HeapArea(size_t size) { + if (size) { + // TODO: policy committing memory + mBegin = malloc(size); + mEnd = pointermath::add(mBegin, size); + } + } + + ~HeapArea() noexcept { + // TODO: policy for returning memory to system + free(mBegin); + } + + HeapArea(const HeapArea& rhs) = delete; + HeapArea& operator=(const HeapArea& rhs) = delete; + HeapArea(HeapArea&& rhs) noexcept = delete; + HeapArea& operator=(HeapArea&& rhs) noexcept = delete; + + void* data() const noexcept { return mBegin; } + void* begin() const noexcept { return mBegin; } + void* end() const noexcept { return mEnd; } + size_t size() const noexcept { return uintptr_t(mEnd) - uintptr_t(mBegin); } + + friend void swap(HeapArea& lhs, HeapArea& rhs) noexcept { + using std::swap; + swap(lhs.mBegin, rhs.mBegin); + swap(lhs.mEnd, rhs.mEnd); + } + +private: + void* mBegin = nullptr; + void* mEnd = nullptr; +}; + +} // namespace AreaPolicy + +// ------------------------------------------------------------------------------------------------ +// Policies +// ------------------------------------------------------------------------------------------------ + +namespace LockingPolicy { + +struct NoLock { + void lock() noexcept { } + void unlock() noexcept { } +}; + +using SpinLock = utils::SpinLock; +using Mutex = utils::Mutex; + +} // namespace LockingPolicy + + +namespace TrackingPolicy { + +// default no-op tracker +struct Untracked { + Untracked() noexcept = default; + Untracked(const char* name, void* base, size_t size) noexcept { } + void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept { } + void onFree(void* p, size_t = 0) noexcept { } + void onReset() noexcept { } + void onRewind(void* addr) noexcept { } +}; + +// This just track the max memory usage and logs it in the destructor +struct HighWatermark { + HighWatermark() noexcept = default; + HighWatermark(const char* name, void* base, size_t size) noexcept + : mName(name), mBase(base), mSize(uint32_t(size)) { } + ~HighWatermark() noexcept; + void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept; + void onFree(void* p, size_t size) noexcept; + void onReset() noexcept; + void onRewind(void const* addr) noexcept; +protected: + const char* mName = nullptr; + void* mBase = 
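Putting PoolAllocator and the area policies together, a sketch of a fixed-size pool backed by caller-owned storage (the slot size, alignment, and slab size below are made up; HeapArea(size) would malloc/free the storage instead):

    #include <utils/Allocator.h>

    static void poolSketch() {
        alignas(64) static char storage[64 * 256];
        utils::AreaPolicy::StaticArea area(storage, storage + sizeof(storage));
        utils::PoolAllocator<64, 64> pool(area);   // templated ctor accepts any area

        void* p = pool.alloc();   // pops one 64-byte slot; nullptr once the pool is empty
        if (p) {
            pool.free(p);         // pushes the slot back onto the free list
        }
    }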
nullptr; + uint32_t mSize = 0; + uint32_t mCurrent = 0; + uint32_t mHighWaterMark = 0; +}; + +// This just fills buffers with known values to help catch uninitialized access and use after free. +struct Debug { + Debug() noexcept = default; + Debug(const char* name, void* base, size_t size) noexcept + : mName(name), mBase(base), mSize(uint32_t(size)) { } + void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept; + void onFree(void* p, size_t size) noexcept; + void onReset() noexcept; + void onRewind(void* addr) noexcept; +protected: + const char* mName = nullptr; + void* mBase = nullptr; + uint32_t mSize = 0; +}; + +struct DebugAndHighWatermark : protected HighWatermark, protected Debug { + DebugAndHighWatermark() noexcept = default; + DebugAndHighWatermark(const char* name, void* base, size_t size) noexcept + : HighWatermark(name, base, size), Debug(name, base, size) { } + void onAlloc(void* p, size_t size, size_t alignment, size_t extra) noexcept { + HighWatermark::onAlloc(p, size, alignment, extra); + Debug::onAlloc(p, size, alignment, extra); + } + void onFree(void* p, size_t size) noexcept { + HighWatermark::onFree(p, size); + Debug::onFree(p, size); + } + void onReset() noexcept { + HighWatermark::onReset(); + Debug::onReset(); + } + void onRewind(void* addr) noexcept { + HighWatermark::onRewind(addr); + Debug::onRewind(addr); + } +}; + +} // namespace TrackingPolicy + +// ------------------------------------------------------------------------------------------------ +// Arenas +// ------------------------------------------------------------------------------------------------ + +template +class Arena { +public: + + Arena() = default; + + // construct an arena with a name and forward argument to its allocator + template + Arena(const char* name, size_t size, ARGS&& ... args) + : mArenaName(name), + mArea(size), + mAllocator(mArea, std::forward(args) ... ), + mListener(name, mArea.data(), mArea.size()) { + } + + template + Arena(const char* name, AreaPolicy&& area, ARGS&& ... args) + : mArenaName(name), + mArea(std::forward(area)), + mAllocator(mArea, std::forward(args) ... ), + mListener(name, mArea.data(), mArea.size()) { + } + + // allocate memory from arena with given size and alignment + // (acceptable size/alignment may depend on the allocator provided) + void* alloc(size_t size, size_t alignment = alignof(std::max_align_t), size_t extra = 0) noexcept { + std::lock_guard guard(mLock); + void* p = mAllocator.alloc(size, alignment, extra); + mListener.onAlloc(p, size, alignment, extra); + return p; + } + + // Allocate an array of trivially destructible objects + // for safety, we disable the object-based alloc method if the object type is not + // trivially destructible, since free() won't call the destructor and this is allocating + // an array. 
+ template ::value>::type> + T* alloc(size_t count, size_t alignment = alignof(T), size_t extra = 0) noexcept { + return (T*)alloc(count * sizeof(T), alignment, extra); + } + + // return memory pointed by p to the arena + // (actual behaviour may depend on allocator provided) + void free(void* p) noexcept { + if (p) { + std::lock_guard guard(mLock); + mListener.onFree(p); + mAllocator.free(p); + } + } + + // some allocators require the size of the allocation for free + void free(void* p, size_t size) noexcept { + if (p) { + std::lock_guard guard(mLock); + mListener.onFree(p, size); + mAllocator.free(p, size); + } + } + + // some allocators don't have a free() call, but a single reset() or rewind() instead + void reset() noexcept { + std::lock_guard guard(mLock); + mListener.onReset(); + mAllocator.reset(); + } + + void* getCurrent() noexcept { return mAllocator.getCurrent(); } + + void rewind(void *addr) noexcept { + std::lock_guard guard(mLock); + mListener.onRewind(addr); + mAllocator.rewind(addr); + } + + // Allocate and construct an object + template + T* make(ARGS&& ... args) noexcept { + void* const p = this->alloc(sizeof(T), ALIGN); + return p ? new(p) T(std::forward(args)...) : nullptr; + } + + // destroys an object created with make() above, and frees associated memory + template + void destroy(T* p) noexcept { + if (p) { + p->~T(); + this->free((void*)p, sizeof(T)); + } + } + + char const* getName() const noexcept { return mArenaName; } + + AllocatorPolicy& getAllocator() noexcept { return mAllocator; } + AllocatorPolicy const& getAllocator() const noexcept { return mAllocator; } + + TrackingPolicy& getListener() noexcept { return mListener; } + TrackingPolicy const& getListener() const noexcept { return mListener; } + + AreaPolicy& getArea() noexcept { return mArea; } + AreaPolicy const& getArea() const noexcept { return mArea; } + + void setListener(TrackingPolicy listener) noexcept { + std::swap(mListener, listener); + } + + template + void emplaceListener(ARGS&& ... args) noexcept { + mListener.~TrackingPolicy(); + new (&mListener) TrackingPolicy(std::forward(args)...); + } + + // An arena can't be copied + Arena(Arena const& rhs) noexcept = delete; + Arena& operator=(Arena const& rhs) noexcept = delete; + + friend void swap(Arena& lhs, Arena& rhs) noexcept { + using std::swap; + swap(lhs.mArea, rhs.mArea); + swap(lhs.mAllocator, rhs.mAllocator); + swap(lhs.mLock, rhs.mLock); + swap(lhs.mListener, rhs.mListener); + swap(lhs.mArenaName, rhs.mArenaName); + } + +private: + char const* mArenaName = nullptr; + AreaPolicy mArea; + // note: we should use something like compressed_pair for the members below + AllocatorPolicy mAllocator; + LockingPolicy mLock; + TrackingPolicy mListener; +}; + +// ------------------------------------------------------------------------------------------------ + +template +using HeapArena = Arena; + +// ------------------------------------------------------------------------------------------------ + +// This doesn't implement our allocator concept, because it's too risky to use this as an allocator +// in particular, doing ArenaScope. 
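A sketch of composing Arena from the policies above. The template-parameter order (allocator, locking, tracking) and the defaulted heap-backed area policy are assumptions, and the arena name and sizes are arbitrary:

    #include <utils/Allocator.h>

    using FrameArena = utils::Arena<
            utils::LinearAllocator,
            utils::LockingPolicy::NoLock,        // or SpinLock / Mutex for shared arenas
            utils::TrackingPolicy::Untracked>;   // or HighWatermark / Debug in debug builds

    static void frameSketch() {
        FrameArena arena("frame arena", 1024 * 1024);

        struct Particle { float x, y, z; };
        Particle* p = arena.make<Particle>();    // alloc + placement-new
        arena.destroy(p);                        // destructor + free
        arena.reset();                           // or drop the whole frame at once
    }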
+template +class ArenaScope { + + struct Finalizer { + void (*finalizer)(void* p) = nullptr; + Finalizer* next = nullptr; + }; + + template + static void destruct(void* p) noexcept { + static_cast(p)->~T(); + } + +public: + explicit ArenaScope(ARENA& allocator) + : mArena(allocator), mRewind(allocator.getCurrent()) { + } + + ArenaScope& operator=(const ArenaScope& rhs) = delete; + ArenaScope(ArenaScope&& rhs) noexcept = delete; + ArenaScope& operator=(ArenaScope&& rhs) noexcept = delete; + + ~ArenaScope() { + // run the finalizer chain + Finalizer* head = mFinalizerHead; + while (head) { + void* p = pointermath::add(head, sizeof(Finalizer)); + head->finalizer(p); + head = head->next; + } + // ArenaScope works only with Arena that implements rewind() + mArena.rewind(mRewind); + } + + template + T* make(ARGS&& ... args) noexcept { + T* o = nullptr; + if (std::is_trivially_destructible::value) { + o = mArena.template make(std::forward(args)...); + } else { + void* const p = (Finalizer*)mArena.alloc(sizeof(T), ALIGN, sizeof(Finalizer)); + if (p != nullptr) { + Finalizer* const f = static_cast(p) - 1; + // constructor must be called before adding the dtor to the list + // so that the ctor can allocate objects in a nested scope and have the + // finalizers called in reverse order. + o = new(p) T(std::forward(args)...); + f->finalizer = &destruct; + f->next = mFinalizerHead; + mFinalizerHead = f; + } + } + return o; + } + + void* allocate(size_t size, size_t alignment = 1) noexcept { + return mArena.template alloc(size, alignment, 0); + } + + template + T* allocate(size_t size, size_t alignment = alignof(T), size_t extra = 0) noexcept { + return mArena.template alloc(size, alignment, extra); + } + + // use with caution + ARENA& getAllocator() noexcept { return mArena; } + +private: + ARENA& mArena; + void* mRewind = nullptr; + Finalizer* mFinalizerHead = nullptr; +}; + + +template +class STLAllocator { +public: + using value_type = TYPE; + using pointer = TYPE*; + using const_pointer = const TYPE*; + using reference = TYPE&; + using const_reference = const TYPE&; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using propagate_on_container_move_assignment = std::true_type; + using is_always_equal = std::true_type; + + template + struct rebind { using other = STLAllocator; }; + +public: + // we don't make this explicit, so that we can initialize a vector using a STLAllocator + // from an Arena, avoiding to have to repeat the vector type. + STLAllocator(ARENA& arena) : mArena(arena) { } // NOLINT(google-explicit-constructor) + + template + explicit STLAllocator(STLAllocator const& rhs) : mArena(rhs.mArena) { } + + TYPE* allocate(std::size_t n) { + auto p = static_cast(mArena.alloc(n * sizeof(TYPE), alignof(TYPE))); + assert_invariant(p); + return p; + } + + void deallocate(TYPE* p, std::size_t n) { + mArena.free(p, n * sizeof(TYPE)); + } + + // these should be out-of-class friends, but this doesn't seem to work with some compilers + // which complain about multiple definition each time a STLAllocator<> is instantiated. 
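Two follow-on sketches reusing the FrameArena alias assumed above: ArenaScope for scope-bound allocations with finalizers, and STLAllocator backing a std::vector. ArenaScope is assumed to be parameterized on the arena type, and STLAllocator on the element type plus the arena type:

    #include <string>
    #include <vector>
    #include <utils/Allocator.h>

    static void scopedSketch(FrameArena& arena) {
        utils::ArenaScope<FrameArena> scope(arena);
        std::string* label = scope.make<std::string>("transient");  // finalizer recorded
        void* scratch = scope.allocate(1024);                       // raw bytes, no finalizer
        (void)label; (void)scratch;
    }   // ~ArenaScope: runs ~std::string(), then rewinds the arena to where it started

    static void vectorSketch(FrameArena& arena) {
        // The non-explicit STLAllocator constructor lets the arena convert in place.
        std::vector<int, utils::STLAllocator<int, FrameArena>> v(arena);
        v.reserve(1000);
        v.push_back(42);
    }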
+ template + bool operator==(const STLAllocator& rhs) const noexcept { + return std::addressof(mArena) == std::addressof(rhs.mArena); + } + + template + bool operator!=(const STLAllocator& rhs) const noexcept { + return !operator==(rhs); + } + +private: + template + friend class STLAllocator; + + ARENA& mArena; +}; + +} // namespace utils + +#endif // TNT_UTILS_ALLOCATOR_H diff --git a/ios/include/utils/BinaryTreeArray.h b/ios/include/utils/BinaryTreeArray.h index 50a1d6c8..d147d069 100644 --- a/ios/include/utils/BinaryTreeArray.h +++ b/ios/include/utils/BinaryTreeArray.h @@ -1,114 +1,114 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_BINARYTREEARRAY_H -#define TNT_UTILS_BINARYTREEARRAY_H - -#include - -#include - -#include -#include -#include - -namespace utils { - -class BinaryTreeArray { - - // Simple fixed capacity stack - template::value>::type> - class stack { - TYPE mElements[CAPACITY]; - size_t mSize = 0; - public: - bool empty() const noexcept { return mSize == 0; } - void push(TYPE const& v) noexcept { - assert(mSize < CAPACITY); - mElements[mSize++] = v; - } - void pop() noexcept { - assert(mSize > 0); - --mSize; - } - const TYPE& back() const noexcept { - return mElements[mSize - 1]; - } - }; - -public: - static size_t count(size_t height) noexcept { return (1u << height) - 1; } - static size_t left(size_t i, size_t height) noexcept { return i + 1; } - static size_t right(size_t i, size_t height) noexcept { return i + (1u << (height - 1)); } - - // this builds the depth-first binary tree array top down (post-order) - template - static void traverse(size_t height, Leaf leaf, Node node) noexcept { - - struct TNode { - uint32_t index; - uint32_t col; - uint32_t height; - uint32_t next; - - bool isLeaf() const noexcept { return height == 1; } - size_t left() const noexcept { return BinaryTreeArray::left(index, height); } - size_t right() const noexcept { return BinaryTreeArray::right(index, height); } - }; - - stack stack; - stack.push(TNode{ 0, 0, (uint32_t)height, (uint32_t)count(height) }); - - uint32_t prevLeft = 0; - uint32_t prevRight = 0; - uint32_t prevIndex = 0; - while (!stack.empty()) { - TNode const* const UTILS_RESTRICT curr = &stack.back(); - const bool isLeaf = curr->isLeaf(); - const uint32_t index = curr->index; - const uint32_t l = (uint32_t)curr->left(); - const uint32_t r = (uint32_t)curr->right(); - - if (prevLeft == index || prevRight == index) { - if (!isLeaf) { - // the 'next' node of our left node's right descendants is our right child - stack.push({ l, 2 * curr->col, curr->height - 1, r }); - } - } else if (l == prevIndex) { - if (!isLeaf) { - // the 'next' node of our right child is our own 'next' sibling - stack.push({ r, 2 * curr->col + 1, curr->height - 1, curr->next }); - } - } else { - if (!isLeaf) { - node(index, l, r, curr->next); - } else { - leaf(index, curr->col, curr->next); - } - stack.pop(); - } - - prevLeft = l; - prevRight = r; - prevIndex = 
index; - } - } -}; - -} // namespace utils - -#endif //TNT_UTILS_BINARYTREEARRAY_H +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_BINARYTREEARRAY_H +#define TNT_UTILS_BINARYTREEARRAY_H + +#include + +#include + +#include +#include +#include + +namespace utils { + +class BinaryTreeArray { + + // Simple fixed capacity stack + template::value>::type> + class stack { + TYPE mElements[CAPACITY]; + size_t mSize = 0; + public: + bool empty() const noexcept { return mSize == 0; } + void push(TYPE const& v) noexcept { + assert(mSize < CAPACITY); + mElements[mSize++] = v; + } + void pop() noexcept { + assert(mSize > 0); + --mSize; + } + const TYPE& back() const noexcept { + return mElements[mSize - 1]; + } + }; + +public: + static size_t count(size_t height) noexcept { return (1u << height) - 1; } + static size_t left(size_t i, size_t height) noexcept { return i + 1; } + static size_t right(size_t i, size_t height) noexcept { return i + (1u << (height - 1)); } + + // this builds the depth-first binary tree array top down (post-order) + template + static void traverse(size_t height, Leaf leaf, Node node) noexcept { + + struct TNode { + uint32_t index; + uint32_t col; + uint32_t height; + uint32_t next; + + bool isLeaf() const noexcept { return height == 1; } + size_t left() const noexcept { return BinaryTreeArray::left(index, height); } + size_t right() const noexcept { return BinaryTreeArray::right(index, height); } + }; + + stack stack; + stack.push(TNode{ 0, 0, (uint32_t)height, (uint32_t)count(height) }); + + uint32_t prevLeft = 0; + uint32_t prevRight = 0; + uint32_t prevIndex = 0; + while (!stack.empty()) { + TNode const* const UTILS_RESTRICT curr = &stack.back(); + const bool isLeaf = curr->isLeaf(); + const uint32_t index = curr->index; + const uint32_t l = (uint32_t)curr->left(); + const uint32_t r = (uint32_t)curr->right(); + + if (prevLeft == index || prevRight == index) { + if (!isLeaf) { + // the 'next' node of our left node's right descendants is our right child + stack.push({ l, 2 * curr->col, curr->height - 1, r }); + } + } else if (l == prevIndex) { + if (!isLeaf) { + // the 'next' node of our right child is our own 'next' sibling + stack.push({ r, 2 * curr->col + 1, curr->height - 1, curr->next }); + } + } else { + if (!isLeaf) { + node(index, l, r, curr->next); + } else { + leaf(index, curr->col, curr->next); + } + stack.pop(); + } + + prevLeft = l; + prevRight = r; + prevIndex = index; + } + } +}; + +} // namespace utils + +#endif //TNT_UTILS_BINARYTREEARRAY_H diff --git a/ios/include/utils/BitmaskEnum.h b/ios/include/utils/BitmaskEnum.h index ff4fc0d9..47f9cef0 100644 --- a/ios/include/utils/BitmaskEnum.h +++ b/ios/include/utils/BitmaskEnum.h @@ -1,99 +1,99 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
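A sketch of driving BinaryTreeArray::traverse for a tree of height 3; the callbacks and the bookkeeping array are made up purely to show the callback signatures:

    #include <cstddef>
    #include <utils/BinaryTreeArray.h>

    static void treeSketch() {
        // height 3 => count(3) == 7 slots, laid out depth-first
        bool isLeafSlot[7] = {};
        utils::BinaryTreeArray::traverse(3,
                [&](size_t index, size_t col, size_t next) {   // bottom-row leaf
                    isLeafSlot[index] = true;
                    (void)col; (void)next;
                },
                [&](size_t index, size_t left, size_t right, size_t next) {   // inner node
                    (void)index; (void)left; (void)right; (void)next;
                });
    }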
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_BITMASKENUM_H -#define TNT_UTILS_BITMASKENUM_H - -#include - -#include // for std::false_type - -#include -#include -#include - -namespace utils { -template -struct EnableBitMaskOperators : public std::false_type { }; -} // namespace utils - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr bool operator!(Enum rhs) noexcept { - using underlying = std::underlying_type_t; - return underlying(rhs) == 0; -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator~(Enum rhs) noexcept { - using underlying = std::underlying_type_t; - return Enum(~underlying(rhs)); -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator|(Enum lhs, Enum rhs) noexcept { - using underlying = std::underlying_type_t; - return Enum(underlying(lhs) | underlying(rhs)); -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator&(Enum lhs, Enum rhs) noexcept { - using underlying = std::underlying_type_t; - return Enum(underlying(lhs) & underlying(rhs)); -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator^(Enum lhs, Enum rhs) noexcept { - using underlying = std::underlying_type_t; - return Enum(underlying(lhs) ^ underlying(rhs)); -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator|=(Enum& lhs, Enum rhs) noexcept { - return lhs = lhs | rhs; -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator&=(Enum& lhs, Enum rhs) noexcept { - return lhs = lhs & rhs; -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr Enum operator^=(Enum& lhs, Enum rhs) noexcept { - return lhs = lhs ^ rhs; -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr bool none(Enum lhs) noexcept { - return !lhs; -} - -template::value && utils::EnableBitMaskOperators::value, int> = 0> -inline constexpr bool any(Enum lhs) noexcept { - return !none(lhs); -} - - -#endif // TNT_UTILS_BITMASKENUM_H +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_BITMASKENUM_H +#define TNT_UTILS_BITMASKENUM_H + +#include + +#include // for std::false_type + +#include +#include +#include + +namespace utils { +template +struct EnableBitMaskOperators : public std::false_type { }; +} // namespace utils + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr bool operator!(Enum rhs) noexcept { + using underlying = std::underlying_type_t; + return underlying(rhs) == 0; +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator~(Enum rhs) noexcept { + using underlying = std::underlying_type_t; + return Enum(~underlying(rhs)); +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator|(Enum lhs, Enum rhs) noexcept { + using underlying = std::underlying_type_t; + return Enum(underlying(lhs) | underlying(rhs)); +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator&(Enum lhs, Enum rhs) noexcept { + using underlying = std::underlying_type_t; + return Enum(underlying(lhs) & underlying(rhs)); +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator^(Enum lhs, Enum rhs) noexcept { + using underlying = std::underlying_type_t; + return Enum(underlying(lhs) ^ underlying(rhs)); +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator|=(Enum& lhs, Enum rhs) noexcept { + return lhs = lhs | rhs; +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator&=(Enum& lhs, Enum rhs) noexcept { + return lhs = lhs & rhs; +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr Enum operator^=(Enum& lhs, Enum rhs) noexcept { + return lhs = lhs ^ rhs; +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr bool none(Enum lhs) noexcept { + return !lhs; +} + +template::value && utils::EnableBitMaskOperators::value, int> = 0> +inline constexpr bool any(Enum lhs) noexcept { + return !none(lhs); +} + + +#endif // TNT_UTILS_BITMASKENUM_H diff --git a/ios/include/utils/CString.h b/ios/include/utils/CString.h index fb9af1aa..18e57307 100644 --- a/ios/include/utils/CString.h +++ b/ios/include/utils/CString.h @@ -1,393 +1,393 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_FILAMENT_CSTRING_H -#define TNT_FILAMENT_CSTRING_H - -// NOTE: this header should not include STL headers - -#include - -#include -#include -#include -#include -#include - -namespace utils { - -//! \privatesection -struct hashCStrings { - typedef const char* argument_type; - typedef size_t result_type; - result_type operator()(argument_type cstr) const noexcept { - size_t hash = 5381; - while (int c = *cstr++) { - hash = (hash * 33u) ^ size_t(c); - } - return hash; - } -}; - -//! 
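A sketch of opting a scoped enum into the bitmask operators defined in BitmaskEnum.h above; the enum, its values, and the helper function are made up for illustration:

    #include <cstdint>
    #include <utils/BitmaskEnum.h>

    enum class CullFlags : std::uint8_t { None = 0, Front = 1, Back = 2, Shadows = 4 };

    namespace utils {
    template<> struct EnableBitMaskOperators<CullFlags> : public std::true_type {};
    } // namespace utils

    static bool castsShadows(CullFlags f) {
        f &= ~CullFlags::Front;               // clear the Front bit
        return any(f & CullFlags::Shadows);   // true if the Shadows bit is set
    }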
\privatesection -struct equalCStrings { - typedef const char* first_argument_type; - typedef const char* second_argument_type; - typedef bool result_type; - bool operator()(const char* lhs, const char* rhs) const noexcept { - return !strcmp(lhs, rhs); - } -}; - -//! \privatesection -struct lessCStrings { - typedef const char* first_argument_type; - typedef const char* second_argument_type; - typedef bool result_type; - result_type operator()(first_argument_type lhs, second_argument_type rhs) const noexcept { - return strcmp(lhs, rhs) < 0; - } -}; - -// This can be used to creates a string from a string literal -- w/o underlying allocations. -// e.g.: -// StaticString s("Hello World!"); -// -template -using StringLiteral = const char[N]; - -//! \publicsection -class UTILS_PUBLIC StaticString { -public: - using value_type = char; - using size_type = uint32_t; - using difference_type = int32_t; - using const_reference = const value_type&; - using const_pointer = const value_type*; - using const_iterator = const value_type*; - - constexpr StaticString() noexcept = default; - - // initialization from a string literal - template - constexpr StaticString(StringLiteral const& other) noexcept // NOLINT(google-explicit-constructor) - : mString(other), - mLength(size_type(N - 1)), - mHash(computeHash(other)) { - } - - // assignment from a string literal - template - StaticString& operator=(StringLiteral const& other) noexcept { - mString = other; - mLength = size_type(N - 1); - mHash = computeHash(other); - return *this; - } - - // helper to make a StaticString from a C string that is known to be a string literal - static constexpr StaticString make(const_pointer literal, size_t length) noexcept { - StaticString r; - r.mString = literal; - r.mLength = size_type(length); - size_type hash = 5381; - while (int c = *literal++) { - hash = (hash * 33u) ^ size_type(c); - } - r.mHash = hash; - return r; - } - - static StaticString make(const_pointer literal) noexcept { - return make(literal, strlen(literal)); - } - - const_pointer c_str() const noexcept { return mString; } - const_pointer data() const noexcept { return mString; } - size_type size() const noexcept { return mLength; } - size_type length() const noexcept { return mLength; } - bool empty() const noexcept { return size() == 0; } - void clear() noexcept { mString = nullptr; mLength = 0; } - - const_iterator begin() const noexcept { return mString; } - const_iterator end() const noexcept { return mString + mLength; } - const_iterator cbegin() const noexcept { return begin(); } - const_iterator cend() const noexcept { return end(); } - - const_reference operator[](size_type pos) const noexcept { - assert(pos < size()); - return begin()[pos]; - } - - const_reference at(size_type pos) const noexcept { - assert(pos < size()); - return begin()[pos]; - } - - const_reference front() const noexcept { - assert(size()); - return begin()[0]; - } - - const_reference back() const noexcept { - assert(size()); - return begin()[size() - 1]; - } - - size_type getHash() const noexcept { return mHash; } - -private: - const_pointer mString = nullptr; - size_type mLength = 0; - size_type mHash = 0; - - template - static constexpr size_type computeHash(StringLiteral const& s) noexcept { - size_type hash = 5381; - for (size_t i = 0; i < N - 1; i++) { - hash = (hash * 33u) ^ size_type(s[i]); - } - return hash; - } - - int compare(const StaticString& rhs) const noexcept; - - friend bool operator==(StaticString const& lhs, StaticString const& rhs) noexcept { - return 
(lhs.data() == rhs.data()) || - ((lhs.size() == rhs.size()) && !strncmp(lhs.data(), rhs.data(), lhs.size())); - } - friend bool operator!=(StaticString const& lhs, StaticString const& rhs) noexcept { - return !(lhs == rhs); - } - friend bool operator<(StaticString const& lhs, StaticString const& rhs) noexcept { - return lhs.compare(rhs) < 0; - } - friend bool operator>(StaticString const& lhs, StaticString const& rhs) noexcept { - return lhs.compare(rhs) > 0; - } - friend bool operator>=(StaticString const& lhs, StaticString const& rhs) noexcept { - return !(lhs < rhs); - } - friend bool operator<=(StaticString const& lhs, StaticString const& rhs) noexcept { - return !(lhs > rhs); - } -}; - -// ------------------------------------------------------------------------------------------------ - -class UTILS_PUBLIC CString { -public: - using value_type = char; - using size_type = uint32_t; - using difference_type = int32_t; - using reference = value_type&; - using const_reference = const value_type&; - using pointer = value_type*; - using const_pointer = const value_type*; - using iterator = value_type*; - using const_iterator = const value_type*; - - CString() noexcept = default; - - // Allocates memory and appends a null. This constructor can be used to hold arbitrary data - // inside the string (i.e. it can contain nulls or non-ASCII encodings). - CString(const char* cstr, size_t length); - - // Allocates memory and copies traditional C string content. Unlike the above constructor, this - // does not alllow embedded nulls. This is explicit because this operation is costly. - explicit CString(const char* cstr); - - template - explicit CString(StringLiteral const& other) noexcept // NOLINT(google-explicit-constructor) - : CString(other, N - 1) { - } - - CString(StaticString const& s) : CString(s.c_str(), s.size()) {} - - CString(const CString& rhs); - - CString(CString&& rhs) noexcept { - this->swap(rhs); - } - - - CString& operator=(const CString& rhs); - - CString& operator=(CString&& rhs) noexcept { - this->swap(rhs); - return *this; - } - - ~CString() noexcept { - if (mData) { - free(mData - 1); - } - } - - void swap(CString& other) noexcept { - // don't use std::swap(), we don't want an STL dependency in this file - auto *temp = mCStr; - mCStr = other.mCStr; - other.mCStr = temp; - } - - const_pointer c_str() const noexcept { return mCStr; } - pointer c_str() noexcept { return mCStr; } - const_pointer c_str_safe() const noexcept { return mData ? c_str() : ""; } - const_pointer data() const noexcept { return c_str(); } - pointer data() noexcept { return c_str(); } - size_type size() const noexcept { return mData ? 
mData[-1].length : 0; } - size_type length() const noexcept { return size(); } - bool empty() const noexcept { return size() == 0; } - - iterator begin() noexcept { return mCStr; } - iterator end() noexcept { return begin() + length(); } - const_iterator begin() const noexcept { return data(); } - const_iterator end() const noexcept { return begin() + length(); } - const_iterator cbegin() const noexcept { return begin(); } - const_iterator cend() const noexcept { return end(); } - - CString& replace(size_type pos, size_type len, const CString& str) noexcept; - CString& insert(size_type pos, const CString& str) noexcept { return replace(pos, 0, str); } - - const_reference operator[](size_type pos) const noexcept { - assert(pos < size()); - return begin()[pos]; - } - - reference operator[](size_type pos) noexcept { - assert(pos < size()); - return begin()[pos]; - } - - const_reference at(size_type pos) const noexcept { - assert(pos < size()); - return begin()[pos]; - } - - reference at(size_type pos) noexcept { - assert(pos < size()); - return begin()[pos]; - } - - reference front() noexcept { - assert(size()); - return begin()[0]; - } - - const_reference front() const noexcept { - assert(size()); - return begin()[0]; - } - - reference back() noexcept { - assert(size()); - return begin()[size() - 1]; - } - - const_reference back() const noexcept { - assert(size()); - return begin()[size() - 1]; - } - - // placement new declared as "throw" to avoid the compiler's null-check - inline void* operator new(size_t size, void* ptr) { - assert(ptr); - return ptr; - } - -private: - struct Data { - size_type length; - }; - - // mCStr points to the C-string or nullptr. if non-null, mCStr is preceded by the string's size - union { - value_type *mCStr = nullptr; - Data* mData; // Data is stored at mData[-1] - }; - - int compare(const CString& rhs) const noexcept { - size_type lhs_size = size(); - size_type rhs_size = rhs.size(); - if (lhs_size < rhs_size) return -1; - if (lhs_size > rhs_size) return 1; - return strncmp(data(), rhs.data(), size()); - } - - friend bool operator==(CString const& lhs, StaticString const& rhs) noexcept { - return (lhs.data() == rhs.data()) || - ((lhs.size() == rhs.size()) && !strncmp(lhs.data(), rhs.data(), lhs.size())); - } - friend bool operator==(CString const& lhs, CString const& rhs) noexcept { - return (lhs.data() == rhs.data()) || - ((lhs.size() == rhs.size()) && !strncmp(lhs.data(), rhs.data(), lhs.size())); - } - friend bool operator!=(CString const& lhs, CString const& rhs) noexcept { - return !(lhs == rhs); - } - friend bool operator<(CString const& lhs, CString const& rhs) noexcept { - return lhs.compare(rhs) < 0; - } - friend bool operator>(CString const& lhs, CString const& rhs) noexcept { - return lhs.compare(rhs) > 0; - } - friend bool operator>=(CString const& lhs, CString const& rhs) noexcept { - return !(lhs < rhs); - } - friend bool operator<=(CString const& lhs, CString const& rhs) noexcept { - return !(lhs > rhs); - } -}; - -// implement this for your type for automatic conversion to CString. Failing to do so leads -// to a compile time failure. -template -CString to_string(T value) noexcept; - -} // namespace utils - -// FIXME: how could we not include this one? -// needed for std::hash, since implementation is inline, this would not cause -// binaries incompatibilities if another STL version was used. -#include - -namespace std { - -//! 
\privatesection -template<> -struct hash { - typedef utils::CString argument_type; - typedef size_t result_type; - utils::hashCStrings hasher; - size_t operator()(const utils::CString& s) const noexcept { - return hasher(s.c_str()); - } -}; - -//! \privatesection -template<> -struct hash { - typedef utils::StaticString argument_type; - typedef size_t result_type; - size_t operator()(const utils::StaticString& s) const noexcept { - return s.getHash(); - } -}; - -} // namespace std - -#endif // TNT_FILAMENT_CSTRING_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_CSTRING_H +#define TNT_UTILS_CSTRING_H + +// NOTE: this header should not include STL headers + +#include + +#include +#include +#include +#include +#include + +namespace utils { + +//! \privatesection +struct hashCStrings { + typedef const char* argument_type; + typedef size_t result_type; + result_type operator()(argument_type cstr) const noexcept { + size_t hash = 5381; + while (int c = *cstr++) { + hash = (hash * 33u) ^ size_t(c); + } + return hash; + } +}; + +//! \privatesection +struct equalCStrings { + typedef const char* first_argument_type; + typedef const char* second_argument_type; + typedef bool result_type; + bool operator()(const char* lhs, const char* rhs) const noexcept { + return !strcmp(lhs, rhs); + } +}; + +//! \privatesection +struct lessCStrings { + typedef const char* first_argument_type; + typedef const char* second_argument_type; + typedef bool result_type; + result_type operator()(first_argument_type lhs, second_argument_type rhs) const noexcept { + return strcmp(lhs, rhs) < 0; + } +}; + +// This can be used to creates a string from a string literal -- w/o underlying allocations. +// e.g.: +// StaticString s("Hello World!"); +// +template +using StringLiteral = const char[N]; + +//! 
\publicsection +class UTILS_PUBLIC StaticString { +public: + using value_type = char; + using size_type = uint32_t; + using difference_type = int32_t; + using const_reference = const value_type&; + using const_pointer = const value_type*; + using const_iterator = const value_type*; + + constexpr StaticString() noexcept = default; + + // initialization from a string literal + template + constexpr StaticString(StringLiteral const& other) noexcept // NOLINT(google-explicit-constructor) + : mString(other), + mLength(size_type(N - 1)), + mHash(computeHash(other)) { + } + + // assignment from a string literal + template + StaticString& operator=(StringLiteral const& other) noexcept { + mString = other; + mLength = size_type(N - 1); + mHash = computeHash(other); + return *this; + } + + // helper to make a StaticString from a C string that is known to be a string literal + static constexpr StaticString make(const_pointer literal, size_t length) noexcept { + StaticString r; + r.mString = literal; + r.mLength = size_type(length); + size_type hash = 5381; + while (int c = *literal++) { + hash = (hash * 33u) ^ size_type(c); + } + r.mHash = hash; + return r; + } + + static StaticString make(const_pointer literal) noexcept { + return make(literal, strlen(literal)); + } + + const_pointer c_str() const noexcept { return mString; } + const_pointer data() const noexcept { return mString; } + size_type size() const noexcept { return mLength; } + size_type length() const noexcept { return mLength; } + bool empty() const noexcept { return size() == 0; } + void clear() noexcept { mString = nullptr; mLength = 0; } + + const_iterator begin() const noexcept { return mString; } + const_iterator end() const noexcept { return mString + mLength; } + const_iterator cbegin() const noexcept { return begin(); } + const_iterator cend() const noexcept { return end(); } + + const_reference operator[](size_type pos) const noexcept { + assert(pos < size()); + return begin()[pos]; + } + + const_reference at(size_type pos) const noexcept { + assert(pos < size()); + return begin()[pos]; + } + + const_reference front() const noexcept { + assert(size()); + return begin()[0]; + } + + const_reference back() const noexcept { + assert(size()); + return begin()[size() - 1]; + } + + size_type getHash() const noexcept { return mHash; } + +private: + const_pointer mString = nullptr; + size_type mLength = 0; + size_type mHash = 0; + + template + static constexpr size_type computeHash(StringLiteral const& s) noexcept { + size_type hash = 5381; + for (size_t i = 0; i < N - 1; i++) { + hash = (hash * 33u) ^ size_type(s[i]); + } + return hash; + } + + int compare(const StaticString& rhs) const noexcept; + + friend bool operator==(StaticString const& lhs, StaticString const& rhs) noexcept { + return (lhs.data() == rhs.data()) || + ((lhs.size() == rhs.size()) && !strncmp(lhs.data(), rhs.data(), lhs.size())); + } + friend bool operator!=(StaticString const& lhs, StaticString const& rhs) noexcept { + return !(lhs == rhs); + } + friend bool operator<(StaticString const& lhs, StaticString const& rhs) noexcept { + return lhs.compare(rhs) < 0; + } + friend bool operator>(StaticString const& lhs, StaticString const& rhs) noexcept { + return lhs.compare(rhs) > 0; + } + friend bool operator>=(StaticString const& lhs, StaticString const& rhs) noexcept { + return !(lhs < rhs); + } + friend bool operator<=(StaticString const& lhs, StaticString const& rhs) noexcept { + return !(lhs > rhs); + } +}; + +// 
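// --- editorial usage sketch, not part of the patch ---------------------------
// StaticString (added above) wraps a string literal without copying or
// allocating; the constexpr constructor computes length and hash at compile
// time, and make() covers pointers known to be literals but only seen at
// runtime. The include path is assumed from the ios/include/utils/ layout.
#include <utils/CString.h>

static constexpr utils::StaticString kParamName("baseColor");

bool isBaseColor(utils::StaticString const& name) {
    // getHash() is O(1) (precomputed); operator== compares characters only
    // when the pointers differ but the sizes match.
    return name == kParamName;
}
// --- end of editorial sketch, patch resumes below ----------------------------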
------------------------------------------------------------------------------------------------ + +class UTILS_PUBLIC CString { +public: + using value_type = char; + using size_type = uint32_t; + using difference_type = int32_t; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = value_type*; + using const_pointer = const value_type*; + using iterator = value_type*; + using const_iterator = const value_type*; + + CString() noexcept = default; + + // Allocates memory and appends a null. This constructor can be used to hold arbitrary data + // inside the string (i.e. it can contain nulls or non-ASCII encodings). + CString(const char* cstr, size_t length); + + // Allocates memory and copies traditional C string content. Unlike the above constructor, this + // does not alllow embedded nulls. This is explicit because this operation is costly. + explicit CString(const char* cstr); + + template + explicit CString(StringLiteral const& other) noexcept // NOLINT(google-explicit-constructor) + : CString(other, N - 1) { + } + + CString(StaticString const& s) : CString(s.c_str(), s.size()) {} + + CString(const CString& rhs); + + CString(CString&& rhs) noexcept { + this->swap(rhs); + } + + + CString& operator=(const CString& rhs); + + CString& operator=(CString&& rhs) noexcept { + this->swap(rhs); + return *this; + } + + ~CString() noexcept { + if (mData) { + free(mData - 1); + } + } + + void swap(CString& other) noexcept { + // don't use std::swap(), we don't want an STL dependency in this file + auto *temp = mCStr; + mCStr = other.mCStr; + other.mCStr = temp; + } + + const_pointer c_str() const noexcept { return mCStr; } + pointer c_str() noexcept { return mCStr; } + const_pointer c_str_safe() const noexcept { return mData ? c_str() : ""; } + const_pointer data() const noexcept { return c_str(); } + pointer data() noexcept { return c_str(); } + size_type size() const noexcept { return mData ? 
mData[-1].length : 0; } + size_type length() const noexcept { return size(); } + bool empty() const noexcept { return size() == 0; } + + iterator begin() noexcept { return mCStr; } + iterator end() noexcept { return begin() + length(); } + const_iterator begin() const noexcept { return data(); } + const_iterator end() const noexcept { return begin() + length(); } + const_iterator cbegin() const noexcept { return begin(); } + const_iterator cend() const noexcept { return end(); } + + CString& replace(size_type pos, size_type len, const CString& str) noexcept; + CString& insert(size_type pos, const CString& str) noexcept { return replace(pos, 0, str); } + + const_reference operator[](size_type pos) const noexcept { + assert(pos < size()); + return begin()[pos]; + } + + reference operator[](size_type pos) noexcept { + assert(pos < size()); + return begin()[pos]; + } + + const_reference at(size_type pos) const noexcept { + assert(pos < size()); + return begin()[pos]; + } + + reference at(size_type pos) noexcept { + assert(pos < size()); + return begin()[pos]; + } + + reference front() noexcept { + assert(size()); + return begin()[0]; + } + + const_reference front() const noexcept { + assert(size()); + return begin()[0]; + } + + reference back() noexcept { + assert(size()); + return begin()[size() - 1]; + } + + const_reference back() const noexcept { + assert(size()); + return begin()[size() - 1]; + } + + // placement new declared as "throw" to avoid the compiler's null-check + inline void* operator new(size_t size, void* ptr) { + assert(ptr); + return ptr; + } + +private: + struct Data { + size_type length; + }; + + // mCStr points to the C-string or nullptr. if non-null, mCStr is preceded by the string's size + union { + value_type *mCStr = nullptr; + Data* mData; // Data is stored at mData[-1] + }; + + int compare(const CString& rhs) const noexcept { + size_type lhs_size = size(); + size_type rhs_size = rhs.size(); + if (lhs_size < rhs_size) return -1; + if (lhs_size > rhs_size) return 1; + return strncmp(data(), rhs.data(), size()); + } + + friend bool operator==(CString const& lhs, StaticString const& rhs) noexcept { + return (lhs.data() == rhs.data()) || + ((lhs.size() == rhs.size()) && !strncmp(lhs.data(), rhs.data(), lhs.size())); + } + friend bool operator==(CString const& lhs, CString const& rhs) noexcept { + return (lhs.data() == rhs.data()) || + ((lhs.size() == rhs.size()) && !strncmp(lhs.data(), rhs.data(), lhs.size())); + } + friend bool operator!=(CString const& lhs, CString const& rhs) noexcept { + return !(lhs == rhs); + } + friend bool operator<(CString const& lhs, CString const& rhs) noexcept { + return lhs.compare(rhs) < 0; + } + friend bool operator>(CString const& lhs, CString const& rhs) noexcept { + return lhs.compare(rhs) > 0; + } + friend bool operator>=(CString const& lhs, CString const& rhs) noexcept { + return !(lhs < rhs); + } + friend bool operator<=(CString const& lhs, CString const& rhs) noexcept { + return !(lhs > rhs); + } +}; + +// implement this for your type for automatic conversion to CString. Failing to do so leads +// to a compile time failure. +template +CString to_string(T value) noexcept; + +} // namespace utils + +// FIXME: how could we not include this one? +// needed for std::hash, since implementation is inline, this would not cause +// binaries incompatibilities if another STL version was used. +#include + +namespace std { + +//! 
\privatesection +template<> +struct hash { + typedef utils::CString argument_type; + typedef size_t result_type; + utils::hashCStrings hasher; + size_t operator()(const utils::CString& s) const noexcept { + return hasher(s.c_str()); + } +}; + +//! \privatesection +template<> +struct hash { + typedef utils::StaticString argument_type; + typedef size_t result_type; + size_t operator()(const utils::StaticString& s) const noexcept { + return s.getHash(); + } +}; + +} // namespace std + +#endif // TNT_UTILS_CSTRING_H diff --git a/ios/include/utils/CallStack.h b/ios/include/utils/CallStack.h index 3eef679f..930c3522 100644 --- a/ios/include/utils/CallStack.h +++ b/ios/include/utils/CallStack.h @@ -1,127 +1,127 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_CALLSTACK_H_ -#define UTILS_CALLSTACK_H_ - -#include -#include -#include - -#include -#include - -namespace utils { - -/** - * CallStack captures the current's thread call stack. - */ -class CallStack { -public: - /** - * Creates an empty call stack - * @see CallStack::capture() - */ - CallStack() = default; - CallStack(const CallStack&) = default; - ~CallStack() = default; - - /** - * A convenience method to create and capture the stack trace in one go. - * @param ignore number frames to ignore at the top of the stack. - * @return A CallStack object - */ - static CallStack unwind(size_t ignore = 0) noexcept; - - /** - * Capture the current thread's stack and replaces the existing one if any. - * @param ignore number frames to ignore at the top of the stack. - */ - void update(size_t ignore = 0) noexcept; - - /** - * Get the number of stack frames this object has recorded. - * @return How many stack frames are accessible through operator[] - */ - size_t getFrameCount() const noexcept; - - /** - * Return the program-counter of each stack frame captured - * @param index of the frame between 0 and getFrameCount()-1 - * @return the program-counter of the stack-frame recorded at index \p index - * @throw std::out_of_range if the index is out of range - */ - intptr_t operator [](size_t index) const; - - /** Demangles a C++ type name */ - static utils::CString demangleTypeName(const char* mangled); - - template - static utils::CString typeName() { -#if UTILS_HAS_RTTI - return demangleTypeName(typeid(T).name()); -#else - return CString(""); -#endif - } - - /** - * Outputs a CallStack into a stream. - * This will print, when possible, the demangled names of functions corresponding to the - * program-counter recorded. 
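// --- editorial usage sketch, not part of the patch ---------------------------
// CString (added above) owns a heap copy of its characters and keeps the
// length just in front of the character data, so size() is O(1) and the header
// stays free of STL containers. A sketch of typical use; the include path is
// assumed from the ios/include/utils/ layout.
#include <utils/CString.h>

utils::CString makeLabel(const char* base) {
    utils::CString label(base);              // explicit: allocates and copies
    utils::CString suffix("_baked", 6);      // length-based ctor, may embed '\0'
    label.insert(label.size(), suffix);      // insert() == replace(pos, 0, str)
    return label;                            // move swaps the pointer, no copy
}
// --- end of editorial sketch, patch resumes below ----------------------------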
- */ - friend utils::io::ostream& operator <<(utils::io::ostream& stream, const CallStack& callstack); - - bool operator <(const CallStack& rhs) const; - - inline bool operator >(const CallStack& rhs) const { - return rhs < *this; - } - - inline bool operator !=(const CallStack& rhs) const { - return *this < rhs || rhs < *this; - } - - inline bool operator >=(const CallStack& rhs) const { - return !operator <(rhs); - } - - inline bool operator <=(const CallStack& rhs) const { - return !operator >(rhs); - } - - inline bool operator ==(const CallStack& rhs) const { - return !operator !=(rhs); - } - -private: - void update_gcc(size_t ignore) noexcept; - - static utils::CString demangle(const char* mangled); - - static constexpr size_t NUM_FRAMES = 20; - - struct StackFrameInfo { - intptr_t pc; - }; - - size_t m_frame_count = 0; - StackFrameInfo m_stack[NUM_FRAMES]; -}; - -} // namespace utils - -#endif // UTILS_CALLSTACK_H_ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef UTILS_CALLSTACK_H +#define UTILS_CALLSTACK_H + +#include +#include +#include + +#include +#include + +namespace utils { + +/** + * CallStack captures the current's thread call stack. + */ +class CallStack { +public: + /** + * Creates an empty call stack + * @see CallStack::capture() + */ + CallStack() = default; + CallStack(const CallStack&) = default; + ~CallStack() = default; + + /** + * A convenience method to create and capture the stack trace in one go. + * @param ignore number frames to ignore at the top of the stack. + * @return A CallStack object + */ + static CallStack unwind(size_t ignore = 0) noexcept; + + /** + * Capture the current thread's stack and replaces the existing one if any. + * @param ignore number frames to ignore at the top of the stack. + */ + void update(size_t ignore = 0) noexcept; + + /** + * Get the number of stack frames this object has recorded. + * @return How many stack frames are accessible through operator[] + */ + size_t getFrameCount() const noexcept; + + /** + * Return the program-counter of each stack frame captured + * @param index of the frame between 0 and getFrameCount()-1 + * @return the program-counter of the stack-frame recorded at index \p index + * @throw std::out_of_range if the index is out of range + */ + intptr_t operator [](size_t index) const; + + /** Demangles a C++ type name */ + static utils::CString demangleTypeName(const char* mangled); + + template + static utils::CString typeName() { +#if UTILS_HAS_RTTI + return demangleTypeName(typeid(T).name()); +#else + return CString(""); +#endif + } + + /** + * Outputs a CallStack into a stream. + * This will print, when possible, the demangled names of functions corresponding to the + * program-counter recorded. 
+ */ + friend utils::io::ostream& operator <<(utils::io::ostream& stream, const CallStack& callstack); + + bool operator <(const CallStack& rhs) const; + + inline bool operator >(const CallStack& rhs) const { + return rhs < *this; + } + + inline bool operator !=(const CallStack& rhs) const { + return *this < rhs || rhs < *this; + } + + inline bool operator >=(const CallStack& rhs) const { + return !operator <(rhs); + } + + inline bool operator <=(const CallStack& rhs) const { + return !operator >(rhs); + } + + inline bool operator ==(const CallStack& rhs) const { + return !operator !=(rhs); + } + +private: + void update_gcc(size_t ignore) noexcept; + + static utils::CString demangle(const char* mangled); + + static constexpr size_t NUM_FRAMES = 20; + + struct StackFrameInfo { + intptr_t pc; + }; + + size_t m_frame_count = 0; + StackFrameInfo m_stack[NUM_FRAMES]; +}; + +} // namespace utils + +#endif // UTILS_CALLSTACK_H diff --git a/ios/include/utils/Condition.h b/ios/include/utils/Condition.h index f9382fcf..2ed2c07f 100644 --- a/ios/include/utils/Condition.h +++ b/ios/include/utils/Condition.h @@ -1,26 +1,26 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_CONDITION_H -#define UTILS_CONDITION_H - -#if defined(__linux__) && !defined(__SANITIZE_THREAD__) -#include -#else -#include -#endif - -#endif // UTILS_CONDITION_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_CONDITION_H +#define TNT_UTILS_CONDITION_H + +#if defined(__linux__) +#include +#else +#include +#endif + +#endif // TNT_UTILS_CONDITION_H diff --git a/ios/include/utils/CountDownLatch.h b/ios/include/utils/CountDownLatch.h index 541fbe2c..6367fffc 100644 --- a/ios/include/utils/CountDownLatch.h +++ b/ios/include/utils/CountDownLatch.h @@ -1,91 +1,91 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
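// --- editorial usage sketch, not part of the patch ---------------------------
// CallStack (added above) records up to 20 program counters for the current
// thread and demangles C++ names when printed. The sketch assumes utils::slog
// and utils::io::endl from utils/Log.h and utils/ostream.h, which ship in the
// same header set; treat those names as assumptions rather than guarantees.
#include <utils/CallStack.h>
#include <utils/Log.h>

void reportCallSite() {
    // ignore = 1 skips reportCallSite() itself from the captured frames.
    utils::CallStack stack = utils::CallStack::unwind(1);
    utils::slog.d << "captured " << stack.getFrameCount() << " frames:\n"
                  << stack << utils::io::endl;
}
// --- end of editorial sketch, patch resumes below ----------------------------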
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_COUNTDOWNLATCH_H_ -#define UTILS_COUNTDOWNLATCH_H_ - -#include - -// note: we use our version of mutex/condition to keep this public header STL free -#include -#include - -namespace utils { - -/** - * A count down latch is used to block one or several threads until the latch is signaled - * a certain number of times. - * - * Threads entering the latch are blocked until the latch is signaled enough times. - * - * @see CyclicBarrier - */ -class CountDownLatch { -public: - /** - * Creates a count down latch with a specified count. The minimum useful value is 1. - * @param count the latch counter initial value - */ - explicit CountDownLatch(size_t count) noexcept; - ~CountDownLatch() = default; - - /** - * Blocks until latch() is called \p count times. - * @see CountDownLatch(size_t count) - */ - void await() noexcept; - - /** - * Releases threads blocked in await() when called \p count times. Calling latch() more than - * \p count times has no effect. - * @see reset() - */ - void latch() noexcept; - - /** - * Resets the count-down latch to the given value. - * - * @param new_count New latch count. A value of zero will immediately unblock all waiting - * threads. - * - * @warning Use with caution. It's only safe to reset the latch count when you're sure - * that no threads are waiting in await(). This can be guaranteed in various ways, for - * instance, if you have a single thread calling await(), you could call reset() from that - * thread, or you could use a CyclicBarrier to make sure all threads using the CountDownLatch - * are at a known place (i.e.: not in await()) when reset() is called. - */ - void reset(size_t new_count) noexcept; - - /** - * @return the number of times latch() has been called since construction or reset. - * @see reset(), CountDownLatch(size_t count) - */ - size_t getCount() const noexcept; - - CountDownLatch() = delete; - CountDownLatch(const CountDownLatch&) = delete; - CountDownLatch& operator=(const CountDownLatch&) = delete; - -private: - uint32_t m_initial_count; - uint32_t m_remaining_count; - mutable Mutex m_lock; - mutable Condition m_cv; -}; - -} // namespace utils - -#endif // UTILS_COUNTDOWNLATCH_H_ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_COUNTDOWNLATCH_H +#define TNT_UTILS_COUNTDOWNLATCH_H + +#include + +// note: we use our version of mutex/condition to keep this public header STL free +#include +#include + +namespace utils { + +/** + * A count down latch is used to block one or several threads until the latch is signaled + * a certain number of times. + * + * Threads entering the latch are blocked until the latch is signaled enough times. + * + * @see CyclicBarrier + */ +class CountDownLatch { +public: + /** + * Creates a count down latch with a specified count. The minimum useful value is 1. 
+ * @param count the latch counter initial value + */ + explicit CountDownLatch(size_t count) noexcept; + ~CountDownLatch() = default; + + /** + * Blocks until latch() is called \p count times. + * @see CountDownLatch(size_t count) + */ + void await() noexcept; + + /** + * Releases threads blocked in await() when called \p count times. Calling latch() more than + * \p count times has no effect. + * @see reset() + */ + void latch() noexcept; + + /** + * Resets the count-down latch to the given value. + * + * @param new_count New latch count. A value of zero will immediately unblock all waiting + * threads. + * + * @warning Use with caution. It's only safe to reset the latch count when you're sure + * that no threads are waiting in await(). This can be guaranteed in various ways, for + * instance, if you have a single thread calling await(), you could call reset() from that + * thread, or you could use a CyclicBarrier to make sure all threads using the CountDownLatch + * are at a known place (i.e.: not in await()) when reset() is called. + */ + void reset(size_t new_count) noexcept; + + /** + * @return the number of times latch() has been called since construction or reset. + * @see reset(), CountDownLatch(size_t count) + */ + size_t getCount() const noexcept; + + CountDownLatch() = delete; + CountDownLatch(const CountDownLatch&) = delete; + CountDownLatch& operator=(const CountDownLatch&) = delete; + +private: + uint32_t m_initial_count; + uint32_t m_remaining_count; + mutable Mutex m_lock; + mutable Condition m_cv; +}; + +} // namespace utils + +#endif // TNT_UTILS_COUNTDOWNLATCH_H diff --git a/ios/include/utils/CyclicBarrier.h b/ios/include/utils/CyclicBarrier.h index fb34ee30..dae118e8 100644 --- a/ios/include/utils/CyclicBarrier.h +++ b/ios/include/utils/CyclicBarrier.h @@ -1,84 +1,84 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_CYCLIC_BARRIER_H_ -#define UTILS_CYCLIC_BARRIER_H_ - -#include - -// note: we use our version of mutex/condition to keep this public header STL free -#include -#include - -namespace utils { - -/** - * A cyclic barrier is used to synchronize several threads to a particular execution point. - * - * Threads entering the barrier are halted until all threads reach the barrier. - * - * @see CountDownLatch - */ -class CyclicBarrier { -public: - /** - * Creates a cyclic barrier with a specified number of threads to synchronize. The minimum - * useful value is 2. A value of 0 is invalid and is silently changed to 1. - * @param num_threads Number of threads to synchronize. - */ - explicit CyclicBarrier(size_t num_threads) noexcept; - - /** - * @return The number of thread that are synchronized. - */ - size_t getThreadCount() const noexcept; - - /** - * @return Number of threads currently waiting on the barrier. - */ - size_t getWaitingThreadCount() const noexcept; - - /** - * Blocks until getThreadCount()-1 other threads reach await(). 
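// --- editorial usage sketch, not part of the patch ---------------------------
// CountDownLatch (added above) blocks await() callers until latch() has been
// invoked the configured number of times. std::thread is used here purely for
// the example; the header itself deliberately avoids STL types.
#include <utils/CountDownLatch.h>
#include <thread>

void waitForTwoWorkers() {
    utils::CountDownLatch latch(2);
    std::thread a([&latch] { /* produce A */ latch.latch(); });
    std::thread b([&latch] { /* produce B */ latch.latch(); });
    latch.await();               // returns once latch() has been called twice
    a.join();
    b.join();
}
// --- end of editorial sketch, patch resumes below ----------------------------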
- */ - void await() noexcept; - - /** - * Resets the cyclic barrier to its original state and releases all waiting threads. - */ - void reset() noexcept; - - CyclicBarrier() = delete; - CyclicBarrier(const CyclicBarrier&) = delete; - CyclicBarrier& operator=(const CyclicBarrier&) = delete; - -private: - enum class State { - TRAP, RELEASE - }; - - const size_t m_num_threads; - mutable Mutex m_lock; - mutable Condition m_cv; - - State m_state = State::TRAP; - size_t m_trapped_threads = 0; - size_t m_released_threads = 0; -}; - -} // namespace utils - -#endif // UTILS_CYCLIC_BARRIER_H_ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_CYCLIC_BARRIER_H +#define TNT_UTILS_CYCLIC_BARRIER_H + +#include + +// note: we use our version of mutex/condition to keep this public header STL free +#include +#include + +namespace utils { + +/** + * A cyclic barrier is used to synchronize several threads to a particular execution point. + * + * Threads entering the barrier are halted until all threads reach the barrier. + * + * @see CountDownLatch + */ +class CyclicBarrier { +public: + /** + * Creates a cyclic barrier with a specified number of threads to synchronize. The minimum + * useful value is 2. A value of 0 is invalid and is silently changed to 1. + * @param num_threads Number of threads to synchronize. + */ + explicit CyclicBarrier(size_t num_threads) noexcept; + + /** + * @return The number of thread that are synchronized. + */ + size_t getThreadCount() const noexcept; + + /** + * @return Number of threads currently waiting on the barrier. + */ + size_t getWaitingThreadCount() const noexcept; + + /** + * Blocks until getThreadCount()-1 other threads reach await(). + */ + void await() noexcept; + + /** + * Resets the cyclic barrier to its original state and releases all waiting threads. + */ + void reset() noexcept; + + CyclicBarrier() = delete; + CyclicBarrier(const CyclicBarrier&) = delete; + CyclicBarrier& operator=(const CyclicBarrier&) = delete; + +private: + enum class State { + TRAP, RELEASE + }; + + const size_t m_num_threads; + mutable Mutex m_lock; + mutable Condition m_cv; + + State m_state = State::TRAP; + size_t m_trapped_threads = 0; + size_t m_released_threads = 0; +}; + +} // namespace utils + +#endif // TNT_UTILS_CYCLIC_BARRIER_H diff --git a/ios/include/utils/Entity.h b/ios/include/utils/Entity.h index 9526cb2d..74f417aa 100644 --- a/ios/include/utils/Entity.h +++ b/ios/include/utils/Entity.h @@ -1,98 +1,98 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
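// --- editorial usage sketch, not part of the patch ---------------------------
// CyclicBarrier (added above) holds every arriving thread until all of them
// have reached await(), then releases the whole group; reset() returns it to
// its initial state so it can be reused.
#include <utils/CyclicBarrier.h>
#include <thread>
#include <vector>

void runTwoPhases() {
    utils::CyclicBarrier barrier(3);
    auto work = [&barrier] {
        // ... phase 1 ...
        barrier.await();    // no thread starts phase 2 until all finish phase 1
        // ... phase 2 ...
    };
    std::vector<std::thread> pool;
    for (int i = 0; i < 3; i++) pool.emplace_back(work);
    for (auto& t : pool) t.join();
}
// --- end of editorial sketch, patch resumes below ----------------------------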
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_ENTITY_H -#define TNT_UTILS_ENTITY_H - -#include - -// FIXME: could we get rid of -#include // for std::hash - -#include -#include - -namespace utils { - -class UTILS_PUBLIC Entity { -public: - // this can be used to create an array of to-be-filled entities (see create()) - Entity() noexcept = default; - - // Entities can be copied - Entity(const Entity& e) noexcept = default; - Entity(Entity&& e) noexcept = default; - Entity& operator=(const Entity& e) noexcept = default; - Entity& operator=(Entity&& e) noexcept = default; - - // Entities can be compared - bool operator==(Entity e) const { return e.mIdentity == mIdentity; } - bool operator!=(Entity e) const { return e.mIdentity != mIdentity; } - - // Entities can be sorted - bool operator<(Entity e) const { return e.mIdentity < mIdentity; } - - bool isNull() const noexcept { - return mIdentity == 0; - } - - // an id that can be used for debugging/printing - uint32_t getId() const noexcept { - return mIdentity; - } - - explicit operator bool() const noexcept { return !isNull(); } - - void clear() noexcept { mIdentity = 0; } - - // Exports an entity to an int32_t which can be used "as is" in the Java programing language. - static int32_t smuggle(Entity entity) noexcept { - return int32_t(entity.getId()); - } - - // Imports an entity from an int32_t generated by smuggle() above. - static Entity import(int32_t identity) noexcept { - return Entity{ Type(identity) }; - } - -private: - friend class EntityManager; - friend class EntityManagerImpl; - friend struct std::hash; - using Type = uint32_t; - - explicit Entity(Type identity) noexcept : mIdentity(identity) { } - - Type mIdentity = 0; -}; - -} // namespace utils - - -namespace std { - -template<> -struct hash { - typedef utils::Entity argument_type; - typedef size_t result_type; - result_type operator()(argument_type const& e) const { - return e.getId(); - } -}; - -} // namespace std - -#endif // TNT_UTILS_ENTITY_H +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_ENTITY_H +#define TNT_UTILS_ENTITY_H + +#include + +// FIXME: could we get rid of +#include // for std::hash + +#include +#include + +namespace utils { + +class UTILS_PUBLIC Entity { +public: + // this can be used to create an array of to-be-filled entities (see create()) + Entity() noexcept = default; + + // Entities can be copied + Entity(const Entity& e) noexcept = default; + Entity(Entity&& e) noexcept = default; + Entity& operator=(const Entity& e) noexcept = default; + Entity& operator=(Entity&& e) noexcept = default; + + // Entities can be compared + bool operator==(Entity e) const { return e.mIdentity == mIdentity; } + bool operator!=(Entity e) const { return e.mIdentity != mIdentity; } + + // Entities can be sorted + bool operator<(Entity e) const { return e.mIdentity < mIdentity; } + + bool isNull() const noexcept { + return mIdentity == 0; + } + + // an id that can be used for debugging/printing + uint32_t getId() const noexcept { + return mIdentity; + } + + explicit operator bool() const noexcept { return !isNull(); } + + void clear() noexcept { mIdentity = 0; } + + // Exports an entity to an int32_t which can be used "as is" in the Java programing language. + static int32_t smuggle(Entity entity) noexcept { + return int32_t(entity.getId()); + } + + // Imports an entity from an int32_t generated by smuggle() above. + static Entity import(int32_t identity) noexcept { + return Entity{ Type(identity) }; + } + +private: + friend class EntityManager; + friend class EntityManagerImpl; + friend struct std::hash; + using Type = uint32_t; + + explicit Entity(Type identity) noexcept : mIdentity(identity) { } + + Type mIdentity = 0; +}; + +} // namespace utils + + +namespace std { + +template<> +struct hash { + typedef utils::Entity argument_type; + typedef size_t result_type; + result_type operator()(argument_type const& e) const { + return e.getId(); + } +}; + +} // namespace std + +#endif // TNT_UTILS_ENTITY_H diff --git a/ios/include/utils/EntityInstance.h b/ios/include/utils/EntityInstance.h index a3e84d10..b164ed3f 100644 --- a/ios/include/utils/EntityInstance.h +++ b/ios/include/utils/EntityInstance.h @@ -1,89 +1,89 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
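// --- editorial usage sketch, not part of the patch ---------------------------
// Entity (added above) is a 32-bit handle; smuggle() and import() round-trip
// it through the plain int32_t form used across the Java/JNI boundary.
#include <utils/Entity.h>
#include <utils/EntityManager.h>
#include <cstdint>

void roundTripEntity() {
    utils::Entity e = utils::EntityManager::get().create();
    int32_t handle = utils::Entity::smuggle(e);     // pass this to Java "as is"
    utils::Entity back = utils::Entity::import(handle);
    // back == e, and bool(back) stays true until the entity is destroyed
    utils::EntityManager::get().destroy(e);
}
// --- end of editorial sketch, patch resumes below ----------------------------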
- */ - -#ifndef TNT_FILAMENT_UTILS_ENTITYINSTANCE_H -#define TNT_FILAMENT_UTILS_ENTITYINSTANCE_H - -#include - -#include - -#include - - -namespace utils { - -class UTILS_PUBLIC EntityInstanceBase { -public: - using Type = uint32_t; -protected: - Type mInstance = 0; -}; - -template -class UTILS_PUBLIC EntityInstance : public EntityInstanceBase { -public: - // default Instance is invalid - constexpr EntityInstance() noexcept = default; - - // check if this Instance is valid - constexpr bool isValid() const noexcept { return mInstance != 0; } - - // Instances of same type can be copied/assigned - constexpr EntityInstance(EntityInstance const& other) noexcept = default; - constexpr EntityInstance& operator=(EntityInstance const& other) noexcept = default; - - // EDIT instances can be converted to "read" Instances of same type - template > - constexpr explicit EntityInstance(EntityInstance const& other) noexcept { - mInstance = other.asValue(); - } - template > - EntityInstance& operator=(EntityInstance const& other) noexcept { - mInstance = other.asValue(); - return *this; - } - - // Instances can be compared - constexpr bool operator!=(EntityInstance e) const { return mInstance != e.mInstance; } - constexpr bool operator==(EntityInstance e) const { return mInstance == e.mInstance; } - - // Instances can be sorted - constexpr bool operator<(EntityInstance e) const { return mInstance < e.mInstance; } - constexpr bool operator<=(EntityInstance e) const { return mInstance <= e.mInstance; } - constexpr bool operator>(EntityInstance e) const { return mInstance > e.mInstance; } - constexpr bool operator>=(EntityInstance e) const { return mInstance >= e.mInstance; } - - // and we can iterate - constexpr EntityInstance& operator++() noexcept { ++mInstance; return *this; } - constexpr EntityInstance& operator--() noexcept { --mInstance; return *this; } - constexpr const EntityInstance operator++(int) const noexcept { return EntityInstance{ mInstance + 1 }; } - constexpr const EntityInstance operator--(int) const noexcept { return EntityInstance{ mInstance - 1 }; } - - - // return a value for this Instance (mostly needed for debugging - constexpr uint32_t asValue() const noexcept { return mInstance; } - - // auto convert to Type so it can be used as an index - constexpr operator Type() const noexcept { return mInstance; } // NOLINT(google-explicit-constructor) - - // conversion from Type so we can initialize from an index - constexpr EntityInstance(Type value) noexcept { mInstance = value; } // NOLINT(google-explicit-constructor) -}; - -} // namespace utils - -#endif // TNT_FILAMENT_UTILS_ENTITYINSTANCE_H +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_ENTITYINSTANCE_H +#define TNT_UTILS_ENTITYINSTANCE_H + +#include + +#include + +#include + + +namespace utils { + +class UTILS_PUBLIC EntityInstanceBase { +public: + using Type = uint32_t; +protected: + Type mInstance = 0; +}; + +template +class UTILS_PUBLIC EntityInstance : public EntityInstanceBase { +public: + // default Instance is invalid + constexpr EntityInstance() noexcept = default; + + // check if this Instance is valid + constexpr bool isValid() const noexcept { return mInstance != 0; } + + // Instances of same type can be copied/assigned + constexpr EntityInstance(EntityInstance const& other) noexcept = default; + constexpr EntityInstance& operator=(EntityInstance const& other) noexcept = default; + + // EDIT instances can be converted to "read" Instances of same type + template > + constexpr explicit EntityInstance(EntityInstance const& other) noexcept { + mInstance = other.asValue(); + } + template > + EntityInstance& operator=(EntityInstance const& other) noexcept { + mInstance = other.asValue(); + return *this; + } + + // Instances can be compared + constexpr bool operator!=(EntityInstance e) const { return mInstance != e.mInstance; } + constexpr bool operator==(EntityInstance e) const { return mInstance == e.mInstance; } + + // Instances can be sorted + constexpr bool operator<(EntityInstance e) const { return mInstance < e.mInstance; } + constexpr bool operator<=(EntityInstance e) const { return mInstance <= e.mInstance; } + constexpr bool operator>(EntityInstance e) const { return mInstance > e.mInstance; } + constexpr bool operator>=(EntityInstance e) const { return mInstance >= e.mInstance; } + + // and we can iterate + constexpr EntityInstance& operator++() noexcept { ++mInstance; return *this; } + constexpr EntityInstance& operator--() noexcept { --mInstance; return *this; } + constexpr const EntityInstance operator++(int) const noexcept { return EntityInstance{ mInstance + 1 }; } + constexpr const EntityInstance operator--(int) const noexcept { return EntityInstance{ mInstance - 1 }; } + + + // return a value for this Instance (mostly needed for debugging + constexpr uint32_t asValue() const noexcept { return mInstance; } + + // auto convert to Type so it can be used as an index + constexpr operator Type() const noexcept { return mInstance; } // NOLINT(google-explicit-constructor) + + // conversion from Type so we can initialize from an index + constexpr EntityInstance(Type value) noexcept { mInstance = value; } // NOLINT(google-explicit-constructor) +}; + +} // namespace utils + +#endif // TNT_UTILS_ENTITYINSTANCE_H diff --git a/ios/include/utils/EntityManager.h b/ios/include/utils/EntityManager.h index f7e1544f..f44f6c28 100644 --- a/ios/include/utils/EntityManager.h +++ b/ios/include/utils/EntityManager.h @@ -1,133 +1,133 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
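// --- editorial usage sketch, not part of the patch ---------------------------
// EntityInstance (added above) is a typed index into a component manager's
// arrays, with 0 reserved as "no component". Its template parameter list is
// elided in this diff; the sketch assumes the usual single tag-type form, and
// MyManager is a hypothetical tag used only for illustration.
#include <utils/EntityInstance.h>

class MyManager;    // hypothetical component manager tag
using Instance = utils::EntityInstance<MyManager>;

float positionX(const float* xs, Instance i) {
    if (!i.isValid()) return 0.0f;   // default-constructed => invalid (0)
    return xs[i];                    // implicit conversion to the index type
}
// --- end of editorial sketch, patch resumes below ----------------------------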
- */ - -#ifndef TNT_UTILS_ENTITYMANAGER_H -#define TNT_UTILS_ENTITYMANAGER_H - -#include -#include - -#include -#include - -#ifndef FILAMENT_UTILS_TRACK_ENTITIES -#define FILAMENT_UTILS_TRACK_ENTITIES false -#endif - -#if FILAMENT_UTILS_TRACK_ENTITIES -#include -#include -#endif - -namespace utils { - -class UTILS_PUBLIC EntityManager { -public: - // Get the global EntityManager. Is is recommended to cache this value. - // Thread Safe. - static EntityManager& get() noexcept; - - class Listener { - public: - virtual void onEntitiesDestroyed(size_t n, Entity const* entities) noexcept = 0; - protected: - ~Listener() noexcept; - }; - - - // maximum number of entities that can exist at the same time - static size_t getMaxEntityCount() noexcept { - // because index 0 is reserved, we only have 2^GENERATION_SHIFT - 1 valid indices - return RAW_INDEX_COUNT - 1; - } - - // create n entities. Thread safe. - void create(size_t n, Entity* entities); - - // destroys n entities. Thread safe. - void destroy(size_t n, Entity* entities) noexcept; - - // create a new Entity. Thread safe. - // Return Entity.isNull() if the entity cannot be allocated. - Entity create() { - Entity e; - create(1, &e); - return e; - } - - // destroys an Entity. Thread safe. - void destroy(Entity e) noexcept { - destroy(1, &e); - } - - // return whether the given Entity has been destroyed (false) or not (true). - // Thread safe. - bool isAlive(Entity e) const noexcept { - assert(getIndex(e) < RAW_INDEX_COUNT); - return (!e.isNull()) && (getGeneration(e) == mGens[getIndex(e)]); - } - - // registers a listener to be called when an entity is destroyed. thread safe. - // if the listener is already register, this method has no effect. - void registerListener(Listener* l) noexcept; - - // unregisters a listener. - void unregisterListener(Listener* l) noexcept; - - - /* no user serviceable parts below */ - - // current generation of the given index. Use for debugging and testing. - uint8_t getGenerationForIndex(size_t index) const noexcept { - return mGens[index]; - } - // singleton, can't be copied - EntityManager(const EntityManager& rhs) = delete; - EntityManager& operator=(const EntityManager& rhs) = delete; - -#if FILAMENT_UTILS_TRACK_ENTITIES - std::vector getActiveEntities() const; - void dumpActiveEntities(utils::io::ostream& out) const; -#endif - -private: - friend class EntityManagerImpl; - EntityManager(); - ~EntityManager(); - - // GENERATION_SHIFT determines how many simultaneous Entities are available, the - // minimum memory requirement is 2^GENERATION_SHIFT bytes. - static constexpr const int GENERATION_SHIFT = 17; - static constexpr const size_t RAW_INDEX_COUNT = (1 << GENERATION_SHIFT); - static constexpr const Entity::Type INDEX_MASK = (1 << GENERATION_SHIFT) - 1u; - - static inline Entity::Type getGeneration(Entity e) noexcept { - return e.getId() >> GENERATION_SHIFT; - } - static inline Entity::Type getIndex(Entity e) noexcept { - return e.getId() & INDEX_MASK; - } - static inline Entity::Type makeIdentity(Entity::Type g, Entity::Type i) noexcept { - return (g << GENERATION_SHIFT) | (i & INDEX_MASK); - } - - // stores the generation of each index. - uint8_t * const mGens; -}; - -} // namespace utils - -#endif // TNT_UTILS_ENTITYMANAGER_H +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TNT_UTILS_ENTITYMANAGER_H
+#define TNT_UTILS_ENTITYMANAGER_H
+
+#include
+#include
+
+#include
+#include
+
+#ifndef FILAMENT_UTILS_TRACK_ENTITIES
+#define FILAMENT_UTILS_TRACK_ENTITIES false
+#endif
+
+#if FILAMENT_UTILS_TRACK_ENTITIES
+#include
+#include
+#endif
+
+namespace utils {
+
+class UTILS_PUBLIC EntityManager {
+public:
+    // Get the global EntityManager. It is recommended to cache this value.
+    // Thread safe.
+    static EntityManager& get() noexcept;
+
+    class Listener {
+    public:
+        virtual void onEntitiesDestroyed(size_t n, Entity const* entities) noexcept = 0;
+    protected:
+        ~Listener() noexcept;
+    };
+
+
+    // maximum number of entities that can exist at the same time
+    static size_t getMaxEntityCount() noexcept {
+        // because index 0 is reserved, we only have 2^GENERATION_SHIFT - 1 valid indices
+        return RAW_INDEX_COUNT - 1;
+    }
+
+    // creates n entities. Thread safe.
+    void create(size_t n, Entity* entities);
+
+    // destroys n entities. Thread safe.
+    void destroy(size_t n, Entity* entities) noexcept;
+
+    // creates a new Entity. Thread safe.
+    // Returns an Entity for which isNull() is true if it cannot be allocated.
+    Entity create() {
+        Entity e;
+        create(1, &e);
+        return e;
+    }
+
+    // destroys an Entity. Thread safe.
+    void destroy(Entity e) noexcept {
+        destroy(1, &e);
+    }
+
+    // returns true if the given Entity has not been destroyed, false otherwise.
+    // Thread safe.
+    bool isAlive(Entity e) const noexcept {
+        assert(getIndex(e) < RAW_INDEX_COUNT);
+        return (!e.isNull()) && (getGeneration(e) == mGens[getIndex(e)]);
+    }
+
+    // registers a listener to be called when an entity is destroyed. Thread safe.
+    // If the listener is already registered, this method has no effect.
+    void registerListener(Listener* l) noexcept;
+
+    // unregisters a listener.
+    void unregisterListener(Listener* l) noexcept;
+
+
+    /* no user serviceable parts below */
+
+    // current generation of the given index. Used for debugging and testing.
+    uint8_t getGenerationForIndex(size_t index) const noexcept {
+        return mGens[index];
+    }
+    // singleton, can't be copied
+    EntityManager(const EntityManager& rhs) = delete;
+    EntityManager& operator=(const EntityManager& rhs) = delete;
+
+#if FILAMENT_UTILS_TRACK_ENTITIES
+    std::vector getActiveEntities() const;
+    void dumpActiveEntities(utils::io::ostream& out) const;
+#endif
+
+private:
+    friend class EntityManagerImpl;
+    EntityManager();
+    ~EntityManager();
+
+    // GENERATION_SHIFT determines how many simultaneous Entities are available, the
+    // minimum memory requirement is 2^GENERATION_SHIFT bytes.
+ static constexpr const int GENERATION_SHIFT = 17; + static constexpr const size_t RAW_INDEX_COUNT = (1 << GENERATION_SHIFT); + static constexpr const Entity::Type INDEX_MASK = (1 << GENERATION_SHIFT) - 1u; + + static inline Entity::Type getGeneration(Entity e) noexcept { + return e.getId() >> GENERATION_SHIFT; + } + static inline Entity::Type getIndex(Entity e) noexcept { + return e.getId() & INDEX_MASK; + } + static inline Entity::Type makeIdentity(Entity::Type g, Entity::Type i) noexcept { + return (g << GENERATION_SHIFT) | (i & INDEX_MASK); + } + + // stores the generation of each index. + uint8_t * const mGens; +}; + +} // namespace utils + +#endif // TNT_UTILS_ENTITYMANAGER_H diff --git a/ios/include/utils/FixedCapacityVector.h b/ios/include/utils/FixedCapacityVector.h index a3f27393..72efe30c 100644 --- a/ios/include/utils/FixedCapacityVector.h +++ b/ios/include/utils/FixedCapacityVector.h @@ -1,410 +1,411 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_FIXEDCAPACITYVECTOR_H -#define TNT_UTILS_FIXEDCAPACITYVECTOR_H - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include - -#ifndef NDEBUG -#define FILAMENT_FORCE_CAPACITY_CHECK true -#else -#define FILAMENT_FORCE_CAPACITY_CHECK false -#endif - -namespace utils { - -/** - * FixedCapacityVector is (almost) a drop-in replacement for std::vector<> except it has a - * fixed capacity decided at runtime. The vector storage is never reallocated unless reserve() - * is called. Operations that add elements to the vector can fail if there is not enough - * capacity. - * - * An empty vector with a given capacity is created with - * FixedCapacityVector::with_capacity( capacity ); - * - * NOTE: When passing an initial size into the FixedCapacityVector constructor, default construction - * of the elements is skipped when their construction is trivial. This behavior is different from - * std::vector. e.g., std::vector(4) constructs 4 zeros while FixedCapacityVector(4) - * allocates 4 uninitialized values. Note that zero initialization is easily achieved by passing in - * the optional value argument, e.g. FixedCapacityVector(4, 0) or foo.resize(4, 0). 
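// --- Editor's sketch (not part of the patch): basic use of the EntityManager API above.
// --- Entities are cheap integer handles; liveness is tracked through a per-index generation
// --- counter, so a stale handle stops being "alive" after destroy().
#include <utils/EntityManager.h>
#include <utils/Entity.h>
#include <assert.h>

void entityLifetimeExample() {
    utils::EntityManager& em = utils::EntityManager::get();   // global manager, worth caching

    utils::Entity e = em.create();
    assert(!e.isNull());
    assert(em.isAlive(e));

    em.destroy(e);
    // the handle still carries the same id, but its generation no longer matches
    assert(!em.isAlive(e));

    // batch creation/destruction is also available
    utils::Entity batch[16];
    em.create(16, batch);
    em.destroy(16, batch);
}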
- */ -template, bool CapacityCheck = true> -class UTILS_PUBLIC FixedCapacityVector { -public: - using allocator_type = A; - using value_type = T; - using reference = T&; - using const_reference = T const&; - using size_type = uint32_t; - using difference_type = int32_t; - using pointer = T*; - using const_pointer = T const*; - using iterator = pointer; - using const_iterator = const_pointer; - using reverse_iterator = std::reverse_iterator; - using const_reverse_iterator = std::reverse_iterator; - -private: - using storage_traits = std::allocator_traits; - -public: - /** returns an empty vector with the specified capacity */ - static FixedCapacityVector with_capacity( - size_type capacity, const allocator_type& allocator = allocator_type()) { - return FixedCapacityVector(construct_with_capacity, capacity, allocator); - } - - FixedCapacityVector() = default; - - explicit FixedCapacityVector(const allocator_type& allocator) noexcept - : mCapacityAllocator({}, allocator) { - } - - explicit FixedCapacityVector(size_type size, const allocator_type& allocator = allocator_type()) - : mSize(size), - mCapacityAllocator(size, allocator) { - mData = this->allocator().allocate(this->capacity()); - construct(begin(), end()); - } - - FixedCapacityVector(size_type size, const_reference value, - const allocator_type& alloc = allocator_type()) - : mSize(size), - mCapacityAllocator(size, alloc) { - mData = this->allocator().allocate(this->capacity()); - construct(begin(), end(), value); - } - - FixedCapacityVector(FixedCapacityVector const& rhs) - : mSize(rhs.mSize), - mCapacityAllocator(rhs.capacity(), - storage_traits::select_on_container_copy_construction(rhs.allocator())) { - mData = allocator().allocate(capacity()); - std::uninitialized_copy(rhs.begin(), rhs.end(), begin()); - } - - FixedCapacityVector(FixedCapacityVector&& rhs) noexcept { - this->swap(rhs); - } - - ~FixedCapacityVector() noexcept { - destroy(begin(), end()); - allocator().deallocate(data(), capacity()); - } - - FixedCapacityVector& operator=(FixedCapacityVector const& rhs) { - if (this != &rhs) { - FixedCapacityVector t(rhs); - this->swap(t); - } - return *this; - } - - FixedCapacityVector& operator=(FixedCapacityVector&& rhs) noexcept { - this->swap(rhs); - return *this; - } - - allocator_type get_allocator() const noexcept { - return mCapacityAllocator.second(); - } - - // -------------------------------------------------------------------------------------------- - - iterator begin() noexcept { return data(); } - iterator end() noexcept { return data() + size(); } - const_iterator begin() const noexcept { return data(); } - const_iterator end() const noexcept { return data() + size(); } - reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } - const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); } - reverse_iterator rend() noexcept { return reverse_iterator(begin()); } - const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); } - const_iterator cbegin() const noexcept { return begin(); } - const_iterator cend() const noexcept { return end(); } - const_reverse_iterator crbegin() const noexcept { return rbegin(); } - const_reverse_iterator crend() const noexcept { return rend(); } - - // -------------------------------------------------------------------------------------------- - - size_type size() const noexcept { return mSize; } - size_type capacity() const noexcept { return mCapacityAllocator.first(); } - bool empty() const noexcept { return size() 
== 0; } - size_type max_size() const noexcept { - return std::min(storage_traits::max_size(allocator()), - std::numeric_limits::max()); - } - - // -------------------------------------------------------------------------------------------- - - reference operator[](size_type n) noexcept { - assert(n < size()); - return *(begin() + n); - } - - const_reference operator[](size_type n) const noexcept { - assert(n < size()); - return *(begin() + n); - } - - reference front() noexcept { return *begin(); } - const_reference front() const noexcept { return *begin(); } - reference back() noexcept { return *(end() - 1); } - const_reference back() const noexcept { return *(end() - 1); } - value_type* data() noexcept { return mData; } - const value_type* data() const noexcept { return mData; } - - // -------------------------------------------------------------------------------------------- - - void push_back(const_reference v) { - auto pos = assertCapacityForSize(size() + 1); - ++mSize; - storage_traits::construct(allocator(), pos, v); - } - - void push_back(value_type&& v) { - auto pos = assertCapacityForSize(size() + 1); - ++mSize; - storage_traits::construct(allocator(), pos, std::move(v)); - } - - template - reference emplace_back(ARGS&& ... args) { - auto pos = assertCapacityForSize(size() + 1); - ++mSize; - storage_traits::construct(allocator(), pos, std::forward(args)...); - return *pos; - } - - void pop_back() { - assert(!empty()); - --mSize; - destroy(end(), end() + 1); - } - - iterator insert(const_iterator position, const_reference v) { - if (position == end()) { - push_back(v); - } else { - assertCapacityForSize(size() + 1); - pointer p = const_cast(position); - move_range(p, end(), p + 1); - ++mSize; - // here we handle inserting an element of this vector! 
- const_pointer pv = std::addressof(v); - if (p <= pv && pv < end()) { - *p = *(pv + 1); - } else { - *p = v; - } - } - return const_cast(position); - } - - iterator insert(const_iterator position, value_type&& v) { - if (position == end()) { - push_back(std::move(v)); - } else { - assertCapacityForSize(size() + 1); - pointer p = const_cast(position); - move_range(p, end(), p + 1); - ++mSize; - *p = std::move(v); - } - return const_cast(position); - } - - iterator erase(const_iterator pos) { - assert(pos != end()); - return erase(pos, pos + 1); - } - - iterator erase(const_iterator first, const_iterator last) { - assert(first <= last); - auto e = std::move(const_cast(last), end(), const_cast(first)); - destroy(e, end()); - mSize -= std::distance(first, last); - return const_cast(first); - } - - void clear() noexcept { - destroy(begin(), end()); - mSize = 0; - } - - void resize(size_type count) { - assertCapacityForSize(count); - if constexpr(std::is_trivially_constructible_v && - std::is_trivially_destructible_v) { - // we check for triviality here so that the implementation could be non-inline - mSize = count; - } else { - resize_non_trivial(count); - } - } - - void resize(size_type count, const_reference v) { - assertCapacityForSize(count); - resize_non_trivial(count, v); - } - - void swap(FixedCapacityVector& other) { - using std::swap; - swap(mData, other.mData); - swap(mSize, other.mSize); - mCapacityAllocator.swap(other.mCapacityAllocator); - } - - UTILS_NOINLINE - void reserve(size_type c) { - if (c > capacity()) { - FixedCapacityVector t(construct_with_capacity, c, allocator()); - t.mSize = size(); - std::uninitialized_move(begin(), end(), t.begin()); - this->swap(t); - } - } - -private: - enum construct_with_capacity_tag{ construct_with_capacity }; - - FixedCapacityVector(construct_with_capacity_tag, - size_type capacity, const allocator_type& allocator = allocator_type()) - : mCapacityAllocator(capacity, allocator) { - mData = this->allocator().allocate(this->capacity()); - } - - allocator_type& allocator() noexcept { - return mCapacityAllocator.second(); - } - - allocator_type const& allocator() const noexcept { - return mCapacityAllocator.second(); - } - - iterator assertCapacityForSize(size_type s) { - if constexpr(CapacityCheck || FILAMENT_FORCE_CAPACITY_CHECK) { - ASSERT_PRECONDITION(capacity() >= s, - "capacity exceeded: requested size %lu, available capacity %lu.", - (unsigned long)s, (unsigned long)capacity()); - } - return end(); - } - - inline void construct(iterator first, iterator last) noexcept { - // we check for triviality here so that the implementation could be non-inline - if constexpr(!std::is_trivially_constructible_v) { - construct_non_trivial(first, last); - } - } - - void construct(iterator first, iterator last, const_reference proto) noexcept { - #pragma nounroll - while (first != last) { - storage_traits::construct(allocator(), first++, proto); - } - } - - // should this be NOINLINE? - void construct_non_trivial(iterator first, iterator last) noexcept { - #pragma nounroll - while (first != last) { - storage_traits::construct(allocator(), first++); - } - } - - - inline void destroy(iterator first, iterator last) noexcept { - // we check for triviality here so that the implementation could be non-inline - if constexpr(!std::is_trivially_destructible_v) { - destroy_non_trivial(first, last); - } - } - - // should this be NOINLINE? 
- void destroy_non_trivial(iterator first, iterator last) noexcept { - #pragma nounroll - while (first != last) { - storage_traits::destroy(allocator(), --last); - } - } - - // should this be NOINLINE? - void resize_non_trivial(size_type count) { - if (count > size()) { - construct(end(), begin() + count); - } else if (count < size()) { - destroy(begin() + count, end()); - } - mSize = count; - } - - // should this be NOINLINE? - void resize_non_trivial(size_type count, const_reference v) { - if (count > size()) { - construct(end(), begin() + count, v); - } else if (count < size()) { - destroy(begin() + count, end()); - } - mSize = count; - } - - // should this be NOINLINE? - void move_range(pointer s, pointer e, pointer to) { - if constexpr(std::is_trivially_copy_assignable_v - && std::is_trivially_destructible_v) { - // this generates memmove -- which doesn't happen otherwise - std::move_backward(s, e, to + std::distance(s, e)); - } else { - pointer our_end = end(); - difference_type n = our_end - to; // nb of elements to move by operator= - pointer i = s + n; // 1st element to move by move ctor - for (pointer d = our_end ; i < our_end ; ++i, ++d) { - storage_traits::construct(allocator(), d, std::move(*i)); - } - std::move_backward(s, s + n, our_end); - } - } - - template - class SizeTypeWrapper { - TYPE value{}; - public: - SizeTypeWrapper() noexcept = default; - SizeTypeWrapper(SizeTypeWrapper const& rhs) noexcept = default; - explicit SizeTypeWrapper(TYPE value) noexcept : value(value) { } - SizeTypeWrapper operator=(TYPE rhs) noexcept { value = rhs; return *this; } - operator TYPE() const noexcept { return value; } - }; - - pointer mData{}; - size_type mSize{}; - compressed_pair, allocator_type> mCapacityAllocator{}; -}; - -} // namespace utils - -#endif //TNT_UTILS_FIXEDCAPACITYVECTOR_H +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_FIXEDCAPACITYVECTOR_H +#define TNT_UTILS_FIXEDCAPACITYVECTOR_H + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef NDEBUG +#define FILAMENT_FORCE_CAPACITY_CHECK true +#else +#define FILAMENT_FORCE_CAPACITY_CHECK false +#endif + +namespace utils { + +/** + * FixedCapacityVector is (almost) a drop-in replacement for std::vector<> except it has a + * fixed capacity decided at runtime. The vector storage is never reallocated unless reserve() + * is called. Operations that add elements to the vector can fail if there is not enough + * capacity. + * + * An empty vector with a given capacity is created with + * FixedCapacityVector::with_capacity( capacity ); + * + * NOTE: When passing an initial size into the FixedCapacityVector constructor, default construction + * of the elements is skipped when their construction is trivial. This behavior is different from + * std::vector. e.g., std::vector(4) constructs 4 zeros while FixedCapacityVector(4) + * allocates 4 uninitialized values. 
Note that zero initialization is easily achieved by passing in + * the optional value argument, e.g. FixedCapacityVector(4, 0) or foo.resize(4, 0). + */ +template, bool CapacityCheck = true> +class UTILS_PUBLIC FixedCapacityVector { +public: + using allocator_type = A; + using value_type = T; + using reference = T&; + using const_reference = T const&; + using size_type = uint32_t; + using difference_type = int32_t; + using pointer = T*; + using const_pointer = T const*; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + +private: + using storage_traits = std::allocator_traits; + +public: + /** returns an empty vector with the specified capacity */ + static FixedCapacityVector with_capacity( + size_type capacity, const allocator_type& allocator = allocator_type()) { + return FixedCapacityVector(construct_with_capacity, capacity, allocator); + } + + FixedCapacityVector() = default; + + explicit FixedCapacityVector(const allocator_type& allocator) noexcept + : mCapacityAllocator({}, allocator) { + } + + explicit FixedCapacityVector(size_type size, const allocator_type& allocator = allocator_type()) + : mSize(size), + mCapacityAllocator(size, allocator) { + mData = this->allocator().allocate(this->capacity()); + construct(begin(), end()); + } + + FixedCapacityVector(size_type size, const_reference value, + const allocator_type& alloc = allocator_type()) + : mSize(size), + mCapacityAllocator(size, alloc) { + mData = this->allocator().allocate(this->capacity()); + construct(begin(), end(), value); + } + + FixedCapacityVector(FixedCapacityVector const& rhs) + : mSize(rhs.mSize), + mCapacityAllocator(rhs.capacity(), + storage_traits::select_on_container_copy_construction(rhs.allocator())) { + mData = allocator().allocate(capacity()); + std::uninitialized_copy(rhs.begin(), rhs.end(), begin()); + } + + FixedCapacityVector(FixedCapacityVector&& rhs) noexcept { + this->swap(rhs); + } + + ~FixedCapacityVector() noexcept { + destroy(begin(), end()); + allocator().deallocate(data(), capacity()); + } + + FixedCapacityVector& operator=(FixedCapacityVector const& rhs) { + if (this != &rhs) { + FixedCapacityVector t(rhs); + this->swap(t); + } + return *this; + } + + FixedCapacityVector& operator=(FixedCapacityVector&& rhs) noexcept { + this->swap(rhs); + return *this; + } + + allocator_type get_allocator() const noexcept { + return mCapacityAllocator.second(); + } + + // -------------------------------------------------------------------------------------------- + + iterator begin() noexcept { return data(); } + iterator end() noexcept { return data() + size(); } + const_iterator begin() const noexcept { return data(); } + const_iterator end() const noexcept { return data() + size(); } + reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); } + reverse_iterator rend() noexcept { return reverse_iterator(begin()); } + const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); } + const_iterator cbegin() const noexcept { return begin(); } + const_iterator cend() const noexcept { return end(); } + const_reverse_iterator crbegin() const noexcept { return rbegin(); } + const_reverse_iterator crend() const noexcept { return rend(); } + + // -------------------------------------------------------------------------------------------- + + size_type size() const 
noexcept { return mSize; } + size_type capacity() const noexcept { return mCapacityAllocator.first(); } + bool empty() const noexcept { return size() == 0; } + size_type max_size() const noexcept { + return std::min(storage_traits::max_size(allocator()), + std::numeric_limits::max()); + } + + // -------------------------------------------------------------------------------------------- + + reference operator[](size_type n) noexcept { + assert(n < size()); + return *(begin() + n); + } + + const_reference operator[](size_type n) const noexcept { + assert(n < size()); + return *(begin() + n); + } + + reference front() noexcept { return *begin(); } + const_reference front() const noexcept { return *begin(); } + reference back() noexcept { return *(end() - 1); } + const_reference back() const noexcept { return *(end() - 1); } + value_type* data() noexcept { return mData; } + const value_type* data() const noexcept { return mData; } + + // -------------------------------------------------------------------------------------------- + + void push_back(const_reference v) { + auto pos = assertCapacityForSize(size() + 1); + ++mSize; + storage_traits::construct(allocator(), pos, v); + } + + void push_back(value_type&& v) { + auto pos = assertCapacityForSize(size() + 1); + ++mSize; + storage_traits::construct(allocator(), pos, std::move(v)); + } + + template + reference emplace_back(ARGS&& ... args) { + auto pos = assertCapacityForSize(size() + 1); + ++mSize; + storage_traits::construct(allocator(), pos, std::forward(args)...); + return *pos; + } + + void pop_back() { + assert(!empty()); + --mSize; + destroy(end(), end() + 1); + } + + iterator insert(const_iterator position, const_reference v) { + if (position == end()) { + push_back(v); + } else { + assertCapacityForSize(size() + 1); + pointer p = const_cast(position); + move_range(p, end(), p + 1); + ++mSize; + // here we handle inserting an element of this vector! 
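+            // note: if v refers to an element of this vector at or after the insertion
+            // point, move_range() above has already shifted that element one slot to the
+            // right, so its value must be read from pv + 1 rather than from pv.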
+ const_pointer pv = std::addressof(v); + if (p <= pv && pv < end()) { + *p = *(pv + 1); + } else { + *p = v; + } + } + return const_cast(position); + } + + iterator insert(const_iterator position, value_type&& v) { + if (position == end()) { + push_back(std::move(v)); + } else { + assertCapacityForSize(size() + 1); + pointer p = const_cast(position); + move_range(p, end(), p + 1); + ++mSize; + *p = std::move(v); + } + return const_cast(position); + } + + iterator erase(const_iterator pos) { + assert(pos != end()); + return erase(pos, pos + 1); + } + + iterator erase(const_iterator first, const_iterator last) { + assert(first <= last); + auto e = std::move(const_cast(last), end(), const_cast(first)); + destroy(e, end()); + mSize -= std::distance(first, last); + return const_cast(first); + } + + void clear() noexcept { + destroy(begin(), end()); + mSize = 0; + } + + void resize(size_type count) { + assertCapacityForSize(count); + if constexpr(std::is_trivially_constructible_v && + std::is_trivially_destructible_v) { + // we check for triviality here so that the implementation could be non-inline + mSize = count; + } else { + resize_non_trivial(count); + } + } + + void resize(size_type count, const_reference v) { + assertCapacityForSize(count); + resize_non_trivial(count, v); + } + + void swap(FixedCapacityVector& other) { + using std::swap; + swap(mData, other.mData); + swap(mSize, other.mSize); + mCapacityAllocator.swap(other.mCapacityAllocator); + } + + UTILS_NOINLINE + void reserve(size_type c) { + if (c > capacity()) { + FixedCapacityVector t(construct_with_capacity, c, allocator()); + t.mSize = size(); + std::uninitialized_move(begin(), end(), t.begin()); + this->swap(t); + } + } + +private: + enum construct_with_capacity_tag{ construct_with_capacity }; + + FixedCapacityVector(construct_with_capacity_tag, + size_type capacity, const allocator_type& allocator = allocator_type()) + : mCapacityAllocator(capacity, allocator) { + mData = this->allocator().allocate(this->capacity()); + } + + allocator_type& allocator() noexcept { + return mCapacityAllocator.second(); + } + + allocator_type const& allocator() const noexcept { + return mCapacityAllocator.second(); + } + + iterator assertCapacityForSize(size_type s) { + if constexpr(CapacityCheck || FILAMENT_FORCE_CAPACITY_CHECK) { + ASSERT_PRECONDITION(capacity() >= s, + "capacity exceeded: requested size %lu, available capacity %lu.", + (unsigned long)s, (unsigned long)capacity()); + } + return end(); + } + + inline void construct(iterator first, iterator last) noexcept { + // we check for triviality here so that the implementation could be non-inline + if constexpr(!std::is_trivially_constructible_v) { + construct_non_trivial(first, last); + } + } + + void construct(iterator first, iterator last, const_reference proto) noexcept { + #pragma nounroll + while (first != last) { + storage_traits::construct(allocator(), first++, proto); + } + } + + // should this be NOINLINE? + void construct_non_trivial(iterator first, iterator last) noexcept { + #pragma nounroll + while (first != last) { + storage_traits::construct(allocator(), first++); + } + } + + + inline void destroy(iterator first, iterator last) noexcept { + // we check for triviality here so that the implementation could be non-inline + if constexpr(!std::is_trivially_destructible_v) { + destroy_non_trivial(first, last); + } + } + + // should this be NOINLINE? 
+ void destroy_non_trivial(iterator first, iterator last) noexcept { + #pragma nounroll + while (first != last) { + storage_traits::destroy(allocator(), --last); + } + } + + // should this be NOINLINE? + void resize_non_trivial(size_type count) { + if (count > size()) { + construct(end(), begin() + count); + } else if (count < size()) { + destroy(begin() + count, end()); + } + mSize = count; + } + + // should this be NOINLINE? + void resize_non_trivial(size_type count, const_reference v) { + if (count > size()) { + construct(end(), begin() + count, v); + } else if (count < size()) { + destroy(begin() + count, end()); + } + mSize = count; + } + + // should this be NOINLINE? + void move_range(pointer s, pointer e, pointer to) { + if constexpr(std::is_trivially_copy_assignable_v + && std::is_trivially_destructible_v) { + // this generates memmove -- which doesn't happen otherwise + std::move_backward(s, e, to + std::distance(s, e)); + } else { + pointer our_end = end(); + difference_type n = our_end - to; // nb of elements to move by operator= + pointer i = s + n; // 1st element to move by move ctor + for (pointer d = our_end ; i < our_end ; ++i, ++d) { + storage_traits::construct(allocator(), d, std::move(*i)); + } + std::move_backward(s, s + n, our_end); + } + } + + template + class SizeTypeWrapper { + TYPE value{}; + public: + SizeTypeWrapper() noexcept = default; + SizeTypeWrapper(SizeTypeWrapper const& rhs) noexcept = default; + explicit SizeTypeWrapper(TYPE value) noexcept : value(value) { } + SizeTypeWrapper operator=(TYPE rhs) noexcept { value = rhs; return *this; } + operator TYPE() const noexcept { return value; } + }; + + pointer mData{}; + size_type mSize{}; + compressed_pair, allocator_type> mCapacityAllocator{}; +}; + +} // namespace utils + +#endif // TNT_UTILS_FIXEDCAPACITYVECTOR_H diff --git a/ios/include/utils/Hash.h b/ios/include/utils/Hash.h index 955531b9..ee6e87f6 100644 --- a/ios/include/utils/Hash.h +++ b/ios/include/utils/Hash.h @@ -1,74 +1,74 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
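// --- Editor's sketch (not part of the patch): using the FixedCapacityVector shown above.
// --- Capacity is chosen once and never grows implicitly; adding past it trips the capacity
// --- precondition when CapacityCheck (or the debug force-check) is enabled.
#include <utils/FixedCapacityVector.h>

void fixedCapacityExample() {
    // empty vector with room for exactly 8 elements
    auto v = utils::FixedCapacityVector<int>::with_capacity(8);
    for (int i = 0; i < 8; i++) {
        v.push_back(i);            // fine: stays within the fixed capacity
    }
    // v.push_back(8);             // would exceed the capacity -> precondition failure

    // unlike std::vector<int>(4), this leaves the 4 ints uninitialized...
    utils::FixedCapacityVector<int> u(4);
    // ...while passing a value zero-initializes, as the header comment notes
    utils::FixedCapacityVector<int> z(4, 0);

    v.reserve(16);                 // explicit reserve() is the only way storage is reallocated
}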
- */ - -#ifndef TNT_UTILS_HASH_H -#define TNT_UTILS_HASH_H - -#include // for std::hash - -#include -#include - -namespace utils { -namespace hash { - -inline uint32_t murmur3(const uint32_t* key, size_t wordCount, uint32_t seed) noexcept { - uint32_t h = seed; - size_t i = wordCount; - do { - uint32_t k = *key++; - k *= 0xcc9e2d51u; - k = (k << 15u) | (k >> 17u); - k *= 0x1b873593u; - h ^= k; - h = (h << 13u) | (h >> 19u); - h = (h * 5u) + 0xe6546b64u; - } while (--i); - h ^= wordCount; - h ^= h >> 16u; - h *= 0x85ebca6bu; - h ^= h >> 13u; - h *= 0xc2b2ae35u; - h ^= h >> 16u; - return h; -} - -template -struct MurmurHashFn { - uint32_t operator()(const T& key) const noexcept { - static_assert(0 == (sizeof(key) & 3u), "Hashing requires a size that is a multiple of 4."); - return murmur3((const uint32_t*) &key, sizeof(key) / 4, 0); - } -}; - -// combines two hashes together -template -inline void combine(size_t& seed, const T& v) noexcept { - std::hash hasher; - seed ^= hasher(v) + 0x9e3779b9u + (seed << 6u) + (seed >> 2u); -} - -// combines two hashes together, faster but less good -template -inline void combine_fast(size_t& seed, const T& v) noexcept { - std::hash hasher; - seed ^= hasher(v) << 1u; -} - -} // namespace hash -} // namespace utils - -#endif // TNT_UTILS_HASH_H +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
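// --- Editor's sketch (not part of the patch): the hashing helpers above. murmur3() hashes
// --- word-aligned PODs whose size is a multiple of 4, and hash::combine() folds per-field
// --- std::hash values into a seed. "Key" and "hashKey" are illustrative names only.
#include <utils/Hash.h>
#include <stddef.h>
#include <stdint.h>

struct Key {
    uint32_t a;
    uint32_t b;
};

size_t hashKey(const Key& k) {
    // whole-struct hash: valid because sizeof(Key) is a multiple of 4
    uint32_t h = utils::hash::murmur3(reinterpret_cast<const uint32_t*>(&k),
            sizeof(Key) / 4, /*seed=*/0);

    // or field by field, combining the individual std::hash values into one seed
    size_t seed = h;
    utils::hash::combine(seed, k.a);
    utils::hash::combine(seed, k.b);
    return seed;
}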
+ */ + +#ifndef TNT_UTILS_HASH_H +#define TNT_UTILS_HASH_H + +#include // for std::hash + +#include +#include + +namespace utils { +namespace hash { + +inline uint32_t murmur3(const uint32_t* key, size_t wordCount, uint32_t seed) noexcept { + uint32_t h = seed; + size_t i = wordCount; + do { + uint32_t k = *key++; + k *= 0xcc9e2d51u; + k = (k << 15u) | (k >> 17u); + k *= 0x1b873593u; + h ^= k; + h = (h << 13u) | (h >> 19u); + h = (h * 5u) + 0xe6546b64u; + } while (--i); + h ^= wordCount; + h ^= h >> 16u; + h *= 0x85ebca6bu; + h ^= h >> 13u; + h *= 0xc2b2ae35u; + h ^= h >> 16u; + return h; +} + +template +struct MurmurHashFn { + uint32_t operator()(const T& key) const noexcept { + static_assert(0 == (sizeof(key) & 3u), "Hashing requires a size that is a multiple of 4."); + return murmur3((const uint32_t*) &key, sizeof(key) / 4, 0); + } +}; + +// combines two hashes together +template +inline void combine(size_t& seed, const T& v) noexcept { + std::hash hasher; + seed ^= hasher(v) + 0x9e3779b9u + (seed << 6u) + (seed >> 2u); +} + +// combines two hashes together, faster but less good +template +inline void combine_fast(size_t& seed, const T& v) noexcept { + std::hash hasher; + seed ^= hasher(v) << 1u; +} + +} // namespace hash +} // namespace utils + +#endif // TNT_UTILS_HASH_H diff --git a/ios/include/utils/JobSystem.h b/ios/include/utils/JobSystem.h index 5d738197..98fc96da 100644 --- a/ios/include/utils/JobSystem.h +++ b/ios/include/utils/JobSystem.h @@ -1,545 +1,545 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_JOBSYSTEM_H -#define TNT_UTILS_JOBSYSTEM_H - -#include - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace utils { - -class JobSystem { - static constexpr size_t MAX_JOB_COUNT = 16384; - static_assert(MAX_JOB_COUNT <= 0x7FFE, "MAX_JOB_COUNT must be <= 0x7FFE"); - using WorkQueue = WorkStealingDequeue; - -public: - class Job; - - using JobFunc = void(*)(void*, JobSystem&, Job*); - - class alignas(CACHELINE_SIZE) Job { - public: - Job() noexcept {} /* = default; */ /* clang bug */ // NOLINT(modernize-use-equals-default,cppcoreguidelines-pro-type-member-init) - Job(const Job&) = delete; - Job(Job&&) = delete; - - private: - friend class JobSystem; - - // Size is chosen so that we can store at least std::function<> - // the alignas() qualifier ensures we're multiple of a cache-line. - static constexpr size_t JOB_STORAGE_SIZE_BYTES = - sizeof(std::function) > 48 ? 
sizeof(std::function) : 48; - static constexpr size_t JOB_STORAGE_SIZE_WORDS = - (JOB_STORAGE_SIZE_BYTES + sizeof(void*) - 1) / sizeof(void*); - - // keep it first, so it's correctly aligned with all architectures - // this is were we store the job's data, typically a std::function<> - // v7 | v8 - void* storage[JOB_STORAGE_SIZE_WORDS]; // 48 | 48 - JobFunc function; // 4 | 8 - uint16_t parent; // 2 | 2 - std::atomic runningJobCount = { 1 }; // 2 | 2 - mutable std::atomic refCount = { 1 }; // 2 | 2 - // 6 | 2 (padding) - // 64 | 64 - }; - - explicit JobSystem(size_t threadCount = 0, size_t adoptableThreadsCount = 1) noexcept; - - ~JobSystem(); - - // Make the current thread part of the thread pool. - void adopt(); - - // Remove this adopted thread from the parent. This is intended to be used for - // shutting down a JobSystem. In particular, this doesn't allow the parent to - // adopt more thread. - void emancipate(); - - - // If a parent is not specified when creating a job, that job will automatically take the - // root job as a parent. - // The root job is reset when waited on. - Job* setRootJob(Job* job) noexcept { return mRootJob = job; } - - // use setRootJob() instead - UTILS_DEPRECATED - Job* setMasterJob(Job* job) noexcept { return setRootJob(job); } - - - Job* create(Job* parent, JobFunc func) noexcept; - - // NOTE: All methods below must be called from the same thread and that thread must be - // owned by JobSystem's thread pool. - - /* - * Job creation examples: - * ---------------------- - * - * struct Functor { - * uintptr_t storage[6]; - * void operator()(JobSystem&, Jobsystem::Job*); - * } functor; - * - * struct Foo { - * uintptr_t storage[6]; - * void method(JobSystem&, Jobsystem::Job*); - * } foo; - * - * Functor and Foo size muse be <= uintptr_t[6] - * - * createJob() - * createJob(parent) - * createJob(parent, &foo) - * createJob(parent, foo) - * createJob(parent, std::ref(foo)) - * createJob(parent, functor) - * createJob(parent, std::ref(functor)) - * createJob(parent, [ up-to 6 uintptr_t ](JobSystem*, Jobsystem::Job*){ }) - * - * Utility functions: - * ------------------ - * These are less efficient, but handle any size objects using the heap if needed. - * (internally uses std::function<>), and don't require the callee to take - * a (JobSystem&, Jobsystem::Job*) as parameter. - * - * struct BigFoo { - * uintptr_t large[16]; - * void operator()(); - * void method(int answerToEverything); - * static void exec(BigFoo&) { } - * } bigFoo; - * - * jobs::createJob(js, parent, [ any-capture ](int answerToEverything){}, 42); - * jobs::createJob(js, parent, &BigFoo::method, &bigFoo, 42); - * jobs::createJob(js, parent, &BigFoo::exec, std::ref(bigFoo)); - * jobs::createJob(js, parent, bigFoo); - * jobs::createJob(js, parent, std::ref(bigFoo)); - * etc... 
- * - * struct SmallFunctor { - * uintptr_t storage[3]; - * void operator()(T* data, size_t count); - * } smallFunctor; - * - * jobs::parallel_for(js, data, count, [ up-to 3 uintptr_t ](T* data, size_t count) { }); - * jobs::parallel_for(js, data, count, smallFunctor); - * jobs::parallel_for(js, data, count, std::ref(smallFunctor)); - * - */ - - // creates an empty (no-op) job with an optional parent - Job* createJob(Job* parent = nullptr) noexcept { - return create(parent, nullptr); - } - - // creates a job from a KNOWN method pointer w/ object passed by pointer - // the caller must ensure the object will outlive the Job - template - Job* createJob(Job* parent, T* data) noexcept { - Job* job = create(parent, [](void* user, JobSystem& js, Job* job) { - (*static_cast(user)->*method)(js, job); - }); - if (job) { - job->storage[0] = data; - } - return job; - } - - // creates a job from a KNOWN method pointer w/ object passed by value - template - Job* createJob(Job* parent, T data) noexcept { - static_assert(sizeof(data) <= sizeof(Job::storage), "user data too large"); - Job* job = create(parent, [](void* user, JobSystem& js, Job* job) { - T* that = static_cast(user); - (that->*method)(js, job); - that->~T(); - }); - if (job) { - new(job->storage) T(std::move(data)); - } - return job; - } - - // creates a job from a functor passed by value - template - Job* createJob(Job* parent, T functor) noexcept { - static_assert(sizeof(functor) <= sizeof(Job::storage), "functor too large"); - Job* job = create(parent, [](void* user, JobSystem& js, Job* job){ - T& that = *static_cast(user); - that(js, job); - that.~T(); - }); - if (job) { - new(job->storage) T(std::move(functor)); - } - return job; - } - - - /* - * Jobs are normally finished automatically, this can be used to cancel a job before it is run. - * - * Never use this once a flavor of run() has been called. - */ - void cancel(Job*& job) noexcept; - - /* - * Adds a reference to a Job. - * - * This allows the caller to waitAndRelease() on this job from multiple threads. - * Use runAndWait() if waiting from multiple threads is not needed. - * - * This job MUST BE waited on with waitAndRelease(), or released with release(). - */ - Job* retain(Job* job) noexcept; - - /* - * Releases a reference from a Job obtained with runAndRetain() or a call to retain(). - * - * The job can't be used after this call. - */ - void release(Job*& job) noexcept; - void release(Job*&& job) noexcept { - Job* p = job; - release(p); - } - - /* - * Add job to this thread's execution queue. It's reference will drop automatically. - * Current thread must be owned by JobSystem's thread pool. See adopt(). - * - * The job can't be used after this call. - */ - void run(Job*& job) noexcept; - void run(Job*&& job) noexcept { // allows run(createJob(...)); - Job* p = job; - run(p); - } - - void signal() noexcept; - - /* - * Add job to this thread's execution queue and and keep a reference to it. - * Current thread must be owned by JobSystem's thread pool. See adopt(). - * - * This job MUST BE waited on with wait(), or released with release(). - */ - Job* runAndRetain(Job* job) noexcept; - - /* - * Wait on a job and destroys it. - * Current thread must be owned by JobSystem's thread pool. See adopt(). - * - * The job must first be obtained from runAndRetain() or retain(). - * The job can't be used after this call. - */ - void waitAndRelease(Job*& job) noexcept; - - /* - * Runs and wait for a job. 
This is equivalent to calling - * runAndRetain(job); - * wait(job); - * - * The job can't be used after this call. - */ - void runAndWait(Job*& job) noexcept; - void runAndWait(Job*&& job) noexcept { // allows runAndWait(createJob(...)); - Job* p = job; - runAndWait(p); - } - - // for debugging - friend utils::io::ostream& operator << (utils::io::ostream& out, JobSystem const& js); - - - // utility functions... - - // set the name of the current thread (on OSes that support it) - static void setThreadName(const char* threadName) noexcept; - - enum class Priority { - NORMAL, - DISPLAY, - URGENT_DISPLAY - }; - - static void setThreadPriority(Priority priority) noexcept; - static void setThreadAffinityById(size_t id) noexcept; - - size_t getParallelSplitCount() const noexcept { - return mParallelSplitCount; - } - -private: - // this is just to avoid using std::default_random_engine, since we're in a public header. - class default_random_engine { - static constexpr uint32_t m = 0x7fffffffu; - uint32_t mState; // must be 0 < seed < 0x7fffffff - public: - inline constexpr explicit default_random_engine(uint32_t seed = 1u) noexcept - : mState(((seed % m) == 0u) ? 1u : seed % m) { - } - inline uint32_t operator()() noexcept { - return mState = uint32_t((uint64_t(mState) * 48271u) % m); - } - }; - - struct alignas(CACHELINE_SIZE) ThreadState { // this causes 40-bytes padding - // make sure storage is cache-line aligned - WorkQueue workQueue; - - // these are not accessed by the worker threads - alignas(CACHELINE_SIZE) // this causes 56-bytes padding - JobSystem* js; - std::thread thread; - default_random_engine rndGen; - uint32_t id; - }; - - static_assert(sizeof(ThreadState) % CACHELINE_SIZE == 0, - "ThreadState doesn't align to a cache line"); - - ThreadState& getState() noexcept; - - void incRef(Job const* job) noexcept; - void decRef(Job const* job) noexcept; - - Job* allocateJob() noexcept; - JobSystem::ThreadState* getStateToStealFrom(JobSystem::ThreadState& state) noexcept; - bool hasJobCompleted(Job const* job) noexcept; - - void requestExit() noexcept; - bool exitRequested() const noexcept; - bool hasActiveJobs() const noexcept; - - void loop(ThreadState* state) noexcept; - bool execute(JobSystem::ThreadState& state) noexcept; - Job* steal(JobSystem::ThreadState& state) noexcept; - void finish(Job* job) noexcept; - - void put(WorkQueue& workQueue, Job* job) noexcept { - assert(job); - size_t index = job - mJobStorageBase; - assert(index >= 0 && index < MAX_JOB_COUNT); - workQueue.push(uint16_t(index + 1)); - } - - Job* pop(WorkQueue& workQueue) noexcept { - size_t index = workQueue.pop(); - assert(index <= MAX_JOB_COUNT); - return !index ? nullptr : &mJobStorageBase[index - 1]; - } - - Job* steal(WorkQueue& workQueue) noexcept { - size_t index = workQueue.steal(); - assert(index <= MAX_JOB_COUNT); - return !index ? nullptr : &mJobStorageBase[index - 1]; - } - - void wait(std::unique_lock& lock, Job* job = nullptr) noexcept; - void wakeAll() noexcept; - void wakeOne() noexcept; - - // these have thread contention, keep them together - utils::Mutex mWaiterLock; - utils::Condition mWaiterCondition; - - std::atomic mActiveJobs = { 0 }; - utils::Arena, LockingPolicy::NoLock> mJobPool; - - template - using aligned_vector = std::vector>; - - // these are essentially const, make sure they're on a different cache-lines than the - // read-write atomics. - // We can't use "alignas(CACHELINE_SIZE)" because the standard allocator can't make this - // guarantee. 
- char padding[CACHELINE_SIZE]; - - alignas(16) // at least we align to half (or quarter) cache-line - aligned_vector mThreadStates; // actual data is stored offline - std::atomic mExitRequested = { false }; // this one is almost never written - std::atomic mAdoptedThreads = { 0 }; // this one is almost never written - Job* const mJobStorageBase; // Base for conversion to indices - uint16_t mThreadCount = 0; // total # of threads in the pool - uint8_t mParallelSplitCount = 0; // # of split allowable in parallel_for - Job* mRootJob = nullptr; - - utils::SpinLock mThreadMapLock; // this should have very little contention - tsl::robin_map mThreadMap; -}; - -// ------------------------------------------------------------------------------------------------- -// Utility functions built on top of JobSystem - -namespace jobs { - -// These are convenience C++11 style job creation methods that support lambdas -// -// IMPORTANT: these are less efficient to call and may perform heap allocation -// depending on the capture and parameters -// -template -JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent, - CALLABLE&& func, ARGS&&... args) noexcept { - struct Data { - std::function f; - // Renaming the method below could cause an Arrested Development. - void gob(JobSystem&, JobSystem::Job*) noexcept { f(); } - } user{ std::bind(std::forward(func), - std::forward(args)...) }; - return js.createJob(parent, std::move(user)); -} - -template::type>::value - >::type -> -JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent, - CALLABLE&& func, T&& o, ARGS&&... args) noexcept { - struct Data { - std::function f; - // Renaming the method below could cause an Arrested Development. - void gob(JobSystem&, JobSystem::Job*) noexcept { f(); } - } user{ std::bind(std::forward(func), std::forward(o), - std::forward(args)...) }; - return js.createJob(parent, std::move(user)); -} - - -namespace details { - -template -struct ParallelForJobData { - using SplitterType = S; - using Functor = F; - using JobData = ParallelForJobData; - using size_type = uint32_t; - - ParallelForJobData(size_type start, size_type count, uint8_t splits, - Functor functor, - const SplitterType& splitter) noexcept - : start(start), count(count), - functor(std::move(functor)), - splits(splits), - splitter(splitter) { - } - - void parallelWithJobs(JobSystem& js, JobSystem::Job* parent) noexcept { - assert(parent); - - // this branch is often miss-predicted (it both sides happen 50% of the calls) -right_side: - if (splitter.split(splits, count)) { - const size_type lc = count / 2; - JobData ld(start, lc, splits + uint8_t(1), functor, splitter); - JobSystem::Job* l = js.createJob(parent, std::move(ld)); - if (UTILS_UNLIKELY(l == nullptr)) { - // couldn't create a job, just pretend we're done splitting - goto execute; - } - - // start the left side before attempting the right side, so we parallelize in case - // of job creation failure -- rare, but still. - js.run(l); - - // don't spawn a job for the right side, just reuse us -- spawning jobs is more - // costly than we'd like. - start += lc; - count -= lc; - ++splits; - goto right_side; - - } else { -execute: - // we're done splitting, do the real work here! - functor(start, count); - } - } - -private: - size_type start; // 4 - size_type count; // 4 - Functor functor; // ? 
- uint8_t splits; // 1 - SplitterType splitter; // 1 -}; - -} // namespace details - - -// parallel jobs with start/count indices -template -JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, - uint32_t start, uint32_t count, F functor, const S& splitter) noexcept { - using JobData = details::ParallelForJobData; - JobData jobData(start, count, 0, std::move(functor), splitter); - return js.createJob(parent, std::move(jobData)); -} - -// parallel jobs with pointer/count -template -JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, - T* data, uint32_t count, F functor, const S& splitter) noexcept { - auto user = [data, f = std::move(functor)](uint32_t s, uint32_t c) { - f(data + s, c); - }; - using JobData = details::ParallelForJobData; - JobData jobData(0, count, 0, std::move(user), splitter); - return js.createJob(parent, std::move(jobData)); -} - -// parallel jobs on a Slice<> -template -JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, - utils::Slice slice, F functor, const S& splitter) noexcept { - return parallel_for(js, parent, slice.data(), slice.size(), functor, splitter); -} - - -template -class CountSplitter { -public: - bool split(size_t splits, size_t count) const noexcept { - return (splits < MAX_SPLITS && count >= COUNT * 2); - } -}; - -} // namespace jobs -} // namespace utils - -#endif // TNT_UTILS_JOBSYSTEM_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_JOBSYSTEM_H +#define TNT_UTILS_JOBSYSTEM_H + +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace utils { + +class JobSystem { + static constexpr size_t MAX_JOB_COUNT = 16384; + static_assert(MAX_JOB_COUNT <= 0x7FFE, "MAX_JOB_COUNT must be <= 0x7FFE"); + using WorkQueue = WorkStealingDequeue; + +public: + class Job; + + using JobFunc = void(*)(void*, JobSystem&, Job*); + + class alignas(CACHELINE_SIZE) Job { + public: + Job() noexcept {} /* = default; */ /* clang bug */ // NOLINT(modernize-use-equals-default,cppcoreguidelines-pro-type-member-init) + Job(const Job&) = delete; + Job(Job&&) = delete; + + private: + friend class JobSystem; + + // Size is chosen so that we can store at least std::function<> + // the alignas() qualifier ensures we're multiple of a cache-line. + static constexpr size_t JOB_STORAGE_SIZE_BYTES = + sizeof(std::function) > 48 ? 
sizeof(std::function) : 48; + static constexpr size_t JOB_STORAGE_SIZE_WORDS = + (JOB_STORAGE_SIZE_BYTES + sizeof(void*) - 1) / sizeof(void*); + + // keep it first, so it's correctly aligned with all architectures + // this is were we store the job's data, typically a std::function<> + // v7 | v8 + void* storage[JOB_STORAGE_SIZE_WORDS]; // 48 | 48 + JobFunc function; // 4 | 8 + uint16_t parent; // 2 | 2 + std::atomic runningJobCount = { 1 }; // 2 | 2 + mutable std::atomic refCount = { 1 }; // 2 | 2 + // 6 | 2 (padding) + // 64 | 64 + }; + + explicit JobSystem(size_t threadCount = 0, size_t adoptableThreadsCount = 1) noexcept; + + ~JobSystem(); + + // Make the current thread part of the thread pool. + void adopt(); + + // Remove this adopted thread from the parent. This is intended to be used for + // shutting down a JobSystem. In particular, this doesn't allow the parent to + // adopt more thread. + void emancipate(); + + + // If a parent is not specified when creating a job, that job will automatically take the + // root job as a parent. + // The root job is reset when waited on. + Job* setRootJob(Job* job) noexcept { return mRootJob = job; } + + // use setRootJob() instead + UTILS_DEPRECATED + Job* setMasterJob(Job* job) noexcept { return setRootJob(job); } + + + Job* create(Job* parent, JobFunc func) noexcept; + + // NOTE: All methods below must be called from the same thread and that thread must be + // owned by JobSystem's thread pool. + + /* + * Job creation examples: + * ---------------------- + * + * struct Functor { + * uintptr_t storage[6]; + * void operator()(JobSystem&, Jobsystem::Job*); + * } functor; + * + * struct Foo { + * uintptr_t storage[6]; + * void method(JobSystem&, Jobsystem::Job*); + * } foo; + * + * Functor and Foo size muse be <= uintptr_t[6] + * + * createJob() + * createJob(parent) + * createJob(parent, &foo) + * createJob(parent, foo) + * createJob(parent, std::ref(foo)) + * createJob(parent, functor) + * createJob(parent, std::ref(functor)) + * createJob(parent, [ up-to 6 uintptr_t ](JobSystem*, Jobsystem::Job*){ }) + * + * Utility functions: + * ------------------ + * These are less efficient, but handle any size objects using the heap if needed. + * (internally uses std::function<>), and don't require the callee to take + * a (JobSystem&, Jobsystem::Job*) as parameter. + * + * struct BigFoo { + * uintptr_t large[16]; + * void operator()(); + * void method(int answerToEverything); + * static void exec(BigFoo&) { } + * } bigFoo; + * + * jobs::createJob(js, parent, [ any-capture ](int answerToEverything){}, 42); + * jobs::createJob(js, parent, &BigFoo::method, &bigFoo, 42); + * jobs::createJob(js, parent, &BigFoo::exec, std::ref(bigFoo)); + * jobs::createJob(js, parent, bigFoo); + * jobs::createJob(js, parent, std::ref(bigFoo)); + * etc... 
+ * + * struct SmallFunctor { + * uintptr_t storage[3]; + * void operator()(T* data, size_t count); + * } smallFunctor; + * + * jobs::parallel_for(js, data, count, [ up-to 3 uintptr_t ](T* data, size_t count) { }); + * jobs::parallel_for(js, data, count, smallFunctor); + * jobs::parallel_for(js, data, count, std::ref(smallFunctor)); + * + */ + + // creates an empty (no-op) job with an optional parent + Job* createJob(Job* parent = nullptr) noexcept { + return create(parent, nullptr); + } + + // creates a job from a KNOWN method pointer w/ object passed by pointer + // the caller must ensure the object will outlive the Job + template + Job* createJob(Job* parent, T* data) noexcept { + Job* job = create(parent, [](void* user, JobSystem& js, Job* job) { + (*static_cast(user)->*method)(js, job); + }); + if (job) { + job->storage[0] = data; + } + return job; + } + + // creates a job from a KNOWN method pointer w/ object passed by value + template + Job* createJob(Job* parent, T data) noexcept { + static_assert(sizeof(data) <= sizeof(Job::storage), "user data too large"); + Job* job = create(parent, [](void* user, JobSystem& js, Job* job) { + T* that = static_cast(user); + (that->*method)(js, job); + that->~T(); + }); + if (job) { + new(job->storage) T(std::move(data)); + } + return job; + } + + // creates a job from a functor passed by value + template + Job* createJob(Job* parent, T functor) noexcept { + static_assert(sizeof(functor) <= sizeof(Job::storage), "functor too large"); + Job* job = create(parent, [](void* user, JobSystem& js, Job* job){ + T& that = *static_cast(user); + that(js, job); + that.~T(); + }); + if (job) { + new(job->storage) T(std::move(functor)); + } + return job; + } + + + /* + * Jobs are normally finished automatically, this can be used to cancel a job before it is run. + * + * Never use this once a flavor of run() has been called. + */ + void cancel(Job*& job) noexcept; + + /* + * Adds a reference to a Job. + * + * This allows the caller to waitAndRelease() on this job from multiple threads. + * Use runAndWait() if waiting from multiple threads is not needed. + * + * This job MUST BE waited on with waitAndRelease(), or released with release(). + */ + Job* retain(Job* job) noexcept; + + /* + * Releases a reference from a Job obtained with runAndRetain() or a call to retain(). + * + * The job can't be used after this call. + */ + void release(Job*& job) noexcept; + void release(Job*&& job) noexcept { + Job* p = job; + release(p); + } + + /* + * Add job to this thread's execution queue. It's reference will drop automatically. + * Current thread must be owned by JobSystem's thread pool. See adopt(). + * + * The job can't be used after this call. + */ + void run(Job*& job) noexcept; + void run(Job*&& job) noexcept { // allows run(createJob(...)); + Job* p = job; + run(p); + } + + void signal() noexcept; + + /* + * Add job to this thread's execution queue and and keep a reference to it. + * Current thread must be owned by JobSystem's thread pool. See adopt(). + * + * This job MUST BE waited on with wait(), or released with release(). + */ + Job* runAndRetain(Job* job) noexcept; + + /* + * Wait on a job and destroys it. + * Current thread must be owned by JobSystem's thread pool. See adopt(). + * + * The job must first be obtained from runAndRetain() or retain(). + * The job can't be used after this call. + */ + void waitAndRelease(Job*& job) noexcept; + + /* + * Runs and wait for a job. 
This is equivalent to calling + * runAndRetain(job); + * wait(job); + * + * The job can't be used after this call. + */ + void runAndWait(Job*& job) noexcept; + void runAndWait(Job*&& job) noexcept { // allows runAndWait(createJob(...)); + Job* p = job; + runAndWait(p); + } + + // for debugging + friend utils::io::ostream& operator << (utils::io::ostream& out, JobSystem const& js); + + + // utility functions... + + // set the name of the current thread (on OSes that support it) + static void setThreadName(const char* threadName) noexcept; + + enum class Priority { + NORMAL, + DISPLAY, + URGENT_DISPLAY + }; + + static void setThreadPriority(Priority priority) noexcept; + static void setThreadAffinityById(size_t id) noexcept; + + size_t getParallelSplitCount() const noexcept { + return mParallelSplitCount; + } + +private: + // this is just to avoid using std::default_random_engine, since we're in a public header. + class default_random_engine { + static constexpr uint32_t m = 0x7fffffffu; + uint32_t mState; // must be 0 < seed < 0x7fffffff + public: + inline constexpr explicit default_random_engine(uint32_t seed = 1u) noexcept + : mState(((seed % m) == 0u) ? 1u : seed % m) { + } + inline uint32_t operator()() noexcept { + return mState = uint32_t((uint64_t(mState) * 48271u) % m); + } + }; + + struct alignas(CACHELINE_SIZE) ThreadState { // this causes 40-bytes padding + // make sure storage is cache-line aligned + WorkQueue workQueue; + + // these are not accessed by the worker threads + alignas(CACHELINE_SIZE) // this causes 56-bytes padding + JobSystem* js; + std::thread thread; + default_random_engine rndGen; + uint32_t id; + }; + + static_assert(sizeof(ThreadState) % CACHELINE_SIZE == 0, + "ThreadState doesn't align to a cache line"); + + ThreadState& getState() noexcept; + + void incRef(Job const* job) noexcept; + void decRef(Job const* job) noexcept; + + Job* allocateJob() noexcept; + JobSystem::ThreadState* getStateToStealFrom(JobSystem::ThreadState& state) noexcept; + bool hasJobCompleted(Job const* job) noexcept; + + void requestExit() noexcept; + bool exitRequested() const noexcept; + bool hasActiveJobs() const noexcept; + + void loop(ThreadState* state) noexcept; + bool execute(JobSystem::ThreadState& state) noexcept; + Job* steal(JobSystem::ThreadState& state) noexcept; + void finish(Job* job) noexcept; + + void put(WorkQueue& workQueue, Job* job) noexcept { + assert(job); + size_t index = job - mJobStorageBase; + assert(index >= 0 && index < MAX_JOB_COUNT); + workQueue.push(uint16_t(index + 1)); + } + + Job* pop(WorkQueue& workQueue) noexcept { + size_t index = workQueue.pop(); + assert(index <= MAX_JOB_COUNT); + return !index ? nullptr : &mJobStorageBase[index - 1]; + } + + Job* steal(WorkQueue& workQueue) noexcept { + size_t index = workQueue.steal(); + assert(index <= MAX_JOB_COUNT); + return !index ? nullptr : &mJobStorageBase[index - 1]; + } + + void wait(std::unique_lock& lock, Job* job = nullptr) noexcept; + void wakeAll() noexcept; + void wakeOne() noexcept; + + // these have thread contention, keep them together + utils::Mutex mWaiterLock; + utils::Condition mWaiterCondition; + + std::atomic mActiveJobs = { 0 }; + utils::Arena, LockingPolicy::NoLock> mJobPool; + + template + using aligned_vector = std::vector>; + + // these are essentially const, make sure they're on a different cache-lines than the + // read-write atomics. + // We can't use "alignas(CACHELINE_SIZE)" because the standard allocator can't make this + // guarantee. 
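An aside on the inlined default_random_engine above, presumably used when picking a victim queue to steal from: it is a Lehmer "minimal standard" generator with multiplier 48271 and modulus 2^31 - 1, the same parameters std::minstd_rand uses. A small stand-alone check of that equivalence (a sketch, assuming the transcription above is faithful):

#include <cassert>
#include <cstdint>
#include <random>

// Stand-alone copy of the engine shown above.
struct InlinedEngine {
    static constexpr uint32_t m = 0x7fffffffu;    // 2^31 - 1
    uint32_t state = 1u;                          // same default seed as std::minstd_rand
    uint32_t next() { return state = uint32_t((uint64_t(state) * 48271u) % m); }
};

int main() {
    InlinedEngine e;
    std::minstd_rand r;                           // multiplier 48271, modulus 2^31 - 1
    for (int i = 0; i < 1000; ++i) {
        assert(e.next() == r());                  // sequences match term by term
    }
    return 0;
}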
+ char padding[CACHELINE_SIZE]; + + alignas(16) // at least we align to half (or quarter) cache-line + aligned_vector mThreadStates; // actual data is stored offline + std::atomic mExitRequested = { false }; // this one is almost never written + std::atomic mAdoptedThreads = { 0 }; // this one is almost never written + Job* const mJobStorageBase; // Base for conversion to indices + uint16_t mThreadCount = 0; // total # of threads in the pool + uint8_t mParallelSplitCount = 0; // # of split allowable in parallel_for + Job* mRootJob = nullptr; + + utils::SpinLock mThreadMapLock; // this should have very little contention + tsl::robin_map mThreadMap; +}; + +// ------------------------------------------------------------------------------------------------- +// Utility functions built on top of JobSystem + +namespace jobs { + +// These are convenience C++11 style job creation methods that support lambdas +// +// IMPORTANT: these are less efficient to call and may perform heap allocation +// depending on the capture and parameters +// +template +JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent, + CALLABLE&& func, ARGS&&... args) noexcept { + struct Data { + std::function f; + // Renaming the method below could cause an Arrested Development. + void gob(JobSystem&, JobSystem::Job*) noexcept { f(); } + } user{ std::bind(std::forward(func), + std::forward(args)...) }; + return js.createJob(parent, std::move(user)); +} + +template::type>::value + >::type +> +JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent, + CALLABLE&& func, T&& o, ARGS&&... args) noexcept { + struct Data { + std::function f; + // Renaming the method below could cause an Arrested Development. + void gob(JobSystem&, JobSystem::Job*) noexcept { f(); } + } user{ std::bind(std::forward(func), std::forward(o), + std::forward(args)...) }; + return js.createJob(parent, std::move(user)); +} + + +namespace details { + +template +struct ParallelForJobData { + using SplitterType = S; + using Functor = F; + using JobData = ParallelForJobData; + using size_type = uint32_t; + + ParallelForJobData(size_type start, size_type count, uint8_t splits, + Functor functor, + const SplitterType& splitter) noexcept + : start(start), count(count), + functor(std::move(functor)), + splits(splits), + splitter(splitter) { + } + + void parallelWithJobs(JobSystem& js, JobSystem::Job* parent) noexcept { + assert(parent); + + // this branch is often miss-predicted (it both sides happen 50% of the calls) +right_side: + if (splitter.split(splits, count)) { + const size_type lc = count / 2; + JobData ld(start, lc, splits + uint8_t(1), functor, splitter); + JobSystem::Job* l = js.createJob(parent, std::move(ld)); + if (UTILS_UNLIKELY(l == nullptr)) { + // couldn't create a job, just pretend we're done splitting + goto execute; + } + + // start the left side before attempting the right side, so we parallelize in case + // of job creation failure -- rare, but still. + js.run(l); + + // don't spawn a job for the right side, just reuse us -- spawning jobs is more + // costly than we'd like. + start += lc; + count -= lc; + ++splits; + goto right_side; + + } else { +execute: + // we're done splitting, do the real work here! + functor(start, count); + } + } + +private: + size_type start; // 4 + size_type count; // 4 + Functor functor; // ? 
+ uint8_t splits; // 1 + SplitterType splitter; // 1 +}; + +} // namespace details + + +// parallel jobs with start/count indices +template +JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, + uint32_t start, uint32_t count, F functor, const S& splitter) noexcept { + using JobData = details::ParallelForJobData; + JobData jobData(start, count, 0, std::move(functor), splitter); + return js.createJob(parent, std::move(jobData)); +} + +// parallel jobs with pointer/count +template +JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, + T* data, uint32_t count, F functor, const S& splitter) noexcept { + auto user = [data, f = std::move(functor)](uint32_t s, uint32_t c) { + f(data + s, c); + }; + using JobData = details::ParallelForJobData; + JobData jobData(0, count, 0, std::move(user), splitter); + return js.createJob(parent, std::move(jobData)); +} + +// parallel jobs on a Slice<> +template +JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, + utils::Slice slice, F functor, const S& splitter) noexcept { + return parallel_for(js, parent, slice.data(), slice.size(), functor, splitter); +} + + +template +class CountSplitter { +public: + bool split(size_t splits, size_t count) const noexcept { + return (splits < MAX_SPLITS && count >= COUNT * 2); + } +}; + +} // namespace jobs +} // namespace utils + +#endif // TNT_UTILS_JOBSYSTEM_H diff --git a/ios/include/utils/Log.h b/ios/include/utils/Log.h index 71de1b09..77886426 100644 --- a/ios/include/utils/Log.h +++ b/ios/include/utils/Log.h @@ -1,43 +1,46 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_LOG_H -#define TNT_UTILS_LOG_H - -#include -#include - -namespace utils { - -struct UTILS_PUBLIC Loggers { - // DEBUG level logging stream - io::ostream& d; - - // ERROR level logging stream - io::ostream& e; - - // WARNING level logging stream - io::ostream& w; - - // INFORMATION level logging stream - io::ostream& i; -}; - -extern UTILS_PUBLIC Loggers const slog; - -} // namespace utils - -#endif // TNT_UTILS_LOG_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
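A usage sketch (not from the patch) for the jobs::parallel_for() overloads and CountSplitter declared at the end of JobSystem.h above; the float payload, the doubling work, and the chunk size of 64 are illustrative, and CountSplitter is assumed to take (COUNT, MAX_SPLITS) template parameters as its split() predicate suggests:

#include <cstdint>
#include <utils/JobSystem.h>

// Scales `count` floats in parallel; splitting stops once a chunk holds fewer than
// 2 * 64 elements, or the maximum split depth is reached, per split() above.
void scaleAll(utils::JobSystem& js, float* data, uint32_t count) {
    auto* job = utils::jobs::parallel_for(js, nullptr, data, count,
            [](float* p, uint32_t n) {
                for (uint32_t i = 0; i < n; i++) {
                    p[i] *= 2.0f;
                }
            },
            utils::jobs::CountSplitter<64>());
    js.runAndWait(job);
}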
+ */ + +#ifndef TNT_UTILS_LOG_H +#define TNT_UTILS_LOG_H + +#include +#include + +namespace utils { + +struct UTILS_PUBLIC Loggers { + // DEBUG level logging stream + io::ostream& d; + + // ERROR level logging stream + io::ostream& e; + + // WARNING level logging stream + io::ostream& w; + + // INFORMATION level logging stream + io::ostream& i; + + // VERBOSE level logging stream + io::ostream& v; +}; + +extern UTILS_PUBLIC Loggers const slog; + +} // namespace utils + +#endif // TNT_UTILS_LOG_H diff --git a/ios/include/utils/Mutex.h b/ios/include/utils/Mutex.h index b0a74cb8..8d12d748 100644 --- a/ios/include/utils/Mutex.h +++ b/ios/include/utils/Mutex.h @@ -1,26 +1,26 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_MUTEX_H -#define UTILS_MUTEX_H - -#if defined(__linux__) && !defined(__SANITIZE_THREAD__) -#include -#else -#include -#endif - -#endif // UTILS_MUTEX_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_MUTEX_H +#define TNT_UTILS_MUTEX_H + +#if defined(__linux__) +#include +#else +#include +#endif + +#endif // TNT_UTILS_MUTEX_H diff --git a/ios/include/utils/NameComponentManager.h b/ios/include/utils/NameComponentManager.h index c12366a9..62201d11 100644 --- a/ios/include/utils/NameComponentManager.h +++ b/ios/include/utils/NameComponentManager.h @@ -1,133 +1,133 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef TNT_UTILS_NAMECOMPONENTMANAGER_H -#define TNT_UTILS_NAMECOMPONENTMANAGER_H - -#include -#include -#include - -#include -#include -#include -#include - -#include - -namespace utils { - -class EntityManager; - -namespace details { -class SafeString { -public: - SafeString() noexcept = default; - explicit SafeString(const char* str) noexcept : mCStr(strdup(str)) { } - SafeString(SafeString&& rhs) noexcept : mCStr(rhs.mCStr) { rhs.mCStr = nullptr; } - SafeString& operator=(SafeString&& rhs) noexcept { - std::swap(mCStr, rhs.mCStr); - return *this; - } - ~SafeString() { free((void*)mCStr); } - const char* c_str() const noexcept { return mCStr; } - -private: - char const* mCStr = nullptr; -}; -} // namespace details - - -/** - * \class NameComponentManager NameComponentManager.h utils/NameComponentManager.h - * \brief Allows clients to associate string labels with entities. - * - * To access the name of an existing entity, clients should first use NameComponentManager to get a - * temporary handle called an \em instance. Please note that instances are ephemeral; clients should - * store entities, not instances. - * - * Usage example: - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * auto names = new NameComponentManager(EntityManager::get()); - * names->addComponent(myEntity); - * names->setName(names->getInstance(myEntity), "Jeanne d'Arc"); - * ... - * printf("%s\n", names->getName(names->getInstance(myEntity)); - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ -class UTILS_PUBLIC NameComponentManager : public SingleInstanceComponentManager { -public: - using Instance = EntityInstance; - - /** - * Creates a new name manager associated with the given entity manager. - * - * Note that multiple string labels could be associated with each entity simply by - * creating multiple instances of NameComponentManager. - */ - explicit NameComponentManager(EntityManager& em); - ~NameComponentManager(); - - /** - * Checks if the given entity already has a name component. - */ - using SingleInstanceComponentManager::hasComponent; - - /** - * Gets a temporary handle that can be used to access the name. - * - * @return Non-zero handle if the entity has a name component, 0 otherwise. - */ - Instance getInstance(Entity e) const noexcept { - return Instance(SingleInstanceComponentManager::getInstance(e)); - } - - /*! \cond PRIVATE */ - // these are implemented in SingleInstanceComponentManager<>, but we need to - // reimplement them in each manager, to ensure they are generated in an implementation file - // for backward binary compatibility reasons. - size_t getComponentCount() const noexcept; - Entity const* getEntities() const noexcept; - void gc(const EntityManager& em, size_t ratio = 4) noexcept; - /*! \endcond */ - - /** - * Adds a name component to the given entity if it doesn't already exist. - */ - void addComponent(Entity e); - - /** - * Removes the name component to the given entity if it exists. - */ - void removeComponent(Entity e); - - /** - * Stores a copy of the given string and associates it with the given instance. - */ - void setName(Instance instance, const char* name) noexcept; - - /** - * Retrieves the string associated with the given instance, or nullptr if none exists. 
- * - * @return pointer to the copy that was made during setName() - */ - const char* getName(Instance instance) const noexcept; -}; - -} // namespace utils - -#endif // TNT_UTILS_NAMECOMPONENTMANAGER_H +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_NAMECOMPONENTMANAGER_H +#define TNT_UTILS_NAMECOMPONENTMANAGER_H + +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace utils { + +class EntityManager; + +namespace details { +class SafeString { +public: + SafeString() noexcept = default; + explicit SafeString(const char* str) noexcept : mCStr(strdup(str)) { } + SafeString(SafeString&& rhs) noexcept : mCStr(rhs.mCStr) { rhs.mCStr = nullptr; } + SafeString& operator=(SafeString&& rhs) noexcept { + std::swap(mCStr, rhs.mCStr); + return *this; + } + ~SafeString() { free((void*)mCStr); } + const char* c_str() const noexcept { return mCStr; } + +private: + char const* mCStr = nullptr; +}; +} // namespace details + + +/** + * \class NameComponentManager NameComponentManager.h utils/NameComponentManager.h + * \brief Allows clients to associate string labels with entities. + * + * To access the name of an existing entity, clients should first use NameComponentManager to get a + * temporary handle called an \em instance. Please note that instances are ephemeral; clients should + * store entities, not instances. + * + * Usage example: + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * auto names = new NameComponentManager(EntityManager::get()); + * names->addComponent(myEntity); + * names->setName(names->getInstance(myEntity), "Jeanne d'Arc"); + * ... + * printf("%s\n", names->getName(names->getInstance(myEntity)); + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +class UTILS_PUBLIC NameComponentManager : public SingleInstanceComponentManager { +public: + using Instance = EntityInstance; + + /** + * Creates a new name manager associated with the given entity manager. + * + * Note that multiple string labels could be associated with each entity simply by + * creating multiple instances of NameComponentManager. + */ + explicit NameComponentManager(EntityManager& em); + ~NameComponentManager(); + + /** + * Checks if the given entity already has a name component. + */ + using SingleInstanceComponentManager::hasComponent; + + /** + * Gets a temporary handle that can be used to access the name. + * + * @return Non-zero handle if the entity has a name component, 0 otherwise. + */ + Instance getInstance(Entity e) const noexcept { + return Instance(SingleInstanceComponentManager::getInstance(e)); + } + + /*! \cond PRIVATE */ + // these are implemented in SingleInstanceComponentManager<>, but we need to + // reimplement them in each manager, to ensure they are generated in an implementation file + // for backward binary compatibility reasons. 
+ size_t getComponentCount() const noexcept; + Entity const* getEntities() const noexcept; + void gc(const EntityManager& em, size_t ratio = 4) noexcept; + /*! \endcond */ + + /** + * Adds a name component to the given entity if it doesn't already exist. + */ + void addComponent(Entity e); + + /** + * Removes the name component to the given entity if it exists. + */ + void removeComponent(Entity e); + + /** + * Stores a copy of the given string and associates it with the given instance. + */ + void setName(Instance instance, const char* name) noexcept; + + /** + * Retrieves the string associated with the given instance, or nullptr if none exists. + * + * @return pointer to the copy that was made during setName() + */ + const char* getName(Instance instance) const noexcept; +}; + +} // namespace utils + +#endif // TNT_UTILS_NAMECOMPONENTMANAGER_H diff --git a/ios/include/utils/Panic.h b/ios/include/utils/Panic.h index 31b6bea9..2be50d18 100644 --- a/ios/include/utils/Panic.h +++ b/ios/include/utils/Panic.h @@ -1,561 +1,561 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_PANIC_H_ -#define UTILS_PANIC_H_ - -#include - -#include -#include - -#ifdef __EXCEPTIONS -# define UTILS_EXCEPTIONS 1 -#else -#endif - -/** - * @defgroup errors Handling Catastrophic Failures (Panics) - * - * @brief Failure detection and reporting facilities - * - * ## What's a Panic? ## - * - * In the context of this document, a _panic_ is a type of error due to a _contract violation_, - * it shouldn't be confused with a _result_ or _status_ code. The POSIX API for instance, - * unfortunately often conflates the two. - * @see - * - * - * Here we give the following definition of a _panic_: - * - * 1. Failures to meet a function's own **postconditions**\n - * The function cannot establish one of its own postconditions, such as (but not limited to) - * producing a valid return value object. - * - * Often these failures are only detectable at runtime, for instance they can be caused by - * arithmetic errors, as it was the case for the Ariane 5 rocket. Ariane 5 crashed because it - * reused an inertial module from Ariane 4, which didn't account for the greater horizontal - * acceleration of Ariane 5 and caused an overflow in the computations. Ariane 4's module - * wasn't per-say buggy, but was improperly used and failed to meet, obviously, certain - * postconditions. - * @see - * - * 2. Failures to meet the **preconditions** of any of a function's callees\n - * The function cannot meet a precondition of another function it must call, such as a - * restriction on a parameter. - * - * Not to be confused with the case where the preconditions of a function are already - * violated upon entry, which indicates a programming error from the caller. - * - * Typically these failures can be avoided and arise because of programming errors. 
- * - * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * ## Failure reporting vs. handling ## - * - * Very often when a panic, as defined above, is detected, the program has little other choice - * but to terminate.\n - * Typically these situations can be handled by _assert()_. However, _assert()_ also conflates two - * very different concepts: detecting and handling failures.\n - * The place where a failure is detected is rarely the place where there is enough - * context to decide what to do. _assert()_ terminates the program which, may or may not be - * appropriate. At the very least the failure must be logged (which _assert()_ does in a crude way), - * but some other actions might need to happen, such as:\n - * - * - logging the failure in the system-wide logger - * - providing enough information in development builds to analyze/debug the problem - * - cleanly releasing some resources, such as communication channels with other processes\n - * e.g.: to avoid their pre- or postconditions from being violated as well. - * - * In some _rare_ cases, the failure might even be ignored altogether because it doesn't matter in - * the context where it happened. This decision clearly doesn't always lie at the failure-site. - * - * It is therefore important to separate failure detection from handling. - * - * - * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * ## Failure detection and handling facilities ## - * - * Clearly, catastrophic failures should be **rare**; in fact they should - * never happen, except possibly for "failures to meet a function's own postconditions", which - * may depend on external factors and should still be very rare. Yet, when a failure happens, it - * must be detected and handled appropriately.\n - * Since panics are rare, it is desirable that the handling mechanism be as unobtrusive - * as possible, without allowing such failures to go unnoticed or swallowed by mistake. Ideally, the - * programmer using an API should have nothing special to do to handle that API's failure - * conditions.\n\n - * - * An important feature of the Panic Handling facilities here is that **panics are not part of - * the API of a function or method**\n\n - * - * - * The panic handling facility has the following benefits: - * - provides an easy way to detect and report failure - * - separates failure detection from handling - * - makes it hard for detected failures to be ignored (i.e.: not handled) - * - doesn't add burden on the API design - * - doesn't add overhead (visual or otherwise) at call sites - * - has very little performance overhead for failure detection - * - has little to no performance impact for failure handling in the common (success) case - * - is flexible and extensible - * - * Since we have established that failures are **rare**, **exceptional** situations, it would be - * appropriate to handle them with an _assert_ mechanism and that's what the API below - * provides. However, under-the-hood it uses C++ exceptions as a means to separate - * _reporting_ from _handling_. - * - * \note On devices where exceptions are not supported or appropriate, these APIs can be turned - * into a regular _std::terminate()_. - * - * - * ASSERT_PRECONDITION(condition, format, ...) - * ASSERT_POSTCONDITION(condition, format, ...) - * ASSERT_ARITHMETIC(condition, format, ...) - * ASSERT_DESTRUCTOR(condition, format, ...) 
- * - * - * @see ASSERT_PRECONDITION, ASSERT_POSTCONDITION, ASSERT_ARITHMETIC - * @see ASSERT_DESTRUCTOR - * - * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * ## Writing code that can assert ## - * - * Because we've separated failure reporting from failure handling, there are some considerations - * that need to be thought about when writing code that calls the macros above (i.e.: the program - * won't terminate at the point where the failure is detected).\n\n - * - * - * ### Panic guarantees ### - * - * After the failure condition is reported by a function, additional guarantees may be provided - * with regards to the state of the program. The following four levels of guarantee are - * generally recognized, each of which is a strict superset of its successors: - * - * 1. Nothrow exception guarantee\n - * The function never asserts. e.g.: This should always be the case with destructors.\n\n - * - * 2. Strong exception guarantee\n - * If the function asserts, the state of the program is rolled back to the state just before - * the function call.\n\n - * - * 3. Basic exception guarantee\n - * If the function asserts, the program is in a valid state. It may require cleanup, - * but all invariants are intact.\n\n - * - * 4. No exception guarantee\n - * If the function asserts, the program may not be in a valid state: resource leaks, memory - * corruption, or other invariant-destroying failures may have occurred. - * - * In each function, give the **strongest** safety guarantee that won't penalize callers who - * don't need it, but **always give at least the basic guarantee**. The RAII (Resource - * Acquisition Is Initialization) pattern can help with achieving these guarantees. - * - * @see [RAII](http://en.wikipedia.org/wiki/Resource_Acquisition_Is_Initialization) - * - * ### Special considerations for Constructors ### - * - * Constructors are a bit special because if a failure occurs during their execution, the - * destructor won't be called (how could it? since the object wasn't constructed!). This can lead - * to leaked resources allocated in the constructor prior to the failure. Thankfully there is - * a nice C++ syntax to handle this case: - * - * @code - * Foo::Foo(size_t s) try : m_size(s), m_buf(new uint32_t[s]) { - * ASSERT_POSTCONDITION(s&0xF==0, - * "object size is %u, but must be multiple of 16", s); - * } catch (...) { - * delete [] m_buf; - * // the exception will be automatically re-thrown - * } - * @endcode - * - * Unfortunately, this usage leaks the underlying, exception-based, implementation of the - * panic handling macros. For this reason, it is best to keep constructors simple and guarantee - * they can't fail. An _init()_ function with a factory can be used for actual initialization. - * - * - * ### Special considerations for Destructors ### - * - * In C++ destructors cannot throw exceptions and since the above macros internally use exceptions - * they cannot be used in destructors. Doing so will result in immediate termination of the - * program by _std::terminate()_.\n - * It is therefore best to always guarantee that destructors won't fail. In case of such a - * failure in a destructor the ASSERT_DESTRUCTOR() macro can be used instead, it - * will log the failure but won't terminate the program, instead it'll proceed as if nothing - * happened. 
Generally this will result in some resource leak which, eventually, will cause - * another failure (typically a postcondition violation).\n\n - * - * Rationale for this behavior: There are fundamentally no way to report a failure from a - * destructor in C++, violently terminating the process is inadequate because it again conflates - * failure reporting and failure handling; for instance a failure in glDeleteTextures() shouldn't - * be necessarily fatal (certainly not without saving the user's data first). The alternative - * would be for the caller to swallow the failure entirely, but that's not great either because the - * failure would go unnoticed. The solution retained here is a compromise. - * - * @see ASSERT_DESTRUCTOR - * - * ### Testing Code that Uses Panics ### - * - * Since panics use exceptions for their underlying implementation, you can test code that uses - * panics with EXPECT_THROW by doing the following things: - * \li Set panic mode to THROW (default is TERMINATE) - * \li Pass Panic to EXPECT_THROW as the exception type - * - * Example code for your test file: - * - * @code - * #include // since your code uses panics, this should include utils/Panic.hpp - * - * using utils::Panic; - * - * TEST(MyClassTest, value_that_causes_panic) { - * EXPECT_THROW(MyClass::function(value_that_causes_panic), Panic); - * } - * - * // ... other tests ... - * - * int main(int argc, char** argv) { - * ::testing::InitGoogleTest(&argc, argv); - * Panic::setMode(Panic::Mode::THROW); - * return RUN_ALL_TESTS(); - * } - * @endcode - * - */ - -namespace utils { - -// ----------------------------------------------------------------------------------------------- - -/** - * @ingroup errors - * - * \brief Base class of all exceptions thrown by all the ASSERT macros - * - * The Panic class provides the std::exception protocol, it is the base exception object - * used for all thrown exceptions. - */ -class UTILS_PUBLIC Panic { -public: - virtual ~Panic() noexcept; - - /** - * @return a detailed description of the error - * @see std::exception - */ - virtual const char* what() const noexcept = 0; - - /** - * Get the function name where the panic was detected - * @return a C string containing the function name where the panic was detected - */ - virtual const char* getFunction() const noexcept = 0; - - /** - * Get the file name where the panic was detected - * @return a C string containing the file name where the panic was detected - */ - virtual const char* getFile() const noexcept = 0; - - /** - * Get the line number in the file where the panic was detected - * @return an integer containing the line number in the file where the panic was detected - */ - virtual int getLine() const noexcept = 0; - - /** - * Logs this exception to the system-log - */ - virtual void log() const noexcept = 0; - - /** - * Get the CallStack when the panic was detected - * @return the CallStack when the panic was detected - */ - virtual const CallStack& getCallStack() const noexcept = 0; -}; - -// ----------------------------------------------------------------------------------------------- - -/** - * @ingroup errors - * - * \brief Concrete implementation of the Panic interface. - * - * The TPanic<> class implements the std::exception protocol as well as the Panic - * interface common to all exceptions thrown by the framework. 
- */ -template -class UTILS_PUBLIC TPanic : public Panic { -public: - // std::exception protocol - const char* what() const noexcept override; - - // Panic interface - const char* getFunction() const noexcept override; - const char* getFile() const noexcept override; - int getLine() const noexcept override; - const CallStack& getCallStack() const noexcept override; - void log() const noexcept override; - - /** - * Depending on the mode set, either throws an exception of type T with the given reason plus - * extra information about the error-site, or logs the error and calls std::terminate(). - * This function never returns. - * @param function the name of the function where the error was detected - * @param file the file where the above function in implemented - * @param line the line in the above file where the error was detected - * @param format printf style string describing the error - * @see ASSERT_PRECONDITION, ASSERT_POSTCONDITION, ASSERT_ARITHMETIC - * @see PANIC_PRECONDITION, PANIC_POSTCONDITION, PANIC_ARITHMETIC - * @see setMode() - */ - static void panic(char const* function, char const* file, int line, const char* format, ...) - UTILS_NORETURN; - - /** - * Depending on the mode set, either throws an exception of type T with the given reason plus - * extra information about the error-site, or logs the error and calls std::terminate(). - * This function never returns. - * @param function the name of the function where the error was detected - * @param file the file where the above function in implemented - * @param line the line in the above file where the error was detected - * @param s std::string describing the error - * @see ASSERT_PRECONDITION, ASSERT_POSTCONDITION, ASSERT_ARITHMETIC - * @see PANIC_PRECONDITION, PANIC_POSTCONDITION, PANIC_ARITHMETIC - * @see setMode() - */ - static inline void panic(char const* function, char const* file, int line, const std::string& s) - UTILS_NORETURN { - panic(function, file, line, s.c_str()); - } - -protected: - /** - * Creates a Panic. - * @param reason a description of the cause of the error - */ - explicit TPanic(std::string reason); - - /** - * Creates a Panic with extra information about the error-site. - * @param function the name of the function where the error was detected - * @param file the file where the above function in implemented - * @param line the line in the above file where the error was detected - * @param reason a description of the cause of the error - */ - TPanic(char const* function, char const* file, int line, std::string reason); - - ~TPanic() override; - -private: - void buildMessage(); - - CallStack m_callstack; - std::string m_reason; - char const* const m_function = nullptr; - char const* const m_file = nullptr; - const int m_line = -1; - mutable std::string m_msg; -}; - -namespace details { -// these are private, don't use -void panicLog( - char const* function, char const* file, int line, const char* format, ...) noexcept; -} // namespace details - -// ----------------------------------------------------------------------------------------------- - -/** - * @ingroup errors - * - * ASSERT_PRECONDITION uses this Panic to report a precondition failure. - * @see ASSERT_PRECONDITION - */ -class UTILS_PUBLIC PreconditionPanic : public TPanic { - // Programming error, can be avoided - // e.g.: invalid arguments - using TPanic::TPanic; - friend class TPanic; -}; - -/** - * @ingroup errors - * - * ASSERT_POSTCONDITION uses this Panic to report a postcondition failure. 
- * @see ASSERT_POSTCONDITION - */ -class UTILS_PUBLIC PostconditionPanic : public TPanic { - // Usually only detectable at runtime - // e.g.: dead-lock would occur, arithmetic errors - using TPanic::TPanic; - friend class TPanic; -}; - -/** - * @ingroup errors - * - * ASSERT_ARITHMETIC uses this Panic to report an arithmetic (postcondition) failure. - * @see ASSERT_ARITHMETIC - */ -class UTILS_PUBLIC ArithmeticPanic : public TPanic { - // A common case of post-condition error - // e.g.: underflow, overflow, internal computations errors - using TPanic::TPanic; - friend class TPanic; -}; - -// ----------------------------------------------------------------------------------------------- -} // namespace utils - -#ifndef NDEBUG -# define PANIC_FILE(F) (F) -#else -# define PANIC_FILE(F) "" -#endif - -/** - * PANIC_PRECONDITION is a macro that reports a PreconditionPanic - * @param format printf-style string describing the error in more details - */ -#define PANIC_PRECONDITION(format, ...) \ - ::utils::PreconditionPanic::panic(__PRETTY_FUNCTION__, \ - PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__) - -/** - * PANIC_POSTCONDITION is a macro that reports a PostconditionPanic - * @param format printf-style string describing the error in more details - */ -#define PANIC_POSTCONDITION(format, ...) \ - ::utils::PostconditionPanic::panic(__PRETTY_FUNCTION__, \ - PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__) - -/** - * PANIC_ARITHMETIC is a macro that reports a ArithmeticPanic - * @param format printf-style string describing the error in more details - */ -#define PANIC_ARITHMETIC(format, ...) \ - ::utils::ArithmeticPanic::panic(__PRETTY_FUNCTION__, \ - PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__) - -/** - * PANIC_LOG is a macro that logs a Panic, and continues as usual. - * @param format printf-style string describing the error in more details - */ -#define PANIC_LOG(format, ...) \ - ::utils::details::panicLog(__PRETTY_FUNCTION__, \ - PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__) - -/** - * @ingroup errors - * - * ASSERT_PRECONDITION is a macro that checks the given condition and reports a PreconditionPanic - * if it evaluates to false. - * @param cond a boolean expression - * @param format printf-style string describing the error in more details - */ -#define ASSERT_PRECONDITION(cond, format, ...) \ - (!UTILS_LIKELY(cond) ? PANIC_PRECONDITION(format, ##__VA_ARGS__) : (void)0) - -#if defined(UTILS_EXCEPTIONS) || !defined(NDEBUG) -#define ASSERT_PRECONDITION_NON_FATAL(cond, format, ...) \ - (!UTILS_LIKELY(cond) ? PANIC_PRECONDITION(format, ##__VA_ARGS__), false : true) -#else -#define ASSERT_PRECONDITION_NON_FATAL(cond, format, ...) \ - (!UTILS_LIKELY(cond) ? PANIC_LOG(format, ##__VA_ARGS__), false : true) -#endif - - -/** - * @ingroup errors - * - * ASSERT_POSTCONDITION is a macro that checks the given condition and reports a PostconditionPanic - * if it evaluates to false. - * @param cond a boolean expression - * @param format printf-style string describing the error in more details - * - * Example: - * @code - * int& Foo::operator[](size_t index) { - * ASSERT_POSTCONDITION(index=0 && v<65536, "overflow occurred"); - * return uint32_t(v); - * } - * @endcode - */ -#define ASSERT_ARITHMETIC(cond, format, ...) \ - (!(cond) ? PANIC_ARITHMETIC(format, ##__VA_ARGS__) : (void)0) - -#if defined(UTILS_EXCEPTIONS) || !defined(NDEBUG) -#define ASSERT_ARITHMETIC_NON_FATAL(cond, format, ...) \ - (!UTILS_LIKELY(cond) ? 
PANIC_ARITHMETIC(format, ##__VA_ARGS__), false : true) -#else -#define ASSERT_ARITHMETIC_NON_FATAL(cond, format, ...) \ - (!UTILS_LIKELY(cond) ? PANIC_LOG(format, ##__VA_ARGS__), false : true) -#endif - -/** - * @ingroup errors - * - * ASSERT_DESTRUCTOR is a macro that checks the given condition and logs an error - * if it evaluates to false. - * @param cond a boolean expression - * @param format printf-style string describing the error in more details - * - * @warning Use this macro if a destructor can fail, which should be avoided at all costs. - * Unlike the other ASSERT macros, this will never result in the process termination. Instead, - * the error will be logged and the program will continue as if nothing happened. - * - * Example: - * @code - * Foo::~Foo() { - * glDeleteTextures(1, &m_texture); - * GLint err = glGetError(); - * ASSERT_DESTRUCTOR(err == GL_NO_ERROR, "cannot free GL resource!"); - * } - * @endcode - */ -#define ASSERT_DESTRUCTOR(cond, format, ...) (!(cond) ? PANIC_LOG(format, ##__VA_ARGS__) : (void)0) - -#endif // UTILS_PANIC_H_ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_PANIC_H +#define TNT_UTILS_PANIC_H + +#include + +#include +#include + +#ifdef __EXCEPTIONS +# define UTILS_EXCEPTIONS 1 +#else +#endif + +/** + * @defgroup errors Handling Catastrophic Failures (Panics) + * + * @brief Failure detection and reporting facilities + * + * ## What's a Panic? ## + * + * In the context of this document, a _panic_ is a type of error due to a _contract violation_, + * it shouldn't be confused with a _result_ or _status_ code. The POSIX API for instance, + * unfortunately often conflates the two. + * @see + * + * + * Here we give the following definition of a _panic_: + * + * 1. Failures to meet a function's own **postconditions**\n + * The function cannot establish one of its own postconditions, such as (but not limited to) + * producing a valid return value object. + * + * Often these failures are only detectable at runtime, for instance they can be caused by + * arithmetic errors, as it was the case for the Ariane 5 rocket. Ariane 5 crashed because it + * reused an inertial module from Ariane 4, which didn't account for the greater horizontal + * acceleration of Ariane 5 and caused an overflow in the computations. Ariane 4's module + * wasn't per-say buggy, but was improperly used and failed to meet, obviously, certain + * postconditions. + * @see + * + * 2. Failures to meet the **preconditions** of any of a function's callees\n + * The function cannot meet a precondition of another function it must call, such as a + * restriction on a parameter. + * + * Not to be confused with the case where the preconditions of a function are already + * violated upon entry, which indicates a programming error from the caller. + * + * Typically these failures can be avoided and arise because of programming errors. 
+ * + * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * ## Failure reporting vs. handling ## + * + * Very often when a panic, as defined above, is detected, the program has little other choice + * but to terminate.\n + * Typically these situations can be handled by _assert()_. However, _assert()_ also conflates two + * very different concepts: detecting and handling failures.\n + * The place where a failure is detected is rarely the place where there is enough + * context to decide what to do. _assert()_ terminates the program which, may or may not be + * appropriate. At the very least the failure must be logged (which _assert()_ does in a crude way), + * but some other actions might need to happen, such as:\n + * + * - logging the failure in the system-wide logger + * - providing enough information in development builds to analyze/debug the problem + * - cleanly releasing some resources, such as communication channels with other processes\n + * e.g.: to avoid their pre- or postconditions from being violated as well. + * + * In some _rare_ cases, the failure might even be ignored altogether because it doesn't matter in + * the context where it happened. This decision clearly doesn't always lie at the failure-site. + * + * It is therefore important to separate failure detection from handling. + * + * + * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * ## Failure detection and handling facilities ## + * + * Clearly, catastrophic failures should be **rare**; in fact they should + * never happen, except possibly for "failures to meet a function's own postconditions", which + * may depend on external factors and should still be very rare. Yet, when a failure happens, it + * must be detected and handled appropriately.\n + * Since panics are rare, it is desirable that the handling mechanism be as unobtrusive + * as possible, without allowing such failures to go unnoticed or swallowed by mistake. Ideally, the + * programmer using an API should have nothing special to do to handle that API's failure + * conditions.\n\n + * + * An important feature of the Panic Handling facilities here is that **panics are not part of + * the API of a function or method**\n\n + * + * + * The panic handling facility has the following benefits: + * - provides an easy way to detect and report failure + * - separates failure detection from handling + * - makes it hard for detected failures to be ignored (i.e.: not handled) + * - doesn't add burden on the API design + * - doesn't add overhead (visual or otherwise) at call sites + * - has very little performance overhead for failure detection + * - has little to no performance impact for failure handling in the common (success) case + * - is flexible and extensible + * + * Since we have established that failures are **rare**, **exceptional** situations, it would be + * appropriate to handle them with an _assert_ mechanism and that's what the API below + * provides. However, under-the-hood it uses C++ exceptions as a means to separate + * _reporting_ from _handling_. + * + * \note On devices where exceptions are not supported or appropriate, these APIs can be turned + * into a regular _std::terminate()_. + * + * + * ASSERT_PRECONDITION(condition, format, ...) + * ASSERT_POSTCONDITION(condition, format, ...) + * ASSERT_ARITHMETIC(condition, format, ...) + * ASSERT_DESTRUCTOR(condition, format, ...) 
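A concrete sketch (not from the patch) of the assertion macros listed above; the kMaxSize limit, the gCapacity state, and resizeBuffer() itself are hypothetical:

#include <cstddef>
#include <utils/Panic.h>

static constexpr size_t kMaxSize = 1024;   // hypothetical limit
static size_t gCapacity = 0;               // hypothetical state touched by the work

// A caller passing a bad argument is a precondition failure; failing to deliver
// what the function promised is a postcondition failure.
void resizeBuffer(size_t newSize) {
    ASSERT_PRECONDITION(newSize <= kMaxSize,
            "newSize (%zu) exceeds the maximum of %zu", newSize, kMaxSize);

    gCapacity = newSize;                   // ... the real work would happen here ...

    ASSERT_POSTCONDITION(gCapacity >= newSize,
            "capacity (%zu) is smaller than requested (%zu)", gCapacity, newSize);
}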
+ * + * + * @see ASSERT_PRECONDITION, ASSERT_POSTCONDITION, ASSERT_ARITHMETIC + * @see ASSERT_DESTRUCTOR + * + * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * ## Writing code that can assert ## + * + * Because we've separated failure reporting from failure handling, there are some considerations + * that need to be thought about when writing code that calls the macros above (i.e.: the program + * won't terminate at the point where the failure is detected).\n\n + * + * + * ### Panic guarantees ### + * + * After the failure condition is reported by a function, additional guarantees may be provided + * with regards to the state of the program. The following four levels of guarantee are + * generally recognized, each of which is a strict superset of its successors: + * + * 1. Nothrow exception guarantee\n + * The function never asserts. e.g.: This should always be the case with destructors.\n\n + * + * 2. Strong exception guarantee\n + * If the function asserts, the state of the program is rolled back to the state just before + * the function call.\n\n + * + * 3. Basic exception guarantee\n + * If the function asserts, the program is in a valid state. It may require cleanup, + * but all invariants are intact.\n\n + * + * 4. No exception guarantee\n + * If the function asserts, the program may not be in a valid state: resource leaks, memory + * corruption, or other invariant-destroying failures may have occurred. + * + * In each function, give the **strongest** safety guarantee that won't penalize callers who + * don't need it, but **always give at least the basic guarantee**. The RAII (Resource + * Acquisition Is Initialization) pattern can help with achieving these guarantees. + * + * @see [RAII](http://en.wikipedia.org/wiki/Resource_Acquisition_Is_Initialization) + * + * ### Special considerations for Constructors ### + * + * Constructors are a bit special because if a failure occurs during their execution, the + * destructor won't be called (how could it? since the object wasn't constructed!). This can lead + * to leaked resources allocated in the constructor prior to the failure. Thankfully there is + * a nice C++ syntax to handle this case: + * + * @code + * Foo::Foo(size_t s) try : m_size(s), m_buf(new uint32_t[s]) { + * ASSERT_POSTCONDITION(s&0xF==0, + * "object size is %u, but must be multiple of 16", s); + * } catch (...) { + * delete [] m_buf; + * // the exception will be automatically re-thrown + * } + * @endcode + * + * Unfortunately, this usage leaks the underlying, exception-based, implementation of the + * panic handling macros. For this reason, it is best to keep constructors simple and guarantee + * they can't fail. An _init()_ function with a factory can be used for actual initialization. + * + * + * ### Special considerations for Destructors ### + * + * In C++ destructors cannot throw exceptions and since the above macros internally use exceptions + * they cannot be used in destructors. Doing so will result in immediate termination of the + * program by _std::terminate()_.\n + * It is therefore best to always guarantee that destructors won't fail. In case of such a + * failure in a destructor the ASSERT_DESTRUCTOR() macro can be used instead, it + * will log the failure but won't terminate the program, instead it'll proceed as if nothing + * happened. 
Generally this will result in some resource leak which, eventually, will cause + * another failure (typically a postcondition violation).\n\n + * + * Rationale for this behavior: There are fundamentally no way to report a failure from a + * destructor in C++, violently terminating the process is inadequate because it again conflates + * failure reporting and failure handling; for instance a failure in glDeleteTextures() shouldn't + * be necessarily fatal (certainly not without saving the user's data first). The alternative + * would be for the caller to swallow the failure entirely, but that's not great either because the + * failure would go unnoticed. The solution retained here is a compromise. + * + * @see ASSERT_DESTRUCTOR + * + * ### Testing Code that Uses Panics ### + * + * Since panics use exceptions for their underlying implementation, you can test code that uses + * panics with EXPECT_THROW by doing the following things: + * \li Set panic mode to THROW (default is TERMINATE) + * \li Pass Panic to EXPECT_THROW as the exception type + * + * Example code for your test file: + * + * @code + * #include // since your code uses panics, this should include utils/Panic.hpp + * + * using utils::Panic; + * + * TEST(MyClassTest, value_that_causes_panic) { + * EXPECT_THROW(MyClass::function(value_that_causes_panic), Panic); + * } + * + * // ... other tests ... + * + * int main(int argc, char** argv) { + * ::testing::InitGoogleTest(&argc, argv); + * Panic::setMode(Panic::Mode::THROW); + * return RUN_ALL_TESTS(); + * } + * @endcode + * + */ + +namespace utils { + +// ----------------------------------------------------------------------------------------------- + +/** + * @ingroup errors + * + * \brief Base class of all exceptions thrown by all the ASSERT macros + * + * The Panic class provides the std::exception protocol, it is the base exception object + * used for all thrown exceptions. + */ +class UTILS_PUBLIC Panic { +public: + virtual ~Panic() noexcept; + + /** + * @return a detailed description of the error + * @see std::exception + */ + virtual const char* what() const noexcept = 0; + + /** + * Get the function name where the panic was detected + * @return a C string containing the function name where the panic was detected + */ + virtual const char* getFunction() const noexcept = 0; + + /** + * Get the file name where the panic was detected + * @return a C string containing the file name where the panic was detected + */ + virtual const char* getFile() const noexcept = 0; + + /** + * Get the line number in the file where the panic was detected + * @return an integer containing the line number in the file where the panic was detected + */ + virtual int getLine() const noexcept = 0; + + /** + * Logs this exception to the system-log + */ + virtual void log() const noexcept = 0; + + /** + * Get the CallStack when the panic was detected + * @return the CallStack when the panic was detected + */ + virtual const CallStack& getCallStack() const noexcept = 0; +}; + +// ----------------------------------------------------------------------------------------------- + +/** + * @ingroup errors + * + * \brief Concrete implementation of the Panic interface. + * + * The TPanic<> class implements the std::exception protocol as well as the Panic + * interface common to all exceptions thrown by the framework. 
+ */ +template +class UTILS_PUBLIC TPanic : public Panic { +public: + // std::exception protocol + const char* what() const noexcept override; + + // Panic interface + const char* getFunction() const noexcept override; + const char* getFile() const noexcept override; + int getLine() const noexcept override; + const CallStack& getCallStack() const noexcept override; + void log() const noexcept override; + + /** + * Depending on the mode set, either throws an exception of type T with the given reason plus + * extra information about the error-site, or logs the error and calls std::terminate(). + * This function never returns. + * @param function the name of the function where the error was detected + * @param file the file where the above function in implemented + * @param line the line in the above file where the error was detected + * @param format printf style string describing the error + * @see ASSERT_PRECONDITION, ASSERT_POSTCONDITION, ASSERT_ARITHMETIC + * @see PANIC_PRECONDITION, PANIC_POSTCONDITION, PANIC_ARITHMETIC + * @see setMode() + */ + static void panic(char const* function, char const* file, int line, const char* format, ...) + UTILS_NORETURN; + + /** + * Depending on the mode set, either throws an exception of type T with the given reason plus + * extra information about the error-site, or logs the error and calls std::terminate(). + * This function never returns. + * @param function the name of the function where the error was detected + * @param file the file where the above function in implemented + * @param line the line in the above file where the error was detected + * @param s std::string describing the error + * @see ASSERT_PRECONDITION, ASSERT_POSTCONDITION, ASSERT_ARITHMETIC + * @see PANIC_PRECONDITION, PANIC_POSTCONDITION, PANIC_ARITHMETIC + * @see setMode() + */ + static inline void panic(char const* function, char const* file, int line, const std::string& s) + UTILS_NORETURN { + panic(function, file, line, s.c_str()); + } + +protected: + /** + * Creates a Panic. + * @param reason a description of the cause of the error + */ + explicit TPanic(std::string reason); + + /** + * Creates a Panic with extra information about the error-site. + * @param function the name of the function where the error was detected + * @param file the file where the above function in implemented + * @param line the line in the above file where the error was detected + * @param reason a description of the cause of the error + */ + TPanic(char const* function, char const* file, int line, std::string reason); + + ~TPanic() override; + +private: + void buildMessage(); + + CallStack m_callstack; + std::string m_reason; + char const* const m_function = nullptr; + char const* const m_file = nullptr; + const int m_line = -1; + mutable std::string m_msg; +}; + +namespace details { +// these are private, don't use +void panicLog( + char const* function, char const* file, int line, const char* format, ...) noexcept; +} // namespace details + +// ----------------------------------------------------------------------------------------------- + +/** + * @ingroup errors + * + * ASSERT_PRECONDITION uses this Panic to report a precondition failure. + * @see ASSERT_PRECONDITION + */ +class UTILS_PUBLIC PreconditionPanic : public TPanic { + // Programming error, can be avoided + // e.g.: invalid arguments + using TPanic::TPanic; + friend class TPanic; +}; + +/** + * @ingroup errors + * + * ASSERT_POSTCONDITION uses this Panic to report a postcondition failure. 
+ * @see ASSERT_POSTCONDITION
+ */
+class UTILS_PUBLIC PostconditionPanic : public TPanic {
+    // Usually only detectable at runtime
+    // e.g.: dead-lock would occur, arithmetic errors
+    using TPanic::TPanic;
+    friend class TPanic;
+};
+
+/**
+ * @ingroup errors
+ *
+ * ASSERT_ARITHMETIC uses this Panic to report an arithmetic (postcondition) failure.
+ * @see ASSERT_ARITHMETIC
+ */
+class UTILS_PUBLIC ArithmeticPanic : public TPanic {
+    // A common case of post-condition error
+    // e.g.: underflow, overflow, internal computation errors
+    using TPanic::TPanic;
+    friend class TPanic;
+};
+
+// -----------------------------------------------------------------------------------------------
+} // namespace utils
+
+#ifndef NDEBUG
+# define PANIC_FILE(F) (F)
+#else
+# define PANIC_FILE(F) ""
+#endif
+
+/**
+ * PANIC_PRECONDITION is a macro that reports a PreconditionPanic
+ * @param format printf-style string describing the error in more detail
+ */
+#define PANIC_PRECONDITION(format, ...) \
+    ::utils::PreconditionPanic::panic(__PRETTY_FUNCTION__, \
+            PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__)
+
+/**
+ * PANIC_POSTCONDITION is a macro that reports a PostconditionPanic
+ * @param format printf-style string describing the error in more detail
+ */
+#define PANIC_POSTCONDITION(format, ...) \
+    ::utils::PostconditionPanic::panic(__PRETTY_FUNCTION__, \
+            PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__)
+
+/**
+ * PANIC_ARITHMETIC is a macro that reports an ArithmeticPanic
+ * @param format printf-style string describing the error in more detail
+ */
+#define PANIC_ARITHMETIC(format, ...) \
+    ::utils::ArithmeticPanic::panic(__PRETTY_FUNCTION__, \
+            PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__)
+
+/**
+ * PANIC_LOG is a macro that logs a Panic and continues as usual.
+ * @param format printf-style string describing the error in more detail
+ */
+#define PANIC_LOG(format, ...) \
+    ::utils::details::panicLog(__PRETTY_FUNCTION__, \
+            PANIC_FILE(__FILE__), __LINE__, format, ##__VA_ARGS__)
+
+/**
+ * @ingroup errors
+ *
+ * ASSERT_PRECONDITION is a macro that checks the given condition and reports a PreconditionPanic
+ * if it evaluates to false.
+ * @param cond a boolean expression
+ * @param format printf-style string describing the error in more detail
+ */
+#define ASSERT_PRECONDITION(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ? PANIC_PRECONDITION(format, ##__VA_ARGS__) : (void)0)
+
+#if defined(UTILS_EXCEPTIONS) || !defined(NDEBUG)
+#define ASSERT_PRECONDITION_NON_FATAL(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ? PANIC_PRECONDITION(format, ##__VA_ARGS__), false : true)
+#else
+#define ASSERT_PRECONDITION_NON_FATAL(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ? PANIC_LOG(format, ##__VA_ARGS__), false : true)
+#endif
+
+
+/**
+ * @ingroup errors
+ *
+ * ASSERT_POSTCONDITION is a macro that checks the given condition and reports a PostconditionPanic
+ * if it evaluates to false.
+ * @param cond a boolean expression
+ * @param format printf-style string describing the error in more detail
+ *
+ * Example:
+ * @code
+ *     void Foo::allocate(size_t size) {
+ *         m_buffer = malloc(size);
+ *         ASSERT_POSTCONDITION(m_buffer, "could not allocate %zu bytes", size);
+ *     }
+ * @endcode
+ */
+#define ASSERT_POSTCONDITION(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ? PANIC_POSTCONDITION(format, ##__VA_ARGS__) : (void)0)
+
+#if defined(UTILS_EXCEPTIONS) || !defined(NDEBUG)
+#define ASSERT_POSTCONDITION_NON_FATAL(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ? PANIC_POSTCONDITION(format, ##__VA_ARGS__), false : true)
+#else
+#define ASSERT_POSTCONDITION_NON_FATAL(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ? PANIC_LOG(format, ##__VA_ARGS__), false : true)
+#endif
+
+/**
+ * @ingroup errors
+ *
+ * ASSERT_ARITHMETIC is a macro that checks the given condition and reports an ArithmeticPanic
+ * if it evaluates to false.
+ * @param cond a boolean expression
+ * @param format printf-style string describing the error in more detail
+ *
+ * Example:
+ * @code
+ *     uint16_t Foo::toIndex(uint32_t v) {
+ *         ASSERT_ARITHMETIC(v < 65536, "overflow occurred");
+ *         return uint16_t(v);
+ *     }
+ * @endcode
+ */
+#define ASSERT_ARITHMETIC(cond, format, ...) \
+    (!(cond) ? PANIC_ARITHMETIC(format, ##__VA_ARGS__) : (void)0)
+
+#if defined(UTILS_EXCEPTIONS) || !defined(NDEBUG)
+#define ASSERT_ARITHMETIC_NON_FATAL(cond, format, ...) \
+    (!UTILS_LIKELY(cond) ?
PANIC_ARITHMETIC(format, ##__VA_ARGS__), false : true) +#else +#define ASSERT_ARITHMETIC_NON_FATAL(cond, format, ...) \ + (!UTILS_LIKELY(cond) ? PANIC_LOG(format, ##__VA_ARGS__), false : true) +#endif + +/** + * @ingroup errors + * + * ASSERT_DESTRUCTOR is a macro that checks the given condition and logs an error + * if it evaluates to false. + * @param cond a boolean expression + * @param format printf-style string describing the error in more details + * + * @warning Use this macro if a destructor can fail, which should be avoided at all costs. + * Unlike the other ASSERT macros, this will never result in the process termination. Instead, + * the error will be logged and the program will continue as if nothing happened. + * + * Example: + * @code + * Foo::~Foo() { + * glDeleteTextures(1, &m_texture); + * GLint err = glGetError(); + * ASSERT_DESTRUCTOR(err == GL_NO_ERROR, "cannot free GL resource!"); + * } + * @endcode + */ +#define ASSERT_DESTRUCTOR(cond, format, ...) (!(cond) ? PANIC_LOG(format, ##__VA_ARGS__) : (void)0) + +#endif // TNT_UTILS_PANIC_H diff --git a/ios/include/utils/Path.h b/ios/include/utils/Path.h index 59942d44..3dab02f2 100644 --- a/ios/include/utils/Path.h +++ b/ios/include/utils/Path.h @@ -1,290 +1,290 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_PATH_H_ -#define UTILS_PATH_H_ - -#include - -#include -#include -#include - -namespace utils { - -/** - * An abstract representation of file and directory paths. - */ -class UTILS_PUBLIC Path { -public: - /** - * Creates a new empty path. - */ - Path() = default; - ~Path() = default; - - /** - * Creates a new path with the specified pathname. - * - * @param pathname a non-null pathname string - */ - Path(const char* pathname); - - /** - * Creates a new path with the specified pathname. - * - * @param pathname a pathname string - */ - Path(const std::string& pathname); - - /** - * Tests whether the file or directory denoted by this abstract - * pathname exists. - * - * @return true if the file or directory denoted by this - * abstract pathname exists, false otherwise - */ - bool exists() const; - - /** - * Tests whether this abstract pathname represents a regular file. - * This method can only return true if the path exists. - * - * @return true if this pathname represents an existing file, - * false if the path doesn't exist or represents something - * else (directory, symlink, etc.) - */ - bool isFile() const; - - /** - * Tests whether this abstract pathname represents a directory. - * This method can only return true if the path exists. - * - * @return true if this pathname represents an existing directory, - * false if the path doesn't exist or represents a file - */ - bool isDirectory() const; - - /** - * Tests whether this path is empty. An empty path does not - * exist. 
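The _NON_FATAL assertion variants declared in Panic.h above evaluate to a boolean, true on success and false once the failure has been reported, so callers can back out instead of terminating. A minimal sketch of that pattern; beginUpload() and its parameters are illustrative, not part of the header:

    #include <utils/Panic.h>

    bool beginUpload(void* buffer, size_t size) {
        // Depending on the build, a failed check either throws a PreconditionPanic
        // or logs the error and makes the whole expression evaluate to false.
        if (!ASSERT_PRECONDITION_NON_FATAL(buffer && size > 0,
                "invalid buffer %p of size %zu", buffer, size)) {
            return false;
        }
        // ... perform the upload ...
        return true;
    }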
- * - * @return true if the underlying abstract pathname is empty, - * false otherwise - */ - bool isEmpty() const { return m_path.empty(); } - - const char* c_str() const { return m_path.c_str(); } - - /** - * Replaces the abstract pathname of this object with the - * specified pathname. - * - * @param pathname a pathname string - */ - void setPath(const std::string& pathname) { - m_path = getCanonicalPath(pathname); - } - - /** - * @return the canonical pathname this path represents - */ - const std::string& getPath() const { return m_path; } - - /** - * Returns the parent of this path as Path. - * @return a new path containing the parent of this path - */ - Path getParent() const; - - /** - * Returns ancestor path where "0" is the immediate parent. - * @return a new path containing the ancestor of this path - */ - Path getAncestor(int n) const; - - /** - * Returns the name of the file or directory represented by - * this abstract pathname. - * - * @return the name of the file or directory represented by - * this abstract pathname, or an empty string if - * this path is empty - */ - std::string getName() const; - - /** - * Returns the name of the file or directory represented by - * this abstract pathname without its extension. - * - * @return the name of the file or directory represented by - * this abstract pathname, or an empty string if - * this path is empty - */ - std::string getNameWithoutExtension() const; - - /** - * Returns the file extension (after the ".") if one is present. - * Returns the empty string if no filename is present or if the - * path is a directory. - * - * @return the file extension (if one is present and - * this is not a directory), else the empty string. - */ - std::string getExtension() const; - - /** - * Returns the absolute representation of this path. - * If this path's pathname starts with a leading '/', - * the returned path is equal to this path. Otherwise, - * this path's pathname is concatenated with the current - * working directory and the result is returned. - * - * @return a new path containing the absolute representation - * of this path - */ - Path getAbsolutePath() const; - - /** - * @return true if this abstract pathname is not empty - * and starts with a leading '/', false otherwise - */ - bool isAbsolute() const; - - /** - * Splits this object's abstract pathname in a vector of file - * and directory name. If the underlying abstract pathname - * starts with a '/', the returned vector's first element - * will be the string "/". - * - * @return a vector of strings, empty if this path is empty - */ - std::vector split() const; - - /** - * Concatenates the specified path with this path in a new - * path object. - * - * @note if the pathname to concatenate with starts with - * a leading '/' then that pathname is returned without - * being concatenated to this object's pathname. - * - * @param path the path to concatenate with - * - * @return the concatenation of the two paths - */ - Path concat(const Path& path) const; - - /** - * Concatenates the specified path with this path and - * stores the result in this path. - * - * @note if the pathname to concatenate with starts with - * a leading '/' then that pathname replaces this object's - * pathname. 
- * - * @param path the path to concatenate with - */ - void concatToSelf(const Path& path); - - operator std::string const&() const { return m_path; } - - Path operator+(const Path& rhs) const { return concat(rhs); } - Path& operator+=(const Path& rhs) { - concatToSelf(rhs); - return *this; - } - - bool operator==(const Path& rhs) const { return m_path == rhs.m_path; } - bool operator!=(const Path& rhs) const { return m_path != rhs.m_path; } - - bool operator<(const Path& rhs) const { return m_path < rhs.m_path; } - bool operator>(const Path& rhs) const { return m_path > rhs.m_path; } - - friend std::ostream& operator<<(std::ostream& os, const Path& path); - - /** - * Returns a canonical copy of the specified pathname by removing - * unnecessary path segments such as ".", ".." and "/". - * - * @param pathname a pathname string - * - * @return the canonical representation of the specified pathname - */ - static std::string getCanonicalPath(const std::string& pathname); - - /** - * This method is equivalent to calling root.concat(leaf). - */ - static Path concat(const std::string& root, const std::string& leaf); - - /** - * @return a path representing the current working directory - */ - static Path getCurrentDirectory(); - - /** - * @return a path representing the current executable - */ - static Path getCurrentExecutable(); - - /** - * @return a path representing a directory where temporary files can be stored - */ - static Path getTemporaryDirectory(); - - /** - * Creates a directory denoted by the given path. - * This is not recursive and doesn't create intermediate directories. - * - * @return True if directory was successfully created. - * When false, errno should have details on actual error. - */ - bool mkdir() const; - - /** - * Creates a directory denoted by the given path. - * This is recursive and parent directories will be created if they do not - * exist. - * - * @return True if directory was successfully created or already exists. - * When false, errno should have details on actual error. - */ - bool mkdirRecursive() const; - - /** - * Deletes this file. - * - * @return True if file was successfully deleted. - * When false, errno should have details on actual error. - */ - bool unlinkFile(); - - /** - * Lists the contents of this directory, skipping hidden files. - * - * @return A vector of paths of the contents of the directory. If the path points to a file, - * nonexistent directory, or empty directory, an empty vector is returned. - */ - std::vector listContents() const; - -private: - std::string m_path; -}; - -} // namespace utils - -#endif // UTILS_PATH_H_ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_PATH_H +#define TNT_UTILS_PATH_H + +#include + +#include +#include +#include + +namespace utils { + +/** + * An abstract representation of file and directory paths. + */ +class UTILS_PUBLIC Path { +public: + /** + * Creates a new empty path. 
+ */ + Path() = default; + ~Path() = default; + + /** + * Creates a new path with the specified pathname. + * + * @param pathname a non-null pathname string + */ + Path(const char* pathname); + + /** + * Creates a new path with the specified pathname. + * + * @param pathname a pathname string + */ + Path(const std::string& pathname); + + /** + * Tests whether the file or directory denoted by this abstract + * pathname exists. + * + * @return true if the file or directory denoted by this + * abstract pathname exists, false otherwise + */ + bool exists() const; + + /** + * Tests whether this abstract pathname represents a regular file. + * This method can only return true if the path exists. + * + * @return true if this pathname represents an existing file, + * false if the path doesn't exist or represents something + * else (directory, symlink, etc.) + */ + bool isFile() const; + + /** + * Tests whether this abstract pathname represents a directory. + * This method can only return true if the path exists. + * + * @return true if this pathname represents an existing directory, + * false if the path doesn't exist or represents a file + */ + bool isDirectory() const; + + /** + * Tests whether this path is empty. An empty path does not + * exist. + * + * @return true if the underlying abstract pathname is empty, + * false otherwise + */ + bool isEmpty() const { return m_path.empty(); } + + const char* c_str() const { return m_path.c_str(); } + + /** + * Replaces the abstract pathname of this object with the + * specified pathname. + * + * @param pathname a pathname string + */ + void setPath(const std::string& pathname) { + m_path = getCanonicalPath(pathname); + } + + /** + * @return the canonical pathname this path represents + */ + const std::string& getPath() const { return m_path; } + + /** + * Returns the parent of this path as Path. + * @return a new path containing the parent of this path + */ + Path getParent() const; + + /** + * Returns ancestor path where "0" is the immediate parent. + * @return a new path containing the ancestor of this path + */ + Path getAncestor(int n) const; + + /** + * Returns the name of the file or directory represented by + * this abstract pathname. + * + * @return the name of the file or directory represented by + * this abstract pathname, or an empty string if + * this path is empty + */ + std::string getName() const; + + /** + * Returns the name of the file or directory represented by + * this abstract pathname without its extension. + * + * @return the name of the file or directory represented by + * this abstract pathname, or an empty string if + * this path is empty + */ + std::string getNameWithoutExtension() const; + + /** + * Returns the file extension (after the ".") if one is present. + * Returns the empty string if no filename is present or if the + * path is a directory. + * + * @return the file extension (if one is present and + * this is not a directory), else the empty string. + */ + std::string getExtension() const; + + /** + * Returns the absolute representation of this path. + * If this path's pathname starts with a leading '/', + * the returned path is equal to this path. Otherwise, + * this path's pathname is concatenated with the current + * working directory and the result is returned. 
+ * + * @return a new path containing the absolute representation + * of this path + */ + Path getAbsolutePath() const; + + /** + * @return true if this abstract pathname is not empty + * and starts with a leading '/', false otherwise + */ + bool isAbsolute() const; + + /** + * Splits this object's abstract pathname in a vector of file + * and directory name. If the underlying abstract pathname + * starts with a '/', the returned vector's first element + * will be the string "/". + * + * @return a vector of strings, empty if this path is empty + */ + std::vector split() const; + + /** + * Concatenates the specified path with this path in a new + * path object. + * + * @note if the pathname to concatenate with starts with + * a leading '/' then that pathname is returned without + * being concatenated to this object's pathname. + * + * @param path the path to concatenate with + * + * @return the concatenation of the two paths + */ + Path concat(const Path& path) const; + + /** + * Concatenates the specified path with this path and + * stores the result in this path. + * + * @note if the pathname to concatenate with starts with + * a leading '/' then that pathname replaces this object's + * pathname. + * + * @param path the path to concatenate with + */ + void concatToSelf(const Path& path); + + operator std::string const&() const { return m_path; } + + Path operator+(const Path& rhs) const { return concat(rhs); } + Path& operator+=(const Path& rhs) { + concatToSelf(rhs); + return *this; + } + + bool operator==(const Path& rhs) const { return m_path == rhs.m_path; } + bool operator!=(const Path& rhs) const { return m_path != rhs.m_path; } + + bool operator<(const Path& rhs) const { return m_path < rhs.m_path; } + bool operator>(const Path& rhs) const { return m_path > rhs.m_path; } + + friend std::ostream& operator<<(std::ostream& os, const Path& path); + + /** + * Returns a canonical copy of the specified pathname by removing + * unnecessary path segments such as ".", ".." and "/". + * + * @param pathname a pathname string + * + * @return the canonical representation of the specified pathname + */ + static std::string getCanonicalPath(const std::string& pathname); + + /** + * This method is equivalent to calling root.concat(leaf). + */ + static Path concat(const std::string& root, const std::string& leaf); + + /** + * @return a path representing the current working directory + */ + static Path getCurrentDirectory(); + + /** + * @return a path representing the current executable + */ + static Path getCurrentExecutable(); + + /** + * @return a path representing a directory where temporary files can be stored + */ + static Path getTemporaryDirectory(); + + /** + * Creates a directory denoted by the given path. + * This is not recursive and doesn't create intermediate directories. + * + * @return True if directory was successfully created. + * When false, errno should have details on actual error. + */ + bool mkdir() const; + + /** + * Creates a directory denoted by the given path. + * This is recursive and parent directories will be created if they do not + * exist. + * + * @return True if directory was successfully created or already exists. + * When false, errno should have details on actual error. + */ + bool mkdirRecursive() const; + + /** + * Deletes this file. + * + * @return True if file was successfully deleted. + * When false, errno should have details on actual error. + */ + bool unlinkFile(); + + /** + * Lists the contents of this directory, skipping hidden files. 
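For orientation, a short usage sketch of the utils::Path API declared in this header; the "assets" directory and setupAssetDir() are illustrative names, and only calls declared in this class are used:

    #include <utils/Path.h>

    #include <iostream>

    void setupAssetDir() {
        // <directory of the running executable>/assets
        utils::Path assets = utils::Path::getCurrentExecutable().getParent() + "assets";
        if (!assets.isDirectory()) {
            assets.mkdirRecursive();           // creates intermediate directories as needed
        }
        for (utils::Path const& entry : assets.listContents()) {
            std::cout << entry << std::endl;   // Path provides operator<<
        }
    }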
+ * + * @return A vector of paths of the contents of the directory. If the path points to a file, + * nonexistent directory, or empty directory, an empty vector is returned. + */ + std::vector listContents() const; + +private: + std::string m_path; +}; + +} // namespace utils + +#endif // TNT_UTILS_PATH_H diff --git a/ios/include/utils/Profiler.h b/ios/include/utils/Profiler.h index 87a3d121..f41bd1e9 100644 --- a/ios/include/utils/Profiler.h +++ b/ios/include/utils/Profiler.h @@ -1,212 +1,212 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_PROFILER_H -#define TNT_UTILS_PROFILER_H - -#include -#include -#include - -#include // note: This is safe (only used inline) - -#if defined(__linux__) -# include -# include -# include -#endif - -#include - -namespace utils { - -class Profiler { -public: - enum { - INSTRUCTIONS = 0, // must be zero - CPU_CYCLES = 1, - DCACHE_REFS = 2, - DCACHE_MISSES = 3, - BRANCHES = 4, - BRANCH_MISSES = 5, - ICACHE_REFS = 6, - ICACHE_MISSES = 7, - - // Must be last one - EVENT_COUNT - }; - - enum { - EV_CPU_CYCLES = 1u << CPU_CYCLES, - EV_L1D_REFS = 1u << DCACHE_REFS, - EV_L1D_MISSES = 1u << DCACHE_MISSES, - EV_BPU_REFS = 1u << BRANCHES, - EV_BPU_MISSES = 1u << BRANCH_MISSES, - EV_L1I_REFS = 1u << ICACHE_REFS, - EV_L1I_MISSES = 1u << ICACHE_MISSES, - // helpers - EV_L1D_RATES = EV_L1D_REFS | EV_L1D_MISSES, - EV_L1I_RATES = EV_L1I_REFS | EV_L1I_MISSES, - EV_BPU_RATES = EV_BPU_REFS | EV_BPU_MISSES, - }; - - Profiler() noexcept; // must call resetEvents() - explicit Profiler(uint32_t eventMask) noexcept; - ~Profiler() noexcept; - - Profiler(const Profiler& rhs) = delete; - Profiler(Profiler&& rhs) = delete; - Profiler& operator=(const Profiler& rhs) = delete; - Profiler& operator=(Profiler&& rhs) = delete; - - // selects which events are enabled. 
- uint32_t resetEvents(uint32_t eventMask) noexcept; - - uint32_t getEnabledEvents() const noexcept { return mEnabledEvents; } - - // could return false if performance counters are not supported/enabled - bool isValid() const { return mCountersFd[0] >= 0; } - - class Counters { - friend class Profiler; - uint64_t nr; - uint64_t time_enabled; - uint64_t time_running; - struct { - uint64_t value; - uint64_t id; - } counters[Profiler::EVENT_COUNT]; - - friend Counters operator-(Counters lhs, const Counters& rhs) noexcept { - lhs.nr -= rhs.nr; - lhs.time_enabled -= rhs.time_enabled; - lhs.time_running -= rhs.time_running; - for (size_t i = 0; i < EVENT_COUNT; ++i) { - lhs.counters[i].value -= rhs.counters[i].value; - } - return lhs; - } - - public: - uint64_t getInstructions() const { return counters[INSTRUCTIONS].value; } - uint64_t getCpuCycles() const { return counters[CPU_CYCLES].value; } - uint64_t getL1DReferences() const { return counters[DCACHE_REFS].value; } - uint64_t getL1DMisses() const { return counters[DCACHE_MISSES].value; } - uint64_t getL1IReferences() const { return counters[ICACHE_REFS].value; } - uint64_t getL1IMisses() const { return counters[ICACHE_MISSES].value; } - uint64_t getBranchInstructions() const { return counters[BRANCHES].value; } - uint64_t getBranchMisses() const { return counters[BRANCH_MISSES].value; } - - std::chrono::duration getWallTime() const { - return std::chrono::duration(time_enabled); - } - - std::chrono::duration getRunningTime() const { - return std::chrono::duration(time_running); - } - - double getIPC() const noexcept { - uint64_t cpuCycles = getCpuCycles(); - uint64_t instructions = getInstructions(); - return double(instructions) / double(cpuCycles); - } - - double getCPI() const noexcept { - uint64_t cpuCycles = getCpuCycles(); - uint64_t instructions = getInstructions(); - return double(cpuCycles) / double(instructions); - } - - double getL1DMissRate() const noexcept { - uint64_t cacheReferences = getL1DReferences(); - uint64_t cacheMisses = getL1DMisses(); - return double(cacheMisses) / double(cacheReferences); - } - - double getL1DHitRate() const noexcept { - return 1.0 - getL1DMissRate(); - } - - double getL1IMissRate() const noexcept { - uint64_t cacheReferences = getL1IReferences(); - uint64_t cacheMisses = getL1IMisses(); - return double(cacheMisses) / double(cacheReferences); - } - - double getL1IHitRate() const noexcept { - return 1.0 - getL1IMissRate(); - } - - double getBranchMissRate() const noexcept { - uint64_t branchReferences = getBranchInstructions(); - uint64_t branchMisses = getBranchMisses(); - return double(branchMisses) / double(branchReferences); - } - - double getBranchHitRate() const noexcept { - return 1.0 - getBranchMissRate(); - } - - double getMPKI(uint64_t misses) const noexcept { - return (misses * 1000.0) / getInstructions(); - } - }; - -#if defined(__linux__) - - void reset() noexcept { - int fd = mCountersFd[0]; - ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); - } - - void start() noexcept { - int fd = mCountersFd[0]; - ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP); - } - - void stop() noexcept { - int fd = mCountersFd[0]; - ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP); - } - - Counters readCounters() noexcept; - -#else // !__linux__ - - void reset() noexcept { } - void start() noexcept { } - void stop() noexcept { } - Counters readCounters() noexcept { return {}; } - -#endif // __linux__ - - bool hasBranchRates() const noexcept { - return (mCountersFd[BRANCHES] >= 0) && 
(mCountersFd[BRANCH_MISSES] >= 0); - } - - bool hasICacheRates() const noexcept { - return (mCountersFd[ICACHE_REFS] >= 0) && (mCountersFd[ICACHE_MISSES] >= 0); - } - -private: - UTILS_UNUSED uint8_t mIds[EVENT_COUNT] = {}; - int mCountersFd[EVENT_COUNT]; - uint32_t mEnabledEvents = 0; -}; - -} // namespace utils - -#endif // TNT_UTILS_PROFILER_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_PROFILER_H +#define TNT_UTILS_PROFILER_H + +#include +#include +#include + +#include // note: This is safe (only used inline) + +#if defined(__linux__) +# include +# include +# include +#endif + +#include + +namespace utils { + +class Profiler { +public: + enum { + INSTRUCTIONS = 0, // must be zero + CPU_CYCLES = 1, + DCACHE_REFS = 2, + DCACHE_MISSES = 3, + BRANCHES = 4, + BRANCH_MISSES = 5, + ICACHE_REFS = 6, + ICACHE_MISSES = 7, + + // Must be last one + EVENT_COUNT + }; + + enum { + EV_CPU_CYCLES = 1u << CPU_CYCLES, + EV_L1D_REFS = 1u << DCACHE_REFS, + EV_L1D_MISSES = 1u << DCACHE_MISSES, + EV_BPU_REFS = 1u << BRANCHES, + EV_BPU_MISSES = 1u << BRANCH_MISSES, + EV_L1I_REFS = 1u << ICACHE_REFS, + EV_L1I_MISSES = 1u << ICACHE_MISSES, + // helpers + EV_L1D_RATES = EV_L1D_REFS | EV_L1D_MISSES, + EV_L1I_RATES = EV_L1I_REFS | EV_L1I_MISSES, + EV_BPU_RATES = EV_BPU_REFS | EV_BPU_MISSES, + }; + + Profiler() noexcept; // must call resetEvents() + explicit Profiler(uint32_t eventMask) noexcept; + ~Profiler() noexcept; + + Profiler(const Profiler& rhs) = delete; + Profiler(Profiler&& rhs) = delete; + Profiler& operator=(const Profiler& rhs) = delete; + Profiler& operator=(Profiler&& rhs) = delete; + + // selects which events are enabled. 
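A typical way to drive this Profiler, assuming a Linux host where perf counters are available; runWorkload() is a placeholder for the code being measured:

    #include <utils/Profiler.h>

    #include <cstdio>

    void profileWorkload() {
        utils::Profiler profiler(
                utils::Profiler::EV_CPU_CYCLES | utils::Profiler::EV_L1D_RATES);
        if (profiler.isValid()) {               // false if the counters could not be enabled
            profiler.start();
            runWorkload();                      // placeholder for the measured code
            profiler.stop();
            utils::Profiler::Counters c = profiler.readCounters();
            std::printf("IPC: %.2f  L1D miss rate: %.2f%%\n",
                    c.getIPC(), 100.0 * c.getL1DMissRate());
        }
    }

On other platforms the same code compiles, but readCounters() returns zeroed counters, matching the non-Linux stubs in this header.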
+ uint32_t resetEvents(uint32_t eventMask) noexcept; + + uint32_t getEnabledEvents() const noexcept { return mEnabledEvents; } + + // could return false if performance counters are not supported/enabled + bool isValid() const { return mCountersFd[0] >= 0; } + + class Counters { + friend class Profiler; + uint64_t nr; + uint64_t time_enabled; + uint64_t time_running; + struct { + uint64_t value; + uint64_t id; + } counters[Profiler::EVENT_COUNT]; + + friend Counters operator-(Counters lhs, const Counters& rhs) noexcept { + lhs.nr -= rhs.nr; + lhs.time_enabled -= rhs.time_enabled; + lhs.time_running -= rhs.time_running; + for (size_t i = 0; i < EVENT_COUNT; ++i) { + lhs.counters[i].value -= rhs.counters[i].value; + } + return lhs; + } + + public: + uint64_t getInstructions() const { return counters[INSTRUCTIONS].value; } + uint64_t getCpuCycles() const { return counters[CPU_CYCLES].value; } + uint64_t getL1DReferences() const { return counters[DCACHE_REFS].value; } + uint64_t getL1DMisses() const { return counters[DCACHE_MISSES].value; } + uint64_t getL1IReferences() const { return counters[ICACHE_REFS].value; } + uint64_t getL1IMisses() const { return counters[ICACHE_MISSES].value; } + uint64_t getBranchInstructions() const { return counters[BRANCHES].value; } + uint64_t getBranchMisses() const { return counters[BRANCH_MISSES].value; } + + std::chrono::duration getWallTime() const { + return std::chrono::duration(time_enabled); + } + + std::chrono::duration getRunningTime() const { + return std::chrono::duration(time_running); + } + + double getIPC() const noexcept { + uint64_t cpuCycles = getCpuCycles(); + uint64_t instructions = getInstructions(); + return double(instructions) / double(cpuCycles); + } + + double getCPI() const noexcept { + uint64_t cpuCycles = getCpuCycles(); + uint64_t instructions = getInstructions(); + return double(cpuCycles) / double(instructions); + } + + double getL1DMissRate() const noexcept { + uint64_t cacheReferences = getL1DReferences(); + uint64_t cacheMisses = getL1DMisses(); + return double(cacheMisses) / double(cacheReferences); + } + + double getL1DHitRate() const noexcept { + return 1.0 - getL1DMissRate(); + } + + double getL1IMissRate() const noexcept { + uint64_t cacheReferences = getL1IReferences(); + uint64_t cacheMisses = getL1IMisses(); + return double(cacheMisses) / double(cacheReferences); + } + + double getL1IHitRate() const noexcept { + return 1.0 - getL1IMissRate(); + } + + double getBranchMissRate() const noexcept { + uint64_t branchReferences = getBranchInstructions(); + uint64_t branchMisses = getBranchMisses(); + return double(branchMisses) / double(branchReferences); + } + + double getBranchHitRate() const noexcept { + return 1.0 - getBranchMissRate(); + } + + double getMPKI(uint64_t misses) const noexcept { + return (misses * 1000.0) / getInstructions(); + } + }; + +#if defined(__linux__) + + void reset() noexcept { + int fd = mCountersFd[0]; + ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); + } + + void start() noexcept { + int fd = mCountersFd[0]; + ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP); + } + + void stop() noexcept { + int fd = mCountersFd[0]; + ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP); + } + + Counters readCounters() noexcept; + +#else // !__linux__ + + void reset() noexcept { } + void start() noexcept { } + void stop() noexcept { } + Counters readCounters() noexcept { return {}; } + +#endif // __linux__ + + bool hasBranchRates() const noexcept { + return (mCountersFd[BRANCHES] >= 0) && 
(mCountersFd[BRANCH_MISSES] >= 0); + } + + bool hasICacheRates() const noexcept { + return (mCountersFd[ICACHE_REFS] >= 0) && (mCountersFd[ICACHE_MISSES] >= 0); + } + +private: + UTILS_UNUSED uint8_t mIds[EVENT_COUNT] = {}; + int mCountersFd[EVENT_COUNT]; + uint32_t mEnabledEvents = 0; +}; + +} // namespace utils + +#endif // TNT_UTILS_PROFILER_H diff --git a/ios/include/utils/Range.h b/ios/include/utils/Range.h index 08e668dc..ba922bd3 100644 --- a/ios/include/utils/Range.h +++ b/ios/include/utils/Range.h @@ -1,86 +1,86 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_RANGE_H -#define TNT_UTILS_RANGE_H - -#include - -#include -#include - -#include - -namespace utils { - -template -struct Range { - using value_type = T; - T first = 0; - T last = 0; - - size_t size() const noexcept { return last - first; } - bool empty() const noexcept { return !size(); } - - class const_iterator { - friend struct Range; - T value = {}; - - public: - const_iterator() noexcept = default; - explicit const_iterator(T value) noexcept : value(value) {} - - using value_type = T; - using pointer = value_type*; - using difference_type = ptrdiff_t; - using iterator_category = std::random_access_iterator_tag; - - - const value_type operator*() const { return value; } - const value_type operator[](size_t n) const { return value + n; } - - const_iterator& operator++() { ++value; return *this; } - const_iterator& operator--() { --value; return *this; } - - const const_iterator operator++(int) { const_iterator t(value); value++; return t; } - const const_iterator operator--(int) { const_iterator t(value); value--; return t; } - - const_iterator operator+(size_t rhs) const { return { value + rhs }; } - const_iterator operator+(size_t rhs) { return { value + rhs }; } - const_iterator operator-(size_t rhs) const { return { value - rhs }; } - - difference_type operator-(const_iterator const& rhs) const { return value - rhs.value; } - - bool operator==(const_iterator const& rhs) const { return (value == rhs.value); } - bool operator!=(const_iterator const& rhs) const { return (value != rhs.value); } - bool operator>=(const_iterator const& rhs) const { return (value >= rhs.value); } - bool operator> (const_iterator const& rhs) const { return (value > rhs.value); } - bool operator<=(const_iterator const& rhs) const { return (value <= rhs.value); } - bool operator< (const_iterator const& rhs) const { return (value < rhs.value); } - }; - - const_iterator begin() noexcept { return const_iterator{ first }; } - const_iterator end() noexcept { return const_iterator{ last }; } - const_iterator begin() const noexcept { return const_iterator{ first }; } - const_iterator end() const noexcept { return const_iterator{ last }; } - - const_iterator front() const noexcept { return const_iterator{ first }; } - const_iterator back() const noexcept { return const_iterator{ last - 1 }; } -}; - -} // namespace utils - -#endif // TNT_UTILS_RANGE_H +/* + * 
Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_RANGE_H +#define TNT_UTILS_RANGE_H + +#include + +#include +#include + +#include + +namespace utils { + +template +struct Range { + using value_type = T; + T first = 0; + T last = 0; + + size_t size() const noexcept { return last - first; } + bool empty() const noexcept { return !size(); } + + class const_iterator { + friend struct Range; + T value = {}; + + public: + const_iterator() noexcept = default; + explicit const_iterator(T value) noexcept : value(value) {} + + using value_type = T; + using pointer = value_type*; + using difference_type = ptrdiff_t; + using iterator_category = std::random_access_iterator_tag; + + + const value_type operator*() const { return value; } + const value_type operator[](size_t n) const { return value + n; } + + const_iterator& operator++() { ++value; return *this; } + const_iterator& operator--() { --value; return *this; } + + const const_iterator operator++(int) { const_iterator t(value); value++; return t; } + const const_iterator operator--(int) { const_iterator t(value); value--; return t; } + + const_iterator operator+(size_t rhs) const { return { value + rhs }; } + const_iterator operator+(size_t rhs) { return { value + rhs }; } + const_iterator operator-(size_t rhs) const { return { value - rhs }; } + + difference_type operator-(const_iterator const& rhs) const { return value - rhs.value; } + + bool operator==(const_iterator const& rhs) const { return (value == rhs.value); } + bool operator!=(const_iterator const& rhs) const { return (value != rhs.value); } + bool operator>=(const_iterator const& rhs) const { return (value >= rhs.value); } + bool operator> (const_iterator const& rhs) const { return (value > rhs.value); } + bool operator<=(const_iterator const& rhs) const { return (value <= rhs.value); } + bool operator< (const_iterator const& rhs) const { return (value < rhs.value); } + }; + + const_iterator begin() noexcept { return const_iterator{ first }; } + const_iterator end() noexcept { return const_iterator{ last }; } + const_iterator begin() const noexcept { return const_iterator{ first }; } + const_iterator end() const noexcept { return const_iterator{ last }; } + + const_iterator front() const noexcept { return const_iterator{ first }; } + const_iterator back() const noexcept { return const_iterator{ last - 1 }; } +}; + +} // namespace utils + +#endif // TNT_UTILS_RANGE_H diff --git a/ios/include/utils/SingleInstanceComponentManager.h b/ios/include/utils/SingleInstanceComponentManager.h index 76f31165..142869b5 100644 --- a/ios/include/utils/SingleInstanceComponentManager.h +++ b/ios/include/utils/SingleInstanceComponentManager.h @@ -1,314 +1,314 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
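Range models a half-open interval of integral values rather than a view over memory; a minimal sketch of iterating one (uint32_t is just an example element type):

    #include <utils/Range.h>

    void iterateRange() {
        utils::Range<uint32_t> r{ 2u, 6u };   // half-open: covers 2, 3, 4, 5
        size_t count = r.size();              // 4
        for (uint32_t value : r) {
            // value takes 2, 3, 4, 5 in order
            (void)value;
        }
        (void)count;
    }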
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_SINGLEINSTANCECOMPONENTMANAGER_H -#define TNT_UTILS_SINGLEINSTANCECOMPONENTMANAGER_H - -#include -#include -#include -#include - -#include - -#include -#include -#include - -namespace utils { - -class EntityManager; - -/* - * Helper class to create single instance component managers. - * - * This handles the component's storage as a structure-of-arrays, as well - * as the garbage collection. - * - * This is intended to be used as base class for a real component manager. When doing so, - * and the real component manager is a public API, make sure to forward the public methods - * to the implementation. - * - */ -template -class UTILS_PUBLIC SingleInstanceComponentManager { -private: - - // this is just to avoid using std::default_random_engine, since we're in a public header. - class default_random_engine { - uint32_t mState = 1u; // must be 0 < seed < 0x7fffffff - public: - inline uint32_t operator()() noexcept { - return mState = uint32_t((uint64_t(mState) * 48271u) % 0x7fffffffu); - } - }; - -protected: - static constexpr size_t ENTITY_INDEX = sizeof ... (Elements); - -public: - using SoA = StructureOfArrays; - - using Instance = EntityInstanceBase::Type; - - SingleInstanceComponentManager() noexcept { - // We always start with a dummy entry because index=0 is reserved. The component - // at index = 0, is guaranteed to be default-initialized. - // Sub-classes can use this to their advantage. - mData.push_back(); - } - - SingleInstanceComponentManager(SingleInstanceComponentManager&& rhs) noexcept {/* = default */} - SingleInstanceComponentManager& operator=(SingleInstanceComponentManager&& rhs) noexcept {/* = default */} - ~SingleInstanceComponentManager() noexcept = default; - - // not copyable - SingleInstanceComponentManager(SingleInstanceComponentManager const& rhs) = delete; - SingleInstanceComponentManager& operator=(SingleInstanceComponentManager const& rhs) = delete; - - - // returns true if the given Entity has a component of this Manager - bool hasComponent(Entity e) const noexcept { - return getInstance(e) != 0; - } - - // Get instance of this Entity to be used to retrieve components - UTILS_NOINLINE - Instance getInstance(Entity e) const noexcept { - auto const& map = mInstanceMap; - // find() generates quite a bit of code - auto pos = map.find(e); - return pos != map.end() ? pos->second : 0; - } - - // returns the number of components (i.e. size of each arrays) - size_t getComponentCount() const noexcept { - // The array as an extra dummy component at index 0, so the visible count is 1 less. - return mData.size() - 1; - } - - bool empty() const noexcept { - return getComponentCount() == 0; - } - - // returns a pointer to the Entity array. This is basically the list - // of entities this component manager handles. - // The pointer becomes invalid when adding or removing a component. - Entity const* getEntities() const noexcept { - return begin(); - } - - Entity getEntity(Instance i) const noexcept { - return elementAt(i); - } - - // Add a component to the given Entity. 
If the entity already has a component from this - // manager, this function is a no-op. - // This invalidates all pointers components. - inline Instance addComponent(Entity e); - - // Removes a component from the given entity. - // This invalidates all pointers components. - inline Instance removeComponent(Entity e); - - // trigger one round of garbage collection. this is intended to be called on a regular - // basis. This gc gives up after it cannot randomly free 'ratio' component in a row. - void gc(const EntityManager& em, size_t ratio = 4) noexcept { - gc(em, ratio, [this](Entity e) { - removeComponent(e); - }); - } - - // return the first instance - Instance begin() const noexcept { return 1u; } - - // return the past-the-last instance - Instance end() const noexcept { return Instance(begin() + getComponentCount()); } - - // return a pointer to the first element of the ElementIndex'th array - template - typename SoA::template TypeAt* begin() noexcept { - return mData.template data() + 1; - } - - template - typename SoA::template TypeAt const* begin() const noexcept { - return mData.template data() + 1; - } - - // return a pointer to the past-the-end element of the ElementIndex'th array - template - typename SoA::template TypeAt* end() noexcept { - return begin() + getComponentCount(); - } - - template - typename SoA::template TypeAt const* end() const noexcept { - return begin() + getComponentCount(); - } - - // return a Slice<> - template - Slice> slice() noexcept { - return { begin(), end() }; - } - - template - Slice> slice() const noexcept { - return { begin(), end() }; - } - - // return a reference to the index'th element of the ElementIndex'th array - template - typename SoA::template TypeAt& elementAt(Instance index) noexcept { - assert(index); - return data()[index]; - } - - template - typename SoA::template TypeAt const& elementAt(Instance index) const noexcept { - assert(index); - return data()[index]; - } - - // returns a pointer to the RAW ARRAY of components including the first dummy component - // Use with caution. 
- template - typename SoA::template TypeAt const* raw_array() const noexcept { - return data(); - } - - // We need our own version of Field because mData is private - template - struct Field : public SoA::template Field { - Field(SingleInstanceComponentManager& soa, EntityInstanceBase::Type i) noexcept - : SoA::template Field{ soa.mData, i } { - } - using SoA::template Field::operator =; - }; - -protected: - template - typename SoA::template TypeAt* data() noexcept { - return mData.template data(); - } - - template - typename SoA::template TypeAt const* data() const noexcept { - return mData.template data(); - } - - // swap only internals - void swap(Instance i, Instance j) noexcept { - assert(i); - assert(j); - if (i && j) { - // update the index map - auto& map = mInstanceMap; - Entity& ei = elementAt(i); - Entity& ej = elementAt(j); - std::swap(ei, ej); - if (ei) { - map[ei] = i; - } - if (ej) { - map[ej] = j; - } - } - } - - template - void gc(const EntityManager& em, size_t ratio, - REMOVE removeComponent) noexcept { - Entity const* entities = getEntities(); - size_t count = getComponentCount(); - size_t aliveInARow = 0; - default_random_engine& rng = mRng; - #pragma nounroll - while (count && aliveInARow < ratio) { - // note: using the modulo favorizes lower number - size_t i = rng() % count; - if (UTILS_LIKELY(em.isAlive(entities[i]))) { - ++aliveInARow; - continue; - } - aliveInARow = 0; - count--; - removeComponent(entities[i]); - } - } - -protected: - SoA mData; - -private: - // maps an entity to an instance index - tsl::robin_map mInstanceMap; - default_random_engine mRng; -}; - -// Keep these outside of the class because CLion has trouble parsing them -template -typename SingleInstanceComponentManager::Instance -SingleInstanceComponentManager::addComponent(Entity e) { - Instance ci = 0; - if (!e.isNull()) { - if (!hasComponent(e)) { - // this is like a push_back(e); - mData.push_back().template back() = e; - // index 0 is used when the component doesn't exist - ci = Instance(mData.size() - 1); - mInstanceMap[e] = ci; - } else { - // if the entity already has this component, just return its instance - ci = mInstanceMap[e]; - } - } - assert(ci != 0); - return ci; -} - -// Keep these outside of the class because CLion has trouble parsing them -template -typename SingleInstanceComponentManager::Instance -SingleInstanceComponentManager::removeComponent(Entity e) { - auto& map = mInstanceMap; - auto pos = map.find(e); - if (UTILS_LIKELY(pos != map.end())) { - size_t index = pos->second; - assert(index != 0); - size_t last = mData.size() - 1; - if (last != index) { - // move the last item to where we removed this component, as to keep - // the array tightly packed. - mData.forEach([index, last](auto* p) { - p[index] = std::move(p[last]); - }); - - Entity lastEntity = mData.template elementAt(index); - map[lastEntity] = index; - } - mData.pop_back(); - map.erase(pos); - return last; - } - return 0; -} - - -} // namespace filament - -#endif // TNT_UTILS_SINGLEINSTANCECOMPONENTMANAGER_H +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_SINGLEINSTANCECOMPONENTMANAGER_H +#define TNT_UTILS_SINGLEINSTANCECOMPONENTMANAGER_H + +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace utils { + +class EntityManager; + +/* + * Helper class to create single instance component managers. + * + * This handles the component's storage as a structure-of-arrays, as well + * as the garbage collection. + * + * This is intended to be used as base class for a real component manager. When doing so, + * and the real component manager is a public API, make sure to forward the public methods + * to the implementation. + * + */ +template +class UTILS_PUBLIC SingleInstanceComponentManager { +private: + + // this is just to avoid using std::default_random_engine, since we're in a public header. + class default_random_engine { + uint32_t mState = 1u; // must be 0 < seed < 0x7fffffff + public: + inline uint32_t operator()() noexcept { + return mState = uint32_t((uint64_t(mState) * 48271u) % 0x7fffffffu); + } + }; + +protected: + static constexpr size_t ENTITY_INDEX = sizeof ... (Elements); + +public: + using SoA = StructureOfArrays; + + using Instance = EntityInstanceBase::Type; + + SingleInstanceComponentManager() noexcept { + // We always start with a dummy entry because index=0 is reserved. The component + // at index = 0, is guaranteed to be default-initialized. + // Sub-classes can use this to their advantage. + mData.push_back(); + } + + SingleInstanceComponentManager(SingleInstanceComponentManager&& rhs) noexcept {/* = default */} + SingleInstanceComponentManager& operator=(SingleInstanceComponentManager&& rhs) noexcept {/* = default */} + ~SingleInstanceComponentManager() noexcept = default; + + // not copyable + SingleInstanceComponentManager(SingleInstanceComponentManager const& rhs) = delete; + SingleInstanceComponentManager& operator=(SingleInstanceComponentManager const& rhs) = delete; + + + // returns true if the given Entity has a component of this Manager + bool hasComponent(Entity e) const noexcept { + return getInstance(e) != 0; + } + + // Get instance of this Entity to be used to retrieve components + UTILS_NOINLINE + Instance getInstance(Entity e) const noexcept { + auto const& map = mInstanceMap; + // find() generates quite a bit of code + auto pos = map.find(e); + return pos != map.end() ? pos->second : 0; + } + + // returns the number of components (i.e. size of each arrays) + size_t getComponentCount() const noexcept { + // The array as an extra dummy component at index 0, so the visible count is 1 less. + return mData.size() - 1; + } + + bool empty() const noexcept { + return getComponentCount() == 0; + } + + // returns a pointer to the Entity array. This is basically the list + // of entities this component manager handles. + // The pointer becomes invalid when adding or removing a component. + Entity const* getEntities() const noexcept { + return begin(); + } + + Entity getEntity(Instance i) const noexcept { + return elementAt(i); + } + + // Add a component to the given Entity. 
If the entity already has a component from this + // manager, this function is a no-op. + // This invalidates all pointers components. + inline Instance addComponent(Entity e); + + // Removes a component from the given entity. + // This invalidates all pointers components. + inline Instance removeComponent(Entity e); + + // trigger one round of garbage collection. this is intended to be called on a regular + // basis. This gc gives up after it cannot randomly free 'ratio' component in a row. + void gc(const EntityManager& em, size_t ratio = 4) noexcept { + gc(em, ratio, [this](Entity e) { + removeComponent(e); + }); + } + + // return the first instance + Instance begin() const noexcept { return 1u; } + + // return the past-the-last instance + Instance end() const noexcept { return Instance(begin() + getComponentCount()); } + + // return a pointer to the first element of the ElementIndex'th array + template + typename SoA::template TypeAt* begin() noexcept { + return mData.template data() + 1; + } + + template + typename SoA::template TypeAt const* begin() const noexcept { + return mData.template data() + 1; + } + + // return a pointer to the past-the-end element of the ElementIndex'th array + template + typename SoA::template TypeAt* end() noexcept { + return begin() + getComponentCount(); + } + + template + typename SoA::template TypeAt const* end() const noexcept { + return begin() + getComponentCount(); + } + + // return a Slice<> + template + Slice> slice() noexcept { + return { begin(), end() }; + } + + template + Slice> slice() const noexcept { + return { begin(), end() }; + } + + // return a reference to the index'th element of the ElementIndex'th array + template + typename SoA::template TypeAt& elementAt(Instance index) noexcept { + assert(index); + return data()[index]; + } + + template + typename SoA::template TypeAt const& elementAt(Instance index) const noexcept { + assert(index); + return data()[index]; + } + + // returns a pointer to the RAW ARRAY of components including the first dummy component + // Use with caution. 
+ template + typename SoA::template TypeAt const* raw_array() const noexcept { + return data(); + } + + // We need our own version of Field because mData is private + template + struct Field : public SoA::template Field { + Field(SingleInstanceComponentManager& soa, EntityInstanceBase::Type i) noexcept + : SoA::template Field{ soa.mData, i } { + } + using SoA::template Field::operator =; + }; + +protected: + template + typename SoA::template TypeAt* data() noexcept { + return mData.template data(); + } + + template + typename SoA::template TypeAt const* data() const noexcept { + return mData.template data(); + } + + // swap only internals + void swap(Instance i, Instance j) noexcept { + assert(i); + assert(j); + if (i && j) { + // update the index map + auto& map = mInstanceMap; + Entity& ei = elementAt(i); + Entity& ej = elementAt(j); + std::swap(ei, ej); + if (ei) { + map[ei] = i; + } + if (ej) { + map[ej] = j; + } + } + } + + template + void gc(const EntityManager& em, size_t ratio, + REMOVE removeComponent) noexcept { + Entity const* entities = getEntities(); + size_t count = getComponentCount(); + size_t aliveInARow = 0; + default_random_engine& rng = mRng; + #pragma nounroll + while (count && aliveInARow < ratio) { + // note: using the modulo favorizes lower number + size_t i = rng() % count; + if (UTILS_LIKELY(em.isAlive(entities[i]))) { + ++aliveInARow; + continue; + } + aliveInARow = 0; + count--; + removeComponent(entities[i]); + } + } + +protected: + SoA mData; + +private: + // maps an entity to an instance index + tsl::robin_map mInstanceMap; + default_random_engine mRng; +}; + +// Keep these outside of the class because CLion has trouble parsing them +template +typename SingleInstanceComponentManager::Instance +SingleInstanceComponentManager::addComponent(Entity e) { + Instance ci = 0; + if (!e.isNull()) { + if (!hasComponent(e)) { + // this is like a push_back(e); + mData.push_back().template back() = e; + // index 0 is used when the component doesn't exist + ci = Instance(mData.size() - 1); + mInstanceMap[e] = ci; + } else { + // if the entity already has this component, just return its instance + ci = mInstanceMap[e]; + } + } + assert(ci != 0); + return ci; +} + +// Keep these outside of the class because CLion has trouble parsing them +template +typename SingleInstanceComponentManager::Instance +SingleInstanceComponentManager::removeComponent(Entity e) { + auto& map = mInstanceMap; + auto pos = map.find(e); + if (UTILS_LIKELY(pos != map.end())) { + size_t index = pos->second; + assert(index != 0); + size_t last = mData.size() - 1; + if (last != index) { + // move the last item to where we removed this component, as to keep + // the array tightly packed. + mData.forEach([index, last](auto* p) { + p[index] = std::move(p[last]); + }); + + Entity lastEntity = mData.template elementAt(index); + map[lastEntity] = index; + } + mData.pop_back(); + map.erase(pos); + return last; + } + return 0; +} + + +} // namespace filament + +#endif // TNT_UTILS_SINGLEINSTANCECOMPONENTMANAGER_H diff --git a/ios/include/utils/Slice.h b/ios/include/utils/Slice.h index ed61616c..eabf7979 100644 --- a/ios/include/utils/Slice.h +++ b/ios/include/utils/Slice.h @@ -1,373 +1,373 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
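A sketch of how a concrete manager can derive from SingleInstanceComponentManager as described above; LightComponentManager and its two element types are purely illustrative:

    #include <utils/Entity.h>
    #include <utils/SingleInstanceComponentManager.h>

    // Stores one float (intensity) and one bool (enabled) per entity.
    class LightComponentManager
            : public utils::SingleInstanceComponentManager<float, bool> {
    public:
        enum { INTENSITY = 0, ENABLED = 1 };

        void create(utils::Entity e, float intensity) {
            Instance i = addComponent(e);       // no-op if e already has a component
            elementAt<INTENSITY>(i) = intensity;
            elementAt<ENABLED>(i) = true;
        }

        float getIntensity(utils::Entity e) const {
            Instance i = getInstance(e);        // 0 means "no component"
            return i ? elementAt<INTENSITY>(i) : 0.0f;
        }
    };

Calling gc() with the engine's EntityManager on a regular basis then reclaims components whose entities have been destroyed, as documented above.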
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_SLICE_H -#define TNT_UTILS_SLICE_H - -#include -#include - -#include -#include - -#include - -namespace utils { - -/* - * A fixed-size slice of a container - */ -template -class Slice { -public: - using iterator = T*; - using const_iterator = T const*; - using value_type = T; - using reference = T&; - using const_reference = T const&; - using pointer = T*; - using const_pointer = T const*; - using size_type = SIZE_TYPE; - - Slice() noexcept = default; - - Slice(const_iterator begin, const_iterator end) noexcept - : mBegin(const_cast(begin)), mEndOffset(size_type(end - begin)) { - } - - Slice(const_pointer begin, size_type count) noexcept - : mBegin(const_cast(begin)), mEndOffset(size_type(count)) { - } - - Slice(Slice const& rhs) noexcept = default; - Slice(Slice&& rhs) noexcept = default; - Slice& operator=(Slice const& rhs) noexcept = default; - Slice& operator=(Slice&& rhs) noexcept = default; - - void set(pointer begin, size_type count) UTILS_RESTRICT noexcept { - mBegin = begin; - mEndOffset = size_type(count); - } - - void set(iterator begin, iterator end) UTILS_RESTRICT noexcept { - mBegin = begin; - mEndOffset = size_type(end - begin); - } - - void swap(Slice& rhs) UTILS_RESTRICT noexcept { - std::swap(mBegin, rhs.mBegin); - std::swap(mEndOffset, rhs.mEndOffset); - } - - void clear() UTILS_RESTRICT noexcept { - mBegin = nullptr; - mEndOffset = 0; - } - - // size - size_t size() const UTILS_RESTRICT noexcept { return mEndOffset; } - size_t sizeInBytes() const UTILS_RESTRICT noexcept { return size() * sizeof(T); } - bool empty() const UTILS_RESTRICT noexcept { return size() == 0; } - - // iterators - iterator begin() UTILS_RESTRICT noexcept { return mBegin; } - const_iterator begin() const UTILS_RESTRICT noexcept { return mBegin; } - const_iterator cbegin() const UTILS_RESTRICT noexcept { return this->begin(); } - iterator end() UTILS_RESTRICT noexcept { return &mBegin[mEndOffset]; } - const_iterator end() const UTILS_RESTRICT noexcept { return &mBegin[mEndOffset]; } - const_iterator cend() const UTILS_RESTRICT noexcept { return this->end(); } - - // data access - reference operator[](size_t n) UTILS_RESTRICT noexcept { - assert(n < size()); - return mBegin[n]; - } - - const_reference operator[](size_t n) const UTILS_RESTRICT noexcept { - assert(n < size()); - return mBegin[n]; - } - - reference at(size_t n) UTILS_RESTRICT noexcept { - return operator[](n); - } - - const_reference at(size_t n) const UTILS_RESTRICT noexcept { - return operator[](n); - } - - reference front() UTILS_RESTRICT noexcept { - assert(!empty()); - return *mBegin; - } - - const_reference front() const UTILS_RESTRICT noexcept { - assert(!empty()); - return *mBegin; - } - - reference back() UTILS_RESTRICT noexcept { - assert(!empty()); - return *(this->end() - 1); - } - - const_reference back() const UTILS_RESTRICT noexcept { - assert(!empty()); - return *(this->end() - 1); - } - - pointer data() UTILS_RESTRICT noexcept { - return this->begin(); - } - - const_pointer data() const UTILS_RESTRICT noexcept { - return this->begin(); - } - -protected: - iterator mBegin = 
nullptr; - size_type mEndOffset = 0; -}; - -/* - * A fixed-capacity (but growable) slice of a container - */ -template -class UTILS_PRIVATE GrowingSlice : public Slice { -public: - using iterator = typename Slice::iterator; - using const_iterator = typename Slice::const_iterator; - using value_type = typename Slice::value_type; - using reference = typename Slice::reference; - using const_reference = typename Slice::const_reference; - using pointer = typename Slice::pointer; - using const_pointer = typename Slice::const_pointer; - using size_type = typename Slice::size_type; - - GrowingSlice() noexcept = default; - GrowingSlice(GrowingSlice const& rhs) noexcept = default; - GrowingSlice(GrowingSlice&& rhs) noexcept = default; - - template - GrowingSlice(Iter begin, size_type count) noexcept - : Slice(begin, size_type(0)), - mCapOffset(count) { - } - - GrowingSlice& operator=(GrowingSlice const& rhs) noexcept = default; - GrowingSlice& operator=(GrowingSlice&& rhs) noexcept = default; - - // size - size_t remain() const UTILS_RESTRICT noexcept { return mCapOffset - this->mEndOffset; } - size_t capacity() const UTILS_RESTRICT noexcept { return mCapOffset; } - - template - void set(Iter begin, size_type count) UTILS_RESTRICT noexcept { - this->Slice::set(begin, count); - mCapOffset = count; - } - - template - void set(Iter begin, Iter end) UTILS_RESTRICT noexcept { - this->Slice::set(begin, end); - mCapOffset = size_type(end - begin); - } - - void swap(GrowingSlice& rhs) UTILS_RESTRICT noexcept { - Slice::swap(rhs); - std::swap(mCapOffset, rhs.mCapOffset); - } - - void clear() UTILS_RESTRICT noexcept { - this->mEndOffset = 0; - } - - void resize(size_type count) UTILS_RESTRICT noexcept { - assert(count < mCapOffset); - this->mEndOffset = size_type(count); - } - - T* grow(size_type count) UTILS_RESTRICT noexcept { - assert(this->size() + count <= mCapOffset); - size_t offset = this->mEndOffset; - this->mEndOffset += count; - return this->mBegin + offset; - } - - // data access - void push_back(T const& item) UTILS_RESTRICT noexcept { - T* const p = this->grow(1); - *p = item; - } - - void push_back(T&& item) UTILS_RESTRICT noexcept { - T* const p = this->grow(1); - *p = std::move(item); - } - - template - void emplace_back(ARGS&& ... 
args) UTILS_RESTRICT noexcept { - T* const p = this->grow(1); - new(p) T(std::forward(args)...); - } - -private: - // we use size_type == uint32_t to reduce the size on 64-bits machines - size_type mCapOffset = 0; -}; - -// ------------------------------------------------------------------------------------------------ - -/* - * A fixed-capacity (but atomically growable) slice of a container - */ -template -class AtomicGrowingSlice { -public: - using iterator = T*; - using const_iterator = T const*; - using value_type = T; - using reference = T&; - using const_reference = T const&; - using pointer = T*; - using const_pointer = T const*; - using size_type = SIZE_TYPE; - - AtomicGrowingSlice() noexcept = default; - - template - AtomicGrowingSlice(Iter begin, Iter end) noexcept - : mBegin(iterator(begin)), - mEndOffset(0), - mCapOffset(size_type(iterator(end) - iterator(begin))) { - } - - template - AtomicGrowingSlice(Iter begin, size_type count) noexcept - : mBegin(iterator(begin)), mEndOffset(0), mCapOffset(size_type(count)) { - } - - template - void set(Iter begin, size_type count) noexcept { - assert(mBegin == nullptr); - mBegin = iterator(begin); - mEndOffset.store(0, std::memory_order_relaxed); - mCapOffset = count; - } - - // clear - void clear() noexcept { - mEndOffset.store(0, std::memory_order_relaxed); - } - - // size - size_type size() const noexcept { return mEndOffset.load(std::memory_order_relaxed); } - bool empty() const noexcept { return size() == 0; } - size_type remain() const noexcept { return mCapOffset - size(); } - size_type capacity() const noexcept { return mCapOffset; } - - // iterators - iterator begin() noexcept { return mBegin; } - const_iterator begin() const noexcept { return mBegin; } - const_iterator cbegin() const noexcept { return begin(); } - iterator end() noexcept { return &mBegin[size()]; } - const_iterator end() const noexcept { return &mBegin[size()]; } - const_iterator cend() const noexcept { return end(); } - - // data access - reference operator[](size_type n) noexcept { - assert(n < size()); - return mBegin[n]; - } - - const_reference operator[](size_type n) const noexcept { - assert(n < size()); - return mBegin[n]; - } - - reference at(size_type n) noexcept { - return operator[](n); - } - - const_reference at(size_type n) const noexcept { - return operator[](n); - } - - reference front() noexcept { - assert(!empty()); - return *mBegin; - } - - const_reference front() const noexcept { - assert(!empty()); - return *mBegin; - } - - reference back() noexcept { - assert(!empty()); - return *(end() - 1); - } - - const_reference back() const noexcept { - assert(!empty()); - return *(end() - 1); - } - - pointer data() noexcept { - return begin(); - } - - const_pointer data() const noexcept { - return begin(); - } - - T* grow(size_type count) noexcept { - size_type offset = this->mEndOffset.load(std::memory_order_relaxed); - do { - if (UTILS_UNLIKELY(offset + count > mCapOffset)) { - return nullptr; - } - } while (UTILS_UNLIKELY(!this->mEndOffset.compare_exchange_weak(offset, offset + count, - std::memory_order_relaxed, std::memory_order_relaxed))); - return this->mBegin + offset; - } - - // data access - void push_back(T const& item) noexcept { - T* const p = this->grow(1); - *p = item; - } - - void push_back(T&& item) noexcept { - T* const p = this->grow(1); - *p = std::move(item); - } - - template - void emplace_back(ARGS&& ... 
args) noexcept { - T* const p = this->grow(1); - new(p) T(std::forward(args)...); - } - -private: - iterator mBegin = nullptr; - std::atomic mEndOffset = ATOMIC_VAR_INIT(0); - size_type mCapOffset = 0; -}; - -} // namespace utils - -#endif // TNT_UTILS_SLICE_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_SLICE_H +#define TNT_UTILS_SLICE_H + +#include +#include + +#include +#include + +#include + +namespace utils { + +/* + * A fixed-size slice of a container + */ +template +class Slice { +public: + using iterator = T*; + using const_iterator = T const*; + using value_type = T; + using reference = T&; + using const_reference = T const&; + using pointer = T*; + using const_pointer = T const*; + using size_type = SIZE_TYPE; + + Slice() noexcept = default; + + Slice(const_iterator begin, const_iterator end) noexcept + : mBegin(const_cast(begin)), mEndOffset(size_type(end - begin)) { + } + + Slice(const_pointer begin, size_type count) noexcept + : mBegin(const_cast(begin)), mEndOffset(size_type(count)) { + } + + Slice(Slice const& rhs) noexcept = default; + Slice(Slice&& rhs) noexcept = default; + Slice& operator=(Slice const& rhs) noexcept = default; + Slice& operator=(Slice&& rhs) noexcept = default; + + void set(pointer begin, size_type count) UTILS_RESTRICT noexcept { + mBegin = begin; + mEndOffset = size_type(count); + } + + void set(iterator begin, iterator end) UTILS_RESTRICT noexcept { + mBegin = begin; + mEndOffset = size_type(end - begin); + } + + void swap(Slice& rhs) UTILS_RESTRICT noexcept { + std::swap(mBegin, rhs.mBegin); + std::swap(mEndOffset, rhs.mEndOffset); + } + + void clear() UTILS_RESTRICT noexcept { + mBegin = nullptr; + mEndOffset = 0; + } + + // size + size_t size() const UTILS_RESTRICT noexcept { return mEndOffset; } + size_t sizeInBytes() const UTILS_RESTRICT noexcept { return size() * sizeof(T); } + bool empty() const UTILS_RESTRICT noexcept { return size() == 0; } + + // iterators + iterator begin() UTILS_RESTRICT noexcept { return mBegin; } + const_iterator begin() const UTILS_RESTRICT noexcept { return mBegin; } + const_iterator cbegin() const UTILS_RESTRICT noexcept { return this->begin(); } + iterator end() UTILS_RESTRICT noexcept { return &mBegin[mEndOffset]; } + const_iterator end() const UTILS_RESTRICT noexcept { return &mBegin[mEndOffset]; } + const_iterator cend() const UTILS_RESTRICT noexcept { return this->end(); } + + // data access + reference operator[](size_t n) UTILS_RESTRICT noexcept { + assert(n < size()); + return mBegin[n]; + } + + const_reference operator[](size_t n) const UTILS_RESTRICT noexcept { + assert(n < size()); + return mBegin[n]; + } + + reference at(size_t n) UTILS_RESTRICT noexcept { + return operator[](n); + } + + const_reference at(size_t n) const UTILS_RESTRICT noexcept { + return operator[](n); + } + + reference front() UTILS_RESTRICT noexcept { + assert(!empty()); + return *mBegin; + } + + const_reference front() const UTILS_RESTRICT 
noexcept { + assert(!empty()); + return *mBegin; + } + + reference back() UTILS_RESTRICT noexcept { + assert(!empty()); + return *(this->end() - 1); + } + + const_reference back() const UTILS_RESTRICT noexcept { + assert(!empty()); + return *(this->end() - 1); + } + + pointer data() UTILS_RESTRICT noexcept { + return this->begin(); + } + + const_pointer data() const UTILS_RESTRICT noexcept { + return this->begin(); + } + +protected: + iterator mBegin = nullptr; + size_type mEndOffset = 0; +}; + +/* + * A fixed-capacity (but growable) slice of a container + */ +template +class UTILS_PRIVATE GrowingSlice : public Slice { +public: + using iterator = typename Slice::iterator; + using const_iterator = typename Slice::const_iterator; + using value_type = typename Slice::value_type; + using reference = typename Slice::reference; + using const_reference = typename Slice::const_reference; + using pointer = typename Slice::pointer; + using const_pointer = typename Slice::const_pointer; + using size_type = typename Slice::size_type; + + GrowingSlice() noexcept = default; + GrowingSlice(GrowingSlice const& rhs) noexcept = default; + GrowingSlice(GrowingSlice&& rhs) noexcept = default; + + template + GrowingSlice(Iter begin, size_type count) noexcept + : Slice(begin, size_type(0)), + mCapOffset(count) { + } + + GrowingSlice& operator=(GrowingSlice const& rhs) noexcept = default; + GrowingSlice& operator=(GrowingSlice&& rhs) noexcept = default; + + // size + size_t remain() const UTILS_RESTRICT noexcept { return mCapOffset - this->mEndOffset; } + size_t capacity() const UTILS_RESTRICT noexcept { return mCapOffset; } + + template + void set(Iter begin, size_type count) UTILS_RESTRICT noexcept { + this->Slice::set(begin, count); + mCapOffset = count; + } + + template + void set(Iter begin, Iter end) UTILS_RESTRICT noexcept { + this->Slice::set(begin, end); + mCapOffset = size_type(end - begin); + } + + void swap(GrowingSlice& rhs) UTILS_RESTRICT noexcept { + Slice::swap(rhs); + std::swap(mCapOffset, rhs.mCapOffset); + } + + void clear() UTILS_RESTRICT noexcept { + this->mEndOffset = 0; + } + + void resize(size_type count) UTILS_RESTRICT noexcept { + assert(count < mCapOffset); + this->mEndOffset = size_type(count); + } + + T* grow(size_type count) UTILS_RESTRICT noexcept { + assert(this->size() + count <= mCapOffset); + size_t offset = this->mEndOffset; + this->mEndOffset += count; + return this->mBegin + offset; + } + + // data access + void push_back(T const& item) UTILS_RESTRICT noexcept { + T* const p = this->grow(1); + *p = item; + } + + void push_back(T&& item) UTILS_RESTRICT noexcept { + T* const p = this->grow(1); + *p = std::move(item); + } + + template + void emplace_back(ARGS&& ... 
args) UTILS_RESTRICT noexcept { + T* const p = this->grow(1); + new(p) T(std::forward(args)...); + } + +private: + // we use size_type == uint32_t to reduce the size on 64-bits machines + size_type mCapOffset = 0; +}; + +// ------------------------------------------------------------------------------------------------ + +/* + * A fixed-capacity (but atomically growable) slice of a container + */ +template +class AtomicGrowingSlice { +public: + using iterator = T*; + using const_iterator = T const*; + using value_type = T; + using reference = T&; + using const_reference = T const&; + using pointer = T*; + using const_pointer = T const*; + using size_type = SIZE_TYPE; + + AtomicGrowingSlice() noexcept = default; + + template + AtomicGrowingSlice(Iter begin, Iter end) noexcept + : mBegin(iterator(begin)), + mEndOffset(0), + mCapOffset(size_type(iterator(end) - iterator(begin))) { + } + + template + AtomicGrowingSlice(Iter begin, size_type count) noexcept + : mBegin(iterator(begin)), mEndOffset(0), mCapOffset(size_type(count)) { + } + + template + void set(Iter begin, size_type count) noexcept { + assert(mBegin == nullptr); + mBegin = iterator(begin); + mEndOffset.store(0, std::memory_order_relaxed); + mCapOffset = count; + } + + // clear + void clear() noexcept { + mEndOffset.store(0, std::memory_order_relaxed); + } + + // size + size_type size() const noexcept { return mEndOffset.load(std::memory_order_relaxed); } + bool empty() const noexcept { return size() == 0; } + size_type remain() const noexcept { return mCapOffset - size(); } + size_type capacity() const noexcept { return mCapOffset; } + + // iterators + iterator begin() noexcept { return mBegin; } + const_iterator begin() const noexcept { return mBegin; } + const_iterator cbegin() const noexcept { return begin(); } + iterator end() noexcept { return &mBegin[size()]; } + const_iterator end() const noexcept { return &mBegin[size()]; } + const_iterator cend() const noexcept { return end(); } + + // data access + reference operator[](size_type n) noexcept { + assert(n < size()); + return mBegin[n]; + } + + const_reference operator[](size_type n) const noexcept { + assert(n < size()); + return mBegin[n]; + } + + reference at(size_type n) noexcept { + return operator[](n); + } + + const_reference at(size_type n) const noexcept { + return operator[](n); + } + + reference front() noexcept { + assert(!empty()); + return *mBegin; + } + + const_reference front() const noexcept { + assert(!empty()); + return *mBegin; + } + + reference back() noexcept { + assert(!empty()); + return *(end() - 1); + } + + const_reference back() const noexcept { + assert(!empty()); + return *(end() - 1); + } + + pointer data() noexcept { + return begin(); + } + + const_pointer data() const noexcept { + return begin(); + } + + T* grow(size_type count) noexcept { + size_type offset = this->mEndOffset.load(std::memory_order_relaxed); + do { + if (UTILS_UNLIKELY(offset + count > mCapOffset)) { + return nullptr; + } + } while (UTILS_UNLIKELY(!this->mEndOffset.compare_exchange_weak(offset, offset + count, + std::memory_order_relaxed, std::memory_order_relaxed))); + return this->mBegin + offset; + } + + // data access + void push_back(T const& item) noexcept { + T* const p = this->grow(1); + *p = item; + } + + void push_back(T&& item) noexcept { + T* const p = this->grow(1); + *p = std::move(item); + } + + template + void emplace_back(ARGS&& ... 
args) noexcept { + T* const p = this->grow(1); + new(p) T(std::forward(args)...); + } + +private: + iterator mBegin = nullptr; + std::atomic mEndOffset = ATOMIC_VAR_INIT(0); + size_type mCapOffset = 0; +}; + +} // namespace utils + +#endif // TNT_UTILS_SLICE_H diff --git a/ios/include/utils/SpinLock.h b/ios/include/utils/SpinLock.h index 797e1627..e7ce00af 100644 --- a/ios/include/utils/SpinLock.h +++ b/ios/include/utils/SpinLock.h @@ -1,90 +1,90 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_SPINLOCK_H -#define TNT_UTILS_SPINLOCK_H - -#include - -#include - -#include -#include - -#include -#include - -namespace utils { -namespace details { - -class SpinLock { - std::atomic_flag mLock = ATOMIC_FLAG_INIT; - -public: - void lock() noexcept { - UTILS_PREFETCHW(&mLock); -#ifdef __ARM_ACLE - // we signal an event on this CPU, so that the first yield() will be a no-op, - // and falls through the test_and_set(). This is more efficient than a while { } - // construct. - UTILS_SIGNAL_EVENT(); - do { - yield(); - } while (mLock.test_and_set(std::memory_order_acquire)); -#else - goto start; - do { - yield(); -start: ; - } while (mLock.test_and_set(std::memory_order_acquire)); -#endif - } - - void unlock() noexcept { - mLock.clear(std::memory_order_release); -#ifdef __ARM_ARCH_7A__ - // on ARMv7a SEL is needed - UTILS_SIGNAL_EVENT(); - // as well as a memory barrier is needed - __dsb(0xA); // ISHST = 0xA (b1010) -#else - // on ARMv8 we could avoid the call to SE, but we'd need to write the - // test_and_set() above by hand, so the WFE only happens without a STRX first. - UTILS_BROADCAST_EVENT(); -#endif - } - -private: - inline void yield() noexcept { - // on x86 call pause instruction, on ARM call WFE - UTILS_WAIT_FOR_EVENT(); - } -}; -} // namespace details - -#if defined(__SANITIZE_THREAD__) -// Unfortunately TSAN doesn't support homegrown synchronization primitives -using SpinLock = Mutex; -#elif defined(__ARM_ARCH_7A__) -// We've had problems with "wfe" on some ARM-V7 devices, causing spurious SIGILL -using SpinLock = Mutex; -#else -using SpinLock = details::SpinLock; -#endif - -} // namespace utils - -#endif //TNT_UTILS_SPINLOCK_H +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
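For orientation, a small usage sketch of the Slice, GrowingSlice and AtomicGrowingSlice classes from the Slice.h hunk above. It is illustrative only and not part of the patch; the buffers, sizes and function name are made up.

#include <utils/Slice.h>

void sliceExample() {
    int backing[8] = {};

    // Slice: a non-owning, fixed-size view over existing storage.
    utils::Slice<int> view(backing, 8u);
    view[0] = 42;

    // GrowingSlice: same view, but tracks size separately from capacity,
    // so elements can be appended until the fixed capacity is reached.
    int growBacking[8];
    utils::GrowingSlice<int> appender(growBacking, 8u);
    appender.push_back(1);
    appender.push_back(2);      // size() == 2, capacity() == 8

    // AtomicGrowingSlice: grow() reserves room with a CAS loop, so multiple
    // threads can append concurrently; it returns nullptr once full.
    int sharedBacking[8];
    utils::AtomicGrowingSlice<int> shared(sharedBacking, 8u);
    if (int* p = shared.grow(1)) {
        *p = 3;
    }
}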
+ */ + +#ifndef TNT_UTILS_SPINLOCK_H +#define TNT_UTILS_SPINLOCK_H + +#include + +#include + +#include +#include + +#include +#include + +namespace utils { +namespace details { + +class SpinLock { + std::atomic_flag mLock = ATOMIC_FLAG_INIT; + +public: + void lock() noexcept { + UTILS_PREFETCHW(&mLock); +#ifdef __ARM_ACLE + // we signal an event on this CPU, so that the first yield() will be a no-op, + // and falls through the test_and_set(). This is more efficient than a while { } + // construct. + UTILS_SIGNAL_EVENT(); + do { + yield(); + } while (mLock.test_and_set(std::memory_order_acquire)); +#else + goto start; + do { + yield(); +start: ; + } while (mLock.test_and_set(std::memory_order_acquire)); +#endif + } + + void unlock() noexcept { + mLock.clear(std::memory_order_release); +#ifdef __ARM_ARCH_7A__ + // on ARMv7a SEL is needed + UTILS_SIGNAL_EVENT(); + // as well as a memory barrier is needed + __dsb(0xA); // ISHST = 0xA (b1010) +#else + // on ARMv8 we could avoid the call to SE, but we'd need to write the + // test_and_set() above by hand, so the WFE only happens without a STRX first. + UTILS_BROADCAST_EVENT(); +#endif + } + +private: + inline void yield() noexcept { + // on x86 call pause instruction, on ARM call WFE + UTILS_WAIT_FOR_EVENT(); + } +}; +} // namespace details + +#if UTILS_HAS_SANITIZE_THREAD +// Active spins with atomics slow down execution too much under ThreadSanitizer. +using SpinLock = Mutex; +#elif defined(__ARM_ARCH_7A__) +// We've had problems with "wfe" on some ARM-V7 devices, causing spurious SIGILL +using SpinLock = Mutex; +#else +using SpinLock = details::SpinLock; +#endif + +} // namespace utils + +#endif // TNT_UTILS_SPINLOCK_H diff --git a/ios/include/utils/StructureOfArrays.h b/ios/include/utils/StructureOfArrays.h index dcd62ed6..e5cbb422 100644 --- a/ios/include/utils/StructureOfArrays.h +++ b/ios/include/utils/StructureOfArrays.h @@ -1,646 +1,646 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
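Since the only functional change to SpinLock.h above is how the ThreadSanitizer fallback is selected (UTILS_HAS_SANITIZE_THREAD instead of __SANITIZE_THREAD__), a short usage sketch may help; it is illustrative only, and the counter and function name are made up. Note that the utils::SpinLock alias silently becomes utils::Mutex under TSAN and on ARMv7-A, so callers should not rely on it busy-waiting.

#include <utils/SpinLock.h>

#include <mutex>    // std::lock_guard

static utils::SpinLock sCounterLock;
static int sCounter = 0;    // guarded by sCounterLock

void incrementCounter() noexcept {
    // SpinLock provides lock()/unlock() (BasicLockable), so std::lock_guard
    // works with it directly.
    std::lock_guard<utils::SpinLock> guard(sCounterLock);
    ++sCounter;
}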
- */ - -#ifndef TNT_UTILS_STRUCTUREOFARRAYS_H -#define TNT_UTILS_STRUCTUREOFARRAYS_H - -#include // note: this is safe, see how std::array is used below (inline / private) -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace utils { - -template -class StructureOfArraysBase { - // number of elements - static constexpr const size_t kArrayCount = sizeof...(Elements); - -public: - using SoA = StructureOfArraysBase; - - // Type of the Nth array - template - using TypeAt = typename std::tuple_element>::type; - - // Number of arrays - static constexpr size_t getArrayCount() noexcept { return kArrayCount; } - - // Size needed to store "size" array elements - static size_t getNeededSize(size_t size) noexcept { - return getOffset(kArrayCount - 1, size) + sizeof(TypeAt) * size; - } - - // -------------------------------------------------------------------------------------------- - - class Structure; - template class Iterator; - using iterator = Iterator; - using const_iterator = Iterator; - using size_type = size_t; - using difference_type = ptrdiff_t; - - /* - * An object that represents a reference to the type dereferenced by iterator. - * In other words, it's the return type of iterator::operator*(), and since it - * cannot be a C++ reference (&), it's an object that acts like it. - */ - class StructureRef { - friend class Structure; - friend iterator; - friend const_iterator; - StructureOfArraysBase* const UTILS_RESTRICT soa; - size_t const index; - - StructureRef(StructureOfArraysBase* soa, size_t index) : soa(soa), index(index) { } - - // assigns a value_type to a reference (i.e. assigns to what's pointed to by the reference) - template - StructureRef& assign(Structure const& rhs, std::index_sequence); - - // assigns a value_type to a reference (i.e. assigns to what's pointed to by the reference) - template - StructureRef& assign(Structure&& rhs, std::index_sequence) noexcept; - - // objects pointed to by reference can be swapped, so provide the special swap() function. - friend void swap(StructureRef lhs, StructureRef rhs) { - lhs.soa->swap(lhs.index, rhs.index); - } - - public: - // references can be created by copy-assignment only - StructureRef(StructureRef const& rhs) noexcept : soa(rhs.soa), index(rhs.index) { } - - // copy the content of a reference to the content of this one - StructureRef& operator=(StructureRef const& rhs); - - // move the content of a reference to the content of this one - StructureRef& operator=(StructureRef&& rhs) noexcept; - - // copy a value_type to the content of this reference - StructureRef& operator=(Structure const& rhs) { - return assign(rhs, std::make_index_sequence()); - } - - // move a value_type to the content of this reference - StructureRef& operator=(Structure&& rhs) noexcept { - return assign(rhs, std::make_index_sequence()); - } - - // access the elements of this reference (i.e. the "fields" of the structure) - template TypeAt const& get() const { return soa->elementAt(index); } - template TypeAt& get() { return soa->elementAt(index); } - }; - - - /* - * The value_type of iterator. This is basically the "structure" of the SoA. - * Internally we're using a tuple<> to store the data. - * This object is not trivial to construct, as it copies an entry of the SoA. 
- */ - class Structure { - friend class StructureRef; - friend iterator; - friend const_iterator; - using Type = std::tuple::type...>; - Type elements; - - template - static Type init(StructureRef const& rhs, std::index_sequence) { - return Type{ rhs.soa->template elementAt(rhs.index)... }; - } - - template - static Type init(StructureRef&& rhs, std::index_sequence) noexcept { - return Type{ std::move(rhs.soa->template elementAt(rhs.index))... }; - } - - public: - Structure(Structure const& rhs) = default; - Structure(Structure&& rhs) noexcept = default; - Structure& operator=(Structure const& rhs) = default; - Structure& operator=(Structure&& rhs) noexcept = default; - - // initialize and assign from a StructureRef - Structure(StructureRef const& rhs) - : elements(init(rhs, std::make_index_sequence())) {} - Structure(StructureRef&& rhs) noexcept - : elements(init(rhs, std::make_index_sequence())) {} - Structure& operator=(StructureRef const& rhs) { return operator=(Structure(rhs)); } - Structure& operator=(StructureRef&& rhs) noexcept { return operator=(Structure(rhs)); } - - // access the elements of this value_Type (i.e. the "fields" of the structure) - template TypeAt const& get() const { return std::get(elements); } - template TypeAt& get() { return std::get(elements); } - }; - - - /* - * An iterator to the SoA. This is only intended to be used with STL's algorithm, e.g.: sort(). - * Normally, SoA is not iterated globally, but rather an array at a time. - * Iterating itself is not too costly, as well as dereferencing by reference. However, - * dereferencing by value is. - */ - template - class Iterator { - friend class StructureOfArraysBase; - CVQualifiedSOAPointer soa; // don't use restrict, can have aliases if multiple iterators are created - size_t index; - - Iterator(CVQualifiedSOAPointer soa, size_t index) : soa(soa), index(index) {} - - public: - using value_type = Structure; - using reference = StructureRef; - using pointer = StructureRef*; // FIXME: this should be a StructurePtr type - using difference_type = ptrdiff_t; - using iterator_category = std::random_access_iterator_tag; - - Iterator(Iterator const& rhs) noexcept = default; - Iterator& operator=(Iterator const& rhs) = default; - - reference operator*() const { return { soa, index }; } - reference operator*() { return { soa, index }; } - reference operator[](size_t n) { return *(*this + n); } - - template TypeAt const& get() const { return soa->template elementAt(index); } - template TypeAt& get() { return soa->template elementAt(index); } - - Iterator& operator++() { ++index; return *this; } - Iterator& operator--() { --index; return *this; } - Iterator& operator+=(size_t n) { index += n; return *this; } - Iterator& operator-=(size_t n) { index -= n; return *this; } - Iterator operator+(size_t n) const { return { soa, index + n }; } - Iterator operator-(size_t n) const { return { soa, index - n }; } - difference_type operator-(Iterator const& rhs) const { return index - rhs.index; } - bool operator==(Iterator const& rhs) const { return (index == rhs.index); } - bool operator!=(Iterator const& rhs) const { return (index != rhs.index); } - bool operator>=(Iterator const& rhs) const { return (index >= rhs.index); } - bool operator> (Iterator const& rhs) const { return (index > rhs.index); } - bool operator<=(Iterator const& rhs) const { return (index <= rhs.index); } - bool operator< (Iterator const& rhs) const { return (index < rhs.index); } - - // Postfix operator needed by Microsoft STL. 
- const Iterator operator++(int) { Iterator it(*this); index++; return it; } - const Iterator operator--(int) { Iterator it(*this); index--; return it; } - }; - - iterator begin() noexcept { return { this, 0u }; } - iterator end() noexcept { return { this, mSize }; } - const_iterator begin() const noexcept { return { this, 0u }; } - const_iterator end() const noexcept { return { this, mSize }; } - - // -------------------------------------------------------------------------------------------- - - StructureOfArraysBase() = default; - - explicit StructureOfArraysBase(size_t capacity) { - setCapacity(capacity); - } - - // not copyable for now - StructureOfArraysBase(StructureOfArraysBase const& rhs) = delete; - StructureOfArraysBase& operator=(StructureOfArraysBase const& rhs) = delete; - - // movability is trivial, so support it - StructureOfArraysBase(StructureOfArraysBase&& rhs) noexcept { - using std::swap; - swap(mCapacity, rhs.mCapacity); - swap(mSize, rhs.mSize); - swap(mArrayOffset, rhs.mArrayOffset); - swap(mAllocator, rhs.mAllocator); - } - - StructureOfArraysBase& operator=(StructureOfArraysBase&& rhs) noexcept { - if (this != &rhs) { - using std::swap; - swap(mCapacity, rhs.mCapacity); - swap(mSize, rhs.mSize); - swap(mArrayOffset, rhs.mArrayOffset); - swap(mAllocator, rhs.mAllocator); - } - return *this; - } - - ~StructureOfArraysBase() { - destroy_each(0, mSize); - mAllocator.free(mArrayOffset[0]); - } - - // -------------------------------------------------------------------------------------------- - - // return the size the array - size_t size() const noexcept { - return mSize; - } - - // return the capacity of the array - size_t capacity() const noexcept { - return mCapacity; - } - - // set the capacity of the array. the capacity cannot be smaller than the current size, - // the call is a no-op in that case. - UTILS_NOINLINE - void setCapacity(size_t capacity) { - // allocate enough space for "capacity" elements of each array - // capacity cannot change when optional storage is specified - if (capacity >= mSize) { - const size_t sizeNeeded = getNeededSize(capacity); - void* buffer = mAllocator.alloc(sizeNeeded); - - // move all the items (one array at a time) from the old allocation to the new - // this also update the array pointers - move_each(buffer, capacity); - - // free the old buffer - std::swap(buffer, mArrayOffset[0]); - mAllocator.free(buffer); - - // and make sure to update the capacity - mCapacity = capacity; - } - } - - void ensureCapacity(size_t needed) { - if (UTILS_UNLIKELY(needed > mCapacity)) { - // not enough space, increase the capacity - const size_t capacity = (needed * 3 + 1) / 2; - setCapacity(capacity); - } - } - - // grow or shrink the array to the given size. When growing, new elements are constructed - // with their default constructor. when shrinking, discarded elements are destroyed. - // If the arrays don't have enough capacity, the capacity is increased accordingly - // (the capacity is set to 3/2 of the asked size). 
- UTILS_NOINLINE - void resize(size_t needed) { - ensureCapacity(needed); - resizeNoCheck(needed); - if (needed <= mCapacity) { - // TODO: see if we should shrink the arrays - } - } - - void clear() noexcept { - resizeNoCheck(0); - } - - - inline void swap(size_t i, size_t j) noexcept { - forEach([i, j](auto p) { - using std::swap; - swap(p[i], p[j]); - }); - } - - // remove and destroy the last element of each array - inline void pop_back() noexcept { - if (mSize) { - destroy_each(mSize - 1, mSize); - mSize--; - } - } - - // create an element at the end of each array - StructureOfArraysBase& push_back() noexcept { - resize(mSize + 1); - return *this; - } - - StructureOfArraysBase& push_back(Elements const& ... args) noexcept { - ensureCapacity(mSize + 1); - return push_back_unsafe(args...); - } - - StructureOfArraysBase& push_back(Elements&& ... args) noexcept { - ensureCapacity(mSize + 1); - return push_back_unsafe(std::forward(args)...); - } - - StructureOfArraysBase& push_back_unsafe(Elements const& ... args) noexcept { - const size_t last = mSize++; - size_t i = 0; - int UTILS_UNUSED dummy[] = { - (new(getArray(i) + last)Elements(args), i++, 0)... }; - return *this; - } - - StructureOfArraysBase& push_back_unsafe(Elements&& ... args) noexcept { - const size_t last = mSize++; - size_t i = 0; - int UTILS_UNUSED dummy[] = { - (new(getArray(i) + last)Elements(std::forward(args)), i++, 0)... }; - return *this; - } - - template - void forEach(F&& f, ARGS&& ... args) { - size_t i = 0; - int UTILS_UNUSED dummy[] = { - (f(getArray(i), std::forward(args)...), i++, 0)... }; - } - - // return a pointer to the first element of the ElementIndex]th array - template - TypeAt* data() noexcept { - return getArray>(ElementIndex); - } - - template - TypeAt const* data() const noexcept { - return getArray>(ElementIndex); - } - - template - TypeAt* begin() noexcept { - return getArray>(ElementIndex); - } - - template - TypeAt const* begin() const noexcept { - return getArray>(ElementIndex); - } - - template - TypeAt* end() noexcept { - return getArray>(ElementIndex) + size(); - } - - template - TypeAt const* end() const noexcept { - return getArray>(ElementIndex) + size(); - } - - template - Slice> slice() noexcept { - return { begin(), end() }; - } - - template - Slice> slice() const noexcept { - return { begin(), end() }; - } - - // return a reference to the index'th element of the ElementIndex'th array - template - TypeAt& elementAt(size_t index) noexcept { - return data()[index]; - } - - template - TypeAt const& elementAt(size_t index) const noexcept { - return data()[index]; - } - - // return a reference to the last element of the ElementIndex'th array - template - TypeAt& back() noexcept { - return data()[size() - 1]; - } - - template - TypeAt const& back() const noexcept { - return data()[size() - 1]; - } - - template - struct Field { - SoA& soa; - EntityInstanceBase::Type i; - using Type = typename SoA::template TypeAt; - - UTILS_ALWAYS_INLINE Field& operator = (Field&& rhs) noexcept { - soa.elementAt(i) = soa.elementAt(rhs.i); - return *this; - } - - // auto-conversion to the field's type - UTILS_ALWAYS_INLINE operator Type&() noexcept { - return soa.elementAt(i); - } - UTILS_ALWAYS_INLINE operator Type const&() const noexcept { - return soa.elementAt(i); - } - // dereferencing the selected field - UTILS_ALWAYS_INLINE Type& operator ->() noexcept { - return soa.elementAt(i); - } - UTILS_ALWAYS_INLINE Type const& operator ->() const noexcept { - return soa.elementAt(i); - } - // address-of the 
selected field - UTILS_ALWAYS_INLINE Type* operator &() noexcept { - return &soa.elementAt(i); - } - UTILS_ALWAYS_INLINE Type const* operator &() const noexcept { - return &soa.elementAt(i); - } - // assignment to the field - UTILS_ALWAYS_INLINE Type const& operator = (Type const& other) noexcept { - return (soa.elementAt(i) = other); - } - UTILS_ALWAYS_INLINE Type const& operator = (Type&& other) noexcept { - return (soa.elementAt(i) = other); - } - // comparisons - UTILS_ALWAYS_INLINE bool operator==(Type const& other) const { - return (soa.elementAt(i) == other); - } - UTILS_ALWAYS_INLINE bool operator!=(Type const& other) const { - return (soa.elementAt(i) != other); - } - // calling the field - template - UTILS_ALWAYS_INLINE decltype(auto) operator()(ARGS&& ... args) noexcept { - return soa.elementAt(i)(std::forward(args)...); - } - template - UTILS_ALWAYS_INLINE decltype(auto) operator()(ARGS&& ... args) const noexcept { - return soa.elementAt(i)(std::forward(args)...); - } - }; - -private: - template - T const* getArray(size_t arrayIndex) const { - return static_cast(mArrayOffset[arrayIndex]); - } - - template - T* getArray(size_t arrayIndex) { - return static_cast(mArrayOffset[arrayIndex]); - } - - inline void resizeNoCheck(size_t needed) noexcept { - assert(mCapacity >= needed); - if (needed < mSize) { - // we shrink the arrays - destroy_each(needed, mSize); - } else if (needed > mSize) { - // we grow the arrays - construct_each(mSize, needed); - } - // record the new size of the arrays - mSize = needed; - } - - // this calculate the offset adjusted for all data alignment of a given array - static inline size_t getOffset(size_t index, size_t capacity) noexcept { - auto offsets = getOffsets(capacity); - return offsets[index]; - } - - static inline std::array getOffsets(size_t capacity) noexcept { - // compute the required size of each array - const size_t sizes[] = { (sizeof(Elements) * capacity)... }; - - // we align each array to the same alignment guaranteed by malloc - const size_t align = alignof(std::max_align_t); - - // hopefully most of this gets unrolled and inlined - std::array offsets; - offsets[0] = 0; - #pragma unroll - for (size_t i = 1; i < kArrayCount; i++) { - size_t unalignment = sizes[i - 1] % align; - size_t alignment = unalignment ? 
(align - unalignment) : 0; - offsets[i] = offsets[i - 1] + (sizes[i - 1] + alignment); - } - return offsets; - } - - void construct_each(size_t from, size_t to) noexcept { - forEach([from, to](auto p) { - using T = typename std::decay::type; - // note: scalar types like int/float get initialized to zero - for (size_t i = from; i < to; i++) { - new(p + i) T(); - } - }); - } - - void destroy_each(size_t from, size_t to) noexcept { - forEach([from, to](auto p) { - using T = typename std::decay::type; - for (size_t i = from; i < to; i++) { - p[i].~T(); - } - }); - } - - void move_each(void* buffer, size_t capacity) noexcept { - auto offsets = getOffsets(capacity); - size_t index = 0; - if (mSize) { - auto size = mSize; // placate a compiler warning - forEach([buffer, &index, &offsets, size](auto p) { - using T = typename std::decay::type; - T* UTILS_RESTRICT b = static_cast(buffer); - - // go through each element and move them from the old array to the new - // then destroy them from the old array - T* UTILS_RESTRICT const arrayPointer = - reinterpret_cast(uintptr_t(b) + offsets[index]); - - // for trivial cases, just call memcpy() - if (std::is_trivially_copyable::value && - std::is_trivially_destructible::value) { - memcpy(arrayPointer, p, size * sizeof(T)); - } else { - for (size_t i = 0; i < size; i++) { - // we move an element by using the in-place move-constructor - new(arrayPointer + i) T(std::move(p[i])); - // and delete them by calling the destructor directly - p[i].~T(); - } - } - index++; - }); - } - - // update the pointers (the first offset will be filled later - for (size_t i = 1; i < kArrayCount; i++) { - mArrayOffset[i] = (char*)buffer + offsets[i]; - } - } - - // capacity in array elements - size_t mCapacity = 0; - // size in array elements - size_t mSize = 0; - // N pointers to each arrays - void *mArrayOffset[kArrayCount] = { nullptr }; - Allocator mAllocator; -}; - -template -inline -typename StructureOfArraysBase::StructureRef& -StructureOfArraysBase::StructureRef::operator=( - StructureOfArraysBase::StructureRef const& rhs) { - return operator=(Structure(rhs)); -} - -template -inline -typename StructureOfArraysBase::StructureRef& -StructureOfArraysBase::StructureRef::operator=( - StructureOfArraysBase::StructureRef&& rhs) noexcept { - return operator=(Structure(rhs)); -} - -template -template -inline -typename StructureOfArraysBase::StructureRef& -StructureOfArraysBase::StructureRef::assign( - StructureOfArraysBase::Structure const& rhs, std::index_sequence) { - // implements StructureRef& StructureRef::operator=(Structure const& rhs) - auto UTILS_UNUSED l = { (soa->elementAt(index) = std::get(rhs.elements), 0)... }; - return *this; -} - -template -template -inline -typename StructureOfArraysBase::StructureRef& -StructureOfArraysBase::StructureRef::assign( - StructureOfArraysBase::Structure&& rhs, std::index_sequence) noexcept { - // implements StructureRef& StructureRef::operator=(Structure&& rhs) noexcept - auto UTILS_UNUSED l = { - (soa->elementAt(index) = std::move(std::get(rhs.elements)), 0)... }; - return *this; -} - -template -using StructureOfArrays = StructureOfArraysBase, Elements ...>; - -} // namespace utils - -#endif // TNT_UTILS_STRUCTUREOFARRAYS_H - +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_STRUCTUREOFARRAYS_H +#define TNT_UTILS_STRUCTUREOFARRAYS_H + +#include // note: this is safe, see how std::array is used below (inline / private) +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace utils { + +template +class StructureOfArraysBase { + // number of elements + static constexpr const size_t kArrayCount = sizeof...(Elements); + +public: + using SoA = StructureOfArraysBase; + + // Type of the Nth array + template + using TypeAt = typename std::tuple_element>::type; + + // Number of arrays + static constexpr size_t getArrayCount() noexcept { return kArrayCount; } + + // Size needed to store "size" array elements + static size_t getNeededSize(size_t size) noexcept { + return getOffset(kArrayCount - 1, size) + sizeof(TypeAt) * size; + } + + // -------------------------------------------------------------------------------------------- + + class Structure; + template class Iterator; + using iterator = Iterator; + using const_iterator = Iterator; + using size_type = size_t; + using difference_type = ptrdiff_t; + + /* + * An object that represents a reference to the type dereferenced by iterator. + * In other words, it's the return type of iterator::operator*(), and since it + * cannot be a C++ reference (&), it's an object that acts like it. + */ + class StructureRef { + friend class Structure; + friend iterator; + friend const_iterator; + StructureOfArraysBase* const UTILS_RESTRICT soa; + size_t const index; + + StructureRef(StructureOfArraysBase* soa, size_t index) : soa(soa), index(index) { } + + // assigns a value_type to a reference (i.e. assigns to what's pointed to by the reference) + template + StructureRef& assign(Structure const& rhs, std::index_sequence); + + // assigns a value_type to a reference (i.e. assigns to what's pointed to by the reference) + template + StructureRef& assign(Structure&& rhs, std::index_sequence) noexcept; + + // objects pointed to by reference can be swapped, so provide the special swap() function. + friend void swap(StructureRef lhs, StructureRef rhs) { + lhs.soa->swap(lhs.index, rhs.index); + } + + public: + // references can be created by copy-assignment only + StructureRef(StructureRef const& rhs) noexcept : soa(rhs.soa), index(rhs.index) { } + + // copy the content of a reference to the content of this one + StructureRef& operator=(StructureRef const& rhs); + + // move the content of a reference to the content of this one + StructureRef& operator=(StructureRef&& rhs) noexcept; + + // copy a value_type to the content of this reference + StructureRef& operator=(Structure const& rhs) { + return assign(rhs, std::make_index_sequence()); + } + + // move a value_type to the content of this reference + StructureRef& operator=(Structure&& rhs) noexcept { + return assign(rhs, std::make_index_sequence()); + } + + // access the elements of this reference (i.e. 
the "fields" of the structure) + template TypeAt const& get() const { return soa->elementAt(index); } + template TypeAt& get() { return soa->elementAt(index); } + }; + + + /* + * The value_type of iterator. This is basically the "structure" of the SoA. + * Internally we're using a tuple<> to store the data. + * This object is not trivial to construct, as it copies an entry of the SoA. + */ + class Structure { + friend class StructureRef; + friend iterator; + friend const_iterator; + using Type = std::tuple::type...>; + Type elements; + + template + static Type init(StructureRef const& rhs, std::index_sequence) { + return Type{ rhs.soa->template elementAt(rhs.index)... }; + } + + template + static Type init(StructureRef&& rhs, std::index_sequence) noexcept { + return Type{ std::move(rhs.soa->template elementAt(rhs.index))... }; + } + + public: + Structure(Structure const& rhs) = default; + Structure(Structure&& rhs) noexcept = default; + Structure& operator=(Structure const& rhs) = default; + Structure& operator=(Structure&& rhs) noexcept = default; + + // initialize and assign from a StructureRef + Structure(StructureRef const& rhs) + : elements(init(rhs, std::make_index_sequence())) {} + Structure(StructureRef&& rhs) noexcept + : elements(init(rhs, std::make_index_sequence())) {} + Structure& operator=(StructureRef const& rhs) { return operator=(Structure(rhs)); } + Structure& operator=(StructureRef&& rhs) noexcept { return operator=(Structure(rhs)); } + + // access the elements of this value_Type (i.e. the "fields" of the structure) + template TypeAt const& get() const { return std::get(elements); } + template TypeAt& get() { return std::get(elements); } + }; + + + /* + * An iterator to the SoA. This is only intended to be used with STL's algorithm, e.g.: sort(). + * Normally, SoA is not iterated globally, but rather an array at a time. + * Iterating itself is not too costly, as well as dereferencing by reference. However, + * dereferencing by value is. 
+ */ + template + class Iterator { + friend class StructureOfArraysBase; + CVQualifiedSOAPointer soa; // don't use restrict, can have aliases if multiple iterators are created + size_t index; + + Iterator(CVQualifiedSOAPointer soa, size_t index) : soa(soa), index(index) {} + + public: + using value_type = Structure; + using reference = StructureRef; + using pointer = StructureRef*; // FIXME: this should be a StructurePtr type + using difference_type = ptrdiff_t; + using iterator_category = std::random_access_iterator_tag; + + Iterator(Iterator const& rhs) noexcept = default; + Iterator& operator=(Iterator const& rhs) = default; + + reference operator*() const { return { soa, index }; } + reference operator*() { return { soa, index }; } + reference operator[](size_t n) { return *(*this + n); } + + template TypeAt const& get() const { return soa->template elementAt(index); } + template TypeAt& get() { return soa->template elementAt(index); } + + Iterator& operator++() { ++index; return *this; } + Iterator& operator--() { --index; return *this; } + Iterator& operator+=(size_t n) { index += n; return *this; } + Iterator& operator-=(size_t n) { index -= n; return *this; } + Iterator operator+(size_t n) const { return { soa, index + n }; } + Iterator operator-(size_t n) const { return { soa, index - n }; } + difference_type operator-(Iterator const& rhs) const { return index - rhs.index; } + bool operator==(Iterator const& rhs) const { return (index == rhs.index); } + bool operator!=(Iterator const& rhs) const { return (index != rhs.index); } + bool operator>=(Iterator const& rhs) const { return (index >= rhs.index); } + bool operator> (Iterator const& rhs) const { return (index > rhs.index); } + bool operator<=(Iterator const& rhs) const { return (index <= rhs.index); } + bool operator< (Iterator const& rhs) const { return (index < rhs.index); } + + // Postfix operator needed by Microsoft STL. 
+ const Iterator operator++(int) { Iterator it(*this); index++; return it; } + const Iterator operator--(int) { Iterator it(*this); index--; return it; } + }; + + iterator begin() noexcept { return { this, 0u }; } + iterator end() noexcept { return { this, mSize }; } + const_iterator begin() const noexcept { return { this, 0u }; } + const_iterator end() const noexcept { return { this, mSize }; } + + // -------------------------------------------------------------------------------------------- + + StructureOfArraysBase() = default; + + explicit StructureOfArraysBase(size_t capacity) { + setCapacity(capacity); + } + + // not copyable for now + StructureOfArraysBase(StructureOfArraysBase const& rhs) = delete; + StructureOfArraysBase& operator=(StructureOfArraysBase const& rhs) = delete; + + // movability is trivial, so support it + StructureOfArraysBase(StructureOfArraysBase&& rhs) noexcept { + using std::swap; + swap(mCapacity, rhs.mCapacity); + swap(mSize, rhs.mSize); + swap(mArrayOffset, rhs.mArrayOffset); + swap(mAllocator, rhs.mAllocator); + } + + StructureOfArraysBase& operator=(StructureOfArraysBase&& rhs) noexcept { + if (this != &rhs) { + using std::swap; + swap(mCapacity, rhs.mCapacity); + swap(mSize, rhs.mSize); + swap(mArrayOffset, rhs.mArrayOffset); + swap(mAllocator, rhs.mAllocator); + } + return *this; + } + + ~StructureOfArraysBase() { + destroy_each(0, mSize); + mAllocator.free(mArrayOffset[0]); + } + + // -------------------------------------------------------------------------------------------- + + // return the size the array + size_t size() const noexcept { + return mSize; + } + + // return the capacity of the array + size_t capacity() const noexcept { + return mCapacity; + } + + // set the capacity of the array. the capacity cannot be smaller than the current size, + // the call is a no-op in that case. + UTILS_NOINLINE + void setCapacity(size_t capacity) { + // allocate enough space for "capacity" elements of each array + // capacity cannot change when optional storage is specified + if (capacity >= mSize) { + const size_t sizeNeeded = getNeededSize(capacity); + void* buffer = mAllocator.alloc(sizeNeeded); + + // move all the items (one array at a time) from the old allocation to the new + // this also update the array pointers + move_each(buffer, capacity); + + // free the old buffer + std::swap(buffer, mArrayOffset[0]); + mAllocator.free(buffer); + + // and make sure to update the capacity + mCapacity = capacity; + } + } + + void ensureCapacity(size_t needed) { + if (UTILS_UNLIKELY(needed > mCapacity)) { + // not enough space, increase the capacity + const size_t capacity = (needed * 3 + 1) / 2; + setCapacity(capacity); + } + } + + // grow or shrink the array to the given size. When growing, new elements are constructed + // with their default constructor. when shrinking, discarded elements are destroyed. + // If the arrays don't have enough capacity, the capacity is increased accordingly + // (the capacity is set to 3/2 of the asked size). 
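As an aside on the StructureOfArrays interface being re-added here, the sketch below shows the intended per-array access pattern. It is illustrative only and not part of the patch; the element types, field indices and function name are made up, and it relies on the utils::StructureOfArrays alias declared at the end of this header.

#include <utils/StructureOfArrays.h>

#include <cstddef>
#include <cstdint>

// Hypothetical record split into two parallel arrays: a float value and a
// dirty flag. Each field lives in its own tightly packed array.
using Records = utils::StructureOfArrays<float, uint8_t>;

static constexpr size_t VALUE = 0;
static constexpr size_t DIRTY = 1;

void soaExample() {
    Records records;
    records.setCapacity(16);                  // one allocation, laid out via getNeededSize()

    records.push_back(1.0f, uint8_t(0));      // appends one element to every array
    records.push_back(2.0f, uint8_t(1));

    // Per-array traversal: walk one field without touching the other.
    float sum = 0.0f;
    for (float const* p = records.begin<VALUE>(), *last = records.end<VALUE>(); p != last; ++p) {
        sum += *p;
    }
    (void) sum;

    records.elementAt<DIRTY>(0) = uint8_t(1); // one field of one "row"
    records.pop_back();                       // destroys the last element of every array
}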
+ UTILS_NOINLINE + void resize(size_t needed) { + ensureCapacity(needed); + resizeNoCheck(needed); + if (needed <= mCapacity) { + // TODO: see if we should shrink the arrays + } + } + + void clear() noexcept { + resizeNoCheck(0); + } + + + inline void swap(size_t i, size_t j) noexcept { + forEach([i, j](auto p) { + using std::swap; + swap(p[i], p[j]); + }); + } + + // remove and destroy the last element of each array + inline void pop_back() noexcept { + if (mSize) { + destroy_each(mSize - 1, mSize); + mSize--; + } + } + + // create an element at the end of each array + StructureOfArraysBase& push_back() noexcept { + resize(mSize + 1); + return *this; + } + + StructureOfArraysBase& push_back(Elements const& ... args) noexcept { + ensureCapacity(mSize + 1); + return push_back_unsafe(args...); + } + + StructureOfArraysBase& push_back(Elements&& ... args) noexcept { + ensureCapacity(mSize + 1); + return push_back_unsafe(std::forward(args)...); + } + + StructureOfArraysBase& push_back_unsafe(Elements const& ... args) noexcept { + const size_t last = mSize++; + size_t i = 0; + int UTILS_UNUSED dummy[] = { + (new(getArray(i) + last)Elements(args), i++, 0)... }; + return *this; + } + + StructureOfArraysBase& push_back_unsafe(Elements&& ... args) noexcept { + const size_t last = mSize++; + size_t i = 0; + int UTILS_UNUSED dummy[] = { + (new(getArray(i) + last)Elements(std::forward(args)), i++, 0)... }; + return *this; + } + + template + void forEach(F&& f, ARGS&& ... args) { + size_t i = 0; + int UTILS_UNUSED dummy[] = { + (f(getArray(i), std::forward(args)...), i++, 0)... }; + } + + // return a pointer to the first element of the ElementIndex]th array + template + TypeAt* data() noexcept { + return getArray>(ElementIndex); + } + + template + TypeAt const* data() const noexcept { + return getArray>(ElementIndex); + } + + template + TypeAt* begin() noexcept { + return getArray>(ElementIndex); + } + + template + TypeAt const* begin() const noexcept { + return getArray>(ElementIndex); + } + + template + TypeAt* end() noexcept { + return getArray>(ElementIndex) + size(); + } + + template + TypeAt const* end() const noexcept { + return getArray>(ElementIndex) + size(); + } + + template + Slice> slice() noexcept { + return { begin(), end() }; + } + + template + Slice> slice() const noexcept { + return { begin(), end() }; + } + + // return a reference to the index'th element of the ElementIndex'th array + template + TypeAt& elementAt(size_t index) noexcept { + return data()[index]; + } + + template + TypeAt const& elementAt(size_t index) const noexcept { + return data()[index]; + } + + // return a reference to the last element of the ElementIndex'th array + template + TypeAt& back() noexcept { + return data()[size() - 1]; + } + + template + TypeAt const& back() const noexcept { + return data()[size() - 1]; + } + + template + struct Field { + SoA& soa; + EntityInstanceBase::Type i; + using Type = typename SoA::template TypeAt; + + UTILS_ALWAYS_INLINE Field& operator = (Field&& rhs) noexcept { + soa.elementAt(i) = soa.elementAt(rhs.i); + return *this; + } + + // auto-conversion to the field's type + UTILS_ALWAYS_INLINE operator Type&() noexcept { + return soa.elementAt(i); + } + UTILS_ALWAYS_INLINE operator Type const&() const noexcept { + return soa.elementAt(i); + } + // dereferencing the selected field + UTILS_ALWAYS_INLINE Type& operator ->() noexcept { + return soa.elementAt(i); + } + UTILS_ALWAYS_INLINE Type const& operator ->() const noexcept { + return soa.elementAt(i); + } + // address-of the 
selected field + UTILS_ALWAYS_INLINE Type* operator &() noexcept { + return &soa.elementAt(i); + } + UTILS_ALWAYS_INLINE Type const* operator &() const noexcept { + return &soa.elementAt(i); + } + // assignment to the field + UTILS_ALWAYS_INLINE Type const& operator = (Type const& other) noexcept { + return (soa.elementAt(i) = other); + } + UTILS_ALWAYS_INLINE Type const& operator = (Type&& other) noexcept { + return (soa.elementAt(i) = other); + } + // comparisons + UTILS_ALWAYS_INLINE bool operator==(Type const& other) const { + return (soa.elementAt(i) == other); + } + UTILS_ALWAYS_INLINE bool operator!=(Type const& other) const { + return (soa.elementAt(i) != other); + } + // calling the field + template + UTILS_ALWAYS_INLINE decltype(auto) operator()(ARGS&& ... args) noexcept { + return soa.elementAt(i)(std::forward(args)...); + } + template + UTILS_ALWAYS_INLINE decltype(auto) operator()(ARGS&& ... args) const noexcept { + return soa.elementAt(i)(std::forward(args)...); + } + }; + +private: + template + T const* getArray(size_t arrayIndex) const { + return static_cast(mArrayOffset[arrayIndex]); + } + + template + T* getArray(size_t arrayIndex) { + return static_cast(mArrayOffset[arrayIndex]); + } + + inline void resizeNoCheck(size_t needed) noexcept { + assert(mCapacity >= needed); + if (needed < mSize) { + // we shrink the arrays + destroy_each(needed, mSize); + } else if (needed > mSize) { + // we grow the arrays + construct_each(mSize, needed); + } + // record the new size of the arrays + mSize = needed; + } + + // this calculate the offset adjusted for all data alignment of a given array + static inline size_t getOffset(size_t index, size_t capacity) noexcept { + auto offsets = getOffsets(capacity); + return offsets[index]; + } + + static inline std::array getOffsets(size_t capacity) noexcept { + // compute the required size of each array + const size_t sizes[] = { (sizeof(Elements) * capacity)... }; + + // we align each array to the same alignment guaranteed by malloc + const size_t align = alignof(std::max_align_t); + + // hopefully most of this gets unrolled and inlined + std::array offsets; + offsets[0] = 0; + #pragma unroll + for (size_t i = 1; i < kArrayCount; i++) { + size_t unalignment = sizes[i - 1] % align; + size_t alignment = unalignment ? 
(align - unalignment) : 0; + offsets[i] = offsets[i - 1] + (sizes[i - 1] + alignment); + } + return offsets; + } + + void construct_each(size_t from, size_t to) noexcept { + forEach([from, to](auto p) { + using T = typename std::decay::type; + // note: scalar types like int/float get initialized to zero + for (size_t i = from; i < to; i++) { + new(p + i) T(); + } + }); + } + + void destroy_each(size_t from, size_t to) noexcept { + forEach([from, to](auto p) { + using T = typename std::decay::type; + for (size_t i = from; i < to; i++) { + p[i].~T(); + } + }); + } + + void move_each(void* buffer, size_t capacity) noexcept { + auto offsets = getOffsets(capacity); + size_t index = 0; + if (mSize) { + auto size = mSize; // placate a compiler warning + forEach([buffer, &index, &offsets, size](auto p) { + using T = typename std::decay::type; + T* UTILS_RESTRICT b = static_cast(buffer); + + // go through each element and move them from the old array to the new + // then destroy them from the old array + T* UTILS_RESTRICT const arrayPointer = + reinterpret_cast(uintptr_t(b) + offsets[index]); + + // for trivial cases, just call memcpy() + if (std::is_trivially_copyable::value && + std::is_trivially_destructible::value) { + memcpy(arrayPointer, p, size * sizeof(T)); + } else { + for (size_t i = 0; i < size; i++) { + // we move an element by using the in-place move-constructor + new(arrayPointer + i) T(std::move(p[i])); + // and delete them by calling the destructor directly + p[i].~T(); + } + } + index++; + }); + } + + // update the pointers (the first offset will be filled later + for (size_t i = 1; i < kArrayCount; i++) { + mArrayOffset[i] = (char*)buffer + offsets[i]; + } + } + + // capacity in array elements + size_t mCapacity = 0; + // size in array elements + size_t mSize = 0; + // N pointers to each arrays + void *mArrayOffset[kArrayCount] = { nullptr }; + Allocator mAllocator; +}; + +template +inline +typename StructureOfArraysBase::StructureRef& +StructureOfArraysBase::StructureRef::operator=( + StructureOfArraysBase::StructureRef const& rhs) { + return operator=(Structure(rhs)); +} + +template +inline +typename StructureOfArraysBase::StructureRef& +StructureOfArraysBase::StructureRef::operator=( + StructureOfArraysBase::StructureRef&& rhs) noexcept { + return operator=(Structure(rhs)); +} + +template +template +inline +typename StructureOfArraysBase::StructureRef& +StructureOfArraysBase::StructureRef::assign( + StructureOfArraysBase::Structure const& rhs, std::index_sequence) { + // implements StructureRef& StructureRef::operator=(Structure const& rhs) + auto UTILS_UNUSED l = { (soa->elementAt(index) = std::get(rhs.elements), 0)... }; + return *this; +} + +template +template +inline +typename StructureOfArraysBase::StructureRef& +StructureOfArraysBase::StructureRef::assign( + StructureOfArraysBase::Structure&& rhs, std::index_sequence) noexcept { + // implements StructureRef& StructureRef::operator=(Structure&& rhs) noexcept + auto UTILS_UNUSED l = { + (soa->elementAt(index) = std::move(std::get(rhs.elements)), 0)... 
}; + return *this; +} + +template +using StructureOfArrays = StructureOfArraysBase, Elements ...>; + +} // namespace utils + +#endif // TNT_UTILS_STRUCTUREOFARRAYS_H + diff --git a/ios/include/utils/Systrace.h b/ios/include/utils/Systrace.h index eaab835a..67553f77 100644 --- a/ios/include/utils/Systrace.h +++ b/ios/include/utils/Systrace.h @@ -1,278 +1,278 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_SYSTRACE_H -#define TNT_UTILS_SYSTRACE_H - - -#define SYSTRACE_TAG_NEVER (0) -#define SYSTRACE_TAG_ALWAYS (1<<0) -#define SYSTRACE_TAG_FILAMENT (1<<1) // don't change, used in makefiles -#define SYSTRACE_TAG_JOBSYSTEM (1<<2) - - -#if defined(ANDROID) - -#include - -#include -#include -#include - -#include - -/* - * The SYSTRACE_ macros use SYSTRACE_TAG as a the TAG, which should be defined - * before this file is included. If not, the SYSTRACE_TAG_ALWAYS tag will be used. - */ - -#ifndef SYSTRACE_TAG -#define SYSTRACE_TAG (SYSTRACE_TAG_ALWAYS) -#endif - -// enable tracing -#define SYSTRACE_ENABLE() ::utils::details::Systrace::enable(SYSTRACE_TAG) - -// disable tracing -#define SYSTRACE_DISABLE() ::utils::details::Systrace::disable(SYSTRACE_TAG) - - -/** - * Creates a Systrace context in the current scope. needed for calling all other systrace - * commands below. - */ -#define SYSTRACE_CONTEXT() ::utils::details::Systrace ___tracer(SYSTRACE_TAG) - - -// SYSTRACE_NAME traces the beginning and end of the current scope. To trace -// the correct start and end times this macro should be declared first in the -// scope body. -// It also automatically creates a Systrace context -#define SYSTRACE_NAME(name) ::utils::details::ScopedTrace ___tracer(SYSTRACE_TAG, name) - -// SYSTRACE_CALL is an SYSTRACE_NAME that uses the current function name. -#define SYSTRACE_CALL() SYSTRACE_NAME(__FUNCTION__) - -#define SYSTRACE_NAME_BEGIN(name) \ - ___tracer.traceBegin(SYSTRACE_TAG, name) - -#define SYSTRACE_NAME_END() \ - ___tracer.traceEnd(SYSTRACE_TAG) - - -/** - * Trace the beginning of an asynchronous event. Unlike ATRACE_BEGIN/ATRACE_END - * contexts, asynchronous events do not need to be nested. The name describes - * the event, and the cookie provides a unique identifier for distinguishing - * simultaneous events. The name and cookie used to begin an event must be - * used to end it. - */ -#define SYSTRACE_ASYNC_BEGIN(name, cookie) \ - ___tracer.asyncBegin(SYSTRACE_TAG, name, cookie) - -/** - * Trace the end of an asynchronous event. - * This should have a corresponding SYSTRACE_ASYNC_BEGIN. - */ -#define SYSTRACE_ASYNC_END(name, cookie) \ - ___tracer.asyncEnd(SYSTRACE_TAG, name, cookie) - -/** - * Traces an integer counter value. name is used to identify the counter. - * This can be used to track how a value changes over time. 
- */ -#define SYSTRACE_VALUE32(name, val) \ - ___tracer.value(SYSTRACE_TAG, name, int32_t(val)) - -#define SYSTRACE_VALUE64(name, val) \ - ___tracer.value(SYSTRACE_TAG, name, int64_t(val)) - -// ------------------------------------------------------------------------------------------------ -// No user serviceable code below... -// ------------------------------------------------------------------------------------------------ - -namespace utils { -namespace details { - -class Systrace { -public: - - enum tags { - NEVER = SYSTRACE_TAG_NEVER, - ALWAYS = SYSTRACE_TAG_ALWAYS, - FILAMENT = SYSTRACE_TAG_FILAMENT, - JOBSYSTEM = SYSTRACE_TAG_JOBSYSTEM - // we could define more TAGS here, as we need them. - }; - - Systrace(uint32_t tag) noexcept { - if (tag) init(tag); - } - - static void enable(uint32_t tags) noexcept; - static void disable(uint32_t tags) noexcept; - - - inline void traceBegin(uint32_t tag, const char* name) noexcept { - if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { - beginSection(this, name); - } - } - - inline void traceEnd(uint32_t tag) noexcept { - if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { - endSection(this); - } - } - - inline void asyncBegin(uint32_t tag, const char* name, int32_t cookie) noexcept { - if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { - beginAsyncSection(this, name, cookie); - } - } - - inline void asyncEnd(uint32_t tag, const char* name, int32_t cookie) noexcept { - if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { - endAsyncSection(this, name, cookie); - } - } - - inline void value(uint32_t tag, const char* name, int32_t value) noexcept { - if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { - setCounter(this, name, value); - } - } - - inline void value(uint32_t tag, const char* name, int64_t value) noexcept { - if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { - setCounter(this, name, value); - } - } - -private: - friend class ScopedTrace; - - // whether tracing is supported at all by the platform - - using ATrace_isEnabled_t = bool (*)(void); - using ATrace_beginSection_t = void (*)(const char* sectionName); - using ATrace_endSection_t = void (*)(void); - using ATrace_beginAsyncSection_t = void (*)(const char* sectionName, int32_t cookie); - using ATrace_endAsyncSection_t = void (*)(const char* sectionName, int32_t cookie); - using ATrace_setCounter_t = void (*)(const char* counterName, int64_t counterValue); - - struct GlobalState { - bool isTracingAvailable; - std::atomic isTracingEnabled; - int markerFd; - - ATrace_isEnabled_t ATrace_isEnabled; - ATrace_beginSection_t ATrace_beginSection; - ATrace_endSection_t ATrace_endSection; - ATrace_beginAsyncSection_t ATrace_beginAsyncSection; - ATrace_endAsyncSection_t ATrace_endAsyncSection; - ATrace_setCounter_t ATrace_setCounter; - - void (*beginSection)(Systrace* that, const char* name); - void (*endSection)(Systrace* that); - void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie); - void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie); - void (*setCounter)(Systrace* that, const char* name, int64_t value); - }; - - static GlobalState sGlobalState; - - - // per-instance versions for better performance - ATrace_isEnabled_t ATrace_isEnabled; - ATrace_beginSection_t ATrace_beginSection; - ATrace_endSection_t ATrace_endSection; - ATrace_beginAsyncSection_t ATrace_beginAsyncSection; - ATrace_endAsyncSection_t ATrace_endAsyncSection; - ATrace_setCounter_t ATrace_setCounter; - - void (*beginSection)(Systrace* that, const char* name); - void (*endSection)(Systrace* that); - 
void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie); - void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie); - void (*setCounter)(Systrace* that, const char* name, int64_t value); - - void init(uint32_t tag) noexcept; - - // cached values for faster access, no need to be initialized - bool mIsTracingEnabled; - int mMarkerFd = -1; - pid_t mPid; - - static void setup() noexcept; - static void init_once() noexcept; - static bool isTracingEnabled(uint32_t tag) noexcept; - - static void begin_body(int fd, int pid, const char* name) noexcept; - static void end_body(int fd, int pid) noexcept; - static void async_begin_body(int fd, int pid, const char* name, int32_t cookie) noexcept; - static void async_end_body(int fd, int pid, const char* name, int32_t cookie) noexcept; - static void int64_body(int fd, int pid, const char* name, int64_t value) noexcept; -}; - -// ------------------------------------------------------------------------------------------------ - -class ScopedTrace { -public: - // we don't inline this because it's relatively heavy due to a global check - ScopedTrace(uint32_t tag, const char* name) noexcept : mTrace(tag), mTag(tag) { - mTrace.traceBegin(tag, name); - } - - inline ~ScopedTrace() noexcept { - mTrace.traceEnd(mTag); - } - - inline void value(uint32_t tag, const char* name, int32_t v) noexcept { - mTrace.value(tag, name, v); - } - - inline void value(uint32_t tag, const char* name, int64_t v) noexcept { - mTrace.value(tag, name, v); - } - -private: - Systrace mTrace; - const uint32_t mTag; -}; - -} // namespace details -} // namespace utils - -// ------------------------------------------------------------------------------------------------ -#else // !ANDROID -// ------------------------------------------------------------------------------------------------ - -#define SYSTRACE_ENABLE() -#define SYSTRACE_DISABLE() -#define SYSTRACE_CONTEXT() -#define SYSTRACE_NAME(name) -#define SYSTRACE_NAME_BEGIN(name) -#define SYSTRACE_NAME_END() -#define SYSTRACE_CALL() -#define SYSTRACE_ASYNC_BEGIN(name, cookie) -#define SYSTRACE_ASYNC_END(name, cookie) -#define SYSTRACE_VALUE32(name, val) -#define SYSTRACE_VALUE64(name, val) - -#endif // ANDROID - -#endif // TNT_UTILS_SYSTRACE_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_SYSTRACE_H +#define TNT_UTILS_SYSTRACE_H + + +#define SYSTRACE_TAG_NEVER (0) +#define SYSTRACE_TAG_ALWAYS (1<<0) +#define SYSTRACE_TAG_FILAMENT (1<<1) // don't change, used in makefiles +#define SYSTRACE_TAG_JOBSYSTEM (1<<2) + + +#if defined(__ANDROID__) + +#include + +#include +#include +#include + +#include + +/* + * The SYSTRACE_ macros use SYSTRACE_TAG as a the TAG, which should be defined + * before this file is included. If not, the SYSTRACE_TAG_ALWAYS tag will be used. 
+ */ + +#ifndef SYSTRACE_TAG +#define SYSTRACE_TAG (SYSTRACE_TAG_ALWAYS) +#endif + +// enable tracing +#define SYSTRACE_ENABLE() ::utils::details::Systrace::enable(SYSTRACE_TAG) + +// disable tracing +#define SYSTRACE_DISABLE() ::utils::details::Systrace::disable(SYSTRACE_TAG) + + +/** + * Creates a Systrace context in the current scope. needed for calling all other systrace + * commands below. + */ +#define SYSTRACE_CONTEXT() ::utils::details::Systrace ___tracer(SYSTRACE_TAG) + + +// SYSTRACE_NAME traces the beginning and end of the current scope. To trace +// the correct start and end times this macro should be declared first in the +// scope body. +// It also automatically creates a Systrace context +#define SYSTRACE_NAME(name) ::utils::details::ScopedTrace ___tracer(SYSTRACE_TAG, name) + +// SYSTRACE_CALL is an SYSTRACE_NAME that uses the current function name. +#define SYSTRACE_CALL() SYSTRACE_NAME(__FUNCTION__) + +#define SYSTRACE_NAME_BEGIN(name) \ + ___tracer.traceBegin(SYSTRACE_TAG, name) + +#define SYSTRACE_NAME_END() \ + ___tracer.traceEnd(SYSTRACE_TAG) + + +/** + * Trace the beginning of an asynchronous event. Unlike ATRACE_BEGIN/ATRACE_END + * contexts, asynchronous events do not need to be nested. The name describes + * the event, and the cookie provides a unique identifier for distinguishing + * simultaneous events. The name and cookie used to begin an event must be + * used to end it. + */ +#define SYSTRACE_ASYNC_BEGIN(name, cookie) \ + ___tracer.asyncBegin(SYSTRACE_TAG, name, cookie) + +/** + * Trace the end of an asynchronous event. + * This should have a corresponding SYSTRACE_ASYNC_BEGIN. + */ +#define SYSTRACE_ASYNC_END(name, cookie) \ + ___tracer.asyncEnd(SYSTRACE_TAG, name, cookie) + +/** + * Traces an integer counter value. name is used to identify the counter. + * This can be used to track how a value changes over time. + */ +#define SYSTRACE_VALUE32(name, val) \ + ___tracer.value(SYSTRACE_TAG, name, int32_t(val)) + +#define SYSTRACE_VALUE64(name, val) \ + ___tracer.value(SYSTRACE_TAG, name, int64_t(val)) + +// ------------------------------------------------------------------------------------------------ +// No user serviceable code below... +// ------------------------------------------------------------------------------------------------ + +namespace utils { +namespace details { + +class Systrace { +public: + + enum tags { + NEVER = SYSTRACE_TAG_NEVER, + ALWAYS = SYSTRACE_TAG_ALWAYS, + FILAMENT = SYSTRACE_TAG_FILAMENT, + JOBSYSTEM = SYSTRACE_TAG_JOBSYSTEM + // we could define more TAGS here, as we need them. 
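+        // (Added note, for illustration: a translation unit usually selects one of these
+        //  tags by defining SYSTRACE_TAG before including this header, e.g.
+        //      #define SYSTRACE_TAG SYSTRACE_TAG_FILAMENT
+        //      #include <utils/Systrace.h>
+        //  If SYSTRACE_TAG is not defined, SYSTRACE_TAG_ALWAYS is used, as documented above.)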
+ }; + + Systrace(uint32_t tag) noexcept { + if (tag) init(tag); + } + + static void enable(uint32_t tags) noexcept; + static void disable(uint32_t tags) noexcept; + + + inline void traceBegin(uint32_t tag, const char* name) noexcept { + if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { + beginSection(this, name); + } + } + + inline void traceEnd(uint32_t tag) noexcept { + if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { + endSection(this); + } + } + + inline void asyncBegin(uint32_t tag, const char* name, int32_t cookie) noexcept { + if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { + beginAsyncSection(this, name, cookie); + } + } + + inline void asyncEnd(uint32_t tag, const char* name, int32_t cookie) noexcept { + if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { + endAsyncSection(this, name, cookie); + } + } + + inline void value(uint32_t tag, const char* name, int32_t value) noexcept { + if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { + setCounter(this, name, value); + } + } + + inline void value(uint32_t tag, const char* name, int64_t value) noexcept { + if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) { + setCounter(this, name, value); + } + } + +private: + friend class ScopedTrace; + + // whether tracing is supported at all by the platform + + using ATrace_isEnabled_t = bool (*)(void); + using ATrace_beginSection_t = void (*)(const char* sectionName); + using ATrace_endSection_t = void (*)(void); + using ATrace_beginAsyncSection_t = void (*)(const char* sectionName, int32_t cookie); + using ATrace_endAsyncSection_t = void (*)(const char* sectionName, int32_t cookie); + using ATrace_setCounter_t = void (*)(const char* counterName, int64_t counterValue); + + struct GlobalState { + bool isTracingAvailable; + std::atomic isTracingEnabled; + int markerFd; + + ATrace_isEnabled_t ATrace_isEnabled; + ATrace_beginSection_t ATrace_beginSection; + ATrace_endSection_t ATrace_endSection; + ATrace_beginAsyncSection_t ATrace_beginAsyncSection; + ATrace_endAsyncSection_t ATrace_endAsyncSection; + ATrace_setCounter_t ATrace_setCounter; + + void (*beginSection)(Systrace* that, const char* name); + void (*endSection)(Systrace* that); + void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie); + void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie); + void (*setCounter)(Systrace* that, const char* name, int64_t value); + }; + + static GlobalState sGlobalState; + + + // per-instance versions for better performance + ATrace_isEnabled_t ATrace_isEnabled; + ATrace_beginSection_t ATrace_beginSection; + ATrace_endSection_t ATrace_endSection; + ATrace_beginAsyncSection_t ATrace_beginAsyncSection; + ATrace_endAsyncSection_t ATrace_endAsyncSection; + ATrace_setCounter_t ATrace_setCounter; + + void (*beginSection)(Systrace* that, const char* name); + void (*endSection)(Systrace* that); + void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie); + void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie); + void (*setCounter)(Systrace* that, const char* name, int64_t value); + + void init(uint32_t tag) noexcept; + + // cached values for faster access, no need to be initialized + bool mIsTracingEnabled; + int mMarkerFd = -1; + pid_t mPid; + + static void setup() noexcept; + static void init_once() noexcept; + static bool isTracingEnabled(uint32_t tag) noexcept; + + static void begin_body(int fd, int pid, const char* name) noexcept; + static void end_body(int fd, int pid) noexcept; + static void async_begin_body(int fd, int pid, const char* name, 
int32_t cookie) noexcept; + static void async_end_body(int fd, int pid, const char* name, int32_t cookie) noexcept; + static void int64_body(int fd, int pid, const char* name, int64_t value) noexcept; +}; + +// ------------------------------------------------------------------------------------------------ + +class ScopedTrace { +public: + // we don't inline this because it's relatively heavy due to a global check + ScopedTrace(uint32_t tag, const char* name) noexcept : mTrace(tag), mTag(tag) { + mTrace.traceBegin(tag, name); + } + + inline ~ScopedTrace() noexcept { + mTrace.traceEnd(mTag); + } + + inline void value(uint32_t tag, const char* name, int32_t v) noexcept { + mTrace.value(tag, name, v); + } + + inline void value(uint32_t tag, const char* name, int64_t v) noexcept { + mTrace.value(tag, name, v); + } + +private: + Systrace mTrace; + const uint32_t mTag; +}; + +} // namespace details +} // namespace utils + +// ------------------------------------------------------------------------------------------------ +#else // !ANDROID +// ------------------------------------------------------------------------------------------------ + +#define SYSTRACE_ENABLE() +#define SYSTRACE_DISABLE() +#define SYSTRACE_CONTEXT() +#define SYSTRACE_NAME(name) +#define SYSTRACE_NAME_BEGIN(name) +#define SYSTRACE_NAME_END() +#define SYSTRACE_CALL() +#define SYSTRACE_ASYNC_BEGIN(name, cookie) +#define SYSTRACE_ASYNC_END(name, cookie) +#define SYSTRACE_VALUE32(name, val) +#define SYSTRACE_VALUE64(name, val) + +#endif // ANDROID + +#endif // TNT_UTILS_SYSTRACE_H diff --git a/ios/include/utils/ThermalManager.h b/ios/include/utils/ThermalManager.h new file mode 100644 index 00000000..fc38d88e --- /dev/null +++ b/ios/include/utils/ThermalManager.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_THERMALMANAGER_H +#define TNT_UTILS_THERMALMANAGER_H + +#if defined(__ANDROID__) +#include +#else +#include +#endif + +#endif // TNT_UTILS_THERMALMANAGER_H diff --git a/ios/include/utils/WorkStealingDequeue.h b/ios/include/utils/WorkStealingDequeue.h index 073b9b64..73b1ce6e 100644 --- a/ios/include/utils/WorkStealingDequeue.h +++ b/ios/include/utils/WorkStealingDequeue.h @@ -1,202 +1,202 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef TNT_UTILS_WORKSTEALINGDEQUEUE_H -#define TNT_UTILS_WORKSTEALINGDEQUEUE_H - -#include - -#include -#include - -namespace utils { - -/* - * A templated, lockless, fixed-size work-stealing dequeue - * - * - * top bottom - * v v - * |----|----|----|----|----|----| - * steal() push(), pop() - * any thread main thread - * - * - */ -template -class WorkStealingDequeue { - static_assert(!(COUNT & (COUNT - 1)), "COUNT must be a power of two"); - static constexpr size_t MASK = COUNT - 1; - - // mTop and mBottom must be signed integers. We use 64-bits atomics so we don't have - // to worry about wrapping around. - using index_t = int64_t; - - std::atomic mTop = { 0 }; // written/read in pop()/steal() - std::atomic mBottom = { 0 }; // written only in pop(), read in push(), steal() - - TYPE mItems[COUNT]; - - // NOTE: it's not safe to return a reference because getItemAt() can be called - // concurrently and the caller could std::move() the item unsafely. - TYPE getItemAt(index_t index) noexcept { return mItems[index & MASK]; } - - void setItemAt(index_t index, TYPE item) noexcept { mItems[index & MASK] = item; } - -public: - using value_type = TYPE; - - inline void push(TYPE item) noexcept; - inline TYPE pop() noexcept; - inline TYPE steal() noexcept; - - size_t getSize() const noexcept { return COUNT; } - - // for debugging only... - size_t getCount() const noexcept { - index_t bottom = mBottom.load(std::memory_order_relaxed); - index_t top = mTop.load(std::memory_order_relaxed); - return bottom - top; - } -}; - -/* - * Adds an item at the BOTTOM of the queue. - * - * Must be called from the main thread. - */ -template -void WorkStealingDequeue::push(TYPE item) noexcept { - // std::memory_order_relaxed is sufficient because this load doesn't acquire anything from - // another thread. mBottom is only written in pop() which cannot be concurrent with push() - index_t bottom = mBottom.load(std::memory_order_relaxed); - setItemAt(bottom, item); - - // std::memory_order_release is used because we release the item we just pushed to other - // threads which are calling steal(). - mBottom.store(bottom + 1, std::memory_order_release); -} - -/* - * Removes an item from the BOTTOM of the queue. - * - * Must be called from the main thread. - */ -template -TYPE WorkStealingDequeue::pop() noexcept { - // std::memory_order_seq_cst is needed to guarantee ordering in steal() - // Note however that this is not a typical acquire/release operation: - // - not acquire because mBottom is only written in push() which is not concurrent - // - not release because we're not publishing anything to steal() here - // - // QUESTION: does this prevent mTop load below to be reordered before the "store" part of - // fetch_sub()? Hopefully it does. If not we'd need a full memory barrier. - // - index_t bottom = mBottom.fetch_sub(1, std::memory_order_seq_cst) - 1; - - // bottom could be -1 if we tried to pop() from an empty queue. This will be corrected below. - assert( bottom >= -1 ); - - // std::memory_order_seq_cst is needed to guarantee ordering in steal() - // Note however that this is not a typical acquire operation - // (i.e. other thread's writes of mTop don't publish data) - index_t top = mTop.load(std::memory_order_seq_cst); - - if (top < bottom) { - // Queue isn't empty and it's not the last item, just return it, this is the common case. 
- return getItemAt(bottom); - } - - TYPE item{}; - if (top == bottom) { - // we just took the last item - item = getItemAt(bottom); - - // Because we know we took the last item, we could be racing with steal() -- the last - // item being both at the top and bottom of the queue. - // We resolve this potential race by also stealing that item from ourselves. - if (mTop.compare_exchange_strong(top, top + 1, - std::memory_order_seq_cst, - std::memory_order_relaxed)) { - // success: we stole our last item from ourself, meaning that a concurrent steal() - // would have failed. - // mTop now equals top + 1, we adjust top to make the queue empty. - top++; - } else { - // failure: mTop was not equal to top, which means the item was stolen under our feet. - // top now equals to mTop. Simply discard the item we just popped. - // The queue is now empty. - item = TYPE(); - } - } else { - // We could be here if the item was stolen just before we read mTop, we'll adjust - // mBottom below. - assert(top - bottom == 1); - } - - // std::memory_order_relaxed used because we're not publishing any data. - // no concurrent writes to mBottom possible, it's always safe to write mBottom. - mBottom.store(top, std::memory_order_relaxed); - return item; -} - -/* - * Steals an item from the TOP of another thread's queue. - * - * This can be called concurrently with steal(), push() or pop() - * - * steal() never fails, either there is an item and it atomically takes it, or there isn't and - * it returns an empty item. - */ -template -TYPE WorkStealingDequeue::steal() noexcept { - while (true) { - /* - * Note: A Key component of this algorithm is that mTop is read before mBottom here - * (and observed as such in other threads) - */ - - // std::memory_order_seq_cst is needed to guarantee ordering in pop() - // Note however that this is not a typical acquire operation - // (i.e. other thread's writes of mTop don't publish data) - index_t top = mTop.load(std::memory_order_seq_cst); - - // std::memory_order_acquire is needed because we're acquiring items published in push(). - // std::memory_order_seq_cst is needed to guarantee ordering in pop() - index_t bottom = mBottom.load(std::memory_order_seq_cst); - - if (top >= bottom) { - // queue is empty - return TYPE(); - } - - // The queue isn't empty - TYPE item(getItemAt(top)); - if (mTop.compare_exchange_strong(top, top + 1, - std::memory_order_seq_cst, - std::memory_order_relaxed)) { - // success: we stole an item, just return it. - return item; - } - // failure: the item we just tried to steal was pop()'ed under our feet, - // simply discard it; nothing to do -- it's okay to try again. - } -} - - -} // namespace utils - -#endif // TNT_UTILS_WORKSTEALINGDEQUEUE_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_WORKSTEALINGDEQUEUE_H +#define TNT_UTILS_WORKSTEALINGDEQUEUE_H + +#include + +#include +#include + +namespace utils { + +/* + * A templated, lockless, fixed-size work-stealing dequeue + * + * + * top bottom + * v v + * |----|----|----|----|----|----| + * steal() push(), pop() + * any thread main thread + * + * + */ +template +class WorkStealingDequeue { + static_assert(!(COUNT & (COUNT - 1)), "COUNT must be a power of two"); + static constexpr size_t MASK = COUNT - 1; + + // mTop and mBottom must be signed integers. We use 64-bits atomics so we don't have + // to worry about wrapping around. + using index_t = int64_t; + + std::atomic mTop = { 0 }; // written/read in pop()/steal() + std::atomic mBottom = { 0 }; // written only in pop(), read in push(), steal() + + TYPE mItems[COUNT]; + + // NOTE: it's not safe to return a reference because getItemAt() can be called + // concurrently and the caller could std::move() the item unsafely. + TYPE getItemAt(index_t index) noexcept { return mItems[index & MASK]; } + + void setItemAt(index_t index, TYPE item) noexcept { mItems[index & MASK] = item; } + +public: + using value_type = TYPE; + + inline void push(TYPE item) noexcept; + inline TYPE pop() noexcept; + inline TYPE steal() noexcept; + + size_t getSize() const noexcept { return COUNT; } + + // for debugging only... + size_t getCount() const noexcept { + index_t bottom = mBottom.load(std::memory_order_relaxed); + index_t top = mTop.load(std::memory_order_relaxed); + return bottom - top; + } +}; + +/* + * Adds an item at the BOTTOM of the queue. + * + * Must be called from the main thread. + */ +template +void WorkStealingDequeue::push(TYPE item) noexcept { + // std::memory_order_relaxed is sufficient because this load doesn't acquire anything from + // another thread. mBottom is only written in pop() which cannot be concurrent with push() + index_t bottom = mBottom.load(std::memory_order_relaxed); + setItemAt(bottom, item); + + // std::memory_order_release is used because we release the item we just pushed to other + // threads which are calling steal(). + mBottom.store(bottom + 1, std::memory_order_release); +} + +/* + * Removes an item from the BOTTOM of the queue. + * + * Must be called from the main thread. + */ +template +TYPE WorkStealingDequeue::pop() noexcept { + // std::memory_order_seq_cst is needed to guarantee ordering in steal() + // Note however that this is not a typical acquire/release operation: + // - not acquire because mBottom is only written in push() which is not concurrent + // - not release because we're not publishing anything to steal() here + // + // QUESTION: does this prevent mTop load below to be reordered before the "store" part of + // fetch_sub()? Hopefully it does. If not we'd need a full memory barrier. + // + index_t bottom = mBottom.fetch_sub(1, std::memory_order_seq_cst) - 1; + + // bottom could be -1 if we tried to pop() from an empty queue. This will be corrected below. + assert( bottom >= -1 ); + + // std::memory_order_seq_cst is needed to guarantee ordering in steal() + // Note however that this is not a typical acquire operation + // (i.e. other thread's writes of mTop don't publish data) + index_t top = mTop.load(std::memory_order_seq_cst); + + if (top < bottom) { + // Queue isn't empty and it's not the last item, just return it, this is the common case. 
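+        // (Note added for clarity: mBottom keeps the decremented value on this path,
+        //  which is what actually removes the returned slot from the deque.)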
+ return getItemAt(bottom); + } + + TYPE item{}; + if (top == bottom) { + // we just took the last item + item = getItemAt(bottom); + + // Because we know we took the last item, we could be racing with steal() -- the last + // item being both at the top and bottom of the queue. + // We resolve this potential race by also stealing that item from ourselves. + if (mTop.compare_exchange_strong(top, top + 1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + // success: we stole our last item from ourself, meaning that a concurrent steal() + // would have failed. + // mTop now equals top + 1, we adjust top to make the queue empty. + top++; + } else { + // failure: mTop was not equal to top, which means the item was stolen under our feet. + // top now equals to mTop. Simply discard the item we just popped. + // The queue is now empty. + item = TYPE(); + } + } else { + // We could be here if the item was stolen just before we read mTop, we'll adjust + // mBottom below. + assert(top - bottom == 1); + } + + // std::memory_order_relaxed used because we're not publishing any data. + // no concurrent writes to mBottom possible, it's always safe to write mBottom. + mBottom.store(top, std::memory_order_relaxed); + return item; +} + +/* + * Steals an item from the TOP of another thread's queue. + * + * This can be called concurrently with steal(), push() or pop() + * + * steal() never fails, either there is an item and it atomically takes it, or there isn't and + * it returns an empty item. + */ +template +TYPE WorkStealingDequeue::steal() noexcept { + while (true) { + /* + * Note: A Key component of this algorithm is that mTop is read before mBottom here + * (and observed as such in other threads) + */ + + // std::memory_order_seq_cst is needed to guarantee ordering in pop() + // Note however that this is not a typical acquire operation + // (i.e. other thread's writes of mTop don't publish data) + index_t top = mTop.load(std::memory_order_seq_cst); + + // std::memory_order_acquire is needed because we're acquiring items published in push(). + // std::memory_order_seq_cst is needed to guarantee ordering in pop() + index_t bottom = mBottom.load(std::memory_order_seq_cst); + + if (top >= bottom) { + // queue is empty + return TYPE(); + } + + // The queue isn't empty + TYPE item(getItemAt(top)); + if (mTop.compare_exchange_strong(top, top + 1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + // success: we stole an item, just return it. + return item; + } + // failure: the item we just tried to steal was pop()'ed under our feet, + // simply discard it; nothing to do -- it's okay to try again. + } +} + + +} // namespace utils + +#endif // TNT_UTILS_WORKSTEALINGDEQUEUE_H diff --git a/ios/include/utils/Zip2Iterator.h b/ios/include/utils/Zip2Iterator.h index 344277e8..7b9f552a 100644 --- a/ios/include/utils/Zip2Iterator.h +++ b/ios/include/utils/Zip2Iterator.h @@ -1,122 +1,122 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_ZIP2ITERATOR_H -#define TNT_UTILS_ZIP2ITERATOR_H - -#include -#include - -#include - -namespace utils { - -/* - * A random access iterator that wraps two other random access iterators. - * This mostly exists so that one can sort an array using values from another. - */ - -template -class Zip2Iterator { - std::pair mIt; - using Ref1 = typename std::iterator_traits::reference; - using Ref2 = typename std::iterator_traits::reference; - using Val1 = typename std::iterator_traits::value_type; - using Val2 = typename std::iterator_traits::value_type; - -public: - struct Ref : public std::pair { - using std::pair::pair; - using std::pair::operator=; - private: - friend void swap(Ref lhs, Ref rhs) { - using std::swap; - swap(lhs.first, rhs.first); - swap(lhs.second, rhs.second); - } - }; - - using value_type = std::pair; - using reference = Ref; - using pointer = value_type*; - using difference_type = ptrdiff_t; - using iterator_category = std::random_access_iterator_tag; - - Zip2Iterator() = default; - Zip2Iterator(It1 first, It2 second) : mIt({first, second}) {} - Zip2Iterator(Zip2Iterator const& rhs) noexcept = default; - Zip2Iterator& operator=(Zip2Iterator const& rhs) = default; - - reference operator*() const { return { *mIt.first, *mIt.second }; } - - reference operator[](size_t n) const { return *(*this + n); } - - Zip2Iterator& operator++() { - ++mIt.first; - ++mIt.second; - return *this; - } - - Zip2Iterator& operator--() { - --mIt.first; - --mIt.second; - return *this; - } - - // Postfix operator needed by Microsoft C++ - const Zip2Iterator operator++(int) { - Zip2Iterator t(*this); - mIt.first++; - mIt.second++; - return t; - } - - const Zip2Iterator operator--(int) { - Zip2Iterator t(*this); - mIt.first--; - mIt.second--; - return t; - } - - Zip2Iterator& operator+=(size_t v) { - mIt.first += v; - mIt.second += v; - return *this; - } - - Zip2Iterator& operator-=(size_t v) { - mIt.first -= v; - mIt.second -= v; - return *this; - } - - Zip2Iterator operator+(size_t rhs) const { return { mIt.first + rhs, mIt.second + rhs }; } - Zip2Iterator operator+(size_t rhs) { return { mIt.first + rhs, mIt.second + rhs }; } - Zip2Iterator operator-(size_t rhs) const { return { mIt.first - rhs, mIt.second - rhs }; } - - difference_type operator-(Zip2Iterator const& rhs) const { return mIt.first - rhs.mIt.first; } - - bool operator==(Zip2Iterator const& rhs) const { return (mIt.first == rhs.mIt.first); } - bool operator!=(Zip2Iterator const& rhs) const { return (mIt.first != rhs.mIt.first); } - bool operator>=(Zip2Iterator const& rhs) const { return (mIt.first >= rhs.mIt.first); } - bool operator> (Zip2Iterator const& rhs) const { return (mIt.first > rhs.mIt.first); } - bool operator<=(Zip2Iterator const& rhs) const { return (mIt.first <= rhs.mIt.first); } - bool operator< (Zip2Iterator const& rhs) const { return (mIt.first < rhs.mIt.first); } -}; - -} // namespace utils - -#endif /* TNT_UTILS_ZIP2ITERATOR_H */ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_ZIP2ITERATOR_H +#define TNT_UTILS_ZIP2ITERATOR_H + +#include +#include + +#include + +namespace utils { + +/* + * A random access iterator that wraps two other random access iterators. + * This mostly exists so that one can sort an array using values from another. + */ + +template +class Zip2Iterator { + std::pair mIt; + using Ref1 = typename std::iterator_traits::reference; + using Ref2 = typename std::iterator_traits::reference; + using Val1 = typename std::iterator_traits::value_type; + using Val2 = typename std::iterator_traits::value_type; + +public: + struct Ref : public std::pair { + using std::pair::pair; + using std::pair::operator=; + private: + friend void swap(Ref lhs, Ref rhs) { + using std::swap; + swap(lhs.first, rhs.first); + swap(lhs.second, rhs.second); + } + }; + + using value_type = std::pair; + using reference = Ref; + using pointer = value_type*; + using difference_type = ptrdiff_t; + using iterator_category = std::random_access_iterator_tag; + + Zip2Iterator() = default; + Zip2Iterator(It1 first, It2 second) : mIt({first, second}) {} + Zip2Iterator(Zip2Iterator const& rhs) noexcept = default; + Zip2Iterator& operator=(Zip2Iterator const& rhs) = default; + + reference operator*() const { return { *mIt.first, *mIt.second }; } + + reference operator[](size_t n) const { return *(*this + n); } + + Zip2Iterator& operator++() { + ++mIt.first; + ++mIt.second; + return *this; + } + + Zip2Iterator& operator--() { + --mIt.first; + --mIt.second; + return *this; + } + + // Postfix operator needed by Microsoft C++ + const Zip2Iterator operator++(int) { + Zip2Iterator t(*this); + mIt.first++; + mIt.second++; + return t; + } + + const Zip2Iterator operator--(int) { + Zip2Iterator t(*this); + mIt.first--; + mIt.second--; + return t; + } + + Zip2Iterator& operator+=(size_t v) { + mIt.first += v; + mIt.second += v; + return *this; + } + + Zip2Iterator& operator-=(size_t v) { + mIt.first -= v; + mIt.second -= v; + return *this; + } + + Zip2Iterator operator+(size_t rhs) const { return { mIt.first + rhs, mIt.second + rhs }; } + Zip2Iterator operator+(size_t rhs) { return { mIt.first + rhs, mIt.second + rhs }; } + Zip2Iterator operator-(size_t rhs) const { return { mIt.first - rhs, mIt.second - rhs }; } + + difference_type operator-(Zip2Iterator const& rhs) const { return mIt.first - rhs.mIt.first; } + + bool operator==(Zip2Iterator const& rhs) const { return (mIt.first == rhs.mIt.first); } + bool operator!=(Zip2Iterator const& rhs) const { return (mIt.first != rhs.mIt.first); } + bool operator>=(Zip2Iterator const& rhs) const { return (mIt.first >= rhs.mIt.first); } + bool operator> (Zip2Iterator const& rhs) const { return (mIt.first > rhs.mIt.first); } + bool operator<=(Zip2Iterator const& rhs) const { return (mIt.first <= rhs.mIt.first); } + bool operator< (Zip2Iterator const& rhs) const { return (mIt.first < rhs.mIt.first); } +}; + +} // namespace utils + +#endif // TNT_UTILS_ZIP2ITERATOR_H diff --git a/ios/include/utils/algorithm.h b/ios/include/utils/algorithm.h index 54c28fef..72240c27 100644 --- a/ios/include/utils/algorithm.h +++ 
b/ios/include/utils/algorithm.h @@ -1,277 +1,277 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_ALGORITHM_H -#define TNT_UTILS_ALGORITHM_H - -#include - -#include // for std::less -#include // for std::enable_if - -#include -#include - -namespace utils { - -namespace details { - -template -constexpr inline T popcount(T v) noexcept { - static_assert(sizeof(T) * CHAR_BIT <= 128, "details::popcount() only support up to 128 bits"); - constexpr T ONES = ~T(0); - v = v - ((v >> 1u) & ONES / 3); - v = (v & ONES / 15 * 3) + ((v >> 2u) & ONES / 15 * 3); - v = (v + (v >> 4u)) & ONES / 255 * 15; - return (T) (v * (ONES / 255)) >> (sizeof(T) - 1) * CHAR_BIT; -} - -template::value>> -constexpr inline T clz(T x) noexcept { - static_assert(sizeof(T) * CHAR_BIT <= 128, "details::clz() only support up to 128 bits"); - x |= (x >> 1u); - x |= (x >> 2u); - x |= (x >> 4u); - x |= (x >> 8u); - x |= (x >> 16u); - if (sizeof(T) * CHAR_BIT >= 64) { // just to silence compiler warning - x |= (x >> 32u); - } - if (sizeof(T) * CHAR_BIT >= 128) { // just to silence compiler warning - x |= (x >> 64u); - } - return T(sizeof(T) * CHAR_BIT) - details::popcount(x); -} - -template::value>> -constexpr inline T ctz(T x) noexcept { - static_assert(sizeof(T) * CHAR_BIT <= 64, "details::ctz() only support up to 64 bits"); - T c = sizeof(T) * CHAR_BIT; - x &= -x; // equivalent to x & (~x + 1) - if (x) c--; - if (sizeof(T) * CHAR_BIT >= 64) { - if (x & T(0x00000000FFFFFFFF)) c -= 32; - } - if (x & T(0x0000FFFF0000FFFF)) c -= 16; - if (x & T(0x00FF00FF00FF00FF)) c -= 8; - if (x & T(0x0F0F0F0F0F0F0F0F)) c -= 4; - if (x & T(0x3333333333333333)) c -= 2; - if (x & T(0x5555555555555555)) c -= 1; - return c; -} - -} // namespace details - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned int UTILS_ALWAYS_INLINE clz(unsigned int x) noexcept { -#if __has_builtin(__builtin_clz) - return __builtin_clz(x); -#else - return details::clz(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned long UTILS_ALWAYS_INLINE clz(unsigned long x) noexcept { -#if __has_builtin(__builtin_clzl) - return __builtin_clzl(x); -#else - return details::clz(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned long long UTILS_ALWAYS_INLINE clz(unsigned long long x) noexcept { -#if __has_builtin(__builtin_clzll) - return __builtin_clzll(x); -#else - return details::clz(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned int UTILS_ALWAYS_INLINE ctz(unsigned int x) noexcept { -#if __has_builtin(__builtin_ctz) - return __builtin_ctz(x); -#else - return details::ctz(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned long UTILS_ALWAYS_INLINE ctz(unsigned long x) noexcept { -#if __has_builtin(__builtin_ctzl) - return __builtin_ctzl(x); -#else - return details::ctz(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned long long UTILS_ALWAYS_INLINE ctz(unsigned long long x) noexcept { -#if 
__has_builtin(__builtin_ctzll) - return __builtin_ctzll(x); -#else - return details::ctz(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned int UTILS_ALWAYS_INLINE popcount(unsigned int x) noexcept { -#if __has_builtin(__builtin_popcount) - return __builtin_popcount(x); -#else - return details::popcount(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned long UTILS_ALWAYS_INLINE popcount(unsigned long x) noexcept { -#if __has_builtin(__builtin_popcountl) - return __builtin_popcountl(x); -#else - return details::popcount(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -unsigned long long UTILS_ALWAYS_INLINE popcount(unsigned long long x) noexcept { -#if __has_builtin(__builtin_popcountll) - return __builtin_popcountll(x); -#else - return details::popcount(x); -#endif -} - -constexpr inline UTILS_PUBLIC UTILS_PURE -uint8_t UTILS_ALWAYS_INLINE popcount(uint8_t x) noexcept { - return (uint8_t)popcount((unsigned int)x); -} - -template::value && std::is_unsigned::value>> -constexpr inline UTILS_PUBLIC UTILS_PURE -T log2i(T x) noexcept { - return (sizeof(x) * 8 - 1u) - clz(x); -} - -/* - * branch-less version of std::lower_bound and std::upper_bound. - * These versions are intended to be fully inlined, which only happens when the size - * of the array is known at compile time. This code also performs better if the - * array is a power-of-two in size. - * - * These code works even if the conditions above are not met, and becomes a less-branches - * algorithm instead of a branch-less one! - */ - -template> -inline UTILS_PUBLIC -RandomAccessIterator lower_bound( - RandomAccessIterator first, RandomAccessIterator last, const T& value, - COMPARE comp = std::less(), - bool assume_power_of_two = false) { - size_t len = last - first; - - if (!assume_power_of_two) { - // handle non power-of-two sized arrays. If it's POT, the next line is a no-op - // and gets optimized out if the size is known at compile time. - len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 - size_t difference = (last - first) - len; - // If len was already a POT, then difference will be 0. - // We need to explicitly check this case to avoid dereferencing past the end of the array - first += !difference || comp(first[len], value) ? difference : 0; - } - - while (len) { - // The number of repetitions here doesn't affect the result. We manually unroll the loop - // twice, to guarantee we have at least two iterations without branches (for the case - // where the size is not known at compile time - first += comp(first[len >>= 1u], value) ? len : 0; - first += comp(first[len >>= 1u], value) ? len : 0; - } - first += comp(*first, value); - return first; -} - -template> -inline UTILS_PUBLIC -RandomAccessIterator upper_bound( - RandomAccessIterator first, RandomAccessIterator last, - const T& value, COMPARE comp = std::less(), - bool assume_power_of_two = false) { - size_t len = last - first; - - if (!assume_power_of_two) { - // handle non power-of-two sized arrays. If it's POT, the next line is a no-op - // and gets optimized out if the size is known at compile time. - len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 - size_t difference = (last - first) - len; - // If len was already a POT, then difference will be 0. - // We need to explicitly check this case to avoid dereferencing past the end of the array - first += !difference || comp(value, first[len]) ? 
0 : difference; - } - - while (len) { - // The number of repetitions here doesn't affect the result. We manually unroll the loop - // twice, to guarantee we have at least two iterations without branches (for the case - // where the size is not known at compile time - first += !comp(value, first[len >>= 1u]) ? len : 0; - first += !comp(value, first[len >>= 1u]) ? len : 0; - } - first += !comp(value, *first); - return first; -} - -template -inline UTILS_PUBLIC -RandomAccessIterator partition_point( - RandomAccessIterator first, RandomAccessIterator last, COMPARE pred, - bool assume_power_of_two = false) { - size_t len = last - first; - - if (!assume_power_of_two) { - // handle non power-of-two sized arrays. If it's POT, the next line is a no-op - // and gets optimized out if the size is known at compile time. - len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 - size_t difference = (last - first) - len; - first += !difference || pred(first[len]) ? difference : 0; - } - - while (len) { - // The number of repetitions here doesn't affect the result. We manually unroll the loop - // twice, to guarantee we have at least two iterations without branches (for the case - // where the size is not known at compile time - first += pred(first[len>>=1u]) ? len : 0; - first += pred(first[len>>=1u]) ? len : 0; - } - first += pred(*first); - return first; -} - -template -typename std::enable_if_t< - (sizeof(To) == sizeof(From)) && - std::is_trivially_copyable::value, - To> -// constexpr support needs compiler magic -bit_cast(const From &src) noexcept { - return reinterpret_cast(src); -} - -} // namespace utils - -#endif // TNT_UTILS_ALGORITHM_H +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_ALGORITHM_H +#define TNT_UTILS_ALGORITHM_H + +#include + +#include // for std::less +#include // for std::enable_if + +#include +#include + +namespace utils { + +namespace details { + +template +constexpr inline T popcount(T v) noexcept { + static_assert(sizeof(T) * CHAR_BIT <= 128, "details::popcount() only support up to 128 bits"); + constexpr T ONES = ~T(0); + v = v - ((v >> 1u) & ONES / 3); + v = (v & ONES / 15 * 3) + ((v >> 2u) & ONES / 15 * 3); + v = (v + (v >> 4u)) & ONES / 255 * 15; + return (T) (v * (ONES / 255)) >> (sizeof(T) - 1) * CHAR_BIT; +} + +template::value>> +constexpr inline T clz(T x) noexcept { + static_assert(sizeof(T) * CHAR_BIT <= 128, "details::clz() only support up to 128 bits"); + x |= (x >> 1u); + x |= (x >> 2u); + x |= (x >> 4u); + x |= (x >> 8u); + x |= (x >> 16u); + if (sizeof(T) * CHAR_BIT >= 64) { // just to silence compiler warning + x |= (x >> 32u); + } + if (sizeof(T) * CHAR_BIT >= 128) { // just to silence compiler warning + x |= (x >> 64u); + } + return T(sizeof(T) * CHAR_BIT) - details::popcount(x); +} + +template::value>> +constexpr inline T ctz(T x) noexcept { + static_assert(sizeof(T) * CHAR_BIT <= 64, "details::ctz() only support up to 64 bits"); + T c = sizeof(T) * CHAR_BIT; + x &= -x; // equivalent to x & (~x + 1) + if (x) c--; + if (sizeof(T) * CHAR_BIT >= 64) { + if (x & T(0x00000000FFFFFFFF)) c -= 32; + } + if (x & T(0x0000FFFF0000FFFF)) c -= 16; + if (x & T(0x00FF00FF00FF00FF)) c -= 8; + if (x & T(0x0F0F0F0F0F0F0F0F)) c -= 4; + if (x & T(0x3333333333333333)) c -= 2; + if (x & T(0x5555555555555555)) c -= 1; + return c; +} + +} // namespace details + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned int UTILS_ALWAYS_INLINE clz(unsigned int x) noexcept { +#if __has_builtin(__builtin_clz) + return __builtin_clz(x); +#else + return details::clz(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned long UTILS_ALWAYS_INLINE clz(unsigned long x) noexcept { +#if __has_builtin(__builtin_clzl) + return __builtin_clzl(x); +#else + return details::clz(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned long long UTILS_ALWAYS_INLINE clz(unsigned long long x) noexcept { +#if __has_builtin(__builtin_clzll) + return __builtin_clzll(x); +#else + return details::clz(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned int UTILS_ALWAYS_INLINE ctz(unsigned int x) noexcept { +#if __has_builtin(__builtin_ctz) + return __builtin_ctz(x); +#else + return details::ctz(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned long UTILS_ALWAYS_INLINE ctz(unsigned long x) noexcept { +#if __has_builtin(__builtin_ctzl) + return __builtin_ctzl(x); +#else + return details::ctz(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned long long UTILS_ALWAYS_INLINE ctz(unsigned long long x) noexcept { +#if __has_builtin(__builtin_ctzll) + return __builtin_ctzll(x); +#else + return details::ctz(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned int UTILS_ALWAYS_INLINE popcount(unsigned int x) noexcept { +#if __has_builtin(__builtin_popcount) + return __builtin_popcount(x); +#else + return details::popcount(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned long UTILS_ALWAYS_INLINE popcount(unsigned long x) noexcept { +#if __has_builtin(__builtin_popcountl) + return __builtin_popcountl(x); +#else + return details::popcount(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +unsigned long long UTILS_ALWAYS_INLINE popcount(unsigned long 
long x) noexcept { +#if __has_builtin(__builtin_popcountll) + return __builtin_popcountll(x); +#else + return details::popcount(x); +#endif +} + +constexpr inline UTILS_PUBLIC UTILS_PURE +uint8_t UTILS_ALWAYS_INLINE popcount(uint8_t x) noexcept { + return (uint8_t)popcount((unsigned int)x); +} + +template::value && std::is_unsigned::value>> +constexpr inline UTILS_PUBLIC UTILS_PURE +T log2i(T x) noexcept { + return (sizeof(x) * 8 - 1u) - clz(x); +} + +/* + * branch-less version of std::lower_bound and std::upper_bound. + * These versions are intended to be fully inlined, which only happens when the size + * of the array is known at compile time. This code also performs better if the + * array is a power-of-two in size. + * + * These code works even if the conditions above are not met, and becomes a less-branches + * algorithm instead of a branch-less one! + */ + +template> +inline UTILS_PUBLIC +RandomAccessIterator lower_bound( + RandomAccessIterator first, RandomAccessIterator last, const T& value, + COMPARE comp = std::less(), + bool assume_power_of_two = false) { + size_t len = last - first; + + if (!assume_power_of_two) { + // handle non power-of-two sized arrays. If it's POT, the next line is a no-op + // and gets optimized out if the size is known at compile time. + len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 + size_t difference = (last - first) - len; + // If len was already a POT, then difference will be 0. + // We need to explicitly check this case to avoid dereferencing past the end of the array + first += !difference || comp(first[len], value) ? difference : 0; + } + + while (len) { + // The number of repetitions here doesn't affect the result. We manually unroll the loop + // twice, to guarantee we have at least two iterations without branches (for the case + // where the size is not known at compile time + first += comp(first[len >>= 1u], value) ? len : 0; + first += comp(first[len >>= 1u], value) ? len : 0; + } + first += comp(*first, value); + return first; +} + +template> +inline UTILS_PUBLIC +RandomAccessIterator upper_bound( + RandomAccessIterator first, RandomAccessIterator last, + const T& value, COMPARE comp = std::less(), + bool assume_power_of_two = false) { + size_t len = last - first; + + if (!assume_power_of_two) { + // handle non power-of-two sized arrays. If it's POT, the next line is a no-op + // and gets optimized out if the size is known at compile time. + len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 + size_t difference = (last - first) - len; + // If len was already a POT, then difference will be 0. + // We need to explicitly check this case to avoid dereferencing past the end of the array + first += !difference || comp(value, first[len]) ? 0 : difference; + } + + while (len) { + // The number of repetitions here doesn't affect the result. We manually unroll the loop + // twice, to guarantee we have at least two iterations without branches (for the case + // where the size is not known at compile time + first += !comp(value, first[len >>= 1u]) ? len : 0; + first += !comp(value, first[len >>= 1u]) ? len : 0; + } + first += !comp(value, *first); + return first; +} + +template +inline UTILS_PUBLIC +RandomAccessIterator partition_point( + RandomAccessIterator first, RandomAccessIterator last, COMPARE pred, + bool assume_power_of_two = false) { + size_t len = last - first; + + if (!assume_power_of_two) { + // handle non power-of-two sized arrays. 
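For reference, a minimal usage sketch of utils::lower_bound as described in the comment above (assuming a sorted array whose power-of-two size is known at compile time, which is the case the comment says these helpers are tuned for):

    #include <utils/algorithm.h>
    #include <array>

    std::array<int, 8> sorted = { 1, 2, 4, 8, 16, 32, 64, 128 };

    // Iterator to the first element >= 16; with a compile-time power-of-two size
    // the search loop can be fully unrolled and stays free of data-dependent branches.
    auto it = utils::lower_bound(sorted.begin(), sorted.end(), 16,
            std::less<int>(), /*assume_power_of_two=*/true);

utils::upper_bound and utils::partition_point follow the same calling convention, with partition_point taking a predicate instead of a value.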
If it's POT, the next line is a no-op + // and gets optimized out if the size is known at compile time. + len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 + size_t difference = (last - first) - len; + first += !difference || pred(first[len]) ? difference : 0; + } + + while (len) { + // The number of repetitions here doesn't affect the result. We manually unroll the loop + // twice, to guarantee we have at least two iterations without branches (for the case + // where the size is not known at compile time + first += pred(first[len>>=1u]) ? len : 0; + first += pred(first[len>>=1u]) ? len : 0; + } + first += pred(*first); + return first; +} + +template +typename std::enable_if_t< + (sizeof(To) == sizeof(From)) && + std::is_trivially_copyable::value, + To> +// constexpr support needs compiler magic +bit_cast(const From &src) noexcept { + return reinterpret_cast(src); +} + +} // namespace utils + +#endif // TNT_UTILS_ALGORITHM_H diff --git a/ios/include/utils/android/ThermalManager.h b/ios/include/utils/android/ThermalManager.h new file mode 100644 index 00000000..6c303b04 --- /dev/null +++ b/ios/include/utils/android/ThermalManager.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_ANDROID_THERMALMANAGER_H +#define TNT_UTILS_ANDROID_THERMALMANAGER_H + +#include + +#include + +struct AThermalManager; + +namespace utils { + +class ThermalManager { +public: + enum class ThermalStatus : int8_t { + ERROR = -1, + NONE, + LIGHT, + MODERATE, + SEVERE, + CRITICAL, + EMERGENCY, + SHUTDOWN + }; + + ThermalManager(); + ~ThermalManager(); + + // Movable + ThermalManager(ThermalManager&& rhs) noexcept; + ThermalManager& operator=(ThermalManager&& rhs) noexcept; + + // not copiable + ThermalManager(ThermalManager const& rhs) = delete; + ThermalManager& operator=(ThermalManager const& rhs) = delete; + + ThermalStatus getCurrentThermalStatus() const noexcept; + +private: + AThermalManager* mThermalManager = nullptr; +}; + +} // namespace utils + +#endif // TNT_UTILS_ANDROID_THERMALMANAGER_H diff --git a/ios/include/utils/api_level.h b/ios/include/utils/api_level.h index b8d3dc41..0d2ec830 100644 --- a/ios/include/utils/api_level.h +++ b/ios/include/utils/api_level.h @@ -1,34 +1,34 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef TNT_UTILS_API_H -#define TNT_UTILS_API_H - -#include - -namespace utils { - -/** - * Returns this platform's API level. On Android this function will return - * the API level as defined by the SDK API level version. If a platform does - * not have an API level, this function returns 0. - */ -UTILS_PUBLIC -int api_level(); - -} // namespace utils - -#endif // TNT_UTILS_ARCHITECTURE_H +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_APILEVEL_H +#define TNT_UTILS_APILEVEL_H + +#include + +namespace utils { + +/** + * Returns this platform's API level. On Android this function will return + * the API level as defined by the SDK API level version. If a platform does + * not have an API level, this function returns 0. + */ +UTILS_PUBLIC +int api_level(); + +} // namespace utils + +#endif // TNT_UTILS_APILEVEL_H diff --git a/ios/include/utils/architecture.h b/ios/include/utils/architecture.h index 60b6ed70..83b11794 100644 --- a/ios/include/utils/architecture.h +++ b/ios/include/utils/architecture.h @@ -1,28 +1,28 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_ARCHITECTURE_H -#define TNT_UTILS_ARCHITECTURE_H - -#include - -namespace utils { - -constexpr size_t CACHELINE_SIZE = 64; - -} // namespace utils - -#endif // TNT_UTILS_ARCHITECTURE_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_ARCHITECTURE_H +#define TNT_UTILS_ARCHITECTURE_H + +#include + +namespace utils { + +constexpr size_t CACHELINE_SIZE = 64; + +} // namespace utils + +#endif // TNT_UTILS_ARCHITECTURE_H diff --git a/ios/include/utils/ashmem.h b/ios/include/utils/ashmem.h index bacd3bfc..c9ca9e3e 100644 --- a/ios/include/utils/ashmem.h +++ b/ios/include/utils/ashmem.h @@ -1,28 +1,28 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_ASHMEM_H -#define TNT_UTILS_ASHMEM_H - -#include - -namespace utils { - -int ashmem_create_region(const char *name, size_t size); - -} // namespace utils - -#endif /* TNT_UTILS_ASHMEM_H */ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_ASHMEM_H +#define TNT_UTILS_ASHMEM_H + +#include + +namespace utils { + +int ashmem_create_region(const char *name, size_t size); + +} // namespace utils + +#endif // TNT_UTILS_ASHMEM_H diff --git a/ios/include/utils/bitset.h b/ios/include/utils/bitset.h index b9cfe35c..45112cec 100644 --- a/ios/include/utils/bitset.h +++ b/ios/include/utils/bitset.h @@ -1,316 +1,324 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_BITSET_H -#define TNT_UTILS_BITSET_H - -#include -#include - -#include -#include -#include - -#include -#include - -#if defined(__ARM_NEON) -# if defined(__ARM_ACLE) && defined(__aarch64__) -# include -# define TNT_UTILS_BITSET_USE_NEON 1 -# endif -#endif - -namespace utils { - -/* - * This bitset<> class is different from std::bitset<> in that it allows us to control - * the exact storage size. This is useful for small bitset (e.g. < 64, on 64-bits machines). - * It also allows for lexicographical compares (i.e. sorting). 
- */ - -template::value && - std::is_unsigned::value>::type> -class UTILS_PUBLIC bitset { - T storage[N]; - -public: - static constexpr T BITS_PER_WORD = sizeof(T) * 8; - static constexpr T BIT_COUNT = BITS_PER_WORD * N; - static constexpr T WORLD_COUNT = N; - using container_type = T; - - bitset() noexcept { - std::fill(std::begin(storage), std::end(storage), 0); - } - - T getBitsAt(size_t n) const noexcept { - return storage[n]; - } - - T& getBitsAt(size_t n) noexcept { - return storage[n]; - } - - T getValue() const noexcept { - static_assert(N == 1, "bitfield must only have one storage word"); - return storage[0]; - } - - void setValue(T value) noexcept { - static_assert(N == 1, "bitfield must only have one storage word"); - storage[0] = value; - } - - template - void forEachSetBit(F exec) const noexcept { - for (size_t i = 0; i < N; i++) { - T v = storage[i]; - while (v) { - T k = utils::ctz(v); - v &= ~(T(1) << k); - exec(size_t(k + BITS_PER_WORD * i)); - } - } - } - - size_t size() const noexcept { return N * BITS_PER_WORD; } - - bool test(size_t bit) const noexcept { return operator[](bit); } - - void set(size_t b) noexcept { - storage[b / BITS_PER_WORD] |= T(1) << (b % BITS_PER_WORD); - } - - void set(size_t b, bool value) noexcept { - storage[b / BITS_PER_WORD] &= ~(T(1) << (b % BITS_PER_WORD)); - storage[b / BITS_PER_WORD] |= T(value) << (b % BITS_PER_WORD); - } - - void unset(size_t b) noexcept { - storage[b / BITS_PER_WORD] &= ~(T(1) << (b % BITS_PER_WORD)); - } - - void flip(size_t b) noexcept { - storage[b / BITS_PER_WORD] ^= T(1) << (b % BITS_PER_WORD); - } - - - void reset() noexcept { - std::fill(std::begin(storage), std::end(storage), 0); - } - - bool operator[](size_t b) const noexcept { - return bool(storage[b / BITS_PER_WORD] & (T(1) << (b % BITS_PER_WORD))); - } - - size_t count() const noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0 && BIT_COUNT / 128 < 31) { - // Use NEON for bitset multiple of 128 bits. - // The intermediate computation can't handle more than 31*128 bits because - // intermediate counts must be 8 bits. 
- uint8x16_t const* const p = (uint8x16_t const*) storage; - uint8x16_t counts = vcntq_u8(p[0]); - for (size_t i = 1; i < BIT_COUNT / 128; ++i) { - counts += vcntq_u8(p[i]); - } - return vaddlvq_u8(counts); - } else -#endif - { - T r = utils::popcount(storage[0]); - for (size_t i = 1; i < N; ++i) { - r += utils::popcount(storage[i]); - } - return r; - } - } - - bool any() const noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - uint64x2_t const* const p = (uint64x2_t const*) storage; - uint64x2_t r = p[0]; - for (size_t i = 1; i < BIT_COUNT / 128; ++i) { - r |= p[i]; - } - return bool(r[0] | r[1]); - } else -#endif - { - T r = storage[0]; - for (size_t i = 1; i < N; ++i) { - r |= storage[i]; - } - return bool(r); - } - } - - bool none() const noexcept { - return !any(); - } - - bool all() const noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - uint64x2_t const* const p = (uint64x2_t const*) storage; - uint64x2_t r = p[0]; - for (size_t i = 1; i < BIT_COUNT / 128; ++i) { - r &= p[i]; - } - return T(~(r[0] & r[1])) == T(0); - } else -#endif - { - T r = storage[0]; - for (size_t i = 1; i < N; ++i) { - r &= storage[i]; - } - return T(~r) == T(0); - } - } - - bool operator!=(const bitset& b) const noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - bitset temp(*this ^ b); - uint64x2_t const* const p = (uint64x2_t const*) temp.storage; - uint64x2_t r = p[0]; - for (size_t i = 1; i < BIT_COUNT / 128; ++i) { - r |= p[i]; - } - return bool(r[0] | r[1]); - } else -#endif - { - T r = storage[0] ^ b.storage[0]; - for (size_t i = 1; i < N; ++i) { - r |= storage[i] ^ b.storage[i]; - } - return bool(r); - } - } - - bool operator==(const bitset& b) const noexcept { - return !operator!=(b); - } - - bitset& operator&=(const bitset& b) noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - uint8x16_t* const p = (uint8x16_t*) storage; - uint8x16_t const* const q = (uint8x16_t const*) b.storage; - for (size_t i = 0; i < BIT_COUNT / 128; ++i) { - p[i] &= q[i]; - } - } else -#endif - { - for (size_t i = 0; i < N; ++i) { - storage[i] &= b.storage[i]; - } - } - return *this; - } - - bitset& operator|=(const bitset& b) noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - uint8x16_t* const p = (uint8x16_t*) storage; - uint8x16_t const* const q = (uint8x16_t const*) b.storage; - for (size_t i = 0; i < BIT_COUNT / 128; ++i) { - p[i] |= q[i]; - } - } else -#endif - { - for (size_t i = 0; i < N; ++i) { - storage[i] |= b.storage[i]; - } - } - return *this; - } - - bitset& operator^=(const bitset& b) noexcept { -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - uint8x16_t* const p = (uint8x16_t*) storage; - uint8x16_t const* const q = (uint8x16_t const*) b.storage; - for (size_t i = 0; i < BIT_COUNT / 128; ++i) { - p[i] ^= q[i]; - } - } else -#endif - { - for (size_t i = 0; i < N; ++i) { - storage[i] ^= b.storage[i]; - } - } - return *this; - } - - bitset operator~() const noexcept { - bitset r; -#if defined(TNT_UTILS_BITSET_USE_NEON) - if (BIT_COUNT % 128 == 0) { - uint8x16_t* const p = (uint8x16_t*) r.storage; - uint8x16_t const* const q = (uint8x16_t const*) storage; - for (size_t i = 0; i < BIT_COUNT / 128; ++i) { - p[i] = ~q[i]; - } - } else -#endif - { - for (size_t i = 0; i < N; ++i) { - r.storage[i] = ~storage[i]; - } - } - return r; - } - -private: - friend bool operator<(bitset const& lhs, bitset const& rhs) noexcept { - return 
std::lexicographical_compare( - std::begin(lhs.storage), std::end(lhs.storage), - std::begin(rhs.storage), std::end(rhs.storage) - ); - } - - friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept { - return bitset(lhs) &= rhs; - } - - friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept { - return bitset(lhs) |= rhs; - } - - friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept { - return bitset(lhs) ^= rhs; - } -}; - -using bitset8 = bitset; -using bitset32 = bitset; -using bitset256 = bitset; - -static_assert(sizeof(bitset8) == sizeof(uint8_t), "bitset8 isn't 8 bits!"); -static_assert(sizeof(bitset32) == sizeof(uint32_t), "bitset32 isn't 32 bits!"); - -} // namespace utils - -#endif // TNT_UTILS_BITSET_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_BITSET_H +#define TNT_UTILS_BITSET_H + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#if defined(__ARM_NEON) +# if defined(__ARM_ACLE) && defined(__aarch64__) +# include +# define TNT_UTILS_BITSET_USE_NEON 1 +# endif +#endif + +namespace utils { + +/* + * This bitset<> class is different from std::bitset<> in that it allows us to control + * the exact storage size. This is useful for small bitset (e.g. < 64, on 64-bits machines). + * It also allows for lexicographical compares (i.e. sorting). 
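A short usage sketch of this container, assuming the header is reachable as utils/bitset.h and using the utils::bitset32 alias defined at the end of this file:

    #include <utils/bitset.h>
    #include <cstdio>

    utils::bitset32 dirty;       // one 32-bit word, all bits cleared by the constructor
    dirty.set(3);
    dirty.set(17);
    dirty.unset(3);
    // dirty.count() == 1, dirty.test(17) == true
    dirty.forEachSetBit([](size_t bit) {
        std::printf("bit %zu is set\n", bit);
    });

Because the storage type is explicit, sizeof(utils::bitset32) is exactly four bytes, which is what the static_asserts at the bottom of the header check.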
+ */ + +template::value && + std::is_unsigned::value>::type> +class UTILS_PUBLIC bitset { + T storage[N]; + +public: + static constexpr T BITS_PER_WORD = sizeof(T) * 8; + static constexpr T BIT_COUNT = BITS_PER_WORD * N; + static constexpr T WORLD_COUNT = N; + using container_type = T; + + bitset() noexcept { + std::fill(std::begin(storage), std::end(storage), 0); + } + + T getBitsAt(size_t n) const noexcept { + assert_invariant(n + void forEachSetBit(F exec) const noexcept { + for (size_t i = 0; i < N; i++) { + T v = storage[i]; + while (v) { + T k = utils::ctz(v); + v &= ~(T(1) << k); + exec(size_t(k + BITS_PER_WORD * i)); + } + } + } + + size_t size() const noexcept { return N * BITS_PER_WORD; } + + bool test(size_t bit) const noexcept { return operator[](bit); } + + void set(size_t b) noexcept { + assert_invariant(b / BITS_PER_WORD < N); + storage[b / BITS_PER_WORD] |= T(1) << (b % BITS_PER_WORD); + } + + void set(size_t b, bool value) noexcept { + assert_invariant(b / BITS_PER_WORD < N); + storage[b / BITS_PER_WORD] &= ~(T(1) << (b % BITS_PER_WORD)); + storage[b / BITS_PER_WORD] |= T(value) << (b % BITS_PER_WORD); + } + + void unset(size_t b) noexcept { + assert_invariant(b / BITS_PER_WORD < N); + storage[b / BITS_PER_WORD] &= ~(T(1) << (b % BITS_PER_WORD)); + } + + void flip(size_t b) noexcept { + assert_invariant(b / BITS_PER_WORD < N); + storage[b / BITS_PER_WORD] ^= T(1) << (b % BITS_PER_WORD); + } + + + void reset() noexcept { + std::fill(std::begin(storage), std::end(storage), 0); + } + + bool operator[](size_t b) const noexcept { + assert_invariant(b / BITS_PER_WORD < N); + return bool(storage[b / BITS_PER_WORD] & (T(1) << (b % BITS_PER_WORD))); + } + + size_t count() const noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0 && BIT_COUNT / 128 < 31) { + // Use NEON for bitset multiple of 128 bits. + // The intermediate computation can't handle more than 31*128 bits because + // intermediate counts must be 8 bits. 
+ uint8x16_t const* const p = (uint8x16_t const*) storage; + uint8x16_t counts = vcntq_u8(p[0]); + for (size_t i = 1; i < BIT_COUNT / 128; ++i) { + counts += vcntq_u8(p[i]); + } + return vaddlvq_u8(counts); + } else +#endif + { + T r = utils::popcount(storage[0]); + for (size_t i = 1; i < N; ++i) { + r += utils::popcount(storage[i]); + } + return r; + } + } + + bool any() const noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + uint64x2_t const* const p = (uint64x2_t const*) storage; + uint64x2_t r = p[0]; + for (size_t i = 1; i < BIT_COUNT / 128; ++i) { + r |= p[i]; + } + return bool(r[0] | r[1]); + } else +#endif + { + T r = storage[0]; + for (size_t i = 1; i < N; ++i) { + r |= storage[i]; + } + return bool(r); + } + } + + bool none() const noexcept { + return !any(); + } + + bool all() const noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + uint64x2_t const* const p = (uint64x2_t const*) storage; + uint64x2_t r = p[0]; + for (size_t i = 1; i < BIT_COUNT / 128; ++i) { + r &= p[i]; + } + return T(~(r[0] & r[1])) == T(0); + } else +#endif + { + T r = storage[0]; + for (size_t i = 1; i < N; ++i) { + r &= storage[i]; + } + return T(~r) == T(0); + } + } + + bool operator!=(const bitset& b) const noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + bitset temp(*this ^ b); + uint64x2_t const* const p = (uint64x2_t const*) temp.storage; + uint64x2_t r = p[0]; + for (size_t i = 1; i < BIT_COUNT / 128; ++i) { + r |= p[i]; + } + return bool(r[0] | r[1]); + } else +#endif + { + T r = storage[0] ^ b.storage[0]; + for (size_t i = 1; i < N; ++i) { + r |= storage[i] ^ b.storage[i]; + } + return bool(r); + } + } + + bool operator==(const bitset& b) const noexcept { + return !operator!=(b); + } + + bitset& operator&=(const bitset& b) noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + uint8x16_t* const p = (uint8x16_t*) storage; + uint8x16_t const* const q = (uint8x16_t const*) b.storage; + for (size_t i = 0; i < BIT_COUNT / 128; ++i) { + p[i] &= q[i]; + } + } else +#endif + { + for (size_t i = 0; i < N; ++i) { + storage[i] &= b.storage[i]; + } + } + return *this; + } + + bitset& operator|=(const bitset& b) noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + uint8x16_t* const p = (uint8x16_t*) storage; + uint8x16_t const* const q = (uint8x16_t const*) b.storage; + for (size_t i = 0; i < BIT_COUNT / 128; ++i) { + p[i] |= q[i]; + } + } else +#endif + { + for (size_t i = 0; i < N; ++i) { + storage[i] |= b.storage[i]; + } + } + return *this; + } + + bitset& operator^=(const bitset& b) noexcept { +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + uint8x16_t* const p = (uint8x16_t*) storage; + uint8x16_t const* const q = (uint8x16_t const*) b.storage; + for (size_t i = 0; i < BIT_COUNT / 128; ++i) { + p[i] ^= q[i]; + } + } else +#endif + { + for (size_t i = 0; i < N; ++i) { + storage[i] ^= b.storage[i]; + } + } + return *this; + } + + bitset operator~() const noexcept { + bitset r; +#if defined(TNT_UTILS_BITSET_USE_NEON) + if (BIT_COUNT % 128 == 0) { + uint8x16_t* const p = (uint8x16_t*) r.storage; + uint8x16_t const* const q = (uint8x16_t const*) storage; + for (size_t i = 0; i < BIT_COUNT / 128; ++i) { + p[i] = ~q[i]; + } + } else +#endif + { + for (size_t i = 0; i < N; ++i) { + r.storage[i] = ~storage[i]; + } + } + return r; + } + +private: + friend bool operator<(bitset const& lhs, bitset const& rhs) noexcept { + return 
std::lexicographical_compare( + std::begin(lhs.storage), std::end(lhs.storage), + std::begin(rhs.storage), std::end(rhs.storage) + ); + } + + friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept { + return bitset(lhs) &= rhs; + } + + friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept { + return bitset(lhs) |= rhs; + } + + friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept { + return bitset(lhs) ^= rhs; + } +}; + +using bitset8 = bitset; +using bitset32 = bitset; +using bitset256 = bitset; + +static_assert(sizeof(bitset8) == sizeof(uint8_t), "bitset8 isn't 8 bits!"); +static_assert(sizeof(bitset32) == sizeof(uint32_t), "bitset32 isn't 32 bits!"); + +} // namespace utils + +#endif // TNT_UTILS_BITSET_H diff --git a/ios/include/utils/compiler.h b/ios/include/utils/compiler.h index f8e91922..8c3551b1 100644 --- a/ios/include/utils/compiler.h +++ b/ios/include/utils/compiler.h @@ -1,226 +1,228 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_COMPILER_H -#define TNT_UTILS_COMPILER_H - -// compatibility with non-clang compilers... -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif - -#ifndef __has_feature -#define __has_feature(x) 0 -#endif - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - -#if __has_attribute(visibility) -# define UTILS_PUBLIC __attribute__((visibility("default"))) -#else -# define UTILS_PUBLIC -#endif - -#if __has_attribute(deprecated) -# define UTILS_DEPRECATED [[deprecated]] -#else -# define UTILS_DEPRECATED -#endif - -#if __has_attribute(packed) -# define UTILS_PACKED __attribute__((packed)) -#else -# define UTILS_PACKED -#endif - -#if __has_attribute(noreturn) -# define UTILS_NORETURN __attribute__((noreturn)) -#else -# define UTILS_NORETURN -#endif - -#if __has_attribute(visibility) -# ifndef TNT_DEV -# define UTILS_PRIVATE __attribute__((visibility("hidden"))) -# else -# define UTILS_PRIVATE -# endif -#else -# define UTILS_PRIVATE -#endif - -#define UTILS_NO_SANITIZE_THREAD -#if defined(__has_feature) -# if __has_feature(thread_sanitizer) -# undef UTILS_NO_SANITIZE_THREAD -# define UTILS_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread"))) -# endif -#endif - -#define UTILS_HAS_SANITIZE_MEMORY 0 -#if defined(__has_feature) -# if __has_feature(memory_sanitizer) -# undef UTILS_HAS_SANITIZE_MEMORY -# define UTILS_HAS_SANITIZE_MEMORY 1 -# endif -#endif - -/* - * helps the compiler's optimizer predicting branches - */ -#if __has_builtin(__builtin_expect) -# ifdef __cplusplus -# define UTILS_LIKELY( exp ) (__builtin_expect( !!(exp), true )) -# define UTILS_UNLIKELY( exp ) (__builtin_expect( !!(exp), false )) -# else -# define UTILS_LIKELY( exp ) (__builtin_expect( !!(exp), 1 )) -# define UTILS_UNLIKELY( exp ) (__builtin_expect( !!(exp), 0 )) -# endif -#else -# define UTILS_LIKELY( exp ) (!!(exp)) -# define UTILS_UNLIKELY( exp ) (!!(exp)) -#endif - -#if __has_builtin(__builtin_prefetch) -# define 
UTILS_PREFETCH( exp ) (__builtin_prefetch(exp)) -#else -# define UTILS_PREFETCH( exp ) -#endif - -#if __has_builtin(__builtin_assume) -# define UTILS_ASSUME( exp ) (__builtin_assume(exp)) -#else -# define UTILS_ASSUME( exp ) -#endif - -#if (defined(__i386__) || defined(__x86_64__)) -# define UTILS_HAS_HYPER_THREADING 1 // on x86 we assume we have hyper-threading. -#else -# define UTILS_HAS_HYPER_THREADING 0 -#endif - -#if defined(__EMSCRIPTEN__) || defined(FILAMENT_SINGLE_THREADED) -# define UTILS_HAS_THREADING 0 -#else -# define UTILS_HAS_THREADING 1 -#endif - -#if __has_attribute(noinline) -#define UTILS_NOINLINE __attribute__((noinline)) -#else -#define UTILS_NOINLINE -#endif - -#if __has_attribute(always_inline) -#define UTILS_ALWAYS_INLINE __attribute__((always_inline)) -#else -#define UTILS_ALWAYS_INLINE -#endif - -#if __has_attribute(pure) -#define UTILS_PURE __attribute__((pure)) -#else -#define UTILS_PURE -#endif - -#if __has_attribute(maybe_unused) -#define UTILS_UNUSED [[maybe_unused]] -#define UTILS_UNUSED_IN_RELEASE [[maybe_unused]] -#elif __has_attribute(unused) -#define UTILS_UNUSED __attribute__((unused)) -#define UTILS_UNUSED_IN_RELEASE __attribute__((unused)) -#else -#define UTILS_UNUSED -#define UTILS_UNUSED_IN_RELEASE -#endif - -#if defined(_MSC_VER) && _MSC_VER >= 1900 -# define UTILS_RESTRICT __restrict -#elif (defined(__clang__) || defined(__GNUC__)) -# define UTILS_RESTRICT __restrict__ -#else -# define UTILS_RESTRICT -#endif - -#if defined(_MSC_VER) && _MSC_VER >= 1900 -# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 1 -#elif __has_feature(cxx_thread_local) -# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 1 -#else -# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 0 -#endif - -#if __has_feature(cxx_rtti) || defined(_CPPRTTI) -# define UTILS_HAS_RTTI 1 -#else -# define UTILS_HAS_RTTI 0 -#endif - -#ifdef __ARM_ACLE -# include -# define UTILS_WAIT_FOR_INTERRUPT() __wfi() -# define UTILS_WAIT_FOR_EVENT() __wfe() -# define UTILS_BROADCAST_EVENT() __sev() -# define UTILS_SIGNAL_EVENT() __sevl() -# define UTILS_PAUSE() __yield() -# define UTILS_PREFETCHW(addr) __pldx(1, 0, 0, addr) -#else // !__ARM_ACLE -# if (defined(__i386__) || defined(__x86_64__)) -# define UTILS_X86_PAUSE {__asm__ __volatile__( "rep; nop" : : : "memory" );} -# define UTILS_WAIT_FOR_INTERRUPT() UTILS_X86_PAUSE -# define UTILS_WAIT_FOR_EVENT() UTILS_X86_PAUSE -# define UTILS_BROADCAST_EVENT() -# define UTILS_SIGNAL_EVENT() -# define UTILS_PAUSE() UTILS_X86_PAUSE -# define UTILS_PREFETCHW(addr) UTILS_PREFETCH(addr) -# else // !x86 -# define UTILS_WAIT_FOR_INTERRUPT() -# define UTILS_WAIT_FOR_EVENT() -# define UTILS_BROADCAST_EVENT() -# define UTILS_SIGNAL_EVENT() -# define UTILS_PAUSE() -# define UTILS_PREFETCHW(addr) UTILS_PREFETCH(addr) -# endif // x86 -#endif // __ARM_ACLE - - -// ssize_t is a POSIX type. -#if defined(WIN32) || defined(_WIN32) -#include -typedef SSIZE_T ssize_t; -#endif - -#ifdef _MSC_VER -# define UTILS_EMPTY_BASES __declspec(empty_bases) -#else -# define UTILS_EMPTY_BASES -#endif - -#if defined(WIN32) || defined(_WIN32) - #define IMPORTSYMB __declspec(dllimport) -#else - #define IMPORTSYMB -#endif - -#if defined(_MSC_VER) && !defined(__PRETTY_FUNCTION__) -# define __PRETTY_FUNCTION__ __FUNCSIG__ -#endif - - - -#endif // TNT_UTILS_COMPILER_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_COMPILER_H +#define TNT_UTILS_COMPILER_H + +// compatibility with non-clang compilers... +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#if __has_attribute(visibility) +# define UTILS_PUBLIC __attribute__((visibility("default"))) +#else +# define UTILS_PUBLIC +#endif + +#if __has_attribute(deprecated) +# define UTILS_DEPRECATED [[deprecated]] +#else +# define UTILS_DEPRECATED +#endif + +#if __has_attribute(packed) +# define UTILS_PACKED __attribute__((packed)) +#else +# define UTILS_PACKED +#endif + +#if __has_attribute(noreturn) +# define UTILS_NORETURN __attribute__((noreturn)) +#else +# define UTILS_NORETURN +#endif + +#if __has_attribute(visibility) +# ifndef TNT_DEV +# define UTILS_PRIVATE __attribute__((visibility("hidden"))) +# else +# define UTILS_PRIVATE +# endif +#else +# define UTILS_PRIVATE +#endif + +#define UTILS_NO_SANITIZE_THREAD +#if __has_feature(thread_sanitizer) +#undef UTILS_NO_SANITIZE_THREAD +#define UTILS_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread"))) +#endif + +#define UTILS_HAS_SANITIZE_THREAD 0 +#if __has_feature(thread_sanitizer) || defined(__SANITIZE_THREAD__) +#undef UTILS_HAS_SANITIZE_THREAD +#define UTILS_HAS_SANITIZE_THREAD 1 +#endif + +#define UTILS_HAS_SANITIZE_MEMORY 0 +#if __has_feature(memory_sanitizer) +#undef UTILS_HAS_SANITIZE_MEMORY +#define UTILS_HAS_SANITIZE_MEMORY 1 +#endif + +/* + * helps the compiler's optimizer predicting branches + */ +#if __has_builtin(__builtin_expect) +# ifdef __cplusplus +# define UTILS_LIKELY( exp ) (__builtin_expect( !!(exp), true )) +# define UTILS_UNLIKELY( exp ) (__builtin_expect( !!(exp), false )) +# else +# define UTILS_LIKELY( exp ) (__builtin_expect( !!(exp), 1 )) +# define UTILS_UNLIKELY( exp ) (__builtin_expect( !!(exp), 0 )) +# endif +#else +# define UTILS_LIKELY( exp ) (!!(exp)) +# define UTILS_UNLIKELY( exp ) (!!(exp)) +#endif + +#if __has_builtin(__builtin_prefetch) +# define UTILS_PREFETCH( exp ) (__builtin_prefetch(exp)) +#else +# define UTILS_PREFETCH( exp ) +#endif + +#if __has_builtin(__builtin_assume) +# define UTILS_ASSUME( exp ) (__builtin_assume(exp)) +#else +# define UTILS_ASSUME( exp ) +#endif + +#if (defined(__i386__) || defined(__x86_64__)) +# define UTILS_HAS_HYPER_THREADING 1 // on x86 we assume we have hyper-threading. 
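A small sketch of how the branch-prediction helpers defined earlier in this header are meant to be used (the error path is assumed to be the rare one):

    #include <utils/compiler.h>

    int firstByte(const char* s) noexcept {
        if (UTILS_UNLIKELY(s == nullptr)) {
            return -1;   // cold path: the hint tells the optimizer this branch is rare
        }
        return s[0];
    }

UTILS_LIKELY and UTILS_UNLIKELY expand to __builtin_expect when the compiler provides it and degrade to a plain (!!(exp)) otherwise, so call sites stay portable.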
+#else +# define UTILS_HAS_HYPER_THREADING 0 +#endif + +#if defined(__EMSCRIPTEN__) || defined(FILAMENT_SINGLE_THREADED) +# define UTILS_HAS_THREADING 0 +#else +# define UTILS_HAS_THREADING 1 +#endif + +#if __has_attribute(noinline) +#define UTILS_NOINLINE __attribute__((noinline)) +#else +#define UTILS_NOINLINE +#endif + +#if __has_attribute(always_inline) +#define UTILS_ALWAYS_INLINE __attribute__((always_inline)) +#else +#define UTILS_ALWAYS_INLINE +#endif + +#if __has_attribute(pure) +#define UTILS_PURE __attribute__((pure)) +#else +#define UTILS_PURE +#endif + +#if __has_attribute(maybe_unused) +#define UTILS_UNUSED [[maybe_unused]] +#define UTILS_UNUSED_IN_RELEASE [[maybe_unused]] +#elif __has_attribute(unused) +#define UTILS_UNUSED __attribute__((unused)) +#define UTILS_UNUSED_IN_RELEASE __attribute__((unused)) +#else +#define UTILS_UNUSED +#define UTILS_UNUSED_IN_RELEASE +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1900 +# define UTILS_RESTRICT __restrict +#elif (defined(__clang__) || defined(__GNUC__)) +# define UTILS_RESTRICT __restrict__ +#else +# define UTILS_RESTRICT +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1900 +# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 1 +#elif __has_feature(cxx_thread_local) +# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 1 +#else +# define UTILS_HAS_FEATURE_CXX_THREAD_LOCAL 0 +#endif + +#if __has_feature(cxx_rtti) || defined(_CPPRTTI) +# define UTILS_HAS_RTTI 1 +#else +# define UTILS_HAS_RTTI 0 +#endif + +#ifdef __ARM_ACLE +# include +# define UTILS_WAIT_FOR_INTERRUPT() __wfi() +# define UTILS_WAIT_FOR_EVENT() __wfe() +# define UTILS_BROADCAST_EVENT() __sev() +# define UTILS_SIGNAL_EVENT() __sevl() +# define UTILS_PAUSE() __yield() +# define UTILS_PREFETCHW(addr) __pldx(1, 0, 0, addr) +#else // !__ARM_ACLE +# if (defined(__i386__) || defined(__x86_64__)) +# define UTILS_X86_PAUSE {__asm__ __volatile__( "rep; nop" : : : "memory" );} +# define UTILS_WAIT_FOR_INTERRUPT() UTILS_X86_PAUSE +# define UTILS_WAIT_FOR_EVENT() UTILS_X86_PAUSE +# define UTILS_BROADCAST_EVENT() +# define UTILS_SIGNAL_EVENT() +# define UTILS_PAUSE() UTILS_X86_PAUSE +# define UTILS_PREFETCHW(addr) UTILS_PREFETCH(addr) +# else // !x86 +# define UTILS_WAIT_FOR_INTERRUPT() +# define UTILS_WAIT_FOR_EVENT() +# define UTILS_BROADCAST_EVENT() +# define UTILS_SIGNAL_EVENT() +# define UTILS_PAUSE() +# define UTILS_PREFETCHW(addr) UTILS_PREFETCH(addr) +# endif // x86 +#endif // __ARM_ACLE + + +// ssize_t is a POSIX type. +#if defined(WIN32) || defined(_WIN32) +#include +typedef SSIZE_T ssize_t; +#endif + +#ifdef _MSC_VER +# define UTILS_EMPTY_BASES __declspec(empty_bases) +#else +# define UTILS_EMPTY_BASES +#endif + +#if defined(WIN32) || defined(_WIN32) + #define IMPORTSYMB __declspec(dllimport) +#else + #define IMPORTSYMB +#endif + +#if defined(_MSC_VER) && !defined(__PRETTY_FUNCTION__) +# define __PRETTY_FUNCTION__ __FUNCSIG__ +#endif + + + +#endif // TNT_UTILS_COMPILER_H diff --git a/ios/include/utils/compressed_pair.h b/ios/include/utils/compressed_pair.h index c3b9195b..62bf2de4 100644 --- a/ios/include/utils/compressed_pair.h +++ b/ios/include/utils/compressed_pair.h @@ -1,68 +1,68 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_COMPRESSED_PAIR_H -#define TNT_UTILS_COMPRESSED_PAIR_H - -#include -#include - -namespace utils { - -template -struct dependent_type : public T { -}; - -template, bool> = true> -struct compressed_pair : private T1, private T2 { - - template, Dummy>::value && - dependent_type, Dummy>::value>> - compressed_pair() : T1(), T2() {} - - template - compressed_pair(U1&& other1, U2&& other2) - : T1(std::forward(other1)), - T2(std::forward(other2)) {} - - T1& first() noexcept { - return static_cast(*this); - } - - T2& second() noexcept { - return static_cast(*this); - } - - T1 const& first() const noexcept { - return static_cast(*this); - } - - T2 const& second() const noexcept { - return static_cast(*this); - } - - void swap(compressed_pair& other) noexcept { - using std::swap; - swap(first(), other.first()); - swap(second(), other.second()); - } -}; - -} // namespace utils - -#endif //TNT_UTILS_COMPRESSED_PAIR_H +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_COMPRESSED_PAIR_H +#define TNT_UTILS_COMPRESSED_PAIR_H + +#include +#include + +namespace utils { + +template +struct dependent_type : public T { +}; + +template, bool> = true> +struct compressed_pair : private T1, private T2 { + + template, Dummy>::value && + dependent_type, Dummy>::value>> + compressed_pair() : T1(), T2() {} + + template + compressed_pair(U1&& other1, U2&& other2) + : T1(std::forward(other1)), + T2(std::forward(other2)) {} + + T1& first() noexcept { + return static_cast(*this); + } + + T2& second() noexcept { + return static_cast(*this); + } + + T1 const& first() const noexcept { + return static_cast(*this); + } + + T2 const& second() const noexcept { + return static_cast(*this); + } + + void swap(compressed_pair& other) noexcept { + using std::swap; + swap(first(), other.first()); + swap(second(), other.second()); + } +}; + +} // namespace utils + +#endif // TNT_UTILS_COMPRESSED_PAIR_H diff --git a/ios/include/utils/debug.h b/ios/include/utils/debug.h index 6984f75e..4c118725 100644 --- a/ios/include/utils/debug.h +++ b/ios/include/utils/debug.h @@ -1,33 +1,33 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_DEBUG_H -#define TNT_UTILS_DEBUG_H - -#include - -namespace utils { -void panic(const char *func, const char * file, int line, const char *assertion) noexcept; -} // namespace filament - -#ifdef NDEBUG -# define assert_invariant(e) ((void)0) -#else -# define assert_invariant(e) \ - (UTILS_LIKELY(e) ? ((void)0) : utils::panic(__func__, __FILE__, __LINE__, #e)) -#endif // NDEBUG - -#endif //TNT_UTILS_DEBUG_H +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_DEBUG_H +#define TNT_UTILS_DEBUG_H + +#include + +namespace utils { +void panic(const char *func, const char * file, int line, const char *assertion) noexcept; +} // namespace filament + +#ifdef NDEBUG +# define assert_invariant(e) ((void)0) +#else +# define assert_invariant(e) \ + (UTILS_LIKELY(e) ? ((void)0) : utils::panic(__func__, __FILE__, __LINE__, #e)) +#endif // NDEBUG + +#endif // TNT_UTILS_DEBUG_H diff --git a/ios/include/utils/generic/Condition.h b/ios/include/utils/generic/Condition.h index 8df002f9..accde2be 100644 --- a/ios/include/utils/generic/Condition.h +++ b/ios/include/utils/generic/Condition.h @@ -1,39 +1,39 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_GENERIC_CONDITION_H -#define UTILS_GENERIC_CONDITION_H - -#include - -namespace utils { - -class Condition : public std::condition_variable { -public: - using std::condition_variable::condition_variable; - - inline void notify_n(size_t n) noexcept { - if (n == 1) { - notify_one(); - } else if (n > 1) { - notify_all(); - } - } -}; - -} // namespace utils - -#endif // UTILS_GENERIC_CONDITION_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_GENERIC_CONDITION_H +#define TNT_UTILS_GENERIC_CONDITION_H + +#include + +namespace utils { + +class Condition : public std::condition_variable { +public: + using std::condition_variable::condition_variable; + + inline void notify_n(size_t n) noexcept { + if (n == 1) { + notify_one(); + } else if (n > 1) { + notify_all(); + } + } +}; + +} // namespace utils + +#endif // TNT_UTILS_GENERIC_CONDITION_H diff --git a/ios/include/utils/generic/Mutex.h b/ios/include/utils/generic/Mutex.h index cbd90942..d5976d04 100644 --- a/ios/include/utils/generic/Mutex.h +++ b/ios/include/utils/generic/Mutex.h @@ -1,28 +1,28 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_GENERIC_MUTEX_H -#define UTILS_GENERIC_MUTEX_H - -#include - -namespace utils { - -using Mutex = std::mutex; - -} // namespace utils - -#endif // UTILS_GENERIC_MUTEX_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_GENERIC_MUTEX_H +#define TNT_UTILS_GENERIC_MUTEX_H + +#include + +namespace utils { + +using Mutex = std::mutex; + +} // namespace utils + +#endif // TNT_UTILS_GENERIC_MUTEX_H diff --git a/ios/include/utils/generic/ThermalManager.h b/ios/include/utils/generic/ThermalManager.h new file mode 100644 index 00000000..2d0088e5 --- /dev/null +++ b/ios/include/utils/generic/ThermalManager.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_GENERIC_THERMALMANAGER_H +#define TNT_UTILS_GENERIC_THERMALMANAGER_H + +#include + +#include + +namespace utils { + +class ThermalManager { +public: + enum class ThermalStatus : int8_t { + ERROR = -1, + NONE, + LIGHT, + MODERATE, + SEVERE, + CRITICAL, + EMERGENCY, + SHUTDOWN + }; + + ThermalManager() = default; + + // Movable + ThermalManager(ThermalManager&& rhs) noexcept = default; + ThermalManager& operator=(ThermalManager&& rhs) noexcept = default; + + // not copiable + ThermalManager(ThermalManager const& rhs) = delete; + ThermalManager& operator=(ThermalManager const& rhs) = delete; + + ThermalStatus getCurrentThermalStatus() const noexcept { + return ThermalStatus::NONE; + } +}; + +} // namespace utils + +#endif // TNT_UTILS_GENERIC_THERMALMANAGER_H diff --git a/ios/include/utils/linux/Condition.h b/ios/include/utils/linux/Condition.h index 15f14143..c2ff21f8 100644 --- a/ios/include/utils/linux/Condition.h +++ b/ios/include/utils/linux/Condition.h @@ -1,123 +1,123 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_LINUX_CONDITION_H -#define UTILS_LINUX_CONDITION_H - -#include -#include -#include // for cv_status -#include -#include // for unique_lock - -#include - -#include - -namespace utils { - -/* - * A very simple condition variable class that can be used as an (almost) drop-in replacement - * for std::condition_variable (doesn't have the timed wait() though). - * It is very low overhead as most of it is inlined. 
- */ - -class Condition { -public: - Condition() noexcept = default; - Condition(const Condition&) = delete; - Condition& operator=(const Condition&) = delete; - - void notify_all() noexcept { - pulse(std::numeric_limits::max()); - } - - void notify_one() noexcept { - pulse(1); - } - - void notify_n(size_t n) noexcept { - if (n > 0) pulse(n); - } - - void wait(std::unique_lock& lock) noexcept { - wait_until(lock.mutex(), false, nullptr); - } - - template - void wait(std::unique_lock& lock, P predicate) { - while (!predicate()) { - wait(lock); - } - } - - template - std::cv_status wait_until(std::unique_lock& lock, - const std::chrono::time_point& timeout_time) noexcept { - // convert to nanoseconds - uint64_t ns = std::chrono::duration(timeout_time.time_since_epoch()).count(); - using sec_t = decltype(timespec::tv_sec); - using nsec_t = decltype(timespec::tv_nsec); - timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) }; - return wait_until(lock.mutex(), false, &ts); - } - - template - std::cv_status wait_until(std::unique_lock& lock, - const std::chrono::time_point& timeout_time) noexcept { - // convert to nanoseconds - uint64_t ns = std::chrono::duration(timeout_time.time_since_epoch()).count(); - using sec_t = decltype(timespec::tv_sec); - using nsec_t = decltype(timespec::tv_nsec); - timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) }; - return wait_until(lock.mutex(), true, &ts); - } - - template - bool wait_until(std::unique_lock& lock, - const std::chrono::time_point& timeout_time, P predicate) noexcept { - while (!predicate()) { - if (wait_until(lock, timeout_time) == std::cv_status::timeout) { - return predicate(); - } - } - return true; - } - - template - std::cv_status wait_for(std::unique_lock& lock, - const std::chrono::duration& rel_time) noexcept { - return wait_until(lock, std::chrono::steady_clock::now() + rel_time); - } - - template - bool wait_for(std::unique_lock& lock, - const std::chrono::duration& rel_time, P pred) noexcept { - return wait_until(lock, std::chrono::steady_clock::now() + rel_time, std::move(pred)); - } - -private: - std::atomic mState = { 0 }; - - void pulse(int threadCount) noexcept; - - std::cv_status wait_until(Mutex* lock, - bool realtimeClock, timespec* ts) noexcept; -}; - -} // namespace utils - -#endif // UTILS_LINUX_CONDITION_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_LINUX_CONDITION_H +#define TNT_UTILS_LINUX_CONDITION_H + +#include +#include +#include // for cv_status +#include +#include // for unique_lock + +#include + +#include + +namespace utils { + +/* + * A very simple condition variable class that can be used as an (almost) drop-in replacement + * for std::condition_variable (doesn't have the timed wait() though). + * It is very low overhead as most of it is inlined. 
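A minimal sketch of the intended pairing with utils::Mutex, mirroring how std::condition_variable is used with std::mutex (the utils/Condition.h and utils/Mutex.h front-end headers are assumed to forward to this platform implementation):

    #include <utils/Condition.h>   // assumed front-end selecting this implementation
    #include <utils/Mutex.h>
    #include <mutex>               // std::lock_guard, std::unique_lock

    utils::Mutex lock;
    utils::Condition cond;
    bool ready = false;

    void waitForReady() {                        // consumer side
        std::unique_lock<utils::Mutex> guard(lock);
        cond.wait(guard, [] { return ready; });  // predicate overload defined below
    }

    void signalReady() {                         // producer side
        {
            std::lock_guard<utils::Mutex> guard(lock);
            ready = true;
        }
        cond.notify_one();
    }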
+ */ + +class Condition { +public: + Condition() noexcept = default; + Condition(const Condition&) = delete; + Condition& operator=(const Condition&) = delete; + + void notify_all() noexcept { + pulse(std::numeric_limits::max()); + } + + void notify_one() noexcept { + pulse(1); + } + + void notify_n(size_t n) noexcept { + if (n > 0) pulse(n); + } + + void wait(std::unique_lock& lock) noexcept { + wait_until(lock.mutex(), false, nullptr); + } + + template + void wait(std::unique_lock& lock, P predicate) { + while (!predicate()) { + wait(lock); + } + } + + template + std::cv_status wait_until(std::unique_lock& lock, + const std::chrono::time_point& timeout_time) noexcept { + // convert to nanoseconds + uint64_t ns = std::chrono::duration(timeout_time.time_since_epoch()).count(); + using sec_t = decltype(timespec::tv_sec); + using nsec_t = decltype(timespec::tv_nsec); + timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) }; + return wait_until(lock.mutex(), false, &ts); + } + + template + std::cv_status wait_until(std::unique_lock& lock, + const std::chrono::time_point& timeout_time) noexcept { + // convert to nanoseconds + uint64_t ns = std::chrono::duration(timeout_time.time_since_epoch()).count(); + using sec_t = decltype(timespec::tv_sec); + using nsec_t = decltype(timespec::tv_nsec); + timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) }; + return wait_until(lock.mutex(), true, &ts); + } + + template + bool wait_until(std::unique_lock& lock, + const std::chrono::time_point& timeout_time, P predicate) noexcept { + while (!predicate()) { + if (wait_until(lock, timeout_time) == std::cv_status::timeout) { + return predicate(); + } + } + return true; + } + + template + std::cv_status wait_for(std::unique_lock& lock, + const std::chrono::duration& rel_time) noexcept { + return wait_until(lock, std::chrono::steady_clock::now() + rel_time); + } + + template + bool wait_for(std::unique_lock& lock, + const std::chrono::duration& rel_time, P pred) noexcept { + return wait_until(lock, std::chrono::steady_clock::now() + rel_time, std::move(pred)); + } + +private: + std::atomic mState = { 0 }; + + void pulse(int threadCount) noexcept; + + std::cv_status wait_until(Mutex* lock, + bool realtimeClock, timespec* ts) noexcept; +}; + +} // namespace utils + +#endif // TNT_UTILS_LINUX_CONDITION_H diff --git a/ios/include/utils/linux/Mutex.h b/ios/include/utils/linux/Mutex.h index 672a2592..2dcc7ebc 100644 --- a/ios/include/utils/linux/Mutex.h +++ b/ios/include/utils/linux/Mutex.h @@ -1,64 +1,64 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_LINUX_MUTEX_H -#define UTILS_LINUX_MUTEX_H - -#include - -#include - -namespace utils { - -/* - * A very simple mutex class that can be used as an (almost) drop-in replacement - * for std::mutex. - * It is very low overhead as most of it is inlined. 
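The drop-in-replacement claim above means existing std::mutex-style call sites keep working unchanged; a sketch guarding a shared counter:

    #include <utils/Mutex.h>
    #include <mutex>   // std::lock_guard

    utils::Mutex counterLock;
    int counter = 0;

    void increment() noexcept {
        // Uncontended path: lock() is a single compare-exchange and unlock() a single
        // exchange; the out-of-line wait()/wake() calls are reserved for contention.
        std::lock_guard<utils::Mutex> guard(counterLock);
        ++counter;
    }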
- */ - -class Mutex { -public: - constexpr Mutex() noexcept = default; - Mutex(const Mutex&) = delete; - Mutex& operator=(const Mutex&) = delete; - - void lock() noexcept { - uint32_t old_state = UNLOCKED; - if (UTILS_UNLIKELY(!mState.compare_exchange_strong(old_state, - LOCKED, std::memory_order_acquire, std::memory_order_relaxed))) { - wait(); - } - } - - void unlock() noexcept { - if (UTILS_UNLIKELY(mState.exchange(UNLOCKED, std::memory_order_release) == LOCKED_CONTENDED)) { - wake(); - } - } - -private: - enum { - UNLOCKED = 0, LOCKED = 1, LOCKED_CONTENDED = 2 - }; - std::atomic mState = { UNLOCKED }; - - void wait() noexcept; - void wake() noexcept; -}; - -} // namespace utils - -#endif // UTILS_LINUX_MUTEX_H +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_LINUX_MUTEX_H +#define TNT_UTILS_LINUX_MUTEX_H + +#include + +#include + +namespace utils { + +/* + * A very simple mutex class that can be used as an (almost) drop-in replacement + * for std::mutex. + * It is very low overhead as most of it is inlined. + */ + +class Mutex { +public: + constexpr Mutex() noexcept = default; + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; + + void lock() noexcept { + uint32_t old_state = UNLOCKED; + if (UTILS_UNLIKELY(!mState.compare_exchange_strong(old_state, + LOCKED, std::memory_order_acquire, std::memory_order_relaxed))) { + wait(); + } + } + + void unlock() noexcept { + if (UTILS_UNLIKELY(mState.exchange(UNLOCKED, std::memory_order_release) == LOCKED_CONTENDED)) { + wake(); + } + } + +private: + enum { + UNLOCKED = 0, LOCKED = 1, LOCKED_CONTENDED = 2 + }; + std::atomic mState = { UNLOCKED }; + + void wait() noexcept; + void wake() noexcept; +}; + +} // namespace utils + +#endif // TNT_UTILS_LINUX_MUTEX_H diff --git a/ios/include/utils/memalign.h b/ios/include/utils/memalign.h index 6a05f019..04cdf555 100644 --- a/ios/include/utils/memalign.h +++ b/ios/include/utils/memalign.h @@ -1,115 +1,115 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
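// A minimal usage sketch for the utils::Mutex / utils::Condition pair above, assuming they are
// used exactly like their std counterparts (std::unique_lock works because Mutex exposes
// lock()/unlock()). The work queue and item type are illustrative only.
#include <mutex>     // std::unique_lock
#include <queue>

#include <utils/Mutex.h>
#include <utils/Condition.h>

static utils::Mutex gLock;
static utils::Condition gCondition;
static std::queue<int> gWork;

void produce(int item) {
    std::unique_lock<utils::Mutex> guard(gLock);
    gWork.push(item);
    gCondition.notify_one();                                 // wake a single waiting consumer
}

int consume() {
    std::unique_lock<utils::Mutex> guard(gLock);
    gCondition.wait(guard, [] { return !gWork.empty(); });   // predicate overload declared above
    int item = gWork.front();
    gWork.pop();
    return item;
}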
- */ - -#ifndef TNT_UTILS_MEMALIGN_H -#define TNT_UTILS_MEMALIGN_H - -#include - -#include -#include -#include - -#if defined(WIN32) -#include -#endif - -namespace utils { - -inline void* aligned_alloc(size_t size, size_t align) noexcept { - assert(align && !(align & align - 1)); - - void* p = nullptr; - - // must be a power of two and >= sizeof(void*) - while (align < sizeof(void*)) { - align <<= 1u; - } - -#if defined(WIN32) - p = ::_aligned_malloc(size, align); -#else - ::posix_memalign(&p, align, size); -#endif - return p; -} - -inline void aligned_free(void* p) noexcept { -#if defined(WIN32) - ::_aligned_free(p); -#else - ::free(p); -#endif -} - -/* - * This allocator can be used with std::vector for instance to ensure all items are aligned - * to their alignof(). e.g. - * - * template - * using aligned_vector = std::vector>; - * - * aligned_vector foos; - * - */ -template -class STLAlignedAllocator { - static_assert(!(alignof(TYPE) & (alignof(TYPE) - 1)), "alignof(T) must be a power of two"); - -public: - using value_type = TYPE; - using pointer = TYPE*; - using const_pointer = const TYPE*; - using reference = TYPE&; - using const_reference = const TYPE&; - using size_type = std::size_t; - using difference_type = std::ptrdiff_t; - using propagate_on_container_move_assignment = std::true_type; - using is_always_equal = std::true_type; - - template - struct rebind { using other = STLAlignedAllocator; }; - - inline STLAlignedAllocator() noexcept = default; - - template - inline explicit STLAlignedAllocator(const STLAlignedAllocator&) noexcept {} - - inline ~STLAlignedAllocator() noexcept = default; - - inline pointer allocate(size_type n) noexcept { - return (pointer)aligned_alloc(n * sizeof(value_type), alignof(TYPE)); - } - - inline void deallocate(pointer p, size_type) { - aligned_free(p); - } - - // stateless allocators are always equal - template - bool operator==(const STLAlignedAllocator& rhs) const noexcept { - return true; - } - - template - bool operator!=(const STLAlignedAllocator& rhs) const noexcept { - return false; - } -}; - -} // namespace utils - -#endif // TNT_UTILS_MEMALIGN_H +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_MEMALIGN_H +#define TNT_UTILS_MEMALIGN_H + +#include + +#include +#include +#include + +#if defined(WIN32) +#include +#endif + +namespace utils { + +inline void* aligned_alloc(size_t size, size_t align) noexcept { + assert(align && !(align & align - 1)); + + void* p = nullptr; + + // must be a power of two and >= sizeof(void*) + while (align < sizeof(void*)) { + align <<= 1u; + } + +#if defined(WIN32) + p = ::_aligned_malloc(size, align); +#else + ::posix_memalign(&p, align, size); +#endif + return p; +} + +inline void aligned_free(void* p) noexcept { +#if defined(WIN32) + ::_aligned_free(p); +#else + ::free(p); +#endif +} + +/* + * This allocator can be used with std::vector for instance to ensure all items are aligned + * to their alignof(). e.g. 
+ * + * template + * using aligned_vector = std::vector>; + * + * aligned_vector foos; + * + */ +template +class STLAlignedAllocator { + static_assert(!(alignof(TYPE) & (alignof(TYPE) - 1)), "alignof(T) must be a power of two"); + +public: + using value_type = TYPE; + using pointer = TYPE*; + using const_pointer = const TYPE*; + using reference = TYPE&; + using const_reference = const TYPE&; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using propagate_on_container_move_assignment = std::true_type; + using is_always_equal = std::true_type; + + template + struct rebind { using other = STLAlignedAllocator; }; + + inline STLAlignedAllocator() noexcept = default; + + template + inline explicit STLAlignedAllocator(const STLAlignedAllocator&) noexcept {} + + inline ~STLAlignedAllocator() noexcept = default; + + inline pointer allocate(size_type n) noexcept { + return (pointer)aligned_alloc(n * sizeof(value_type), alignof(TYPE)); + } + + inline void deallocate(pointer p, size_type) { + aligned_free(p); + } + + // stateless allocators are always equal + template + bool operator==(const STLAlignedAllocator& rhs) const noexcept { + return true; + } + + template + bool operator!=(const STLAlignedAllocator& rhs) const noexcept { + return false; + } +}; + +} // namespace utils + +#endif // TNT_UTILS_MEMALIGN_H diff --git a/ios/include/utils/ostream.h b/ios/include/utils/ostream.h index 596af86d..1c5e2602 100644 --- a/ios/include/utils/ostream.h +++ b/ios/include/utils/ostream.h @@ -1,136 +1,139 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_UTILS_OSTREAM_H -#define TNT_UTILS_OSTREAM_H - -#include - -#include -#include // ssize_t is a POSIX type. 
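// The aligned_vector alias sketched in the utils/memalign.h comment above, written out in full;
// the template arguments are an assumption based on the allocator's signature, and Foo is a
// placeholder over-aligned type.
#include <vector>
#include <utils/memalign.h>

template<typename T>
using aligned_vector = std::vector<T, utils::STLAlignedAllocator<T>>;

struct alignas(32) Foo { float values[8]; };

aligned_vector<Foo> foos;   // each element is allocated on a 32-byte boundary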
- -namespace utils { -namespace io { - -class UTILS_PUBLIC ostream { -public: - - virtual ~ostream(); - - ostream& operator<<(short value) noexcept; - ostream& operator<<(unsigned short value) noexcept; - - ostream& operator<<(char value) noexcept; - ostream& operator<<(unsigned char value) noexcept; - - ostream& operator<<(int value) noexcept; - ostream& operator<<(unsigned int value) noexcept; - - ostream& operator<<(long value) noexcept; - ostream& operator<<(unsigned long value) noexcept; - - ostream& operator<<(long long value) noexcept; - ostream& operator<<(unsigned long long value) noexcept; - - ostream& operator<<(float value) noexcept; - ostream& operator<<(double value) noexcept; - ostream& operator<<(long double value) noexcept; - - ostream& operator<<(bool value) noexcept; - - ostream& operator<<(const void* value) noexcept; - - ostream& operator<<(const char* string) noexcept; - ostream& operator<<(const unsigned char* string) noexcept; - - ostream& operator<<(ostream& (* f)(ostream&)) noexcept { return f(*this); } - - ostream& dec() noexcept; - ostream& hex() noexcept; - -protected: - class Buffer { - public: - Buffer() noexcept; - ~Buffer() noexcept; - - Buffer(const Buffer&) = delete; - Buffer& operator=(const Buffer&) = delete; - - char* buffer = nullptr; // buffer address - char* curr = nullptr; // current pointer - size_t size = 0; // size remaining - size_t capacity = 0; // total capacity of the buffer - const char* get() const noexcept { return buffer; } - void advance(ssize_t n) noexcept; - void reset() noexcept; - void reserve(size_t newSize) noexcept; - }; - - Buffer mData; - Buffer& getBuffer() noexcept { return mData; } - -private: - virtual ostream& flush() noexcept = 0; - - friend ostream& hex(ostream& s) noexcept; - friend ostream& dec(ostream& s) noexcept; - friend ostream& endl(ostream& s) noexcept; - friend ostream& flush(ostream& s) noexcept; - - enum type { - SHORT, USHORT, CHAR, UCHAR, INT, UINT, LONG, ULONG, LONG_LONG, ULONG_LONG, DOUBLE, - LONG_DOUBLE - }; - - bool mShowHex = false; - const char* getFormat(type t) const noexcept; - - /* - * Checks that the buffer has room for s additional bytes, growing the allocation if necessary. - */ - void growBufferIfNeeded(size_t s) noexcept; -}; - -// handles std::string -inline ostream& operator << (ostream& o, std::string const& s) noexcept { return o << s.c_str(); } - -// handles utils::bitset -inline ostream& operator << (ostream& o, utils::bitset32 const& s) noexcept { - return o << (void*)uintptr_t(s.getValue()); -} - -// handles vectors from libmath (but we do this generically, without needing a dependency on libmath) -template class VECTOR, typename T> -inline ostream& operator<<(ostream& stream, const VECTOR& v) { - stream << "< "; - for (size_t i = 0; i < v.size() - 1; i++) { - stream << v[i] << ", "; - } - stream << v[v.size() - 1] << " >"; - return stream; -} - -inline ostream& hex(ostream& s) noexcept { return s.hex(); } -inline ostream& dec(ostream& s) noexcept { return s.dec(); } -inline ostream& endl(ostream& s) noexcept { s << "\n"; return s.flush(); } -inline ostream& flush(ostream& s) noexcept { return s.flush(); } - -} // namespace io - -} // namespace utils - -#endif // TNT_UTILS_OSTREAM_H +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_OSTREAM_H +#define TNT_UTILS_OSTREAM_H + +#include +#include +#include + +#include +#include // ssize_t is a POSIX type. + +namespace utils::io { + +class UTILS_PUBLIC ostream { +public: + + virtual ~ostream(); + + ostream& operator<<(short value) noexcept; + ostream& operator<<(unsigned short value) noexcept; + + ostream& operator<<(char value) noexcept; + ostream& operator<<(unsigned char value) noexcept; + + ostream& operator<<(int value) noexcept; + ostream& operator<<(unsigned int value) noexcept; + + ostream& operator<<(long value) noexcept; + ostream& operator<<(unsigned long value) noexcept; + + ostream& operator<<(long long value) noexcept; + ostream& operator<<(unsigned long long value) noexcept; + + ostream& operator<<(float value) noexcept; + ostream& operator<<(double value) noexcept; + ostream& operator<<(long double value) noexcept; + + ostream& operator<<(bool value) noexcept; + + ostream& operator<<(const void* value) noexcept; + + ostream& operator<<(const char* string) noexcept; + ostream& operator<<(const unsigned char* string) noexcept; + + ostream& operator<<(ostream& (* f)(ostream&)) noexcept { return f(*this); } + + ostream& dec() noexcept; + ostream& hex() noexcept; + +protected: + class Buffer { + public: + Buffer() noexcept; + ~Buffer() noexcept; + + Buffer(const Buffer&) = delete; + Buffer& operator=(const Buffer&) = delete; + + const char* get() const noexcept { return buffer; } + + std::pair grow(size_t s) noexcept; + void advance(ssize_t n) noexcept; + void reset() noexcept; + + private: + void reserve(size_t newSize) noexcept; + + char* buffer = nullptr; // buffer address + char* curr = nullptr; // current pointer + size_t size = 0; // size remaining + size_t capacity = 0; // total capacity of the buffer + }; + + std::mutex mLock; + Buffer mData; + Buffer& getBuffer() noexcept { return mData; } + + ostream& print(const char* format, ...) 
noexcept; + +private: + virtual ostream& flush() noexcept = 0; + + friend ostream& hex(ostream& s) noexcept; + friend ostream& dec(ostream& s) noexcept; + friend ostream& endl(ostream& s) noexcept; + friend ostream& flush(ostream& s) noexcept; + + enum type { + SHORT, USHORT, CHAR, UCHAR, INT, UINT, LONG, ULONG, LONG_LONG, ULONG_LONG, DOUBLE, + LONG_DOUBLE + }; + + inline const char* getFormat(type t) const noexcept; + + bool mShowHex = false; +}; + +// handles std::string +inline ostream& operator << (ostream& o, std::string const& s) noexcept { return o << s.c_str(); } + +// handles utils::bitset +inline ostream& operator << (ostream& o, utils::bitset32 const& s) noexcept { + return o << (void*)uintptr_t(s.getValue()); +} + +// handles vectors from libmath (but we do this generically, without needing a dependency on libmath) +template class VECTOR, typename T> +inline ostream& operator<<(ostream& stream, const VECTOR& v) { + stream << "< "; + for (size_t i = 0; i < v.size() - 1; i++) { + stream << v[i] << ", "; + } + stream << v[v.size() - 1] << " >"; + return stream; +} + +inline ostream& hex(ostream& s) noexcept { return s.hex(); } +inline ostream& dec(ostream& s) noexcept { return s.dec(); } +inline ostream& endl(ostream& s) noexcept { s << "\n"; return s.flush(); } +inline ostream& flush(ostream& s) noexcept { return s.flush(); } + +} // namespace utils::io + +#endif // TNT_UTILS_OSTREAM_H diff --git a/ios/include/utils/sstream.h b/ios/include/utils/sstream.h index 411a128a..35cb61b6 100644 --- a/ios/include/utils/sstream.h +++ b/ios/include/utils/sstream.h @@ -1,37 +1,37 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TNT_SSTREAM_H -#define TNT_SSTREAM_H - -#include - -namespace utils { -namespace io { - -class sstream : public ostream { -public: - - ostream &flush() noexcept override; - - const char* c_str() const noexcept; - -}; - -} // namespace io -} // namespace utils - -#endif //TNT_SSTREAM_H +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_SSTREAM_H +#define TNT_UTILS_SSTREAM_H + +#include + +namespace utils { +namespace io { + +class sstream : public ostream { +public: + + ostream &flush() noexcept override; + + const char* c_str() const noexcept; + +}; + +} // namespace io +} // namespace utils + +#endif // TNT_UTILS_SSTREAM_H diff --git a/ios/src/morph/Log.h b/ios/include/utils/string.h similarity index 58% rename from ios/src/morph/Log.h rename to ios/include/utils/string.h index dcf7a1cb..040044c0 100644 --- a/ios/src/morph/Log.h +++ b/ios/include/utils/string.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 The Android Open Source Project + * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,30 +14,15 @@ * limitations under the License. */ -#ifndef TNT_UTILS_LOG_H -#define TNT_UTILS_LOG_H +#ifndef TNT_UTILS_STRING_H +#define TNT_UTILS_STRING_H -#include #include namespace utils { -struct UTILS_PUBLIC Loggers { - // DEBUG level logging stream - io::ostream& d; - - // ERROR level logging stream - io::ostream& e; - - // WARNING level logging stream - io::ostream& w; - - // INFORMATION level logging stream - io::ostream& i; -}; - -extern UTILS_PUBLIC Loggers const slog; +float strtof_c(const char* start, char** end); } // namespace utils -#endif // TNT_UTILS_LOG_H +#endif // TNT_UTILS_STRING_H diff --git a/ios/include/utils/trap.h b/ios/include/utils/trap.h index f95933d7..aedc3ddb 100644 --- a/ios/include/utils/trap.h +++ b/ios/include/utils/trap.h @@ -1,40 +1,40 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef UTILS_TRAP_H -#define UTILS_TRAP_H - -#include - -#if defined(WIN32) -#include -#include -#endif - -namespace utils { - -// This can be used as a programmatic breakpoint. -inline void debug_trap() noexcept { -#if defined(WIN32) - DebugBreak(); -#else - std::raise(SIGINT); -#endif -} - -} // namespace utils - -#endif // UTILS_TRAP_H +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_TRAP_H +#define TNT_UTILS_TRAP_H + +#include + +#if defined(WIN32) +#include +#include +#endif + +namespace utils { + +// This can be used as a programmatic breakpoint. 
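// Typical use of the programmatic breakpoint described above: trap only when an invariant is
// violated, so an attached debugger stops at the offending call site. The check is illustrative.
#include <cstddef>
#include <utils/trap.h>

void checkIndex(size_t index, size_t count) {
    if (index >= count) {
        utils::debug_trap();   // DebugBreak() on Windows, raise(SIGINT) elsewhere
    }
}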
+inline void debug_trap() noexcept { +#if defined(WIN32) + DebugBreak(); +#else + std::raise(SIGINT); +#endif +} + +} // namespace utils + +#endif // TNT_UTILS_TRAP_H diff --git a/ios/include/utils/unwindows.h b/ios/include/utils/unwindows.h index 6bd6574d..328ba4dd 100644 --- a/ios/include/utils/unwindows.h +++ b/ios/include/utils/unwindows.h @@ -1,51 +1,51 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#if defined (WIN32) - -#ifdef max -#undef max -#endif - -#ifdef min -#undef min -#endif - -#ifdef far -#undef far -#endif - -#ifdef near -#undef near -#endif - -#ifdef ERROR -#undef ERROR -#endif - -#ifdef OPAQUE -#undef OPAQUE -#endif - -#ifdef TRANSPARENT -#undef TRANSPARENT -#endif - -#ifdef PURE -#undef PURE -#endif - -#endif +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined (WIN32) + +#ifdef max +#undef max +#endif + +#ifdef min +#undef min +#endif + +#ifdef far +#undef far +#endif + +#ifdef near +#undef near +#endif + +#ifdef ERROR +#undef ERROR +#endif + +#ifdef OPAQUE +#undef OPAQUE +#endif + +#ifdef TRANSPARENT +#undef TRANSPARENT +#endif + +#ifdef PURE +#undef PURE +#endif + +#endif diff --git a/ios/include/utils/vector.h b/ios/include/utils/vector.h index fa150da0..f02419fe 100644 --- a/ios/include/utils/vector.h +++ b/ios/include/utils/vector.h @@ -1,60 +1,60 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#ifndef UTILS_VECTOR_H -#define UTILS_VECTOR_H - -namespace utils { - -/** - * Inserts the specified item in the vector at its sorted position. - */ -template -static inline void insert_sorted(std::vector& v, T item) { - auto pos = std::lower_bound(v.begin(), v.end(), item); - v.insert(pos, std::move(item)); -} - -/** - * Inserts the specified item in the vector at its sorted position. 
- * The item type must implement the < operator. If the specified - * item is already present in the vector, this method returns without - * inserting the item again. - * - * @return True if the item was inserted at is sorted position, false - * if the item already exists in the vector. - */ -template -static inline bool insert_sorted_unique(std::vector& v, T item) { - if (UTILS_LIKELY(v.size() == 0 || v.back() < item)) { - v.push_back(item); - return true; - } - - auto pos = std::lower_bound(v.begin(), v.end(), item); - if (UTILS_LIKELY(pos == v.end() || item < *pos)) { - v.insert(pos, std::move(item)); - return true; - } - - return false; -} - -} // end utils namespace - -#endif //UTILS_VECTOR_H +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_VECTOR_H +#define TNT_UTILS_VECTOR_H + +#include + +namespace utils { + +/** + * Inserts the specified item in the vector at its sorted position. + */ +template +static inline void insert_sorted(std::vector& v, T item) { + auto pos = std::lower_bound(v.begin(), v.end(), item); + v.insert(pos, std::move(item)); +} + +/** + * Inserts the specified item in the vector at its sorted position. + * The item type must implement the < operator. If the specified + * item is already present in the vector, this method returns without + * inserting the item again. + * + * @return True if the item was inserted at is sorted position, false + * if the item already exists in the vector. + */ +template +static inline bool insert_sorted_unique(std::vector& v, T item) { + if (UTILS_LIKELY(v.size() == 0 || v.back() < item)) { + v.push_back(item); + return true; + } + + auto pos = std::lower_bound(v.begin(), v.end(), item); + if (UTILS_LIKELY(pos == v.end() || item < *pos)) { + v.insert(pos, std::move(item)); + return true; + } + + return false; +} + +} // end utils namespace + +#endif // TNT_UTILS_VECTOR_H diff --git a/ios/include/utils/win32/stdtypes.h b/ios/include/utils/win32/stdtypes.h index 669f1cee..f593d8cb 100644 --- a/ios/include/utils/win32/stdtypes.h +++ b/ios/include/utils/win32/stdtypes.h @@ -1,33 +1,33 @@ -/* -* Copyright (C) 2018 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
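// Usage sketch for the sorted-insertion helpers in utils/vector.h above: keep a vector ordered
// and free of duplicates without re-sorting. The id values are illustrative.
#include <vector>
#include <utils/vector.h>

void addUnique(std::vector<int>& ids, int id) {
    // inserts at the sorted position; returns false and does nothing if id is already present
    bool inserted = utils::insert_sorted_unique(ids, id);
    (void) inserted;
}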
-*/ - -#ifndef TNT_UTILS_WIN32_STDTYPES_H -#define TNT_UTILS_WIN32_STDTYPES_H - -#if defined(WIN32) -#include - -// Copied from linux libc sys/stat.h: -#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) -#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) -#define PATH_MAX (MAX_PATH) - -// For getcwd -#include -#define getcwd _getcwd - -#endif -#endif +/* +* Copyright (C) 2018 The Android Open Source Project +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#ifndef TNT_UTILS_WIN32_STDTYPES_H +#define TNT_UTILS_WIN32_STDTYPES_H + +#if defined(WIN32) +#include + +// Copied from linux libc sys/stat.h: +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#define PATH_MAX (MAX_PATH) + +// For getcwd +#include +#define getcwd _getcwd + +#endif +#endif // TNT_UTILS_WIN32_STDTYPES_H diff --git a/ios/include/viewer/AutomationEngine.h b/ios/include/viewer/AutomationEngine.h deleted file mode 100644 index cddff04a..00000000 --- a/ios/include/viewer/AutomationEngine.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef VIEWER_AUTOMATION_ENGINE_H -#define VIEWER_AUTOMATION_ENGINE_H - -#include - -namespace filament { - -class ColorGrading; -class Engine; -class LightManager; -class MaterialInstance; -class Renderer; -class View; - -namespace viewer { - -/** - * The AutomationEngine makes it easy to push a bag of settings values to Filament. - * It can also be used to iterate through settings permutations for testing purposes. - * - * When creating an automation engine for testing purposes, clients give it an immutable reference - * to an AutomationSpec. It is always in one of two states: running or idle. The running state can - * be entered immediately (startRunning) or by requesting batch mode (startBatchMode). - * - * When executing a test, clients should call tick() after each frame is rendered, which gives - * automation an opportunity to push settings to Filament, increment the current test index (if - * enough time has elapsed), and request an asychronous screenshot. - * - * The time to sleep between tests is configurable and can be set to zero. Automation also waits a - * specified minimum number of frames between tests. - * - * Batch mode is meant for non-interactive applications. In batch mode, automation defers applying - * the first test case until the client unblocks it via signalBatchMode(). 
This is useful when - * waiting for a large model file to become fully loaded. Batch mode also offers a query - * (shouldClose) that is triggered after the last test has been invoked. - */ -class UTILS_PUBLIC AutomationEngine { -public: - /** - * Allows users to toggle screenshots, change the sleep duration between tests, etc. - */ - struct Options { - /** - * Minimum time that automation waits between applying a settings object and advancing - * to the next test case. Specified in seconds. - */ - float sleepDuration = 0.2; - - /** - * Similar to sleepDuration, but expressed as a frame count. Both the minimum sleep time - * and the minimum frame count must be elapsed before automation advances to the next test. - */ - int minFrameCount = 2; - - /** - * If true, test progress is dumped to the utils Log (info priority). - */ - bool verbose = true; - - /** - * If true, the tick function writes out a screenshot before advancing to the next test. - */ - bool exportScreenshots = false; - - /** - * If true, the tick function writes out a settings JSON file before advancing. - */ - bool exportSettings = false; - }; - - /** - * Collection of Filament objects that can be modified by the automation engine. - */ - struct ViewerContent { - View* view; - Renderer* renderer; - MaterialInstance* const* materials; - size_t materialCount; - LightManager* lightManager; - Scene* scene; - IndirectLight* indirectLight; - utils::Entity sunlight; - utils::Entity* assetLights; - size_t assetLightCount; - }; - - /** - * Creates an automation engine and places it in an idle state. - * - * @param spec Specifies a set of settings permutations (owned by the client). - * @param settings Client-owned settings object. This not only supplies the initial - * state, it also receives changes during tick(). This is useful when - * building automation into an application that has a settings UI. - * - * @see setOptions - * @see startRunning - */ - AutomationEngine(const AutomationSpec* spec, Settings* settings) : - mSpec(spec), mSettings(settings) {} - - /** - * Shortcut constructor that creates an automation engine from a JSON string. - * - * This constructor can be used if the user does not need to monitor how the settings - * change over time and does not need ownership over the AutomationSpec. - * - * An example of a JSON spec can be found by searching the repo for DEFAULT_AUTOMATION. - * This is documented using a JSON schema (look for viewer/schemas/automation.json). - * - * @param jsonSpec Valid JSON string that conforms to the automation schema. - * @param size Number of characters in the JSON string. - * @return Automation engine or null if unable to read the JSON. - */ - static AutomationEngine* createFromJSON(const char* jsonSpec, size_t size); - - /** - * Creates an automation engine for the sole purpose of pushing settings, or for executing - * the default test sequence. - * - * To see how the default test sequence is generated, search for DEFAULT_AUTOMATION. - */ - static AutomationEngine* createDefault(); - - /** - * Activates the automation test. During the subsequent call to tick(), the first test is - * applied and automation enters the running state. - */ - void startRunning(); - - /** - * Activates the automation test, but enters a paused state until the user calls - * signalBatchMode(). - */ - void startBatchMode(); - - /** - * Notifies the automation engine that time has passed, a new frame has been rendered. 
- * - * This is when settings get applied, screenshots are (optionally) exported, and the internal - * test counter is potentially incremented. - * - * @param content Contains the Filament View, Materials, and Renderer that get modified. - * @param deltaTime The amount of time that has passed since the previous tick in seconds. - */ - void tick(const ViewerContent& content, float deltaTime); - - /** - * Mutates a set of client-owned Filament objects according to a JSON string. - * - * This method is an alternative to tick(). It allows clients to use the automation engine as a - * remote control, as opposed to iterating through a predetermined test sequence. - * - * This updates the stashed Settings object, then pushes those settings to the given - * Filament objects. Clients can optionally call getColorGrading() after calling this method. - * - * @param json Contains the JSON string with a set of changes that need to be pushed. - * @param jsonLength Number of characters in the json string. - * @param content Contains a set of Filament objects that you want to mutate. - */ - void applySettings(const char* json, size_t jsonLength, const ViewerContent& content); - - /** - * Gets a color grading object that corresponds to the latest settings. - * - * This method either returns a cached instance, or it destroys the cached instance and creates - * a new one. - */ - ColorGrading* getColorGrading(Engine* engine); - - /** - * Gets the current viewer options. - * - * NOTE: Focal length here might be different from the user-specified value, due to DoF options. - */ - ViewerOptions getViewerOptions() const; - - /** - * Signals that batch mode can begin. Call this after all meshes and textures finish loading. - */ - void signalBatchMode() { mBatchModeAllowed = true; } - - /** - * Cancels an in-progress automation session. - */ - void stopRunning() { mIsRunning = false; } - - /** - * Signals that the application is closing, so all pending screenshots should be cancelled. - */ - void terminate(); - - /** - * Configures the automation engine for users who wish to set up a custom sleep time - * between tests, etc. - */ - void setOptions(Options options) { mOptions = options; } - - /** - * Returns true if automation is in batch mode and all tests have finished. - */ - bool shouldClose() const { return mShouldClose; } - - /** - * Convenience function that writes out a JSON file to disk containing all settings. - * - * @param Settings State vector to serialize. - * @param filename Desired JSON filename. - */ - static void exportSettings(const Settings& settings, const char* filename); - - Options getOptions() const { return mOptions; } - bool isRunning() const { return mIsRunning; } - size_t currentTest() const { return mCurrentTest; } - size_t testCount() const { return mSpec->size(); } - bool isBatchModeEnabled() const { return mBatchModeEnabled; } - const char* getStatusMessage() const; - ~AutomationEngine(); - -private: - AutomationSpec const * const mSpec; - Settings * const mSettings; - Options mOptions; - - Engine* mColorGradingEngine = nullptr; - ColorGrading* mColorGrading = nullptr; - ColorGradingSettings mColorGradingSettings = {}; - - size_t mCurrentTest; - float mElapsedTime; - int mElapsedFrames; - bool mIsRunning = false; - bool mBatchModeEnabled = false; - bool mRequestStart = false; - bool mShouldClose = false; - bool mBatchModeAllowed = false; - bool mTerminated = false; - bool mOwnsSettings = false; - -public: - // For internal use from a screenshot callback. 
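// Sketch of the per-frame flow the AutomationEngine documentation above describes: create the
// default test sequence, start it, and tick it once per rendered frame. The surrounding
// application scaffolding (gAutomation lifetime, frame callback, content) is assumed.
#include <viewer/AutomationEngine.h>

static filament::viewer::AutomationEngine* gAutomation = nullptr;

void startAutomation() {
    gAutomation = filament::viewer::AutomationEngine::createDefault();
    gAutomation->startRunning();
}

void onFrameRendered(const filament::viewer::AutomationEngine::ViewerContent& content,
        float deltaTimeSeconds) {
    gAutomation->tick(content, deltaTimeSeconds);   // applies settings, advances the test index
    if (!gAutomation->isRunning()) {
        delete gAutomation;                         // test sequence finished
        gAutomation = nullptr;
    }
}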
- void requestClose() { mShouldClose = true; } - bool isTerminated() const { return mTerminated; } -}; - -} // namespace viewer -} // namespace filament - -#endif // VIEWER_AUTOMATION_ENGINE_H diff --git a/ios/include/viewer/AutomationSpec.h b/ios/include/viewer/AutomationSpec.h deleted file mode 100644 index 99d82f11..00000000 --- a/ios/include/viewer/AutomationSpec.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef VIEWER_AUTOMATION_SPEC_H -#define VIEWER_AUTOMATION_SPEC_H - -#include - -#include - -namespace filament { -namespace viewer { - -/** - * Immutable list of Settings objects generated from a JSON spec. - * - * Each top-level item in the JSON spec is an object with "name", "base" and "permute". - * The "base" object specifies a single set of changes to apply to default settings. - * The optional "permute" object specifies a cross product of changes to apply to the base. - * - * The following example generates a total of 5 test cases. - * [{ - * "name": "simple", - * "base": { - * "view.dof.cocScale": 1.0, - * "view.bloom.strength": 0.5 - * }, - * "permute": { - * "view.bloom.enabled": [false, true], - * "view.dof.enabled": [false, true] - * } - * }, - * { - * "name": "ppoff", - * "base": { - * "view.postProcessingEnabled": false - * } - * }] - */ -class UTILS_PUBLIC AutomationSpec { -public: - - // Parses a JSON spec, then generates a list of Settings objects. - // Returns null on failure (see utils log for warnings and errors). - // Clients should release memory using "delete". - static AutomationSpec* generate(const char* jsonSpec, size_t size); - - // Generates a list of Settings objects using an embedded JSON spec. - static AutomationSpec* generateDefaultTestCases(); - - // Returns the number of generated Settings objects. - size_t size() const; - - // Gets a generated Settings object and copies it out. - // Returns false if the given index is out of bounds. - bool get(size_t index, Settings* out) const; - - // Returns the name of the JSON group for a given Settings object. - char const* getName(size_t index) const; - - // Frees all Settings objects and name strings. - ~AutomationSpec(); - -private: - struct Impl; - AutomationSpec(Impl*); - Impl* mImpl; -}; - -} // namespace viewer -} // namespace filament - -#endif // VIEWER_AUTOMATION_SPEC_H diff --git a/ios/include/viewer/RemoteServer.h b/ios/include/viewer/RemoteServer.h deleted file mode 100644 index aba761c1..00000000 --- a/ios/include/viewer/RemoteServer.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
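// Sketch of consuming an AutomationSpec as documented above: parse a JSON spec, then copy out
// each generated Settings object. The single-case JSON below is adapted from the comment's
// "ppoff" example.
#include <cstring>
#include <viewer/AutomationSpec.h>
#include <viewer/Settings.h>

void runSpec() {
    const char* jsonSpec =
            R"([{ "name": "ppoff", "base": { "view.postProcessingEnabled": false } }])";
    filament::viewer::AutomationSpec* spec =
            filament::viewer::AutomationSpec::generate(jsonSpec, strlen(jsonSpec));
    if (spec == nullptr) return;                    // parse errors are reported in the utils log
    filament::viewer::Settings settings;
    for (size_t i = 0; i < spec->size(); i++) {
        spec->get(i, &settings);                    // copies out the i-th generated permutation
        // ... apply or inspect `settings` here
    }
    delete spec;                                    // clients release the spec with "delete"
}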
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef VIEWER_REMOTE_SERVER_H -#define VIEWER_REMOTE_SERVER_H - -#include - -#include - -#include -#include - -class CivetServer; - -namespace filament { -namespace viewer { - -class MessageSender; -class MessageReceiver; - -/** - * Encapsulates a message sent from the web client. - * - * All instances of ReceivedMessage and their data / strings are owned by RemoteServer. - * These can be freed via RemoteServer::releaseReceivedMessage(). - */ -struct ReceivedMessage { - char* label; - char* buffer; - size_t bufferByteCount; - size_t messageUid; -}; - -/** - * Manages a tiny WebSocket server that can receive model data and viewer settings. - * - * Client apps can call peekReceivedMessage to check for new data, or acquireReceivedMessage - * to pop it off the small internal queue. When they are done examining the message contents - * they should call releaseReceivedMessage. - */ -class UTILS_PUBLIC RemoteServer { -public: - RemoteServer(int port = 8082); - ~RemoteServer(); - bool isValid() const { return mMessageSender; } - - /** - * Checks if a download is currently in progress and returns its label. - * Returns null if nothing is being downloaded. - */ - char const* peekIncomingLabel() const; - - /** - * Pops a message off the incoming queue or returns null if there are no unread messages. - * - * After examining its contents, users should free the message with releaseReceivedMessage. - */ - ReceivedMessage const* acquireReceivedMessage(); - - /** - * Frees the memory that holds the contents of a received message. - */ - void releaseReceivedMessage(ReceivedMessage const* message); - - void sendMessage(const Settings& settings); - void sendMessage(const char* label, const char* buffer, size_t bufsize); - - // For internal use (makes JNI simpler) - ReceivedMessage const* peekReceivedMessage() const; - -private: - void enqueueReceivedMessage(ReceivedMessage* message); - void setIncomingMessage(ReceivedMessage* message); - MessageSender* mMessageSender = nullptr; - MessageReceiver* mMessageReceiver = nullptr; - size_t mNextMessageUid = 0; - static const size_t kMessageCapacity = 4; - ReceivedMessage* mReceivedMessages[kMessageCapacity] = {}; - ReceivedMessage* mIncomingMessage = nullptr; - JsonSerializer mSerializer; - mutable std::mutex mReceivedMessagesMutex; - friend class MessageReceiver; -}; - -} // namespace viewer -} // namespace filament - -#endif // VIEWER_REMOTE_SERVER_H diff --git a/ios/include/viewer/Settings.h b/ios/include/viewer/Settings.h deleted file mode 100644 index 77fd9853..00000000 --- a/ios/include/viewer/Settings.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
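// Sketch of the polling pattern described in the RemoteServer comments above: pop a received
// message, inspect it, then release it back to the server. Port and handling are illustrative.
#include <viewer/RemoteServer.h>

static filament::viewer::RemoteServer gServer(8082);

void pollRemoteMessages() {
    if (!gServer.isValid()) return;
    if (filament::viewer::ReceivedMessage const* msg = gServer.acquireReceivedMessage()) {
        // msg->label, msg->buffer and msg->bufferByteCount describe the payload
        gServer.releaseReceivedMessage(msg);        // required once the contents were examined
    }
}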
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef VIEWER_SETTINGS_H -#define VIEWER_SETTINGS_H - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include - -#include - -namespace filament { - -class Skybox; -class Renderer; - -namespace viewer { - -struct ColorGradingSettings; -struct DynamicLightingSettings; -struct MaterialSettings; -struct Settings; -struct ViewSettings; -struct LightSettings; -struct ViewerOptions; - -enum class ToneMapping : uint8_t { - LINEAR = 0, - ACES_LEGACY = 1, - ACES = 2, - FILMIC = 3, - GENERIC = 4, - DISPLAY_RANGE = 5, -}; - -using AmbientOcclusionOptions = filament::View::AmbientOcclusionOptions; -using AntiAliasing = filament::View::AntiAliasing; -using BloomOptions = filament::View::BloomOptions; -using DepthOfFieldOptions = filament::View::DepthOfFieldOptions; -using Dithering = filament::View::Dithering; -using FogOptions = filament::View::FogOptions; -using RenderQuality = filament::View::RenderQuality; -using ShadowType = filament::View::ShadowType; -using TemporalAntiAliasingOptions = filament::View::TemporalAntiAliasingOptions; -using VignetteOptions = filament::View::VignetteOptions; -using VsmShadowOptions = filament::View::VsmShadowOptions; -using LightManager = filament::LightManager; - -// These functions push all editable property values to their respective Filament objects. -void applySettings(const ViewSettings& settings, View* dest); -void applySettings(const MaterialSettings& settings, MaterialInstance* dest); -void applySettings(const LightSettings& settings, IndirectLight* ibl, utils::Entity sunlight, - utils::Entity* sceneLights, size_t sceneLightCount, LightManager* lm, Scene* scene); -void applySettings(const ViewerOptions& settings, Camera* camera, Skybox* skybox, - Renderer* renderer); - -// Creates a new ColorGrading object based on the given settings. -UTILS_PUBLIC -ColorGrading* createColorGrading(const ColorGradingSettings& settings, Engine* engine); - -class UTILS_PUBLIC JsonSerializer { -public: - JsonSerializer(); - ~JsonSerializer(); - - // Writes a human-readable JSON string into an internal buffer and returns the result. - const std::string& writeJson(const Settings& in); - - // Reads the given JSON blob and updates the corresponding fields in the given Settings object. - // - The given JSON blob need not specify all settings. - // - Returns true if successful. - // - This function writes warnings and error messages into the utils log. 
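// Round-trip sketch for the JsonSerializer documented above: apply a partial JSON blob to an
// existing Settings object, then serialize the full state back out. The nested shape of the
// blob is an assumption; only the API calls come from the declarations here.
#include <cstring>
#include <string>
#include <viewer/Settings.h>

void roundTrip() {
    filament::viewer::JsonSerializer serializer;
    filament::viewer::Settings settings;
    const char* blob = R"({ "view": { "bloom": { "enabled": true } } })";
    if (serializer.readJson(blob, strlen(blob), &settings)) {   // partial blobs are allowed
        const std::string& json = serializer.writeJson(settings);
        (void) json;                                            // human-readable JSON string
    }
}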
- bool readJson(const char* jsonChunk, size_t size, Settings* out); - -private: - class Context; - Context* context; -}; - -struct GenericToneMapperSettings { - float contrast = 1.4f; - float shoulder = 0.5f; - float midGrayIn = 0.18f; - float midGrayOut = 0.266f; - float hdrMax = 10.0f; - bool operator!=(const GenericToneMapperSettings &rhs) const { return !(rhs == *this); } - bool operator==(const GenericToneMapperSettings &rhs) const; -}; - -struct ColorGradingSettings { - bool enabled = true; - filament::ColorGrading::QualityLevel quality = filament::ColorGrading::QualityLevel::MEDIUM; - ToneMapping toneMapping = ToneMapping::ACES_LEGACY; - GenericToneMapperSettings genericToneMapper; - bool luminanceScaling = false; - float exposure = 0.0f; - float temperature = 0.0f; - float tint = 0.0f; - math::float3 outRed{1.0f, 0.0f, 0.0f}; - math::float3 outGreen{0.0f, 1.0f, 0.0f}; - math::float3 outBlue{0.0f, 0.0f, 1.0f}; - math::float4 shadows{1.0f, 1.0f, 1.0f, 0.0f}; - math::float4 midtones{1.0f, 1.0f, 1.0f, 0.0f}; - math::float4 highlights{1.0f, 1.0f, 1.0f, 0.0f}; - math::float4 ranges{0.0f, 0.333f, 0.550f, 1.0f}; - float contrast = 1.0f; - float vibrance = 1.0f; - float saturation = 1.0f; - math::float3 slope{1.0f}; - math::float3 offset{0.0f}; - math::float3 power{1.0f}; - math::float3 gamma{1.0f}; - math::float3 midPoint{1.0f}; - math::float3 scale{1.0f}; - bool linkedCurves = false; - bool operator!=(const ColorGradingSettings &rhs) const { return !(rhs == *this); } - bool operator==(const ColorGradingSettings &rhs) const; -}; - -struct DynamicLightingSettings { - float zLightNear = 5; - float zLightFar = 100; -}; - -// This defines fields in the same order as the setter methods in filament::View. -struct ViewSettings { - uint8_t sampleCount = 1; - AntiAliasing antiAliasing = AntiAliasing::FXAA; - TemporalAntiAliasingOptions taa; - ColorGradingSettings colorGrading; - AmbientOcclusionOptions ssao; - BloomOptions bloom; - FogOptions fog; - DepthOfFieldOptions dof; - VignetteOptions vignette; - Dithering dithering = Dithering::TEMPORAL; - RenderQuality renderQuality; - DynamicLightingSettings dynamicLighting; - ShadowType shadowType = ShadowType::PCF; - VsmShadowOptions vsmShadowOptions; - bool postProcessingEnabled = true; -}; - -template -struct MaterialProperty { std::string name; T value; }; - -// This struct has a fixed size for simplicity. Each non-empty property name is an override. 
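// Sketch of a MaterialSettings override for the struct declared just below: any slot with a
// non-empty name overrides the matching material parameter. The parameter names are
// illustrative, and the element types (float, float3) are inferred from the field names.
#include <filament/MaterialInstance.h>
#include <viewer/Settings.h>

void configureOverrides(filament::MaterialInstance* mi) {
    filament::viewer::Settings settings;
    settings.material.scalar[0] = { "metallicFactor", 0.0f };
    settings.material.float3[0] = { "emissiveFactor", { 1.0f, 0.0f, 0.0f } };
    filament::viewer::applySettings(settings.material, mi);   // pushes the overrides to the instance
}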
-struct MaterialSettings { - static constexpr size_t MAX_COUNT = 4; - MaterialProperty scalar[MAX_COUNT]; - MaterialProperty float3[MAX_COUNT]; - MaterialProperty float4[MAX_COUNT]; -}; - -struct LightSettings { - bool enableShadows = true; - bool enableSunlight = true; - LightManager::ShadowOptions shadowOptions; - float sunlightIntensity = 100000.0f; - math::float3 sunlightDirection = {0.6, -1.0, -0.8};; - math::float3 sunlightColor = filament::Color::toLinear({ 0.98, 0.92, 0.89}); - float iblIntensity = 30000.0f; - float iblRotation = 0.0f; -}; - -struct ViewerOptions { - float cameraAperture = 16.0f; - float cameraSpeed = 125.0f; - float cameraISO = 100.0f; - float groundShadowStrength = 0.75f; - bool groundPlaneEnabled = false; - bool skyboxEnabled = true; - sRGBColor backgroundColor = { 0.0f }; - float cameraFocalLength = 28.0f; - float cameraFocusDistance = 10.0f; - bool autoScaleEnabled = true; -}; - -struct Settings { - ViewSettings view; - MaterialSettings material; - LightSettings lighting; - ViewerOptions viewer; -}; - -} // namespace viewer -} // namespace filament - -#endif // VIEWER_SETTINGS_H diff --git a/ios/include/viewer/SimpleViewer.h b/ios/include/viewer/SimpleViewer.h deleted file mode 100644 index 61caa14f..00000000 --- a/ios/include/viewer/SimpleViewer.h +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef VIEWER_SIMPLEVIEWER_H -#define VIEWER_SIMPLEVIEWER_H - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include -#include - -#include -#include - -namespace filagui { - class ImGuiHelper; -} - -namespace filament { -namespace viewer { - -/** - * \class SimpleViewer SimpleViewer.h viewer/SimpleViewer.h - * \brief Manages the state for a simple glTF viewer with imgui controls and a tree view. - * - * This is a utility that can be used across multiple platforms, including web. - * - * \note If you don't need ImGui controls, there is no need to use this class, just use AssetLoader - * instead. - */ -class UTILS_PUBLIC SimpleViewer { -public: - using Animator = gltfio::Animator; - using FilamentAsset = gltfio::FilamentAsset; - using FilamentInstance = gltfio::FilamentInstance; - - static constexpr int DEFAULT_SIDEBAR_WIDTH = 350; - - /** - * Constructs a SimpleViewer that has a fixed association with the given Filament objects. - * - * Upon construction, the simple viewer may create some additional Filament objects (such as - * light sources) that it owns. - */ - SimpleViewer(filament::Engine* engine, filament::Scene* scene, filament::View* view, - int sidebarWidth = DEFAULT_SIDEBAR_WIDTH); - - /** - * Destroys the SimpleViewer and any Filament entities that it owns. - */ - ~SimpleViewer(); - - /** - * Adds the asset's ready-to-render entities into the scene. - * - * The viewer does not claim ownership over the asset or its entities. 
Clients should use - * AssetLoader and ResourceLoader to load an asset before passing it in. - * - * @param asset The asset to view. - * @param instanceToAnimate Optional instance from which to get the animator. - */ - void populateScene(FilamentAsset* asset, FilamentInstance* instanceToAnimate = nullptr); - - /** - * Removes the current asset from the viewer. - * - * This removes all the asset entities from the Scene, but does not destroy them. - */ - void removeAsset(); - - /** - * Sets or changes the current scene's IBL to allow the UI manipulate it. - */ - void setIndirectLight(filament::IndirectLight* ibl, filament::math::float3 const* sh3); - - /** - * Applies the currently-selected glTF animation to the transformation hierarchy and updates - * the bone matrices on all renderables. - */ - void applyAnimation(double currentTime); - - /** - * Constructs ImGui controls for the current frame and responds to everything that the user has - * changed since the previous frame. - * - * If desired this can be used in conjunction with the filagui library, which allows clients to - * render ImGui controls with Filament. - */ - void updateUserInterface(); - - /** - * Alternative to updateUserInterface that uses an internal instance of ImGuiHelper. - * - * This utility method is designed for clients that do not want to manage their own instance of - * ImGuiHelper (e.g., JavaScript clients). - * - * Behind the scenes this simply calls ImGuiHelper->render() and passes updateUserInterface into - * its callback. Note that the first call might be slower since it requires the creation of the - * internal ImGuiHelper instance. - */ - void renderUserInterface(float timeStepInSeconds, filament::View* guiView, float pixelRatio); - - /** - * Event-passing methods, useful only when SimpleViewer manages its own instance of ImGuiHelper. - * The key codes used in these methods are just normal ASCII/ANSI codes. - * @{ - */ - void mouseEvent(float mouseX, float mouseY, bool mouseButton, float mouseWheelY, bool control); - void keyDownEvent(int keyCode); - void keyUpEvent(int keyCode); - void keyPressEvent(int charCode); - /** @}*/ - - /** - * Retrieves the current width of the ImGui "window" which we are using as a sidebar. - * Clients can monitor this value to adjust the size of the view. - */ - int getSidebarWidth() const { return mSidebarWidth; } - - /** - * Allows clients to inject custom UI. - */ - void setUiCallback(std::function callback) { mCustomUI = callback; } - - /** - * Draws the bounding box of each renderable. - * Defaults to false. - */ - void enableWireframe(bool b) { mEnableWireframe = b; } - - /** - * Enables a built-in light source (useful for creating shadows). - * Defaults to true. - */ - void enableSunlight(bool b) { mSettings.lighting.enableSunlight = b; } - - /** - * Enables dithering on the view. - * Defaults to true. - */ - void enableDithering(bool b) { - mSettings.view.dithering = b ? Dithering::TEMPORAL : Dithering::NONE; - } - - /** - * Enables FXAA antialiasing in the post-process pipeline. - * Defaults to true. - */ - void enableFxaa(bool b) { - mSettings.view.antiAliasing = b ? AntiAliasing::FXAA : AntiAliasing::NONE; - } - - /** - * Enables hardware-based MSAA antialiasing. - * Defaults to true. - */ - void enableMsaa(bool b) { mSettings.view.sampleCount = b ? 4 : 1; } - - /** - * Enables screen-space ambient occlusion in the post-process pipeline. - * Defaults to true. - */ - void enableSSAO(bool b) { mSettings.view.ssao.enabled = b; } - - /** - * Enables Bloom. 
- * Defaults to true. - */ - void enableBloom(bool bloom) { mSettings.view.bloom.enabled = bloom; } - - /** - * Adjusts the intensity of the IBL. - * See also filament::IndirectLight::setIntensity(). - * Defaults to 30000.0. - */ - void setIBLIntensity(float brightness) { mSettings.lighting.iblIntensity = brightness; } - - /** - * Updates the transform at the root node according to the autoScaleEnabled setting. - */ - void updateRootTransform(); - - /** - * Gets a modifiable reference to stashed state. - */ - Settings& getSettings() { return mSettings; } - - void stopAnimation() { mCurrentAnimation = 0; } - - int getCurrentCamera() const { return mCurrentCamera; } - -private: - void updateIndirectLight(); - - // Immutable properties set from the constructor. - filament::Engine* const mEngine; - filament::Scene* const mScene; - filament::View* const mView; - const utils::Entity mSunlight; - - // Lazily instantiated fields. - filagui::ImGuiHelper* mImGuiHelper = nullptr; - - // Properties that can be changed from the application. - FilamentAsset* mAsset = nullptr; - Animator* mAnimator = nullptr; - filament::IndirectLight* mIndirectLight = nullptr; - std::function mCustomUI; - - // Properties that can be changed from the UI. - int mCurrentAnimation = 1; - bool mResetAnimation = true; - bool mEnableWireframe = false; - int mVsmMsaaSamplesLog2 = 1; - Settings mSettings; - int mSidebarWidth; - uint32_t mFlags; - - // 0 is the default "free camera". Additional cameras come from the gltf file (1-based index). - int mCurrentCamera = 0; - - // Color grading UI state. - float mToneMapPlot[1024]; - float mRangePlot[1024 * 3]; - float mCurvePlot[1024 * 3]; -}; - -UTILS_PUBLIC -filament::math::mat4f fitIntoUnitCube(const filament::Aabb& bounds, float zoffset); - -} // namespace viewer -} // namespace filament - -#endif // VIEWER_SIMPLEVIEWER_H diff --git a/ios/src/morph/DependencyGraph.h b/ios/src/DependencyGraph.h similarity index 100% rename from ios/src/morph/DependencyGraph.h rename to ios/src/DependencyGraph.h diff --git a/ios/src/morph/DracoCache.h b/ios/src/DracoCache.h similarity index 100% rename from ios/src/morph/DracoCache.h rename to ios/src/DracoCache.h diff --git a/ios/src/morph/FFilamentAsset.h b/ios/src/FFilamentAsset.h similarity index 96% rename from ios/src/morph/FFilamentAsset.h rename to ios/src/FFilamentAsset.h index 7c7dc798..a4061fe5 100644 --- a/ios/src/morph/FFilamentAsset.h +++ b/ios/src/FFilamentAsset.h @@ -44,7 +44,7 @@ #include "FFilamentInstance.h" #include -#include +#include #include @@ -65,13 +65,13 @@ namespace gltfio { class Animator; class Wireframe; +class MorphHelper; // Encapsulates VertexBuffer::setBufferAt() or IndexBuffer::setBuffer(). struct BufferSlot { const cgltf_accessor* accessor; cgltf_attribute_type attribute; int bufferIndex; // for vertex buffers only - int morphTarget; // 0 if no morphing, otherwise 1-based index filament::VertexBuffer* vertexBuffer; filament::IndexBuffer* indexBuffer; }; @@ -97,8 +97,6 @@ struct Primitive { filament::IndexBuffer* indices = nullptr; filament::Aabb aabb; // object-space bounding box UvMap uvmap; // mapping from each glTF UV set to either UV0 or UV1 (8 bytes) - uint8_t morphPositions[4] = {}; // Buffer indices for MORPH_POSITION_0, MORPH_POSITION_1 etc. - uint8_t morphTangents[4] = {}; // Buffer indices for MORPH_TANGENTS_0, MORPH_TANGENTS_1, etc. 
}; using MeshCache = tsl::robin_map>; @@ -194,6 +192,10 @@ struct FFilamentAsset : public FilamentAsset { Animator* getAnimator() noexcept; + void setMorphWeights(utils::Entity entity, const float* weights, size_t count) noexcept; + + int getMorphTargetCount(utils::Entity entity) noexcept; + utils::Entity getWireframe() noexcept; filament::Engine* getEngine() const noexcept { @@ -243,6 +245,7 @@ struct FFilamentAsset : public FilamentAsset { std::vector mInstances; SkinVector mSkins; // unused for instanced assets Animator* mAnimator = nullptr; + MorphHelper* mMorpher = nullptr; Wireframe* mWireframe = nullptr; bool mResourcesLoaded = false; DependencyGraph mDependencyGraph; diff --git a/ios/src/morph/FFilamentInstance.h b/ios/src/FFilamentInstance.h similarity index 100% rename from ios/src/morph/FFilamentInstance.h rename to ios/src/FFilamentInstance.h diff --git a/ios/src/morph/GltfEnums.h b/ios/src/GltfEnums.h similarity index 98% rename from ios/src/morph/GltfEnums.h rename to ios/src/GltfEnums.h index ad6a34aa..39e71b7a 100644 --- a/ios/src/morph/GltfEnums.h +++ b/ios/src/GltfEnums.h @@ -122,12 +122,16 @@ inline bool getPrimitiveType(cgltf_primitive_type in, case cgltf_primitive_type_lines: *out = filament::RenderableManager::PrimitiveType::LINES; return true; + case cgltf_primitive_type_line_strip: + *out = filament::RenderableManager::PrimitiveType::LINE_STRIP; + return true; case cgltf_primitive_type_triangles: *out = filament::RenderableManager::PrimitiveType::TRIANGLES; return true; - case cgltf_primitive_type_line_loop: - case cgltf_primitive_type_line_strip: case cgltf_primitive_type_triangle_strip: + *out = filament::RenderableManager::PrimitiveType::TRIANGLE_STRIP; + return true; + case cgltf_primitive_type_line_loop: case cgltf_primitive_type_triangle_fan: return false; } diff --git a/ios/src/Log.h b/ios/src/Log.h new file mode 100644 index 00000000..8acfc0e9 --- /dev/null +++ b/ios/src/Log.h @@ -0,0 +1,25 @@ +#ifdef __OBJC__ +#import <Foundation/Foundation.h> +#elif defined __ANDROID__ +#include <android/log.h> +#define LOGTAG "PolyvoxFilament" +#else +#include <stdio.h> +#endif + +void Log(const char *fmt, ...) { + va_list args; + va_start(args, fmt); + +#ifdef __ANDROID__ + __android_log_vprint(ANDROID_LOG_DEBUG, LOGTAG, fmt, args); +#elif defined __OBJC__ + NSString *format = [[NSString alloc] initWithUTF8String:fmt]; + NSLogv(format, args); + [format release]; +#else + vprintf(fmt, args); +#endif + + va_end(args); +} \ No newline at end of file diff --git a/ios/src/MorphHelper.h b/ios/src/MorphHelper.h new file mode 100644 index 00000000..45c51ba0 --- /dev/null +++ b/ios/src/MorphHelper.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef GLTFIO_MORPHHELPER_H +#define GLTFIO_MORPHHELPER_H + +#include "FFilamentAsset.h" +#include "FFilamentInstance.h" + +#include + +#include + +#include + +#include + +struct cgltf_node; +struct cgltf_mesh; +struct cgltf_primitive; + +namespace gltfio { + +/** + * Internal class that partitions lists of morph weights and maintains a cache of BufferObject + * instances. This allows gltfio to support up to 255 morph targets. + * + * Each partition is associated with an unordered set of 4 (or fewer) morph target indices, which + * we call the "primary indices" for that time slice. + * + * Animator has ownership over a single instance of MorphHelper, thus it is 1:1 with FilamentAsset. + */ +class MorphHelper { +public: + using Entity = utils::Entity; + MorphHelper(FFilamentAsset* asset, FFilamentInstance* inst); + ~MorphHelper(); + + void setWeights(Entity entity, float const* weights, int count) noexcept; + int getTargetCount(Entity entity) const noexcept; + +private: + struct GltfPrimitive { + filament::MorphTargetBuffer* targets; + }; + + struct TableEntry { + std::vector<GltfPrimitive> primitives; // TODO: flatten this? + }; + + void addPrimitive(cgltf_mesh const* mesh, int primitiveIndex, Entity entity); + + tsl::robin_map<Entity, TableEntry> mMorphTable; + const FFilamentAsset* mAsset; + const FFilamentInstance* mInstance; +}; + +} // namespace gltfio + +#endif // GLTFIO_MORPHHELPER_H diff --git a/ios/src/morph/TangentsJob.h b/ios/src/TangentsJob.h similarity index 100% rename from ios/src/morph/TangentsJob.h rename to ios/src/TangentsJob.h diff --git a/ios/src/Wireframe.h b/ios/src/Wireframe.h new file mode 100644 index 00000000..dc083561 --- /dev/null +++ b/ios/src/Wireframe.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GLTFIO_WIREFRAME_H +#define GLTFIO_WIREFRAME_H + +#include +#include + +#include + +namespace gltfio { + +struct FFilamentAsset; + +struct Wireframe { + Wireframe(FFilamentAsset* asset); + ~Wireframe(); + const FFilamentAsset* mAsset; + utils::Entity mEntity; + filament::VertexBuffer* mVertexBuffer; + filament::IndexBuffer* mIndexBuffer; +}; + +} // namespace gltfio + +#endif // GLTFIO_WIREFRAME_H diff --git a/ios/src/morph/CPUMorpher.h b/ios/src/morph/CPUMorpher.h deleted file mode 100644 index 48d7c939..00000000 --- a/ios/src/morph/CPUMorpher.h +++ /dev/null @@ -1,91 +0,0 @@ -// /* -// * Copyright (C) 2021 The Android Open Source Project -// * -// * Licensed under the Apache License, Version 2.0 (the "License"); -// * you may not use this file except in compliance with the License. -// * You may obtain a copy of the License at -// * -// * http://www.apache.org/licenses/LICENSE-2.0 -// * -// * Unless required by applicable law or agreed to in writing, software -// * distributed under the License is distributed on an "AS IS" BASIS, -// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and -// * limitations under the License. -// */ - -// #ifndef GLTFIO_CPUMORPHER_H -// #define GLTFIO_CPUMORPHER_H - -// #include "Morpher.h" - -// #include "FFilamentAsset.h" -// #include "FFilamentInstance.h" - -// #include - -// #include - -// #include - -// struct cgltf_node; -// struct cgltf_mesh; -// struct cgltf_primitive; -// struct cgltf_attribute; - -// using namespace gltfio; - - -// namespace agltfio { - -// /** -// * Helper for supporting more than 4 active morph targets. -// * -// * All morph values are calculated on CPU and collected into a single target, which will be -// * uploaded with weight of 1. This is effectively doing the morphing on CPU. -// * -// * Obviously this is slower than the stock morpher as it needs to upload buffer every frame. -// * So beware of the performance penalty. -// */ -// class CPUMorpher : public Morpher { -// public: -// using Entity = utils::Entity; -// CPUMorpher(FFilamentAsset* asset, FFilamentInstance* instance); -// ~CPUMorpher(); - -// void applyWeights(Entity targetEntity, float const* weights, size_t count) noexcept; - -// private: -// struct GltfTarget { -// int morphTargetIndex; -// cgltf_attribute_type attribute_type; -// cgltf_type type; -// std::vector indices; -// std::vector values; -// }; - -// struct GltfPrimitive { -// filament::VertexBuffer* vertexBuffer; -// int baseSlot; -// size_t floatsCount; -// filament::BufferObject* morphBuffer1 = nullptr; -// filament::BufferObject* morphBuffer2 = nullptr; -// std::vector targets; -// }; - -// struct TableEntry { -// std::vector primitives; -// }; - -// void addPrimitive(cgltf_mesh const* mesh, int primitiveIndex, TableEntry* entry); -// int determineBaseSlot(const cgltf_primitive& prim) const; -// int findPositionAttribute(const cgltf_primitive& prim) const; - -// std::vector mPartiallySortedWeights; -// tsl::robin_map mMorphTable; -// const FFilamentAsset* mAsset; -// }; - -// } // namespace gltfio - -// #endif // GLTFIO_CPUMORPHER_H diff --git a/ios/src/morph/GPUMorphHelper.h b/ios/src/morph/GPUMorphHelper.h deleted file mode 100644 index 2b058fd9..00000000 --- a/ios/src/morph/GPUMorphHelper.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "FFilamentAsset.h" -#include "FFilamentInstance.h" -#include "filament/Texture.h" -#include "filament/Engine.h" -#include - -#include - -#include - -using namespace filament; - -struct cgltf_node; -struct cgltf_mesh; -struct cgltf_primitive; - -namespace gltfio { - - /// - /// A GPUMorphHelper instance can be created per mesh (this avoids creating textures for meshes that do not require animation). - /// For each primitive in the mesh, a texture is created to store the target positions and normals. - /// The texture is laid out as x * 1 * z, where z is the number of morph targets and x is the number of vertices for the primitive. 
- /// A MaterialInstance is created for each primitive, then applied to the entity identified by entityName. - /// - class GPUMorphHelper { - public: - using Entity = utils::Entity; - - GPUMorphHelper(FFilamentAsset *asset, const char* meshName, int* primitives, int numPrimitives); - - ~GPUMorphHelper(); - - void applyWeights(float const *weights, size_t count) noexcept; - - private: - - struct GltfTarget { - const void *bufferObject; - uint32_t bufferSize; - int morphTargetIndex; - cgltf_attribute_type type; - }; - - struct GltfPrimitive { - filament::VertexBuffer *vertexBuffer; - Texture* texture; - std::vector targets; // TODO: flatten this? - const char* materialName; - cgltf_size numTargets = 0; - cgltf_size numVertices = 0; - MaterialInstance* materialInstance; - }; - - int numAttributes = 2; - - void addPrimitive(cgltf_mesh const *mesh, int primitiveIndex); - - void createTextures(); - - cgltf_mesh const* targetMesh; - - FFilamentAsset *mAsset; - std::vector> animatablePrimitives; - }; -} diff --git a/ios/src/morph/GltfHelpers.h b/ios/src/morph/GltfHelpers.h deleted file mode 100644 index 6a684233..00000000 --- a/ios/src/morph/GltfHelpers.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef GLTFIO_GLTFHELPERS_H -#define GLTFIO_GLTFHELPERS_H - -#include - -static const uint8_t* cgltf_buffer_view_data(const cgltf_buffer_view* view) { - if (view->data) - return (const uint8_t*)view->data; - - if (!view->buffer->data) - return NULL; - - const uint8_t* result = (const uint8_t*)view->buffer->data; - result += view->offset; - return result; -} - -static cgltf_size cgltf_component_size(cgltf_component_type component_type) { - switch (component_type) - { - case cgltf_component_type_r_8: - case cgltf_component_type_r_8u: - return 1; - case cgltf_component_type_r_16: - case cgltf_component_type_r_16u: - return 2; - case cgltf_component_type_r_32u: - case cgltf_component_type_r_32f: - return 4; - case cgltf_component_type_invalid: - default: - return 0; - } -} - -static cgltf_size cgltf_component_read_index(const void* in, cgltf_component_type component_type) { - switch (component_type) - { - case cgltf_component_type_r_16: - return *((const int16_t*) in); - case cgltf_component_type_r_16u: - return *((const uint16_t*) in); - case cgltf_component_type_r_32u: - return *((const uint32_t*) in); - case cgltf_component_type_r_32f: - return (cgltf_size)*((const float*) in); - case cgltf_component_type_r_8: - return *((const int8_t*) in); - case cgltf_component_type_r_8u: - return *((const uint8_t*) in); - default: - return 0; - } -} - -static cgltf_float cgltf_component_read_float(const void* in, cgltf_component_type component_type, - cgltf_bool normalized) { - if (component_type == cgltf_component_type_r_32f) - { - return *((const float*) in); - } - - if (normalized) - { - switch (component_type) - { - // note: glTF spec doesn't currently define normalized conversions for 32-bit integers - case cgltf_component_type_r_16: - return *((const int16_t*) in) / (cgltf_float)32767; - case cgltf_component_type_r_16u: - return *((const uint16_t*) in) / (cgltf_float)65535; - case cgltf_component_type_r_8: - return *((const int8_t*) in) / (cgltf_float)127; - case cgltf_component_type_r_8u: - return *((const uint8_t*) in) / (cgltf_float)255; - default: - return 0; - } - } - - return (cgltf_float)cgltf_component_read_index(in, component_type); -} - -static cgltf_bool cgltf_element_read_float(const uint8_t* element, cgltf_type type, - cgltf_component_type component_type, cgltf_bool normalized, cgltf_float* out, - cgltf_size element_size) { - cgltf_size num_components = cgltf_num_components(type); - - if (element_size < num_components) { - return 0; - } - - // There are three special cases for component extraction, see #data-alignment in the 2.0 spec. - - cgltf_size component_size = cgltf_component_size(component_type); - - for (cgltf_size i = 0; i < num_components; ++i) - { - out[i] = cgltf_component_read_float(element + component_size * i, component_type, normalized); - } - return 1; -} - -#endif // GLTFIO_GLTFHELPERS_H diff --git a/ios/src/morph/upcast.h b/ios/src/upcast.h similarity index 100% rename from ios/src/morph/upcast.h rename to ios/src/upcast.h
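Note on the morph-related changes above: the deleted CPUMorpher/GPUMorphHelper paths are replaced by the new MorphHelper, which drives filament::MorphTargetBuffer and is surfaced through the setMorphWeights()/getMorphTargetCount() methods added to FFilamentAsset. The sketch below shows how a client might drive those two entry points each frame. It is illustrative only: the driveMorphWeights() helper, the sine-wave weights, and the assumption that the caller already holds a gltfio::FFilamentAsset* are not part of this patch.

    // Sketch only (assumes this patch is applied and the internal FFilamentAsset
    // header is reachable from the calling code).
    #include <cmath>
    #include <vector>

    #include <utils/Entity.h>

    #include "FFilamentAsset.h"   // declares setMorphWeights()/getMorphTargetCount()

    // Hypothetical helper: animate every morph target of one entity with a sine wave.
    static void driveMorphWeights(gltfio::FFilamentAsset* asset, utils::Entity entity,
            double timeSeconds) {
        const int targetCount = asset->getMorphTargetCount(entity);
        if (targetCount <= 0) {
            return; // entity has no morph targets
        }
        std::vector<float> weights(static_cast<size_t>(targetCount));
        for (int i = 0; i < targetCount; i++) {
            // Phase-shift each target so the individual weights are visibly distinct.
            weights[i] = 0.5f + 0.5f * std::sin(static_cast<float>(timeSeconds) + i);
        }
        asset->setMorphWeights(entity, weights.data(), weights.size());
    }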