From ea2964c6c67d54b2a60d30dd9bc28318fd286958 Mon Sep 17 00:00:00 2001 From: Nick Fisher Date: Sun, 10 Jul 2022 17:49:56 +1000 Subject: [PATCH] update headers to Filament v1.25.0 --- ios/include/backend/DriverEnums.h | 108 +- ios/include/backend/Handle.h | 1 + ios/include/backend/Platform.h | 2 +- ios/include/filamat/MaterialBuilder.h | 4 + ios/include/filament/IndirectLight.h | 2 +- ios/include/filament/MaterialChunkType.h | 1 + ios/include/filament/MaterialEnums.h | 2 +- ios/include/filament/MorphTargetBuffer.h | 30 +- ios/include/filament/Options.h | 79 +- ios/include/filament/RenderableManager.h | 72 +- ios/include/filament/Renderer.h | 25 +- ios/include/filament/SkinningBuffer.h | 3 + ios/include/filament/Skybox.h | 2 +- ios/include/filament/Stream.h | 87 +- ios/include/filament/ToneMapper.h | 12 +- ios/include/filament/View.h | 15 + ios/include/filament/Viewport.h | 14 - ios/include/filameshio/MeshReader.h | 2 +- ios/include/geometry/Transcoder.h | 1 + ios/include/gltfio/Animator.h | 30 +- ios/include/gltfio/AssetLoader.h | 24 +- ios/include/gltfio/FilamentAsset.h | 90 +- ios/include/gltfio/FilamentInstance.h | 4 +- ios/include/gltfio/MaterialProvider.h | 23 +- ios/include/gltfio/NodeManager.h | 110 + ios/include/gltfio/ResourceLoader.h | 17 +- ios/include/gltfio/TextureProvider.h | 180 + ios/include/gltfio/materials/uberarchive.h | 13 + ios/include/gltfio/math.h | 4 +- ios/include/gltfio/resources/gltfresources.h | 46 - .../gltfio/resources/gltfresources_lite.h | 16 - ios/include/image/ColorTransform.h | 4 +- .../image/{KtxBundle.h => Ktx1Bundle.h} | 16 +- ios/include/image/KtxUtility.h | 367 - ios/include/ktxreader/Ktx1Reader.h | 142 + ios/include/ktxreader/Ktx2Reader.h | 203 + ios/include/stb/stb_image.h | 7529 +++++++++++++++++ ios/include/uberz/ArchiveEnums.h | 32 + ios/include/uberz/ReadableArchive.h | 79 + ios/include/uberz/WritableArchive.h | 64 + ios/include/utils/Allocator.h | 9 +- ios/include/utils/BinaryTreeArray.h | 114 + 
ios/include/utils/CString.h | 54 +- ios/include/utils/Condition.h | 26 + ios/include/utils/CountDownLatch.h | 91 + ios/include/utils/CyclicBarrier.h | 84 + ios/include/utils/Entity.h | 28 +- ios/include/utils/EntityManager.h | 2 +- ios/include/utils/Hash.h | 103 + ios/include/utils/Invocable.h | 11 +- ios/include/utils/JobSystem.h | 530 ++ ios/include/utils/NameComponentManager.h | 31 +- ios/include/utils/Profiler.h | 212 + ios/include/utils/Range.h | 88 + ios/include/utils/RangeMap.h | 308 + .../utils/SingleInstanceComponentManager.h | 2 +- ios/include/utils/Stopwatch.h | 103 + ios/include/utils/StructureOfArrays.h | 3 +- ios/include/utils/Systrace.h | 278 + ios/include/utils/ThermalManager.h | 26 + ios/include/utils/ThreadUtils.h | 32 + ios/include/utils/WorkStealingDequeue.h | 202 + ios/include/utils/Zip2Iterator.h | 122 + ios/include/utils/algorithm.h | 69 - ios/include/utils/android/ThermalManager.h | 60 + ios/include/utils/api_level.h | 34 + ios/include/utils/architecture.h | 28 + ios/include/utils/ashmem.h | 28 + ios/include/utils/generic/Condition.h | 39 + ios/include/utils/generic/ThermalManager.h | 56 + ios/include/utils/linux/Condition.h | 123 + ios/include/utils/linux/Mutex.h | 64 + ios/include/utils/memalign.h | 8 +- ios/include/utils/ostream.h | 15 +- ios/include/utils/sstream.h | 32 + ios/include/utils/string.h | 28 + ios/include/utils/trap.h | 40 + ios/include/utils/vector.h | 61 + ios/include/utils/win32/stdtypes.h | 33 + ios/include/viewer/AutomationEngine.h | 2 +- ios/include/viewer/Settings.h | 4 +- .../viewer/{SimpleViewer.h => ViewerGui.h} | 54 +- 82 files changed, 11802 insertions(+), 890 deletions(-) create mode 100644 ios/include/gltfio/NodeManager.h create mode 100644 ios/include/gltfio/TextureProvider.h create mode 100644 ios/include/gltfio/materials/uberarchive.h delete mode 100644 ios/include/gltfio/resources/gltfresources.h delete mode 100644 ios/include/gltfio/resources/gltfresources_lite.h rename ios/include/image/{KtxBundle.h => 
Ktx1Bundle.h} (96%) delete mode 100644 ios/include/image/KtxUtility.h create mode 100644 ios/include/ktxreader/Ktx1Reader.h create mode 100644 ios/include/ktxreader/Ktx2Reader.h create mode 100644 ios/include/stb/stb_image.h create mode 100644 ios/include/uberz/ArchiveEnums.h create mode 100644 ios/include/uberz/ReadableArchive.h create mode 100644 ios/include/uberz/WritableArchive.h create mode 100644 ios/include/utils/BinaryTreeArray.h create mode 100644 ios/include/utils/Condition.h create mode 100644 ios/include/utils/CountDownLatch.h create mode 100644 ios/include/utils/CyclicBarrier.h create mode 100644 ios/include/utils/Hash.h create mode 100644 ios/include/utils/JobSystem.h create mode 100644 ios/include/utils/Profiler.h create mode 100644 ios/include/utils/Range.h create mode 100644 ios/include/utils/RangeMap.h create mode 100644 ios/include/utils/Stopwatch.h create mode 100644 ios/include/utils/Systrace.h create mode 100644 ios/include/utils/ThermalManager.h create mode 100644 ios/include/utils/ThreadUtils.h create mode 100644 ios/include/utils/WorkStealingDequeue.h create mode 100644 ios/include/utils/Zip2Iterator.h create mode 100644 ios/include/utils/android/ThermalManager.h create mode 100644 ios/include/utils/api_level.h create mode 100644 ios/include/utils/architecture.h create mode 100644 ios/include/utils/ashmem.h create mode 100644 ios/include/utils/generic/Condition.h create mode 100644 ios/include/utils/generic/ThermalManager.h create mode 100644 ios/include/utils/linux/Condition.h create mode 100644 ios/include/utils/linux/Mutex.h create mode 100644 ios/include/utils/sstream.h create mode 100644 ios/include/utils/string.h create mode 100644 ios/include/utils/trap.h create mode 100644 ios/include/utils/vector.h create mode 100644 ios/include/utils/win32/stdtypes.h rename ios/include/viewer/{SimpleViewer.h => ViewerGui.h} (80%) diff --git a/ios/include/backend/DriverEnums.h b/ios/include/backend/DriverEnums.h index 95050f2a..aee528da 100644 --- 
a/ios/include/backend/DriverEnums.h +++ b/ios/include/backend/DriverEnums.h @@ -33,7 +33,6 @@ #include #include -namespace filament { /** * Types and enums used by filament's driver. * @@ -41,24 +40,24 @@ namespace filament { * internal redeclaration of these types. * For e.g. Use Texture::Sampler instead of filament::SamplerType. */ -namespace backend { +namespace filament::backend { -static constexpr uint64_t SWAP_CHAIN_CONFIG_TRANSPARENT = 0x1; -static constexpr uint64_t SWAP_CHAIN_CONFIG_READABLE = 0x2; -static constexpr uint64_t SWAP_CHAIN_CONFIG_ENABLE_XCB = 0x4; +static constexpr uint64_t SWAP_CHAIN_CONFIG_TRANSPARENT = 0x1; +static constexpr uint64_t SWAP_CHAIN_CONFIG_READABLE = 0x2; +static constexpr uint64_t SWAP_CHAIN_CONFIG_ENABLE_XCB = 0x4; static constexpr uint64_t SWAP_CHAIN_CONFIG_APPLE_CVPIXELBUFFER = 0x8; -static constexpr size_t MAX_VERTEX_ATTRIBUTE_COUNT = 16; // This is guaranteed by OpenGL ES. -static constexpr size_t MAX_VERTEX_SAMPLER_COUNT = 16; // This is guaranteed by OpenGL ES. -static constexpr size_t MAX_FRAGMENT_SAMPLER_COUNT = 16; // This is guaranteed by OpenGL ES. -static constexpr size_t MAX_SAMPLER_COUNT = 32; // This is guaranteed by OpenGL ES. -static constexpr size_t MAX_VERTEX_BUFFER_COUNT = 16; // Max number of bound buffer objects. +static constexpr size_t MAX_VERTEX_ATTRIBUTE_COUNT = 16; // This is guaranteed by OpenGL ES. +static constexpr size_t MAX_VERTEX_SAMPLER_COUNT = 16; // This is guaranteed by OpenGL ES. +static constexpr size_t MAX_FRAGMENT_SAMPLER_COUNT = 16; // This is guaranteed by OpenGL ES. +static constexpr size_t MAX_SAMPLER_COUNT = 32; // This is guaranteed by OpenGL ES. +static constexpr size_t MAX_VERTEX_BUFFER_COUNT = 16; // Max number of bound buffer objects. 
static_assert(MAX_VERTEX_BUFFER_COUNT <= MAX_VERTEX_ATTRIBUTE_COUNT, "The number of buffer objects that can be attached to a VertexBuffer must be " "less than or equal to the maximum number of vertex attributes."); -static constexpr size_t CONFIG_BINDING_COUNT = 8; +static constexpr size_t CONFIG_BINDING_COUNT = 12; // This is guaranteed by OpenGL ES. /** * Selects which driver a particular Engine should use. @@ -129,7 +128,6 @@ inline constexpr TargetBufferFlags getTargetBufferFlagsAt(size_t index) noexcept enum class BufferUsage : uint8_t { STATIC, //!< content modified once, used many times DYNAMIC, //!< content modified frequently, used many times - STREAM, //!< content invalidated and modified frequently, used many times }; /** @@ -142,9 +140,9 @@ struct Viewport { uint32_t width; //!< width in pixels uint32_t height; //!< height in pixels //! get the right coordinate in window space of the viewport - int32_t right() const noexcept { return left + width; } + int32_t right() const noexcept { return left + int32_t(width); } //! get the top coordinate in window space of the viewport - int32_t top() const noexcept { return bottom + height; } + int32_t top() const noexcept { return bottom + int32_t(height); } }; /** @@ -160,7 +158,7 @@ struct DepthRange { * @see Fence, Fence::wait() */ enum class FenceStatus : int8_t { - ERROR = -1, //!< An error occured. The Fence condition is not satisfied. + ERROR = -1, //!< An error occurred. The Fence condition is not satisfied. CONDITION_SATISFIED = 0, //!< The Fence condition is satisfied. TIMEOUT_EXPIRED = 1, //!< wait()'s timeout expired. The Fence condition is not satisfied. }; @@ -169,7 +167,7 @@ enum class FenceStatus : int8_t { * Status codes for sync objects */ enum class SyncStatus : int8_t { - ERROR = -1, //!< An error occured. The Sync is not signaled. + ERROR = -1, //!< An error occurred. The Sync is not signaled. SIGNALED = 0, //!< The Sync is signaled. 
NOT_SIGNALED = 1, //!< The Sync is not signaled yet }; @@ -720,7 +718,7 @@ enum class SamplerCompareMode : uint8_t { COMPARE_TO_TEXTURE = 1 }; -//! comparison function for the depth sampler +//! comparison function for the depth / stencil sampler enum class SamplerCompareFunc : uint8_t { // don't change the enums values LE = 0, //!< Less or equal @@ -729,11 +727,11 @@ enum class SamplerCompareFunc : uint8_t { G, //!< Strictly greater than E, //!< Equal NE, //!< Not equal - A, //!< Always. Depth testing is deactivated. - N //!< Never. The depth test always fails. + A, //!< Always. Depth / stencil testing is deactivated. + N //!< Never. The depth / stencil test always fails. }; -//! Sampler paramters +//! Sampler parameters struct SamplerParams { // NOLINT union { struct { @@ -786,10 +784,21 @@ enum class BlendFunction : uint8_t { SRC_ALPHA_SATURATE //!< f(src, dst) = (1,1,1) * min(src.a, 1 - dst.a), 1 }; +//! stencil operation +enum class StencilOperation : uint8_t { + KEEP, //!< Keeps the current value. + ZERO, //!< Sets the value to 0. + REPLACE, //!< Sets the value to the stencil reference value. + INCR, //!< Increments the current value. Clamps to the maximum representable unsigned value. + INCR_WRAP, //!< Increments the current value. Wraps value to zero when incrementing the maximum representable unsigned value. + DECR, //!< Decrements the current value. Clamps to 0. + DECR_WRAP, //!< Decrements the current value. Wraps value to the maximum representable unsigned value when decrementing a value of zero. + INVERT, //!< Bitwise inverts the current value. +}; + //! Stream for external textures enum class StreamType { NATIVE, //!< Not synchronized but copy-free. Good for video. - TEXTURE_ID, //!< Synchronized, but GL-only and incurs copies. Good for AR on devices before API 26. ACQUIRED, //!< Synchronized, copy-free, and take a release callback. Good for AR but requires API 26+. 
}; @@ -819,9 +828,11 @@ struct RasterState { using DepthFunc = backend::SamplerCompareFunc; using BlendEquation = backend::BlendEquation; using BlendFunction = backend::BlendFunction; + using StencilFunction = backend::SamplerCompareFunc; + using StencilOperation = backend::StencilOperation; RasterState() noexcept { // NOLINT - static_assert(sizeof(RasterState) == sizeof(uint32_t), + static_assert(sizeof(RasterState) == sizeof(uint64_t), "RasterState size not what was intended"); culling = CullingMode::BACK; blendEquationRGB = BlendEquation::ADD; @@ -830,6 +841,10 @@ struct RasterState { blendFunctionSrcAlpha = BlendFunction::ONE; blendFunctionDstRGB = BlendFunction::ZERO; blendFunctionDstAlpha = BlendFunction::ZERO; + stencilFunc = StencilFunction::A; + stencilOpStencilFail = StencilOperation::KEEP; + stencilOpDepthFail = StencilOperation::KEEP; + stencilOpDepthStencilPass = StencilOperation::KEEP; } bool operator == (RasterState rhs) const noexcept { return u == rhs.u; } @@ -858,40 +873,56 @@ struct RasterState { union { struct { //! culling mode - CullingMode culling : 2; // 2 + CullingMode culling : 2; // 2 //! blend equation for the red, green and blue components - BlendEquation blendEquationRGB : 3; // 5 + BlendEquation blendEquationRGB : 3; // 5 //! blend equation for the alpha component - BlendEquation blendEquationAlpha : 3; // 8 + BlendEquation blendEquationAlpha : 3; // 8 //! blending function for the source color - BlendFunction blendFunctionSrcRGB : 4; // 12 + BlendFunction blendFunctionSrcRGB : 4; // 12 //! blending function for the source alpha - BlendFunction blendFunctionSrcAlpha : 4; // 16 + BlendFunction blendFunctionSrcAlpha : 4; // 16 //! blending function for the destination color - BlendFunction blendFunctionDstRGB : 4; // 20 + BlendFunction blendFunctionDstRGB : 4; // 20 //! blending function for the destination alpha - BlendFunction blendFunctionDstAlpha : 4; // 24 + BlendFunction blendFunctionDstAlpha : 4; // 24 //! 
Whether depth-buffer writes are enabled - bool depthWrite : 1; // 25 + bool depthWrite : 1; // 25 //! Depth test function - DepthFunc depthFunc : 3; // 28 + DepthFunc depthFunc : 3; // 28 //! Whether color-buffer writes are enabled - bool colorWrite : 1; // 29 + bool colorWrite : 1; // 29 //! use alpha-channel as coverage mask for anti-aliasing - bool alphaToCoverage : 1; // 30 + bool alphaToCoverage : 1; // 30 //! whether front face winding direction must be inverted - bool inverseFrontFaces : 1; // 31 + bool inverseFrontFaces : 1; // 31 + //! Whether stencil-buffer writes are enabled + bool stencilWrite : 1; // 32 + //! Stencil reference value + uint8_t stencilRef : 8; // 40 + //! Stencil test function + StencilFunction stencilFunc : 3; // 43 + //! Stencil operation when stencil test fails + StencilOperation stencilOpStencilFail : 3; // 46 //! padding, must be 0 - uint8_t padding : 1; // 32 + uint8_t padding0 : 2; // 48 + //! Stencil operation when stencil test passes but depth test fails + StencilOperation stencilOpDepthFail : 3; // 51 + //! Stencil operation when both stencil and depth test pass + StencilOperation stencilOpDepthStencilPass : 3; // 54 + //! padding, must be 0 + uint8_t padding1 : 2; // 56 + //! 
padding, must be 0 + uint8_t padding2 : 8; // 64 }; - uint32_t u = 0; + uint64_t u = 0; }; }; @@ -914,7 +945,7 @@ struct ShaderStageFlags { (fragment && type == ShaderType::FRAGMENT); } }; -static constexpr ShaderStageFlags ALL_SHADER_STAGE_FLAGS = { .vertex = true, .fragment = true }; +static constexpr ShaderStageFlags ALL_SHADER_STAGE_FLAGS = { true, true }; /** * Selects which buffers to clear at the beginning of the render pass, as well as which buffers @@ -998,8 +1029,7 @@ enum class Workaround : uint16_t { ALLOW_READ_ONLY_ANCILLARY_FEEDBACK_LOOP }; -} // namespace backend -} // namespace filament +} // namespace filament::backend template<> struct utils::EnableBitMaskOperators : public std::true_type {}; diff --git a/ios/include/backend/Handle.h b/ios/include/backend/Handle.h index 5bbfaeab..7317e2c5 100644 --- a/ios/include/backend/Handle.h +++ b/ios/include/backend/Handle.h @@ -26,6 +26,7 @@ #include #include +#include namespace filament::backend { diff --git a/ios/include/backend/Platform.h b/ios/include/backend/Platform.h index 26ba709f..87c8cefe 100644 --- a/ios/include/backend/Platform.h +++ b/ios/include/backend/Platform.h @@ -80,7 +80,7 @@ public: * underlying platform is returned and \p backendHint is updated * accordingly. Can't be nullptr. * - * @return A pointer to the Plaform object. + * @return A pointer to the Platform object. * * @see destroy */ diff --git a/ios/include/filamat/MaterialBuilder.h b/ios/include/filamat/MaterialBuilder.h index 435aed0b..8a3ba129 100644 --- a/ios/include/filamat/MaterialBuilder.h +++ b/ios/include/filamat/MaterialBuilder.h @@ -388,6 +388,9 @@ public: //! Enable / disable depth based culling (enabled by default, material instances can override). MaterialBuilder& depthCulling(bool enable) noexcept; + //! Enable / disable instanced primitives (disabled by default). + MaterialBuilder& instanced(bool enable) noexcept; + /** * Double-sided materials don't cull faces, equivalent to culling(CullingMode::NONE). 
* doubleSided() overrides culling() if called. @@ -731,6 +734,7 @@ private: bool mDoubleSidedCapability = false; bool mColorWrite = true; bool mDepthTest = true; + bool mInstanced = true; bool mDepthWrite = true; bool mDepthWriteSet = false; diff --git a/ios/include/filament/IndirectLight.h b/ios/include/filament/IndirectLight.h index 2ddc148f..70448523 100644 --- a/ios/include/filament/IndirectLight.h +++ b/ios/include/filament/IndirectLight.h @@ -260,7 +260,7 @@ public: * range can be adjusted with this method. * * @param intensity Scale factor applied to the environment and irradiance such that - * the result is in lux, or lumen/m^2(default = 30000) + * the result is in lux, or lumen/m^2 (default = 30000) */ void setIntensity(float intensity) noexcept; diff --git a/ios/include/filament/MaterialChunkType.h b/ios/include/filament/MaterialChunkType.h index b80e6354..39172417 100644 --- a/ios/include/filament/MaterialChunkType.h +++ b/ios/include/filament/MaterialChunkType.h @@ -72,6 +72,7 @@ enum UTILS_PUBLIC ChunkType : uint64_t { MaterialColorWrite = charTo64bitNum("MAT_CWRIT"), MaterialDepthWrite = charTo64bitNum("MAT_DWRIT"), MaterialDepthTest = charTo64bitNum("MAT_DTEST"), + MaterialInstanced = charTo64bitNum("MAT_INSTA"), MaterialCullingMode = charTo64bitNum("MAT_CUMO"), MaterialHasCustomDepthShader =charTo64bitNum("MAT_CSDP"), diff --git a/ios/include/filament/MaterialEnums.h b/ios/include/filament/MaterialEnums.h index 3bc8a929..3eaf24e6 100644 --- a/ios/include/filament/MaterialEnums.h +++ b/ios/include/filament/MaterialEnums.h @@ -27,7 +27,7 @@ namespace filament { // update this when a new version of filament wouldn't work with older materials -static constexpr size_t MATERIAL_VERSION = 21; +static constexpr size_t MATERIAL_VERSION = 25; /** * Supported shading models diff --git a/ios/include/filament/MorphTargetBuffer.h b/ios/include/filament/MorphTargetBuffer.h index d96fe6df..d080d0da 100644 --- a/ios/include/filament/MorphTargetBuffer.h +++ 
b/ios/include/filament/MorphTargetBuffer.h @@ -78,43 +78,49 @@ public: }; /** - * Updates the position of morph target at the index. + * Updates positions for the given morph target. + * + * This is equivalent to the float4 method, but uses 1.0 for the 4th component. * * Both positions and tangents must be provided. * * @param engine Reference to the filament::Engine associated with this MorphTargetBuffer. * @param targetIndex the index of morph target to be updated. - * @param weights pointer to at least count positions - * @param count number of position elements in positions + * @param positions pointer to at least "count" positions + * @param count number of float3 vectors in positions + * @param offset offset into the target buffer, expressed as a number of float4 vectors * @see setTangentsAt */ void setPositionsAt(Engine& engine, size_t targetIndex, math::float3 const* positions, size_t count, size_t offset = 0); /** - * Updates the position of morph target at the index. + * Updates positions for the given morph target. * * Both positions and tangents must be provided. * * @param engine Reference to the filament::Engine associated with this MorphTargetBuffer. * @param targetIndex the index of morph target to be updated. - * @param weights pointer to at least count positions - * @param count number of position elements in positions - * @see setPositionsAt + * @param positions pointer to at least "count" positions + * @param count number of float4 vectors in positions + * @param offset offset into the target buffer, expressed as a number of float4 vectors + * @see setTangentsAt */ void setPositionsAt(Engine& engine, size_t targetIndex, math::float4 const* positions, size_t count, size_t offset = 0); /** - * Updates the position of morph target at the index. + * Updates tangents for the given morph target. * - * Both positions and tangents must be provided. 
+ * These quaternions must be represented as signed shorts, where real numbers in the [-1,+1] + * range multiplied by 32767. * * @param engine Reference to the filament::Engine associated with this MorphTargetBuffer. * @param targetIndex the index of morph target to be updated. - * @param tangents pointer to at least count tangents - * @param count number of tangent elements in tangents - * @see setTangentsAt + * @param tangents pointer to at least "count" tangents + * @param count number of short4 quaternions in tangents + * @param offset offset into the target buffer, expressed as a number of short4 vectors + * @see setPositionsAt */ void setTangentsAt(Engine& engine, size_t targetIndex, math::short4 const* tangents, size_t count, size_t offset = 0); diff --git a/ios/include/filament/Options.h b/ios/include/filament/Options.h index 6c750ae9..95c864a4 100644 --- a/ios/include/filament/Options.h +++ b/ios/include/filament/Options.h @@ -25,6 +25,9 @@ namespace filament { class Texture; +/** + * Generic quality level. 
+ */ enum class QualityLevel : uint8_t { LOW, MEDIUM, @@ -69,8 +72,8 @@ enum class BlendMode : uint8_t { * */ struct DynamicResolutionOptions { - math::float2 minScale = math::float2(0.5f); //!< minimum scale factors in x and y - math::float2 maxScale = math::float2(1.0f); //!< maximum scale factors in x and y + math::float2 minScale = {0.5f, 0.5f}; //!< minimum scale factors in x and y %codegen_java_float% + math::float2 maxScale = {1.0f, 1.0f}; //!< maximum scale factors in x and y %codegen_java_float% float sharpness = 0.9f; //!< sharpness when QualityLevel::MEDIUM or higher is used [0 (disabled), 1 (sharpest)] bool enabled = false; //!< enable or disable dynamic resolution bool homogeneousScaling = false; //!< set to true to force homogeneous scaling @@ -125,8 +128,8 @@ struct BloomOptions { ADD, //!< Bloom is modulated by the strength parameter and added to the scene INTERPOLATE //!< Bloom is interpolated with the scene using the strength parameter }; - Texture* dirt = nullptr; //!< user provided dirt texture - float dirtStrength = 0.2f; //!< strength of the dirt texture + Texture* dirt = nullptr; //!< user provided dirt texture %codegen_skip_json% %codegen_skip_javascript% + float dirtStrength = 0.2f; //!< strength of the dirt texture %codegen_skip_json% %codegen_skip_javascript% float strength = 0.10f; //!< bloom's strength between 0.0 and 1.0 uint32_t resolution = 360; //!< resolution of vertical axis (2^levels to 2048) float anamorphism = 1.0f; //!< bloom x/y aspect-ratio (1/32 to 32) @@ -151,16 +154,16 @@ struct BloomOptions { * Options to control fog in the scene */ struct FogOptions { - float distance = 0.0f; //!< distance in world units from the camera where the fog starts ( >= 0.0 ) - float maximumOpacity = 1.0f; //!< fog's maximum opacity between 0 and 1 - float height = 0.0f; //!< fog's floor in world units - float heightFalloff = 1.0f; //!< how fast fog dissipates with altitude - LinearColor color{0.5f}; //!< fog's color (linear), see 
fogColorFromIbl - float density = 0.1f; //!< fog's density at altitude given by 'height' - float inScatteringStart = 0.0f; //!< distance in world units from the camera where in-scattering starts - float inScatteringSize = -1.0f; //!< size of in-scattering (>0 to activate). Good values are >> 1 (e.g. ~10 - 100). - bool fogColorFromIbl = false; //!< Fog color will be modulated by the IBL color in the view direction. - bool enabled = false; //!< enable or disable fog + float distance = 0.0f; //!< distance in world units from the camera where the fog starts ( >= 0.0 ) + float maximumOpacity = 1.0f; //!< fog's maximum opacity between 0 and 1 + float height = 0.0f; //!< fog's floor in world units + float heightFalloff = 1.0f; //!< how fast fog dissipates with altitude + LinearColor color = {0.5f, 0.5f, 0.5f};//!< fog's color (linear), see fogColorFromIbl + float density = 0.1f; //!< fog's density at altitude given by 'height' + float inScatteringStart = 0.0f; //!< distance in world units from the camera where in-scattering starts + float inScatteringSize = -1.0f; //!< size of in-scattering (>0 to activate). Good values are >> 1 (e.g. ~10 - 100). + bool fogColorFromIbl = false; //!< Fog color will be modulated by the IBL color in the view direction. 
+ bool enabled = false; //!< enable or disable fog }; /** @@ -174,8 +177,9 @@ struct FogOptions { */ struct DepthOfFieldOptions { enum class Filter : uint8_t { - NONE = 0, - MEDIAN = 2 + NONE, + UNUSED, + MEDIAN }; float cocScale = 1.0f; //!< circle of confusion scale factor (amount of blur) float maxApertureDiameter = 0.01f; //!< maximum aperture diameter in meters (zero to disable rotation) @@ -227,7 +231,7 @@ struct VignetteOptions { float midPoint = 0.5f; //!< high values restrict the vignette closer to the corners, between 0 and 1 float roundness = 0.5f; //!< controls the shape of the vignette, from a rounded rectangle (0.0), to an oval (0.5), to a circle (1.0) float feather = 0.5f; //!< softening amount of the vignette effect, between 0 and 1 - LinearColorA color{0.0f, 0.0f, 0.0f, 1.0f}; //!< color of the vignette effect, alpha is currently ignored + LinearColorA color = {0.0f, 0.0f, 0.0f, 1.0f}; //!< color of the vignette effect, alpha is currently ignored bool enabled = false; //!< enables or disables the vignette effect }; @@ -271,17 +275,18 @@ struct AmbientOcclusionOptions { * Ambient shadows from dominant light */ struct Ssct { - float lightConeRad = 1.0f; //!< full cone angle in radian, between 0 and pi/2 - float shadowDistance = 0.3f; //!< how far shadows can be cast - float contactDistanceMax = 1.0f; //!< max distance for contact - float intensity = 0.8f; //!< intensity - math::float3 lightDirection{ 0, -1, 0 }; //!< light direction - float depthBias = 0.01f; //!< depth bias in world units (mitigate self shadowing) - float depthSlopeBias = 0.01f; //!< depth slope bias (mitigate self shadowing) - uint8_t sampleCount = 4; //!< tracing sample count, between 1 and 255 - uint8_t rayCount = 1; //!< # of rays to trace, between 1 and 255 - bool enabled = false; //!< enables or disables SSCT - } ssct; + float lightConeRad = 1.0f; //!< full cone angle in radian, between 0 and pi/2 + float shadowDistance = 0.3f; //!< how far shadows can be cast + float 
contactDistanceMax = 1.0f; //!< max distance for contact + float intensity = 0.8f; //!< intensity + math::float3 lightDirection = { 0, -1, 0 }; //!< light direction + float depthBias = 0.01f; //!< depth bias in world units (mitigate self shadowing) + float depthSlopeBias = 0.01f; //!< depth slope bias (mitigate self shadowing) + uint8_t sampleCount = 4; //!< tracing sample count, between 1 and 255 + uint8_t rayCount = 1; //!< # of rays to trace, between 1 and 255 + bool enabled = false; //!< enables or disables SSCT + }; + Ssct ssct; // %codegen_skip_javascript% %codegen_java_flatten% }; /** @@ -328,21 +333,31 @@ struct ScreenSpaceReflectionsOptions { bool enabled = false; }; +/** + * Options for the screen-space guard band. + * A guard band can be enabled to avoid some artifacts towards the edge of the screen when + * using screen-space effects such as SSAO. Enabling the guard band reduces performance slightly. + * Currently the guard band can only be enabled or disabled. + */ +struct GuardBandOptions { + bool enabled = false; +}; + /** * List of available post-processing anti-aliasing techniques. * @see setAntiAliasing, getAntiAliasing, setSampleCount */ enum class AntiAliasing : uint8_t { - NONE = 0, //!< no anti aliasing performed as part of post-processing - FXAA = 1 //!< FXAA is a low-quality but very efficient type of anti-aliasing. (default). + NONE, //!< no anti aliasing performed as part of post-processing + FXAA //!< FXAA is a low-quality but very efficient type of anti-aliasing. (default). }; /** * List of available post-processing dithering techniques. 
*/ enum class Dithering : uint8_t { - NONE = 0, //!< No dithering - TEMPORAL = 1 //!< Temporal dithering (default) + NONE, //!< No dithering + TEMPORAL //!< Temporal dithering (default) }; /** diff --git a/ios/include/filament/RenderableManager.h b/ios/include/filament/RenderableManager.h index 51b30e87..bf523c8e 100644 --- a/ios/include/filament/RenderableManager.h +++ b/ios/include/filament/RenderableManager.h @@ -205,10 +205,12 @@ public: * avoid using a separate View for the HUD. Note that priority is completely orthogonal to * Builder::layerMask, which merely controls visibility. * - * \see Builder::blendOrder() + * @param priority clamped to the range [0..7], defaults to 4; 7 is lowest priority + * (rendered last). * - * The priority is clamped to the range [0..7], defaults to 4; 7 is lowest priority - * (rendered last). + * @return Builder reference for chaining calls. + * + * @see Builder::blendOrder(), RenderableManager::setBlendOrderAt() */ Builder& priority(uint8_t priority) noexcept; @@ -337,23 +339,44 @@ public: */ Builder& morphing(uint8_t level, size_t primitiveIndex, MorphTargetBuffer* morphTargetBuffer, size_t offset, size_t count) noexcept; + inline Builder& morphing(uint8_t level, size_t primitiveIndex, MorphTargetBuffer* morphTargetBuffer) noexcept; /** - * Sets an ordering index for blended primitives that all live at the same Z value. + * Sets the drawing order for blended primitives. The drawing order is either global or + * local (default) to this Renderable. In either case, the Renderable priority takes + * precedence. * * @param primitiveIndex the primitive of interest * @param order draw order number (0 by default). Only the lowest 15 bits are used. + * + * @return Builder reference for chaining calls. + * + * @see globalBlendOrderEnabled */ Builder& blendOrder(size_t primitiveIndex, uint16_t order) noexcept; + /** + * Sets whether the blend order is global or local to this Renderable (by default). 
+ * + * @param primitiveIndex the primitive of interest + * @param enabled true for global, false for local blend ordering. + * + * @return Builder reference for chaining calls. + * + * @see blendOrder + */ + Builder& globalBlendOrderEnabled(size_t primitiveIndex, bool enabled) noexcept; + + /** * Specifies the number of draw instance of this renderable. The default is 1 instance and * the maximum number of instances allowed is 65535. 0 is invalid. * All instances are culled using the same bounding box, so care must be taken to make * sure all instances render inside the specified bounding box. - * The material can use getInstanceIndex() in the vertex shader to get the instance index and + * The material must set its `instanced` parameter to `true` in order to use + * getInstanceIndex() in the vertex or fragment shader to get the instance index and * possibly adjust the position or transform. * * @param instanceCount the number of instances silently clamped between 1 and 65535. @@ -394,6 +417,7 @@ public: MaterialInstance const* materialInstance = nullptr; PrimitiveType type = PrimitiveType::TRIANGLES; uint16_t blendOrder = 0; + bool globalBlendOrderEnabled = false; struct { MorphTargetBuffer* buffer = nullptr; size_t offset = 0; @@ -497,7 +521,15 @@ public: void setBones(Instance instance, math::mat4f const* transforms, size_t boneCount = 1, size_t offset = 0); //!< \overload /** - * Associates a SkinningBuffer to a renderable instance + * Associates a region of a SkinningBuffer to a renderable instance + * + * Note: due to hardware limitations offset + 256 must be smaller or equal to + * skinningBuffer->getBoneCount() + * + * @param instance Instance of the component obtained from getInstance(). + * @param skinningBuffer skinning buffer to associate to the instance + * @param count Size of the region in bones, must be smaller or equal to 256. 
+ * @param offset Start offset of the region in bones */ void setSkinningBuffer(Instance instance, SkinningBuffer* skinningBuffer, size_t count, size_t offset); @@ -511,7 +543,7 @@ public: * @param instance Instance of the component obtained from getInstance(). * @param weights Pointer to morph target weights to be update. * @param count Number of morph target weights. - * @param offset Index of the first first morph target weight to set at instance. + * @param offset Index of the first morph target weight to set at instance. */ void setMorphWeights(Instance instance, float const* weights, size_t count, size_t offset = 0); @@ -578,24 +610,28 @@ public: size_t offset, size_t count) noexcept; /** - * Changes the active range of indices or topology for the given primitive. - * - * \see Builder::geometry() - */ - void setGeometryAt(Instance instance, size_t primitiveIndex, - PrimitiveType type, size_t offset, size_t count) noexcept; - - /** - * Changes the ordering index for blended primitives that all live at the same Z value. - * - * \see Builder::blendOrder() + * Changes the drawing order for blended primitives. The drawing order is either global or + * local (default) to this Renderable. In either case, the Renderable priority takes precedence. * * @param instance the renderable of interest * @param primitiveIndex the primitive of interest * @param order draw order number (0 by default). Only the lowest 15 bits are used. + * + * @see Builder::blendOrder(), setGlobalBlendOrderEnabledAt() */ void setBlendOrderAt(Instance instance, size_t primitiveIndex, uint16_t order) noexcept; + /** + * Changes whether the blend order is global or local to this Renderable (by default). + * + * @param instance the renderable of interest + * @param primitiveIndex the primitive of interest + * @param enabled true for global, false for local blend ordering. 
+ * + * @see Builder::globalBlendOrderEnabled(), setBlendOrderAt() + */ + void setGlobalBlendOrderEnabledAt(Instance instance, size_t primitiveIndex, bool enabled) noexcept; + /** * Retrieves the set of enabled attribute slots in the given primitive's VertexBuffer. */ diff --git a/ios/include/filament/Renderer.h b/ios/include/filament/Renderer.h index cbb3aa19..ad189c2f 100644 --- a/ios/include/filament/Renderer.h +++ b/ios/include/filament/Renderer.h @@ -80,11 +80,8 @@ public: // refresh-rate of the display in Hz. set to 0 for offscreen or turn off frame-pacing. float refreshRate = 60.0f; - // how far in advance a buffer must be queued for presentation at a given time in ns - uint64_t presentationDeadlineNanos = 0; - - // offset by which vsyncSteadyClockTimeNano provided in beginFrame() is offset in ns - uint64_t vsyncOffsetNanos = 0; + [[deprecated]] uint64_t presentationDeadlineNanos = 0; + [[deprecated]] uint64_t vsyncOffsetNanos = 0; }; /** @@ -243,6 +240,20 @@ public: bool beginFrame(SwapChain* swapChain, uint64_t vsyncSteadyClockTimeNano = 0u); + /** + * Set the time at which the frame must be presented to the display. + * + * This must be called between beginFrame() and endFrame(). + * + * @param monotonic_clock_ns the time in nanoseconds corresponding to the system monotonic up-time clock. + * the presentation time is typically set in the middle of the period + * of interest. The presentation time cannot be too far in the + * future because it is limited by how many buffers are available in + * the display sub-system. Typically it is set to 1 or 2 vsync periods + * away. + */ + void setPresentationTime(int64_t monotonic_clock_ns); + /** * Render a View into this renderer's window. * @@ -444,6 +455,10 @@ public: * * It is also possible to use a Fence to wait for the read-back. 
* + * OpenGL only: if issuing a readPixels on a RenderTarget backed by a Texture that had data + * uploaded to it via setImage, the data returned from readPixels will be y-flipped with respect + * to the setImage call. + * * @remark * readPixels() is intended for debugging and testing. It will impact performance significantly. * diff --git a/ios/include/filament/SkinningBuffer.h b/ios/include/filament/SkinningBuffer.h index ca313d74..007feb85 100644 --- a/ios/include/filament/SkinningBuffer.h +++ b/ios/include/filament/SkinningBuffer.h @@ -33,6 +33,7 @@ namespace filament { /** * SkinningBuffer is used to hold skinning data (bones). It is a simple wraper around * a structured UBO. + * @see RenderableManager::setSkinningBuffer */ class UTILS_PUBLIC SkinningBuffer : public FilamentAPI { struct BuilderDetails; @@ -93,6 +94,7 @@ public: * @param transforms pointer to at least count Bone * @param count number of Bone elements in transforms * @param offset offset in elements (not bytes) in the SkinningBuffer (not in transforms) + * @see RenderableManager::setSkinningBuffer */ void setBones(Engine& engine, RenderableManager::Bone const* transforms, size_t count, size_t offset = 0); @@ -103,6 +105,7 @@ public: * @param transforms pointer to at least count mat4f * @param count number of mat4f elements in transforms * @param offset offset in elements (not bytes) in the SkinningBuffer (not in transforms) + * @see RenderableManager::setSkinningBuffer */ void setBones(Engine& engine, math::mat4f const* transforms, size_t count, size_t offset = 0); diff --git a/ios/include/filament/Skybox.h b/ios/include/filament/Skybox.h index bf4342a4..5e9f2c60 100644 --- a/ios/include/filament/Skybox.h +++ b/ios/include/filament/Skybox.h @@ -124,7 +124,7 @@ public: * * Ignored if an environment is set. * - * @param color + * @param color the constant color * * @return This Builder, for chaining calls. 
*/ diff --git a/ios/include/filament/Stream.h b/ios/include/filament/Stream.h index ab877ac7..dd9df40e 100644 --- a/ios/include/filament/Stream.h +++ b/ios/include/filament/Stream.h @@ -35,10 +35,9 @@ class Engine; /** * Stream is used to attach a video stream to a Filament `Texture`. * - * Note that the `Stream` class is fairly Android centric. It supports three different + * Note that the `Stream` class is fairly Android centric. It supports two different * configurations: * - * - TEXTURE_ID...takes an OpenGL texture ID and incurs a copy * - ACQUIRED.....connects to an Android AHardwareBuffer * - NATIVE.......connects to an Android SurfaceTexture * @@ -67,10 +66,6 @@ class Engine; * - Filament invokes low-level graphics commands on the \em{driver thread}. * - The thread that calls `beginFrame` is called the \em{main thread}. * - * The TEXTURE_ID configuration achieves synchronization automatically. In this mode, Filament - * performs a copy on the main thread during `beginFrame` by blitting the external image into - * an internal round-robin queue of images. This copy has a run-time cost. - * * For ACQUIRED streams, there is no need to perform the copy because Filament explictly acquires * the stream, then releases it later via a callback function. This configuration is especially * useful when the Vulkan backend is enabled. @@ -97,7 +92,7 @@ public: * By default, Stream objects are ACQUIRED and must have external images pushed to them via *
Stream::setAcquiredImage
. * - * To create a NATIVE or TEXTURE_ID stream, call one of the
stream
methods + * To create a NATIVE stream, call one of the
stream
methods * on the builder. */ class Builder : public BuilderBase { @@ -122,23 +117,6 @@ public: */ Builder& stream(void* stream) noexcept; - /** - * Creates a TEXTURE_ID stream. This will sample data from the supplied - * external texture and copy it into an internal private texture. - * - * @param externalTextureId An opaque texture id (typically a GLuint created with glGenTextures) - * In a context shared with filament. In that case this texture's - * target must be GL_TEXTURE_EXTERNAL_OES and the wrap mode must - * be CLAMP_TO_EDGE. - * - * @return This Builder, for chaining calls. - * - * @see Texture::setExternalStream() - * @deprecated this method existed only for ARCore which doesn't need this anymore, use Texture::import() instead. - */ - UTILS_DEPRECATED - Builder& stream(intptr_t externalTextureId) noexcept; - /** * * @param width initial width of the incoming stream. Whether this value is used is @@ -173,7 +151,7 @@ public: }; /** - * Indicates whether this stream is a NATIVE stream, TEXTURE_ID stream, or ACQUIRED stream. + * Indicates whether this stream is a NATIVE stream or ACQUIRED stream. */ StreamType getStreamType() const noexcept; @@ -190,7 +168,7 @@ public: * also where the callback is invoked. This method can only be used for streams that were * constructed without calling the `stream` method on the builder. * - * @see Stream for more information about NATIVE, TEXTURE_ID, and ACQUIRED configurations. + * @see Stream for more information about NATIVE and ACQUIRED configurations. * * @param image Pointer to AHardwareBuffer, casted to void* since this is a public header. * @param callback This is triggered by Filament when it wishes to release the image. @@ -222,63 +200,6 @@ public: */ void setDimensions(uint32_t width, uint32_t height) noexcept; - /** - * Read-back the content of the last frame of a Stream since the last call to - * Renderer.beginFrame(). - * - * The Stream must be of type externalTextureId. This function is a no-op otherwise. 
- * - * @param xoffset Left offset of the sub-region to read back. - * @param yoffset Bottom offset of the sub-region to read back. - * @param width Width of the sub-region to read back. - * @param height Height of the sub-region to read back. - * @param buffer Client-side buffer where the read-back will be written. - * - * The following format are always supported: - * - PixelBufferDescriptor::PixelDataFormat::RGBA - * - PixelBufferDescriptor::PixelDataFormat::RGBA_INTEGER - * - * The following types are always supported: - * - PixelBufferDescriptor::PixelDataType::UBYTE - * - PixelBufferDescriptor::PixelDataType::UINT - * - PixelBufferDescriptor::PixelDataType::INT - * - PixelBufferDescriptor::PixelDataType::FLOAT - * - * Other combination of format/type may be supported. If a combination is - * not supported, this operation may fail silently. Use a DEBUG build - * to get some logs about the failure. - * - * Stream buffer User buffer (PixelBufferDescriptor&) - * +--------------------+ - * | | .stride .alignment - * | | ----------------------->--> - * | | O----------------------+--+ low addresses - * | | | | | | - * | w | | | .top | | - * | <---------> | | V | | - * | +---------+ | | +---------+ | | - * | | ^ | | ======> | | | | | - * | x | h| | | |.left| | | | - * +------>| v | | +---->| | | | - * | +.........+ | | +.........+ | | - * | ^ | | | | - * | y | | +----------------------+--+ high addresses - * O------------+-------+ - * - * Typically readPixels() will be called after Renderer.beginFrame(). - * - * After issuing this method, the callback associated with `buffer` will be invoked on the - * main thread, indicating that the read-back has completed. Typically, this will happen - * after multiple calls to beginFrame(), render(), endFrame(). - * - * It is also possible to use a Fence to wait for the read-back. - * - * @remark - * readPixels() is intended for debugging and testing. It will impact performance significantly. 
- */ - void readPixels(uint32_t xoffset, uint32_t yoffset, uint32_t width, uint32_t height, - backend::PixelBufferDescriptor&& buffer) noexcept; - /** * Returns the presentation time of the currently displayed frame in nanosecond. * diff --git a/ios/include/filament/ToneMapper.h b/ios/include/filament/ToneMapper.h index 0bbe06fe..8d19c597 100644 --- a/ios/include/filament/ToneMapper.h +++ b/ios/include/filament/ToneMapper.h @@ -134,12 +134,12 @@ struct UTILS_PUBLIC GenericToneMapper final : public ToneMapper { * constructor parameters approximate an ACES tone mapping curve * and the maximum input value is set to 10.0. * - * @param contrast: controls the contrast of the curve, must be > 0.0, values - * in the range 0.5..2.0 are recommended. - * @param midGrayIn: sets the input middle gray, between 0.0 and 1.0. - * @param midGrayOut: sets the output middle gray, between 0.0 and 1.0. - * @param hdrMax: defines the maximum input value that will be mapped to - * output white. Must be >= 1.0. + * @param contrast controls the contrast of the curve, must be > 0.0, values + * in the range 0.5..2.0 are recommended. + * @param midGrayIn sets the input middle gray, between 0.0 and 1.0. + * @param midGrayOut sets the output middle gray, between 0.0 and 1.0. + * @param hdrMax defines the maximum input value that will be mapped to + * output white. Must be >= 1.0. */ explicit GenericToneMapper( float contrast = 1.55f, diff --git a/ios/include/filament/View.h b/ios/include/filament/View.h index 14c7b838..2de06014 100644 --- a/ios/include/filament/View.h +++ b/ios/include/filament/View.h @@ -84,6 +84,7 @@ public: using VsmShadowOptions = VsmShadowOptions; using SoftShadowOptions = SoftShadowOptions; using ScreenSpaceReflectionsOptions = ScreenSpaceReflectionsOptions; + using GuardBandOptions = GuardBandOptions; /** * Sets the View's name. Only useful for debugging. 
@@ -345,6 +346,20 @@ public: */ ScreenSpaceReflectionsOptions const& getScreenSpaceReflectionsOptions() const noexcept; + /** + * Enables or disable screen-space guard band. Disabled by default. + * + * @param options guard band options + */ + void setGuardBandOptions(GuardBandOptions options) noexcept; + + /** + * Returns screen-space guard band options. + * + * @return guard band options + */ + GuardBandOptions const& getGuardBandOptions() const noexcept; + /** * Enables or disable multi-sample anti-aliasing (MSAA). Disabled by default. * diff --git a/ios/include/filament/Viewport.h b/ios/include/filament/Viewport.h index 7755cd17..a641e7ab 100644 --- a/ios/include/filament/Viewport.h +++ b/ios/include/filament/Viewport.h @@ -43,11 +43,6 @@ public: */ Viewport() noexcept : backend::Viewport{} {} - Viewport(const Viewport& viewport) noexcept = default; - Viewport(Viewport&& viewport) noexcept = default; - Viewport& operator=(const Viewport& viewport) noexcept = default; - Viewport& operator=(Viewport&& viewport) noexcept = default; - /** * Creates a Viewport from its left-bottom coordinates, width and height in pixels * @@ -67,16 +62,7 @@ public: */ bool empty() const noexcept { return !width || !height; } - /** - * Computes a new scaled Viewport - * @param s scaling factor on the x and y axes. - * @return A new scaled Viewport. The coordinates and dimensions of the new Viewport are - * rounded to the nearest integer value. 
- */ - Viewport scale(math::float2 s) const noexcept; - private: - /** * Compares two Viewports for equality * @param lhs reference to the left hand side Viewport diff --git a/ios/include/filameshio/MeshReader.h b/ios/include/filameshio/MeshReader.h index f9da44bd..22533791 100644 --- a/ios/include/filameshio/MeshReader.h +++ b/ios/include/filameshio/MeshReader.h @@ -63,7 +63,7 @@ public: void unregisterAll(); - std::size_t numRegistered() const noexcept; + size_t numRegistered() const noexcept; void getRegisteredMaterials(filament::MaterialInstance** materialList, utils::CString* materialNameList) const; diff --git a/ios/include/geometry/Transcoder.h b/ios/include/geometry/Transcoder.h index 0c77c941..805c2609 100644 --- a/ios/include/geometry/Transcoder.h +++ b/ios/include/geometry/Transcoder.h @@ -31,6 +31,7 @@ enum class ComponentType { SHORT, //!< If normalization is enabled, this maps from [-32767,32767] to [-1,+1] USHORT, //!< If normalization is enabled, this maps from [0,65535] to [0, +1] HALF, //!< 1 sign bit, 5 exponent bits, and 5 mantissa bits. + FLOAT, //!< Standard 32-bit float }; /** diff --git a/ios/include/gltfio/Animator.h b/ios/include/gltfio/Animator.h index ab47f572..072d532a 100644 --- a/ios/include/gltfio/Animator.h +++ b/ios/include/gltfio/Animator.h @@ -20,7 +20,7 @@ #include #include -namespace gltfio { +namespace filament::gltfio { struct FFilamentAsset; struct FFilamentInstance; @@ -56,6 +56,32 @@ public: */ void updateBoneMatrices(); + /** + * Applies a blended transform to the union of nodes affected by two animations. + * Used for cross-fading from a previous skinning-based animation or rigid body animation. + * + * First, this stashes the current transform hierarchy into a transient memory buffer. + * + * Next, this applies previousAnimIndex / previousAnimTime to the actual asset by internally + * calling applyAnimation(). 
+ * + * Finally, the stashed local transforms are lerped (via the scale / translation / rotation + * components) with their live counterparts, and the results are pushed to the asset. + * + * To achieve a cross fade effect with skinned models, clients will typically call animator + * methods in this order: (1) applyAnimation (2) applyCrossFade (3) updateBoneMatrices. The + * animation that clients pass to applyAnimation is the "current" animation corresponding to + * alpha=1, while the "previous" animation passed to applyCrossFade corresponds to alpha=0. + */ + void applyCrossFade(size_t previousAnimIndex, float previousAnimTime, float alpha); + + /** + * Pass the identity matrix into all bone nodes, useful for returning to the T pose. + * + * NOTE: this operation is independent of \c animation. + */ + void resetBoneMatrices(); + /** Returns the number of \c animation definitions in the glTF asset. */ size_t getAnimationCount() const; @@ -88,6 +114,6 @@ private: AnimatorImpl* mImpl; }; -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_ANIMATOR_H diff --git a/ios/include/gltfio/AssetLoader.h b/ios/include/gltfio/AssetLoader.h index 81feaeb2..327e4c9b 100644 --- a/ios/include/gltfio/AssetLoader.h +++ b/ios/include/gltfio/AssetLoader.h @@ -34,7 +34,9 @@ namespace utils { /** * Loader and pipeline for glTF 2.0 assets. */ -namespace gltfio { +namespace filament::gltfio { + +class NodeManager; /** * \struct AssetConfiguration AssetLoader.h gltfio/AssetLoader.h @@ -47,7 +49,7 @@ struct AssetConfiguration { //! Controls whether the loader uses filamat to generate materials on the fly, or loads a small //! set of precompiled ubershader materials. Deleting the MaterialProvider is the client's - //! responsibility. See createMaterialGenerator() and createUbershaderLoader(). + //! responsibility. See createJitShaderProvider() and createUbershaderProvider(). MaterialProvider* materials; //! 
Optional manager for associating string names with entities in the transform hierarchy. @@ -70,7 +72,8 @@ struct AssetConfiguration { * object, which is a bundle of Filament entities, material instances, textures, vertex buffers, * and index buffers. * - * Clients must use AssetLoader to create and destroy FilamentAsset objects. + * Clients must use AssetLoader to create and destroy FilamentAsset objects. This is similar to + * how filament::Engine is used to create and destroy core objects like VertexBuffer. * * AssetLoader does not fetch external buffer data or create textures on its own. Clients can use * ResourceLoader for this, which obtains the URI list from the asset. This is demonstrated in the @@ -83,7 +86,8 @@ struct AssetConfiguration { * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * auto engine = Engine::create(); - * auto materials = createMaterialGenerator(engine); + * auto materials = createJitShaderProvider(engine); + * auto decoder = createStbProvider(engine); * auto loader = AssetLoader::create({engine, materials}); * * // Parse the glTF content and create Filament entities. @@ -92,7 +96,10 @@ struct AssetConfiguration { * content.clear(); * * // Load buffers and textures from disk. - * ResourceLoader({engine, ".", true}).loadResources(asset); + * ResourceLoader resourceLoader({engine, ".", true}); + * resourceLoader.addTextureProvider("image/png", decoder) + * resourceLoader.addTextureProvider("image/jpeg", decoder) + * resourceLoader.loadResources(asset); * * // Free the glTF hierarchy as it is no longer needed. 
* asset->releaseSourceData(); @@ -114,6 +121,7 @@ struct AssetConfiguration { * loader->destroyAsset(asset); * materials->destroyMaterials(); * delete materials; + * delete decoder; * AssetLoader::destroy(&loader); * Engine::destroy(&engine); * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -221,7 +229,9 @@ public: utils::NameComponentManager* getNames() const noexcept; - MaterialProvider* getMaterialProvider() const noexcept; + NodeManager& getNodeManager() noexcept; + + MaterialProvider& getMaterialProvider() noexcept; /*! \cond PRIVATE */ protected: @@ -236,6 +246,6 @@ public: /*! \endcond */ }; -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_ASSETLOADER_H diff --git a/ios/include/gltfio/FilamentAsset.h b/ios/include/gltfio/FilamentAsset.h index 6078aa6c..8165bbb6 100644 --- a/ios/include/gltfio/FilamentAsset.h +++ b/ios/include/gltfio/FilamentAsset.h @@ -20,6 +20,8 @@ #include #include +#include + #include #include @@ -27,9 +29,10 @@ namespace filament { class Camera; class Engine; class MaterialInstance; + class Scene; } -namespace gltfio { +namespace filament::gltfio { class Animator; class FilamentInstance; @@ -42,7 +45,7 @@ class FilamentInstance; * * This class owns a hierarchy of entities that have been loaded from a glTF asset. Every entity has * a filament::TransformManager component, and some entities also have \c Name, \c Renderable, - * \c Light, or \c Camera components. + * \c Light, \c Camera, or \c Node components. * * In addition to the aforementioned entities, an asset has strong ownership over a list of * filament::VertexBuffer, filament::IndexBuffer, filament::MaterialInstance, filament::Texture, @@ -55,12 +58,14 @@ class FilamentInstance; */ class UTILS_PUBLIC FilamentAsset { public: + using Entity = utils::Entity; + using SceneMask = NodeManager::SceneMask; /** * Gets the list of entities, one for each glTF node. All of these have a Transform component. 
* Some of the returned entities may also have a Renderable component and/or a Light component. */ - const utils::Entity* getEntities() const noexcept; + const Entity* getEntities() const noexcept; /** * Gets the number of entities returned by getEntities(). @@ -70,13 +75,23 @@ public: /** * Gets the list of entities in the scene representing lights. All of these have a Light component. */ - const utils::Entity* getLightEntities() const noexcept; + const Entity* getLightEntities() const noexcept; /** * Gets the number of entities returned by getLightEntities(). */ size_t getLightEntityCount() const noexcept; + /** + * Gets the list of entities in the asset that have renderable components. + */ + const utils::Entity* getRenderableEntities() const noexcept; + + /** + * Gets the number of entities returned by getRenderableEntities(). + */ + size_t getRenderableEntityCount() const noexcept; + /** * Gets the list of entities in the scene representing cameras. All of these have a \c Camera * component. @@ -95,7 +110,7 @@ public: * * @see filament::Camera::setScaling */ - const utils::Entity* getCameraEntities() const noexcept; + const Entity* getCameraEntities() const noexcept; /** * Gets the number of entities returned by getCameraEntities(). @@ -109,7 +124,7 @@ public: * assets, this is a "super root" where each of its children is a root in a particular instance. * This allows users to transform all instances en masse if they wish to do so. */ - utils::Entity getRoot() const noexcept; + Entity getRoot() const noexcept; /** * Pops a ready renderable off the queue, or returns 0 if no renderables have become ready. @@ -122,12 +137,12 @@ public: * textures gradually become ready through asynchronous loading. 
For example, on every frame * progressive applications can do something like this: * - * while (utils::Entity e = popRenderable()) { scene.addEntity(e); } + * while (Entity e = popRenderable()) { scene.addEntity(e); } * * \see ResourceLoader#asyncBeginLoad * \see popRenderables() */ - utils::Entity popRenderable() noexcept; + Entity popRenderable() noexcept; /** * Pops up to "count" ready renderables off the queue, or returns the available number. @@ -138,7 +153,7 @@ public: * * \see ResourceLoader#asyncBeginLoad */ - size_t popRenderables(utils::Entity* entities, size_t count) noexcept; + size_t popRenderables(Entity* entities, size_t count) noexcept; /** Gets all material instances. These are already bound to renderables. */ const filament::MaterialInstance* const* getMaterialInstances() const noexcept; @@ -159,10 +174,10 @@ public: filament::Aabb getBoundingBox() const noexcept; /** Gets the NameComponentManager label for the given entity, if it exists. */ - const char* getName(utils::Entity) const noexcept; + const char* getName(Entity) const noexcept; /** Returns the first entity with the given name, or 0 if none exist. */ - utils::Entity getFirstEntityByName(const char* name) noexcept; + Entity getFirstEntityByName(const char* name) noexcept; /** * Gets a list of entities with the given name. @@ -174,7 +189,7 @@ public: * @return If entities is non-null, the number of entities written to the entity pointer. * Otherwise this returns the number of entities with the given name. */ - size_t getEntitiesByName(const char* name, utils::Entity* entities, + size_t getEntitiesByName(const char* name, Entity* entities, size_t maxCount) const noexcept; /** @@ -187,11 +202,11 @@ public: * @return If entities is non-null, the number of entities written to the entity pointer. * Otherwise this returns the number of entities with the given prefix. 
*/ - size_t getEntitiesByPrefix(const char* prefix, utils::Entity* entities, + size_t getEntitiesByPrefix(const char* prefix, Entity* entities, size_t maxCount) const noexcept; /** Gets the glTF extras string for a specific node, or for the asset, if it exists. */ - const char* getExtras(utils::Entity entity = {}) const noexcept; + const char* getExtras(Entity entity = {}) const noexcept; /** * Returns the animation engine. @@ -221,17 +236,32 @@ public: /** * Gets joints at skin index. */ - const utils::Entity* getJointsAt(size_t skinIndex) const noexcept; + const Entity* getJointsAt(size_t skinIndex) const noexcept; + + /** + * Attaches the given skin to the given node, which must have an associated mesh with + * BONE_INDICES and BONE_WEIGHTS attributes. + * + * This is a no-op if the given skin index or target is invalid. + */ + void attachSkin(size_t skinIndex, Entity target) noexcept; + + /** + * Detaches the given skin from the given node. + * + * This is a no-op if the given skin index or target is invalid. + */ + void detachSkin(size_t skinIndex, Entity target) noexcept; /** * Gets the morph target name at the given index in the given entity. */ - const char* getMorphTargetNameAt(utils::Entity entity, size_t targetIndex) const noexcept; + const char* getMorphTargetNameAt(Entity entity, size_t targetIndex) const noexcept; /** * Returns the number of morph targets in the given entity. */ - size_t getMorphTargetCountAt(utils::Entity entity) const noexcept; + size_t getMorphTargetCountAt(Entity entity) const noexcept; /** * Returns the number of material variants in the asset. @@ -261,7 +291,7 @@ public: * Lazily creates a single LINES renderable that draws the transformed bounding-box hierarchy * for diagnostic purposes. The wireframe is owned by the asset so clients should not delete it. */ - utils::Entity getWireframe() noexcept; + Entity getWireframe() noexcept; /** * Returns the Filament engine associated with the AssetLoader that created this asset. 
@@ -282,6 +312,28 @@ public: */ const void* getSourceAsset() noexcept; + /** + * Returns the number of scenes in the asset. + */ + size_t getSceneCount() const noexcept; + + /** + * Returns the name of the given scene. + * + * Returns null if the given scene does not have a name or is out of bounds. + */ + const char* getSceneName(size_t sceneIndex) const noexcept; + + /** + * Adds entities to a Filament scene only if they belong to at least one of the given glTF + * scenes. + * + * This is just a helper that provides an alternative to directly calling scene->addEntities() + * and provides filtering functionality. + */ + void addEntitiesToScene(filament::Scene& targetScene, const Entity* entities, size_t count, + SceneMask sceneFilter); + /*! \cond PRIVATE */ FilamentInstance** getAssetInstances() noexcept; @@ -299,6 +351,6 @@ public: /*! \endcond */ }; -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_FILAMENTASSET_H diff --git a/ios/include/gltfio/FilamentInstance.h b/ios/include/gltfio/FilamentInstance.h index 1f1509e3..60d8e8d9 100644 --- a/ios/include/gltfio/FilamentInstance.h +++ b/ios/include/gltfio/FilamentInstance.h @@ -20,7 +20,7 @@ #include #include -namespace gltfio { +namespace filament::gltfio { class Animator; class FilamentAsset; @@ -76,6 +76,6 @@ public: Animator* getAnimator() noexcept; }; -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_FILAMENTINSTANCE_H diff --git a/ios/include/gltfio/MaterialProvider.h b/ios/include/gltfio/MaterialProvider.h index 9353ea9d..3497a7ef 100644 --- a/ios/include/gltfio/MaterialProvider.h +++ b/ios/include/gltfio/MaterialProvider.h @@ -26,7 +26,7 @@ #include #include -namespace gltfio { +namespace filament::gltfio { enum class AlphaMode : uint8_t { OPAQUE, @@ -118,12 +118,12 @@ inline uint8_t getNumUvSets(const UvMap& uvmap) { * \class MaterialProvider MaterialProvider.h gltfio/MaterialProvider.h * \brief Interface to a provider of glTF materials (has two 
implementations). * - * - The \c MaterialGenerator implementation generates materials at run time (which can be slow) and - * requires the filamat library, but produces streamlined shaders. See createMaterialGenerator(). + * - The \c JitShaderProvider implementation generates materials at run time (which can be slow) and + * requires the filamat library, but produces streamlined shaders. See createJitShaderProvider(). * - * - The \c UbershaderLoader implementation uses a small number of pre-built materials with complex + * - The \c UbershaderProvider implementation uses a small number of pre-built materials with complex * fragment shaders, but does not require any run time work or usage of filamat. See - * createUbershaderLoader(). + * createUbershaderProvider(). * * Both implementations of MaterialProvider maintain a small cache of materials which must be * explicitly freed using destroyMaterials(). These materials are not freed automatically when the @@ -186,23 +186,22 @@ void processShaderString(std::string* shader, const UvMap& uvmap, * * Requires \c libfilamat to be linked in. Not available in \c libgltfio_core. * - * @see createUbershaderLoader + * @see createUbershaderProvider */ UTILS_PUBLIC -MaterialProvider* createMaterialGenerator(filament::Engine* engine, bool optimizeShaders = false); +MaterialProvider* createJitShaderProvider(filament::Engine* engine, bool optimizeShaders = false); /** * Creates a material provider that loads a small set of pre-built materials. * * @return New material provider that can quickly load a material from a cache. * - * Requires \c libgltfio_resources to be linked in. 
- * - * @see createMaterialGenerator + * @see createJitShaderProvider */ UTILS_PUBLIC -MaterialProvider* createUbershaderLoader(filament::Engine* engine); +MaterialProvider* createUbershaderProvider(filament::Engine* engine, const void* archive, + size_t archiveByteCount); -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_MATERIALPROVIDER_H diff --git a/ios/include/gltfio/NodeManager.h b/ios/include/gltfio/NodeManager.h new file mode 100644 index 00000000..04f7bd6d --- /dev/null +++ b/ios/include/gltfio/NodeManager.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GLTFIO_NODEMANAGER_H +#define GLTFIO_NODEMANAGER_H + +#include + +#include +#include +#include +#include +#include + +namespace utils { +class Entity; +} // namespace utils + +namespace filament::gltfio { + +class FNodeManager; + +/** + * NodeManager is used to add annotate entities with glTF-specific information. + * + * Node components are created by gltfio and exposed to users to allow inspection. + * + * Nodes do not store the glTF hierarchy or names; see TransformManager and NameComponentManager. 
+ */ +class UTILS_PUBLIC NodeManager { +public: + using Instance = utils::EntityInstance; + using Entity = utils::Entity; + using CString = utils::CString; + using SceneMask = utils::bitset32; + + static constexpr size_t MAX_SCENE_COUNT = 32; + + /** + * Returns whether a particular Entity is associated with a component of this NodeManager + * @param e An Entity. + * @return true if this Entity has a component associated with this manager. + */ + bool hasComponent(Entity e) const noexcept; + + /** + * Gets an Instance representing the node component associated with the given Entity. + * @param e An Entity. + * @return An Instance object, which represents the node component associated with the Entity e. + * @note Use Instance::isValid() to make sure the component exists. + * @see hasComponent() + */ + Instance getInstance(Entity e) const noexcept; + + /** + * Creates a node component and associates it with the given entity. + * @param entity An Entity to associate a node component with. + * + * If this component already exists on the given entity, it is first destroyed as if + * destroy(Entity e) was called. + * + * @see destroy() + */ + void create(Entity entity); + + /** + * Destroys this component from the given entity. + * @param e An entity. 
+ * + * @see create() + */ + void destroy(Entity e) noexcept; + + void setMorphTargetNames(Instance ci, utils::FixedCapacityVector names) noexcept; + const utils::FixedCapacityVector& getMorphTargetNames(Instance ci) const noexcept; + + void setExtras(Instance ci, CString extras) noexcept; + const CString& getExtras(Instance ci) const noexcept; + + void setSceneMembership(Instance ci, SceneMask scenes) noexcept; + SceneMask getSceneMembership(Instance ci) const noexcept; + +protected: + NodeManager() noexcept = default; + ~NodeManager() = default; + +public: + NodeManager(NodeManager const&) = delete; + NodeManager(NodeManager&&) = delete; + NodeManager& operator=(NodeManager const&) = delete; + NodeManager& operator=(NodeManager&&) = delete; +}; + +} // namespace filament::gltfio + + +#endif // GLTFIO_NODEMANAGER_H diff --git a/ios/include/gltfio/ResourceLoader.h b/ios/include/gltfio/ResourceLoader.h index a92a4f85..d79200cf 100644 --- a/ios/include/gltfio/ResourceLoader.h +++ b/ios/include/gltfio/ResourceLoader.h @@ -27,10 +27,11 @@ namespace filament { class Engine; } -namespace gltfio { +namespace filament::gltfio { struct FFilamentAsset; class AssetPool; +class TextureProvider; /** * \struct ResourceConfiguration ResourceLoader.h gltfio/ResourceLoader.h @@ -53,8 +54,8 @@ struct ResourceConfiguration { //! do not need this, but it is useful for robustness. bool recomputeBoundingBoxes; - //! If true, ignore skinned primitives bind transform when compute bounding box. Implicitly true - //! for instanced asset. Only applicable when recomputeBoundingBoxes is set to true + //! If true, ignores skinning when computing bounding boxes. Implicitly true for instanced + //! assets. Only applicable when recomputeBoundingBoxes is set to true. bool ignoreBindTransform; }; @@ -94,6 +95,13 @@ public: */ void addResourceData(const char* uri, BufferDescriptor&& buffer); + /** + * Register a plugin that can consume PNG / JPEG content and produce filament::Texture objects. 
+ * + * Destruction of the given provider is the client's responsibility. + */ + void addTextureProvider(const char* mimeType, TextureProvider* provider); + /** * Checks if the given resource has already been added to the URI cache. */ @@ -153,7 +161,6 @@ public: private: bool loadResources(FFilamentAsset* asset, bool async); - void applySparseData(FFilamentAsset* asset) const; void normalizeSkinningWeights(FFilamentAsset* asset) const; void updateBoundingBoxes(FFilamentAsset* asset) const; AssetPool* mPool; @@ -161,7 +168,7 @@ private: Impl* pImpl; }; -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_RESOURCELOADER_H diff --git a/ios/include/gltfio/TextureProvider.h b/ios/include/gltfio/TextureProvider.h new file mode 100644 index 00000000..2cabe1e3 --- /dev/null +++ b/ios/include/gltfio/TextureProvider.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GLTFIO_TEXTUREPROVIDER_H +#define GLTFIO_TEXTUREPROVIDER_H + +#include +#include + +#include + +namespace filament { + class Engine; + class Texture; +} + +namespace filament::gltfio { + +/** + * TextureProvider is an interface that allows clients to implement their own texture decoding + * facility for JPEG, PNG, or KTX2 content. It constructs Filament Texture objects synchronously, + * but populates their miplevels asynchronously. 
+ * + * gltfio calls all public methods from the foreground thread, i.e. the thread that the Filament + * engine was created with. However the implementation may create 0 or more background threads to + * perform decoding work. + * + * The following pseudocode illustrates how this interface could be used, but in practice the only + * client is the gltfio ResourceLoader. + * + * filament::Engine* engine = ...; + * TextureProvider* provider = createStbProvider(engine); + * + * for (auto filename : textureFiles) { + * std::vector buf = readEntireFile(filename); + * Texture* texture = provider->pushTexture(buf.data(), buf.size(), "image/png", 0); + * if (texture == nullptr) { puts(provider->getPushMessage()); exit(1); } + * } + * + * // At this point, the returned textures can be bound to material instances, but none of their + * // miplevel images have been populated yet. + * + * while (provider->getPoppedCount() < provider->getPushedCount()) { + * sleep(200); + * + * // The following call gives the provider an opportunity to reap the results of any + * // background decoder work that has been completed (e.g. by calling Texture::setImage). + * provider->updateQueue(); + * + * // Check for textures that now have all their miplevels initialized. + * while (Texture* texture = provider->popTexture()) { + * printf("%p has all its miplevels ready.\n", texture); + * } + * } + * + * delete provider; + */ +class UTILS_PUBLIC TextureProvider { +public: + using Texture = filament::Texture; + using FlagBits = uint64_t; + + enum class Flags { + sRGB = 1 << 0, + }; + + /** + * Creates a Filament texture and pushes it to the asynchronous decoding queue. + * + * If construction fails, nothing is pushed to the queue and null is returned. The failure + * reason can be obtained with getPushMessage(). The given buffer pointer is not held, so the + * caller can free it immediately. 
It is also the caller's responsibility to free the returned + * Texture object, but it is only safe to do so after it has been popped from the queue. + */ + virtual Texture* pushTexture(const uint8_t* data, size_t byteCount, + const char* mimeType, FlagBits flags) = 0; + + /** + * Checks if any texture is ready to be removed from the asynchronous decoding queue, and if so + * pops it off. + * + * Unless an error or cancellation occurred during the decoding process, the returned texture + * should have all its miplevels populated. If the texture is not complete, the reason can be + * obtained with getPopMessage(). + * + * Due to concurrency, textures are not necessarily popped off in the same order they were + * pushed. Returns null if there are no textures that are ready to be popped. + */ + virtual Texture* popTexture() = 0; + + /** + * Polls textures in the queue and uploads mipmap images if any have emerged from the decoder. + * + * This gives the provider an opportunity to call Texture::setImage() on the foreground thread. + * If needed, it can also call Texture::generateMipmaps() here. + * + * Items in the decoding queue can become "poppable" only during this call. + */ + virtual void updateQueue() = 0; + + /** + * Returns a failure message for the most recent call to pushTexture(), or null for success. + * + * Note that this method does not pertain to the decoding process. If decoding fails, clients to + * can pop the incomplete texture off the queue and obtain a failure message using the + * getPopFailure() method. + * + * The returned string is owned by the provider and becomes invalid after the next call to + * pushTexture(). + */ + virtual const char* getPushMessage() const = 0; + + /** + * Returns a failure message for the most recent call to popTexture(), or null for success. + * + * If the most recent call to popTexture() returned null, then no error occurred and this + * returns null. 
If the most recent call to popTexture() returned a "complete" texture (i.e. + * all miplevels present), then this returns null. This returns non-null only if an error or + * cancellation occurred while decoding the popped texture. + * + * The returned string is owned by the provider and becomes invalid after the next call to + * popTexture(). + */ + virtual const char* getPopMessage() const = 0; + + /** + * Waits for all outstanding decoding jobs to complete. + * + * Clients should call updateQueue() afterwards if they wish to update the push / pop queue. + */ + virtual void waitForCompletion() = 0; + + /** + * Cancels all not-yet-started decoding jobs and waits for all other jobs to complete. + * + * Jobs that have already started cannot be canceled. Textures whose decoding process has + * been cancelled will be made poppable on the subsequent call to updateQueue(). + */ + virtual void cancelDecoding() = 0; + + /** Total number of successful push calls since the provider was created. */ + virtual size_t getPushedCount() const = 0; + + /** Total number of successful pop calls since the provider was created. */ + virtual size_t getPoppedCount() const = 0; + + /** Total number of textures that have become ready-to-pop since the provider was created. */ + virtual size_t getDecodedCount() const = 0; + + virtual ~TextureProvider() = default; +}; + +/** + * Creates a simple decoder based on stb_image that can handle "image/png" and "image/jpeg". + * This works only if your build configuration includes STB. + */ +TextureProvider* createStbProvider(filament::Engine* engine); + +/** + * Creates a decoder that can handle certain types of "image/ktx2" content as specified in + * the KHR_texture_basisu specification. 
+ */ +TextureProvider* createKtx2Provider(filament::Engine* engine); + +} // namespace filament::gltfio + +#endif // GLTFIO_TEXTUREPROVIDER_H diff --git a/ios/include/gltfio/materials/uberarchive.h b/ios/include/gltfio/materials/uberarchive.h new file mode 100644 index 00000000..b36ddd36 --- /dev/null +++ b/ios/include/gltfio/materials/uberarchive.h @@ -0,0 +1,13 @@ +#ifndef UBERARCHIVE_H_ +#define UBERARCHIVE_H_ + +#include + +extern "C" { + extern const uint8_t UBERARCHIVE_PACKAGE[]; + extern int UBERARCHIVE_DEFAULT_OFFSET; + extern int UBERARCHIVE_DEFAULT_SIZE; +} +#define UBERARCHIVE_DEFAULT_DATA (UBERARCHIVE_PACKAGE + UBERARCHIVE_DEFAULT_OFFSET) + +#endif diff --git a/ios/include/gltfio/math.h b/ios/include/gltfio/math.h index f94d3cc7..a971122d 100644 --- a/ios/include/gltfio/math.h +++ b/ios/include/gltfio/math.h @@ -23,7 +23,7 @@ #include #include -namespace gltfio { +namespace filament::gltfio { template UTILS_PUBLIC T cubicSpline(const T& vert0, const T& tang0, const T& vert1, const T& tang1, float t) { @@ -121,6 +121,6 @@ inline filament::math::mat3f matrixFromUvTransform(const float offset[2], float return filament::math::mat3f(sx * c, sx * s, tx, -sy * s, sy * c, ty, 0.0f, 0.0f, 1.0f); }; -} // namespace gltfio +} // namespace filament::gltfio #endif // GLTFIO_MATH_H diff --git a/ios/include/gltfio/resources/gltfresources.h b/ios/include/gltfio/resources/gltfresources.h deleted file mode 100644 index a820383e..00000000 --- a/ios/include/gltfio/resources/gltfresources.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef GLTFRESOURCES_H_ -#define GLTFRESOURCES_H_ - -#include - -extern "C" { - extern const uint8_t GLTFRESOURCES_PACKAGE[]; - extern int GLTFRESOURCES_LIT_FADE_OFFSET; - extern int GLTFRESOURCES_LIT_FADE_SIZE; - extern int GLTFRESOURCES_LIT_OPAQUE_OFFSET; - extern int GLTFRESOURCES_LIT_OPAQUE_SIZE; - extern int GLTFRESOURCES_LIT_MASKED_OFFSET; - extern int GLTFRESOURCES_LIT_MASKED_SIZE; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_FADE_OFFSET; - extern 
int GLTFRESOURCES_SPECULARGLOSSINESS_FADE_SIZE; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_OFFSET; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_SIZE; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_OFFSET; - extern int GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_SIZE; - extern int GLTFRESOURCES_UNLIT_FADE_OFFSET; - extern int GLTFRESOURCES_UNLIT_FADE_SIZE; - extern int GLTFRESOURCES_UNLIT_OPAQUE_OFFSET; - extern int GLTFRESOURCES_UNLIT_OPAQUE_SIZE; - extern int GLTFRESOURCES_UNLIT_MASKED_OFFSET; - extern int GLTFRESOURCES_UNLIT_MASKED_SIZE; - extern int GLTFRESOURCES_LIT_VOLUME_OFFSET; - extern int GLTFRESOURCES_LIT_VOLUME_SIZE; - extern int GLTFRESOURCES_LIT_TRANSMISSION_OFFSET; - extern int GLTFRESOURCES_LIT_TRANSMISSION_SIZE; - extern int GLTFRESOURCES_LIT_SHEEN_OFFSET; - extern int GLTFRESOURCES_LIT_SHEEN_SIZE; -} -#define GLTFRESOURCES_LIT_FADE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_FADE_OFFSET) -#define GLTFRESOURCES_LIT_OPAQUE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_OPAQUE_OFFSET) -#define GLTFRESOURCES_LIT_MASKED_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_MASKED_OFFSET) -#define GLTFRESOURCES_SPECULARGLOSSINESS_FADE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_SPECULARGLOSSINESS_FADE_OFFSET) -#define GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_SPECULARGLOSSINESS_OPAQUE_OFFSET) -#define GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_SPECULARGLOSSINESS_MASKED_OFFSET) -#define GLTFRESOURCES_UNLIT_FADE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_UNLIT_FADE_OFFSET) -#define GLTFRESOURCES_UNLIT_OPAQUE_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_UNLIT_OPAQUE_OFFSET) -#define GLTFRESOURCES_UNLIT_MASKED_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_UNLIT_MASKED_OFFSET) -#define GLTFRESOURCES_LIT_VOLUME_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_VOLUME_OFFSET) -#define GLTFRESOURCES_LIT_TRANSMISSION_DATA (GLTFRESOURCES_PACKAGE + 
GLTFRESOURCES_LIT_TRANSMISSION_OFFSET) -#define GLTFRESOURCES_LIT_SHEEN_DATA (GLTFRESOURCES_PACKAGE + GLTFRESOURCES_LIT_SHEEN_OFFSET) - -#endif diff --git a/ios/include/gltfio/resources/gltfresources_lite.h b/ios/include/gltfio/resources/gltfresources_lite.h deleted file mode 100644 index 8bef16f4..00000000 --- a/ios/include/gltfio/resources/gltfresources_lite.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef GLTFRESOURCES_LITE_H_ -#define GLTFRESOURCES_LITE_H_ - -#include - -extern "C" { - extern const uint8_t GLTFRESOURCES_LITE_PACKAGE[]; - extern int GLTFRESOURCES_LITE_LIT_OPAQUE_OFFSET; - extern int GLTFRESOURCES_LITE_LIT_OPAQUE_SIZE; - extern int GLTFRESOURCES_LITE_LIT_FADE_OFFSET; - extern int GLTFRESOURCES_LITE_LIT_FADE_SIZE; -} -#define GLTFRESOURCES_LITE_LIT_OPAQUE_DATA (GLTFRESOURCES_LITE_PACKAGE + GLTFRESOURCES_LITE_LIT_OPAQUE_OFFSET) -#define GLTFRESOURCES_LITE_LIT_FADE_DATA (GLTFRESOURCES_LITE_PACKAGE + GLTFRESOURCES_LITE_LIT_FADE_OFFSET) - -#endif diff --git a/ios/include/image/ColorTransform.h b/ios/include/image/ColorTransform.h index 334c21b0..8986cfd3 100644 --- a/ios/include/image/ColorTransform.h +++ b/ios/include/image/ColorTransform.h @@ -158,8 +158,8 @@ inline filament::math::float3 linearToSRGB(const filament::math::float3& color) return sRGBColor; } -// Creates a n-channel sRGB image from a linear floating-point image. -// The source image can have more than N channels, but only the first N are converted to sRGB. +// Creates a N-channel sRGB image from a linear floating-point image. +// The source image can have more than N channels, but only the first 3 are converted to sRGB. 
template std::unique_ptr fromLinearTosRGB(const LinearImage& image) { const size_t w = image.getWidth(); diff --git a/ios/include/image/KtxBundle.h b/ios/include/image/Ktx1Bundle.h similarity index 96% rename from ios/include/image/KtxBundle.h rename to ios/include/image/Ktx1Bundle.h index 3975a644..d408dc7c 100644 --- a/ios/include/image/KtxBundle.h +++ b/ios/include/image/Ktx1Bundle.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef IMAGE_KTXBUNDLE_H -#define IMAGE_KTXBUNDLE_H +#ifndef IMAGE_KTX1BUNDLE_H +#define IMAGE_KTX1BUNDLE_H #include @@ -48,7 +48,7 @@ struct KtxBlobList; struct KtxMetadata; /** - * KtxBundle is a structured set of opaque data blobs that can be passed straight to the GPU, such + * Ktx1Bundle is a structured set of opaque data blobs that can be passed straight to the GPU, such * that a single bundle corresponds to a single texture object. It is well suited for storing * block-compressed texture data. * @@ -63,22 +63,22 @@ struct KtxMetadata; * * https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/ */ -class UTILS_PUBLIC KtxBundle { +class UTILS_PUBLIC Ktx1Bundle { public: - ~KtxBundle(); + ~Ktx1Bundle(); /** * Creates a hierarchy of empty texture blobs, to be filled later via setBlob(). */ - KtxBundle(uint32_t numMipLevels, uint32_t arrayLength, bool isCubemap); + Ktx1Bundle(uint32_t numMipLevels, uint32_t arrayLength, bool isCubemap); /** * Creates a new bundle by deserializing the given data. * * Typically, this constructor is used to consume the contents of a KTX file. */ - KtxBundle(uint8_t const* bytes, uint32_t nbytes); + Ktx1Bundle(uint8_t const* bytes, uint32_t nbytes); /** * Serializes the bundle into the given target memory. 
Returns false if there's not enough @@ -276,4 +276,4 @@ private: } // namespace image -#endif /* IMAGE_KTXBUNDLE_H */ +#endif /* IMAGE_Ktx1Bundle_H */ diff --git a/ios/include/image/KtxUtility.h b/ios/include/image/KtxUtility.h deleted file mode 100644 index 7ff4d89f..00000000 --- a/ios/include/image/KtxUtility.h +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef IMAGE_KTXUTILITY_H -#define IMAGE_KTXUTILITY_H - -#include -#include - -#include - -namespace image { - -/** - * Allows clients to create Filament textures from KtxBundle objects. - * - * Note that libimage does not have a dependency on libfilament, so for simplicity this is a - * header-only library with inlined functions. 
- */ -namespace ktx { - - using Texture = filament::Texture; - using Engine = filament::Engine; - - using TextureFormat = Texture::InternalFormat; - using CompressedPixelDataType = Texture::CompressedType; - using PixelDataType = Texture::Type; - using PixelDataFormat = Texture::Format; - using PixelBufferDescriptor = Texture::PixelBufferDescriptor; - - using Callback = void(*)(void* userdata); - - CompressedPixelDataType toCompressedPixelDataType(const KtxInfo& info); - PixelDataType toPixelDataType(const KtxInfo& info); - PixelDataFormat toPixelDataFormat(const KtxInfo& info); - bool isCompressed(const KtxInfo& info); - TextureFormat toTextureFormat(const KtxInfo& info); - TextureFormat toSrgbTextureFormat(TextureFormat tex); - - /** - * Creates a Texture object from a KTX file and populates all of its faces and miplevels. - * - * @param engine Used to create the Filament Texture - * @param ktx In-memory representation of a KTX file - * @param srgb Forces the KTX-specified format into an SRGB format if possible - * @param callback Gets called after all texture data has been uploaded to the GPU - * @param userdata Passed into the callback - */ - inline Texture* createTexture(Engine* engine, const KtxBundle& ktx, bool srgb, - Callback callback, void* userdata) { - using Sampler = Texture::Sampler; - const auto& ktxinfo = ktx.getInfo(); - const uint32_t nmips = ktx.getNumMipLevels(); - const auto cdatatype = toCompressedPixelDataType(ktxinfo); - const auto datatype = toPixelDataType(ktxinfo); - const auto dataformat = toPixelDataFormat(ktxinfo); - - auto texformat = toTextureFormat(ktxinfo); - if (srgb) { - texformat = toSrgbTextureFormat(texformat); - } - - Texture* texture = Texture::Builder() - .width(ktxinfo.pixelWidth) - .height(ktxinfo.pixelHeight) - .levels(static_cast(nmips)) - .sampler(ktx.isCubemap() ? 
Sampler::SAMPLER_CUBEMAP : Sampler::SAMPLER_2D) - .format(texformat) - .build(*engine); - - struct Userdata { - uint32_t remainingBuffers; - Callback callback; - void* userdata; - }; - - Userdata* cbuser = new Userdata({nmips, callback, userdata}); - - PixelBufferDescriptor::Callback cb = [](void*, size_t, void* cbuserptr) { - Userdata* cbuser = (Userdata*) cbuserptr; - if (--cbuser->remainingBuffers == 0) { - if (cbuser->callback) { - cbuser->callback(cbuser->userdata); - } - delete cbuser; - } - }; - - uint8_t* data; - uint32_t size; - - if (isCompressed(ktxinfo)) { - if (ktx.isCubemap()) { - for (uint32_t level = 0; level < nmips; ++level) { - ktx.getBlob({level, 0, 0}, &data, &size); - PixelBufferDescriptor pbd(data, size * 6, cdatatype, size, cb, cbuser); - texture->setImage(*engine, level, std::move(pbd), Texture::FaceOffsets(size)); - } - return texture; - } - for (uint32_t level = 0; level < nmips; ++level) { - ktx.getBlob({level, 0, 0}, &data, &size); - PixelBufferDescriptor pbd(data, size, cdatatype, size, cb, cbuser); - texture->setImage(*engine, level, std::move(pbd)); - } - return texture; - } - - if (ktx.isCubemap()) { - for (uint32_t level = 0; level < nmips; ++level) { - ktx.getBlob({level, 0, 0}, &data, &size); - PixelBufferDescriptor pbd(data, size * 6, dataformat, datatype, cb, cbuser); - texture->setImage(*engine, level, std::move(pbd), Texture::FaceOffsets(size)); - } - return texture; - } - - for (uint32_t level = 0; level < nmips; ++level) { - ktx.getBlob({level, 0, 0}, &data, &size); - PixelBufferDescriptor pbd(data, size, dataformat, datatype, cb, cbuser); - texture->setImage(*engine, level, std::move(pbd)); - } - return texture; - } - - /** - * Creates a Texture object from a KTX bundle, populates all of its faces and miplevels, - * and automatically destroys the bundle after all the texture data has been uploaded. 
- * - * @param engine Used to create the Filament Texture - * @param ktx In-memory representation of a KTX file - * @param srgb Forces the KTX-specified format into an SRGB format if possible - */ - inline Texture* createTexture(Engine* engine, KtxBundle* ktx, bool srgb) { - auto freeKtx = [] (void* userdata) { - KtxBundle* ktx = (KtxBundle*) userdata; - delete ktx; - }; - return createTexture(engine, *ktx, srgb, freeKtx, ktx); - } - - template - T toCompressedFilamentEnum(uint32_t format) { - switch (format) { - case KtxBundle::RGB_S3TC_DXT1: return T::DXT1_RGB; - case KtxBundle::RGBA_S3TC_DXT1: return T::DXT1_RGBA; - case KtxBundle::RGBA_S3TC_DXT3: return T::DXT3_RGBA; - case KtxBundle::RGBA_S3TC_DXT5: return T::DXT5_RGBA; - case KtxBundle::RGBA_ASTC_4x4: return T::RGBA_ASTC_4x4; - case KtxBundle::RGBA_ASTC_5x4: return T::RGBA_ASTC_5x4; - case KtxBundle::RGBA_ASTC_5x5: return T::RGBA_ASTC_5x5; - case KtxBundle::RGBA_ASTC_6x5: return T::RGBA_ASTC_6x5; - case KtxBundle::RGBA_ASTC_6x6: return T::RGBA_ASTC_6x6; - case KtxBundle::RGBA_ASTC_8x5: return T::RGBA_ASTC_8x5; - case KtxBundle::RGBA_ASTC_8x6: return T::RGBA_ASTC_8x6; - case KtxBundle::RGBA_ASTC_8x8: return T::RGBA_ASTC_8x8; - case KtxBundle::RGBA_ASTC_10x5: return T::RGBA_ASTC_10x5; - case KtxBundle::RGBA_ASTC_10x6: return T::RGBA_ASTC_10x6; - case KtxBundle::RGBA_ASTC_10x8: return T::RGBA_ASTC_10x8; - case KtxBundle::RGBA_ASTC_10x10: return T::RGBA_ASTC_10x10; - case KtxBundle::RGBA_ASTC_12x10: return T::RGBA_ASTC_12x10; - case KtxBundle::RGBA_ASTC_12x12: return T::RGBA_ASTC_12x12; - case KtxBundle::SRGB8_ALPHA8_ASTC_4x4: return T::SRGB8_ALPHA8_ASTC_4x4; - case KtxBundle::SRGB8_ALPHA8_ASTC_5x4: return T::SRGB8_ALPHA8_ASTC_5x4; - case KtxBundle::SRGB8_ALPHA8_ASTC_5x5: return T::SRGB8_ALPHA8_ASTC_5x5; - case KtxBundle::SRGB8_ALPHA8_ASTC_6x5: return T::SRGB8_ALPHA8_ASTC_6x5; - case KtxBundle::SRGB8_ALPHA8_ASTC_6x6: return T::SRGB8_ALPHA8_ASTC_6x6; - case KtxBundle::SRGB8_ALPHA8_ASTC_8x5: return 
T::SRGB8_ALPHA8_ASTC_8x5; - case KtxBundle::SRGB8_ALPHA8_ASTC_8x6: return T::SRGB8_ALPHA8_ASTC_8x6; - case KtxBundle::SRGB8_ALPHA8_ASTC_8x8: return T::SRGB8_ALPHA8_ASTC_8x8; - case KtxBundle::SRGB8_ALPHA8_ASTC_10x5: return T::SRGB8_ALPHA8_ASTC_10x5; - case KtxBundle::SRGB8_ALPHA8_ASTC_10x6: return T::SRGB8_ALPHA8_ASTC_10x6; - case KtxBundle::SRGB8_ALPHA8_ASTC_10x8: return T::SRGB8_ALPHA8_ASTC_10x8; - case KtxBundle::SRGB8_ALPHA8_ASTC_10x10: return T::SRGB8_ALPHA8_ASTC_10x10; - case KtxBundle::SRGB8_ALPHA8_ASTC_12x10: return T::SRGB8_ALPHA8_ASTC_12x10; - case KtxBundle::SRGB8_ALPHA8_ASTC_12x12: return T::SRGB8_ALPHA8_ASTC_12x12; - case KtxBundle::R11_EAC: return T::EAC_R11; - case KtxBundle::SIGNED_R11_EAC: return T::EAC_R11_SIGNED; - case KtxBundle::RG11_EAC: return T::EAC_RG11; - case KtxBundle::SIGNED_RG11_EAC: return T::EAC_RG11_SIGNED; - case KtxBundle::RGB8_ETC2: return T::ETC2_RGB8; - case KtxBundle::SRGB8_ETC2: return T::ETC2_SRGB8; - case KtxBundle::RGB8_ALPHA1_ETC2: return T::ETC2_RGB8_A1; - case KtxBundle::SRGB8_ALPHA1_ETC: return T::ETC2_SRGB8_A1; - case KtxBundle::RGBA8_ETC2_EAC: return T::ETC2_EAC_RGBA8; - case KtxBundle::SRGB8_ALPHA8_ETC2_EAC: return T::ETC2_EAC_SRGBA8; - } - return (T) 0xffff; - } - - inline CompressedPixelDataType toCompressedPixelDataType(const KtxInfo& info) { - return toCompressedFilamentEnum(info.glInternalFormat); - } - - inline PixelDataType toPixelDataType(const KtxInfo& info) { - switch (info.glType) { - case KtxBundle::UNSIGNED_BYTE: return PixelDataType::UBYTE; - case KtxBundle::UNSIGNED_SHORT: return PixelDataType::USHORT; - case KtxBundle::HALF_FLOAT: return PixelDataType::HALF; - case KtxBundle::FLOAT: return PixelDataType::FLOAT; - case KtxBundle::R11F_G11F_B10F: return PixelDataType::UINT_10F_11F_11F_REV; - } - return (PixelDataType) 0xff; - } - - inline PixelDataFormat toPixelDataFormat(const KtxInfo& info) { - switch (info.glFormat) { - case KtxBundle::LUMINANCE: - case KtxBundle::RED: return PixelDataFormat::R; - 
case KtxBundle::RG: return PixelDataFormat::RG; - case KtxBundle::RGB: return PixelDataFormat::RGB; - case KtxBundle::RGBA: return PixelDataFormat::RGBA; - // glFormat should NOT be a sized format according to the spec - // however cmgen was generating incorrect files until after Filament 1.8.0 - // so we keep this line here to preserve compatibility with older assets - case KtxBundle::R11F_G11F_B10F: return PixelDataFormat::RGB; - } - return (PixelDataFormat) 0xff; - } - - inline bool isCompressed(const KtxInfo& info) { - return info.glFormat == 0; - } - - inline TextureFormat toSrgbTextureFormat(TextureFormat format) { - switch(format) { - // Non-compressed - case Texture::InternalFormat::RGB8: - return Texture::InternalFormat::SRGB8; - case Texture::InternalFormat::RGBA8: - return Texture::InternalFormat::SRGB8_A8; - - // ASTC - case Texture::InternalFormat::RGBA_ASTC_4x4: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_4x4; - case Texture::InternalFormat::RGBA_ASTC_5x4: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_5x4; - case Texture::InternalFormat::RGBA_ASTC_5x5: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_5x5; - case Texture::InternalFormat::RGBA_ASTC_6x5: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_6x5; - case Texture::InternalFormat::RGBA_ASTC_6x6: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_6x6; - case Texture::InternalFormat::RGBA_ASTC_8x5: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_8x5; - case Texture::InternalFormat::RGBA_ASTC_8x6: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_8x6; - case Texture::InternalFormat::RGBA_ASTC_8x8: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_8x8; - case Texture::InternalFormat::RGBA_ASTC_10x5: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_10x5; - case Texture::InternalFormat::RGBA_ASTC_10x6: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_10x6; - case Texture::InternalFormat::RGBA_ASTC_10x8: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_10x8; - case 
Texture::InternalFormat::RGBA_ASTC_10x10: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_10x10; - case Texture::InternalFormat::RGBA_ASTC_12x10: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_12x10; - case Texture::InternalFormat::RGBA_ASTC_12x12: - return Texture::InternalFormat::SRGB8_ALPHA8_ASTC_12x12; - - // ETC2 - case Texture::InternalFormat::ETC2_RGB8: - return Texture::InternalFormat::ETC2_SRGB8; - case Texture::InternalFormat::ETC2_RGB8_A1: - return Texture::InternalFormat::ETC2_SRGB8_A1; - case Texture::InternalFormat::ETC2_EAC_RGBA8: - return Texture::InternalFormat::ETC2_EAC_SRGBA8; - - // DXT - case Texture::InternalFormat::DXT1_RGB: - return Texture::InternalFormat::DXT1_SRGB; - case Texture::InternalFormat::DXT1_RGBA: - return Texture::InternalFormat::DXT1_SRGBA; - case Texture::InternalFormat::DXT3_RGBA: - return Texture::InternalFormat::DXT3_SRGBA; - case Texture::InternalFormat::DXT5_RGBA: - return Texture::InternalFormat::DXT5_SRGBA; - - default: - return format; - } - } - - inline TextureFormat toTextureFormat(const KtxInfo& info) { - switch (info.glInternalFormat) { - case KtxBundle::RED: return TextureFormat::R8; - case KtxBundle::RG: return TextureFormat::RG8; - case KtxBundle::RGB: return TextureFormat::RGB8; - case KtxBundle::RGBA: return TextureFormat::RGBA8; - case KtxBundle::LUMINANCE: return TextureFormat::R8; - case KtxBundle::LUMINANCE_ALPHA: return TextureFormat::RG8; - case KtxBundle::R8: return TextureFormat::R8; - case KtxBundle::R8_SNORM: return TextureFormat::R8_SNORM; - case KtxBundle::R8UI: return TextureFormat::R8UI; - case KtxBundle::R8I: return TextureFormat::R8I; - case KtxBundle::STENCIL_INDEX8: return TextureFormat::STENCIL8; - case KtxBundle::R16F: return TextureFormat::R16F; - case KtxBundle::R16UI: return TextureFormat::R16UI; - case KtxBundle::R16I: return TextureFormat::R16I; - case KtxBundle::RG8: return TextureFormat::RG8; - case KtxBundle::RG8_SNORM: return TextureFormat::RG8_SNORM; - case 
KtxBundle::RG8UI: return TextureFormat::RG8UI; - case KtxBundle::RG8I: return TextureFormat::RG8I; - case KtxBundle::RGB565: return TextureFormat::RGB565; - case KtxBundle::RGB9_E5: return TextureFormat::RGB9_E5; - case KtxBundle::RGB5_A1: return TextureFormat::RGB5_A1; - case KtxBundle::RGBA4: return TextureFormat::RGBA4; - case KtxBundle::DEPTH_COMPONENT16: return TextureFormat::DEPTH16; - case KtxBundle::RGB8: return TextureFormat::RGB8; - case KtxBundle::SRGB8: return TextureFormat::SRGB8; - case KtxBundle::RGB8_SNORM: return TextureFormat::RGB8_SNORM; - case KtxBundle::RGB8UI: return TextureFormat::RGB8UI; - case KtxBundle::RGB8I: return TextureFormat::RGB8I; - case KtxBundle::R32F: return TextureFormat::R32F; - case KtxBundle::R32UI: return TextureFormat::R32UI; - case KtxBundle::R32I: return TextureFormat::R32I; - case KtxBundle::RG16F: return TextureFormat::RG16F; - case KtxBundle::RG16UI: return TextureFormat::RG16UI; - case KtxBundle::RG16I: return TextureFormat::RG16I; - case KtxBundle::R11F_G11F_B10F: return TextureFormat::R11F_G11F_B10F; - case KtxBundle::RGBA8: return TextureFormat::RGBA8; - case KtxBundle::SRGB8_ALPHA8: return TextureFormat::SRGB8_A8; - case KtxBundle::RGBA8_SNORM: return TextureFormat::RGBA8_SNORM; - case KtxBundle::RGB10_A2: return TextureFormat::RGB10_A2; - case KtxBundle::RGBA8UI: return TextureFormat::RGBA8UI; - case KtxBundle::RGBA8I: return TextureFormat::RGBA8I; - case KtxBundle::DEPTH24_STENCIL8: return TextureFormat::DEPTH24_STENCIL8; - case KtxBundle::DEPTH32F_STENCIL8: return TextureFormat::DEPTH32F_STENCIL8; - case KtxBundle::RGB16F: return TextureFormat::RGB16F; - case KtxBundle::RGB16UI: return TextureFormat::RGB16UI; - case KtxBundle::RGB16I: return TextureFormat::RGB16I; - case KtxBundle::RG32F: return TextureFormat::RG32F; - case KtxBundle::RG32UI: return TextureFormat::RG32UI; - case KtxBundle::RG32I: return TextureFormat::RG32I; - case KtxBundle::RGBA16F: return TextureFormat::RGBA16F; - case KtxBundle::RGBA16UI: 
return TextureFormat::RGBA16UI; - case KtxBundle::RGBA16I: return TextureFormat::RGBA16I; - case KtxBundle::RGB32F: return TextureFormat::RGB32F; - case KtxBundle::RGB32UI: return TextureFormat::RGB32UI; - case KtxBundle::RGB32I: return TextureFormat::RGB32I; - case KtxBundle::RGBA32F: return TextureFormat::RGBA32F; - case KtxBundle::RGBA32UI: return TextureFormat::RGBA32UI; - case KtxBundle::RGBA32I: return TextureFormat::RGBA32I; - } - return toCompressedFilamentEnum(info.glInternalFormat); - } - -} // namespace ktx - -} // namespace image - -#endif diff --git a/ios/include/ktxreader/Ktx1Reader.h b/ios/include/ktxreader/Ktx1Reader.h new file mode 100644 index 00000000..5bd68918 --- /dev/null +++ b/ios/include/ktxreader/Ktx1Reader.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef KTXREADER_KTX1READER_H +#define KTXREADER_KTX1READER_H + +#include + +#include + +namespace filament { + class Engine; +} + +namespace ktxreader { + +using KtxInfo = image::KtxInfo; +using Ktx1Bundle = image::Ktx1Bundle; + +/** + * Allows clients to create Filament textures from Ktx1Bundle objects. 
+ */ +namespace Ktx1Reader { + + using Texture = filament::Texture; + using Engine = filament::Engine; + + using TextureFormat = Texture::InternalFormat; + using CompressedPixelDataType = Texture::CompressedType; + using PixelDataType = Texture::Type; + using PixelDataFormat = Texture::Format; + using PixelBufferDescriptor = Texture::PixelBufferDescriptor; + + using Callback = void(*)(void* userdata); + + CompressedPixelDataType toCompressedPixelDataType(const KtxInfo& info); + PixelDataType toPixelDataType(const KtxInfo& info); + PixelDataFormat toPixelDataFormat(const KtxInfo& info); + bool isCompressed(const KtxInfo& info); + TextureFormat toTextureFormat(const KtxInfo& info); + TextureFormat toSrgbTextureFormat(TextureFormat tex); + + template + T toCompressedFilamentEnum(uint32_t format) { + switch (format) { + case Ktx1Bundle::RGB_S3TC_DXT1: return T::DXT1_RGB; + case Ktx1Bundle::RGBA_S3TC_DXT1: return T::DXT1_RGBA; + case Ktx1Bundle::RGBA_S3TC_DXT3: return T::DXT3_RGBA; + case Ktx1Bundle::RGBA_S3TC_DXT5: return T::DXT5_RGBA; + case Ktx1Bundle::RGBA_ASTC_4x4: return T::RGBA_ASTC_4x4; + case Ktx1Bundle::RGBA_ASTC_5x4: return T::RGBA_ASTC_5x4; + case Ktx1Bundle::RGBA_ASTC_5x5: return T::RGBA_ASTC_5x5; + case Ktx1Bundle::RGBA_ASTC_6x5: return T::RGBA_ASTC_6x5; + case Ktx1Bundle::RGBA_ASTC_6x6: return T::RGBA_ASTC_6x6; + case Ktx1Bundle::RGBA_ASTC_8x5: return T::RGBA_ASTC_8x5; + case Ktx1Bundle::RGBA_ASTC_8x6: return T::RGBA_ASTC_8x6; + case Ktx1Bundle::RGBA_ASTC_8x8: return T::RGBA_ASTC_8x8; + case Ktx1Bundle::RGBA_ASTC_10x5: return T::RGBA_ASTC_10x5; + case Ktx1Bundle::RGBA_ASTC_10x6: return T::RGBA_ASTC_10x6; + case Ktx1Bundle::RGBA_ASTC_10x8: return T::RGBA_ASTC_10x8; + case Ktx1Bundle::RGBA_ASTC_10x10: return T::RGBA_ASTC_10x10; + case Ktx1Bundle::RGBA_ASTC_12x10: return T::RGBA_ASTC_12x10; + case Ktx1Bundle::RGBA_ASTC_12x12: return T::RGBA_ASTC_12x12; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_4x4: return T::SRGB8_ALPHA8_ASTC_4x4; + case 
Ktx1Bundle::SRGB8_ALPHA8_ASTC_5x4: return T::SRGB8_ALPHA8_ASTC_5x4; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_5x5: return T::SRGB8_ALPHA8_ASTC_5x5; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_6x5: return T::SRGB8_ALPHA8_ASTC_6x5; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_6x6: return T::SRGB8_ALPHA8_ASTC_6x6; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_8x5: return T::SRGB8_ALPHA8_ASTC_8x5; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_8x6: return T::SRGB8_ALPHA8_ASTC_8x6; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_8x8: return T::SRGB8_ALPHA8_ASTC_8x8; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_10x5: return T::SRGB8_ALPHA8_ASTC_10x5; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_10x6: return T::SRGB8_ALPHA8_ASTC_10x6; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_10x8: return T::SRGB8_ALPHA8_ASTC_10x8; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_10x10: return T::SRGB8_ALPHA8_ASTC_10x10; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_12x10: return T::SRGB8_ALPHA8_ASTC_12x10; + case Ktx1Bundle::SRGB8_ALPHA8_ASTC_12x12: return T::SRGB8_ALPHA8_ASTC_12x12; + case Ktx1Bundle::R11_EAC: return T::EAC_R11; + case Ktx1Bundle::SIGNED_R11_EAC: return T::EAC_R11_SIGNED; + case Ktx1Bundle::RG11_EAC: return T::EAC_RG11; + case Ktx1Bundle::SIGNED_RG11_EAC: return T::EAC_RG11_SIGNED; + case Ktx1Bundle::RGB8_ETC2: return T::ETC2_RGB8; + case Ktx1Bundle::SRGB8_ETC2: return T::ETC2_SRGB8; + case Ktx1Bundle::RGB8_ALPHA1_ETC2: return T::ETC2_RGB8_A1; + case Ktx1Bundle::SRGB8_ALPHA1_ETC: return T::ETC2_SRGB8_A1; + case Ktx1Bundle::RGBA8_ETC2_EAC: return T::ETC2_EAC_RGBA8; + case Ktx1Bundle::SRGB8_ALPHA8_ETC2_EAC: return T::ETC2_EAC_SRGBA8; + } + return (T) 0xffff; + } + + /** + * Creates a Texture object from a KTX file and populates all of its faces and miplevels. 
+ * + * @param engine Used to create the Filament Texture + * @param ktx In-memory representation of a KTX file + * @param srgb Requests an sRGB format from the KTX file + * @param callback Gets called after all texture data has been uploaded to the GPU + * @param userdata Passed into the callback + */ + Texture* createTexture(Engine* engine, const Ktx1Bundle& ktx, bool srgb, + Callback callback, void* userdata); + + /** + * Creates a Texture object from a KTX bundle, populates all of its faces and miplevels, + * and automatically destroys the bundle after all the texture data has been uploaded. + * + * @param engine Used to create the Filament Texture + * @param ktx In-memory representation of a KTX file + * @param srgb Requests an sRGB format from the KTX file + */ + Texture* createTexture(Engine* engine, Ktx1Bundle* ktx, bool srgb); + + CompressedPixelDataType toCompressedPixelDataType(const KtxInfo& info); + + PixelDataType toPixelDataType(const KtxInfo& info); + + PixelDataFormat toPixelDataFormat(const KtxInfo& info); + + bool isCompressed(const KtxInfo& info); + + bool isSrgbTextureFormat(TextureFormat format); + + TextureFormat toTextureFormat(const KtxInfo& info); + +} // namespace Ktx1Reader +} // namespace ktxreader + +#endif diff --git a/ios/include/ktxreader/Ktx2Reader.h b/ios/include/ktxreader/Ktx2Reader.h new file mode 100644 index 00000000..91c5a537 --- /dev/null +++ b/ios/include/ktxreader/Ktx2Reader.h @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef KTXREADER_KTX2READER_H +#define KTXREADER_KTX2READER_H + +#include +#include + +#include + +#include + +namespace filament { + class Engine; +} + +namespace basist { + class ktx2_transcoder; +} + +namespace ktxreader { + +class Ktx2Reader { + public: + using Engine = filament::Engine; + using Texture = filament::Texture; + enum class TransferFunction { LINEAR, sRGB }; + + enum class Result { + SUCCESS, + COMPRESSED_TRANSCODE_FAILURE, + UNCOMPRESSED_TRANSCODE_FAILURE, + FORMAT_UNSUPPORTED, + FORMAT_ALREADY_REQUESTED, + }; + + Ktx2Reader(Engine& engine, bool quiet = false); + ~Ktx2Reader(); + + /** + * Requests that the reader constructs Filament textures with given internal format. + * + * This MUST be called at least once before calling load(). + * + * As a reminder, a basis-encoded KTX2 can be quickly transcoded to any number of formats, + * so you need to tell it what formats your hw supports. That's why this method exists. + * + * Call requestFormat as many times as needed; formats that are submitted early are + * considered higher priority. + * + * If BasisU knows a priori that the given format is not available (e.g. if the build has + * disabled it), the format is not added and FORMAT_UNSUPPORTED is returned. + * + * Returns FORMAT_ALREADY_REQUESTED if the given format has already been requested. + * + * Hint: BasisU supports the following uncompressed formats: RGBA8, RGB565, RGBA4. + */ + Result requestFormat(Texture::InternalFormat format) noexcept; + + /** + * Removes the given format from the list, or does nothing if it hasn't been requested. + */ + void unrequestFormat(Texture::InternalFormat format) noexcept; + + /** + * Attempts to create and load a Filament texture from the given KTX2 blob. + * + * If none of the requested formats can be extracted from the data, this returns null. 
+ * + * This method iterates through the requested format list, checking each one against the + * platform's capabilities and its availability from the transcoder. When a suitable format + * is determined, it then performs lossless decompression (zstd) before transcoding the data + * into the final format. + * + * The transfer function specified here is used in two ways: + * 1) It is checked against the transfer function that was specified as metadata + * in the KTX2 blob. If they do not match, this method fails. + * 2) It is used as a filter when determining the final internal format. + */ + Texture* load(const void* data, size_t size, TransferFunction transfer); + + /** + * Asynchronous Interface + * ====================== + * + * Alternative API suitable for asynchronous transcoding of mipmap levels. + * If unsure that you need to use this, then don't, just call load() instead. + * Usage pseudocode: + * + * auto async = reader->asyncCreate(data, size, TransferFunction::LINEAR); + * mTexture = async->getTexture(); + * auto backgroundThread = spawnThread({ async->doTranscoding(); }) + * backgroundThread.wait(); + * async->uploadImages(); + * reader->asyncDestroy(async); + * + * In the documentation comments, "foreground thread" refers to the thread that the + * Filament Engine was created on. + */ + class Async { + public: + /** + * Retrieves the Texture object. + * + * The texture is available immediately, but does not have its miplevels ready until + * after doTranscoding() and the subsequent uploadImages() have been completed. The + * caller has ownership over this texture and is responsible for freeing it after all + * miplevels have been uploaded. + */ + Texture* getTexture() const noexcept; + + /** + * Loads all mipmaps from the KTX2 file and transcodes them to the resolved format. + * + * This does not return until all mipmaps have been transcoded. This is typically + * called from a background thread. 
+ */ + Result doTranscoding(); + + /** + * Uploads pending mipmaps to the texture. + * + * This can safely be called while doTranscoding() is still working in another thread. + * Since this calls Texture::setImage(), it should be called from the foreground thread; + * see "Thread safety" in the documentation for filament::Engine. + */ + void uploadImages(); + + protected: + Async() noexcept = default; + ~Async() = default; + + public: + Async(Async const&) = delete; + Async(Async&&) = delete; + Async& operator=(Async const&) = delete; + Async& operator=(Async&&) = delete; + + friend class Ktx2Reader; + }; + + /** + * Creates a texture without starting the transcode process. + * + * This method is an alternative to load() that allows users to populate mipmap levels + * asynchronously. The texture object however is still created synchronously. + * + * - For a usage example, see the documentation for the Async object. + * - Creates a copy of the given buffer, allowing clients to free it immediately. + * - Returns null if none of the requested formats can be extracted from the data. + * + * This method iterates through the requested format list, checking each one against the + * platform's capabilities and its availability from the transcoder. When a suitable format + * is determined, it then performs lossless decompression (zstd) before transcoding the data + * into the final format. + * + * The transfer function specified here is used in two ways: + * 1) It is checked against the transfer function that was specified as metadata + * in the KTX2 blob. If they do not match, this method fails. + * 2) It is used as a filter when determining the final internal format. + */ + Async* asyncCreate(const void* data, size_t size, TransferFunction transfer); + + /** + * Frees the given async object and sets it to null. + * + * This frees the original source data (i.e. the raw content of the KTX2 file) but does not + * free the associated Texture object. 
This can be done after transcoding has finished. + */ + void asyncDestroy(Async** async); + + private: + Ktx2Reader(const Ktx2Reader&) = delete; + Ktx2Reader& operator=(const Ktx2Reader&) = delete; + Ktx2Reader(Ktx2Reader&& that) noexcept = delete; + Ktx2Reader& operator=(Ktx2Reader&& that) noexcept = delete; + + Texture* createTexture(basist::ktx2_transcoder* transcoder, const void* data, + size_t size, TransferFunction transfer); + + Engine& mEngine; + basist::ktx2_transcoder* const mTranscoder; + utils::FixedCapacityVector mRequestedFormats; + bool mQuiet; +}; + +} // namespace ktxreader + +#endif diff --git a/ios/include/stb/stb_image.h b/ios/include/stb/stb_image.h new file mode 100644 index 00000000..5a6c863f --- /dev/null +++ b/ios/include/stb/stb_image.h @@ -0,0 +1,7529 @@ +/* stb_image - v2.20 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. 
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. + + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph 
Thomson Phil Jordan + Dave Moore Roy Eltham Hayaki Saito Nathan Reed + Won Chun Luke Graham Johan Duparc Nick Verigakis + the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Laurent Gomila Cort Stratton Sergio Gonzalez github:snagar + Aruelien Pocheville Thibault Reuille Cass Everitt github:Zelex + Ryamond Barbiero Paul Du Bois Engin Manap github:grim210 + Aldo Culquicondor Philipp Wiesemann Dale Weiler github:sammyhw + Oriol Ferrer Mesia Josh Tobin Matthew Gregan github:phprus + Julian Raschke Gregory Mullen Baldur Karlsson github:poppolopoppo + Christian Floisand Kevin Schmidt JR Smith github:darealshinji + Blazej Dariusz Roszkowski github:Michaelangel007 +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. 
The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbiw_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. 
easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). 
Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. 
You can disable this conversion +// by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. +// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// + + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +#include +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif + 
+////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const 
*filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// NOT THREADSAFE +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int 
*y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef 
STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 
1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." +#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. 
+#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. +#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#endif + +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif + +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + fseek((FILE*) user, n, SEEK_CUR); +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user); +} + +static 
stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int 
stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. 
this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + 
return stbi__malloc(a*b + add); +} + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG + if 
(stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return 
enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode))) + return 0; + +#if _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = _wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = 
stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + 
stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where 
s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if 
defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, 
req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + 
STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); 
return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + 
stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // 
compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int 
stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while 
(k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = 
(p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + 
STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. 
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + 
/* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; 
+ if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + 
} + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if 
(z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, 
stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we 
reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc 
*stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int 
cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + 
_mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 
0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from 
whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? 
r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 
value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require 
PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! 
+ // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (int) (z->zout - z->zout_start); + limit = old_limit = (int) (z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 
0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) + c = 
stbi__zreceive(a,3)+3; + else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a,7)+11; + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[288] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char 
*obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - 
a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + 
+static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. 
+ if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. 
+ if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. 
+ stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] 
? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = 
flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) 
return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = 
idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if 
(has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, 
x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1; z >>= 1; } + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 
16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. +static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v >= 0 && v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard 
vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) 
s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, 
tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + // any data to skip? 
(offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. 
+ if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; ytype) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;xchannel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; ichannel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;ichannel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;ichannel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, 
x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 
0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * 
g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? 
p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->background = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->history = (stbi_uc *) stbi__malloc(g->w * g->h); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset( g->out, 0x00, 4 * g->w * g->h ); + memset( g->background, 0x00, 4 * g->w * g->h ); // state of the background (starts transparent) + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispoase of the previous one? 
+ dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
+ + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + out = (stbi_uc*) STBI_REALLOC( out, layers * stride ); + if (delays) { + *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers ); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a 
gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) 
ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) 
stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + 
if (p == NULL) + return 0; + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + (void) stbi__get32be(s); + (void) stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet 
*packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure 
+ } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int 
stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 
2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove 
duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from 
James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/ios/include/uberz/ArchiveEnums.h b/ios/include/uberz/ArchiveEnums.h new file mode 100644 index 00000000..b2741be9 --- /dev/null +++ b/ios/include/uberz/ArchiveEnums.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef UBERZ_ARCHIVE_ENUMS_H +#define UBERZ_ARCHIVE_ENUMS_H + +#include + +namespace filament::uberz { + + enum class ArchiveFeature : uint64_t { + UNSUPPORTED, + OPTIONAL, + REQUIRED, + }; + +} // namespace filament::uberz + +#endif // UBERZ_ARCHIVE_ENUMS_H diff --git a/ios/include/uberz/ReadableArchive.h b/ios/include/uberz/ReadableArchive.h new file mode 100644 index 00000000..4e5dc216 --- /dev/null +++ b/ios/include/uberz/ReadableArchive.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef UBERZ_READABLE_ARCHIVE_H +#define UBERZ_READABLE_ARCHIVE_H + +#include + +#include + +#include + +namespace filament::uberz { + +// ArchiveSpec is a parse-free binary format. The client simply casts a word-aligned content blob +// into a ReadableArchive struct pointer, then calls the following function to convert all the +// offset fields into pointers. +void convertOffsetsToPointers(struct ReadableArchive* archive); + +#pragma clang diagnostic push +#pragma clang diagnostic warning "-Wpadded" + +// Precompiled set of materials bundled with a list of features flags that each material supports. +// This is the readable counterpart to WriteableArchive. +// Used by gltfio; users do not need to access this class directly. 
+struct ReadableArchive { + uint32_t magic; + uint32_t version; + uint64_t specsCount; + union { + struct ArchiveSpec* specs; + uint64_t specsOffset; + }; +}; + +static constexpr Shading INVALID_SHADING_MODEL = (Shading) 0xff; +static constexpr BlendingMode INVALID_BLENDING = (BlendingMode) 0xff; + +struct ArchiveSpec { + Shading shadingModel; + BlendingMode blendingMode; + uint16_t flagsCount; + uint32_t packageByteCount; + union { + struct ArchiveFlag* flags; + uint64_t flagsOffset; + }; + union { + uint8_t* package; + uint64_t packageOffset; + }; +}; + +struct ArchiveFlag { + union { + const char* name; + uint64_t nameOffset; + }; + ArchiveFeature value; +}; + +#pragma clang diagnostic pop + +} // namespace filament::uberz + +#endif // UBERZ_READABLE_ARCHIVE_H diff --git a/ios/include/uberz/WritableArchive.h b/ios/include/uberz/WritableArchive.h new file mode 100644 index 00000000..2e2121c6 --- /dev/null +++ b/ios/include/uberz/WritableArchive.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef UBERZ_WRITABLE_ARCHIVE_H +#define UBERZ_WRITABLE_ARCHIVE_H + +#include + +#include +#include +#include + +#include + +#include + +namespace filament::uberz { + +// Precompiled set of materials bundled with a list of features flags that each material supports. +// This is the writeable counterpart to ReadableArchive. 
+// Users do not need to access this class directly, they should go through gltfio. +class WritableArchive { +public: + WritableArchive(size_t materialCount) : mMaterials(materialCount) {} + void addMaterial(const char* name, const uint8_t* package, size_t packageSize); + void addSpecLine(std::string_view line); + utils::FixedCapacityVector serialize() const; + + // Low-level alternatives to addSpecLine that do not involve parsing: + void setShadingModel(Shading sm); + void setBlendingModel(BlendingMode bm); + void setFeatureFlag(const char* key, ArchiveFeature value); + +private: + size_t mLineNumber = 1; + ssize_t mMaterialIndex = -1; + + struct Material { + utils::CString name; + utils::FixedCapacityVector package; + Shading shadingModel; + BlendingMode blendingMode; + tsl::robin_map flags; + }; + + utils::FixedCapacityVector mMaterials; +}; + +} // namespace filament::uberz + +#endif // UBERZ_WRITABLE_ARCHIVE_H diff --git a/ios/include/utils/Allocator.h b/ios/include/utils/Allocator.h index 5d881c13..ba9fe198 100644 --- a/ios/include/utils/Allocator.h +++ b/ios/include/utils/Allocator.h @@ -162,7 +162,7 @@ public: } void free(void* p, size_t) noexcept { - free(p); + this->free(p); } ~HeapAllocator() noexcept = default; @@ -457,6 +457,12 @@ private: void* mEnd = nullptr; }; +class NullArea { +public: + void* data() const noexcept { return nullptr; } + size_t size() const noexcept { return 0; } +}; + } // namespace AreaPolicy // ------------------------------------------------------------------------------------------------ @@ -498,6 +504,7 @@ struct HighWatermark { void onFree(void* p, size_t size) noexcept; void onReset() noexcept; void onRewind(void const* addr) noexcept; + uint32_t getHighWatermark() const noexcept { return mHighWaterMark; } protected: const char* mName = nullptr; void* mBase = nullptr; diff --git a/ios/include/utils/BinaryTreeArray.h b/ios/include/utils/BinaryTreeArray.h new file mode 100644 index 00000000..d147d069 --- /dev/null +++ 
b/ios/include/utils/BinaryTreeArray.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_BINARYTREEARRAY_H +#define TNT_UTILS_BINARYTREEARRAY_H + +#include + +#include + +#include +#include +#include + +namespace utils { + +class BinaryTreeArray { + + // Simple fixed capacity stack + template::value>::type> + class stack { + TYPE mElements[CAPACITY]; + size_t mSize = 0; + public: + bool empty() const noexcept { return mSize == 0; } + void push(TYPE const& v) noexcept { + assert(mSize < CAPACITY); + mElements[mSize++] = v; + } + void pop() noexcept { + assert(mSize > 0); + --mSize; + } + const TYPE& back() const noexcept { + return mElements[mSize - 1]; + } + }; + +public: + static size_t count(size_t height) noexcept { return (1u << height) - 1; } + static size_t left(size_t i, size_t height) noexcept { return i + 1; } + static size_t right(size_t i, size_t height) noexcept { return i + (1u << (height - 1)); } + + // this builds the depth-first binary tree array top down (post-order) + template + static void traverse(size_t height, Leaf leaf, Node node) noexcept { + + struct TNode { + uint32_t index; + uint32_t col; + uint32_t height; + uint32_t next; + + bool isLeaf() const noexcept { return height == 1; } + size_t left() const noexcept { return BinaryTreeArray::left(index, height); } + size_t right() const noexcept { return BinaryTreeArray::right(index, 
height); } + }; + + stack stack; + stack.push(TNode{ 0, 0, (uint32_t)height, (uint32_t)count(height) }); + + uint32_t prevLeft = 0; + uint32_t prevRight = 0; + uint32_t prevIndex = 0; + while (!stack.empty()) { + TNode const* const UTILS_RESTRICT curr = &stack.back(); + const bool isLeaf = curr->isLeaf(); + const uint32_t index = curr->index; + const uint32_t l = (uint32_t)curr->left(); + const uint32_t r = (uint32_t)curr->right(); + + if (prevLeft == index || prevRight == index) { + if (!isLeaf) { + // the 'next' node of our left node's right descendants is our right child + stack.push({ l, 2 * curr->col, curr->height - 1, r }); + } + } else if (l == prevIndex) { + if (!isLeaf) { + // the 'next' node of our right child is our own 'next' sibling + stack.push({ r, 2 * curr->col + 1, curr->height - 1, curr->next }); + } + } else { + if (!isLeaf) { + node(index, l, r, curr->next); + } else { + leaf(index, curr->col, curr->next); + } + stack.pop(); + } + + prevLeft = l; + prevRight = r; + prevIndex = index; + } + } +}; + +} // namespace utils + +#endif //TNT_UTILS_BINARYTREEARRAY_H diff --git a/ios/include/utils/CString.h b/ios/include/utils/CString.h index da809d11..09f3d2ca 100644 --- a/ios/include/utils/CString.h +++ b/ios/include/utils/CString.h @@ -79,7 +79,7 @@ public: using const_pointer = const value_type*; using const_iterator = const value_type*; - constexpr StaticString() noexcept = default; + constexpr StaticString() noexcept {} // NOLINT(modernize-use-equals-default), Ubuntu compiler bug // initialization from a string literal template @@ -149,6 +149,14 @@ public: size_type getHash() const noexcept { return mHash; } + struct Hasher { + typedef StaticString argument_type; + typedef size_t result_type; + result_type operator()(const argument_type& s) const noexcept { + return s.getHash(); + } + }; + private: const_pointer mString = nullptr; size_type mLength = 0; @@ -200,7 +208,7 @@ public: using iterator = value_type*; using const_iterator = const 
value_type*; - CString() noexcept = default; + CString() noexcept {} // NOLINT(modernize-use-equals-default), Ubuntu compiler bug // Allocates memory and appends a null. This constructor can be used to hold arbitrary data // inside the string (i.e. it can contain nulls or non-ASCII encodings). @@ -220,7 +228,7 @@ public: : CString(other, N - 1) { } - CString(StaticString const& s) : CString(s.c_str(), s.size()) {} + CString(StaticString const& s) : CString(s.c_str(), s.size()) {} // NOLINT(google-explicit-constructor) CString(const CString& rhs); @@ -309,11 +317,19 @@ public: } // placement new declared as "throw" to avoid the compiler's null-check - inline void* operator new(size_t size, void* ptr) { + inline void* operator new(size_t, void* ptr) { assert(ptr); return ptr; } + struct Hasher : private hashCStrings { + typedef CString argument_type; + typedef size_t result_type; + result_type operator()(const argument_type& s) const noexcept { + return hashCStrings::operator()(s.c_str()); + } + }; + private: struct Data { size_type length; @@ -365,34 +381,4 @@ CString to_string(T value) noexcept; } // namespace utils -// FIXME: how could we not include this one? -// needed for std::hash, since implementation is inline, this would not cause -// binaries incompatibilities if another STL version was used. -#include - -namespace std { - -//! \privatesection -template<> -struct hash { - typedef utils::CString argument_type; - typedef size_t result_type; - utils::hashCStrings hasher; - size_t operator()(const utils::CString& s) const noexcept { - return hasher(s.c_str()); - } -}; - -//! 
\privatesection -template<> -struct hash { - typedef utils::StaticString argument_type; - typedef size_t result_type; - size_t operator()(const utils::StaticString& s) const noexcept { - return s.getHash(); - } -}; - -} // namespace std - #endif // TNT_UTILS_CSTRING_H diff --git a/ios/include/utils/Condition.h b/ios/include/utils/Condition.h new file mode 100644 index 00000000..2ed2c07f --- /dev/null +++ b/ios/include/utils/Condition.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_CONDITION_H +#define TNT_UTILS_CONDITION_H + +#if defined(__linux__) +#include +#else +#include +#endif + +#endif // TNT_UTILS_CONDITION_H diff --git a/ios/include/utils/CountDownLatch.h b/ios/include/utils/CountDownLatch.h new file mode 100644 index 00000000..6367fffc --- /dev/null +++ b/ios/include/utils/CountDownLatch.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_COUNTDOWNLATCH_H +#define TNT_UTILS_COUNTDOWNLATCH_H + +#include + +// note: we use our version of mutex/condition to keep this public header STL free +#include +#include + +namespace utils { + +/** + * A count down latch is used to block one or several threads until the latch is signaled + * a certain number of times. + * + * Threads entering the latch are blocked until the latch is signaled enough times. + * + * @see CyclicBarrier + */ +class CountDownLatch { +public: + /** + * Creates a count down latch with a specified count. The minimum useful value is 1. + * @param count the latch counter initial value + */ + explicit CountDownLatch(size_t count) noexcept; + ~CountDownLatch() = default; + + /** + * Blocks until latch() is called \p count times. + * @see CountDownLatch(size_t count) + */ + void await() noexcept; + + /** + * Releases threads blocked in await() when called \p count times. Calling latch() more than + * \p count times has no effect. + * @see reset() + */ + void latch() noexcept; + + /** + * Resets the count-down latch to the given value. + * + * @param new_count New latch count. A value of zero will immediately unblock all waiting + * threads. + * + * @warning Use with caution. It's only safe to reset the latch count when you're sure + * that no threads are waiting in await(). This can be guaranteed in various ways, for + * instance, if you have a single thread calling await(), you could call reset() from that + * thread, or you could use a CyclicBarrier to make sure all threads using the CountDownLatch + * are at a known place (i.e.: not in await()) when reset() is called. + */ + void reset(size_t new_count) noexcept; + + /** + * @return the number of times latch() has been called since construction or reset. 
+ * @see reset(), CountDownLatch(size_t count) + */ + size_t getCount() const noexcept; + + CountDownLatch() = delete; + CountDownLatch(const CountDownLatch&) = delete; + CountDownLatch& operator=(const CountDownLatch&) = delete; + +private: + uint32_t m_initial_count; + uint32_t m_remaining_count; + mutable Mutex m_lock; + mutable Condition m_cv; +}; + +} // namespace utils + +#endif // TNT_UTILS_COUNTDOWNLATCH_H diff --git a/ios/include/utils/CyclicBarrier.h b/ios/include/utils/CyclicBarrier.h new file mode 100644 index 00000000..dae118e8 --- /dev/null +++ b/ios/include/utils/CyclicBarrier.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_CYCLIC_BARRIER_H +#define TNT_UTILS_CYCLIC_BARRIER_H + +#include + +// note: we use our version of mutex/condition to keep this public header STL free +#include +#include + +namespace utils { + +/** + * A cyclic barrier is used to synchronize several threads to a particular execution point. + * + * Threads entering the barrier are halted until all threads reach the barrier. + * + * @see CountDownLatch + */ +class CyclicBarrier { +public: + /** + * Creates a cyclic barrier with a specified number of threads to synchronize. The minimum + * useful value is 2. A value of 0 is invalid and is silently changed to 1. + * @param num_threads Number of threads to synchronize. 
+ */ + explicit CyclicBarrier(size_t num_threads) noexcept; + + /** + * @return The number of thread that are synchronized. + */ + size_t getThreadCount() const noexcept; + + /** + * @return Number of threads currently waiting on the barrier. + */ + size_t getWaitingThreadCount() const noexcept; + + /** + * Blocks until getThreadCount()-1 other threads reach await(). + */ + void await() noexcept; + + /** + * Resets the cyclic barrier to its original state and releases all waiting threads. + */ + void reset() noexcept; + + CyclicBarrier() = delete; + CyclicBarrier(const CyclicBarrier&) = delete; + CyclicBarrier& operator=(const CyclicBarrier&) = delete; + +private: + enum class State { + TRAP, RELEASE + }; + + const size_t m_num_threads; + mutable Mutex m_lock; + mutable Condition m_cv; + + State m_state = State::TRAP; + size_t m_trapped_threads = 0; + size_t m_released_threads = 0; +}; + +} // namespace utils + +#endif // TNT_UTILS_CYCLIC_BARRIER_H diff --git a/ios/include/utils/Entity.h b/ios/include/utils/Entity.h index 74f417aa..58fdf606 100644 --- a/ios/include/utils/Entity.h +++ b/ios/include/utils/Entity.h @@ -19,9 +19,6 @@ #include -// FIXME: could we get rid of -#include // for std::hash - #include #include @@ -30,7 +27,7 @@ namespace utils { class UTILS_PUBLIC Entity { public: // this can be used to create an array of to-be-filled entities (see create()) - Entity() noexcept = default; + Entity() noexcept { } // NOLINT(modernize-use-equals-default), Ubuntu compiler bug // Entities can be copied Entity(const Entity& e) noexcept = default; @@ -68,10 +65,17 @@ public: return Entity{ Type(identity) }; } + struct Hasher { + typedef Entity argument_type; + typedef size_t result_type; + result_type operator()(argument_type const& e) const { + return e.getId(); + } + }; + private: friend class EntityManager; friend class EntityManagerImpl; - friend struct std::hash; using Type = uint32_t; explicit Entity(Type identity) noexcept : mIdentity(identity) { } @@ -81,18 
+85,4 @@ private: } // namespace utils - -namespace std { - -template<> -struct hash { - typedef utils::Entity argument_type; - typedef size_t result_type; - result_type operator()(argument_type const& e) const { - return e.getId(); - } -}; - -} // namespace std - #endif // TNT_UTILS_ENTITY_H diff --git a/ios/include/utils/EntityManager.h b/ios/include/utils/EntityManager.h index f44f6c28..9674cac2 100644 --- a/ios/include/utils/EntityManager.h +++ b/ios/include/utils/EntityManager.h @@ -36,7 +36,7 @@ namespace utils { class UTILS_PUBLIC EntityManager { public: - // Get the global EntityManager. Is is recommended to cache this value. + // Get the global EntityManager. It is recommended to cache this value. // Thread Safe. static EntityManager& get() noexcept; diff --git a/ios/include/utils/Hash.h b/ios/include/utils/Hash.h new file mode 100644 index 00000000..f17dfc86 --- /dev/null +++ b/ios/include/utils/Hash.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_HASH_H +#define TNT_UTILS_HASH_H + +#include // for std::hash + +#include +#include + +namespace utils::hash { + +// Hash function that takes an arbitrary swath of word-aligned data. 
+inline uint32_t murmur3(const uint32_t* key, size_t wordCount, uint32_t seed) noexcept { + uint32_t h = seed; + size_t i = wordCount; + do { + uint32_t k = *key++; + k *= 0xcc9e2d51u; + k = (k << 15u) | (k >> 17u); + k *= 0x1b873593u; + h ^= k; + h = (h << 13u) | (h >> 19u); + h = (h * 5u) + 0xe6546b64u; + } while (--i); + h ^= wordCount; + h ^= h >> 16u; + h *= 0x85ebca6bu; + h ^= h >> 13u; + h *= 0xc2b2ae35u; + h ^= h >> 16u; + return h; +} + +// The hash yields the same result for a given byte sequence regardless of alignment. +inline uint32_t murmurSlow(const uint8_t* key, size_t byteCount, uint32_t seed) noexcept { + const size_t wordCount = (byteCount + 3) / 4; + const uint8_t* const last = key + byteCount; + + // The remainder is identical to murmur3() except an inner loop safely "reads" an entire word. + uint32_t h = seed; + size_t i = wordCount; + do { + uint32_t k = 0; + for (int i = 0; i < 4 && key < last; ++i, ++key) { + k >>= 8; + k |= uint32_t(*key) << 24; + } + k *= 0xcc9e2d51u; + k = (k << 15u) | (k >> 17u); + k *= 0x1b873593u; + h ^= k; + h = (h << 13u) | (h >> 19u); + h = (h * 5u) + 0xe6546b64u; + } while (--i); + h ^= wordCount; + h ^= h >> 16u; + h *= 0x85ebca6bu; + h ^= h >> 13u; + h *= 0xc2b2ae35u; + h ^= h >> 16u; + return h; +} + +template +struct MurmurHashFn { + uint32_t operator()(const T& key) const noexcept { + static_assert(0 == (sizeof(key) & 3u), "Hashing requires a size that is a multiple of 4."); + return murmur3((const uint32_t*) &key, sizeof(key) / 4, 0); + } +}; + +// combines two hashes together +template +inline void combine(size_t& seed, const T& v) noexcept { + std::hash hasher; + seed ^= hasher(v) + 0x9e3779b9u + (seed << 6u) + (seed >> 2u); +} + +// combines two hashes together, faster but less good +template +inline void combine_fast(size_t& seed, const T& v) noexcept { + std::hash hasher; + seed ^= hasher(v) << 1u; +} + +} // namespace utils::hash + +#endif // TNT_UTILS_HASH_H diff --git a/ios/include/utils/Invocable.h 
b/ios/include/utils/Invocable.h index d2f69c36..49b43071 100644 --- a/ios/include/utils/Invocable.h +++ b/ios/include/utils/Invocable.h @@ -62,7 +62,7 @@ public: // Creates an Invocable from the functor passed in. template = 0> - Invocable(Fn&& fn) noexcept; + Invocable(Fn&& fn) noexcept; // NOLINT(google-explicit-constructor) Invocable(const Invocable&) = delete; Invocable(Invocable&& rhs) noexcept; @@ -121,12 +121,9 @@ Invocable::Invocable(Invocable&& rhs) noexcept template Invocable& Invocable::operator=(Invocable&& rhs) noexcept { if (this != &rhs) { - mInvocable = rhs.mInvocable; - mDeleter = rhs.mDeleter; - mInvoker = rhs.mInvoker; - rhs.mInvocable = nullptr; - rhs.mDeleter = nullptr; - rhs.mInvoker = nullptr; + std::swap(mInvocable, rhs.mInvocable); + std::swap(mDeleter, rhs.mDeleter); + std::swap(mInvoker, rhs.mInvoker); } return *this; } diff --git a/ios/include/utils/JobSystem.h b/ios/include/utils/JobSystem.h new file mode 100644 index 00000000..6c30a5b2 --- /dev/null +++ b/ios/include/utils/JobSystem.h @@ -0,0 +1,530 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_JOBSYSTEM_H +#define TNT_UTILS_JOBSYSTEM_H + +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace utils { + +class JobSystem { + static constexpr size_t MAX_JOB_COUNT = 16384; + static_assert(MAX_JOB_COUNT <= 0x7FFE, "MAX_JOB_COUNT must be <= 0x7FFE"); + using WorkQueue = WorkStealingDequeue; + +public: + class Job; + + using JobFunc = void(*)(void*, JobSystem&, Job*); + + class alignas(CACHELINE_SIZE) Job { + public: + Job() noexcept {} /* = default; */ /* clang bug */ // NOLINT(modernize-use-equals-default,cppcoreguidelines-pro-type-member-init) + Job(const Job&) = delete; + Job(Job&&) = delete; + + private: + friend class JobSystem; + + // Size is chosen so that we can store at least std::function<> + // the alignas() qualifier ensures we're multiple of a cache-line. + static constexpr size_t JOB_STORAGE_SIZE_BYTES = + sizeof(std::function) > 48 ? sizeof(std::function) : 48; + static constexpr size_t JOB_STORAGE_SIZE_WORDS = + (JOB_STORAGE_SIZE_BYTES + sizeof(void*) - 1) / sizeof(void*); + + // keep it first, so it's correctly aligned with all architectures + // this is where we store the job's data, typically a std::function<> + // v7 | v8 + void* storage[JOB_STORAGE_SIZE_WORDS]; // 48 | 48 + JobFunc function; // 4 | 8 + uint16_t parent; // 2 | 2 + std::atomic runningJobCount = { 1 }; // 2 | 2 + mutable std::atomic refCount = { 1 }; // 2 | 2 + // 6 | 2 (padding) + // 64 | 64 + }; + + explicit JobSystem(size_t threadCount = 0, size_t adoptableThreadsCount = 1) noexcept; + + ~JobSystem(); + + // Make the current thread part of the thread pool. + void adopt(); + + // Remove this adopted thread from the parent. This is intended to be used for + // shutting down a JobSystem. In particular, this doesn't allow the parent to + // adopt more thread. 
+ void emancipate(); + + + // If a parent is not specified when creating a job, that job will automatically take the + // root job as a parent. + // The root job is reset when waited on. + Job* setRootJob(Job* job) noexcept { return mRootJob = job; } + + // use setRootJob() instead + UTILS_DEPRECATED + Job* setMasterJob(Job* job) noexcept { return setRootJob(job); } + + + Job* create(Job* parent, JobFunc func) noexcept; + + // NOTE: All methods below must be called from the same thread and that thread must be + // owned by JobSystem's thread pool. + + /* + * Job creation examples: + * ---------------------- + * + * struct Functor { + * uintptr_t storage[6]; + * void operator()(JobSystem&, Jobsystem::Job*); + * } functor; + * + * struct Foo { + * uintptr_t storage[6]; + * void method(JobSystem&, Jobsystem::Job*); + * } foo; + * + * Functor and Foo size muse be <= uintptr_t[6] + * + * createJob() + * createJob(parent) + * createJob(parent, &foo) + * createJob(parent, foo) + * createJob(parent, std::ref(foo)) + * createJob(parent, functor) + * createJob(parent, std::ref(functor)) + * createJob(parent, [ up-to 6 uintptr_t ](JobSystem*, Jobsystem::Job*){ }) + * + * Utility functions: + * ------------------ + * These are less efficient, but handle any size objects using the heap if needed. + * (internally uses std::function<>), and don't require the callee to take + * a (JobSystem&, Jobsystem::Job*) as parameter. + * + * struct BigFoo { + * uintptr_t large[16]; + * void operator()(); + * void method(int answerToEverything); + * static void exec(BigFoo&) { } + * } bigFoo; + * + * jobs::createJob(js, parent, [ any-capture ](int answerToEverything){}, 42); + * jobs::createJob(js, parent, &BigFoo::method, &bigFoo, 42); + * jobs::createJob(js, parent, &BigFoo::exec, std::ref(bigFoo)); + * jobs::createJob(js, parent, bigFoo); + * jobs::createJob(js, parent, std::ref(bigFoo)); + * etc... 
+ * + * struct SmallFunctor { + * uintptr_t storage[3]; + * void operator()(T* data, size_t count); + * } smallFunctor; + * + * jobs::parallel_for(js, data, count, [ up-to 3 uintptr_t ](T* data, size_t count) { }); + * jobs::parallel_for(js, data, count, smallFunctor); + * jobs::parallel_for(js, data, count, std::ref(smallFunctor)); + * + */ + + // creates an empty (no-op) job with an optional parent + Job* createJob(Job* parent = nullptr) noexcept { + return create(parent, nullptr); + } + + // creates a job from a KNOWN method pointer w/ object passed by pointer + // the caller must ensure the object will outlive the Job + template + Job* createJob(Job* parent, T* data) noexcept { + Job* job = create(parent, [](void* user, JobSystem& js, Job* job) { + (*static_cast(user)->*method)(js, job); + }); + if (job) { + job->storage[0] = data; + } + return job; + } + + // creates a job from a KNOWN method pointer w/ object passed by value + template + Job* createJob(Job* parent, T data) noexcept { + static_assert(sizeof(data) <= sizeof(Job::storage), "user data too large"); + Job* job = create(parent, [](void* user, JobSystem& js, Job* job) { + T* that = static_cast(user); + (that->*method)(js, job); + that->~T(); + }); + if (job) { + new(job->storage) T(std::move(data)); + } + return job; + } + + // creates a job from a functor passed by value + template + Job* createJob(Job* parent, T functor) noexcept { + static_assert(sizeof(functor) <= sizeof(Job::storage), "functor too large"); + Job* job = create(parent, [](void* user, JobSystem& js, Job* job){ + T& that = *static_cast(user); + that(js, job); + that.~T(); + }); + if (job) { + new(job->storage) T(std::move(functor)); + } + return job; + } + + + /* + * Jobs are normally finished automatically, this can be used to cancel a job before it is run. + * + * Never use this once a flavor of run() has been called. + */ + void cancel(Job*& job) noexcept; + + /* + * Adds a reference to a Job. 
+ * + * This allows the caller to waitAndRelease() on this job from multiple threads. + * Use runAndWait() if waiting from multiple threads is not needed. + * + * This job MUST BE waited on with waitAndRelease(), or released with release(). + */ + Job* retain(Job* job) noexcept; + + /* + * Releases a reference from a Job obtained with runAndRetain() or a call to retain(). + * + * The job can't be used after this call. + */ + void release(Job*& job) noexcept; + void release(Job*&& job) noexcept { + Job* p = job; + release(p); + } + + /* + * Add job to this thread's execution queue. It's reference will drop automatically. + * Current thread must be owned by JobSystem's thread pool. See adopt(). + * + * The job can't be used after this call. + */ + void run(Job*& job) noexcept; + void run(Job*&& job) noexcept { // allows run(createJob(...)); + Job* p = job; + run(p); + } + + void signal() noexcept; + + /* + * Add job to this thread's execution queue and and keep a reference to it. + * Current thread must be owned by JobSystem's thread pool. See adopt(). + * + * This job MUST BE waited on with wait(), or released with release(). + */ + Job* runAndRetain(Job* job) noexcept; + + /* + * Wait on a job and destroys it. + * Current thread must be owned by JobSystem's thread pool. See adopt(). + * + * The job must first be obtained from runAndRetain() or retain(). + * The job can't be used after this call. + */ + void waitAndRelease(Job*& job) noexcept; + + /* + * Runs and wait for a job. This is equivalent to calling + * runAndRetain(job); + * wait(job); + * + * The job can't be used after this call. + */ + void runAndWait(Job*& job) noexcept; + void runAndWait(Job*&& job) noexcept { // allows runAndWait(createJob(...)); + Job* p = job; + runAndWait(p); + } + + // for debugging + friend utils::io::ostream& operator << (utils::io::ostream& out, JobSystem const& js); + + + // utility functions... 
+ + // set the name of the current thread (on OSes that support it) + static void setThreadName(const char* threadName) noexcept; + + enum class Priority { + NORMAL, + DISPLAY, + URGENT_DISPLAY + }; + + static void setThreadPriority(Priority priority) noexcept; + static void setThreadAffinityById(size_t id) noexcept; + + size_t getParallelSplitCount() const noexcept { + return mParallelSplitCount; + } + +private: + // this is just to avoid using std::default_random_engine, since we're in a public header. + class default_random_engine { + static constexpr uint32_t m = 0x7fffffffu; + uint32_t mState; // must be 0 < seed < 0x7fffffff + public: + inline constexpr explicit default_random_engine(uint32_t seed = 1u) noexcept + : mState(((seed % m) == 0u) ? 1u : seed % m) { + } + inline uint32_t operator()() noexcept { + return mState = uint32_t((uint64_t(mState) * 48271u) % m); + } + }; + + struct alignas(CACHELINE_SIZE) ThreadState { // this causes 40-bytes padding + // make sure storage is cache-line aligned + WorkQueue workQueue; + + // these are not accessed by the worker threads + alignas(CACHELINE_SIZE) // this causes 56-bytes padding + JobSystem* js; + std::thread thread; + default_random_engine rndGen; + uint32_t id; + }; + + static_assert(sizeof(ThreadState) % CACHELINE_SIZE == 0, + "ThreadState doesn't align to a cache line"); + + ThreadState& getState() noexcept; + + void incRef(Job const* job) noexcept; + void decRef(Job const* job) noexcept; + + Job* allocateJob() noexcept; + JobSystem::ThreadState* getStateToStealFrom(JobSystem::ThreadState& state) noexcept; + bool hasJobCompleted(Job const* job) noexcept; + + void requestExit() noexcept; + bool exitRequested() const noexcept; + bool hasActiveJobs() const noexcept; + + void loop(ThreadState* state) noexcept; + bool execute(JobSystem::ThreadState& state) noexcept; + Job* steal(JobSystem::ThreadState& state) noexcept; + void finish(Job* job) noexcept; + + void put(WorkQueue& workQueue, Job* job) noexcept; + 
Job* pop(WorkQueue& workQueue) noexcept; + Job* steal(WorkQueue& workQueue) noexcept; + + void wait(std::unique_lock& lock, Job* job = nullptr) noexcept; + void wakeAll() noexcept; + void wakeOne() noexcept; + + // these have thread contention, keep them together + utils::Mutex mWaiterLock; + utils::Condition mWaiterCondition; + + std::atomic mActiveJobs = { 0 }; + utils::Arena, LockingPolicy::NoLock> mJobPool; + + template + using aligned_vector = std::vector>; + + // these are essentially const, make sure they're on a different cache-lines than the + // read-write atomics. + // We can't use "alignas(CACHELINE_SIZE)" because the standard allocator can't make this + // guarantee. + char padding[CACHELINE_SIZE]; + + alignas(16) // at least we align to half (or quarter) cache-line + aligned_vector mThreadStates; // actual data is stored offline + std::atomic mExitRequested = { false }; // this one is almost never written + std::atomic mAdoptedThreads = { 0 }; // this one is almost never written + Job* const mJobStorageBase; // Base for conversion to indices + uint16_t mThreadCount = 0; // total # of threads in the pool + uint8_t mParallelSplitCount = 0; // # of split allowable in parallel_for + Job* mRootJob = nullptr; + + utils::SpinLock mThreadMapLock; // this should have very little contention + tsl::robin_map mThreadMap; +}; + +// ------------------------------------------------------------------------------------------------- +// Utility functions built on top of JobSystem + +namespace jobs { + +// These are convenience C++11 style job creation methods that support lambdas +// +// IMPORTANT: these are less efficient to call and may perform heap allocation +// depending on the capture and parameters +// +template +JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent, + CALLABLE&& func, ARGS&&... args) noexcept { + struct Data { + std::function f; + // Renaming the method below could cause an Arrested Development. 
+ void gob(JobSystem&, JobSystem::Job*) noexcept { f(); } + } user{ std::bind(std::forward(func), + std::forward(args)...) }; + return js.createJob(parent, std::move(user)); +} + +template::type>::value + >::type +> +JobSystem::Job* createJob(JobSystem& js, JobSystem::Job* parent, + CALLABLE&& func, T&& o, ARGS&&... args) noexcept { + struct Data { + std::function f; + // Renaming the method below could cause an Arrested Development. + void gob(JobSystem&, JobSystem::Job*) noexcept { f(); } + } user{ std::bind(std::forward(func), std::forward(o), + std::forward(args)...) }; + return js.createJob(parent, std::move(user)); +} + + +namespace details { + +template +struct ParallelForJobData { + using SplitterType = S; + using Functor = F; + using JobData = ParallelForJobData; + using size_type = uint32_t; + + ParallelForJobData(size_type start, size_type count, uint8_t splits, + Functor functor, + const SplitterType& splitter) noexcept + : start(start), count(count), + functor(std::move(functor)), + splits(splits), + splitter(splitter) { + } + + void parallelWithJobs(JobSystem& js, JobSystem::Job* parent) noexcept { + assert(parent); + + // this branch is often miss-predicted (it both sides happen 50% of the calls) +right_side: + if (splitter.split(splits, count)) { + const size_type lc = count / 2; + JobData ld(start, lc, splits + uint8_t(1), functor, splitter); + JobSystem::Job* l = js.createJob(parent, std::move(ld)); + if (UTILS_UNLIKELY(l == nullptr)) { + // couldn't create a job, just pretend we're done splitting + goto execute; + } + + // start the left side before attempting the right side, so we parallelize in case + // of job creation failure -- rare, but still. + js.run(l); + + // don't spawn a job for the right side, just reuse us -- spawning jobs is more + // costly than we'd like. + start += lc; + count -= lc; + ++splits; + goto right_side; + + } else { +execute: + // we're done splitting, do the real work here! 
+ functor(start, count); + } + } + +private: + size_type start; // 4 + size_type count; // 4 + Functor functor; // ? + uint8_t splits; // 1 + SplitterType splitter; // 1 +}; + +} // namespace details + + +// parallel jobs with start/count indices +template +JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, + uint32_t start, uint32_t count, F functor, const S& splitter) noexcept { + using JobData = details::ParallelForJobData; + JobData jobData(start, count, 0, std::move(functor), splitter); + return js.createJob(parent, std::move(jobData)); +} + +// parallel jobs with pointer/count +template +JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, + T* data, uint32_t count, F functor, const S& splitter) noexcept { + auto user = [data, f = std::move(functor)](uint32_t s, uint32_t c) { + f(data + s, c); + }; + using JobData = details::ParallelForJobData; + JobData jobData(0, count, 0, std::move(user), splitter); + return js.createJob(parent, std::move(jobData)); +} + +// parallel jobs on a Slice<> +template +JobSystem::Job* parallel_for(JobSystem& js, JobSystem::Job* parent, + utils::Slice slice, F functor, const S& splitter) noexcept { + return parallel_for(js, parent, slice.data(), slice.size(), functor, splitter); +} + + +template +class CountSplitter { +public: + bool split(size_t splits, size_t count) const noexcept { + return (splits < MAX_SPLITS && count >= COUNT * 2); + } +}; + +} // namespace jobs +} // namespace utils + +#endif // TNT_UTILS_JOBSYSTEM_H diff --git a/ios/include/utils/NameComponentManager.h b/ios/include/utils/NameComponentManager.h index 62201d11..9e31e461 100644 --- a/ios/include/utils/NameComponentManager.h +++ b/ios/include/utils/NameComponentManager.h @@ -17,40 +17,19 @@ #ifndef TNT_UTILS_NAMECOMPONENTMANAGER_H #define TNT_UTILS_NAMECOMPONENTMANAGER_H -#include -#include -#include - #include +#include #include #include #include -#include +#include +#include namespace utils { class EntityManager; -namespace 
details { -class SafeString { -public: - SafeString() noexcept = default; - explicit SafeString(const char* str) noexcept : mCStr(strdup(str)) { } - SafeString(SafeString&& rhs) noexcept : mCStr(rhs.mCStr) { rhs.mCStr = nullptr; } - SafeString& operator=(SafeString&& rhs) noexcept { - std::swap(mCStr, rhs.mCStr); - return *this; - } - ~SafeString() { free((void*)mCStr); } - const char* c_str() const noexcept { return mCStr; } - -private: - char const* mCStr = nullptr; -}; -} // namespace details - - /** * \class NameComponentManager NameComponentManager.h utils/NameComponentManager.h * \brief Allows clients to associate string labels with entities. @@ -69,7 +48,7 @@ private: * printf("%s\n", names->getName(names->getInstance(myEntity)); * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -class UTILS_PUBLIC NameComponentManager : public SingleInstanceComponentManager { +class UTILS_PUBLIC NameComponentManager : public SingleInstanceComponentManager { public: using Instance = EntityInstance; @@ -93,7 +72,7 @@ public: * @return Non-zero handle if the entity has a name component, 0 otherwise. */ Instance getInstance(Entity e) const noexcept { - return Instance(SingleInstanceComponentManager::getInstance(e)); + return { SingleInstanceComponentManager::getInstance(e) }; } /*! \cond PRIVATE */ diff --git a/ios/include/utils/Profiler.h b/ios/include/utils/Profiler.h new file mode 100644 index 00000000..f41bd1e9 --- /dev/null +++ b/ios/include/utils/Profiler.h @@ -0,0 +1,212 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
// -----------------------------------------------------------------------------
// ios/include/utils/Profiler.h (reconstructed; Apache 2.0, (C) 2016 AOSP).
// NOTE(review): this chunk was mangled by markup stripping -- every `#include
// <...>` target and every angle-bracketed template argument list was deleted.
// The include targets and chrono types below are reconstructed from the
// surrounding code; TODO confirm against upstream Filament v1.25.0.
// -----------------------------------------------------------------------------

#ifndef TNT_UTILS_PROFILER_H
#define TNT_UTILS_PROFILER_H

#include <stddef.h>
#include <stdint.h>

#include <chrono>   // note: This is safe (only used inline)

#if defined(__linux__)
#   include <unistd.h>
#   include <sys/ioctl.h>
#   include <linux/perf_event.h>
#endif

// Originally `#include <utils/compiler.h>`; the target was lost to markup
// stripping, so provide a no-op fallback so this header stays self-contained.
#ifndef UTILS_UNUSED
#define UTILS_UNUSED
#endif

namespace utils {

/**
 * Thin wrapper around the Linux perf_event performance-counter interface.
 * On non-Linux platforms every operation is a no-op and isValid() is false.
 * Not copyable or movable: the instance owns per-event file descriptors.
 */
class Profiler {
public:
    // Counter slots. INSTRUCTIONS must stay 0: mCountersFd[0] is the group
    // leader fd that reset()/start()/stop() operate on.
    enum {
        INSTRUCTIONS    = 0,   // must be zero
        CPU_CYCLES      = 1,
        DCACHE_REFS     = 2,
        DCACHE_MISSES   = 3,
        BRANCHES        = 4,
        BRANCH_MISSES   = 5,
        ICACHE_REFS     = 6,
        ICACHE_MISSES   = 7,

        // Must be last one
        EVENT_COUNT
    };

    // Bitmask values for resetEvents(); INSTRUCTIONS is always implied.
    enum {
        EV_CPU_CYCLES = 1u << CPU_CYCLES,
        EV_L1D_REFS   = 1u << DCACHE_REFS,
        EV_L1D_MISSES = 1u << DCACHE_MISSES,
        EV_BPU_REFS   = 1u << BRANCHES,
        EV_BPU_MISSES = 1u << BRANCH_MISSES,
        EV_L1I_REFS   = 1u << ICACHE_REFS,
        EV_L1I_MISSES = 1u << ICACHE_MISSES,
        // helpers
        EV_L1D_RATES  = EV_L1D_REFS | EV_L1D_MISSES,
        EV_L1I_RATES  = EV_L1I_REFS | EV_L1I_MISSES,
        EV_BPU_RATES  = EV_BPU_REFS | EV_BPU_MISSES,
    };

    Profiler() noexcept;    // must call resetEvents()
    explicit Profiler(uint32_t eventMask) noexcept;
    ~Profiler() noexcept;

    Profiler(const Profiler& rhs) = delete;
    Profiler(Profiler&& rhs) = delete;
    Profiler& operator=(const Profiler& rhs) = delete;
    Profiler& operator=(Profiler&& rhs) = delete;

    // selects which events are enabled; returns the actually-enabled set
    uint32_t resetEvents(uint32_t eventMask) noexcept;

    uint32_t getEnabledEvents() const noexcept { return mEnabledEvents; }

    // could return false if performance counters are not supported/enabled
    bool isValid() const { return mCountersFd[0] >= 0; }

    /**
     * Snapshot of all counter values (kernel perf read_format group layout).
     * Subtract two snapshots to get deltas for a measured region.
     */
    class Counters {
        friend class Profiler;
        uint64_t nr;
        uint64_t time_enabled;
        uint64_t time_running;
        struct {
            uint64_t value;
            uint64_t id;
        } counters[Profiler::EVENT_COUNT];

        // component-wise difference between two snapshots
        friend Counters operator-(Counters lhs, const Counters& rhs) noexcept {
            lhs.nr -= rhs.nr;
            lhs.time_enabled -= rhs.time_enabled;
            lhs.time_running -= rhs.time_running;
            for (size_t i = 0; i < EVENT_COUNT; ++i) {
                lhs.counters[i].value -= rhs.counters[i].value;
            }
            return lhs;
        }

    public:
        uint64_t getInstructions() const        { return counters[INSTRUCTIONS].value; }
        uint64_t getCpuCycles() const           { return counters[CPU_CYCLES].value; }
        uint64_t getL1DReferences() const       { return counters[DCACHE_REFS].value; }
        uint64_t getL1DMisses() const           { return counters[DCACHE_MISSES].value; }
        uint64_t getL1IReferences() const       { return counters[ICACHE_REFS].value; }
        uint64_t getL1IMisses() const           { return counters[ICACHE_MISSES].value; }
        uint64_t getBranchInstructions() const  { return counters[BRANCHES].value; }
        uint64_t getBranchMisses() const        { return counters[BRANCH_MISSES].value; }

        // presumably nanoseconds (perf reports times in ns) -- duration
        // template arguments reconstructed, TODO confirm against upstream
        std::chrono::duration<uint64_t, std::nano> getWallTime() const {
            return std::chrono::duration<uint64_t, std::nano>(time_enabled);
        }

        std::chrono::duration<uint64_t, std::nano> getRunningTime() const {
            return std::chrono::duration<uint64_t, std::nano>(time_running);
        }

        // instructions per cycle (NaN if the cycle counter read zero)
        double getIPC() const noexcept {
            uint64_t cpuCycles = getCpuCycles();
            uint64_t instructions = getInstructions();
            return double(instructions) / double(cpuCycles);
        }

        // cycles per instruction
        double getCPI() const noexcept {
            uint64_t cpuCycles = getCpuCycles();
            uint64_t instructions = getInstructions();
            return double(cpuCycles) / double(instructions);
        }

        double getL1DMissRate() const noexcept {
            uint64_t cacheReferences = getL1DReferences();
            uint64_t cacheMisses = getL1DMisses();
            return double(cacheMisses) / double(cacheReferences);
        }

        double getL1DHitRate() const noexcept {
            return 1.0 - getL1DMissRate();
        }

        double getL1IMissRate() const noexcept {
            uint64_t cacheReferences = getL1IReferences();
            uint64_t cacheMisses = getL1IMisses();
            return double(cacheMisses) / double(cacheReferences);
        }

        double getL1IHitRate() const noexcept {
            return 1.0 - getL1IMissRate();
        }

        double getBranchMissRate() const noexcept {
            uint64_t branchReferences = getBranchInstructions();
            uint64_t branchMisses = getBranchMisses();
            return double(branchMisses) / double(branchReferences);
        }

        double getBranchHitRate() const noexcept {
            return 1.0 - getBranchMissRate();
        }

        // misses per thousand instructions
        double getMPKI(uint64_t misses) const noexcept {
            return (misses * 1000.0) / getInstructions();
        }
    };

#if defined(__linux__)

    // The three calls below act on the whole counter group via the leader fd.
    void reset() noexcept {
        int fd = mCountersFd[0];
        ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
    }

    void start() noexcept {
        int fd = mCountersFd[0];
        ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
    }

    void stop() noexcept {
        int fd = mCountersFd[0];
        ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
    }

    Counters readCounters() noexcept;

#else // !__linux__

    void reset() noexcept { }
    void start() noexcept { }
    void stop() noexcept { }
    Counters readCounters() noexcept { return {}; }

#endif // __linux__

    // true when both branch counters were successfully opened
    bool hasBranchRates() const noexcept {
        return (mCountersFd[BRANCHES] >= 0) && (mCountersFd[BRANCH_MISSES] >= 0);
    }

    // true when both instruction-cache counters were successfully opened
    bool hasICacheRates() const noexcept {
        return (mCountersFd[ICACHE_REFS] >= 0) && (mCountersFd[ICACHE_MISSES] >= 0);
    }

private:
    UTILS_UNUSED uint8_t mIds[EVENT_COUNT] = {};
    int mCountersFd[EVENT_COUNT];       // -1 per slot when unavailable
    uint32_t mEnabledEvents = 0;
};

} // namespace utils

#endif // TNT_UTILS_PROFILER_H

// NOTE(review): the original chunk ended with the patch header
// ("diff --git a/ios/include/utils/Range.h ... new file mode 100644")
// of the next file, ios/include/utils/Range.h.
// -----------------------------------------------------------------------------
// ios/include/utils/Range.h (reconstructed; Apache 2.0, (C) 2018 AOSP).
// NOTE(review): markup stripping deleted the `template<typename T>` headers
// and `#include <...>` targets; restored below. Additionally, the original
// `return { value + rhs };` in operator+/operator- is ill-formed when
// instantiated (copy-list-initialization through an explicit constructor),
// fixed here with direct construction.
// -----------------------------------------------------------------------------

#ifndef TNT_UTILS_RANGE_H
#define TNT_UTILS_RANGE_H

#include <iterator>

#include <stddef.h>

namespace utils {

/**
 * Half-open interval [first, last) over an integer-like type T, iterable as a
 * sequence of the values it contains.
 */
template<typename T>
struct Range {
    using value_type = T;
    T first = 0;
    T last = 0; // this actually refers to one past the last

    size_t size() const noexcept { return last - first; }
    bool empty() const noexcept { return !size(); }
    bool contains(const T& t) const noexcept { return first <= t && t < last; }

    // true when the two half-open intervals share at least one value
    bool overlaps(const Range& that) const noexcept {
        return that.first < this->last && that.last > this->first;
    }

    // Random-access iterator yielding the values of the range BY VALUE.
    class const_iterator {
        friend struct Range;
        T value = {};

    public:
        const_iterator() noexcept = default;
        explicit const_iterator(T value) noexcept : value(value) {}

        using value_type = T;
        using pointer = value_type*;
        using difference_type = ptrdiff_t;
        using iterator_category = std::random_access_iterator_tag;

        const value_type operator*() const { return value; }
        const value_type operator[](size_t n) const { return value + n; }

        const_iterator& operator++() { ++value; return *this; }
        const_iterator& operator--() { --value; return *this; }

        const const_iterator operator++(int) { const_iterator t(value); value++; return t; }
        const const_iterator operator--(int) { const_iterator t(value); value--; return t; }

        // direct construction (the explicit ctor forbids `return {…};`)
        const_iterator operator+(size_t rhs) const { return const_iterator(T(value + rhs)); }
        const_iterator operator+(size_t rhs)       { return const_iterator(T(value + rhs)); }
        const_iterator operator-(size_t rhs) const { return const_iterator(T(value - rhs)); }

        difference_type operator-(const_iterator const& rhs) const { return value - rhs.value; }

        bool operator==(const_iterator const& rhs) const { return (value == rhs.value); }
        bool operator!=(const_iterator const& rhs) const { return (value != rhs.value); }
        bool operator>=(const_iterator const& rhs) const { return (value >= rhs.value); }
        bool operator> (const_iterator const& rhs) const { return (value >  rhs.value); }
        bool operator<=(const_iterator const& rhs) const { return (value <= rhs.value); }
        bool operator< (const_iterator const& rhs) const { return (value <  rhs.value); }
    };

    const_iterator begin() noexcept { return const_iterator{ first }; }
    const_iterator end() noexcept { return const_iterator{ last }; }
    const_iterator begin() const noexcept { return const_iterator{ first }; }
    const_iterator end() const noexcept { return const_iterator{ last }; }

    // note: unlike STL front()/back(), these return iterators, not references
    const_iterator front() const noexcept { return const_iterator{ first }; }
    const_iterator back() const noexcept { return const_iterator{ last - 1 }; }
};

} // namespace utils

#endif // TNT_UTILS_RANGE_H

// NOTE(review): the original chunk ended with the patch header and the start
// of the Apache license of the next file, ios/include/utils/RangeMap.h.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_RANGEMAP_H +#define TNT_UTILS_RANGEMAP_H + +#include +#include +#include + +#include + +namespace utils { + +/** + * Sparse container for a series of ordered non-overlapping intervals. + * + * RangeMap has a low memory footprint if it contains fairly homogeneous data. Internally, the + * intervals are automatically split and merged as elements are added or removed. + * + * Each interval maps to an instance of ValueType, which should support cheap equality checks + * and copy assignment. (simple concrete types are ideal) + * + * KeyType should support operator< because intervals are internally sorted using std::map. + */ +template +class RangeMap { +public: + /** + * Replaces all slots between first (inclusive) and last (exclusive). + */ + void add(KeyType first, KeyType last, const ValueType& value) noexcept { + // First check if an existing range contains "first". + Iterator iter = findRange(first); + if (iter != end()) { + const Range existing = getRange(iter); + // Check if the existing range be extended. + if (getValue(iter) == value) { + if (existing.last < last) { + wipe(existing.last, last); + iter = shrink(iter, existing.first, last); + mergeRight(iter); + } + return; + } + // Split the existing range into two ranges. 
+ if (last < existing.last && first > existing.first) { + iter = shrink(iter, existing.first, first); + insert(first, last, value); + insert(last, existing.last, getValue(iter)); + return; + } + clear(first, last); + insert(first, last, value); + return; + } + + // Check if an existing range contains the end of the new range. + KeyType back = last; + iter = findRange(--back); + if (iter == end()) { + wipe(first, last); + insert(first, last, value); + return; + } + const Range existing = getRange(iter); + + // Check if the existing range be extended. + if (getValue(iter) == value) { + if (existing.first > first) { + wipe(first, existing.first); + iter = shrink(iter, first, existing.last); + mergeLeft(iter); + } + return; + } + + // Clip the beginning of the existing range and potentially remove it. + if (last < existing.last) { + shrink(iter, last, existing.last); + } + wipe(first, last); + insert(first, last, value); + } + + /** + * Shorthand for the "add" method that inserts a single element. + */ + void set(KeyType key, const ValueType& value) noexcept { + KeyType begin = key; + add(begin, ++key, value); + } + + /** + * Checks if a range exists that encompasses the given key. + */ + bool has(KeyType key) const noexcept { + return findRange(key) != mMap.end(); + } + + /** + * Retrieves the element at the given location, panics if no element exists. + */ + const ValueType& get(KeyType key) const { + ConstIterator iter = findRange(key); + ASSERT_PRECONDITION(iter != end(), "RangeMap: No element exists at the given key."); + return getValue(iter); + } + + /** + * Removes all elements between begin (inclusive) and end (exclusive). + */ + void clear(KeyType first, KeyType last) noexcept { + // Check if an existing range contains "first". + Iterator iter = findRange(first); + if (iter != end()) { + const Range existing = getRange(iter); + // Split the existing range into two ranges. 
+ if (last < existing.last && first > existing.first) { + iter = shrink(iter, existing.first, first); + insert(last, existing.last, getValue(iter)); + return; + } + // Clip one of the ends of the existing range or remove it. + if (first > existing.first) { + shrink(iter, existing.first, first); + } else if (last < existing.last) { + shrink(iter, last, existing.last); + } else { + wipe(first, last); + } + // There might be another range that intersects the cleared range, so try again. + clear(first, last); + return; + } + + // Check if an existing range contains the end of the new range. + KeyType back = last; + iter = findRange(--back); + if (iter == end()) { + wipe(first, last); + return; + } + const Range existing = getRange(iter); + + // Clip the beginning of the existing range and potentially remove it. + if (last < existing.last) { + shrink(iter, last, existing.last); + } + wipe(first, last); + } + + /** + * Shorthand for the "clear" method that clears a single element. + */ + void reset(KeyType key) noexcept { + KeyType begin = key; + clear(begin, ++key); + } + + /** + * Returns the number of internal interval objects (rarely used). 
+ */ + size_t rangeCount() const noexcept { return mMap.size(); } + +private: + + using Map = std::map, ValueType>>; + using Iterator = typename Map::iterator; + using ConstIterator = typename Map::const_iterator; + + ConstIterator begin() const noexcept { return mMap.begin(); } + ConstIterator end() const noexcept { return mMap.end(); } + + Iterator begin() noexcept { return mMap.begin(); } + Iterator end() noexcept { return mMap.end(); } + + Range& getRange(Iterator iter) const { return iter->second.first; } + ValueType& getValue(Iterator iter) const { return iter->second.second; } + + const Range& getRange(ConstIterator iter) const { return iter->second.first; } + const ValueType& getValue(ConstIterator iter) const { return iter->second.second; } + + // Private helper that assumes there is no existing range that overlaps the given range. + void insert(KeyType first, KeyType last, const ValueType& value) noexcept { + assert_invariant(!has(first)); + assert_invariant(!has(last - 1)); + + // Check if there is an adjacent range to the left than can be extended. + KeyType previous = first; + if (Iterator iter = findRange(--previous); iter != end() && getValue(iter) == value) { + getRange(iter).last = last; + mergeRight(iter); + return; + } + + // Check if there is an adjacent range to the right than can be extended. + if (Iterator iter = findRange(last); iter != end() && getValue(iter) == value) { + getRange(iter).first = first; + return; + } + + mMap[first] = {Range { first, last }, value}; + } + + // Private helper that erases all intervals that are wholly contained within the given range. + // Note that this is quite different from the public "clear" method. + void wipe(KeyType first, KeyType last) noexcept { + // Find the first range whose beginning is greater than or equal to "first". 
+ Iterator iter = mMap.lower_bound(first); + while (iter != end() && getRange(iter).first < last) { + KeyType existing_last = getRange(iter).last; + if (existing_last > last) { + break; + } + iter = mMap.erase(iter); + } + } + + // Checks if there is range to the right that touches the given range. + // If so, erases it, extends the given range rightwards, and returns true. + bool mergeRight(Iterator iter) { + Iterator next = iter; + if (++next == end() || getValue(next) != getValue(iter)) { + return false; + } + if (getRange(next).first != getRange(iter).last) { + return false; + } + getRange(iter).last = getRange(next).last; + mMap.erase(next); + return true; + } + + // Checks if there is range to the left that touches the given range. + // If so, erases it, extends the given range leftwards, and returns true. + bool mergeLeft(Iterator iter) { + Iterator prev = iter; + if (--prev == end() || getValue(prev) != getValue(iter)) { + return false; + } + if (getRange(prev).last != getRange(iter).first) { + return false; + } + getRange(iter).first = getRange(prev).first; + mMap.erase(prev); + return true; + } + + // Private helper that clips one end of an existing range. + Iterator shrink(Iterator iter, KeyType first, KeyType last) { + assert_invariant(first < last); + assert_invariant(getRange(iter).first == first || getRange(iter).last == last); + std::pair, ValueType> value = {{first, last}, iter->second.second}; + mMap.erase(iter); + return mMap.insert({first, value}).first; + } + + // If the given key is encompassed by an existing range, returns an iterator for that range. + // If no encompassing range exists, returns end(). + ConstIterator findRange(KeyType key) const noexcept { + return findRangeT(*this, key); + } + + // If the given key is encompassed by an existing range, returns an iterator for that range. + // If no encompassing range exists, returns end(). 
+ Iterator findRange(KeyType key) noexcept { + return findRangeT(*this, key); + } + + // This template method allows us to avoid code duplication for const and non-const variants of + // findRange. C++17 has "std::as_const()" but that would not be helpful here, as we would still + // need to convert a const iterator to a non-const iterator. + template + static IteratorType findRangeT(SelfType& instance, KeyType key) noexcept { + // Find the first range whose beginning is greater than or equal to the given key. + IteratorType iter = instance.mMap.lower_bound(key); + if (iter != instance.end() && instance.getRange(iter).contains(key)) { + return iter; + } + // If that was the first range, or if the map is empty, return false. + if (iter == instance.begin()) { + return instance.end(); + } + // Check the range immediately previous to the one that was found. + return instance.getRange(--iter).contains(key) ? iter : instance.end(); + } + + // This maps from the start value of each range to the range itself. + Map mMap; +}; + +} // namespace utils + +#endif // TNT_UTILS_RANGEMAP_H diff --git a/ios/include/utils/SingleInstanceComponentManager.h b/ios/include/utils/SingleInstanceComponentManager.h index 574d14a7..e728d6ee 100644 --- a/ios/include/utils/SingleInstanceComponentManager.h +++ b/ios/include/utils/SingleInstanceComponentManager.h @@ -256,7 +256,7 @@ protected: private: // maps an entity to an instance index - tsl::robin_map mInstanceMap; + tsl::robin_map mInstanceMap; default_random_engine mRng; }; diff --git a/ios/include/utils/Stopwatch.h b/ios/include/utils/Stopwatch.h new file mode 100644 index 00000000..fcd30512 --- /dev/null +++ b/ios/include/utils/Stopwatch.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_STOPWATCH_H +#define TNT_UTILS_STOPWATCH_H + +#include + +#include +#include + +#include + +namespace utils { + +/* + * A very basic Stopwatch class + */ + +template +class Stopwatch { +public: + using duration = typename Clock::duration; + using time_point = typename Clock::time_point; + +private: + time_point mStart; + duration mMinLap = std::numeric_limits::max(); + duration mMaxLap{}; + std::chrono::duration mAvgLap{}; + size_t mCount = 0; + const char* mName = nullptr; + +public: + // Create a Stopwatch with a name and clock + explicit Stopwatch(const char* name) noexcept: mName(name) {} + + // Logs min/avg/max lap time + ~Stopwatch() noexcept; + + // start the stopwatch + inline void start() noexcept { + mStart = Clock::now(); + } + + // stop the stopwatch + inline void stop() noexcept { + auto d = Clock::now() - mStart; + mMinLap = std::min(mMinLap, d); + mMaxLap = std::max(mMaxLap, d); + mAvgLap = (mAvgLap * mCount + d) / (mCount + 1); + mCount++; + } + + // get the minimum lap time recorded + duration getMinLapTime() const noexcept { return mMinLap; } + + // get the maximum lap time recorded + duration getMaxLapTime() const noexcept { return mMaxLap; } + + // get the average lap time + duration getAverageLapTime() const noexcept { return mAvgLap; } +}; + +template +Stopwatch::~Stopwatch() noexcept { + slog.d << "Stopwatch \"" << mName << "\" : [" + << mMinLap.count() << ", " + << std::chrono::duration_cast(mAvgLap).count() << ", " + << mMaxLap.count() << "] ns" << io::endl; +} + +/* + * AutoStopwatch can be used 
to start and stop a Stopwatch automatically + * when entering and exiting a scope. + */ +template +class AutoStopwatch { + Stopwatch& stopwatch; +public: + inline explicit AutoStopwatch(Stopwatch& stopwatch) noexcept: stopwatch(stopwatch) { + stopwatch.start(); + } + + inline ~AutoStopwatch() noexcept { stopwatch.stop(); } +}; + +} // namespace utils + +#endif // TNT_UTILS_STOPWATCH_H diff --git a/ios/include/utils/StructureOfArrays.h b/ios/include/utils/StructureOfArrays.h index e5cbb422..41cd8b89 100644 --- a/ios/include/utils/StructureOfArrays.h +++ b/ios/include/utils/StructureOfArrays.h @@ -19,7 +19,6 @@ #include // note: this is safe, see how std::array is used below (inline / private) #include -#include #include #include @@ -509,7 +508,7 @@ private: mSize = needed; } - // this calculate the offset adjusted for all data alignment of a given array + // this calculates the offset adjusted for all data alignment of a given array static inline size_t getOffset(size_t index, size_t capacity) noexcept { auto offsets = getOffsets(capacity); return offsets[index]; diff --git a/ios/include/utils/Systrace.h b/ios/include/utils/Systrace.h new file mode 100644 index 00000000..67553f77 --- /dev/null +++ b/ios/include/utils/Systrace.h @@ -0,0 +1,278 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
// -----------------------------------------------------------------------------
// ios/include/utils/Systrace.h (reconstructed; Apache 2.0, (C) 2016 AOSP).
// NOTE(review): markup stripping deleted the `#include <...>` targets and
// template argument lists (e.g. std::atomic<...>); restored below -- TODO
// confirm against upstream. On non-Android builds every macro is a no-op.
// -----------------------------------------------------------------------------

#ifndef TNT_UTILS_SYSTRACE_H
#define TNT_UTILS_SYSTRACE_H


// Trace-category bits; SYSTRACE_TAG selects which category a macro reports to.
#define SYSTRACE_TAG_NEVER       (0)
#define SYSTRACE_TAG_ALWAYS      (1<<0)
#define SYSTRACE_TAG_FILAMENT    (1<<1)     // don't change, used in makefiles
#define SYSTRACE_TAG_JOBSYSTEM   (1<<2)


#if defined(__ANDROID__)

// originally `#include <utils/compiler.h>` (UTILS_UNLIKELY) -- target lost
// to markup stripping; the remaining targets are reconstructed
#include <atomic>

#include <stddef.h>
#include <stdint.h>

#include <unistd.h>     // pid_t

/*
 * The SYSTRACE_ macros use SYSTRACE_TAG as the TAG, which should be defined
 * before this file is included. If not, the SYSTRACE_TAG_ALWAYS tag will be used.
 */

#ifndef SYSTRACE_TAG
#define SYSTRACE_TAG (SYSTRACE_TAG_ALWAYS)
#endif

// enable tracing
#define SYSTRACE_ENABLE() ::utils::details::Systrace::enable(SYSTRACE_TAG)

// disable tracing
#define SYSTRACE_DISABLE() ::utils::details::Systrace::disable(SYSTRACE_TAG)


/**
 * Creates a Systrace context in the current scope. Needed for calling all other
 * systrace commands below.
 */
#define SYSTRACE_CONTEXT() ::utils::details::Systrace ___tracer(SYSTRACE_TAG)


// SYSTRACE_NAME traces the beginning and end of the current scope. To trace
// the correct start and end times this macro should be declared first in the
// scope body. It also automatically creates a Systrace context.
#define SYSTRACE_NAME(name) ::utils::details::ScopedTrace ___tracer(SYSTRACE_TAG, name)

// SYSTRACE_CALL is a SYSTRACE_NAME that uses the current function name.
#define SYSTRACE_CALL() SYSTRACE_NAME(__FUNCTION__)

#define SYSTRACE_NAME_BEGIN(name) \
        ___tracer.traceBegin(SYSTRACE_TAG, name)

#define SYSTRACE_NAME_END() \
        ___tracer.traceEnd(SYSTRACE_TAG)


/**
 * Trace the beginning of an asynchronous event. Unlike begin/end contexts,
 * asynchronous events do not need to be nested. The (name, cookie) pair
 * uniquely identifies the event and must be reused to end it.
 */
#define SYSTRACE_ASYNC_BEGIN(name, cookie) \
        ___tracer.asyncBegin(SYSTRACE_TAG, name, cookie)

/**
 * Trace the end of an asynchronous event.
 * This should have a corresponding SYSTRACE_ASYNC_BEGIN.
 */
#define SYSTRACE_ASYNC_END(name, cookie) \
        ___tracer.asyncEnd(SYSTRACE_TAG, name, cookie)

/**
 * Traces an integer counter value. name is used to identify the counter.
 * This can be used to track how a value changes over time.
 */
#define SYSTRACE_VALUE32(name, val) \
        ___tracer.value(SYSTRACE_TAG, name, int32_t(val))

#define SYSTRACE_VALUE64(name, val) \
        ___tracer.value(SYSTRACE_TAG, name, int64_t(val))

// ------------------------------------------------------------------------------------------------
// No user serviceable code below...
// ------------------------------------------------------------------------------------------------

namespace utils {
namespace details {

class Systrace {
public:

    enum tags {
        NEVER     = SYSTRACE_TAG_NEVER,
        ALWAYS    = SYSTRACE_TAG_ALWAYS,
        FILAMENT  = SYSTRACE_TAG_FILAMENT,
        JOBSYSTEM = SYSTRACE_TAG_JOBSYSTEM
        // we could define more TAGS here, as we need them.
    };

    Systrace(uint32_t tag) noexcept {
        if (tag) init(tag);
    }

    static void enable(uint32_t tags) noexcept;
    static void disable(uint32_t tags) noexcept;


    inline void traceBegin(uint32_t tag, const char* name) noexcept {
        if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
            beginSection(this, name);
        }
    }

    inline void traceEnd(uint32_t tag) noexcept {
        if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
            endSection(this);
        }
    }

    inline void asyncBegin(uint32_t tag, const char* name, int32_t cookie) noexcept {
        if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
            beginAsyncSection(this, name, cookie);
        }
    }

    inline void asyncEnd(uint32_t tag, const char* name, int32_t cookie) noexcept {
        if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
            endAsyncSection(this, name, cookie);
        }
    }

    inline void value(uint32_t tag, const char* name, int32_t value) noexcept {
        if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
            setCounter(this, name, value);
        }
    }

    inline void value(uint32_t tag, const char* name, int64_t value) noexcept {
        if (tag && UTILS_UNLIKELY(mIsTracingEnabled)) {
            setCounter(this, name, value);
        }
    }

private:
    friend class ScopedTrace;

    // whether tracing is supported at all by the platform

    using ATrace_isEnabled_t            = bool (*)(void);
    using ATrace_beginSection_t         = void (*)(const char* sectionName);
    using ATrace_endSection_t           = void (*)(void);
    using ATrace_beginAsyncSection_t    = void (*)(const char* sectionName, int32_t cookie);
    using ATrace_endAsyncSection_t      = void (*)(const char* sectionName, int32_t cookie);
    using ATrace_setCounter_t           = void (*)(const char* counterName, int64_t counterValue);

    struct GlobalState {
        bool isTracingAvailable;
        std::atomic<uint32_t> isTracingEnabled;   // bitmask of enabled tags -- template arg reconstructed, TODO confirm
        int markerFd;

        ATrace_isEnabled_t ATrace_isEnabled;
        ATrace_beginSection_t ATrace_beginSection;
        ATrace_endSection_t ATrace_endSection;
        ATrace_beginAsyncSection_t ATrace_beginAsyncSection;
        ATrace_endAsyncSection_t ATrace_endAsyncSection;
        ATrace_setCounter_t ATrace_setCounter;

        void (*beginSection)(Systrace* that, const char* name);
        void (*endSection)(Systrace* that);
        void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie);
        void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie);
        void (*setCounter)(Systrace* that, const char* name, int64_t value);
    };

    static GlobalState sGlobalState;


    // per-instance versions for better performance
    ATrace_isEnabled_t ATrace_isEnabled;
    ATrace_beginSection_t ATrace_beginSection;
    ATrace_endSection_t ATrace_endSection;
    ATrace_beginAsyncSection_t ATrace_beginAsyncSection;
    ATrace_endAsyncSection_t ATrace_endAsyncSection;
    ATrace_setCounter_t ATrace_setCounter;

    void (*beginSection)(Systrace* that, const char* name);
    void (*endSection)(Systrace* that);
    void (*beginAsyncSection)(Systrace* that, const char* name, int32_t cookie);
    void (*endAsyncSection)(Systrace* that, const char* name, int32_t cookie);
    void (*setCounter)(Systrace* that, const char* name, int64_t value);

    void init(uint32_t tag) noexcept;

    // cached values for faster access, no need to be initialized
    bool mIsTracingEnabled;
    int mMarkerFd = -1;
    pid_t mPid;

    static void setup() noexcept;
    static void init_once() noexcept;
    static bool isTracingEnabled(uint32_t tag) noexcept;

    static void begin_body(int fd, int pid, const char* name) noexcept;
    static void end_body(int fd, int pid) noexcept;
    static void async_begin_body(int fd, int pid, const char* name, int32_t cookie) noexcept;
    static void async_end_body(int fd, int pid, const char* name, int32_t cookie) noexcept;
    static void int64_body(int fd, int pid, const char* name, int64_t value) noexcept;
};

// ------------------------------------------------------------------------------------------------

// RAII helper: emits a begin marker on construction and an end marker on
// destruction (used by SYSTRACE_NAME / SYSTRACE_CALL).
class ScopedTrace {
public:
    // we don't inline this because it's relatively heavy due to a global check
    ScopedTrace(uint32_t tag, const char* name) noexcept : mTrace(tag), mTag(tag) {
        mTrace.traceBegin(tag, name);
    }

    inline ~ScopedTrace() noexcept {
        mTrace.traceEnd(mTag);
    }

    inline void value(uint32_t tag, const char* name, int32_t v) noexcept {
        mTrace.value(tag, name, v);
    }

    inline void value(uint32_t tag, const char* name, int64_t v) noexcept {
        mTrace.value(tag, name, v);
    }

private:
    Systrace mTrace;
    const uint32_t mTag;
};

} // namespace details
} // namespace utils

// ------------------------------------------------------------------------------------------------
#else // !ANDROID
// ------------------------------------------------------------------------------------------------

#define SYSTRACE_ENABLE()
#define SYSTRACE_DISABLE()
#define SYSTRACE_CONTEXT()
#define SYSTRACE_NAME(name)
#define SYSTRACE_NAME_BEGIN(name)
#define SYSTRACE_NAME_END()
#define SYSTRACE_CALL()
#define SYSTRACE_ASYNC_BEGIN(name, cookie)
#define SYSTRACE_ASYNC_END(name, cookie)
#define SYSTRACE_VALUE32(name, val)
#define SYSTRACE_VALUE64(name, val)

#endif // ANDROID

#endif // TNT_UTILS_SYSTRACE_H

// NOTE(review): the original chunk ended with the patch header and the start
// of the Apache license of ios/include/utils/ThermalManager.h.
+ */ + +#ifndef TNT_UTILS_THERMALMANAGER_H +#define TNT_UTILS_THERMALMANAGER_H + +#if defined(__ANDROID__) +#include +#else +#include +#endif + +#endif // TNT_UTILS_THERMALMANAGER_H diff --git a/ios/include/utils/ThreadUtils.h b/ios/include/utils/ThreadUtils.h new file mode 100644 index 00000000..5f855b66 --- /dev/null +++ b/ios/include/utils/ThreadUtils.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_THREADUTILS_H +#define TNT_UTILS_THREADUTILS_H + +#include + +namespace utils { + +class ThreadUtils { +public: + static std::thread::id getThreadId() noexcept; + static bool isThisThread(std::thread::id id) noexcept; +}; + +} // namespace utils + +#endif // TNT_UTILS_THREADUTILS_H diff --git a/ios/include/utils/WorkStealingDequeue.h b/ios/include/utils/WorkStealingDequeue.h new file mode 100644 index 00000000..73b1ce6e --- /dev/null +++ b/ios/include/utils/WorkStealingDequeue.h @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_WORKSTEALINGDEQUEUE_H +#define TNT_UTILS_WORKSTEALINGDEQUEUE_H + +#include + +#include +#include + +namespace utils { + +/* + * A templated, lockless, fixed-size work-stealing dequeue + * + * + * top bottom + * v v + * |----|----|----|----|----|----| + * steal() push(), pop() + * any thread main thread + * + * + */ +template +class WorkStealingDequeue { + static_assert(!(COUNT & (COUNT - 1)), "COUNT must be a power of two"); + static constexpr size_t MASK = COUNT - 1; + + // mTop and mBottom must be signed integers. We use 64-bits atomics so we don't have + // to worry about wrapping around. + using index_t = int64_t; + + std::atomic mTop = { 0 }; // written/read in pop()/steal() + std::atomic mBottom = { 0 }; // written only in pop(), read in push(), steal() + + TYPE mItems[COUNT]; + + // NOTE: it's not safe to return a reference because getItemAt() can be called + // concurrently and the caller could std::move() the item unsafely. + TYPE getItemAt(index_t index) noexcept { return mItems[index & MASK]; } + + void setItemAt(index_t index, TYPE item) noexcept { mItems[index & MASK] = item; } + +public: + using value_type = TYPE; + + inline void push(TYPE item) noexcept; + inline TYPE pop() noexcept; + inline TYPE steal() noexcept; + + size_t getSize() const noexcept { return COUNT; } + + // for debugging only... 
+ size_t getCount() const noexcept { + index_t bottom = mBottom.load(std::memory_order_relaxed); + index_t top = mTop.load(std::memory_order_relaxed); + return bottom - top; + } +}; + +/* + * Adds an item at the BOTTOM of the queue. + * + * Must be called from the main thread. + */ +template +void WorkStealingDequeue::push(TYPE item) noexcept { + // std::memory_order_relaxed is sufficient because this load doesn't acquire anything from + // another thread. mBottom is only written in pop() which cannot be concurrent with push() + index_t bottom = mBottom.load(std::memory_order_relaxed); + setItemAt(bottom, item); + + // std::memory_order_release is used because we release the item we just pushed to other + // threads which are calling steal(). + mBottom.store(bottom + 1, std::memory_order_release); +} + +/* + * Removes an item from the BOTTOM of the queue. + * + * Must be called from the main thread. + */ +template +TYPE WorkStealingDequeue::pop() noexcept { + // std::memory_order_seq_cst is needed to guarantee ordering in steal() + // Note however that this is not a typical acquire/release operation: + // - not acquire because mBottom is only written in push() which is not concurrent + // - not release because we're not publishing anything to steal() here + // + // QUESTION: does this prevent mTop load below to be reordered before the "store" part of + // fetch_sub()? Hopefully it does. If not we'd need a full memory barrier. + // + index_t bottom = mBottom.fetch_sub(1, std::memory_order_seq_cst) - 1; + + // bottom could be -1 if we tried to pop() from an empty queue. This will be corrected below. + assert( bottom >= -1 ); + + // std::memory_order_seq_cst is needed to guarantee ordering in steal() + // Note however that this is not a typical acquire operation + // (i.e. 
other thread's writes of mTop don't publish data) + index_t top = mTop.load(std::memory_order_seq_cst); + + if (top < bottom) { + // Queue isn't empty and it's not the last item, just return it, this is the common case. + return getItemAt(bottom); + } + + TYPE item{}; + if (top == bottom) { + // we just took the last item + item = getItemAt(bottom); + + // Because we know we took the last item, we could be racing with steal() -- the last + // item being both at the top and bottom of the queue. + // We resolve this potential race by also stealing that item from ourselves. + if (mTop.compare_exchange_strong(top, top + 1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + // success: we stole our last item from ourself, meaning that a concurrent steal() + // would have failed. + // mTop now equals top + 1, we adjust top to make the queue empty. + top++; + } else { + // failure: mTop was not equal to top, which means the item was stolen under our feet. + // top now equals to mTop. Simply discard the item we just popped. + // The queue is now empty. + item = TYPE(); + } + } else { + // We could be here if the item was stolen just before we read mTop, we'll adjust + // mBottom below. + assert(top - bottom == 1); + } + + // std::memory_order_relaxed used because we're not publishing any data. + // no concurrent writes to mBottom possible, it's always safe to write mBottom. + mBottom.store(top, std::memory_order_relaxed); + return item; +} + +/* + * Steals an item from the TOP of another thread's queue. + * + * This can be called concurrently with steal(), push() or pop() + * + * steal() never fails, either there is an item and it atomically takes it, or there isn't and + * it returns an empty item. 
+ */ +template +TYPE WorkStealingDequeue::steal() noexcept { + while (true) { + /* + * Note: A Key component of this algorithm is that mTop is read before mBottom here + * (and observed as such in other threads) + */ + + // std::memory_order_seq_cst is needed to guarantee ordering in pop() + // Note however that this is not a typical acquire operation + // (i.e. other thread's writes of mTop don't publish data) + index_t top = mTop.load(std::memory_order_seq_cst); + + // std::memory_order_acquire is needed because we're acquiring items published in push(). + // std::memory_order_seq_cst is needed to guarantee ordering in pop() + index_t bottom = mBottom.load(std::memory_order_seq_cst); + + if (top >= bottom) { + // queue is empty + return TYPE(); + } + + // The queue isn't empty + TYPE item(getItemAt(top)); + if (mTop.compare_exchange_strong(top, top + 1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + // success: we stole an item, just return it. + return item; + } + // failure: the item we just tried to steal was pop()'ed under our feet, + // simply discard it; nothing to do -- it's okay to try again. + } +} + + +} // namespace utils + +#endif // TNT_UTILS_WORKSTEALINGDEQUEUE_H diff --git a/ios/include/utils/Zip2Iterator.h b/ios/include/utils/Zip2Iterator.h new file mode 100644 index 00000000..7b9f552a --- /dev/null +++ b/ios/include/utils/Zip2Iterator.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_ZIP2ITERATOR_H +#define TNT_UTILS_ZIP2ITERATOR_H + +#include +#include + +#include + +namespace utils { + +/* + * A random access iterator that wraps two other random access iterators. + * This mostly exists so that one can sort an array using values from another. + */ + +template +class Zip2Iterator { + std::pair mIt; + using Ref1 = typename std::iterator_traits::reference; + using Ref2 = typename std::iterator_traits::reference; + using Val1 = typename std::iterator_traits::value_type; + using Val2 = typename std::iterator_traits::value_type; + +public: + struct Ref : public std::pair { + using std::pair::pair; + using std::pair::operator=; + private: + friend void swap(Ref lhs, Ref rhs) { + using std::swap; + swap(lhs.first, rhs.first); + swap(lhs.second, rhs.second); + } + }; + + using value_type = std::pair; + using reference = Ref; + using pointer = value_type*; + using difference_type = ptrdiff_t; + using iterator_category = std::random_access_iterator_tag; + + Zip2Iterator() = default; + Zip2Iterator(It1 first, It2 second) : mIt({first, second}) {} + Zip2Iterator(Zip2Iterator const& rhs) noexcept = default; + Zip2Iterator& operator=(Zip2Iterator const& rhs) = default; + + reference operator*() const { return { *mIt.first, *mIt.second }; } + + reference operator[](size_t n) const { return *(*this + n); } + + Zip2Iterator& operator++() { + ++mIt.first; + ++mIt.second; + return *this; + } + + Zip2Iterator& operator--() { + --mIt.first; + --mIt.second; + return *this; + } + + // Postfix operator needed by Microsoft C++ + const Zip2Iterator operator++(int) { + Zip2Iterator t(*this); + mIt.first++; + mIt.second++; + return t; + } + + const Zip2Iterator operator--(int) { + Zip2Iterator t(*this); + mIt.first--; + mIt.second--; + return t; + } + + Zip2Iterator& operator+=(size_t v) { + mIt.first += v; + mIt.second += v; + return *this; 
+ } + + Zip2Iterator& operator-=(size_t v) { + mIt.first -= v; + mIt.second -= v; + return *this; + } + + Zip2Iterator operator+(size_t rhs) const { return { mIt.first + rhs, mIt.second + rhs }; } + Zip2Iterator operator+(size_t rhs) { return { mIt.first + rhs, mIt.second + rhs }; } + Zip2Iterator operator-(size_t rhs) const { return { mIt.first - rhs, mIt.second - rhs }; } + + difference_type operator-(Zip2Iterator const& rhs) const { return mIt.first - rhs.mIt.first; } + + bool operator==(Zip2Iterator const& rhs) const { return (mIt.first == rhs.mIt.first); } + bool operator!=(Zip2Iterator const& rhs) const { return (mIt.first != rhs.mIt.first); } + bool operator>=(Zip2Iterator const& rhs) const { return (mIt.first >= rhs.mIt.first); } + bool operator> (Zip2Iterator const& rhs) const { return (mIt.first > rhs.mIt.first); } + bool operator<=(Zip2Iterator const& rhs) const { return (mIt.first <= rhs.mIt.first); } + bool operator< (Zip2Iterator const& rhs) const { return (mIt.first < rhs.mIt.first); } +}; + +} // namespace utils + +#endif // TNT_UTILS_ZIP2ITERATOR_H diff --git a/ios/include/utils/algorithm.h b/ios/include/utils/algorithm.h index 72240c27..6d12ffc6 100644 --- a/ios/include/utils/algorithm.h +++ b/ios/include/utils/algorithm.h @@ -19,7 +19,6 @@ #include -#include // for std::less #include // for std::enable_if #include @@ -168,74 +167,6 @@ T log2i(T x) noexcept { return (sizeof(x) * 8 - 1u) - clz(x); } -/* - * branch-less version of std::lower_bound and std::upper_bound. - * These versions are intended to be fully inlined, which only happens when the size - * of the array is known at compile time. This code also performs better if the - * array is a power-of-two in size. - * - * These code works even if the conditions above are not met, and becomes a less-branches - * algorithm instead of a branch-less one! 
- */ - -template> -inline UTILS_PUBLIC -RandomAccessIterator lower_bound( - RandomAccessIterator first, RandomAccessIterator last, const T& value, - COMPARE comp = std::less(), - bool assume_power_of_two = false) { - size_t len = last - first; - - if (!assume_power_of_two) { - // handle non power-of-two sized arrays. If it's POT, the next line is a no-op - // and gets optimized out if the size is known at compile time. - len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 - size_t difference = (last - first) - len; - // If len was already a POT, then difference will be 0. - // We need to explicitly check this case to avoid dereferencing past the end of the array - first += !difference || comp(first[len], value) ? difference : 0; - } - - while (len) { - // The number of repetitions here doesn't affect the result. We manually unroll the loop - // twice, to guarantee we have at least two iterations without branches (for the case - // where the size is not known at compile time - first += comp(first[len >>= 1u], value) ? len : 0; - first += comp(first[len >>= 1u], value) ? len : 0; - } - first += comp(*first, value); - return first; -} - -template> -inline UTILS_PUBLIC -RandomAccessIterator upper_bound( - RandomAccessIterator first, RandomAccessIterator last, - const T& value, COMPARE comp = std::less(), - bool assume_power_of_two = false) { - size_t len = last - first; - - if (!assume_power_of_two) { - // handle non power-of-two sized arrays. If it's POT, the next line is a no-op - // and gets optimized out if the size is known at compile time. - len = 1u << (31 - clz(uint32_t(len))); // next power of two length / 2 - size_t difference = (last - first) - len; - // If len was already a POT, then difference will be 0. - // We need to explicitly check this case to avoid dereferencing past the end of the array - first += !difference || comp(value, first[len]) ? 
0 : difference; - } - - while (len) { - // The number of repetitions here doesn't affect the result. We manually unroll the loop - // twice, to guarantee we have at least two iterations without branches (for the case - // where the size is not known at compile time - first += !comp(value, first[len >>= 1u]) ? len : 0; - first += !comp(value, first[len >>= 1u]) ? len : 0; - } - first += !comp(value, *first); - return first; -} - template inline UTILS_PUBLIC RandomAccessIterator partition_point( diff --git a/ios/include/utils/android/ThermalManager.h b/ios/include/utils/android/ThermalManager.h new file mode 100644 index 00000000..6c303b04 --- /dev/null +++ b/ios/include/utils/android/ThermalManager.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_ANDROID_THERMALMANAGER_H +#define TNT_UTILS_ANDROID_THERMALMANAGER_H + +#include + +#include + +struct AThermalManager; + +namespace utils { + +class ThermalManager { +public: + enum class ThermalStatus : int8_t { + ERROR = -1, + NONE, + LIGHT, + MODERATE, + SEVERE, + CRITICAL, + EMERGENCY, + SHUTDOWN + }; + + ThermalManager(); + ~ThermalManager(); + + // Movable + ThermalManager(ThermalManager&& rhs) noexcept; + ThermalManager& operator=(ThermalManager&& rhs) noexcept; + + // not copiable + ThermalManager(ThermalManager const& rhs) = delete; + ThermalManager& operator=(ThermalManager const& rhs) = delete; + + ThermalStatus getCurrentThermalStatus() const noexcept; + +private: + AThermalManager* mThermalManager = nullptr; +}; + +} // namespace utils + +#endif // TNT_UTILS_ANDROID_THERMALMANAGER_H diff --git a/ios/include/utils/api_level.h b/ios/include/utils/api_level.h new file mode 100644 index 00000000..0d2ec830 --- /dev/null +++ b/ios/include/utils/api_level.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_APILEVEL_H +#define TNT_UTILS_APILEVEL_H + +#include + +namespace utils { + +/** + * Returns this platform's API level. On Android this function will return + * the API level as defined by the SDK API level version. If a platform does + * not have an API level, this function returns 0. 
+ */ +UTILS_PUBLIC +int api_level(); + +} // namespace utils + +#endif // TNT_UTILS_APILEVEL_H diff --git a/ios/include/utils/architecture.h b/ios/include/utils/architecture.h new file mode 100644 index 00000000..83b11794 --- /dev/null +++ b/ios/include/utils/architecture.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_ARCHITECTURE_H +#define TNT_UTILS_ARCHITECTURE_H + +#include + +namespace utils { + +constexpr size_t CACHELINE_SIZE = 64; + +} // namespace utils + +#endif // TNT_UTILS_ARCHITECTURE_H diff --git a/ios/include/utils/ashmem.h b/ios/include/utils/ashmem.h new file mode 100644 index 00000000..c9ca9e3e --- /dev/null +++ b/ios/include/utils/ashmem.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_ASHMEM_H +#define TNT_UTILS_ASHMEM_H + +#include + +namespace utils { + +int ashmem_create_region(const char *name, size_t size); + +} // namespace utils + +#endif // TNT_UTILS_ASHMEM_H diff --git a/ios/include/utils/generic/Condition.h b/ios/include/utils/generic/Condition.h new file mode 100644 index 00000000..accde2be --- /dev/null +++ b/ios/include/utils/generic/Condition.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_GENERIC_CONDITION_H +#define TNT_UTILS_GENERIC_CONDITION_H + +#include + +namespace utils { + +class Condition : public std::condition_variable { +public: + using std::condition_variable::condition_variable; + + inline void notify_n(size_t n) noexcept { + if (n == 1) { + notify_one(); + } else if (n > 1) { + notify_all(); + } + } +}; + +} // namespace utils + +#endif // TNT_UTILS_GENERIC_CONDITION_H diff --git a/ios/include/utils/generic/ThermalManager.h b/ios/include/utils/generic/ThermalManager.h new file mode 100644 index 00000000..2d0088e5 --- /dev/null +++ b/ios/include/utils/generic/ThermalManager.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_GENERIC_THERMALMANAGER_H +#define TNT_UTILS_GENERIC_THERMALMANAGER_H + +#include + +#include + +namespace utils { + +class ThermalManager { +public: + enum class ThermalStatus : int8_t { + ERROR = -1, + NONE, + LIGHT, + MODERATE, + SEVERE, + CRITICAL, + EMERGENCY, + SHUTDOWN + }; + + ThermalManager() = default; + + // Movable + ThermalManager(ThermalManager&& rhs) noexcept = default; + ThermalManager& operator=(ThermalManager&& rhs) noexcept = default; + + // not copiable + ThermalManager(ThermalManager const& rhs) = delete; + ThermalManager& operator=(ThermalManager const& rhs) = delete; + + ThermalStatus getCurrentThermalStatus() const noexcept { + return ThermalStatus::NONE; + } +}; + +} // namespace utils + +#endif // TNT_UTILS_GENERIC_THERMALMANAGER_H diff --git a/ios/include/utils/linux/Condition.h b/ios/include/utils/linux/Condition.h new file mode 100644 index 00000000..c2ff21f8 --- /dev/null +++ b/ios/include/utils/linux/Condition.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_LINUX_CONDITION_H +#define TNT_UTILS_LINUX_CONDITION_H + +#include +#include +#include // for cv_status +#include +#include // for unique_lock + +#include + +#include + +namespace utils { + +/* + * A very simple condition variable class that can be used as an (almost) drop-in replacement + * for std::condition_variable (doesn't have the timed wait() though). + * It is very low overhead as most of it is inlined. + */ + +class Condition { +public: + Condition() noexcept = default; + Condition(const Condition&) = delete; + Condition& operator=(const Condition&) = delete; + + void notify_all() noexcept { + pulse(std::numeric_limits::max()); + } + + void notify_one() noexcept { + pulse(1); + } + + void notify_n(size_t n) noexcept { + if (n > 0) pulse(n); + } + + void wait(std::unique_lock& lock) noexcept { + wait_until(lock.mutex(), false, nullptr); + } + + template + void wait(std::unique_lock& lock, P predicate) { + while (!predicate()) { + wait(lock); + } + } + + template + std::cv_status wait_until(std::unique_lock& lock, + const std::chrono::time_point& timeout_time) noexcept { + // convert to nanoseconds + uint64_t ns = std::chrono::duration(timeout_time.time_since_epoch()).count(); + using sec_t = decltype(timespec::tv_sec); + using nsec_t = decltype(timespec::tv_nsec); + timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) }; + return wait_until(lock.mutex(), false, &ts); + } + + template + std::cv_status wait_until(std::unique_lock& lock, + const std::chrono::time_point& timeout_time) noexcept { + // convert to nanoseconds + uint64_t ns = std::chrono::duration(timeout_time.time_since_epoch()).count(); + using sec_t = decltype(timespec::tv_sec); + using nsec_t = decltype(timespec::tv_nsec); + timespec ts{ sec_t(ns / 1000000000), nsec_t(ns % 1000000000) }; + return wait_until(lock.mutex(), true, &ts); + } + + template + bool 
wait_until(std::unique_lock& lock, + const std::chrono::time_point& timeout_time, P predicate) noexcept { + while (!predicate()) { + if (wait_until(lock, timeout_time) == std::cv_status::timeout) { + return predicate(); + } + } + return true; + } + + template + std::cv_status wait_for(std::unique_lock& lock, + const std::chrono::duration& rel_time) noexcept { + return wait_until(lock, std::chrono::steady_clock::now() + rel_time); + } + + template + bool wait_for(std::unique_lock& lock, + const std::chrono::duration& rel_time, P pred) noexcept { + return wait_until(lock, std::chrono::steady_clock::now() + rel_time, std::move(pred)); + } + +private: + std::atomic mState = { 0 }; + + void pulse(int threadCount) noexcept; + + std::cv_status wait_until(Mutex* lock, + bool realtimeClock, timespec* ts) noexcept; +}; + +} // namespace utils + +#endif // TNT_UTILS_LINUX_CONDITION_H diff --git a/ios/include/utils/linux/Mutex.h b/ios/include/utils/linux/Mutex.h new file mode 100644 index 00000000..2dcc7ebc --- /dev/null +++ b/ios/include/utils/linux/Mutex.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_LINUX_MUTEX_H +#define TNT_UTILS_LINUX_MUTEX_H + +#include + +#include + +namespace utils { + +/* + * A very simple mutex class that can be used as an (almost) drop-in replacement + * for std::mutex. + * It is very low overhead as most of it is inlined. 
+ */ + +class Mutex { +public: + constexpr Mutex() noexcept = default; + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; + + void lock() noexcept { + uint32_t old_state = UNLOCKED; + if (UTILS_UNLIKELY(!mState.compare_exchange_strong(old_state, + LOCKED, std::memory_order_acquire, std::memory_order_relaxed))) { + wait(); + } + } + + void unlock() noexcept { + if (UTILS_UNLIKELY(mState.exchange(UNLOCKED, std::memory_order_release) == LOCKED_CONTENDED)) { + wake(); + } + } + +private: + enum { + UNLOCKED = 0, LOCKED = 1, LOCKED_CONTENDED = 2 + }; + std::atomic mState = { UNLOCKED }; + + void wait() noexcept; + void wake() noexcept; +}; + +} // namespace utils + +#endif // TNT_UTILS_LINUX_MUTEX_H diff --git a/ios/include/utils/memalign.h b/ios/include/utils/memalign.h index 04cdf555..001e3a24 100644 --- a/ios/include/utils/memalign.h +++ b/ios/include/utils/memalign.h @@ -30,15 +30,13 @@ namespace utils { inline void* aligned_alloc(size_t size, size_t align) noexcept { + // 'align' must be a power of two and a multiple of sizeof(void*) + align = (align < sizeof(void*)) ? 
sizeof(void*) : align; assert(align && !(align & align - 1)); + assert((align % sizeof(void*)) == 0); void* p = nullptr; - // must be a power of two and >= sizeof(void*) - while (align < sizeof(void*)) { - align <<= 1u; - } - #if defined(WIN32) p = ::_aligned_malloc(size, align); #else diff --git a/ios/include/utils/ostream.h b/ios/include/utils/ostream.h index 80358fd4..0e5c4249 100644 --- a/ios/include/utils/ostream.h +++ b/ios/include/utils/ostream.h @@ -17,13 +17,14 @@ #ifndef TNT_UTILS_OSTREAM_H #define TNT_UTILS_OSTREAM_H -#include -#include - #include #include #include +#include +#include +#include + namespace utils::io { struct ostream_; @@ -60,6 +61,9 @@ public: ostream& operator<<(const char* string) noexcept; ostream& operator<<(const unsigned char* string) noexcept; + ostream& operator<<(std::string const& s) noexcept; + ostream& operator<<(std::string_view const& s) noexcept; + ostream& operator<<(ostream& (* f)(ostream&)) noexcept { return f(*this); } ostream& dec() noexcept; @@ -110,9 +114,6 @@ private: const char* getFormat(type t) const noexcept; }; -// handles std::string -inline ostream& operator << (ostream& o, std::string const& s) noexcept { return o << s.c_str(); } - // handles utils::bitset inline ostream& operator << (ostream& o, utils::bitset32 const& s) noexcept { return o << (void*)uintptr_t(s.getValue()); @@ -131,7 +132,7 @@ inline ostream& operator<<(ostream& stream, const VECTOR& v) { inline ostream& hex(ostream& s) noexcept { return s.hex(); } inline ostream& dec(ostream& s) noexcept { return s.dec(); } -inline ostream& endl(ostream& s) noexcept { s << "\n"; return s.flush(); } +inline ostream& endl(ostream& s) noexcept { s << '\n'; return s.flush(); } inline ostream& flush(ostream& s) noexcept { return s.flush(); } } // namespace utils::io diff --git a/ios/include/utils/sstream.h b/ios/include/utils/sstream.h new file mode 100644 index 00000000..8b0c4b4a --- /dev/null +++ b/ios/include/utils/sstream.h @@ -0,0 +1,32 @@ +/* + * 
Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_SSTREAM_H +#define TNT_UTILS_SSTREAM_H + +#include + +namespace utils::io { + +class sstream : public ostream { +public: + ostream& flush() noexcept override; + const char* c_str() const noexcept; +}; + +} // namespace utils::io + +#endif // TNT_UTILS_SSTREAM_H diff --git a/ios/include/utils/string.h b/ios/include/utils/string.h new file mode 100644 index 00000000..040044c0 --- /dev/null +++ b/ios/include/utils/string.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TNT_UTILS_STRING_H +#define TNT_UTILS_STRING_H + +#include + +namespace utils { + +float strtof_c(const char* start, char** end); + +} // namespace utils + +#endif // TNT_UTILS_STRING_H diff --git a/ios/include/utils/trap.h b/ios/include/utils/trap.h new file mode 100644 index 00000000..aedc3ddb --- /dev/null +++ b/ios/include/utils/trap.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_TRAP_H +#define TNT_UTILS_TRAP_H + +#include + +#if defined(WIN32) +#include +#include +#endif + +namespace utils { + +// This can be used as a programmatic breakpoint. +inline void debug_trap() noexcept { +#if defined(WIN32) + DebugBreak(); +#else + std::raise(SIGINT); +#endif +} + +} // namespace utils + +#endif // TNT_UTILS_TRAP_H diff --git a/ios/include/utils/vector.h b/ios/include/utils/vector.h new file mode 100644 index 00000000..96f144c2 --- /dev/null +++ b/ios/include/utils/vector.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TNT_UTILS_VECTOR_H +#define TNT_UTILS_VECTOR_H + +#include +#include + +namespace utils { + +/** + * Inserts the specified item in the vector at its sorted position. + */ +template +static inline void insert_sorted(std::vector& v, T item) { + auto pos = std::lower_bound(v.begin(), v.end(), item); + v.insert(pos, std::move(item)); +} + +/** + * Inserts the specified item in the vector at its sorted position. + * The item type must implement the < operator. If the specified + * item is already present in the vector, this method returns without + * inserting the item again. + * + * @return True if the item was inserted at its sorted position, false + * if the item already exists in the vector. + */ +template +static inline bool insert_sorted_unique(std::vector& v, T item) { + if (UTILS_LIKELY(v.size() == 0 || v.back() < item)) { + v.push_back(item); + return true; + } + + auto pos = std::lower_bound(v.begin(), v.end(), item); + if (UTILS_LIKELY(pos == v.end() || item < *pos)) { + v.insert(pos, std::move(item)); + return true; + } + + return false; +} + +} // end utils namespace + +#endif // TNT_UTILS_VECTOR_H diff --git a/ios/include/utils/win32/stdtypes.h b/ios/include/utils/win32/stdtypes.h new file mode 100644 index 00000000..f593d8cb --- /dev/null +++ b/ios/include/utils/win32/stdtypes.h @@ -0,0 +1,33 @@ +/* +* Copyright (C) 2018 The Android Open Source Project +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#ifndef TNT_UTILS_WIN32_STDTYPES_H +#define TNT_UTILS_WIN32_STDTYPES_H + +#if defined(WIN32) +#include + +// Copied from linux libc sys/stat.h: +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#define PATH_MAX (MAX_PATH) + +// For getcwd +#include +#define getcwd _getcwd + +#endif +#endif // TNT_UTILS_WIN32_STDTYPES_H diff --git a/ios/include/viewer/AutomationEngine.h b/ios/include/viewer/AutomationEngine.h index ac8b98f7..5617d82b 100644 --- a/ios/include/viewer/AutomationEngine.h +++ b/ios/include/viewer/AutomationEngine.h @@ -40,7 +40,7 @@ namespace viewer { * * When executing a test, clients should call tick() after each frame is rendered, which gives * automation an opportunity to push settings to Filament, increment the current test index (if - * enough time has elapsed), and request an asychronous screenshot. + * enough time has elapsed), and request an asynchronous screenshot. * * The time to sleep between tests is configurable and can be set to zero. Automation also waits a * specified minimum number of frames between tests. 
diff --git a/ios/include/viewer/Settings.h b/ios/include/viewer/Settings.h index 248553f1..45c9ce6d 100644 --- a/ios/include/viewer/Settings.h +++ b/ios/include/viewer/Settings.h @@ -72,6 +72,7 @@ using MultiSampleAntiAliasingOptions = filament::View::MultiSampleAntiAliasingOp using TemporalAntiAliasingOptions = filament::View::TemporalAntiAliasingOptions; using VignetteOptions = filament::View::VignetteOptions; using VsmShadowOptions = filament::View::VsmShadowOptions; +using GuardBandOptions = filament::View::GuardBandOptions; using LightManager = filament::LightManager; // These functions push all editable property values to their respective Filament objects. @@ -171,6 +172,7 @@ struct ViewSettings { TemporalAntiAliasingOptions taa; VignetteOptions vignette; VsmShadowOptions vsmShadowOptions; + GuardBandOptions guardBand; // Custom View Options ColorGradingSettings colorGrading; @@ -194,7 +196,7 @@ struct LightSettings { LightManager::ShadowOptions shadowOptions; SoftShadowOptions softShadowOptions; float sunlightIntensity = 100000.0f; - math::float3 sunlightDirection = {0.6, -1.0, -0.8};; + math::float3 sunlightDirection = {0.6, -1.0, -0.8}; math::float3 sunlightColor = filament::Color::toLinear({ 0.98, 0.92, 0.89}); float iblIntensity = 30000.0f; float iblRotation = 0.0f; diff --git a/ios/include/viewer/SimpleViewer.h b/ios/include/viewer/ViewerGui.h similarity index 80% rename from ios/include/viewer/SimpleViewer.h rename to ios/include/viewer/ViewerGui.h index 24cae0c2..8192c32e 100644 --- a/ios/include/viewer/SimpleViewer.h +++ b/ios/include/viewer/ViewerGui.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef VIEWER_SIMPLEVIEWER_H -#define VIEWER_SIMPLEVIEWER_H +#ifndef VIEWER_VIEWERGUI_H +#define VIEWER_VIEWERGUI_H #include #include @@ -26,6 +26,7 @@ #include #include +#include #include @@ -35,6 +36,7 @@ #include #include +#include #include namespace filagui { @@ -45,15 +47,15 @@ namespace filament { namespace viewer { /** - * \class SimpleViewer SimpleViewer.h viewer/SimpleViewer.h - * \brief Manages the state for a simple glTF viewer with imgui controls and a tree view. + * \class ViewerGui ViewerGui.h viewer/ViewerGui.h + * \brief Builds ImGui widgets for a simple glTF viewer and manages the associated state. * * This is a utility that can be used across multiple platforms, including web. * * \note If you don't need ImGui controls, there is no need to use this class, just use AssetLoader * instead. */ -class UTILS_PUBLIC SimpleViewer { +class UTILS_PUBLIC ViewerGui { public: using Animator = gltfio::Animator; using FilamentAsset = gltfio::FilamentAsset; @@ -62,29 +64,39 @@ public: static constexpr int DEFAULT_SIDEBAR_WIDTH = 350; /** - * Constructs a SimpleViewer that has a fixed association with the given Filament objects. + * Constructs a ViewerGui that has a fixed association with the given Filament objects. * * Upon construction, the simple viewer may create some additional Filament objects (such as * light sources) that it owns. */ - SimpleViewer(filament::Engine* engine, filament::Scene* scene, filament::View* view, + ViewerGui(filament::Engine* engine, filament::Scene* scene, filament::View* view, int sidebarWidth = DEFAULT_SIDEBAR_WIDTH); /** - * Destroys the SimpleViewer and any Filament entities that it owns. + * Destroys the ViewerGui and any Filament entities that it owns. */ - ~SimpleViewer(); + ~ViewerGui(); /** - * Adds the asset's ready-to-render entities into the scene. + * Sets the viewer's current asset. * * The viewer does not claim ownership over the asset or its entities. 
Clients should use * AssetLoader and ResourceLoader to load an asset before passing it in. * + * This method does not add renderables to the scene; see populateScene(). + * * @param asset The asset to view. * @param instanceToAnimate Optional instance from which to get the animator. */ - void populateScene(FilamentAsset* asset, FilamentInstance* instanceToAnimate = nullptr); + void setAsset(FilamentAsset* asset, FilamentInstance* instanceToAnimate = nullptr); + + /** + * Adds the asset's ready-to-render entities into the scene. + * + * This is used for asynchronous loading. It can be called once per frame to gradually add + * entities into the scene as their textures are loaded. + */ + void populateScene(); /** * Removes the current asset from the viewer. @@ -126,7 +138,7 @@ public: void renderUserInterface(float timeStepInSeconds, filament::View* guiView, float pixelRatio); /** - * Event-passing methods, useful only when SimpleViewer manages its own instance of ImGuiHelper. + * Event-passing methods, useful only when ViewerGui manages its own instance of ImGuiHelper. * The key codes used in these methods are just normal ASCII/ANSI codes. * @{ */ @@ -218,8 +230,14 @@ public: int getCurrentCamera() const { return mCurrentCamera; } private: + using SceneMask = gltfio::NodeManager::SceneMask; + void updateIndirectLight(); + bool isRemoteMode() const { return mAsset == nullptr; } + + void sceneSelectionUI(); + // Immutable properties set from the constructor. filament::Engine* const mEngine; filament::Scene* const mScene; @@ -238,7 +256,6 @@ private: // Properties that can be changed from the UI. 
int mCurrentAnimation = 1; // It is a 1-based index and 0 means not playing animation int mCurrentVariant = 0; - bool mResetAnimation = true; bool mEnableWireframe = false; int mVsmMsaaSamplesLog2 = 1; Settings mSettings; @@ -246,10 +263,19 @@ private: uint32_t mFlags; utils::Entity mCurrentMorphingEntity; std::vector mMorphWeights; + SceneMask mVisibleScenes; + bool mShowingRestPose = false; // 0 is the default "free camera". Additional cameras come from the gltf file (1-based index). int mCurrentCamera = 0; + // Cross fade animation parameters. + float mCrossFadeDuration = 0.5f; // number of seconds to transition between animations + int mPreviousAnimation = 0; // one-based index of the previous animation + double mCurrentStartTime = 0.0f; // start time of most recent cross-fade (seconds) + double mPreviousStartTime = 0.0f; // start time of previous cross-fade (seconds) + bool mResetAnimation = true; // set when building ImGui widgets, honored in applyAnimation + // Color grading UI state. float mToneMapPlot[1024]; float mRangePlot[1024 * 3]; @@ -262,4 +288,4 @@ filament::math::mat4f fitIntoUnitCube(const filament::Aabb& bounds, float zoffse } // namespace viewer } // namespace filament -#endif // VIEWER_SIMPLEVIEWER_H +#endif // VIEWER_VIEWERGUI_H