Update thirdparty libs

fmt: 9.0.0 --> 9.1.0
llhttp: 6.0.7 --> 6.0.9
astc-encoder: 4.0.0 --> 4.1.0
webp: 1.2.2 --> 1.2.4
flatbuffers: 2.0.0 --> 2.0.8
halx99 2022-09-15 20:15:28 +08:00
parent 0bd73df1dc
commit fad9afbc9a
95 changed files with 14618 additions and 11197 deletions

View File

@@ -19,10 +19,10 @@ jobs:
- uses: actions/checkout@v2
- name: Run clang-format lint
uses: DoozyX/clang-format-lint-action@v0.13
uses: DoozyX/clang-format-lint-action@v0.14
with:
source: './core ./extensions ./tests ./templates'
exclude: './thidrparty ./extensions/fairygui ./extensions/scripting/lua-bindings/auto ./extensions/spine ./tests/fairygui-tests'
exclude: './thidrparty ./extensions/fairygui ./extensions/scripting/lua-bindings/auto ./extensions/spine ./tests/fairygui-tests ./extensions/**/*_generated.h'
extensions: 'h,cpp,c,mm'
clangFormatVersion: 13
inplace: True

View File

@@ -19,7 +19,7 @@
#include <vector>
#include <deque>
#include "astc/astcenc.h"
#include "astc/astcenc_internal.h"
#include "astc/astcenc_internal_entry.h"
#include "yasio/detail/utils.hpp"
#define ASTCDEC_NO_CONTEXT 1
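
The include rename here tracks astc-encoder 4.x, which moved the internal entry points into astcenc_internal_entry.h; the engine's decoder wrapper builds against those internals (with ASTCDEC_NO_CONTEXT) rather than the public context API. For reference, a minimal decompress-only sketch against the stable public API in astc/astcenc.h could look like the following; the 4x4 block footprint and the helper name are illustrative assumptions, not code from this commit:

#include <cstdint>
#include <vector>
#include "astc/astcenc.h"

// Decode one ASTC 4x4 LDR payload into tightly packed RGBA8.
// Returns false on any astcenc error.
bool decode_astc_4x4(const uint8_t* in, size_t in_len,
                     uint32_t width, uint32_t height,
                     std::vector<uint8_t>& rgba_out)
{
    astcenc_config config{};
    if (astcenc_config_init(ASTCENC_PRF_LDR, 4, 4, 1, ASTCENC_PRE_MEDIUM,
                            ASTCENC_FLG_DECOMPRESS_ONLY, &config) != ASTCENC_SUCCESS)
        return false;

    astcenc_context* context = nullptr;
    if (astcenc_context_alloc(&config, 1, &context) != ASTCENC_SUCCESS)
        return false;

    rgba_out.resize(size_t(width) * height * 4);
    void* slices = rgba_out.data();  // one 2D slice
    astcenc_image image{width, height, 1, ASTCENC_TYPE_U8, &slices};
    const astcenc_swizzle swizzle{ASTCENC_SWZ_R, ASTCENC_SWZ_G,
                                  ASTCENC_SWZ_B, ASTCENC_SWZ_A};

    astcenc_error err = astcenc_decompress_image(context, in, in_len,
                                                 &image, &swizzle, 0);
    astcenc_context_free(context);
    return err == ASTCENC_SUCCESS;
}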

View File

@@ -1,98 +1,120 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_CSLANGUAGEDATABINARY_FLATBUFFERS_H_
#define FLATBUFFERS_GENERATED_CSLANGUAGEDATABINARY_FLATBUFFERS_H_
#include "flatbuffers/flatbuffers.h"
namespace flatbuffers
{
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
FLATBUFFERS_VERSION_MINOR == 0 &&
FLATBUFFERS_VERSION_REVISION == 8,
"Non-compatible flatbuffers version included");
namespace flatbuffers {
struct LanguageItem;
struct LanguageItemBuilder;
struct LanguageSet;
struct LanguageSetBuilder;
struct LanguageItem FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct LanguageItem FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef LanguageItemBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_KEY = 4,
VT_VALUE = 6
};
const flatbuffers::String* key() const { return GetPointer<const flatbuffers::String*>(VT_KEY); }
const flatbuffers::String* value() const { return GetPointer<const flatbuffers::String*>(VT_VALUE); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_KEY) && verifier.VerifyString(key()) &&
VerifyOffset(verifier, VT_VALUE) && verifier.VerifyString(value()) && verifier.EndTable();
const flatbuffers::String *key() const {
return GetPointer<const flatbuffers::String *>(VT_KEY);
}
const flatbuffers::String *value() const {
return GetPointer<const flatbuffers::String *>(VT_VALUE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_KEY) &&
verifier.VerifyString(key()) &&
VerifyOffset(verifier, VT_VALUE) &&
verifier.VerifyString(value()) &&
verifier.EndTable();
}
};
struct LanguageItemBuilder
{
struct LanguageItemBuilder {
typedef LanguageItem Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_key(flatbuffers::Offset<flatbuffers::String> key) { fbb_.AddOffset(LanguageItem::VT_KEY, key); }
void add_value(flatbuffers::Offset<flatbuffers::String> value) { fbb_.AddOffset(LanguageItem::VT_VALUE, value); }
explicit LanguageItemBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
LanguageItemBuilder& operator=(const LanguageItemBuilder&);
flatbuffers::Offset<LanguageItem> Finish()
{
void add_key(flatbuffers::Offset<flatbuffers::String> key) {
fbb_.AddOffset(LanguageItem::VT_KEY, key);
}
void add_value(flatbuffers::Offset<flatbuffers::String> value) {
fbb_.AddOffset(LanguageItem::VT_VALUE, value);
}
explicit LanguageItemBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<LanguageItem> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<LanguageItem>(end);
return o;
}
};
inline flatbuffers::Offset<LanguageItem> CreateLanguageItem(flatbuffers::FlatBufferBuilder& _fbb,
inline flatbuffers::Offset<LanguageItem> CreateLanguageItem(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::String> key = 0,
flatbuffers::Offset<flatbuffers::String> value = 0)
{
flatbuffers::Offset<flatbuffers::String> value = 0) {
LanguageItemBuilder builder_(_fbb);
builder_.add_value(value);
builder_.add_key(key);
return builder_.Finish();
}
inline flatbuffers::Offset<LanguageItem> CreateLanguageItemDirect(flatbuffers::FlatBufferBuilder& _fbb,
inline flatbuffers::Offset<LanguageItem> CreateLanguageItemDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *key = nullptr,
const char* value = nullptr)
{
const char *value = nullptr) {
auto key__ = key ? _fbb.CreateString(key) : 0;
auto value__ = value ? _fbb.CreateString(value) : 0;
return flatbuffers::CreateLanguageItem(_fbb, key__, value__);
return flatbuffers::CreateLanguageItem(
_fbb,
key__,
value__);
}
struct LanguageSet FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct LanguageSet FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef LanguageSetBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_LANGUAGEITEMS = 4
};
const flatbuffers::Vector<flatbuffers::Offset<LanguageItem>>* languageItems() const
{
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<LanguageItem>>*>(VT_LANGUAGEITEMS);
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::LanguageItem>> *languageItems() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::LanguageItem>> *>(VT_LANGUAGEITEMS);
}
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_LANGUAGEITEMS) &&
verifier.VerifyVector(languageItems()) && verifier.VerifyVectorOfTables(languageItems()) &&
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_LANGUAGEITEMS) &&
verifier.VerifyVector(languageItems()) &&
verifier.VerifyVectorOfTables(languageItems()) &&
verifier.EndTable();
}
};
struct LanguageSetBuilder
{
struct LanguageSetBuilder {
typedef LanguageSet Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_languageItems(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<LanguageItem>>> languageItems)
{
void add_languageItems(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::LanguageItem>>> languageItems) {
fbb_.AddOffset(LanguageSet::VT_LANGUAGEITEMS, languageItems);
}
explicit LanguageSetBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
LanguageSetBuilder& operator=(const LanguageSetBuilder&);
flatbuffers::Offset<LanguageSet> Finish()
{
explicit LanguageSetBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<LanguageSet> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<LanguageSet>(end);
return o;
@@ -101,8 +123,7 @@ struct LanguageSetBuilder
inline flatbuffers::Offset<LanguageSet> CreateLanguageSet(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<LanguageItem>>> languageItems = 0)
{
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::LanguageItem>>> languageItems = 0) {
LanguageSetBuilder builder_(_fbb);
builder_.add_languageItems(languageItems);
return builder_.Finish();
@@ -110,41 +131,40 @@ inline flatbuffers::Offset<LanguageSet> CreateLanguageSet(
inline flatbuffers::Offset<LanguageSet> CreateLanguageSetDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<flatbuffers::Offset<LanguageItem>>* languageItems = nullptr)
{
auto languageItems__ = languageItems ? _fbb.CreateVector<flatbuffers::Offset<LanguageItem>>(*languageItems) : 0;
return flatbuffers::CreateLanguageSet(_fbb, languageItems__);
const std::vector<flatbuffers::Offset<flatbuffers::LanguageItem>> *languageItems = nullptr) {
auto languageItems__ = languageItems ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::LanguageItem>>(*languageItems) : 0;
return flatbuffers::CreateLanguageSet(
_fbb,
languageItems__);
}
inline const flatbuffers::LanguageSet* GetLanguageSet(const void* buf)
{
inline const flatbuffers::LanguageSet *GetLanguageSet(const void *buf) {
return flatbuffers::GetRoot<flatbuffers::LanguageSet>(buf);
}
inline const flatbuffers::LanguageSet* GetSizePrefixedLanguageSet(const void* buf)
{
inline const flatbuffers::LanguageSet *GetSizePrefixedLanguageSet(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<flatbuffers::LanguageSet>(buf);
}
inline bool VerifyLanguageSetBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifyLanguageSetBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<flatbuffers::LanguageSet>(nullptr);
}
inline bool VerifySizePrefixedLanguageSetBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifySizePrefixedLanguageSetBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<flatbuffers::LanguageSet>(nullptr);
}
inline void FinishLanguageSetBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::LanguageSet> root)
{
inline void FinishLanguageSetBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::LanguageSet> root) {
fbb.Finish(root);
}
inline void FinishSizePrefixedLanguageSetBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::LanguageSet> root)
{
inline void FinishSizePrefixedLanguageSetBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::LanguageSet> root) {
fbb.FinishSizePrefixed(root);
}
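
A short usage sketch for the generated API above, assuming the header ships as CSLanguageDataBinary_generated.h (the guard name implies this); the sample strings are illustrative. It builds a LanguageSet with the Direct helpers, verifies the finished buffer, then walks the items through the reader accessors:

#include <cstdio>
#include <vector>
#include "CSLanguageDataBinary_generated.h"

static void buildAndReadLanguageSet()
{
    flatbuffers::FlatBufferBuilder fbb;

    // Build two key/value items, then the set that owns them.
    std::vector<flatbuffers::Offset<flatbuffers::LanguageItem>> items;
    items.push_back(flatbuffers::CreateLanguageItemDirect(fbb, "TITLE", "Hello"));
    items.push_back(flatbuffers::CreateLanguageItemDirect(fbb, "QUIT", "Bye"));
    flatbuffers::FinishLanguageSetBuffer(
        fbb, flatbuffers::CreateLanguageSetDirect(fbb, &items));

    // Verify before trusting the buffer, as a loader of serialized
    // language data typically would.
    flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
    if (!flatbuffers::VerifyLanguageSetBuffer(verifier))
        return;

    auto* root = flatbuffers::GetLanguageSet(fbb.GetBufferPointer());
    for (auto item : *root->languageItems())
        printf("%s => %s\n", item->key()->c_str(), item->value()->c_str());
}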

View File

@@ -1,80 +1,113 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_CSPARSE3DBINARY_FLATBUFFERS_H_
#define FLATBUFFERS_GENERATED_CSPARSE3DBINARY_FLATBUFFERS_H_
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
FLATBUFFERS_VERSION_MINOR == 0 &&
FLATBUFFERS_VERSION_REVISION == 8,
"Non-compatible flatbuffers version included");
#include "CSParseBinary_generated.h"
namespace flatbuffers
{
namespace flatbuffers {
struct Node3DOption;
struct Node3DOptionBuilder;
struct Sprite3DOptions;
struct Sprite3DOptionsBuilder;
struct Particle3DOptions;
struct Particle3DOptionsBuilder;
struct UserCameraOptions;
struct UserCameraOptionsBuilder;
struct GameNode3DOption;
struct GameNode3DOptionBuilder;
struct Light3DOption;
struct Light3DOptionBuilder;
struct Node3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct Node3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef Node3DOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODEOPTIONS = 4,
VT_POSITION3D = 6,
VT_ROTATION3D = 8,
VT_SCALE3D = 10,
VT_CAMERAMASK = 12
};
const WidgetOptions* nodeOptions() const { return GetPointer<const WidgetOptions*>(VT_NODEOPTIONS); }
const FVec3* position3D() const { return GetStruct<const FVec3*>(VT_POSITION3D); }
const FVec3* rotation3D() const { return GetStruct<const FVec3*>(VT_ROTATION3D); }
const FVec3* scale3D() const { return GetStruct<const FVec3*>(VT_SCALE3D); }
int32_t cameramask() const { return GetField<int32_t>(VT_CAMERAMASK, 0); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) && VerifyField<FVec3>(verifier, VT_POSITION3D) &&
VerifyField<FVec3>(verifier, VT_ROTATION3D) && VerifyField<FVec3>(verifier, VT_SCALE3D) &&
VerifyField<int32_t>(verifier, VT_CAMERAMASK) && verifier.EndTable();
const flatbuffers::WidgetOptions *nodeOptions() const {
return GetPointer<const flatbuffers::WidgetOptions *>(VT_NODEOPTIONS);
}
const flatbuffers::FVec3 *position3D() const {
return GetStruct<const flatbuffers::FVec3 *>(VT_POSITION3D);
}
const flatbuffers::FVec3 *rotation3D() const {
return GetStruct<const flatbuffers::FVec3 *>(VT_ROTATION3D);
}
const flatbuffers::FVec3 *scale3D() const {
return GetStruct<const flatbuffers::FVec3 *>(VT_SCALE3D);
}
int32_t cameramask() const {
return GetField<int32_t>(VT_CAMERAMASK, 0);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) &&
VerifyField<flatbuffers::FVec3>(verifier, VT_POSITION3D, 4) &&
VerifyField<flatbuffers::FVec3>(verifier, VT_ROTATION3D, 4) &&
VerifyField<flatbuffers::FVec3>(verifier, VT_SCALE3D, 4) &&
VerifyField<int32_t>(verifier, VT_CAMERAMASK, 4) &&
verifier.EndTable();
}
};
struct Node3DOptionBuilder
{
struct Node3DOptionBuilder {
typedef Node3DOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_nodeOptions(flatbuffers::Offset<WidgetOptions> nodeOptions)
{
void add_nodeOptions(flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions) {
fbb_.AddOffset(Node3DOption::VT_NODEOPTIONS, nodeOptions);
}
void add_position3D(const FVec3* position3D) { fbb_.AddStruct(Node3DOption::VT_POSITION3D, position3D); }
void add_rotation3D(const FVec3* rotation3D) { fbb_.AddStruct(Node3DOption::VT_ROTATION3D, rotation3D); }
void add_scale3D(const FVec3* scale3D) { fbb_.AddStruct(Node3DOption::VT_SCALE3D, scale3D); }
void add_cameramask(int32_t cameramask) { fbb_.AddElement<int32_t>(Node3DOption::VT_CAMERAMASK, cameramask, 0); }
explicit Node3DOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
Node3DOptionBuilder& operator=(const Node3DOptionBuilder&);
flatbuffers::Offset<Node3DOption> Finish()
{
void add_position3D(const flatbuffers::FVec3 *position3D) {
fbb_.AddStruct(Node3DOption::VT_POSITION3D, position3D);
}
void add_rotation3D(const flatbuffers::FVec3 *rotation3D) {
fbb_.AddStruct(Node3DOption::VT_ROTATION3D, rotation3D);
}
void add_scale3D(const flatbuffers::FVec3 *scale3D) {
fbb_.AddStruct(Node3DOption::VT_SCALE3D, scale3D);
}
void add_cameramask(int32_t cameramask) {
fbb_.AddElement<int32_t>(Node3DOption::VT_CAMERAMASK, cameramask, 0);
}
explicit Node3DOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<Node3DOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Node3DOption>(end);
return o;
}
};
inline flatbuffers::Offset<Node3DOption> CreateNode3DOption(flatbuffers::FlatBufferBuilder& _fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
const FVec3* position3D = 0,
const FVec3* rotation3D = 0,
const FVec3* scale3D = 0,
int32_t cameramask = 0)
{
inline flatbuffers::Offset<Node3DOption> CreateNode3DOption(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
const flatbuffers::FVec3 *position3D = nullptr,
const flatbuffers::FVec3 *rotation3D = nullptr,
const flatbuffers::FVec3 *scale3D = nullptr,
int32_t cameramask = 0) {
Node3DOptionBuilder builder_(_fbb);
builder_.add_cameramask(cameramask);
builder_.add_scale3D(scale3D);
@@ -84,69 +117,80 @@ inline flatbuffers::Offset<Node3DOption> CreateNode3DOption(flatbuffers::FlatBuf
return builder_.Finish();
}
struct Sprite3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct Sprite3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef Sprite3DOptionsBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODE3DOPTION = 4,
VT_FILEDATA = 6,
VT_RUNACTION = 8,
VT_ISFLIPPED = 10,
VT_LIGHTFLAG = 12
};
const Node3DOption* node3DOption() const { return GetPointer<const Node3DOption*>(VT_NODE3DOPTION); }
const ResourceData* fileData() const { return GetPointer<const ResourceData*>(VT_FILEDATA); }
bool runAction() const { return GetField<uint8_t>(VT_RUNACTION, 0) != 0; }
bool isFlipped() const { return GetField<uint8_t>(VT_ISFLIPPED, 0) != 0; }
int32_t lightFlag() const { return GetField<int32_t>(VT_LIGHTFLAG, 0); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) && VerifyOffset(verifier, VT_FILEDATA) &&
verifier.VerifyTable(fileData()) && VerifyField<uint8_t>(verifier, VT_RUNACTION) &&
VerifyField<uint8_t>(verifier, VT_ISFLIPPED) && VerifyField<int32_t>(verifier, VT_LIGHTFLAG) &&
const flatbuffers::Node3DOption *node3DOption() const {
return GetPointer<const flatbuffers::Node3DOption *>(VT_NODE3DOPTION);
}
const flatbuffers::ResourceData *fileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_FILEDATA);
}
bool runAction() const {
return GetField<uint8_t>(VT_RUNACTION, 0) != 0;
}
bool isFlipped() const {
return GetField<uint8_t>(VT_ISFLIPPED, 0) != 0;
}
int32_t lightFlag() const {
return GetField<int32_t>(VT_LIGHTFLAG, 0);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) &&
VerifyOffset(verifier, VT_FILEDATA) &&
verifier.VerifyTable(fileData()) &&
VerifyField<uint8_t>(verifier, VT_RUNACTION, 1) &&
VerifyField<uint8_t>(verifier, VT_ISFLIPPED, 1) &&
VerifyField<int32_t>(verifier, VT_LIGHTFLAG, 4) &&
verifier.EndTable();
}
};
struct Sprite3DOptionsBuilder
{
struct Sprite3DOptionsBuilder {
typedef Sprite3DOptions Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_node3DOption(flatbuffers::Offset<Node3DOption> node3DOption)
{
void add_node3DOption(flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption) {
fbb_.AddOffset(Sprite3DOptions::VT_NODE3DOPTION, node3DOption);
}
void add_fileData(flatbuffers::Offset<ResourceData> fileData)
{
void add_fileData(flatbuffers::Offset<flatbuffers::ResourceData> fileData) {
fbb_.AddOffset(Sprite3DOptions::VT_FILEDATA, fileData);
}
void add_runAction(bool runAction)
{
void add_runAction(bool runAction) {
fbb_.AddElement<uint8_t>(Sprite3DOptions::VT_RUNACTION, static_cast<uint8_t>(runAction), 0);
}
void add_isFlipped(bool isFlipped)
{
void add_isFlipped(bool isFlipped) {
fbb_.AddElement<uint8_t>(Sprite3DOptions::VT_ISFLIPPED, static_cast<uint8_t>(isFlipped), 0);
}
void add_lightFlag(int32_t lightFlag) { fbb_.AddElement<int32_t>(Sprite3DOptions::VT_LIGHTFLAG, lightFlag, 0); }
explicit Sprite3DOptionsBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
Sprite3DOptionsBuilder& operator=(const Sprite3DOptionsBuilder&);
flatbuffers::Offset<Sprite3DOptions> Finish()
{
void add_lightFlag(int32_t lightFlag) {
fbb_.AddElement<int32_t>(Sprite3DOptions::VT_LIGHTFLAG, lightFlag, 0);
}
explicit Sprite3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<Sprite3DOptions> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Sprite3DOptions>(end);
return o;
}
};
inline flatbuffers::Offset<Sprite3DOptions> CreateSprite3DOptions(flatbuffers::FlatBufferBuilder& _fbb,
flatbuffers::Offset<Node3DOption> node3DOption = 0,
flatbuffers::Offset<ResourceData> fileData = 0,
inline flatbuffers::Offset<Sprite3DOptions> CreateSprite3DOptions(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption = 0,
flatbuffers::Offset<flatbuffers::ResourceData> fileData = 0,
bool runAction = false,
bool isFlipped = false,
int32_t lightFlag = 0)
{
int32_t lightFlag = 0) {
Sprite3DOptionsBuilder builder_(_fbb);
builder_.add_lightFlag(lightFlag);
builder_.add_fileData(fileData);
@@ -156,39 +200,43 @@ inline flatbuffers::Offset<Sprite3DOptions> CreateSprite3DOptions(flatbuffers::F
return builder_.Finish();
}
struct Particle3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct Particle3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef Particle3DOptionsBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODE3DOPTION = 4,
VT_FILEDATA = 6
};
const Node3DOption* node3DOption() const { return GetPointer<const Node3DOption*>(VT_NODE3DOPTION); }
const ResourceData* fileData() const { return GetPointer<const ResourceData*>(VT_FILEDATA); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) && VerifyOffset(verifier, VT_FILEDATA) &&
verifier.VerifyTable(fileData()) && verifier.EndTable();
const flatbuffers::Node3DOption *node3DOption() const {
return GetPointer<const flatbuffers::Node3DOption *>(VT_NODE3DOPTION);
}
const flatbuffers::ResourceData *fileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_FILEDATA);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) &&
VerifyOffset(verifier, VT_FILEDATA) &&
verifier.VerifyTable(fileData()) &&
verifier.EndTable();
}
};
struct Particle3DOptionsBuilder
{
struct Particle3DOptionsBuilder {
typedef Particle3DOptions Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_node3DOption(flatbuffers::Offset<Node3DOption> node3DOption)
{
void add_node3DOption(flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption) {
fbb_.AddOffset(Particle3DOptions::VT_NODE3DOPTION, node3DOption);
}
void add_fileData(flatbuffers::Offset<ResourceData> fileData)
{
void add_fileData(flatbuffers::Offset<flatbuffers::ResourceData> fileData) {
fbb_.AddOffset(Particle3DOptions::VT_FILEDATA, fileData);
}
explicit Particle3DOptionsBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
Particle3DOptionsBuilder& operator=(const Particle3DOptionsBuilder&);
flatbuffers::Offset<Particle3DOptions> Finish()
{
explicit Particle3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<Particle3DOptions> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Particle3DOptions>(end);
return o;
@@ -197,19 +245,17 @@ struct Particle3DOptionsBuilder
inline flatbuffers::Offset<Particle3DOptions> CreateParticle3DOptions(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<Node3DOption> node3DOption = 0,
flatbuffers::Offset<ResourceData> fileData = 0)
{
flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption = 0,
flatbuffers::Offset<flatbuffers::ResourceData> fileData = 0) {
Particle3DOptionsBuilder builder_(_fbb);
builder_.add_fileData(fileData);
builder_.add_node3DOption(node3DOption);
return builder_.Finish();
}
struct UserCameraOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct UserCameraOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef UserCameraOptionsBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODE3DOPTION = 4,
VT_FOV = 6,
VT_NEARCLIP = 8,
@@ -223,80 +269,112 @@ struct UserCameraOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
VT_FORWARDFILEDATA = 24,
VT_BACKFILEDATA = 26
};
const Node3DOption* node3DOption() const { return GetPointer<const Node3DOption*>(VT_NODE3DOPTION); }
float fov() const { return GetField<float>(VT_FOV, 60.0f); }
float nearClip() const { return GetField<float>(VT_NEARCLIP, 1.0f); }
float farClip() const { return GetField<float>(VT_FARCLIP, 1000.0f); }
int32_t cameraFlag() const { return GetField<int32_t>(VT_CAMERAFLAG, 0); }
bool skyBoxEnabled() const { return GetField<uint8_t>(VT_SKYBOXENABLED, 0) != 0; }
const ResourceData* leftFileData() const { return GetPointer<const ResourceData*>(VT_LEFTFILEDATA); }
const ResourceData* rightFileData() const { return GetPointer<const ResourceData*>(VT_RIGHTFILEDATA); }
const ResourceData* upFileData() const { return GetPointer<const ResourceData*>(VT_UPFILEDATA); }
const ResourceData* downFileData() const { return GetPointer<const ResourceData*>(VT_DOWNFILEDATA); }
const ResourceData* forwardFileData() const { return GetPointer<const ResourceData*>(VT_FORWARDFILEDATA); }
const ResourceData* backFileData() const { return GetPointer<const ResourceData*>(VT_BACKFILEDATA); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) && VerifyField<float>(verifier, VT_FOV) &&
VerifyField<float>(verifier, VT_NEARCLIP) && VerifyField<float>(verifier, VT_FARCLIP) &&
VerifyField<int32_t>(verifier, VT_CAMERAFLAG) && VerifyField<uint8_t>(verifier, VT_SKYBOXENABLED) &&
VerifyOffset(verifier, VT_LEFTFILEDATA) && verifier.VerifyTable(leftFileData()) &&
VerifyOffset(verifier, VT_RIGHTFILEDATA) && verifier.VerifyTable(rightFileData()) &&
VerifyOffset(verifier, VT_UPFILEDATA) && verifier.VerifyTable(upFileData()) &&
VerifyOffset(verifier, VT_DOWNFILEDATA) && verifier.VerifyTable(downFileData()) &&
VerifyOffset(verifier, VT_FORWARDFILEDATA) && verifier.VerifyTable(forwardFileData()) &&
VerifyOffset(verifier, VT_BACKFILEDATA) && verifier.VerifyTable(backFileData()) && verifier.EndTable();
const flatbuffers::Node3DOption *node3DOption() const {
return GetPointer<const flatbuffers::Node3DOption *>(VT_NODE3DOPTION);
}
float fov() const {
return GetField<float>(VT_FOV, 60.0f);
}
float nearClip() const {
return GetField<float>(VT_NEARCLIP, 1.0f);
}
float farClip() const {
return GetField<float>(VT_FARCLIP, 1000.0f);
}
int32_t cameraFlag() const {
return GetField<int32_t>(VT_CAMERAFLAG, 0);
}
bool skyBoxEnabled() const {
return GetField<uint8_t>(VT_SKYBOXENABLED, 0) != 0;
}
const flatbuffers::ResourceData *leftFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_LEFTFILEDATA);
}
const flatbuffers::ResourceData *rightFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_RIGHTFILEDATA);
}
const flatbuffers::ResourceData *upFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_UPFILEDATA);
}
const flatbuffers::ResourceData *downFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_DOWNFILEDATA);
}
const flatbuffers::ResourceData *forwardFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_FORWARDFILEDATA);
}
const flatbuffers::ResourceData *backFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_BACKFILEDATA);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) &&
VerifyField<float>(verifier, VT_FOV, 4) &&
VerifyField<float>(verifier, VT_NEARCLIP, 4) &&
VerifyField<float>(verifier, VT_FARCLIP, 4) &&
VerifyField<int32_t>(verifier, VT_CAMERAFLAG, 4) &&
VerifyField<uint8_t>(verifier, VT_SKYBOXENABLED, 1) &&
VerifyOffset(verifier, VT_LEFTFILEDATA) &&
verifier.VerifyTable(leftFileData()) &&
VerifyOffset(verifier, VT_RIGHTFILEDATA) &&
verifier.VerifyTable(rightFileData()) &&
VerifyOffset(verifier, VT_UPFILEDATA) &&
verifier.VerifyTable(upFileData()) &&
VerifyOffset(verifier, VT_DOWNFILEDATA) &&
verifier.VerifyTable(downFileData()) &&
VerifyOffset(verifier, VT_FORWARDFILEDATA) &&
verifier.VerifyTable(forwardFileData()) &&
VerifyOffset(verifier, VT_BACKFILEDATA) &&
verifier.VerifyTable(backFileData()) &&
verifier.EndTable();
}
};
struct UserCameraOptionsBuilder
{
struct UserCameraOptionsBuilder {
typedef UserCameraOptions Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_node3DOption(flatbuffers::Offset<Node3DOption> node3DOption)
{
void add_node3DOption(flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption) {
fbb_.AddOffset(UserCameraOptions::VT_NODE3DOPTION, node3DOption);
}
void add_fov(float fov) { fbb_.AddElement<float>(UserCameraOptions::VT_FOV, fov, 60.0f); }
void add_nearClip(float nearClip) { fbb_.AddElement<float>(UserCameraOptions::VT_NEARCLIP, nearClip, 1.0f); }
void add_farClip(float farClip) { fbb_.AddElement<float>(UserCameraOptions::VT_FARCLIP, farClip, 1000.0f); }
void add_cameraFlag(int32_t cameraFlag)
{
void add_fov(float fov) {
fbb_.AddElement<float>(UserCameraOptions::VT_FOV, fov, 60.0f);
}
void add_nearClip(float nearClip) {
fbb_.AddElement<float>(UserCameraOptions::VT_NEARCLIP, nearClip, 1.0f);
}
void add_farClip(float farClip) {
fbb_.AddElement<float>(UserCameraOptions::VT_FARCLIP, farClip, 1000.0f);
}
void add_cameraFlag(int32_t cameraFlag) {
fbb_.AddElement<int32_t>(UserCameraOptions::VT_CAMERAFLAG, cameraFlag, 0);
}
void add_skyBoxEnabled(bool skyBoxEnabled)
{
void add_skyBoxEnabled(bool skyBoxEnabled) {
fbb_.AddElement<uint8_t>(UserCameraOptions::VT_SKYBOXENABLED, static_cast<uint8_t>(skyBoxEnabled), 0);
}
void add_leftFileData(flatbuffers::Offset<ResourceData> leftFileData)
{
void add_leftFileData(flatbuffers::Offset<flatbuffers::ResourceData> leftFileData) {
fbb_.AddOffset(UserCameraOptions::VT_LEFTFILEDATA, leftFileData);
}
void add_rightFileData(flatbuffers::Offset<ResourceData> rightFileData)
{
void add_rightFileData(flatbuffers::Offset<flatbuffers::ResourceData> rightFileData) {
fbb_.AddOffset(UserCameraOptions::VT_RIGHTFILEDATA, rightFileData);
}
void add_upFileData(flatbuffers::Offset<ResourceData> upFileData)
{
void add_upFileData(flatbuffers::Offset<flatbuffers::ResourceData> upFileData) {
fbb_.AddOffset(UserCameraOptions::VT_UPFILEDATA, upFileData);
}
void add_downFileData(flatbuffers::Offset<ResourceData> downFileData)
{
void add_downFileData(flatbuffers::Offset<flatbuffers::ResourceData> downFileData) {
fbb_.AddOffset(UserCameraOptions::VT_DOWNFILEDATA, downFileData);
}
void add_forwardFileData(flatbuffers::Offset<ResourceData> forwardFileData)
{
void add_forwardFileData(flatbuffers::Offset<flatbuffers::ResourceData> forwardFileData) {
fbb_.AddOffset(UserCameraOptions::VT_FORWARDFILEDATA, forwardFileData);
}
void add_backFileData(flatbuffers::Offset<ResourceData> backFileData)
{
void add_backFileData(flatbuffers::Offset<flatbuffers::ResourceData> backFileData) {
fbb_.AddOffset(UserCameraOptions::VT_BACKFILEDATA, backFileData);
}
explicit UserCameraOptionsBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
UserCameraOptionsBuilder& operator=(const UserCameraOptionsBuilder&);
flatbuffers::Offset<UserCameraOptions> Finish()
{
explicit UserCameraOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<UserCameraOptions> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<UserCameraOptions>(end);
return o;
@@ -305,19 +383,18 @@ struct UserCameraOptionsBuilder
inline flatbuffers::Offset<UserCameraOptions> CreateUserCameraOptions(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<Node3DOption> node3DOption = 0,
flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption = 0,
float fov = 60.0f,
float nearClip = 1.0f,
float farClip = 1000.0f,
int32_t cameraFlag = 0,
bool skyBoxEnabled = false,
flatbuffers::Offset<ResourceData> leftFileData = 0,
flatbuffers::Offset<ResourceData> rightFileData = 0,
flatbuffers::Offset<ResourceData> upFileData = 0,
flatbuffers::Offset<ResourceData> downFileData = 0,
flatbuffers::Offset<ResourceData> forwardFileData = 0,
flatbuffers::Offset<ResourceData> backFileData = 0)
{
flatbuffers::Offset<flatbuffers::ResourceData> leftFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> rightFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> upFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> downFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> forwardFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> backFileData = 0) {
UserCameraOptionsBuilder builder_(_fbb);
builder_.add_backFileData(backFileData);
builder_.add_forwardFileData(forwardFileData);
@@ -334,10 +411,9 @@ inline flatbuffers::Offset<UserCameraOptions> CreateUserCameraOptions(
return builder_.Finish();
}
struct GameNode3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct GameNode3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GameNode3DOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NAME = 4,
VT_SKYBOXMASK = 6,
VT_SKYBOXENABLED = 8,
@@ -351,90 +427,114 @@ struct GameNode3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
VT_CUSTOMPROPERTY = 24,
VT_USEDEFAULTLIGHT = 26
};
const flatbuffers::String* name() const { return GetPointer<const flatbuffers::String*>(VT_NAME); }
int32_t skyBoxMask() const { return GetField<int32_t>(VT_SKYBOXMASK, 0); }
bool skyBoxEnabled() const { return GetField<uint8_t>(VT_SKYBOXENABLED, 0) != 0; }
const ResourceData* leftFileData() const { return GetPointer<const ResourceData*>(VT_LEFTFILEDATA); }
const ResourceData* rightFileData() const { return GetPointer<const ResourceData*>(VT_RIGHTFILEDATA); }
const ResourceData* upFileData() const { return GetPointer<const ResourceData*>(VT_UPFILEDATA); }
const ResourceData* downFileData() const { return GetPointer<const ResourceData*>(VT_DOWNFILEDATA); }
const ResourceData* forwardFileData() const { return GetPointer<const ResourceData*>(VT_FORWARDFILEDATA); }
const ResourceData* backFileData() const { return GetPointer<const ResourceData*>(VT_BACKFILEDATA); }
const flatbuffers::String* frameEvent() const { return GetPointer<const flatbuffers::String*>(VT_FRAMEEVENT); }
const flatbuffers::String* customProperty() const
{
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
}
int32_t skyBoxMask() const {
return GetField<int32_t>(VT_SKYBOXMASK, 0);
}
bool skyBoxEnabled() const {
return GetField<uint8_t>(VT_SKYBOXENABLED, 0) != 0;
}
const flatbuffers::ResourceData *leftFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_LEFTFILEDATA);
}
const flatbuffers::ResourceData *rightFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_RIGHTFILEDATA);
}
const flatbuffers::ResourceData *upFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_UPFILEDATA);
}
const flatbuffers::ResourceData *downFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_DOWNFILEDATA);
}
const flatbuffers::ResourceData *forwardFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_FORWARDFILEDATA);
}
const flatbuffers::ResourceData *backFileData() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_BACKFILEDATA);
}
const flatbuffers::String *frameEvent() const {
return GetPointer<const flatbuffers::String *>(VT_FRAMEEVENT);
}
const flatbuffers::String *customProperty() const {
return GetPointer<const flatbuffers::String *>(VT_CUSTOMPROPERTY);
}
bool useDefaultLight() const { return GetField<uint8_t>(VT_USEDEFAULTLIGHT, 0) != 0; }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && verifier.VerifyString(name()) &&
VerifyField<int32_t>(verifier, VT_SKYBOXMASK) && VerifyField<uint8_t>(verifier, VT_SKYBOXENABLED) &&
VerifyOffset(verifier, VT_LEFTFILEDATA) && verifier.VerifyTable(leftFileData()) &&
VerifyOffset(verifier, VT_RIGHTFILEDATA) && verifier.VerifyTable(rightFileData()) &&
VerifyOffset(verifier, VT_UPFILEDATA) && verifier.VerifyTable(upFileData()) &&
VerifyOffset(verifier, VT_DOWNFILEDATA) && verifier.VerifyTable(downFileData()) &&
VerifyOffset(verifier, VT_FORWARDFILEDATA) && verifier.VerifyTable(forwardFileData()) &&
VerifyOffset(verifier, VT_BACKFILEDATA) && verifier.VerifyTable(backFileData()) &&
VerifyOffset(verifier, VT_FRAMEEVENT) && verifier.VerifyString(frameEvent()) &&
VerifyOffset(verifier, VT_CUSTOMPROPERTY) && verifier.VerifyString(customProperty()) &&
VerifyField<uint8_t>(verifier, VT_USEDEFAULTLIGHT) && verifier.EndTable();
bool useDefaultLight() const {
return GetField<uint8_t>(VT_USEDEFAULTLIGHT, 0) != 0;
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
VerifyField<int32_t>(verifier, VT_SKYBOXMASK, 4) &&
VerifyField<uint8_t>(verifier, VT_SKYBOXENABLED, 1) &&
VerifyOffset(verifier, VT_LEFTFILEDATA) &&
verifier.VerifyTable(leftFileData()) &&
VerifyOffset(verifier, VT_RIGHTFILEDATA) &&
verifier.VerifyTable(rightFileData()) &&
VerifyOffset(verifier, VT_UPFILEDATA) &&
verifier.VerifyTable(upFileData()) &&
VerifyOffset(verifier, VT_DOWNFILEDATA) &&
verifier.VerifyTable(downFileData()) &&
VerifyOffset(verifier, VT_FORWARDFILEDATA) &&
verifier.VerifyTable(forwardFileData()) &&
VerifyOffset(verifier, VT_BACKFILEDATA) &&
verifier.VerifyTable(backFileData()) &&
VerifyOffset(verifier, VT_FRAMEEVENT) &&
verifier.VerifyString(frameEvent()) &&
VerifyOffset(verifier, VT_CUSTOMPROPERTY) &&
verifier.VerifyString(customProperty()) &&
VerifyField<uint8_t>(verifier, VT_USEDEFAULTLIGHT, 1) &&
verifier.EndTable();
}
};
struct GameNode3DOptionBuilder
{
struct GameNode3DOptionBuilder {
typedef GameNode3DOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_name(flatbuffers::Offset<flatbuffers::String> name) { fbb_.AddOffset(GameNode3DOption::VT_NAME, name); }
void add_skyBoxMask(int32_t skyBoxMask)
{
void add_name(flatbuffers::Offset<flatbuffers::String> name) {
fbb_.AddOffset(GameNode3DOption::VT_NAME, name);
}
void add_skyBoxMask(int32_t skyBoxMask) {
fbb_.AddElement<int32_t>(GameNode3DOption::VT_SKYBOXMASK, skyBoxMask, 0);
}
void add_skyBoxEnabled(bool skyBoxEnabled)
{
void add_skyBoxEnabled(bool skyBoxEnabled) {
fbb_.AddElement<uint8_t>(GameNode3DOption::VT_SKYBOXENABLED, static_cast<uint8_t>(skyBoxEnabled), 0);
}
void add_leftFileData(flatbuffers::Offset<ResourceData> leftFileData)
{
void add_leftFileData(flatbuffers::Offset<flatbuffers::ResourceData> leftFileData) {
fbb_.AddOffset(GameNode3DOption::VT_LEFTFILEDATA, leftFileData);
}
void add_rightFileData(flatbuffers::Offset<ResourceData> rightFileData)
{
void add_rightFileData(flatbuffers::Offset<flatbuffers::ResourceData> rightFileData) {
fbb_.AddOffset(GameNode3DOption::VT_RIGHTFILEDATA, rightFileData);
}
void add_upFileData(flatbuffers::Offset<ResourceData> upFileData)
{
void add_upFileData(flatbuffers::Offset<flatbuffers::ResourceData> upFileData) {
fbb_.AddOffset(GameNode3DOption::VT_UPFILEDATA, upFileData);
}
void add_downFileData(flatbuffers::Offset<ResourceData> downFileData)
{
void add_downFileData(flatbuffers::Offset<flatbuffers::ResourceData> downFileData) {
fbb_.AddOffset(GameNode3DOption::VT_DOWNFILEDATA, downFileData);
}
void add_forwardFileData(flatbuffers::Offset<ResourceData> forwardFileData)
{
void add_forwardFileData(flatbuffers::Offset<flatbuffers::ResourceData> forwardFileData) {
fbb_.AddOffset(GameNode3DOption::VT_FORWARDFILEDATA, forwardFileData);
}
void add_backFileData(flatbuffers::Offset<ResourceData> backFileData)
{
void add_backFileData(flatbuffers::Offset<flatbuffers::ResourceData> backFileData) {
fbb_.AddOffset(GameNode3DOption::VT_BACKFILEDATA, backFileData);
}
void add_frameEvent(flatbuffers::Offset<flatbuffers::String> frameEvent)
{
void add_frameEvent(flatbuffers::Offset<flatbuffers::String> frameEvent) {
fbb_.AddOffset(GameNode3DOption::VT_FRAMEEVENT, frameEvent);
}
void add_customProperty(flatbuffers::Offset<flatbuffers::String> customProperty)
{
void add_customProperty(flatbuffers::Offset<flatbuffers::String> customProperty) {
fbb_.AddOffset(GameNode3DOption::VT_CUSTOMPROPERTY, customProperty);
}
void add_useDefaultLight(bool useDefaultLight)
{
void add_useDefaultLight(bool useDefaultLight) {
fbb_.AddElement<uint8_t>(GameNode3DOption::VT_USEDEFAULTLIGHT, static_cast<uint8_t>(useDefaultLight), 0);
}
explicit GameNode3DOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
GameNode3DOptionBuilder& operator=(const GameNode3DOptionBuilder&);
flatbuffers::Offset<GameNode3DOption> Finish()
{
explicit GameNode3DOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<GameNode3DOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GameNode3DOption>(end);
return o;
@@ -446,16 +546,15 @@ inline flatbuffers::Offset<GameNode3DOption> CreateGameNode3DOption(
flatbuffers::Offset<flatbuffers::String> name = 0,
int32_t skyBoxMask = 0,
bool skyBoxEnabled = false,
flatbuffers::Offset<ResourceData> leftFileData = 0,
flatbuffers::Offset<ResourceData> rightFileData = 0,
flatbuffers::Offset<ResourceData> upFileData = 0,
flatbuffers::Offset<ResourceData> downFileData = 0,
flatbuffers::Offset<ResourceData> forwardFileData = 0,
flatbuffers::Offset<ResourceData> backFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> leftFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> rightFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> upFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> downFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> forwardFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> backFileData = 0,
flatbuffers::Offset<flatbuffers::String> frameEvent = 0,
flatbuffers::Offset<flatbuffers::String> customProperty = 0,
bool useDefaultLight = false)
{
bool useDefaultLight = false) {
GameNode3DOptionBuilder builder_(_fbb);
builder_.add_customProperty(customProperty);
builder_.add_frameEvent(frameEvent);
@@ -477,28 +576,37 @@ inline flatbuffers::Offset<GameNode3DOption> CreateGameNode3DOptionDirect(
const char *name = nullptr,
int32_t skyBoxMask = 0,
bool skyBoxEnabled = false,
flatbuffers::Offset<ResourceData> leftFileData = 0,
flatbuffers::Offset<ResourceData> rightFileData = 0,
flatbuffers::Offset<ResourceData> upFileData = 0,
flatbuffers::Offset<ResourceData> downFileData = 0,
flatbuffers::Offset<ResourceData> forwardFileData = 0,
flatbuffers::Offset<ResourceData> backFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> leftFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> rightFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> upFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> downFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> forwardFileData = 0,
flatbuffers::Offset<flatbuffers::ResourceData> backFileData = 0,
const char *frameEvent = nullptr,
const char *customProperty = nullptr,
bool useDefaultLight = false)
{
bool useDefaultLight = false) {
auto name__ = name ? _fbb.CreateString(name) : 0;
auto frameEvent__ = frameEvent ? _fbb.CreateString(frameEvent) : 0;
auto customProperty__ = customProperty ? _fbb.CreateString(customProperty) : 0;
return flatbuffers::CreateGameNode3DOption(_fbb, name__, skyBoxMask, skyBoxEnabled, leftFileData, rightFileData,
upFileData, downFileData, forwardFileData, backFileData, frameEvent__,
customProperty__, useDefaultLight);
return flatbuffers::CreateGameNode3DOption(
_fbb,
name__,
skyBoxMask,
skyBoxEnabled,
leftFileData,
rightFileData,
upFileData,
downFileData,
forwardFileData,
backFileData,
frameEvent__,
customProperty__,
useDefaultLight);
}
struct Light3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct Light3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef Light3DOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODE3DOPTION = 4,
VT_ENABLED = 6,
VT_TYPE = 8,
@@ -507,59 +615,86 @@ struct Light3DOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
VT_RANGE = 14,
VT_OUTERANGLE = 16
};
const Node3DOption* node3DOption() const { return GetPointer<const Node3DOption*>(VT_NODE3DOPTION); }
bool enabled() const { return GetField<uint8_t>(VT_ENABLED, 0) != 0; }
int32_t type() const { return GetField<int32_t>(VT_TYPE, 0); }
int32_t flag() const { return GetField<int32_t>(VT_FLAG, 0); }
float intensity() const { return GetField<float>(VT_INTENSITY, 0.0f); }
float range() const { return GetField<float>(VT_RANGE, 0.0f); }
float outerAngle() const { return GetField<float>(VT_OUTERANGLE, 0.0f); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) && VerifyField<uint8_t>(verifier, VT_ENABLED) &&
VerifyField<int32_t>(verifier, VT_TYPE) && VerifyField<int32_t>(verifier, VT_FLAG) &&
VerifyField<float>(verifier, VT_INTENSITY) && VerifyField<float>(verifier, VT_RANGE) &&
VerifyField<float>(verifier, VT_OUTERANGLE) && verifier.EndTable();
const flatbuffers::Node3DOption *node3DOption() const {
return GetPointer<const flatbuffers::Node3DOption *>(VT_NODE3DOPTION);
}
bool enabled() const {
return GetField<uint8_t>(VT_ENABLED, 0) != 0;
}
int32_t type() const {
return GetField<int32_t>(VT_TYPE, 0);
}
int32_t flag() const {
return GetField<int32_t>(VT_FLAG, 0);
}
float intensity() const {
return GetField<float>(VT_INTENSITY, 0.0f);
}
float range() const {
return GetField<float>(VT_RANGE, 0.0f);
}
float outerAngle() const {
return GetField<float>(VT_OUTERANGLE, 0.0f);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODE3DOPTION) &&
verifier.VerifyTable(node3DOption()) &&
VerifyField<uint8_t>(verifier, VT_ENABLED, 1) &&
VerifyField<int32_t>(verifier, VT_TYPE, 4) &&
VerifyField<int32_t>(verifier, VT_FLAG, 4) &&
VerifyField<float>(verifier, VT_INTENSITY, 4) &&
VerifyField<float>(verifier, VT_RANGE, 4) &&
VerifyField<float>(verifier, VT_OUTERANGLE, 4) &&
verifier.EndTable();
}
};
struct Light3DOptionBuilder
{
struct Light3DOptionBuilder {
typedef Light3DOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_node3DOption(flatbuffers::Offset<Node3DOption> node3DOption)
{
void add_node3DOption(flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption) {
fbb_.AddOffset(Light3DOption::VT_NODE3DOPTION, node3DOption);
}
void add_enabled(bool enabled)
{
void add_enabled(bool enabled) {
fbb_.AddElement<uint8_t>(Light3DOption::VT_ENABLED, static_cast<uint8_t>(enabled), 0);
}
void add_type(int32_t type) { fbb_.AddElement<int32_t>(Light3DOption::VT_TYPE, type, 0); }
void add_flag(int32_t flag) { fbb_.AddElement<int32_t>(Light3DOption::VT_FLAG, flag, 0); }
void add_intensity(float intensity) { fbb_.AddElement<float>(Light3DOption::VT_INTENSITY, intensity, 0.0f); }
void add_range(float range) { fbb_.AddElement<float>(Light3DOption::VT_RANGE, range, 0.0f); }
void add_outerAngle(float outerAngle) { fbb_.AddElement<float>(Light3DOption::VT_OUTERANGLE, outerAngle, 0.0f); }
explicit Light3DOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
Light3DOptionBuilder& operator=(const Light3DOptionBuilder&);
flatbuffers::Offset<Light3DOption> Finish()
{
void add_type(int32_t type) {
fbb_.AddElement<int32_t>(Light3DOption::VT_TYPE, type, 0);
}
void add_flag(int32_t flag) {
fbb_.AddElement<int32_t>(Light3DOption::VT_FLAG, flag, 0);
}
void add_intensity(float intensity) {
fbb_.AddElement<float>(Light3DOption::VT_INTENSITY, intensity, 0.0f);
}
void add_range(float range) {
fbb_.AddElement<float>(Light3DOption::VT_RANGE, range, 0.0f);
}
void add_outerAngle(float outerAngle) {
fbb_.AddElement<float>(Light3DOption::VT_OUTERANGLE, outerAngle, 0.0f);
}
explicit Light3DOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<Light3DOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Light3DOption>(end);
return o;
}
};
inline flatbuffers::Offset<Light3DOption> CreateLight3DOption(flatbuffers::FlatBufferBuilder& _fbb,
flatbuffers::Offset<Node3DOption> node3DOption = 0,
inline flatbuffers::Offset<Light3DOption> CreateLight3DOption(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Node3DOption> node3DOption = 0,
bool enabled = false,
int32_t type = 0,
int32_t flag = 0,
float intensity = 0.0f,
float range = 0.0f,
float outerAngle = 0.0f)
{
float outerAngle = 0.0f) {
Light3DOptionBuilder builder_(_fbb);
builder_.add_outerAngle(outerAngle);
builder_.add_range(range);
@@ -571,35 +706,33 @@ inline flatbuffers::Offset<Light3DOption> CreateLight3DOption(flatbuffers::FlatB
return builder_.Finish();
}
inline const flatbuffers::Node3DOption* GetNode3DOption(const void* buf)
{
inline const flatbuffers::Node3DOption *GetNode3DOption(const void *buf) {
return flatbuffers::GetRoot<flatbuffers::Node3DOption>(buf);
}
inline const flatbuffers::Node3DOption* GetSizePrefixedNode3DOption(const void* buf)
{
inline const flatbuffers::Node3DOption *GetSizePrefixedNode3DOption(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<flatbuffers::Node3DOption>(buf);
}
inline bool VerifyNode3DOptionBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifyNode3DOptionBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<flatbuffers::Node3DOption>(nullptr);
}
inline bool VerifySizePrefixedNode3DOptionBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifySizePrefixedNode3DOptionBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<flatbuffers::Node3DOption>(nullptr);
}
inline void FinishNode3DOptionBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::Node3DOption> root)
{
inline void FinishNode3DOptionBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::Node3DOption> root) {
fbb.Finish(root);
}
inline void FinishSizePrefixedNode3DOptionBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::Node3DOption> root)
{
inline void FinishSizePrefixedNode3DOptionBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::Node3DOption> root) {
fbb.FinishSizePrefixed(root);
}
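
A usage sketch for the Node3DOption API above, assuming the header ships as CSParse3DBinary_generated.h. FVec3 and WidgetOptions come from CSParseBinary_generated.h, which is not shown in this diff; the FVec3 constructor taking (x, y, z) floats is an assumption based on the accessors used here:

#include "CSParse3DBinary_generated.h"

static flatbuffers::Offset<flatbuffers::Node3DOption>
makeNode3DOption(flatbuffers::FlatBufferBuilder& fbb)
{
    // FlatBuffers structs are stored inline, so they are passed by
    // pointer and copied at the Create call; they need not outlive
    // the builder.
    flatbuffers::FVec3 position(0.0f, 1.0f, 2.0f);  // assumed field layout
    flatbuffers::FVec3 rotation(0.0f, 0.0f, 0.0f);
    flatbuffers::FVec3 scale(1.0f, 1.0f, 1.0f);

    return flatbuffers::CreateNode3DOption(
        fbb,
        0,  // nodeOptions: omitted in this sketch
        &position,
        &rotation,
        &scale,
        /*cameramask*/ 1);
}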

File diff suppressed because it is too large

View File

@@ -1,23 +1,31 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_CSARMATURENODE_FLATBUFFERS_H_
#define FLATBUFFERS_GENERATED_CSARMATURENODE_FLATBUFFERS_H_
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
FLATBUFFERS_VERSION_MINOR == 0 &&
FLATBUFFERS_VERSION_REVISION == 8,
"Non-compatible flatbuffers version included");
#include "CSParseBinary_generated.h"
namespace flatbuffers
{
namespace flatbuffers {
struct CSArmatureNodeOption;
struct CSArmatureNodeOptionBuilder;
struct ResourceItemData;
struct ResourceItemDataBuilder;
struct CSArmatureNodeOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct CSArmatureNodeOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef CSArmatureNodeOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODEOPTIONS = 4,
VT_FILEDATA = 6,
VT_ISLOOP = 8,
@@ -27,72 +35,81 @@ struct CSArmatureNodeOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
VT_TIMESCALE = 16,
VT_ARMATURESCALE = 18
};
const WidgetOptions* nodeOptions() const { return GetPointer<const WidgetOptions*>(VT_NODEOPTIONS); }
const ResourceItemData* fileData() const { return GetPointer<const ResourceItemData*>(VT_FILEDATA); }
bool isLoop() const { return GetField<uint8_t>(VT_ISLOOP, 1) != 0; }
bool isAutoPlay() const { return GetField<uint8_t>(VT_ISAUTOPLAY, 1) != 0; }
const flatbuffers::String* currentAnimationName() const
{
const flatbuffers::WidgetOptions *nodeOptions() const {
return GetPointer<const flatbuffers::WidgetOptions *>(VT_NODEOPTIONS);
}
const flatbuffers::ResourceItemData *fileData() const {
return GetPointer<const flatbuffers::ResourceItemData *>(VT_FILEDATA);
}
bool isLoop() const {
return GetField<uint8_t>(VT_ISLOOP, 1) != 0;
}
bool isAutoPlay() const {
return GetField<uint8_t>(VT_ISAUTOPLAY, 1) != 0;
}
const flatbuffers::String *currentAnimationName() const {
return GetPointer<const flatbuffers::String *>(VT_CURRENTANIMATIONNAME);
}
const flatbuffers::String* currentArmatureName() const
{
const flatbuffers::String *currentArmatureName() const {
return GetPointer<const flatbuffers::String *>(VT_CURRENTARMATURENAME);
}
float timeScale() const { return GetField<float>(VT_TIMESCALE, 0.0f); }
float armatureScale() const { return GetField<float>(VT_ARMATURESCALE, 0.0f); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) && VerifyOffset(verifier, VT_FILEDATA) &&
verifier.VerifyTable(fileData()) && VerifyField<uint8_t>(verifier, VT_ISLOOP) &&
VerifyField<uint8_t>(verifier, VT_ISAUTOPLAY) && VerifyOffset(verifier, VT_CURRENTANIMATIONNAME) &&
verifier.VerifyString(currentAnimationName()) && VerifyOffset(verifier, VT_CURRENTARMATURENAME) &&
verifier.VerifyString(currentArmatureName()) && VerifyField<float>(verifier, VT_TIMESCALE) &&
VerifyField<float>(verifier, VT_ARMATURESCALE) && verifier.EndTable();
float timeScale() const {
return GetField<float>(VT_TIMESCALE, 0.0f);
}
float armatureScale() const {
return GetField<float>(VT_ARMATURESCALE, 0.0f);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) &&
VerifyOffset(verifier, VT_FILEDATA) &&
verifier.VerifyTable(fileData()) &&
VerifyField<uint8_t>(verifier, VT_ISLOOP, 1) &&
VerifyField<uint8_t>(verifier, VT_ISAUTOPLAY, 1) &&
VerifyOffset(verifier, VT_CURRENTANIMATIONNAME) &&
verifier.VerifyString(currentAnimationName()) &&
VerifyOffset(verifier, VT_CURRENTARMATURENAME) &&
verifier.VerifyString(currentArmatureName()) &&
VerifyField<float>(verifier, VT_TIMESCALE, 4) &&
VerifyField<float>(verifier, VT_ARMATURESCALE, 4) &&
verifier.EndTable();
}
};
struct CSArmatureNodeOptionBuilder
{
struct CSArmatureNodeOptionBuilder {
typedef CSArmatureNodeOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_nodeOptions(flatbuffers::Offset<WidgetOptions> nodeOptions)
{
void add_nodeOptions(flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions) {
fbb_.AddOffset(CSArmatureNodeOption::VT_NODEOPTIONS, nodeOptions);
}
void add_fileData(flatbuffers::Offset<ResourceItemData> fileData)
{
void add_fileData(flatbuffers::Offset<flatbuffers::ResourceItemData> fileData) {
fbb_.AddOffset(CSArmatureNodeOption::VT_FILEDATA, fileData);
}
void add_isLoop(bool isLoop)
{
void add_isLoop(bool isLoop) {
fbb_.AddElement<uint8_t>(CSArmatureNodeOption::VT_ISLOOP, static_cast<uint8_t>(isLoop), 1);
}
void add_isAutoPlay(bool isAutoPlay)
{
void add_isAutoPlay(bool isAutoPlay) {
fbb_.AddElement<uint8_t>(CSArmatureNodeOption::VT_ISAUTOPLAY, static_cast<uint8_t>(isAutoPlay), 1);
}
void add_currentAnimationName(flatbuffers::Offset<flatbuffers::String> currentAnimationName)
{
void add_currentAnimationName(flatbuffers::Offset<flatbuffers::String> currentAnimationName) {
fbb_.AddOffset(CSArmatureNodeOption::VT_CURRENTANIMATIONNAME, currentAnimationName);
}
void add_currentArmatureName(flatbuffers::Offset<flatbuffers::String> currentArmatureName)
{
void add_currentArmatureName(flatbuffers::Offset<flatbuffers::String> currentArmatureName) {
fbb_.AddOffset(CSArmatureNodeOption::VT_CURRENTARMATURENAME, currentArmatureName);
}
void add_timeScale(float timeScale) { fbb_.AddElement<float>(CSArmatureNodeOption::VT_TIMESCALE, timeScale, 0.0f); }
void add_armatureScale(float armatureScale)
{
void add_timeScale(float timeScale) {
fbb_.AddElement<float>(CSArmatureNodeOption::VT_TIMESCALE, timeScale, 0.0f);
}
void add_armatureScale(float armatureScale) {
fbb_.AddElement<float>(CSArmatureNodeOption::VT_ARMATURESCALE, armatureScale, 0.0f);
}
explicit CSArmatureNodeOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb)
{
explicit CSArmatureNodeOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
CSArmatureNodeOptionBuilder& operator=(const CSArmatureNodeOptionBuilder&);
flatbuffers::Offset<CSArmatureNodeOption> Finish()
{
flatbuffers::Offset<CSArmatureNodeOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<CSArmatureNodeOption>(end);
return o;
@ -101,15 +118,14 @@ struct CSArmatureNodeOptionBuilder
inline flatbuffers::Offset<CSArmatureNodeOption> CreateCSArmatureNodeOption(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<ResourceItemData> fileData = 0,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<flatbuffers::ResourceItemData> fileData = 0,
bool isLoop = true,
bool isAutoPlay = true,
flatbuffers::Offset<flatbuffers::String> currentAnimationName = 0,
flatbuffers::Offset<flatbuffers::String> currentArmatureName = 0,
float timeScale = 0.0f,
float armatureScale = 0.0f)
{
float armatureScale = 0.0f) {
CSArmatureNodeOptionBuilder builder_(_fbb);
builder_.add_armatureScale(armatureScale);
builder_.add_timeScale(timeScale);
@ -124,101 +140,118 @@ inline flatbuffers::Offset<CSArmatureNodeOption> CreateCSArmatureNodeOption(
inline flatbuffers::Offset<CSArmatureNodeOption> CreateCSArmatureNodeOptionDirect(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<ResourceItemData> fileData = 0,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<flatbuffers::ResourceItemData> fileData = 0,
bool isLoop = true,
bool isAutoPlay = true,
const char *currentAnimationName = nullptr,
const char *currentArmatureName = nullptr,
float timeScale = 0.0f,
float armatureScale = 0.0f)
{
float armatureScale = 0.0f) {
auto currentAnimationName__ = currentAnimationName ? _fbb.CreateString(currentAnimationName) : 0;
auto currentArmatureName__ = currentArmatureName ? _fbb.CreateString(currentArmatureName) : 0;
return flatbuffers::CreateCSArmatureNodeOption(_fbb, nodeOptions, fileData, isLoop, isAutoPlay,
currentAnimationName__, currentArmatureName__, timeScale,
return flatbuffers::CreateCSArmatureNodeOption(
_fbb,
nodeOptions,
fileData,
isLoop,
isAutoPlay,
currentAnimationName__,
currentArmatureName__,
timeScale,
armatureScale);
}
struct ResourceItemData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct ResourceItemData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef ResourceItemDataBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_TYPE = 4,
VT_PATH = 6
};
int32_t type() const { return GetField<int32_t>(VT_TYPE, 0); }
const flatbuffers::String* path() const { return GetPointer<const flatbuffers::String*>(VT_PATH); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_TYPE) &&
VerifyOffset(verifier, VT_PATH) && verifier.VerifyString(path()) && verifier.EndTable();
int32_t type() const {
return GetField<int32_t>(VT_TYPE, 0);
}
const flatbuffers::String *path() const {
return GetPointer<const flatbuffers::String *>(VT_PATH);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int32_t>(verifier, VT_TYPE, 4) &&
VerifyOffset(verifier, VT_PATH) &&
verifier.VerifyString(path()) &&
verifier.EndTable();
}
};
struct ResourceItemDataBuilder
{
struct ResourceItemDataBuilder {
typedef ResourceItemData Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_type(int32_t type) { fbb_.AddElement<int32_t>(ResourceItemData::VT_TYPE, type, 0); }
void add_path(flatbuffers::Offset<flatbuffers::String> path) { fbb_.AddOffset(ResourceItemData::VT_PATH, path); }
explicit ResourceItemDataBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
ResourceItemDataBuilder& operator=(const ResourceItemDataBuilder&);
flatbuffers::Offset<ResourceItemData> Finish()
{
void add_type(int32_t type) {
fbb_.AddElement<int32_t>(ResourceItemData::VT_TYPE, type, 0);
}
void add_path(flatbuffers::Offset<flatbuffers::String> path) {
fbb_.AddOffset(ResourceItemData::VT_PATH, path);
}
explicit ResourceItemDataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<ResourceItemData> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<ResourceItemData>(end);
return o;
}
};
inline flatbuffers::Offset<ResourceItemData> CreateResourceItemData(flatbuffers::FlatBufferBuilder& _fbb,
inline flatbuffers::Offset<ResourceItemData> CreateResourceItemData(
flatbuffers::FlatBufferBuilder &_fbb,
int32_t type = 0,
flatbuffers::Offset<flatbuffers::String> path = 0)
{
flatbuffers::Offset<flatbuffers::String> path = 0) {
ResourceItemDataBuilder builder_(_fbb);
builder_.add_path(path);
builder_.add_type(type);
return builder_.Finish();
}
inline flatbuffers::Offset<ResourceItemData> CreateResourceItemDataDirect(flatbuffers::FlatBufferBuilder& _fbb,
inline flatbuffers::Offset<ResourceItemData> CreateResourceItemDataDirect(
flatbuffers::FlatBufferBuilder &_fbb,
int32_t type = 0,
const char* path = nullptr)
{
const char *path = nullptr) {
auto path__ = path ? _fbb.CreateString(path) : 0;
return flatbuffers::CreateResourceItemData(_fbb, type, path__);
return flatbuffers::CreateResourceItemData(
_fbb,
type,
path__);
}
inline const flatbuffers::CSArmatureNodeOption* GetCSArmatureNodeOption(const void* buf)
{
inline const flatbuffers::CSArmatureNodeOption *GetCSArmatureNodeOption(const void *buf) {
return flatbuffers::GetRoot<flatbuffers::CSArmatureNodeOption>(buf);
}
inline const flatbuffers::CSArmatureNodeOption* GetSizePrefixedCSArmatureNodeOption(const void* buf)
{
inline const flatbuffers::CSArmatureNodeOption *GetSizePrefixedCSArmatureNodeOption(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<flatbuffers::CSArmatureNodeOption>(buf);
}
inline bool VerifyCSArmatureNodeOptionBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifyCSArmatureNodeOptionBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<flatbuffers::CSArmatureNodeOption>(nullptr);
}
inline bool VerifySizePrefixedCSArmatureNodeOptionBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifySizePrefixedCSArmatureNodeOptionBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<flatbuffers::CSArmatureNodeOption>(nullptr);
}
inline void FinishCSArmatureNodeOptionBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::CSArmatureNodeOption> root)
{
inline void FinishCSArmatureNodeOptionBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::CSArmatureNodeOption> root) {
fbb.Finish(root);
}
inline void FinishSizePrefixedCSArmatureNodeOptionBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::CSArmatureNodeOption> root)
{
inline void FinishSizePrefixedCSArmatureNodeOptionBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::CSArmatureNodeOption> root) {
fbb.FinishSizePrefixed(root);
}
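
With FlatBuffers 2.0.8 the generated verifiers carry each scalar field's size (the trailing `1`/`4` arguments to `VerifyField`), and every root type gains the paired get/verify/finish helpers shown above. A minimal build-verify-read round trip using only those helpers; the animation/armature names and scale values are placeholders:

```cpp
#include "flatbuffers/flatbuffers.h"
// Assumes the generated CSArmatureNode header above is also included.

bool RoundTripArmatureOption()
{
    flatbuffers::FlatBufferBuilder fbb;
    // nodeOptions/fileData are left empty (offset 0) for brevity.
    auto root = flatbuffers::CreateCSArmatureNodeOptionDirect(
        fbb, 0, 0, /*isLoop*/ true, /*isAutoPlay*/ true,
        /*currentAnimationName*/ "run", /*currentArmatureName*/ "hero",
        /*timeScale*/ 1.0f, /*armatureScale*/ 1.0f);
    flatbuffers::FinishCSArmatureNodeOptionBuffer(fbb, root);

    // Verify before trusting the bytes, then read them back.
    flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
    if (!flatbuffers::VerifyCSArmatureNodeOptionBuffer(verifier))
        return false;

    auto* opt = flatbuffers::GetCSArmatureNodeOption(fbb.GetBufferPointer());
    return opt->timeScale() == 1.0f &&
           opt->currentAnimationName()->str() == "run";
}
```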
@ -1,57 +1,71 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_CSBONEBINARY_FLATBUFFERS_H_
#define FLATBUFFERS_GENERATED_CSBONEBINARY_FLATBUFFERS_H_
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
FLATBUFFERS_VERSION_MINOR == 0 &&
FLATBUFFERS_VERSION_REVISION == 8,
"Non-compatible flatbuffers version included");
#include "CSParseBinary_generated.h"
namespace flatbuffers
{
namespace flatbuffers {
struct BoneOptions;
struct BoneOptionsBuilder;
struct BoneOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum
{
struct BoneOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef BoneOptionsBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODEOPTIONS = 4,
VT_LENGTH = 6,
VT_BLENDFUNC = 8
};
const flatbuffers::WidgetOptions* nodeOptions() const
{
const flatbuffers::WidgetOptions *nodeOptions() const {
return GetPointer<const flatbuffers::WidgetOptions *>(VT_NODEOPTIONS);
}
float length() const { return GetField<float>(VT_LENGTH, 0.0f); }
const flatbuffers::BlendFunc* blendFunc() const { return GetStruct<const flatbuffers::BlendFunc*>(VT_BLENDFUNC); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyField<flatbuffers::uoffset_t>(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) && VerifyField<float>(verifier, VT_LENGTH) &&
VerifyField<flatbuffers::BlendFunc>(verifier, VT_BLENDFUNC) && verifier.EndTable();
float length() const {
return GetField<float>(VT_LENGTH, 0.0f);
}
const flatbuffers::BlendFunc *blendFunc() const {
return GetStruct<const flatbuffers::BlendFunc *>(VT_BLENDFUNC);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) &&
VerifyField<float>(verifier, VT_LENGTH, 4) &&
VerifyField<flatbuffers::BlendFunc>(verifier, VT_BLENDFUNC, 4) &&
verifier.EndTable();
}
};
struct BoneOptionsBuilder
{
struct BoneOptionsBuilder {
typedef BoneOptions Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_nodeOptions(flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions)
{
void add_nodeOptions(flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions) {
fbb_.AddOffset(BoneOptions::VT_NODEOPTIONS, nodeOptions);
}
void add_length(float length) { fbb_.AddElement<float>(BoneOptions::VT_LENGTH, length, 0.0f); }
void add_blendFunc(const flatbuffers::BlendFunc* blendFunc)
{
void add_length(float length) {
fbb_.AddElement<float>(BoneOptions::VT_LENGTH, length, 0.0f);
}
void add_blendFunc(const flatbuffers::BlendFunc *blendFunc) {
fbb_.AddStruct(BoneOptions::VT_BLENDFUNC, blendFunc);
}
BoneOptionsBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
BoneOptionsBuilder& operator=(const BoneOptionsBuilder&);
flatbuffers::Offset<BoneOptions> Finish()
{
auto o = flatbuffers::Offset<BoneOptions>(fbb_.EndTable(start_, 3));
explicit BoneOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<BoneOptions> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<BoneOptions>(end);
return o;
}
};
@ -60,8 +74,7 @@ inline flatbuffers::Offset<BoneOptions> CreateBoneOptions(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
float length = 0.0f,
const flatbuffers::BlendFunc* blendFunc = 0)
{
const flatbuffers::BlendFunc *blendFunc = nullptr) {
BoneOptionsBuilder builder_(_fbb);
builder_.add_blendFunc(blendFunc);
builder_.add_length(length);
@ -69,22 +82,36 @@ inline flatbuffers::Offset<BoneOptions> CreateBoneOptions(
return builder_.Finish();
}
inline const flatbuffers::BoneOptions* GetBoneOptions(const void* buf)
{
inline const flatbuffers::BoneOptions *GetBoneOptions(const void *buf) {
return flatbuffers::GetRoot<flatbuffers::BoneOptions>(buf);
}
inline bool VerifyBoneOptionsBuffer(flatbuffers::Verifier& verifier)
{
inline const flatbuffers::BoneOptions *GetSizePrefixedBoneOptions(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<flatbuffers::BoneOptions>(buf);
}
inline bool VerifyBoneOptionsBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<flatbuffers::BoneOptions>(nullptr);
}
inline void FinishBoneOptionsBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::BoneOptions> root)
{
inline bool VerifySizePrefixedBoneOptionsBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<flatbuffers::BoneOptions>(nullptr);
}
inline void FinishBoneOptionsBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::BoneOptions> root) {
fbb.Finish(root);
}
inline void FinishSizePrefixedBoneOptionsBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::BoneOptions> root) {
fbb.FinishSizePrefixed(root);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_GENERATED_CSBONEBINARY_FLATBUFFERS_H_
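
The regenerated header also emits size-prefixed variants (`GetSizePrefixedBoneOptions`, `VerifySizePrefixedBoneOptionsBuffer`, `FinishSizePrefixedBoneOptionsBuffer`) that were absent from the 2.0.0 output. A sketch of that round trip; the length value is arbitrary:

```cpp
#include "flatbuffers/flatbuffers.h"
// Assumes the generated CSBoneBinary header above is also included.

bool RoundTripBoneOptions()
{
    flatbuffers::FlatBufferBuilder fbb;
    auto root = flatbuffers::CreateBoneOptions(fbb, /*nodeOptions*/ 0,
                                               /*length*/ 32.0f,
                                               /*blendFunc*/ nullptr);
    // The size-prefixed form stores the buffer length up front, so a reader
    // can frame consecutive buffers in a stream without external bookkeeping.
    flatbuffers::FinishSizePrefixedBoneOptionsBuffer(fbb, root);

    flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
    if (!flatbuffers::VerifySizePrefixedBoneOptionsBuffer(verifier))
        return false;

    return flatbuffers::GetSizePrefixedBoneOptions(fbb.GetBufferPointer())
               ->length() == 32.0f;
}
```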
@ -1,25 +1,34 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_CSTABCONTROL_FLATBUFFERS_H_
#define FLATBUFFERS_GENERATED_CSTABCONTROL_FLATBUFFERS_H_
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
FLATBUFFERS_VERSION_MINOR == 0 &&
FLATBUFFERS_VERSION_REVISION == 8,
"Non-compatible flatbuffers version included");
#include "CSParseBinary_generated.h"
namespace flatbuffers
{
namespace flatbuffers {
struct TabControlOption;
struct TabControlOptionBuilder;
struct TabHeaderOption;
struct TabHeaderOptionBuilder;
struct TabItemOption;
struct TabItemOptionBuilder;
struct TabControlOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct TabControlOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TabControlOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODEOPTIONS = 4,
VT_HEADERPLACE = 6,
VT_HEADERWIDTH = 8,
@ -29,69 +38,80 @@ struct TabControlOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
VT_IGNOREHEADERTEXTURESIZE = 16,
VT_TABITEMS = 18
};
const WidgetOptions* nodeOptions() const { return GetPointer<const WidgetOptions*>(VT_NODEOPTIONS); }
int32_t headerPlace() const { return GetField<int32_t>(VT_HEADERPLACE, 0); }
int32_t headerWidth() const { return GetField<int32_t>(VT_HEADERWIDTH, 0); }
int32_t headerHeight() const { return GetField<int32_t>(VT_HEADERHEIGHT, 0); }
float selectedTabZoom() const { return GetField<float>(VT_SELECTEDTABZOOM, 0.0f); }
int32_t selectedTabIndex() const { return GetField<int32_t>(VT_SELECTEDTABINDEX, 0); }
uint8_t ignoreHeaderTextureSize() const { return GetField<uint8_t>(VT_IGNOREHEADERTEXTURESIZE, 0); }
const flatbuffers::Vector<flatbuffers::Offset<TabItemOption>>* tabItems() const
{
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<TabItemOption>>*>(VT_TABITEMS);
const flatbuffers::WidgetOptions *nodeOptions() const {
return GetPointer<const flatbuffers::WidgetOptions *>(VT_NODEOPTIONS);
}
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) && VerifyField<int32_t>(verifier, VT_HEADERPLACE) &&
VerifyField<int32_t>(verifier, VT_HEADERWIDTH) && VerifyField<int32_t>(verifier, VT_HEADERHEIGHT) &&
VerifyField<float>(verifier, VT_SELECTEDTABZOOM) &&
VerifyField<int32_t>(verifier, VT_SELECTEDTABINDEX) &&
VerifyField<uint8_t>(verifier, VT_IGNOREHEADERTEXTURESIZE) && VerifyOffset(verifier, VT_TABITEMS) &&
verifier.VerifyVector(tabItems()) && verifier.VerifyVectorOfTables(tabItems()) && verifier.EndTable();
int32_t headerPlace() const {
return GetField<int32_t>(VT_HEADERPLACE, 0);
}
int32_t headerWidth() const {
return GetField<int32_t>(VT_HEADERWIDTH, 0);
}
int32_t headerHeight() const {
return GetField<int32_t>(VT_HEADERHEIGHT, 0);
}
float selectedTabZoom() const {
return GetField<float>(VT_SELECTEDTABZOOM, 0.0f);
}
int32_t selectedTabIndex() const {
return GetField<int32_t>(VT_SELECTEDTABINDEX, 0);
}
uint8_t ignoreHeaderTextureSize() const {
return GetField<uint8_t>(VT_IGNOREHEADERTEXTURESIZE, 0);
}
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::TabItemOption>> *tabItems() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::TabItemOption>> *>(VT_TABITEMS);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) &&
VerifyField<int32_t>(verifier, VT_HEADERPLACE, 4) &&
VerifyField<int32_t>(verifier, VT_HEADERWIDTH, 4) &&
VerifyField<int32_t>(verifier, VT_HEADERHEIGHT, 4) &&
VerifyField<float>(verifier, VT_SELECTEDTABZOOM, 4) &&
VerifyField<int32_t>(verifier, VT_SELECTEDTABINDEX, 4) &&
VerifyField<uint8_t>(verifier, VT_IGNOREHEADERTEXTURESIZE, 1) &&
VerifyOffset(verifier, VT_TABITEMS) &&
verifier.VerifyVector(tabItems()) &&
verifier.VerifyVectorOfTables(tabItems()) &&
verifier.EndTable();
}
};
struct TabControlOptionBuilder
{
struct TabControlOptionBuilder {
typedef TabControlOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_nodeOptions(flatbuffers::Offset<WidgetOptions> nodeOptions)
{
void add_nodeOptions(flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions) {
fbb_.AddOffset(TabControlOption::VT_NODEOPTIONS, nodeOptions);
}
void add_headerPlace(int32_t headerPlace)
{
void add_headerPlace(int32_t headerPlace) {
fbb_.AddElement<int32_t>(TabControlOption::VT_HEADERPLACE, headerPlace, 0);
}
void add_headerWidth(int32_t headerWidth)
{
void add_headerWidth(int32_t headerWidth) {
fbb_.AddElement<int32_t>(TabControlOption::VT_HEADERWIDTH, headerWidth, 0);
}
void add_headerHeight(int32_t headerHeight)
{
void add_headerHeight(int32_t headerHeight) {
fbb_.AddElement<int32_t>(TabControlOption::VT_HEADERHEIGHT, headerHeight, 0);
}
void add_selectedTabZoom(float selectedTabZoom)
{
void add_selectedTabZoom(float selectedTabZoom) {
fbb_.AddElement<float>(TabControlOption::VT_SELECTEDTABZOOM, selectedTabZoom, 0.0f);
}
void add_selectedTabIndex(int32_t selectedTabIndex)
{
void add_selectedTabIndex(int32_t selectedTabIndex) {
fbb_.AddElement<int32_t>(TabControlOption::VT_SELECTEDTABINDEX, selectedTabIndex, 0);
}
void add_ignoreHeaderTextureSize(uint8_t ignoreHeaderTextureSize)
{
void add_ignoreHeaderTextureSize(uint8_t ignoreHeaderTextureSize) {
fbb_.AddElement<uint8_t>(TabControlOption::VT_IGNOREHEADERTEXTURESIZE, ignoreHeaderTextureSize, 0);
}
void add_tabItems(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TabItemOption>>> tabItems)
{
void add_tabItems(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::TabItemOption>>> tabItems) {
fbb_.AddOffset(TabControlOption::VT_TABITEMS, tabItems);
}
explicit TabControlOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
TabControlOptionBuilder& operator=(const TabControlOptionBuilder&);
flatbuffers::Offset<TabControlOption> Finish()
{
explicit TabControlOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<TabControlOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<TabControlOption>(end);
return o;
@ -100,15 +120,14 @@ struct TabControlOptionBuilder
inline flatbuffers::Offset<TabControlOption> CreateTabControlOption(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
int32_t headerPlace = 0,
int32_t headerWidth = 0,
int32_t headerHeight = 0,
float selectedTabZoom = 0.0f,
int32_t selectedTabIndex = 0,
uint8_t ignoreHeaderTextureSize = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TabItemOption>>> tabItems = 0)
{
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::TabItemOption>>> tabItems = 0) {
TabControlOptionBuilder builder_(_fbb);
builder_.add_tabItems(tabItems);
builder_.add_selectedTabIndex(selectedTabIndex);
@ -123,24 +142,30 @@ inline flatbuffers::Offset<TabControlOption> CreateTabControlOption(
inline flatbuffers::Offset<TabControlOption> CreateTabControlOptionDirect(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
int32_t headerPlace = 0,
int32_t headerWidth = 0,
int32_t headerHeight = 0,
float selectedTabZoom = 0.0f,
int32_t selectedTabIndex = 0,
uint8_t ignoreHeaderTextureSize = 0,
const std::vector<flatbuffers::Offset<TabItemOption>>* tabItems = nullptr)
{
auto tabItems__ = tabItems ? _fbb.CreateVector<flatbuffers::Offset<TabItemOption>>(*tabItems) : 0;
return flatbuffers::CreateTabControlOption(_fbb, nodeOptions, headerPlace, headerWidth, headerHeight,
selectedTabZoom, selectedTabIndex, ignoreHeaderTextureSize, tabItems__);
const std::vector<flatbuffers::Offset<flatbuffers::TabItemOption>> *tabItems = nullptr) {
auto tabItems__ = tabItems ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::TabItemOption>>(*tabItems) : 0;
return flatbuffers::CreateTabControlOption(
_fbb,
nodeOptions,
headerPlace,
headerWidth,
headerHeight,
selectedTabZoom,
selectedTabIndex,
ignoreHeaderTextureSize,
tabItems__);
}
struct TabHeaderOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct TabHeaderOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TabHeaderOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODEOPTIONS = 4,
VT_FONTRES = 6,
VT_FONTSIZE = 8,
@ -152,73 +177,99 @@ struct TabHeaderOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
VT_CROSSNORMALFILE = 20,
VT_CROSSDISABLEFILE = 22
};
const WidgetOptions* nodeOptions() const { return GetPointer<const WidgetOptions*>(VT_NODEOPTIONS); }
const ResourceData* fontRes() const { return GetPointer<const ResourceData*>(VT_FONTRES); }
int32_t fontSize() const { return GetField<int32_t>(VT_FONTSIZE, 0); }
const flatbuffers::String* titleText() const { return GetPointer<const flatbuffers::String*>(VT_TITLETEXT); }
const Color* textColor() const { return GetStruct<const Color*>(VT_TEXTCOLOR); }
const ResourceData* normalBackFile() const { return GetPointer<const ResourceData*>(VT_NORMALBACKFILE); }
const ResourceData* pressBackFile() const { return GetPointer<const ResourceData*>(VT_PRESSBACKFILE); }
const ResourceData* disableBackFile() const { return GetPointer<const ResourceData*>(VT_DISABLEBACKFILE); }
const ResourceData* crossNormalFile() const { return GetPointer<const ResourceData*>(VT_CROSSNORMALFILE); }
const ResourceData* crossDisableFile() const { return GetPointer<const ResourceData*>(VT_CROSSDISABLEFILE); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) && VerifyOffset(verifier, VT_FONTRES) &&
verifier.VerifyTable(fontRes()) && VerifyField<int32_t>(verifier, VT_FONTSIZE) &&
VerifyOffset(verifier, VT_TITLETEXT) && verifier.VerifyString(titleText()) &&
VerifyField<Color>(verifier, VT_TEXTCOLOR) && VerifyOffset(verifier, VT_NORMALBACKFILE) &&
verifier.VerifyTable(normalBackFile()) && VerifyOffset(verifier, VT_PRESSBACKFILE) &&
verifier.VerifyTable(pressBackFile()) && VerifyOffset(verifier, VT_DISABLEBACKFILE) &&
verifier.VerifyTable(disableBackFile()) && VerifyOffset(verifier, VT_CROSSNORMALFILE) &&
verifier.VerifyTable(crossNormalFile()) && VerifyOffset(verifier, VT_CROSSDISABLEFILE) &&
verifier.VerifyTable(crossDisableFile()) && verifier.EndTable();
const flatbuffers::WidgetOptions *nodeOptions() const {
return GetPointer<const flatbuffers::WidgetOptions *>(VT_NODEOPTIONS);
}
const flatbuffers::ResourceData *fontRes() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_FONTRES);
}
int32_t fontSize() const {
return GetField<int32_t>(VT_FONTSIZE, 0);
}
const flatbuffers::String *titleText() const {
return GetPointer<const flatbuffers::String *>(VT_TITLETEXT);
}
const flatbuffers::Color *textColor() const {
return GetStruct<const flatbuffers::Color *>(VT_TEXTCOLOR);
}
const flatbuffers::ResourceData *normalBackFile() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_NORMALBACKFILE);
}
const flatbuffers::ResourceData *pressBackFile() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_PRESSBACKFILE);
}
const flatbuffers::ResourceData *disableBackFile() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_DISABLEBACKFILE);
}
const flatbuffers::ResourceData *crossNormalFile() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_CROSSNORMALFILE);
}
const flatbuffers::ResourceData *crossDisableFile() const {
return GetPointer<const flatbuffers::ResourceData *>(VT_CROSSDISABLEFILE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NODEOPTIONS) &&
verifier.VerifyTable(nodeOptions()) &&
VerifyOffset(verifier, VT_FONTRES) &&
verifier.VerifyTable(fontRes()) &&
VerifyField<int32_t>(verifier, VT_FONTSIZE, 4) &&
VerifyOffset(verifier, VT_TITLETEXT) &&
verifier.VerifyString(titleText()) &&
VerifyField<flatbuffers::Color>(verifier, VT_TEXTCOLOR, 1) &&
VerifyOffset(verifier, VT_NORMALBACKFILE) &&
verifier.VerifyTable(normalBackFile()) &&
VerifyOffset(verifier, VT_PRESSBACKFILE) &&
verifier.VerifyTable(pressBackFile()) &&
VerifyOffset(verifier, VT_DISABLEBACKFILE) &&
verifier.VerifyTable(disableBackFile()) &&
VerifyOffset(verifier, VT_CROSSNORMALFILE) &&
verifier.VerifyTable(crossNormalFile()) &&
VerifyOffset(verifier, VT_CROSSDISABLEFILE) &&
verifier.VerifyTable(crossDisableFile()) &&
verifier.EndTable();
}
};
struct TabHeaderOptionBuilder
{
struct TabHeaderOptionBuilder {
typedef TabHeaderOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_nodeOptions(flatbuffers::Offset<WidgetOptions> nodeOptions)
{
void add_nodeOptions(flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions) {
fbb_.AddOffset(TabHeaderOption::VT_NODEOPTIONS, nodeOptions);
}
void add_fontRes(flatbuffers::Offset<ResourceData> fontRes)
{
void add_fontRes(flatbuffers::Offset<flatbuffers::ResourceData> fontRes) {
fbb_.AddOffset(TabHeaderOption::VT_FONTRES, fontRes);
}
void add_fontSize(int32_t fontSize) { fbb_.AddElement<int32_t>(TabHeaderOption::VT_FONTSIZE, fontSize, 0); }
void add_titleText(flatbuffers::Offset<flatbuffers::String> titleText)
{
void add_fontSize(int32_t fontSize) {
fbb_.AddElement<int32_t>(TabHeaderOption::VT_FONTSIZE, fontSize, 0);
}
void add_titleText(flatbuffers::Offset<flatbuffers::String> titleText) {
fbb_.AddOffset(TabHeaderOption::VT_TITLETEXT, titleText);
}
void add_textColor(const Color* textColor) { fbb_.AddStruct(TabHeaderOption::VT_TEXTCOLOR, textColor); }
void add_normalBackFile(flatbuffers::Offset<ResourceData> normalBackFile)
{
void add_textColor(const flatbuffers::Color *textColor) {
fbb_.AddStruct(TabHeaderOption::VT_TEXTCOLOR, textColor);
}
void add_normalBackFile(flatbuffers::Offset<flatbuffers::ResourceData> normalBackFile) {
fbb_.AddOffset(TabHeaderOption::VT_NORMALBACKFILE, normalBackFile);
}
void add_pressBackFile(flatbuffers::Offset<ResourceData> pressBackFile)
{
void add_pressBackFile(flatbuffers::Offset<flatbuffers::ResourceData> pressBackFile) {
fbb_.AddOffset(TabHeaderOption::VT_PRESSBACKFILE, pressBackFile);
}
void add_disableBackFile(flatbuffers::Offset<ResourceData> disableBackFile)
{
void add_disableBackFile(flatbuffers::Offset<flatbuffers::ResourceData> disableBackFile) {
fbb_.AddOffset(TabHeaderOption::VT_DISABLEBACKFILE, disableBackFile);
}
void add_crossNormalFile(flatbuffers::Offset<ResourceData> crossNormalFile)
{
void add_crossNormalFile(flatbuffers::Offset<flatbuffers::ResourceData> crossNormalFile) {
fbb_.AddOffset(TabHeaderOption::VT_CROSSNORMALFILE, crossNormalFile);
}
void add_crossDisableFile(flatbuffers::Offset<ResourceData> crossDisableFile)
{
void add_crossDisableFile(flatbuffers::Offset<flatbuffers::ResourceData> crossDisableFile) {
fbb_.AddOffset(TabHeaderOption::VT_CROSSDISABLEFILE, crossDisableFile);
}
explicit TabHeaderOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
TabHeaderOptionBuilder& operator=(const TabHeaderOptionBuilder&);
flatbuffers::Offset<TabHeaderOption> Finish()
{
explicit TabHeaderOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<TabHeaderOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<TabHeaderOption>(end);
return o;
@ -227,17 +278,16 @@ struct TabHeaderOptionBuilder
inline flatbuffers::Offset<TabHeaderOption> CreateTabHeaderOption(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<ResourceData> fontRes = 0,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<flatbuffers::ResourceData> fontRes = 0,
int32_t fontSize = 0,
flatbuffers::Offset<flatbuffers::String> titleText = 0,
const Color* textColor = 0,
flatbuffers::Offset<ResourceData> normalBackFile = 0,
flatbuffers::Offset<ResourceData> pressBackFile = 0,
flatbuffers::Offset<ResourceData> disableBackFile = 0,
flatbuffers::Offset<ResourceData> crossNormalFile = 0,
flatbuffers::Offset<ResourceData> crossDisableFile = 0)
{
const flatbuffers::Color *textColor = nullptr,
flatbuffers::Offset<flatbuffers::ResourceData> normalBackFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> pressBackFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> disableBackFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> crossNormalFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> crossDisableFile = 0) {
TabHeaderOptionBuilder builder_(_fbb);
builder_.add_crossDisableFile(crossDisableFile);
builder_.add_crossNormalFile(crossNormalFile);
@ -254,97 +304,111 @@ inline flatbuffers::Offset<TabHeaderOption> CreateTabHeaderOption(
inline flatbuffers::Offset<TabHeaderOption> CreateTabHeaderOptionDirect(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<ResourceData> fontRes = 0,
flatbuffers::Offset<flatbuffers::WidgetOptions> nodeOptions = 0,
flatbuffers::Offset<flatbuffers::ResourceData> fontRes = 0,
int32_t fontSize = 0,
const char *titleText = nullptr,
const Color* textColor = 0,
flatbuffers::Offset<ResourceData> normalBackFile = 0,
flatbuffers::Offset<ResourceData> pressBackFile = 0,
flatbuffers::Offset<ResourceData> disableBackFile = 0,
flatbuffers::Offset<ResourceData> crossNormalFile = 0,
flatbuffers::Offset<ResourceData> crossDisableFile = 0)
{
const flatbuffers::Color *textColor = nullptr,
flatbuffers::Offset<flatbuffers::ResourceData> normalBackFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> pressBackFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> disableBackFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> crossNormalFile = 0,
flatbuffers::Offset<flatbuffers::ResourceData> crossDisableFile = 0) {
auto titleText__ = titleText ? _fbb.CreateString(titleText) : 0;
return flatbuffers::CreateTabHeaderOption(_fbb, nodeOptions, fontRes, fontSize, titleText__, textColor,
normalBackFile, pressBackFile, disableBackFile, crossNormalFile,
return flatbuffers::CreateTabHeaderOption(
_fbb,
nodeOptions,
fontRes,
fontSize,
titleText__,
textColor,
normalBackFile,
pressBackFile,
disableBackFile,
crossNormalFile,
crossDisableFile);
}
struct TabItemOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
{
struct TabItemOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TabItemOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_HEADER = 4,
VT_CONTAINER = 6
};
const TabHeaderOption* header() const { return GetPointer<const TabHeaderOption*>(VT_HEADER); }
const NodeTree* container() const { return GetPointer<const NodeTree*>(VT_CONTAINER); }
bool Verify(flatbuffers::Verifier& verifier) const
{
return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_HEADER) && verifier.VerifyTable(header()) &&
VerifyOffset(verifier, VT_CONTAINER) && verifier.VerifyTable(container()) && verifier.EndTable();
const flatbuffers::TabHeaderOption *header() const {
return GetPointer<const flatbuffers::TabHeaderOption *>(VT_HEADER);
}
const flatbuffers::NodeTree *container() const {
return GetPointer<const flatbuffers::NodeTree *>(VT_CONTAINER);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_HEADER) &&
verifier.VerifyTable(header()) &&
VerifyOffset(verifier, VT_CONTAINER) &&
verifier.VerifyTable(container()) &&
verifier.EndTable();
}
};
struct TabItemOptionBuilder
{
struct TabItemOptionBuilder {
typedef TabItemOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_header(flatbuffers::Offset<TabHeaderOption> header) { fbb_.AddOffset(TabItemOption::VT_HEADER, header); }
void add_container(flatbuffers::Offset<NodeTree> container)
{
void add_header(flatbuffers::Offset<flatbuffers::TabHeaderOption> header) {
fbb_.AddOffset(TabItemOption::VT_HEADER, header);
}
void add_container(flatbuffers::Offset<flatbuffers::NodeTree> container) {
fbb_.AddOffset(TabItemOption::VT_CONTAINER, container);
}
explicit TabItemOptionBuilder(flatbuffers::FlatBufferBuilder& _fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
TabItemOptionBuilder& operator=(const TabItemOptionBuilder&);
flatbuffers::Offset<TabItemOption> Finish()
{
explicit TabItemOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<TabItemOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<TabItemOption>(end);
return o;
}
};
inline flatbuffers::Offset<TabItemOption> CreateTabItemOption(flatbuffers::FlatBufferBuilder& _fbb,
flatbuffers::Offset<TabHeaderOption> header = 0,
flatbuffers::Offset<NodeTree> container = 0)
{
inline flatbuffers::Offset<TabItemOption> CreateTabItemOption(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::TabHeaderOption> header = 0,
flatbuffers::Offset<flatbuffers::NodeTree> container = 0) {
TabItemOptionBuilder builder_(_fbb);
builder_.add_container(container);
builder_.add_header(header);
return builder_.Finish();
}
inline const flatbuffers::TabControlOption* GetTabControlOption(const void* buf)
{
inline const flatbuffers::TabControlOption *GetTabControlOption(const void *buf) {
return flatbuffers::GetRoot<flatbuffers::TabControlOption>(buf);
}
inline const flatbuffers::TabControlOption* GetSizePrefixedTabControlOption(const void* buf)
{
inline const flatbuffers::TabControlOption *GetSizePrefixedTabControlOption(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<flatbuffers::TabControlOption>(buf);
}
inline bool VerifyTabControlOptionBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifyTabControlOptionBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<flatbuffers::TabControlOption>(nullptr);
}
inline bool VerifySizePrefixedTabControlOptionBuffer(flatbuffers::Verifier& verifier)
{
inline bool VerifySizePrefixedTabControlOptionBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<flatbuffers::TabControlOption>(nullptr);
}
inline void FinishTabControlOptionBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::TabControlOption> root)
{
inline void FinishTabControlOptionBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::TabControlOption> root) {
fbb.Finish(root);
}
inline void FinishSizePrefixedTabControlOptionBuffer(flatbuffers::FlatBufferBuilder& fbb,
flatbuffers::Offset<flatbuffers::TabControlOption> root)
{
inline void FinishSizePrefixedTabControlOptionBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<flatbuffers::TabControlOption> root) {
fbb.FinishSizePrefixed(root);
}
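
For tables that own vectors of sub-tables, the `Direct` helper accepts a `std::vector` of offsets that must be created first. A sketch building a control with two empty tab items; the zero offsets stand in for real header/container tables:

```cpp
#include <vector>
#include "flatbuffers/flatbuffers.h"
// Assumes the generated CSTabControl header above is also included.

flatbuffers::Offset<flatbuffers::TabControlOption> BuildTwoEmptyTabs(
    flatbuffers::FlatBufferBuilder& fbb)
{
    // Child tables are serialized before the vector that references them.
    std::vector<flatbuffers::Offset<flatbuffers::TabItemOption>> items;
    for (int i = 0; i < 2; ++i)
        items.push_back(flatbuffers::CreateTabItemOption(fbb, /*header*/ 0,
                                                         /*container*/ 0));

    return flatbuffers::CreateTabControlOptionDirect(
        fbb, /*nodeOptions*/ 0, /*headerPlace*/ 0, /*headerWidth*/ 0,
        /*headerHeight*/ 0, /*selectedTabZoom*/ 0.0f, /*selectedTabIndex*/ 0,
        /*ignoreHeaderTextureSize*/ 0, &items);
}
```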
thirdparty/README.md vendored
@ -6,7 +6,7 @@
## astc
- [![Upstream](https://img.shields.io/github/v/release/ARM-software/astc-encoder?label=Upstream)](https://github.com/ARM-software/astc-encoder)
- Version: 4.0.0
- Version: 4.1.0
- License: Apache-2.0
## Box2D
@ -52,12 +52,12 @@
## flatbuffers
- [![Upstream](https://img.shields.io/github/v/release/google/flatbuffers?label=Upstream)](https://github.com/google/flatbuffers)
- Version: 2.0.0
- Version: 2.0.8
- License: Apache-2.0
## {fmt}
- [![Upstream](https://img.shields.io/github/v/release/fmtlib/fmt?label=Upstream)](https://github.com/fmtlib/fmt)
- Version: 9.0.0
- Version: 9.1.0
- License: MIT
## FreeType
@ -102,7 +102,7 @@
## llhttp
- [![Upstream](https://img.shields.io/github/v/release/nodejs/llhttp?label=Upstream)](https://github.com/nodejs/llhttp)
- Version: 6.0.7
- Version: 6.0.9
- License: MIT
## lua
@ -216,7 +216,7 @@
## webp
- [![Upstream](https://img.shields.io/github/v/tag/webmproject/libwebp?label=Upstream)](https://github.com/webmproject/libwebp)
- Version: 1.2.2
- Version: 1.2.4
- License: Google Inc
## xsbase
@ -1171,7 +1171,7 @@ static float prepare_block_statistics(
/* See header for documentation. */
void compress_block(
const astcenc_context& ctx,
const astcenc_contexti& ctx,
const image_block& blk,
physical_compressed_block& pcb,
compression_working_buffers& tmpbuf)
@ -99,17 +99,9 @@ static void brent_kung_prefix_sum(
} while (lc_stride > 2);
}
/**
* @brief Compute averages for a pixel region.
*
* The routine computes both in a single pass, using a summed-area table to decouple the running
* time from the averaging/variance kernel size.
*
* @param[out] ctx The compressor context storing the output data.
* @param arg The input parameter structure.
*/
static void compute_pixel_region_variance(
astcenc_context& ctx,
/* See header for documentation. */
void compute_pixel_region_variance(
astcenc_contexti& ctx,
const pixel_region_args& arg
) {
// Unpack the memory structure into local variables
@ -427,57 +419,6 @@ static void compute_pixel_region_variance(
}
}
void compute_averages(
astcenc_context& ctx,
const avg_args &ag
) {
pixel_region_args arg = ag.arg;
arg.work_memory = new vfloat4[ag.work_memory_size];
int size_x = ag.img_size_x;
int size_y = ag.img_size_y;
int size_z = ag.img_size_z;
int step_xy = ag.blk_size_xy;
int step_z = ag.blk_size_z;
int y_tasks = (size_y + step_xy - 1) / step_xy;
// All threads run this processing loop until there is no work remaining
while (true)
{
unsigned int count;
unsigned int base = ctx.manage_avg.get_task_assignment(16, count);
if (!count)
{
break;
}
for (unsigned int i = base; i < base + count; i++)
{
int z = (i / (y_tasks)) * step_z;
int y = (i - (z * y_tasks)) * step_xy;
arg.size_z = astc::min(step_z, size_z - z);
arg.offset_z = z;
arg.size_y = astc::min(step_xy, size_y - y);
arg.offset_y = y;
for (int x = 0; x < size_x; x += step_xy)
{
arg.size_x = astc::min(step_xy, size_x - x);
arg.offset_x = x;
compute_pixel_region_variance(ctx, arg);
}
}
ctx.manage_avg.complete_task_assignment(count);
}
delete[] arg.work_memory;
}
/* See header for documentation. */
unsigned int init_compute_averages(
const astcenc_image& img,
@ -24,7 +24,7 @@
#include <new>
#include "astcenc.h"
#include "astcenc_internal.h"
#include "astcenc_internal_entry.h"
#include "astcenc_diagnostic_trace.h"
/**
@ -701,7 +701,8 @@ astcenc_error astcenc_context_alloc(
}
#endif
astcenc_context* ctx = new astcenc_context;
astcenc_context* ctxo = new astcenc_context;
astcenc_contexti* ctx = &ctxo->context;
ctx->thread_count = thread_count;
ctx->config = config;
ctx->working_buffers = nullptr;
@ -746,7 +747,7 @@ astcenc_error astcenc_context_alloc(
if (!ctx->working_buffers)
{
aligned_free<block_size_descriptor>(ctx->bsd);
delete ctx;
delete ctxo;
*context = nullptr;
return ASTCENC_ERR_OUT_OF_MEM;
}
@ -765,7 +766,7 @@ astcenc_error astcenc_context_alloc(
trace_add_data("block_z", config.block_z);
#endif
*context = ctx;
*context = ctxo;
#if !defined(ASTCENC_DECOMPRESS_ONLY)
prepare_angular_tables();
@ -776,16 +777,17 @@ astcenc_error astcenc_context_alloc(
/* See header for documentation. */
void astcenc_context_free(
astcenc_context* ctx
astcenc_context* ctxo
) {
if (ctx)
if (ctxo)
{
astcenc_contexti* ctx = &ctxo->context;
aligned_free<compression_working_buffers>(ctx->working_buffers);
aligned_free<block_size_descriptor>(ctx->bsd);
#if defined(ASTCENC_DIAGNOSTICS)
delete ctx->trace_log;
#endif
delete ctx;
delete ctxo;
}
}
@ -794,19 +796,20 @@ void astcenc_context_free(
/**
* @brief Compress an image, after any preflight has completed.
*
* @param[out] ctx The compressor context.
* @param[out] ctxo The compressor context.
* @param thread_index The thread index.
* @param image The input image.
* @param swizzle The input swizzle.
* @param[out] buffer The output array for the compressed data.
*/
static void compress_image(
astcenc_context& ctx,
astcenc_context& ctxo,
unsigned int thread_index,
const astcenc_image& image,
const astcenc_swizzle& swizzle,
uint8_t* buffer
) {
astcenc_contexti& ctx = ctxo.context;
const block_size_descriptor& bsd = *ctx.bsd;
astcenc_profile decode_mode = ctx.config.profile;
@ -839,7 +842,7 @@ static void compress_image(
auto& temp_buffers = ctx.working_buffers[thread_index];
// Only the first thread actually runs the initializer
ctx.manage_compress.init(block_count);
ctxo.manage_compress.init(block_count);
// Determine if we can use an optimized load function
bool needs_swz = (swizzle.r != ASTCENC_SWZ_R) || (swizzle.g != ASTCENC_SWZ_G) ||
@ -861,7 +864,7 @@ static void compress_image(
while (true)
{
unsigned int count;
unsigned int base = ctx.manage_compress.get_task_assignment(16, count);
unsigned int base = ctxo.manage_compress.get_task_assignment(16, count);
if (!count)
{
break;
@ -945,15 +948,77 @@ static void compress_image(
compress_block(ctx, blk, *pcb, temp_buffers);
}
ctx.manage_compress.complete_task_assignment(count);
ctxo.manage_compress.complete_task_assignment(count);
}
}
/**
* @brief Compute regional averages in an image.
*
* This function can be called by multiple threads, but only after a single
* thread calls the setup function @c init_compute_averages().
*
* Results are written back into @c img->input_alpha_averages.
*
* @param[out] ctx The context.
* @param ag The average and variance arguments created during setup.
*/
static void compute_averages(
astcenc_context& ctx,
const avg_args &ag
) {
pixel_region_args arg = ag.arg;
arg.work_memory = new vfloat4[ag.work_memory_size];
int size_x = ag.img_size_x;
int size_y = ag.img_size_y;
int size_z = ag.img_size_z;
int step_xy = ag.blk_size_xy;
int step_z = ag.blk_size_z;
int y_tasks = (size_y + step_xy - 1) / step_xy;
// All threads run this processing loop until there is no work remaining
while (true)
{
unsigned int count;
unsigned int base = ctx.manage_avg.get_task_assignment(16, count);
if (!count)
{
break;
}
for (unsigned int i = base; i < base + count; i++)
{
int z = (i / (y_tasks)) * step_z;
int y = (i - (z * y_tasks)) * step_xy;
arg.size_z = astc::min(step_z, size_z - z);
arg.offset_z = z;
arg.size_y = astc::min(step_xy, size_y - y);
arg.offset_y = y;
for (int x = 0; x < size_x; x += step_xy)
{
arg.size_x = astc::min(step_xy, size_x - x);
arg.offset_x = x;
compute_pixel_region_variance(ctx.context, arg);
}
}
ctx.manage_avg.complete_task_assignment(count);
}
delete[] arg.work_memory;
}
#endif
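
Moving `compute_averages` here leaves the public threading contract unchanged: every worker thread calls `astcenc_compress_image` with the shared context and its own `thread_index`, and the first thread to arrive runs each stage's `init`. A sketch of that calling pattern against the public `astcenc.h` API (constant and field names follow the public header rather than this diff; error handling trimmed):

```cpp
#include <cstdint>
#include <thread>
#include <vector>
#include "astcenc.h"

// Compress one RGBA8 image with N worker threads sharing a single context.
// The codec's ParallelManager hands out block ranges internally.
bool compress_mt(uint8_t* rgba, unsigned dim_x, unsigned dim_y,
                 uint8_t* out, size_t out_len, unsigned thread_count)
{
    astcenc_config config;
    if (astcenc_config_init(ASTCENC_PRF_LDR, 4, 4, 1, ASTCENC_PRE_MEDIUM,
                            0, &config) != ASTCENC_SUCCESS)
        return false;

    astcenc_context* ctx = nullptr;
    if (astcenc_context_alloc(&config, thread_count, &ctx) != ASTCENC_SUCCESS)
        return false;

    void* slices[1] = {rgba};
    astcenc_image image{};
    image.dim_x     = dim_x;
    image.dim_y     = dim_y;
    image.dim_z     = 1;
    image.data_type = ASTCENC_TYPE_U8;
    image.data      = slices;

    const astcenc_swizzle swz{ASTCENC_SWZ_R, ASTCENC_SWZ_G,
                              ASTCENC_SWZ_B, ASTCENC_SWZ_A};

    std::vector<std::thread> workers;
    for (unsigned i = 0; i < thread_count; i++)
        workers.emplace_back([&, i] {
            // All threads enter; work is self-assigned via the managers.
            astcenc_compress_image(ctx, &image, &swz, out, out_len, i);
        });
    for (auto& t : workers)
        t.join();

    astcenc_context_free(ctx);
    return true;
}
```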
/* See header for documentation. */
astcenc_error astcenc_compress_image(
astcenc_context* ctx,
astcenc_context* ctxo,
astcenc_image* imagep,
const astcenc_swizzle* swizzle,
uint8_t* data_out,
@ -961,7 +1026,7 @@ astcenc_error astcenc_compress_image(
unsigned int thread_index
) {
#if defined(ASTCENC_DECOMPRESS_ONLY)
(void)ctx;
(void)ctxo;
(void)imagep;
(void)swizzle;
(void)data_out;
@ -969,6 +1034,7 @@ astcenc_error astcenc_compress_image(
(void)thread_index;
return ASTCENC_ERR_BAD_CONTEXT;
#else
astcenc_contexti* ctx = &ctxo->context;
astcenc_error status;
astcenc_image& image = *imagep;
@ -1006,7 +1072,7 @@ astcenc_error astcenc_compress_image(
// If context thread count is one then implicitly reset
if (ctx->thread_count == 1)
{
astcenc_compress_reset(ctx);
astcenc_compress_reset(ctxo);
}
if (ctx->config.a_scale_radius != 0)
@ -1024,19 +1090,19 @@ astcenc_error astcenc_compress_image(
};
// Only the first thread actually runs the initializer
ctx->manage_avg.init(init_avg);
ctxo->manage_avg.init(init_avg);
// All threads will enter this function and dynamically grab work
compute_averages(*ctx, ctx->avg_preprocess_args);
compute_averages(*ctxo, ctx->avg_preprocess_args);
}
// Wait for compute_averages to complete before compressing
ctx->manage_avg.wait();
ctxo->manage_avg.wait();
compress_image(*ctx, thread_index, image, *swizzle, data_out);
compress_image(*ctxo, thread_index, image, *swizzle, data_out);
// Wait for compress to complete before freeing memory
ctx->manage_compress.wait();
ctxo->manage_compress.wait();
auto term_compress = [ctx]() {
delete[] ctx->input_alpha_averages;
@ -1044,7 +1110,7 @@ astcenc_error astcenc_compress_image(
};
// Only the first thread to arrive actually runs the term
ctx->manage_compress.term(term_compress);
ctxo->manage_compress.term(term_compress);
return ASTCENC_SUCCESS;
#endif
@ -1052,26 +1118,27 @@ astcenc_error astcenc_compress_image(
/* See header for documentation. */
astcenc_error astcenc_compress_reset(
astcenc_context* ctx
astcenc_context* ctxo
) {
#if defined(ASTCENC_DECOMPRESS_ONLY)
(void)ctx;
(void)ctxo;
return ASTCENC_ERR_BAD_CONTEXT;
#else
astcenc_contexti* ctx = &ctxo->context;
if (ctx->config.flags & ASTCENC_FLG_DECOMPRESS_ONLY)
{
return ASTCENC_ERR_BAD_CONTEXT;
}
ctx->manage_avg.reset();
ctx->manage_compress.reset();
ctxo->manage_avg.reset();
ctxo->manage_compress.reset();
return ASTCENC_SUCCESS;
#endif
}
/* See header for documentation. */
astcenc_error astcenc_decompress_image(
astcenc_context* ctx,
astcenc_context* ctxo,
const uint8_t* data,
size_t data_len,
astcenc_image* image_outp,
@ -1080,6 +1147,7 @@ astcenc_error astcenc_decompress_image(
) {
astcenc_error status;
astcenc_image& image_out = *image_outp;
astcenc_contexti* ctx = &ctxo->context;
// Today this doesn't matter (working set on stack) but might in future ...
if (thread_index >= ctx->thread_count)
@ -1117,17 +1185,17 @@ astcenc_error astcenc_decompress_image(
// If context thread count is one then implicitly reset
if (ctx->thread_count == 1)
{
astcenc_decompress_reset(ctx);
astcenc_decompress_reset(ctxo);
}
// Only the first thread actually runs the initializer
ctx->manage_decompress.init(zblocks * yblocks * xblocks);
ctxo->manage_decompress.init(zblocks * yblocks * xblocks);
// All threads run this processing loop until there is no work remaining
while (true)
{
unsigned int count;
unsigned int base = ctx->manage_decompress.get_task_assignment(128, count);
unsigned int base = ctxo->manage_decompress.get_task_assignment(128, count);
if (!count)
{
break;
@ -1157,7 +1225,7 @@ astcenc_error astcenc_decompress_image(
x * block_x, y * block_y, z * block_z, *swizzle);
}
ctx->manage_decompress.complete_task_assignment(count);
ctxo->manage_decompress.complete_task_assignment(count);
}
return ASTCENC_SUCCESS;
@ -1165,24 +1233,26 @@ astcenc_error astcenc_decompress_image(
/* See header for documentation. */
astcenc_error astcenc_decompress_reset(
astcenc_context* ctx
astcenc_context* ctxo
) {
ctx->manage_decompress.reset();
ctxo->manage_decompress.reset();
return ASTCENC_SUCCESS;
}
/* See header for documentation. */
astcenc_error astcenc_get_block_info(
astcenc_context* ctx,
astcenc_context* ctxo,
const uint8_t data[16],
astcenc_block_info* info
) {
#if defined(ASTCENC_DECOMPRESS_ONLY)
(void)ctx;
(void)ctxo;
(void)data;
(void)info;
return ASTCENC_ERR_BAD_CONTEXT;
#else
astcenc_contexti* ctx = &ctxo->context;
// Decode the compressed data into a symbolic form
const physical_compressed_block& pcb = *reinterpret_cast<const physical_compressed_block*>(data);
symbolic_compressed_block scb;
@ -23,15 +23,12 @@
#define ASTCENC_INTERNAL_INCLUDED
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#if defined(ASTCENC_DIAGNOSTICS)
#include <cstdio>
#endif
#include <cstdlib>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <type_traits>
#include "astcenc.h"
#include "astcenc_mathlib.h"
@ -161,223 +158,6 @@ static_assert((WEIGHTS_MAX_BLOCK_MODES % ASTCENC_SIMD_WIDTH) == 0,
"WEIGHTS_MAX_BLOCK_MODES must be multiple of ASTCENC_SIMD_WIDTH");
/* ============================================================================
Parallel execution control
============================================================================ */
/**
* @brief A simple counter-based manager for parallel task execution.
*
* The task processing execution consists of:
*
* * A single-threaded init stage.
* * A multi-threaded processing stage.
* * A condition variable so threads can wait for processing completion.
*
* The init stage will be executed by the first thread to arrive in the critical section; there is
* no main thread in the thread pool.
*
* The processing stage uses dynamic dispatch to assign task tickets to threads on an on-demand
* basis. Threads may therefore each execute different numbers of tasks, depending on their
* processing complexity. The task queue and the task tickets are just counters; the caller must map
* these integers to an actual processing partition in a specific problem domain.
*
* The exit wait condition is needed to ensure processing has finished before a worker thread can
* progress to the next stage of the pipeline. Specifically, a worker may exit the processing stage
* because there are no new tasks to assign to it while other worker threads are still processing.
* Calling @c wait() will ensure that all other workers have finished before the thread can proceed.
*
* The basic usage model:
*
* // --------- From single-threaded code ---------
*
* // Reset the tracker state
* manager->reset()
*
* // --------- From multi-threaded code ---------
*
* // Run the stage init; only first thread actually runs the lambda
* manager->init(<lambda>)
*
* do
* {
* // Request a task assignment
* uint task_count;
* uint base_index = manager->get_tasks(<granule>, task_count);
*
* // Process any tasks we were given (task_count <= granule size)
* if (task_count)
* {
* // Run the user task processing code for N tasks here
* ...
*
* // Flag these tasks as complete
* manager->complete_tasks(task_count);
* }
* } while (task_count);
*
* // Wait for all threads to complete tasks before progressing
* manager->wait()
*
* // Run the stage term; only first thread actually runs the lambda
* manager->term(<lambda>)
*/
class ParallelManager
{
private:
/** @brief Lock used for critical section and condition synchronization. */
std::mutex m_lock;
/** @brief True if the stage init() step has been executed. */
bool m_init_done;
/** @brief True if the stage term() step has been executed. */
bool m_term_done;
/** @brief Condition variable for tracking stage processing completion. */
std::condition_variable m_complete;
/** @brief Number of tasks started, but not necessarily finished. */
std::atomic<unsigned int> m_start_count;
/** @brief Number of tasks finished. */
unsigned int m_done_count;
/** @brief Number of tasks that need to be processed. */
unsigned int m_task_count;
public:
/** @brief Create a new ParallelManager. */
ParallelManager()
{
reset();
}
/**
* @brief Reset the tracker for a new processing batch.
*
* This must be called from single-threaded code before starting the multi-threaded processing
* operations.
*/
void reset()
{
m_init_done = false;
m_term_done = false;
m_start_count = 0;
m_done_count = 0;
m_task_count = 0;
}
/**
* @brief Trigger the pipeline stage init step.
*
* This can be called from multi-threaded code. The first thread to hit this will process the
* initialization. Other threads will block and wait for it to complete.
*
* @param init_func Callable which executes the stage initialization. It must return the
* total number of tasks in the stage.
*/
void init(std::function<unsigned int(void)> init_func)
{
std::lock_guard<std::mutex> lck(m_lock);
if (!m_init_done)
{
m_task_count = init_func();
m_init_done = true;
}
}
/**
* @brief Trigger the pipeline stage init step.
*
* This can be called from multi-threaded code. The first thread to hit this will process the
* initialization. Other threads will block and wait for it to complete.
*
* @param task_count Total number of tasks needing processing.
*/
void init(unsigned int task_count)
{
std::lock_guard<std::mutex> lck(m_lock);
if (!m_init_done)
{
m_task_count = task_count;
m_init_done = true;
}
}
/**
* @brief Request a task assignment.
*
* Assign up to @c granule tasks to the caller for processing.
*
* @param granule Maximum number of tasks that can be assigned.
* @param[out] count Actual number of tasks assigned, or zero if no tasks were assigned.
*
* @return Task index of the first assigned task; assigned tasks increment from this.
*/
unsigned int get_task_assignment(unsigned int granule, unsigned int& count)
{
unsigned int base = m_start_count.fetch_add(granule, std::memory_order_relaxed);
if (base >= m_task_count)
{
count = 0;
return 0;
}
count = astc::min(m_task_count - base, granule);
return base;
}
/**
* @brief Complete a task assignment.
*
* Mark @c count tasks as complete. This will notify all threads blocked on @c wait() if this
* completes the processing of the stage.
*
* @param count The number of completed tasks.
*/
void complete_task_assignment(unsigned int count)
{
// Note: m_done_count cannot use an atomic without the mutex; this has a race between the
// update here and the wait() for other threads
std::unique_lock<std::mutex> lck(m_lock);
this->m_done_count += count;
if (m_done_count == m_task_count)
{
lck.unlock();
m_complete.notify_all();
}
}
/**
* @brief Wait for stage processing to complete.
*/
void wait()
{
std::unique_lock<std::mutex> lck(m_lock);
m_complete.wait(lck, [this]{ return m_done_count == m_task_count; });
}
/**
* @brief Trigger the pipeline stage term step.
*
* This can be called from multi-threaded code. The first thread to hit this will process the
* work pool termination. Caller must have called @c wait() prior to calling this function to
* ensure that processing is complete.
*
* @param term_func Callable which executes the stage termination.
*/
void term(std::function<void(void)> term_func)
{
std::lock_guard<std::mutex> lck(m_lock);
if (!m_term_done)
{
term_func();
m_term_done = true;
}
}
};
/* ============================================================================
Commonly used data structures
============================================================================ */
@ -1432,7 +1212,7 @@ class TraceLog;
/**
* @brief The astcenc compression context.
*/
struct astcenc_context
struct astcenc_contexti
{
/** @brief The configuration this context was created with. */
astcenc_config config;
@ -1458,17 +1238,8 @@ struct astcenc_context
#if !defined(ASTCENC_DECOMPRESS_ONLY)
/** @brief The pixel region and variance worker arguments. */
avg_args avg_preprocess_args;
/** @brief The parallel manager for averages computation. */
ParallelManager manage_avg;
/** @brief The parallel manager for compression. */
ParallelManager manage_compress;
#endif
/** @brief The parallel manager for decompression. */
ParallelManager manage_decompress;
#if defined(ASTCENC_DIAGNOSTICS)
/**
* @brief The diagnostic trace logger.
@ -1809,20 +1580,17 @@ unsigned int init_compute_averages(
avg_args& ag);
/**
* @brief Compute regional averages in an image.
* @brief Compute averages for a pixel region.
*
* This function can be called by multiple threads, but only after a single
* thread calls the setup function @c init_compute_averages().
* The routine computes both in a single pass, using a summed-area table to decouple the running
* time from the averaging/variance kernel size.
*
* Results are written back into @c img->input_alpha_averages.
*
* @param[out] ctx The context.
* @param ag The average and variance arguments created during setup.
* @param[out] ctx The compressor context storing the output data.
* @param arg The input parameter structure.
*/
void compute_averages(
astcenc_context& ctx,
const avg_args& ag);
void compute_pixel_region_variance(
astcenc_contexti& ctx,
const pixel_region_args& arg);
/**
* @brief Load a single image block from the input image.
*
@ -2222,7 +1990,7 @@ void compute_angular_endpoints_2planes(
* @param[out] tmpbuf Preallocated scratch buffers for the compressor.
*/
void compress_block(
const astcenc_context& ctx,
const astcenc_contexti& ctx,
const image_block& blk,
physical_compressed_block& pcb,
compression_working_buffers& tmpbuf);
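
The members removed here move to an outer wrapper whose shape can be inferred from the `ctxo->context` and `ctxo->manage_*` call sites in the entry-point diff above; a sketch only, not the verbatim upstream definition (which lands in the new `astcenc_internal_entry.h` below):

```cpp
// Outer context: inner codec state plus the thread-pool machinery that
// astcenc_internal.h no longer needs to see.
struct astcenc_context
{
    astcenc_contexti context;           // config, BSD, working buffers

#if !defined(ASTCENC_DECOMPRESS_ONLY)
    ParallelManager manage_avg;         // averages pass scheduling
    ParallelManager manage_compress;    // compression pass scheduling
#endif

    ParallelManager manage_decompress;  // decompression pass scheduling
};
```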
thirdparty/astc/astcenc_internal_entry.h vendored Normal file
@ -0,0 +1,273 @@
// SPDX-License-Identifier: Apache-2.0
// ----------------------------------------------------------------------------
// Copyright 2011-2022 Arm Limited
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// ----------------------------------------------------------------------------
/**
* @brief Functions and data declarations for the outer context.
*
* The outer context includes thread-pool management, which is slower to
* compile due to increased use of C++ stdlib. The inner context used in the
* majority of the codec library does not include this.
*/
#ifndef ASTCENC_INTERNAL_ENTRY_INCLUDED
#define ASTCENC_INTERNAL_ENTRY_INCLUDED
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include "astcenc_internal.h"
/* ============================================================================
Parallel execution control
============================================================================ */
/**
* @brief A simple counter-based manager for parallel task execution.
*
* The task processing execution consists of:
*
* * A single-threaded init stage.
* * A multi-threaded processing stage.
* * A condition variable so threads can wait for processing completion.
*
 * The init stage will be executed by the first thread to arrive in the critical section; there is
* no main thread in the thread pool.
*
* The processing stage uses dynamic dispatch to assign task tickets to threads on an on-demand
 * basis. Threads may therefore each execute different numbers of tasks, depending on their
* processing complexity. The task queue and the task tickets are just counters; the caller must map
* these integers to an actual processing partition in a specific problem domain.
*
* The exit wait condition is needed to ensure processing has finished before a worker thread can
 * progress to the next stage of the pipeline. Specifically, a worker may exit the processing stage
 * because there are no new tasks to assign to it while other worker threads are still processing.
 * Calling @c wait() will ensure that all other workers have finished before the thread can proceed.
*
* The basic usage model:
*
* // --------- From single-threaded code ---------
*
* // Reset the tracker state
* manager->reset()
*
* // --------- From multi-threaded code ---------
*
* // Run the stage init; only first thread actually runs the lambda
* manager->init(<lambda>)
*
* do
* {
* // Request a task assignment
* uint task_count;
* uint base_index = manager->get_tasks(<granule>, task_count);
*
* // Process any tasks we were given (task_count <= granule size)
* if (task_count)
* {
* // Run the user task processing code for N tasks here
* ...
*
* // Flag these tasks as complete
* manager->complete_tasks(task_count);
* }
* } while (task_count);
*
* // Wait for all threads to complete tasks before progressing
* manager->wait()
*
* // Run the stage term; only first thread actually runs the lambda
* manager->term(<lambda>)
*/
class ParallelManager
{
private:
/** @brief Lock used for critical section and condition synchronization. */
std::mutex m_lock;
/** @brief True if the stage init() step has been executed. */
bool m_init_done;
/** @brief True if the stage term() step has been executed. */
bool m_term_done;
/** @brief Condition variable for tracking stage processing completion. */
std::condition_variable m_complete;
/** @brief Number of tasks started, but not necessarily finished. */
std::atomic<unsigned int> m_start_count;
/** @brief Number of tasks finished. */
unsigned int m_done_count;
/** @brief Number of tasks that need to be processed. */
unsigned int m_task_count;
public:
/** @brief Create a new ParallelManager. */
ParallelManager()
{
reset();
}
/**
* @brief Reset the tracker for a new processing batch.
*
* This must be called from single-threaded code before starting the multi-threaded processing
* operations.
*/
void reset()
{
m_init_done = false;
m_term_done = false;
m_start_count = 0;
m_done_count = 0;
m_task_count = 0;
}
/**
* @brief Trigger the pipeline stage init step.
*
* This can be called from multi-threaded code. The first thread to hit this will process the
* initialization. Other threads will block and wait for it to complete.
*
* @param init_func Callable which executes the stage initialization. It must return the
* total number of tasks in the stage.
*/
void init(std::function<unsigned int(void)> init_func)
{
std::lock_guard<std::mutex> lck(m_lock);
if (!m_init_done)
{
m_task_count = init_func();
m_init_done = true;
}
}
/**
* @brief Trigger the pipeline stage init step.
*
* This can be called from multi-threaded code. The first thread to hit this will process the
* initialization. Other threads will block and wait for it to complete.
*
* @param task_count Total number of tasks needing processing.
*/
void init(unsigned int task_count)
{
std::lock_guard<std::mutex> lck(m_lock);
if (!m_init_done)
{
m_task_count = task_count;
m_init_done = true;
}
}
/**
* @brief Request a task assignment.
*
* Assign up to @c granule tasks to the caller for processing.
*
* @param granule Maximum number of tasks that can be assigned.
* @param[out] count Actual number of tasks assigned, or zero if no tasks were assigned.
*
* @return Task index of the first assigned task; assigned tasks increment from this.
*/
unsigned int get_task_assignment(unsigned int granule, unsigned int& count)
{
unsigned int base = m_start_count.fetch_add(granule, std::memory_order_relaxed);
if (base >= m_task_count)
{
count = 0;
return 0;
}
count = astc::min(m_task_count - base, granule);
return base;
}
/**
* @brief Complete a task assignment.
*
* Mark @c count tasks as complete. This will notify all threads blocked on @c wait() if this
* completes the processing of the stage.
*
* @param count The number of completed tasks.
*/
void complete_task_assignment(unsigned int count)
{
        // Note: m_done_count cannot be a bare atomic; without the mutex there would be a race
        // between the update here and the wait() check in other threads
std::unique_lock<std::mutex> lck(m_lock);
this->m_done_count += count;
if (m_done_count == m_task_count)
{
lck.unlock();
m_complete.notify_all();
}
}
/**
* @brief Wait for stage processing to complete.
*/
void wait()
{
std::unique_lock<std::mutex> lck(m_lock);
m_complete.wait(lck, [this]{ return m_done_count == m_task_count; });
}
/**
* @brief Trigger the pipeline stage term step.
*
* This can be called from multi-threaded code. The first thread to hit this will process the
* work pool termination. Caller must have called @c wait() prior to calling this function to
* ensure that processing is complete.
*
* @param term_func Callable which executes the stage termination.
*/
void term(std::function<void(void)> term_func)
{
std::lock_guard<std::mutex> lck(m_lock);
if (!m_term_done)
{
term_func();
m_term_done = true;
}
}
};
/**
* @brief The astcenc compression context.
*/
struct astcenc_context
{
/** @brief The context internal state. */
astcenc_contexti context;
#if !defined(ASTCENC_DECOMPRESS_ONLY)
/** @brief The parallel manager for averages computation. */
ParallelManager manage_avg;
/** @brief The parallel manager for compression. */
ParallelManager manage_compress;
#endif
/** @brief The parallel manager for decompression. */
ParallelManager manage_decompress;
};
#endif
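A minimal usage sketch for the vendored ParallelManager above (editor's illustration, not part of the diff; it assumes this header is included, and the thread count, granule size of 4, and 64-task stage are arbitrary choices):

#include <thread>
#include <vector>

static ParallelManager stage;

static void worker()
{
	// First thread to arrive runs the init lambda and declares 64 tasks
	stage.init([]() -> unsigned int { return 64; });

	unsigned int count = 0;
	do
	{
		unsigned int base = stage.get_task_assignment(4, count);
		for (unsigned int i = base; i < base + count; i++)
		{
			// ... process task i ...
		}

		if (count)
		{
			stage.complete_task_assignment(count);
		}
	} while (count);

	// Block until every worker has flagged its tasks complete
	stage.wait();

	// First thread to arrive runs the termination lambda
	stage.term([]() { /* free per-stage resources */ });
}

int main()
{
	stage.reset(); // single-threaded setup

	std::vector<std::thread> pool;
	for (int i = 0; i < 4; i++)
	{
		pool.emplace_back(worker);
	}

	for (auto& t : pool)
	{
		t.join();
	}

	return 0;
}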


@ -517,7 +517,7 @@ static void compute_color_error_for_every_integer_count_and_quant_level(
best_error[i][1] = ERROR_CALC_DEFAULT;
best_error[i][0] = ERROR_CALC_DEFAULT;
format_of_choice[i][3] = encode_hdr_alpha ? FMT_HDR_RGBA : FMT_HDR_RGB_LDR_ALPHA;
format_of_choice[i][3] = static_cast<uint8_t>(encode_hdr_alpha ? FMT_HDR_RGBA : FMT_HDR_RGB_LDR_ALPHA);
format_of_choice[i][2] = FMT_HDR_RGB;
format_of_choice[i][1] = FMT_HDR_RGB_SCALE;
format_of_choice[i][0] = FMT_HDR_LUMINANCE_LARGE_RANGE;
@ -537,7 +537,7 @@ static void compute_color_error_for_every_integer_count_and_quant_level(
float full_hdr_rgba_error = rgba_quantization_error + rgb_range_error + alpha_range_error;
best_error[i][3] = full_hdr_rgba_error;
format_of_choice[i][3] = encode_hdr_alpha ? FMT_HDR_RGBA : FMT_HDR_RGB_LDR_ALPHA;
format_of_choice[i][3] = static_cast<uint8_t>(encode_hdr_alpha ? FMT_HDR_RGBA : FMT_HDR_RGB_LDR_ALPHA);
// For 6 integers, we have one HDR-RGB encoding
float full_hdr_rgb_error = (rgb_quantization_error * mode11mult) + rgb_range_error + eci.alpha_drop_error;


@ -351,7 +351,7 @@ struct vmask4
*/
template <int32_t l> ASTCENC_SIMD_INLINE uint32_t lane() const
{
return vgetq_lane_s32(m, l);
return vgetq_lane_u32(m, l);
}
/**
@ -968,13 +968,15 @@ ASTCENC_SIMD_INLINE void vtable_prepare(
*/
ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)
{
int8x16_t table { t0.m };
int8x16_t table {
vreinterpretq_s8_s32(t0.m)
};
// Set index byte MSB to 1 for unused bytes so shuffle returns zero
// Set index byte above max index for unused bytes so table lookup returns zero
int32x4_t idx_masked = vorrq_s32(idx.m, vdupq_n_s32(0xFFFFFF00));
int8x16_t idx_bytes = vreinterpretq_u8_s32(idx_masked);
uint8x16_t idx_bytes = vreinterpretq_u8_s32(idx_masked);
return vint4(vqtbl1q_s8(table, idx_bytes));
return vint4(vreinterpretq_s32_s8(vqtbl1q_s8(table, idx_bytes)));
}
/**
@ -982,13 +984,16 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)
*/
ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 idx)
{
int8x16x2_t table { t0.m, t1.m };
int8x16x2_t table {
vreinterpretq_s8_s32(t0.m),
vreinterpretq_s8_s32(t1.m)
};
// Set index byte MSB to 1 for unused bytes so shuffle returns zero
// Set index byte above max index for unused bytes so table lookup returns zero
int32x4_t idx_masked = vorrq_s32(idx.m, vdupq_n_s32(0xFFFFFF00));
int8x16_t idx_bytes = vreinterpretq_u8_s32(idx_masked);
uint8x16_t idx_bytes = vreinterpretq_u8_s32(idx_masked);
return vint4(vqtbl2q_s8(table, idx_bytes));
return vint4(vreinterpretq_s32_s8(vqtbl2q_s8(table, idx_bytes)));
}
/**
@ -996,13 +1001,18 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 idx)
*/
ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3, vint4 idx)
{
int8x16x4_t table { t0.m, t1.m, t2.m, t3.m };
int8x16x4_t table {
vreinterpretq_s8_s32(t0.m),
vreinterpretq_s8_s32(t1.m),
vreinterpretq_s8_s32(t2.m),
vreinterpretq_s8_s32(t3.m)
};
// Set index byte MSB to 1 for unused bytes so shuffle returns zero
// Set index byte above max index for unused bytes so table lookup returns zero
int32x4_t idx_masked = vorrq_s32(idx.m, vdupq_n_s32(0xFFFFFF00));
int8x16_t idx_bytes = vreinterpretq_u8_s32(idx_masked);
uint8x16_t idx_bytes = vreinterpretq_u8_s32(idx_masked);
return vint4(vqtbl4q_s8(table, idx_bytes));
return vint4(vreinterpretq_s32_s8(vqtbl4q_s8(table, idx_bytes)));
}
/**


@ -363,6 +363,14 @@ struct vmask4
m = _mm_castsi128_ps(mask.m);
}
/**
* @brief Get the scalar value of a single lane.
*/
template <int l> ASTCENC_SIMD_INLINE float lane() const
{
return _mm_cvtss_f32(_mm_shuffle_ps(m, m, l));
}
/**
* @brief The vector ...
*/
@ -1192,7 +1200,27 @@ ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)
#if ASTCENC_AVX >= 2
_mm_maskstore_epi32(base, _mm_castps_si128(mask.m), data.m);
#else
_mm_maskmoveu_si128(data.m, _mm_castps_si128(mask.m), reinterpret_cast<char*>(base));
// Note - we cannot use _mm_maskmoveu_si128 as the underlying hardware doesn't guarantee
    // fault suppression on masked lanes, so we can get page faults at the end of an image.
if (mask.lane<3>() != 0.0f)
{
store(data, base);
}
else if(mask.lane<2>() != 0.0f)
{
base[0] = data.lane<0>();
base[1] = data.lane<1>();
base[2] = data.lane<2>();
}
else if(mask.lane<1>() != 0.0f)
{
base[0] = data.lane<0>();
base[1] = data.lane<1>();
}
else if(mask.lane<0>() != 0.0f)
{
base[0] = data.lane<0>();
}
#endif
}

thirdparty/flatbuffers/allocator.h (vendored, new file, +68 lines)

@ -0,0 +1,68 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ALLOCATOR_H_
#define FLATBUFFERS_ALLOCATOR_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Allocator interface. This is flatbuffers-specific and meant only for
// `vector_downward` usage.
class Allocator {
public:
virtual ~Allocator() {}
// Allocate `size` bytes of memory.
virtual uint8_t *allocate(size_t size) = 0;
// Deallocate `size` bytes of memory at `p` allocated by this allocator.
virtual void deallocate(uint8_t *p, size_t size) = 0;
// Reallocate `new_size` bytes of memory, replacing the old region of size
// `old_size` at `p`. In contrast to a normal realloc, this grows downwards,
  // and is intended specifically for `vector_downward` use.
// `in_use_back` and `in_use_front` indicate how much of `old_size` is
// actually in use at each end, and needs to be copied.
virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
size_t new_size, size_t in_use_back,
size_t in_use_front) {
FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows
uint8_t *new_p = allocate(new_size);
memcpy_downward(old_p, old_size, new_p, new_size, in_use_back,
in_use_front);
deallocate(old_p, old_size);
return new_p;
}
protected:
// Called by `reallocate_downward` to copy memory from `old_p` of `old_size`
// to `new_p` of `new_size`. Only memory of size `in_use_front` and
// `in_use_back` will be copied from the front and back of the old memory
// allocation.
void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p,
size_t new_size, size_t in_use_back,
size_t in_use_front) {
memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back,
in_use_back);
memcpy(new_p, old_p, in_use_front);
}
};
} // namespace flatbuffers
#endif // FLATBUFFERS_ALLOCATOR_H_
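A hedged sketch of implementing the Allocator interface above (the counting wrapper is the editor's illustration; the inherited reallocate_downward falls back to the allocate/copy/deallocate path shown in the base class):

#include <cstddef>
#include <cstdint>
#include "flatbuffers/allocator.h"

class CountingAllocator : public flatbuffers::Allocator {
 public:
  uint8_t *allocate(size_t size) override {
    live_bytes_ += size;
    return new uint8_t[size];
  }

  void deallocate(uint8_t *p, size_t size) override {
    live_bytes_ -= size;
    delete[] p;
  }

  size_t live_bytes() const { return live_bytes_; }

 private:
  size_t live_bytes_ = 0;  // bytes currently outstanding
};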

thirdparty/flatbuffers/array.h (vendored, new file, +243 lines)

@ -0,0 +1,243 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_
#include "flatbuffers/base.h"
#include "flatbuffers/stl_emulation.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// This is used as a helper type for accessing arrays.
template<typename T, uint16_t length> class Array {
// Array<T> can carry only POD data types (scalars or structs).
typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
scalar_tag;
typedef
typename flatbuffers::conditional<scalar_tag::value, T, const T *>::type
IndirectHelperType;
public:
typedef uint16_t size_type;
typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
typedef VectorIterator<T, return_type> const_iterator;
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
// If T is a LE-scalar or a struct (!scalar_tag::value).
static FLATBUFFERS_CONSTEXPR bool is_span_observable =
(scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1)) ||
!scalar_tag::value;
FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; }
return_type Get(uoffset_t i) const {
FLATBUFFERS_ASSERT(i < size());
return IndirectHelper<IndirectHelperType>::Read(Data(), i);
}
return_type operator[](uoffset_t i) const { return Get(i); }
// If this is a Vector of enums, T will be its storage type, not the enum
// type. This function makes it convenient to retrieve value with enum
// type E.
template<typename E> E GetEnum(uoffset_t i) const {
return static_cast<E>(Get(i));
}
const_iterator begin() const { return const_iterator(Data(), 0); }
const_iterator end() const { return const_iterator(Data(), size()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
const_reverse_iterator crbegin() const { return rbegin(); }
const_reverse_iterator crend() const { return rend(); }
// Get a mutable pointer to elements inside this array.
  // This method is used to mutate arrays of structs, followed by a @p Mutate
// operation. For primitive types use @p Mutate directly.
// @warning Assignments and reads to/from the dereferenced pointer are not
// automatically converted to the correct endianness.
typename flatbuffers::conditional<scalar_tag::value, void, T *>::type
GetMutablePointer(uoffset_t i) const {
FLATBUFFERS_ASSERT(i < size());
return const_cast<T *>(&data()[i]);
}
// Change elements if you have a non-const pointer to this object.
void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); }
// The raw data in little endian format. Use with care.
const uint8_t *Data() const { return data_; }
uint8_t *Data() { return data_; }
// Similarly, but typed, much like std::vector::data
const T *data() const { return reinterpret_cast<const T *>(Data()); }
T *data() { return reinterpret_cast<T *>(Data()); }
// Copy data from a span with endian conversion.
// If this Array and the span overlap, the behavior is undefined.
void CopyFromSpan(flatbuffers::span<const T, length> src) {
const auto p1 = reinterpret_cast<const uint8_t *>(src.data());
const auto p2 = Data();
FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) &&
!(p2 >= p1 && p2 < (p1 + length)));
(void)p1;
(void)p2;
CopyFromSpanImpl(flatbuffers::bool_constant<is_span_observable>(), src);
}
protected:
void MutateImpl(flatbuffers::true_type, uoffset_t i, const T &val) {
FLATBUFFERS_ASSERT(i < size());
WriteScalar(data() + i, val);
}
void MutateImpl(flatbuffers::false_type, uoffset_t i, const T &val) {
*(GetMutablePointer(i)) = val;
}
void CopyFromSpanImpl(flatbuffers::true_type,
flatbuffers::span<const T, length> src) {
// Use std::memcpy() instead of std::copy() to avoid performance degradation
// due to aliasing if T is char or unsigned char.
// The size is known at compile time, so memcpy would be inlined.
std::memcpy(data(), src.data(), length * sizeof(T));
}
// Copy data from flatbuffers::span with endian conversion.
void CopyFromSpanImpl(flatbuffers::false_type,
flatbuffers::span<const T, length> src) {
for (size_type k = 0; k < length; k++) { Mutate(k, src[k]); }
}
// This class is only used to access pre-existing data. Don't ever
// try to construct these manually.
// 'constexpr' allows us to use 'size()' at compile time.
// @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on
// a constructor.
#if defined(__cpp_constexpr)
constexpr Array();
#else
Array();
#endif
uint8_t data_[length * sizeof(T)];
private:
// This class is a pointer. Copying will therefore create an invalid object.
// Private and unimplemented copy constructor.
Array(const Array &);
Array &operator=(const Array &);
};
// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization used by idl_gen_text.cpp.
template<typename T, uint16_t length> class Array<Offset<T>, length> {
static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");
public:
typedef const void *return_type;
const uint8_t *Data() const { return data_; }
// Make idl_gen_text.cpp::PrintContainer happy.
return_type operator[](uoffset_t) const {
FLATBUFFERS_ASSERT(false);
return nullptr;
}
private:
// This class is only used to access pre-existing data.
Array();
Array(const Array &);
Array &operator=(const Array &);
uint8_t data_[1];
};
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U, N> make_span(Array<U, N> &arr)
FLATBUFFERS_NOEXCEPT {
static_assert(
Array<U, N>::is_span_observable,
"wrong type U, only plain struct, LE-scalar, or byte types are allowed");
return span<U, N>(arr.data(), N);
}
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U, N> make_span(
const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
static_assert(
Array<U, N>::is_span_observable,
"wrong type U, only plain struct, LE-scalar, or byte types are allowed");
return span<const U, N>(arr.data(), N);
}
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t, sizeof(U) * N>
make_bytes_span(Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
static_assert(Array<U, N>::is_span_observable,
"internal error, Array<T> might hold only scalars or structs");
return span<uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t, sizeof(U) * N>
make_bytes_span(const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
static_assert(Array<U, N>::is_span_observable,
"internal error, Array<T> might hold only scalars or structs");
return span<const uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}
// Cast a raw T[length] to a raw flatbuffers::Array<T, length>
// without endian conversion. Use with care.
// TODO: move these Cast-methods to `internal` namespace.
template<typename T, uint16_t length>
Array<T, length> &CastToArray(T (&arr)[length]) {
return *reinterpret_cast<Array<T, length> *>(arr);
}
template<typename T, uint16_t length>
const Array<T, length> &CastToArray(const T (&arr)[length]) {
return *reinterpret_cast<const Array<T, length> *>(arr);
}
template<typename E, typename T, uint16_t length>
Array<E, length> &CastToArrayOfEnum(T (&arr)[length]) {
static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
return *reinterpret_cast<Array<E, length> *>(arr);
}
template<typename E, typename T, uint16_t length>
const Array<E, length> &CastToArrayOfEnum(const T (&arr)[length]) {
static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
return *reinterpret_cast<const Array<E, length> *>(arr);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_ARRAY_H_
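An illustrative sketch of CastToArray and the Array accessors above (the values are arbitrary, and the example assumes a little-endian target where the endian conversion is a no-op):

#include <cstdio>
#include "flatbuffers/array.h"

void demo_cast_to_array() {
  uint16_t raw[4] = {10, 20, 30, 40};
  flatbuffers::Array<uint16_t, 4> &arr = flatbuffers::CastToArray(raw);
  arr.Mutate(0, 11);  // writes through to raw[0] via an endian-aware store
  for (flatbuffers::uoffset_t i = 0; i < arr.size(); i++) {
    std::printf("%u\n", static_cast<unsigned>(arr[i]));
  }
}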


@ -50,10 +50,6 @@
#include <unistd.h>
#endif
#ifdef _STLPORT_VERSION
#define FLATBUFFERS_CPP98_STL
#endif
#ifdef __ANDROID__
#include <android/api-level.h>
#endif
@ -144,7 +140,7 @@
#define FLATBUFFERS_VERSION_MAJOR 2
#define FLATBUFFERS_VERSION_MINOR 0
#define FLATBUFFERS_VERSION_REVISION 0
#define FLATBUFFERS_VERSION_REVISION 8
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {
@ -247,6 +243,11 @@ namespace flatbuffers {
#endif // __has_include
#endif // !FLATBUFFERS_HAS_STRING_VIEW
#ifndef FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
// Allow heap allocations to be used
#define FLATBUFFERS_GENERAL_HEAP_ALLOC_OK 1
#endif // !FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
#ifndef FLATBUFFERS_HAS_NEW_STRTOD
// Modern (C++11) strtod and strtof functions are available for use.
// 1) nan/inf strings as argument of strtod;
@ -259,9 +260,12 @@ namespace flatbuffers {
#endif // !FLATBUFFERS_HAS_NEW_STRTOD
#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}.
#if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \
(defined(_XOPEN_VERSION) && (_XOPEN_VERSION>=700)) && (!defined(__ANDROID_API__) || (defined(__ANDROID_API__) && (__ANDROID_API__>=21))))
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l,
// strtoull_l}.
#if (defined(_MSC_VER) && _MSC_VER >= 1800) || \
(defined(__ANDROID_API__) && __ANDROID_API__>= 21) || \
(defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)) && \
(!defined(__Fuchsia__) && !defined(__ANDROID_API__))
#define FLATBUFFERS_LOCALE_INDEPENDENT 1
#else
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
@ -269,14 +273,14 @@ namespace flatbuffers {
#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
// Suppress Undefined Behavior Sanitizer (recoverable only). Usage:
// - __supress_ubsan__("undefined")
// - __supress_ubsan__("signed-integer-overflow")
// - __suppress_ubsan__("undefined")
// - __suppress_ubsan__("signed-integer-overflow")
#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7))
#define __supress_ubsan__(type) __attribute__((no_sanitize(type)))
#define __suppress_ubsan__(type) __attribute__((no_sanitize(type)))
#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)
#define __supress_ubsan__(type) __attribute__((no_sanitize_undefined))
#define __suppress_ubsan__(type) __attribute__((no_sanitize_undefined))
#else
#define __supress_ubsan__(type)
#define __suppress_ubsan__(type)
#endif
// This is constexpr function used for checking compile-time constants.
@ -289,7 +293,7 @@ template<typename T> FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) {
#if ((__cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
// All attributes unknown to an implementation are ignored without causing an error.
#define FLATBUFFERS_ATTRIBUTE(attr) [[attr]]
#define FLATBUFFERS_ATTRIBUTE(attr) attr
#define FLATBUFFERS_FALLTHROUGH() [[fallthrough]]
#else
@ -327,8 +331,20 @@ typedef uintmax_t largest_scalar_t;
// In 32 bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
// The minimum size buffer that can be a valid flatbuffer.
// Includes the offset to the root table (uoffset_t), the offset to the vtable
// of the root table (soffset_t), the size of the vtable (uint16_t), and the
// size of the referring table (uint16_t).
#define FLATBUFFERS_MIN_BUFFER_SIZE sizeof(uoffset_t) + sizeof(soffset_t) + \
sizeof(uint16_t) + sizeof(uint16_t)
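// Worked example (editor's note): with 32-bit uoffset_t and soffset_t this
// evaluates to 4 + 4 + 2 + 2 = 12 bytes.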
// We support aligning the contents of buffers up to this size.
#define FLATBUFFERS_MAX_ALIGNMENT 16
#ifndef FLATBUFFERS_MAX_ALIGNMENT
#define FLATBUFFERS_MAX_ALIGNMENT 32
#endif
/// @brief The length of a FlatBuffer file header.
static const size_t kFileIdentifierLength = 4;
inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) {
return (min_align <= align) && (align <= (FLATBUFFERS_MAX_ALIGNMENT)) &&
@ -397,7 +413,7 @@ template<typename T> T EndianScalar(T t) {
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__supress_ubsan__("alignment")
__suppress_ubsan__("alignment")
T ReadScalar(const void *p) {
return EndianScalar(*reinterpret_cast<const T *>(p));
}
@ -411,13 +427,13 @@ T ReadScalar(const void *p) {
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__supress_ubsan__("alignment")
__suppress_ubsan__("alignment")
void WriteScalar(void *p, T t) {
*reinterpret_cast<T *>(p) = EndianScalar(t);
}
template<typename T> struct Offset;
template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
template<typename T> __suppress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
*reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
}
@ -428,10 +444,43 @@ template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Of
// Computes how many bytes you'd have to pad to be able to write a
// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
// memory).
__supress_ubsan__("unsigned-integer-overflow")
__suppress_ubsan__("unsigned-integer-overflow")
inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
return ((~buf_size) + 1) & (scalar_size - 1);
}
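// Worked example (editor's note): PaddingBytes(13, 4) == ((~13) + 1) & 3 == 3,
// growing the 13-byte buffer to 16 bytes so the next 4-byte scalar is aligned.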
// Generic 'operator==' with conditional specialisations.
// T e - new value of a scalar field.
// T def - default of scalar (is known at compile-time).
template<typename T> inline bool IsTheSameAs(T e, T def) { return e == def; }
#if defined(FLATBUFFERS_NAN_DEFAULTS) && \
defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Like `operator==(e, def)` with weak NaN if T=(float|double).
template<typename T> inline bool IsFloatTheSameAs(T e, T def) {
return (e == def) || ((def != def) && (e != e));
}
template<> inline bool IsTheSameAs<float>(float e, float def) {
return IsFloatTheSameAs(e, def);
}
template<> inline bool IsTheSameAs<double>(double e, double def) {
return IsFloatTheSameAs(e, def);
}
#endif
// Check 'v' is out of closed range [low; high].
// Workaround for GCC warning [-Werror=type-limits]:
// comparison is always true due to limited range of data type.
template<typename T>
inline bool IsOutRange(const T &v, const T &low, const T &high) {
return (v < low) || (high < v);
}
// Check 'v' is in closed range [low; high].
template<typename T>
inline bool IsInRange(const T &v, const T &low, const T &high) {
return !IsOutRange(v, low, high);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BASE_H_

thirdparty/flatbuffers/buffer.h (vendored, new file, +142 lines)

@ -0,0 +1,142 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_H_
#define FLATBUFFERS_BUFFER_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T> struct Offset {
uoffset_t o;
Offset() : o(0) {}
Offset(uoffset_t _o) : o(_o) {}
Offset<void> Union() const { return Offset<void>(o); }
bool IsNull() const { return !o; }
};
inline void EndianCheck() {
int endiantest = 1;
// If this fails, see FLATBUFFERS_LITTLEENDIAN above.
FLATBUFFERS_ASSERT(*reinterpret_cast<char *>(&endiantest) ==
FLATBUFFERS_LITTLEENDIAN);
(void)endiantest;
}
template<typename T> FLATBUFFERS_CONSTEXPR size_t AlignOf() {
// clang-format off
#ifdef _MSC_VER
return __alignof(T);
#else
#ifndef alignof
return __alignof__(T);
#else
return alignof(T);
#endif
#endif
// clang-format on
}
// Lexicographically compare two strings (possibly containing nulls), and
// return true if the first is less than the second.
static inline bool StringLessThan(const char *a_data, uoffset_t a_size,
const char *b_data, uoffset_t b_size) {
const auto cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size));
return cmp == 0 ? a_size < b_size : cmp < 0;
}
// When we read serialized data from memory, in the case of most scalars,
// we want to just read T, but in the case of Offset, we want to actually
// perform the indirection and return a pointer.
// The template specialization below does just that.
// It is wrapped in a struct since function templates can't overload on the
// return type like this.
// The typedef is for the convenience of callers of this function
// (avoiding the need for a trailing return decltype)
template<typename T> struct IndirectHelper {
typedef T return_type;
typedef T mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
return EndianScalar((reinterpret_cast<const T *>(p))[i]);
}
};
template<typename T> struct IndirectHelper<Offset<T>> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(uoffset_t);
static return_type Read(const uint8_t *p, uoffset_t i) {
p += i * sizeof(uoffset_t);
return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
}
};
template<typename T> struct IndirectHelper<const T *> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
return reinterpret_cast<const T *>(p + i * sizeof(T));
}
};
/// @brief Get a pointer to the file_identifier section of the buffer.
/// @return Returns a const char pointer to the start of the file_identifier
/// characters in the buffer. The returned char * has length
/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'.
/// This function is UNDEFINED for FlatBuffers whose schema does not include
/// a file_identifier (likely points at padding or the start of the root
/// vtable).
inline const char *GetBufferIdentifier(const void *buf,
bool size_prefixed = false) {
return reinterpret_cast<const char *>(buf) +
((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t));
}
// Helper to see if the identifier in a buffer has the expected value.
inline bool BufferHasIdentifier(const void *buf, const char *identifier,
bool size_prefixed = false) {
return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier,
flatbuffers::kFileIdentifierLength) == 0;
}
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
EndianCheck();
return reinterpret_cast<T *>(
reinterpret_cast<uint8_t *>(buf) +
EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}
template<typename T> T *GetMutableSizePrefixedRoot(void *buf) {
return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) +
sizeof(uoffset_t));
}
template<typename T> const T *GetRoot(const void *buf) {
return GetMutableRoot<T>(const_cast<void *>(buf));
}
template<typename T> const T *GetSizePrefixedRoot(const void *buf) {
return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t));
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_H_
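A small sketch using the identifier helpers above (the "MONS" identifier is an assumed schema file_identifier, not one from this commit):

#include "flatbuffers/buffer.h"

bool looks_like_monster_buffer(const void *buf) {
  // Compares the 4-byte identifier stored after the root offset
  return flatbuffers::BufferHasIdentifier(buf, "MONS");
}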

thirdparty/flatbuffers/buffer_ref.h (vendored, new file, +53 lines)

@ -0,0 +1,53 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_REF_H_
#define FLATBUFFERS_BUFFER_REF_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// Convenient way to bundle a buffer and its length, to pass it around
// typed by its root.
// A BufferRef does not own its buffer.
struct BufferRefBase {}; // for std::is_base_of
template<typename T> struct BufferRef : BufferRefBase {
BufferRef() : buf(nullptr), len(0), must_free(false) {}
BufferRef(uint8_t *_buf, uoffset_t _len)
: buf(_buf), len(_len), must_free(false) {}
~BufferRef() {
if (must_free) free(buf);
}
const T *GetRoot() const { return flatbuffers::GetRoot<T>(buf); }
bool Verify() {
Verifier verifier(buf, len);
return verifier.VerifyBuffer<T>(nullptr);
}
uint8_t *buf;
uoffset_t len;
bool must_free;
};
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_REF_H_
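A hedged sketch of the BufferRef flow above; MonsterT stands in for a hypothetical schema-generated root type, which is what Verify() expects:

#include "flatbuffers/buffer_ref.h"

template <typename MonsterT>
const MonsterT *parse_checked(uint8_t *data, flatbuffers::uoffset_t len) {
  flatbuffers::BufferRef<MonsterT> ref(data, len);
  if (!ref.Verify()) return nullptr;  // runs a Verifier over the buffer
  return ref.GetRoot();
}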


@ -0,0 +1,64 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#define FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
namespace flatbuffers {
// DefaultAllocator uses new/delete to allocate memory regions
class DefaultAllocator : public Allocator {
public:
uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE {
return new uint8_t[size];
}
void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; }
static void dealloc(void *p, size_t) { delete[] static_cast<uint8_t *>(p); }
};
// These functions allow for a null allocator to mean use the default allocator,
// as used by DetachedBuffer and vector_downward below.
// This is to avoid having a statically or dynamically allocated default
// allocator, or having to move it between the classes that may own it.
inline uint8_t *Allocate(Allocator *allocator, size_t size) {
return allocator ? allocator->allocate(size)
: DefaultAllocator().allocate(size);
}
inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) {
if (allocator)
allocator->deallocate(p, size);
else
DefaultAllocator().deallocate(p, size);
}
inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p,
size_t old_size, size_t new_size,
size_t in_use_back, size_t in_use_front) {
return allocator ? allocator->reallocate_downward(old_p, old_size, new_size,
in_use_back, in_use_front)
: DefaultAllocator().reallocate_downward(
old_p, old_size, new_size, in_use_back, in_use_front);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_DEFAULT_ALLOCATOR_H_
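A minimal sketch of the null-allocator convention above: passing nullptr routes each call through a temporary DefaultAllocator:

#include "flatbuffers/default_allocator.h"

void demo_default_allocator() {
  uint8_t *block = flatbuffers::Allocate(nullptr, 256);  // new uint8_t[256]
  flatbuffers::Deallocate(nullptr, block, 256);          // delete[] block
}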

thirdparty/flatbuffers/detached_buffer.h (vendored, new file, +114 lines)

@ -0,0 +1,114 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DETACHED_BUFFER_H_
#define FLATBUFFERS_DETACHED_BUFFER_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
namespace flatbuffers {
// DetachedBuffer is a finished flatbuffer memory region, detached from its
// builder. The original memory region and allocator are also stored so that
// the DetachedBuffer can manage the memory lifetime.
class DetachedBuffer {
public:
DetachedBuffer()
: allocator_(nullptr),
own_allocator_(false),
buf_(nullptr),
reserved_(0),
cur_(nullptr),
size_(0) {}
DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf,
size_t reserved, uint8_t *cur, size_t sz)
: allocator_(allocator),
own_allocator_(own_allocator),
buf_(buf),
reserved_(reserved),
cur_(cur),
size_(sz) {}
DetachedBuffer(DetachedBuffer &&other)
: allocator_(other.allocator_),
own_allocator_(other.own_allocator_),
buf_(other.buf_),
reserved_(other.reserved_),
cur_(other.cur_),
size_(other.size_) {
other.reset();
}
DetachedBuffer &operator=(DetachedBuffer &&other) {
if (this == &other) return *this;
destroy();
allocator_ = other.allocator_;
own_allocator_ = other.own_allocator_;
buf_ = other.buf_;
reserved_ = other.reserved_;
cur_ = other.cur_;
size_ = other.size_;
other.reset();
return *this;
}
~DetachedBuffer() { destroy(); }
const uint8_t *data() const { return cur_; }
uint8_t *data() { return cur_; }
size_t size() const { return size_; }
// These may change access mode, leave these at end of public section
FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other));
FLATBUFFERS_DELETE_FUNC(
DetachedBuffer &operator=(const DetachedBuffer &other));
protected:
Allocator *allocator_;
bool own_allocator_;
uint8_t *buf_;
size_t reserved_;
uint8_t *cur_;
size_t size_;
inline void destroy() {
if (buf_) Deallocate(allocator_, buf_, reserved_);
if (own_allocator_ && allocator_) { delete allocator_; }
reset();
}
inline void reset() {
allocator_ = nullptr;
own_allocator_ = false;
buf_ = nullptr;
reserved_ = 0;
cur_ = nullptr;
size_ = 0;
}
};
} // namespace flatbuffers
#endif // FLATBUFFERS_DETACHED_BUFFER_H_
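A hedged sketch of the move-only ownership above; obtaining the buffer from FlatBufferBuilder::Release() is the usual path, and send_bytes() is a hypothetical transport call:

#include <utility>
#include "flatbuffers/flatbuffers.h"

void send_bytes(const uint8_t *data, size_t size);  // hypothetical transport

void demo_detached_buffer(flatbuffers::FlatBufferBuilder &fbb) {
  flatbuffers::DetachedBuffer owned = fbb.Release();
  flatbuffers::DetachedBuffer moved = std::move(owned);
  // 'owned' is reset to empty; 'moved' frees the region on destruction
  send_bytes(moved.data(), moved.size());
}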

(file diff suppressed because it is too large)

(file diff suppressed because it is too large)


@ -26,36 +26,33 @@
#include <memory>
#include <limits>
#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
#define FLATBUFFERS_CPP98_STL
#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
#if defined(FLATBUFFERS_CPP98_STL)
#include <cctype>
#endif // defined(FLATBUFFERS_CPP98_STL)
#ifndef FLATBUFFERS_USE_STD_OPTIONAL
// Detect C++17 compatible compiler.
// __cplusplus >= 201703L - a compiler has support of 'static inline' variables.
#if defined(FLATBUFFERS_USE_STD_OPTIONAL) \
|| (defined(__cplusplus) && __cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))
#if (defined(__cplusplus) && __cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define FLATBUFFERS_USE_STD_OPTIONAL 1
#else
#define FLATBUFFERS_USE_STD_OPTIONAL 0
#endif // (defined(__cplusplus) && __cplusplus >= 201703L) ...
#endif // FLATBUFFERS_USE_STD_OPTIONAL
#if FLATBUFFERS_USE_STD_OPTIONAL
#include <optional>
#ifndef FLATBUFFERS_USE_STD_OPTIONAL
#define FLATBUFFERS_USE_STD_OPTIONAL
#endif
#endif // defined(FLATBUFFERS_USE_STD_OPTIONAL) ...
// The __cpp_lib_span is the predefined feature macro.
#if defined(FLATBUFFERS_USE_STD_SPAN)
#include <span>
#elif defined(__cpp_lib_span) && defined(__has_include)
#if __has_include(<span>)
#include <array>
#include <span>
#define FLATBUFFERS_USE_STD_SPAN
#endif
#else
// Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL defined.
#if !defined(FLATBUFFERS_TEMPLATES_ALIASES) || defined(FLATBUFFERS_CPP98_STL)
#if !defined(FLATBUFFERS_TEMPLATES_ALIASES)
#define FLATBUFFERS_SPAN_MINIMAL
#else
// Enable implicit construction of a span<T,N> from a std::array<T,N>.
@ -63,42 +60,9 @@
#endif
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
// This header provides backwards compatibility for C++98 STLs like stlport.
// This header provides backwards compatibility for older versions of the STL.
namespace flatbuffers {
// Retrieve ::back() from a string in a way that is compatible with pre C++11
// STLs (e.g stlport).
inline char& string_back(std::string &value) {
return value[value.length() - 1];
}
inline char string_back(const std::string &value) {
return value[value.length() - 1];
}
// Helper method that retrieves ::data() from a vector in a way that is
// compatible with pre C++11 STLs (e.g stlport).
template <typename T> inline T *vector_data(std::vector<T> &vector) {
// In some debug environments, operator[] does bounds checking, so &vector[0]
// can't be used.
return vector.empty() ? nullptr : &vector[0];
}
template <typename T> inline const T *vector_data(
const std::vector<T> &vector) {
return vector.empty() ? nullptr : &vector[0];
}
template <typename T, typename V>
inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
#if defined(FLATBUFFERS_CPP98_STL)
vector->push_back(data);
#else
vector->emplace_back(std::forward<V>(data));
#endif // defined(FLATBUFFERS_CPP98_STL)
}
#ifndef FLATBUFFERS_CPP98_STL
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
template <typename T>
using numeric_limits = std::numeric_limits<T>;
@ -106,54 +70,8 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
template <typename T> class numeric_limits :
public std::numeric_limits<T> {};
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#else
template <typename T> class numeric_limits :
public std::numeric_limits<T> {
public:
// Android NDK fix.
static T lowest() {
return std::numeric_limits<T>::min();
}
};
template <> class numeric_limits<float> :
public std::numeric_limits<float> {
public:
static float lowest() { return -FLT_MAX; }
};
template <> class numeric_limits<double> :
public std::numeric_limits<double> {
public:
static double lowest() { return -DBL_MAX; }
};
template <> class numeric_limits<unsigned long long> {
public:
static unsigned long long min() { return 0ULL; }
static unsigned long long max() { return ~0ULL; }
static unsigned long long lowest() {
return numeric_limits<unsigned long long>::min();
}
};
template <> class numeric_limits<long long> {
public:
static long long min() {
return static_cast<long long>(1ULL << ((sizeof(long long) << 3) - 1));
}
static long long max() {
return static_cast<long long>(
(1ULL << ((sizeof(long long) << 3) - 1)) - 1);
}
static long long lowest() {
return numeric_limits<long long>::min();
}
};
#endif // FLATBUFFERS_CPP98_STL
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
#ifndef FLATBUFFERS_CPP98_STL
template <typename T> using is_scalar = std::is_scalar<T>;
template <typename T, typename U> using is_same = std::is_same<T,U>;
template <typename T> using is_floating_point = std::is_floating_point<T>;
@ -166,32 +84,8 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
using integral_constant = std::integral_constant<T, v>;
template <bool B>
using bool_constant = integral_constant<bool, B>;
#else
// Map C++ TR1 templates defined by stlport.
template <typename T> using is_scalar = std::tr1::is_scalar<T>;
template <typename T, typename U> using is_same = std::tr1::is_same<T,U>;
template <typename T> using is_floating_point =
std::tr1::is_floating_point<T>;
template <typename T> using is_unsigned = std::tr1::is_unsigned<T>;
template <typename T> using is_enum = std::tr1::is_enum<T>;
// Android NDK doesn't have std::make_unsigned or std::tr1::make_unsigned.
template<typename T> struct make_unsigned {
static_assert(is_unsigned<T>::value, "Specialization not implemented!");
using type = T;
};
template<> struct make_unsigned<char> { using type = unsigned char; };
template<> struct make_unsigned<short> { using type = unsigned short; };
template<> struct make_unsigned<int> { using type = unsigned int; };
template<> struct make_unsigned<long> { using type = unsigned long; };
template<>
struct make_unsigned<long long> { using type = unsigned long long; };
template<bool B, class T, class F>
using conditional = std::tr1::conditional<B, T, F>;
template<class T, T v>
using integral_constant = std::tr1::integral_constant<T, v>;
template <bool B>
using bool_constant = integral_constant<bool, B>;
#endif // !FLATBUFFERS_CPP98_STL
using true_type = std::true_type;
using false_type = std::false_type;
#else
// MSVC 2010 doesn't support C++11 aliases.
template <typename T> struct is_scalar : public std::is_scalar<T> {};
@ -207,9 +101,10 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
struct integral_constant : public std::integral_constant<T, v> {};
template <bool B>
struct bool_constant : public integral_constant<bool, B> {};
typedef bool_constant<true> true_type;
typedef bool_constant<false> false_type;
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#ifndef FLATBUFFERS_CPP98_STL
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
template <class T> using unique_ptr = std::unique_ptr<T>;
#else
@ -237,98 +132,8 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
}
};
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#else
// Very limited implementation of unique_ptr.
// This is provided simply to allow the C++ code generated from the default
// settings to function in C++98 environments with no modifications.
template <class T> class unique_ptr {
public:
typedef T element_type;
unique_ptr() : ptr_(nullptr) {}
explicit unique_ptr(T* p) : ptr_(p) {}
unique_ptr(unique_ptr&& u) : ptr_(nullptr) { reset(u.release()); }
unique_ptr(const unique_ptr& u) : ptr_(nullptr) {
reset(const_cast<unique_ptr*>(&u)->release());
}
~unique_ptr() { reset(); }
unique_ptr& operator=(const unique_ptr& u) {
reset(const_cast<unique_ptr*>(&u)->release());
return *this;
}
unique_ptr& operator=(unique_ptr&& u) {
reset(u.release());
return *this;
}
unique_ptr& operator=(T* p) {
reset(p);
return *this;
}
const T& operator*() const { return *ptr_; }
T* operator->() const { return ptr_; }
T* get() const noexcept { return ptr_; }
explicit operator bool() const { return ptr_ != nullptr; }
// modifiers
T* release() {
T* value = ptr_;
ptr_ = nullptr;
return value;
}
void reset(T* p = nullptr) {
T* value = ptr_;
ptr_ = p;
if (value) delete value;
}
void swap(unique_ptr& u) {
T* temp_ptr = ptr_;
ptr_ = u.ptr_;
u.ptr_ = temp_ptr;
}
private:
T* ptr_;
};
template <class T> bool operator==(const unique_ptr<T>& x,
const unique_ptr<T>& y) {
return x.get() == y.get();
}
template <class T, class D> bool operator==(const unique_ptr<T>& x,
const D* y) {
return static_cast<D*>(x.get()) == y;
}
template <class T> bool operator==(const unique_ptr<T>& x, intptr_t y) {
return reinterpret_cast<intptr_t>(x.get()) == y;
}
template <class T> bool operator!=(const unique_ptr<T>& x, decltype(nullptr)) {
return !!x;
}
template <class T> bool operator!=(decltype(nullptr), const unique_ptr<T>& x) {
return !!x;
}
template <class T> bool operator==(const unique_ptr<T>& x, decltype(nullptr)) {
return !x;
}
template <class T> bool operator==(decltype(nullptr), const unique_ptr<T>& x) {
return !x;
}
#endif // !FLATBUFFERS_CPP98_STL
#ifdef FLATBUFFERS_USE_STD_OPTIONAL
#if FLATBUFFERS_USE_STD_OPTIONAL
template<class T>
using Optional = std::optional<T>;
using nullopt_t = std::nullopt_t;
@ -484,17 +289,43 @@ FLATBUFFERS_CONSTEXPR std::size_t dynamic_extent = static_cast<std::size_t>(-1);
namespace internal {
// This is SFINAE helper class for checking of a common condition:
// > This overload only participates in overload resolution
// > Check whether a pointer to an array of U can be converted
// > to a pointer to an array of E.
// This helper is used for checking of 'U -> const U'.
template<class E, std::size_t Extent, class U, std::size_t N>
struct is_span_convertable {
// > Check whether a pointer to an array of From can be converted
// > to a pointer to an array of To.
// This helper is used for checking of 'From -> const From'.
template<class To, std::size_t Extent, class From, std::size_t N>
struct is_span_convertible {
using type =
typename std::conditional<std::is_convertible<U (*)[], E (*)[]>::value
typename std::conditional<std::is_convertible<From (*)[], To (*)[]>::value
&& (Extent == dynamic_extent || N == Extent),
int, void>::type;
};
template<typename T>
struct SpanIterator {
// TODO: upgrade to std::random_access_iterator_tag.
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = typename std::remove_cv<T>::type;
using reference = T&;
using pointer = T*;
// Convince MSVC compiler that this iterator is trusted (it is verified).
#ifdef _MSC_VER
using _Unchecked_type = pointer;
#endif // _MSC_VER
SpanIterator(pointer ptr) : ptr_(ptr) {}
reference operator*() const { return *ptr_; }
pointer operator->() { return ptr_; }
SpanIterator& operator++() { ptr_++; return *this; }
SpanIterator operator++(int) { auto tmp = *this; ++(*this); return tmp; }
friend bool operator== (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ == rhs.ptr_; }
friend bool operator!= (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ != rhs.ptr_; }
private:
pointer ptr_;
};
} // namespace internal
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
@ -534,6 +365,13 @@ class span FLATBUFFERS_FINAL_CLASS {
return data_;
}
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
using Iterator = internal::SpanIterator<T>;
Iterator begin() const { return Iterator(data()); }
Iterator end() const { return Iterator(data() + size()); }
#endif
// Returns a reference to the idx-th element of the sequence.
// The behavior is undefined if the idx is greater than or equal to size().
FLATBUFFERS_CONSTEXPR_CPP11 reference operator[](size_type idx) const {
@ -577,7 +415,7 @@ class span FLATBUFFERS_FINAL_CLASS {
// extent == 0 || extent == flatbuffers::dynamic_extent.
  // A dummy template argument N is needed as a dependency for SFINAE.
template<std::size_t N = 0,
typename internal::is_span_convertable<element_type, Extent, element_type, (N - N)>::type = 0>
typename internal::is_span_convertible<element_type, Extent, element_type, (N - N)>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
count_(0) {
static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
@ -590,12 +428,12 @@ class span FLATBUFFERS_FINAL_CLASS {
// std::remove_pointer_t<decltype(std::data(arr))>(*)[]
// is convertible to element_type (*)[].
template<std::size_t N,
typename internal::is_span_convertable<element_type, Extent, element_type, N>::type = 0>
typename internal::is_span_convertible<element_type, Extent, element_type, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(element_type (&arr)[N]) FLATBUFFERS_NOEXCEPT
: data_(arr), count_(N) {}
template<class U, std::size_t N,
typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
typename internal::is_span_convertible<element_type, Extent, U, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
: data_(arr.data()), count_(N) {}
@ -605,7 +443,7 @@ class span FLATBUFFERS_FINAL_CLASS {
// : data_(arr.data()), count_(N) {}
template<class U, std::size_t N,
typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
typename internal::is_span_convertible<element_type, Extent, U, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
: data_(arr.data()), count_(N) {}
@ -615,7 +453,7 @@ class span FLATBUFFERS_FINAL_CLASS {
// if extent == std::dynamic_extent || N == extent is true and U (*)[]
// is convertible to element_type (*)[].
template<class U, std::size_t N,
typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
typename internal::is_span_convertible<element_type, Extent, U, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(const flatbuffers::span<U, N> &s) FLATBUFFERS_NOEXCEPT
: span(s.data(), s.size()) {
}
@ -625,48 +463,47 @@ class span FLATBUFFERS_FINAL_CLASS {
private:
  // This is a naive implementation that stores a 'count_' member even if (Extent != dynamic_extent).
pointer const data_;
const size_type count_;
size_type count_;
};
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
template<class U, std::size_t N>
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<U, N> make_span(U(&arr)[N]) FLATBUFFERS_NOEXCEPT {
return span<U, N>(arr);
flatbuffers::span<ElementType, Extent> make_span(ElementType(&arr)[Extent]) FLATBUFFERS_NOEXCEPT {
return span<ElementType, Extent>(arr);
}
template<class U, std::size_t N>
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const U, N> make_span(const U(&arr)[N]) FLATBUFFERS_NOEXCEPT {
return span<const U, N>(arr);
flatbuffers::span<const ElementType, Extent> make_span(const ElementType(&arr)[Extent]) FLATBUFFERS_NOEXCEPT {
return span<const ElementType, Extent>(arr);
}
template<class U, std::size_t N>
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<U, N> make_span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
return span<U, N>(arr);
flatbuffers::span<ElementType, Extent> make_span(std::array<ElementType, Extent> &arr) FLATBUFFERS_NOEXCEPT {
return span<ElementType, Extent>(arr);
}
template<class U, std::size_t N>
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const U, N> make_span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
return span<const U, N>(arr);
flatbuffers::span<const ElementType, Extent> make_span(const std::array<ElementType, Extent> &arr) FLATBUFFERS_NOEXCEPT {
return span<const ElementType, Extent>(arr);
}
template<class U, std::size_t N>
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<U, dynamic_extent> make_span(U *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
return span<U, dynamic_extent>(first, count);
flatbuffers::span<ElementType, dynamic_extent> make_span(ElementType *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
return span<ElementType, dynamic_extent>(first, count);
}
template<class U, std::size_t N>
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const U, dynamic_extent> make_span(const U *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
return span<const U, dynamic_extent>(first, count);
flatbuffers::span<const ElementType, dynamic_extent> make_span(const ElementType *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
return span<const ElementType, dynamic_extent>(first, count);
}
#endif
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
} // namespace flatbuffers
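These renamed make_span overloads deduce both the element type and the extent from their argument, mirroring std::span construction. A minimal usage sketch (header path and variable names are illustrative, not taken from this diff):

#include "flatbuffers/stl_emulation.h"

int main() {
  int arr[4] = {1, 2, 3, 4};
  auto fixed = flatbuffers::make_span(arr);       // span<int, 4>
  auto dyn = flatbuffers::make_span(&arr[0], 4);  // span<int, dynamic_extent>
  return fixed.size() == dyn.size() ? 0 : 1;      // sizes match, so exits 0
}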

thirdparty/flatbuffers/string.h vendored Normal file

@ -0,0 +1,64 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STRING_H_
#define FLATBUFFERS_STRING_H_
#include "flatbuffers/base.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
struct String : public Vector<char> {
const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
std::string str() const { return std::string(c_str(), size()); }
// clang-format off
#ifdef FLATBUFFERS_HAS_STRING_VIEW
flatbuffers::string_view string_view() const {
return flatbuffers::string_view(c_str(), size());
}
#endif // FLATBUFFERS_HAS_STRING_VIEW
// clang-format on
bool operator<(const String &o) const {
return StringLessThan(this->data(), this->size(), o.data(), o.size());
}
};
// Convenience function to get std::string from a String returning an empty
// string on null pointer.
static inline std::string GetString(const String *str) {
return str ? str->str() : "";
}
// Convenience function to get char* from a String returning an empty string on
// null pointer.
static inline const char *GetCstring(const String *str) {
return str ? str->c_str() : "";
}
#ifdef FLATBUFFERS_HAS_STRING_VIEW
// Convenience function to get string_view from a String returning an empty
// string_view on null pointer.
static inline flatbuffers::string_view GetStringView(const String *str) {
return str ? str->string_view() : flatbuffers::string_view();
}
#endif // FLATBUFFERS_HAS_STRING_VIEW
} // namespace flatbuffers
#endif // FLATBUFFERS_STRING_H_
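GetString and GetCstring exist so that an absent (null) string field degrades to an empty result rather than a crash. A minimal sketch, assuming only this header plus <cassert> and <string>:

#include <cassert>
#include <string>
#include "flatbuffers/string.h"

void read_name(const flatbuffers::String *name) {   // name may be null
  assert(flatbuffers::GetString(nullptr).empty());  // null -> ""
  assert(*flatbuffers::GetCstring(nullptr) == '\0');
  std::string copy = flatbuffers::GetString(name);  // "" when name is null
  (void)copy;
}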

thirdparty/flatbuffers/struct.h vendored Normal file

@ -0,0 +1,53 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STRUCT_H_
#define FLATBUFFERS_STRUCT_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// "structs" are flat structures that do not have an offset table, thus
// always have all members present and do not support forwards/backwards
// compatible extensions.
class Struct FLATBUFFERS_FINAL_CLASS {
public:
template<typename T> T GetField(uoffset_t o) const {
return ReadScalar<T>(&data_[o]);
}
template<typename T> T GetStruct(uoffset_t o) const {
return reinterpret_cast<T>(&data_[o]);
}
const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }
private:
// private constructor & copy constructor: you obtain instances of this
// class by pointing to existing data only
Struct();
Struct(const Struct &);
Struct &operator=(const Struct &);
uint8_t data_[1];
};
} // namespace flatbuffers
#endif // FLATBUFFERS_STRUCT_H_
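Because a struct carries no vtable, reflection-style code reads a member straight from a fixed byte offset, exactly as the comment above describes. A hedged sketch; the { float x; float y; } layout and the offset 4 are invented for illustration:

#include <cstdint>
#include "flatbuffers/struct.h"

// 'data' points at bytes laid out as { float x; float y; }.
float read_y(const uint8_t *data) {
  auto st = reinterpret_cast<const flatbuffers::Struct *>(data);
  return st->GetField<float>(4);  // y sits at byte offset 4
}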

thirdparty/flatbuffers/table.h vendored Normal file

@ -0,0 +1,168 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_TABLE_H_
#define FLATBUFFERS_TABLE_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// "tables" use an offset table (possibly shared) that allows fields to be
// omitted and added at will, but uses an extra indirection to read.
class Table {
public:
const uint8_t *GetVTable() const {
return data_ - ReadScalar<soffset_t>(data_);
}
// This gets the field offset for any of the functions below it, or 0
// if the field was not present.
voffset_t GetOptionalFieldOffset(voffset_t field) const {
// The vtable offset is always at the start.
auto vtable = GetVTable();
// The first element is the size of the vtable (fields + type id + itself).
auto vtsize = ReadScalar<voffset_t>(vtable);
// If the field we're accessing is outside the vtable, we're reading older
// data, so it's the same as if the offset was 0 (not present).
return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
}
template<typename T> T GetField(voffset_t field, T defaultval) const {
auto field_offset = GetOptionalFieldOffset(field);
return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
}
template<typename P> P GetPointer(voffset_t field) {
auto field_offset = GetOptionalFieldOffset(field);
auto p = data_ + field_offset;
return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
: nullptr;
}
template<typename P> P GetPointer(voffset_t field) const {
return const_cast<Table *>(this)->GetPointer<P>(field);
}
template<typename P> P GetStruct(voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
auto p = const_cast<uint8_t *>(data_ + field_offset);
return field_offset ? reinterpret_cast<P>(p) : nullptr;
}
template<typename Raw, typename Face>
flatbuffers::Optional<Face> GetOptional(voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
auto p = data_ + field_offset;
return field_offset ? Optional<Face>(static_cast<Face>(ReadScalar<Raw>(p)))
: Optional<Face>();
}
template<typename T> bool SetField(voffset_t field, T val, T def) {
auto field_offset = GetOptionalFieldOffset(field);
if (!field_offset) return IsTheSameAs(val, def);
WriteScalar(data_ + field_offset, val);
return true;
}
template<typename T> bool SetField(voffset_t field, T val) {
auto field_offset = GetOptionalFieldOffset(field);
if (!field_offset) return false;
WriteScalar(data_ + field_offset, val);
return true;
}
bool SetPointer(voffset_t field, const uint8_t *val) {
auto field_offset = GetOptionalFieldOffset(field);
if (!field_offset) return false;
WriteScalar(data_ + field_offset,
static_cast<uoffset_t>(val - (data_ + field_offset)));
return true;
}
uint8_t *GetAddressOf(voffset_t field) {
auto field_offset = GetOptionalFieldOffset(field);
return field_offset ? data_ + field_offset : nullptr;
}
const uint8_t *GetAddressOf(voffset_t field) const {
return const_cast<Table *>(this)->GetAddressOf(field);
}
bool CheckField(voffset_t field) const {
return GetOptionalFieldOffset(field) != 0;
}
// Verify the vtable of this table.
// Call this once per table, followed by VerifyField once per field.
bool VerifyTableStart(Verifier &verifier) const {
return verifier.VerifyTableStart(data_);
}
// Verify a particular field.
template<typename T>
bool VerifyField(const Verifier &verifier, voffset_t field,
size_t align) const {
// Calling GetOptionalFieldOffset should be safe now thanks to
// VerifyTable().
auto field_offset = GetOptionalFieldOffset(field);
// Check the actual field.
return !field_offset || verifier.VerifyField<T>(data_, field_offset, align);
}
// VerifyField for required fields.
template<typename T>
bool VerifyFieldRequired(const Verifier &verifier, voffset_t field,
size_t align) const {
auto field_offset = GetOptionalFieldOffset(field);
return verifier.Check(field_offset != 0) &&
verifier.VerifyField<T>(data_, field_offset, align);
}
// Versions for offsets.
bool VerifyOffset(const Verifier &verifier, voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
return !field_offset || verifier.VerifyOffset(data_, field_offset);
}
bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
return verifier.Check(field_offset != 0) &&
verifier.VerifyOffset(data_, field_offset);
}
private:
// private constructor & copy constructor: you obtain instances of this
// class by pointing to existing data only
Table();
Table(const Table &other);
Table &operator=(const Table &);
uint8_t data_[1];
};
// This specialization allows avoiding warnings like:
// MSVC C4800: type: forcing value to bool 'true' or 'false'.
template<>
inline flatbuffers::Optional<bool> Table::GetOptional<uint8_t, bool>(
voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
auto p = data_ + field_offset;
return field_offset ? Optional<bool>(ReadScalar<uint8_t>(p) != 0)
: Optional<bool>();
}
} // namespace flatbuffers
#endif // FLATBUFFERS_TABLE_H_
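A generated table accessor reduces to the pattern above: consult the vtable through GetOptionalFieldOffset and fall back to the schema default when the writer omitted the field. A sketch with an invented vtable slot and default value:

#include <cstdint>
#include "flatbuffers/table.h"

int32_t read_hp(const flatbuffers::Table *monster) {
  // 8 is an illustrative vtable slot, 100 an illustrative schema default.
  return monster->GetField<int32_t>(/*field=*/8, /*defaultval=*/100);
}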

thirdparty/flatbuffers/vector.h vendored Normal file

@ -0,0 +1,389 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VECTOR_H_
#define FLATBUFFERS_VECTOR_H_
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/stl_emulation.h"
namespace flatbuffers {
struct String;
// An STL compatible iterator implementation for Vector below, effectively
// calling Get() for every element.
template<typename T, typename IT> struct VectorIterator {
typedef std::random_access_iterator_tag iterator_category;
typedef IT value_type;
typedef ptrdiff_t difference_type;
typedef IT *pointer;
typedef IT &reference;
VectorIterator(const uint8_t *data, uoffset_t i)
: data_(data + IndirectHelper<T>::element_stride * i) {}
VectorIterator(const VectorIterator &other) : data_(other.data_) {}
VectorIterator() : data_(nullptr) {}
VectorIterator &operator=(const VectorIterator &other) {
data_ = other.data_;
return *this;
}
VectorIterator &operator=(VectorIterator &&other) {
data_ = other.data_;
return *this;
}
bool operator==(const VectorIterator &other) const {
return data_ == other.data_;
}
bool operator<(const VectorIterator &other) const {
return data_ < other.data_;
}
bool operator!=(const VectorIterator &other) const {
return data_ != other.data_;
}
difference_type operator-(const VectorIterator &other) const {
return (data_ - other.data_) / IndirectHelper<T>::element_stride;
}
// Note: return type is incompatible with the standard
// `reference operator*()`.
IT operator*() const { return IndirectHelper<T>::Read(data_, 0); }
// Note: return type is incompatible with the standard
// `pointer operator->()`.
IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }
VectorIterator &operator++() {
data_ += IndirectHelper<T>::element_stride;
return *this;
}
VectorIterator operator++(int) {
VectorIterator temp(data_, 0);
data_ += IndirectHelper<T>::element_stride;
return temp;
}
VectorIterator operator+(const uoffset_t &offset) const {
return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
0);
}
VectorIterator &operator+=(const uoffset_t &offset) {
data_ += offset * IndirectHelper<T>::element_stride;
return *this;
}
VectorIterator &operator--() {
data_ -= IndirectHelper<T>::element_stride;
return *this;
}
VectorIterator operator--(int) {
VectorIterator temp(data_, 0);
data_ -= IndirectHelper<T>::element_stride;
return temp;
}
VectorIterator operator-(const uoffset_t &offset) const {
return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
0);
}
VectorIterator &operator-=(const uoffset_t &offset) {
data_ -= offset * IndirectHelper<T>::element_stride;
return *this;
}
private:
const uint8_t *data_;
};
template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
explicit VectorReverseIterator(Iterator iter)
: std::reverse_iterator<Iterator>(iter) {}
// Note: return type is incompatible with the standard
// `reference operator*()`.
typename Iterator::value_type operator*() const {
auto tmp = std::reverse_iterator<Iterator>::current;
return *--tmp;
}
// Note: return type is incompatible with the standard
// `pointer operator->()`.
typename Iterator::value_type operator->() const {
auto tmp = std::reverse_iterator<Iterator>::current;
return *--tmp;
}
};
// This is used as a helper type for accessing vectors.
// Vector::data() assumes the vector elements start after the length field.
template<typename T> class Vector {
public:
typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
iterator;
typedef VectorIterator<T, typename IndirectHelper<T>::return_type>
const_iterator;
typedef VectorReverseIterator<iterator> reverse_iterator;
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
scalar_tag;
static FLATBUFFERS_CONSTEXPR bool is_span_observable =
scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);
uoffset_t size() const { return EndianScalar(length_); }
// Deprecated: use size(). Here for backwards compatibility.
FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
uoffset_t Length() const { return size(); }
typedef typename IndirectHelper<T>::return_type return_type;
typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
typedef return_type value_type;
return_type Get(uoffset_t i) const {
FLATBUFFERS_ASSERT(i < size());
return IndirectHelper<T>::Read(Data(), i);
}
return_type operator[](uoffset_t i) const { return Get(i); }
// If this is a Vector of enums, T will be its storage type, not the enum
// type. This function makes it convenient to retrieve value with enum
// type E.
template<typename E> E GetEnum(uoffset_t i) const {
return static_cast<E>(Get(i));
}
// If this is a vector of unions, this does the cast for you. There's no check
// to make sure this is the right type!
template<typename U> const U *GetAs(uoffset_t i) const {
return reinterpret_cast<const U *>(Get(i));
}
// If this is a vector of unions, this does the cast for you. There's no check
// to make sure this is actually a string!
const String *GetAsString(uoffset_t i) const {
return reinterpret_cast<const String *>(Get(i));
}
const void *GetStructFromOffset(size_t o) const {
return reinterpret_cast<const void *>(Data() + o);
}
iterator begin() { return iterator(Data(), 0); }
const_iterator begin() const { return const_iterator(Data(), 0); }
iterator end() { return iterator(Data(), size()); }
const_iterator end() const { return const_iterator(Data(), size()); }
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
const_reverse_iterator crbegin() const { return rbegin(); }
const_reverse_iterator crend() const { return rend(); }
// Change elements if you have a non-const pointer to this object.
// Scalars only. See reflection.h, and the documentation.
void Mutate(uoffset_t i, const T &val) {
FLATBUFFERS_ASSERT(i < size());
WriteScalar(data() + i, val);
}
// Change an element of a vector of tables (or strings).
// "val" points to the new table/string, as you can obtain from
// e.g. reflection::AddFlatBuffer().
void MutateOffset(uoffset_t i, const uint8_t *val) {
FLATBUFFERS_ASSERT(i < size());
static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
WriteScalar(data() + i,
static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
}
// Get a mutable pointer to tables/strings inside this vector.
mutable_return_type GetMutableObject(uoffset_t i) const {
FLATBUFFERS_ASSERT(i < size());
return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
}
// The raw data in little endian format. Use with care.
const uint8_t *Data() const {
return reinterpret_cast<const uint8_t *>(&length_ + 1);
}
uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }
// Similarly, but typed, much like std::vector::data
const T *data() const { return reinterpret_cast<const T *>(Data()); }
T *data() { return reinterpret_cast<T *>(Data()); }
template<typename K> return_type LookupByKey(K key) const {
void *search_result = std::bsearch(
&key, Data(), size(), IndirectHelper<T>::element_stride, KeyCompare<K>);
if (!search_result) {
return nullptr; // Key not found.
}
const uint8_t *element = reinterpret_cast<const uint8_t *>(search_result);
return IndirectHelper<T>::Read(element, 0);
}
template<typename K> mutable_return_type MutableLookupByKey(K key) {
return const_cast<mutable_return_type>(LookupByKey(key));
}
protected:
// This class is only used to access pre-existing data. Don't ever
// try to construct these manually.
Vector();
uoffset_t length_;
private:
// This class is a pointer. Copying will therefore create an invalid object.
// Private and unimplemented copy constructor.
Vector(const Vector &);
Vector &operator=(const Vector &);
template<typename K> static int KeyCompare(const void *ap, const void *bp) {
const K *key = reinterpret_cast<const K *>(ap);
const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
auto table = IndirectHelper<T>::Read(data, 0);
// std::bsearch compares with the operands transposed, so we negate the
// result here.
return -table->KeyCompareWithValue(*key);
}
};
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
FLATBUFFERS_NOEXCEPT {
static_assert(Vector<U>::is_span_observable,
"wrong type U, only LE-scalar, or byte types are allowed");
return span<U>(vec.data(), vec.size());
}
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
static_assert(Vector<U>::is_span_observable,
"wrong type U, only LE-scalar, or byte types are allowed");
return span<const U>(vec.data(), vec.size());
}
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t> make_bytes_span(
Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
static_assert(Vector<U>::scalar_tag::value,
"wrong type U, only LE-scalar, or byte types are allowed");
return span<uint8_t>(vec.Data(), vec.size() * sizeof(U));
}
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t> make_bytes_span(
const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
static_assert(Vector<U>::scalar_tag::value,
"wrong type U, only LE-scalar, or byte types are allowed");
return span<const uint8_t>(vec.Data(), vec.size() * sizeof(U));
}
// Convenient helper functions to get a span of any vector, regardless
// of whether it is null or not (the field is not set).
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> *ptr)
FLATBUFFERS_NOEXCEPT {
static_assert(Vector<U>::is_span_observable,
"wrong type U, only LE-scalar, or byte types are allowed");
return ptr ? make_span(*ptr) : span<U>();
}
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
const Vector<U> *ptr) FLATBUFFERS_NOEXCEPT {
static_assert(Vector<U>::is_span_observable,
"wrong type U, only LE-scalar, or byte types are allowed");
return ptr ? make_span(*ptr) : span<const U>();
}
// Represent a vector much like the template above, but in this case we
// don't know what the element types are (used with reflection.h).
class VectorOfAny {
public:
uoffset_t size() const { return EndianScalar(length_); }
const uint8_t *Data() const {
return reinterpret_cast<const uint8_t *>(&length_ + 1);
}
uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }
protected:
VectorOfAny();
uoffset_t length_;
private:
VectorOfAny(const VectorOfAny &);
VectorOfAny &operator=(const VectorOfAny &);
};
template<typename T, typename U>
Vector<Offset<T>> *VectorCast(Vector<Offset<U>> *ptr) {
static_assert(std::is_base_of<T, U>::value, "Unrelated types");
return reinterpret_cast<Vector<Offset<T>> *>(ptr);
}
template<typename T, typename U>
const Vector<Offset<T>> *VectorCast(const Vector<Offset<U>> *ptr) {
static_assert(std::is_base_of<T, U>::value, "Unrelated types");
return reinterpret_cast<const Vector<Offset<T>> *>(ptr);
}
// Convenient helper function to get the length of any vector, regardless
// of whether it is null or not (the field is not set).
template<typename T> static inline size_t VectorLength(const Vector<T> *v) {
return v ? v->size() : 0;
}
} // namespace flatbuffers
#endif // FLATBUFFERS_VECTOR_H_
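The make_span(Vector<U> *) and VectorLength helpers above are deliberately null-tolerant, so an optional vector field can be consumed without a presence check. A minimal sketch:

#include <cstddef>
#include <cstdint>
#include "flatbuffers/vector.h"

size_t byte_count(const flatbuffers::Vector<uint8_t> *payload) {  // may be null
  auto bytes = flatbuffers::make_span(payload);  // empty span when payload is null
  return bytes.size();                           // equals VectorLength(payload)
}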

thirdparty/flatbuffers/vector_downward.h vendored Normal file

@ -0,0 +1,271 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
#include "flatbuffers/detached_buffer.h"
namespace flatbuffers {
// This is a minimal replication of std::vector<uint8_t> functionality,
// except growing from higher to lower addresses. i.e. push_back() inserts data
// in the lowest address in the vector.
// Since this vector leaves the lower part unused, we support a "scratch-pad"
// that can be stored there for temporary data, to share the allocated space.
// Essentially, this supports 2 std::vectors in a single buffer.
class vector_downward {
public:
explicit vector_downward(size_t initial_size, Allocator *allocator,
bool own_allocator, size_t buffer_minalign)
: allocator_(allocator),
own_allocator_(own_allocator),
initial_size_(initial_size),
buffer_minalign_(buffer_minalign),
reserved_(0),
size_(0),
buf_(nullptr),
cur_(nullptr),
scratch_(nullptr) {}
vector_downward(vector_downward &&other)
// clang-format on
: allocator_(other.allocator_),
own_allocator_(other.own_allocator_),
initial_size_(other.initial_size_),
buffer_minalign_(other.buffer_minalign_),
reserved_(other.reserved_),
size_(other.size_),
buf_(other.buf_),
cur_(other.cur_),
scratch_(other.scratch_) {
// No change in other.allocator_
// No change in other.initial_size_
// No change in other.buffer_minalign_
other.own_allocator_ = false;
other.reserved_ = 0;
other.buf_ = nullptr;
other.cur_ = nullptr;
other.scratch_ = nullptr;
}
vector_downward &operator=(vector_downward &&other) {
// Move construct a temporary and swap idiom
vector_downward temp(std::move(other));
swap(temp);
return *this;
}
~vector_downward() {
clear_buffer();
clear_allocator();
}
void reset() {
clear_buffer();
clear();
}
void clear() {
if (buf_) {
cur_ = buf_ + reserved_;
} else {
reserved_ = 0;
cur_ = nullptr;
}
size_ = 0;
clear_scratch();
}
void clear_scratch() { scratch_ = buf_; }
void clear_allocator() {
if (own_allocator_ && allocator_) { delete allocator_; }
allocator_ = nullptr;
own_allocator_ = false;
}
void clear_buffer() {
if (buf_) Deallocate(allocator_, buf_, reserved_);
buf_ = nullptr;
}
// Relinquish the pointer to the caller.
uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
auto *buf = buf_;
allocated_bytes = reserved_;
offset = static_cast<size_t>(cur_ - buf_);
// release_raw only relinquishes the buffer ownership.
// Does not deallocate or reset the allocator. Destructor will do that.
buf_ = nullptr;
clear();
return buf;
}
// Relinquish the pointer to the caller.
DetachedBuffer release() {
// allocator ownership (if any) is transferred to DetachedBuffer.
DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_,
size());
if (own_allocator_) {
allocator_ = nullptr;
own_allocator_ = false;
}
buf_ = nullptr;
clear();
return fb;
}
size_t ensure_space(size_t len) {
FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
// Beyond this, signed offsets may not have enough range:
// (FlatBuffers > 2GB not supported).
FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
return len;
}
inline uint8_t *make_space(size_t len) {
if (len) {
ensure_space(len);
cur_ -= len;
size_ += static_cast<uoffset_t>(len);
}
return cur_;
}
// Returns nullptr if using the DefaultAllocator.
Allocator *get_custom_allocator() { return allocator_; }
inline uoffset_t size() const { return size_; }
uoffset_t scratch_size() const {
return static_cast<uoffset_t>(scratch_ - buf_);
}
size_t capacity() const { return reserved_; }
uint8_t *data() const {
FLATBUFFERS_ASSERT(cur_);
return cur_;
}
uint8_t *scratch_data() const {
FLATBUFFERS_ASSERT(buf_);
return buf_;
}
uint8_t *scratch_end() const {
FLATBUFFERS_ASSERT(scratch_);
return scratch_;
}
uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }
void push(const uint8_t *bytes, size_t num) {
if (num > 0) { memcpy(make_space(num), bytes, num); }
}
// Specialized version of push() that avoids memcpy call for small data.
template<typename T> void push_small(const T &little_endian_t) {
make_space(sizeof(T));
*reinterpret_cast<T *>(cur_) = little_endian_t;
}
template<typename T> void scratch_push_small(const T &t) {
ensure_space(sizeof(T));
*reinterpret_cast<T *>(scratch_) = t;
scratch_ += sizeof(T);
}
// fill() is most frequently called with small byte counts (<= 4),
// which is why we're using loops rather than calling memset.
void fill(size_t zero_pad_bytes) {
make_space(zero_pad_bytes);
for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0;
}
// Version for when we know the size is larger.
// Precondition: zero_pad_bytes > 0
void fill_big(size_t zero_pad_bytes) {
memset(make_space(zero_pad_bytes), 0, zero_pad_bytes);
}
void pop(size_t bytes_to_remove) {
cur_ += bytes_to_remove;
size_ -= static_cast<uoffset_t>(bytes_to_remove);
}
void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }
void swap(vector_downward &other) {
using std::swap;
swap(allocator_, other.allocator_);
swap(own_allocator_, other.own_allocator_);
swap(initial_size_, other.initial_size_);
swap(buffer_minalign_, other.buffer_minalign_);
swap(reserved_, other.reserved_);
swap(size_, other.size_);
swap(buf_, other.buf_);
swap(cur_, other.cur_);
swap(scratch_, other.scratch_);
}
void swap_allocator(vector_downward &other) {
using std::swap;
swap(allocator_, other.allocator_);
swap(own_allocator_, other.own_allocator_);
}
private:
// You shouldn't really be copying instances of this class.
FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));
Allocator *allocator_;
bool own_allocator_;
size_t initial_size_;
size_t buffer_minalign_;
size_t reserved_;
uoffset_t size_;
uint8_t *buf_;
uint8_t *cur_; // Points at location between empty (below) and used (above).
uint8_t *scratch_; // Points to the end of the scratchpad in use.
void reallocate(size_t len) {
auto old_reserved = reserved_;
auto old_size = size();
auto old_scratch_size = scratch_size();
reserved_ +=
(std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
if (buf_) {
buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_,
old_size, old_scratch_size);
} else {
buf_ = Allocate(allocator_, reserved_);
}
cur_ = buf_ + reserved_ - old_size;
scratch_ = buf_ + old_scratch_size;
}
};
} // namespace flatbuffers
#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_
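The downward-growth invariant described above means every push lands at a lower address, so the bytes pushed last come first in memory; that is what lets FlatBuffers assemble a buffer back to front. An illustrative sketch (passing a null allocator selects the built-in default):

#include <cstdint>
#include "flatbuffers/vector_downward.h"

void demo() {
  flatbuffers::vector_downward buf(/*initial_size=*/1024, /*allocator=*/nullptr,
                                   /*own_allocator=*/false, /*buffer_minalign=*/8);
  uint32_t a = 1, b = 2;
  buf.push(reinterpret_cast<const uint8_t *>(&a), sizeof(a));
  buf.push(reinterpret_cast<const uint8_t *>(&b), sizeof(b));
  // buf.data() now points at b, with a immediately after it: [b][a]
}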

thirdparty/flatbuffers/verifier.h vendored Normal file

@ -0,0 +1,317 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VERIFIER_H_
#define FLATBUFFERS_VERIFIER_H_
#include "flatbuffers/base.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// Helper class to verify the integrity of a FlatBuffer
class Verifier FLATBUFFERS_FINAL_CLASS {
public:
struct Options {
// The maximum nesting of tables and vectors before we call it invalid.
uoffset_t max_depth = 64;
// The maximum number of tables we will verify before we call it invalid.
uoffset_t max_tables = 1000000;
// If true, verify all data is aligned.
bool check_alignment = true;
// If true, run verifier on nested flatbuffers
bool check_nested_flatbuffers = true;
};
explicit Verifier(const uint8_t *const buf, const size_t buf_len,
const Options &opts)
: buf_(buf), size_(buf_len), opts_(opts) {
FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
}
// Deprecated API, please construct with Verifier::Options.
Verifier(const uint8_t *const buf, const size_t buf_len,
const uoffset_t max_depth = 64, const uoffset_t max_tables = 1000000,
const bool check_alignment = true)
: Verifier(buf, buf_len, [&] {
Options opts;
opts.max_depth = max_depth;
opts.max_tables = max_tables;
opts.check_alignment = check_alignment;
return opts;
}()) {}
// Central location where any verification failures register.
bool Check(const bool ok) const {
// clang-format off
#ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
FLATBUFFERS_ASSERT(ok);
#endif
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
if (!ok)
upper_bound_ = 0;
#endif
// clang-format on
return ok;
}
// Verify any range within the buffer.
bool Verify(const size_t elem, const size_t elem_len) const {
// clang-format off
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
auto upper_bound = elem + elem_len;
if (upper_bound_ < upper_bound)
upper_bound_ = upper_bound;
#endif
// clang-format on
return Check(elem_len < size_ && elem <= size_ - elem_len);
}
bool VerifyAlignment(const size_t elem, const size_t align) const {
return Check((elem & (align - 1)) == 0 || !opts_.check_alignment);
}
// Verify a range indicated by sizeof(T).
template<typename T> bool Verify(const size_t elem) const {
return VerifyAlignment(elem, sizeof(T)) && Verify(elem, sizeof(T));
}
bool VerifyFromPointer(const uint8_t *const p, const size_t len) {
return Verify(static_cast<size_t>(p - buf_), len);
}
// Verify relative to a known-good base pointer.
bool VerifyFieldStruct(const uint8_t *const base, const voffset_t elem_off,
const size_t elem_len, const size_t align) const {
const auto f = static_cast<size_t>(base - buf_) + elem_off;
return VerifyAlignment(f, align) && Verify(f, elem_len);
}
template<typename T>
bool VerifyField(const uint8_t *const base, const voffset_t elem_off,
const size_t align) const {
const auto f = static_cast<size_t>(base - buf_) + elem_off;
return VerifyAlignment(f, align) && Verify(f, sizeof(T));
}
// Verify a pointer (may be NULL) of a table type.
template<typename T> bool VerifyTable(const T *const table) {
return !table || table->Verify(*this);
}
// Verify a pointer (may be NULL) of any vector type.
template<typename T> bool VerifyVector(const Vector<T> *const vec) const {
return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec),
sizeof(T));
}
// Verify a pointer (may be NULL) of a vector to struct.
template<typename T>
bool VerifyVector(const Vector<const T *> *const vec) const {
return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
}
// Verify a pointer (may be NULL) to string.
bool VerifyString(const String *const str) const {
size_t end;
return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str),
1, &end) &&
Verify(end, 1) && // Must have terminator
Check(buf_[end] == '\0')); // Terminating byte must be 0.
}
// Common code between vectors and strings.
bool VerifyVectorOrString(const uint8_t *const vec, const size_t elem_size,
size_t *const end = nullptr) const {
const auto veco = static_cast<size_t>(vec - buf_);
// Check we can read the size field.
if (!Verify<uoffset_t>(veco)) return false;
// Check the whole array. If this is a string, the byte past the array must
// be 0.
const auto size = ReadScalar<uoffset_t>(vec);
const auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
if (!Check(size < max_elems))
return false; // Protect against byte_size overflowing.
const auto byte_size = sizeof(size) + elem_size * size;
if (end) *end = veco + byte_size;
return Verify(veco, byte_size);
}
// Special case for string contents, after the above has been called.
bool VerifyVectorOfStrings(const Vector<Offset<String>> *const vec) const {
if (vec) {
for (uoffset_t i = 0; i < vec->size(); i++) {
if (!VerifyString(vec->Get(i))) return false;
}
}
return true;
}
// Special case for table contents, after the above has been called.
template<typename T>
bool VerifyVectorOfTables(const Vector<Offset<T>> *const vec) {
if (vec) {
for (uoffset_t i = 0; i < vec->size(); i++) {
if (!vec->Get(i)->Verify(*this)) return false;
}
}
return true;
}
__suppress_ubsan__("unsigned-integer-overflow") bool VerifyTableStart(
const uint8_t *const table) {
// Check the vtable offset.
const auto tableo = static_cast<size_t>(table - buf_);
if (!Verify<soffset_t>(tableo)) return false;
// This offset may be signed, but doing the subtraction unsigned always
// gives the result we want.
const auto vtableo =
tableo - static_cast<size_t>(ReadScalar<soffset_t>(table));
// Check the vtable size field, then check vtable fits in its entirety.
if (!(VerifyComplexity() && Verify<voffset_t>(vtableo) &&
VerifyAlignment(ReadScalar<voffset_t>(buf_ + vtableo),
sizeof(voffset_t))))
return false;
const auto vsize = ReadScalar<voffset_t>(buf_ + vtableo);
return Check((vsize & 1) == 0) && Verify(vtableo, vsize);
}
template<typename T>
bool VerifyBufferFromStart(const char *const identifier, const size_t start) {
// Buffers have to be of some size to be valid. The reason it is a runtime
// check instead of a static_assert is that nested flatbuffers go through
// this call and their size is determined at runtime.
if (!Check(size_ >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;
// If an identifier is provided, check that we have a buffer
if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) &&
BufferHasIdentifier(buf_ + start, identifier)))) {
return false;
}
// Call T::Verify, which must be in the generated code for this type.
const auto o = VerifyOffset(start);
return Check(o != 0) &&
reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
// clang-format off
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
&& GetComputedSize()
#endif
;
// clang-format on
}
template<typename T>
bool VerifyNestedFlatBuffer(const Vector<uint8_t> *const buf,
const char *const identifier) {
// Caller opted out of this.
if (!opts_.check_nested_flatbuffers) return true;
// An empty buffer is OK as it indicates not present.
if (!buf) return true;
// If there is a nested buffer, it must be greater than the min size.
if (!Check(buf->size() >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;
Verifier nested_verifier(buf->data(), buf->size());
return nested_verifier.VerifyBuffer<T>(identifier);
}
// Verify this whole buffer, starting with root type T.
template<typename T> bool VerifyBuffer() { return VerifyBuffer<T>(nullptr); }
template<typename T> bool VerifyBuffer(const char *const identifier) {
return VerifyBufferFromStart<T>(identifier, 0);
}
template<typename T>
bool VerifySizePrefixedBuffer(const char *const identifier) {
return Verify<uoffset_t>(0U) &&
Check(ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t)) &&
VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
}
uoffset_t VerifyOffset(const size_t start) const {
if (!Verify<uoffset_t>(start)) return 0;
const auto o = ReadScalar<uoffset_t>(buf_ + start);
// May not point to itself.
if (!Check(o != 0)) return 0;
// Can't wrap around / buffers are max 2GB.
if (!Check(static_cast<soffset_t>(o) >= 0)) return 0;
// Must be inside the buffer to create a pointer from it (pointer outside
// buffer is UB).
if (!Verify(start + o, 1)) return 0;
return o;
}
uoffset_t VerifyOffset(const uint8_t *const base,
const voffset_t start) const {
return VerifyOffset(static_cast<size_t>(base - buf_) + start);
}
// Called at the start of a table to increase counters measuring data
// structure depth and amount, and possibly bails out with false if limits set
// by the constructor have been hit. Needs to be balanced with EndTable().
bool VerifyComplexity() {
depth_++;
num_tables_++;
return Check(depth_ <= opts_.max_depth && num_tables_ <= opts_.max_tables);
}
// Called at the end of a table to pop the depth count.
bool EndTable() {
depth_--;
return true;
}
// Returns the message size in bytes
size_t GetComputedSize() const {
// clang-format off
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
uintptr_t size = upper_bound_;
// Align the size to uoffset_t
size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1);
return (size > size_) ? 0 : size;
#else
// Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work.
(void)upper_bound_;
FLATBUFFERS_ASSERT(false);
return 0;
#endif
// clang-format on
}
std::vector<uint8_t> *GetFlexReuseTracker() { return flex_reuse_tracker_; }
void SetFlexReuseTracker(std::vector<uint8_t> *const rt) {
flex_reuse_tracker_ = rt;
}
private:
const uint8_t *buf_;
const size_t size_;
const Options opts_;
mutable size_t upper_bound_ = 0;
uoffset_t depth_ = 0;
uoffset_t num_tables_ = 0;
std::vector<uint8_t> *flex_reuse_tracker_ = nullptr;
};
} // namespace flatbuffers
#endif // FLATBUFFERS_VERIFIER_H_
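Typical use is to run the verifier over an untrusted buffer before touching it. MyRoot below stands in for a generated root-table type, so this is a sketch rather than code from the diff:

#include <cstdint>
#include "flatbuffers/verifier.h"

template <typename MyRoot>
bool is_valid(const uint8_t *data, size_t len) {
  flatbuffers::Verifier::Options opts;
  opts.max_depth = 16;  // tighter than the default of 64
  flatbuffers::Verifier verifier(data, len, opts);
  return verifier.VerifyBuffer<MyRoot>(/*identifier=*/nullptr);
}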

thirdparty/fmt/CMakeLists.txt vendored

@ -262,12 +262,6 @@ if (CMAKE_BUILD_TYPE STREQUAL "Debug")
endif ()
if (BUILD_SHARED_LIBS)
if (UNIX AND NOT APPLE AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS" AND
NOT EMSCRIPTEN)
# Fix rpmlint warning:
# unused-direct-shlib-dependency /usr/lib/libformat.so.1.1.0 /lib/libm.so.6.
target_link_libraries(fmt -Wl,--as-needed)
endif ()
target_compile_definitions(fmt PRIVATE FMT_EXPORT INTERFACE FMT_SHARED)
endif ()
if (FMT_SAFE_DURATION_CAST)

thirdparty/fmt/ChangeLog.rst vendored

@ -1,3 +1,129 @@
9.1.0 - 2022-08-27
------------------
* ``fmt::formatted_size`` now works at compile time
(`#3026 <https://github.com/fmtlib/fmt/pull/3026>`_). For example
(`godbolt <https://godbolt.org/z/1MW5rMdf8>`__):
.. code:: c++

   #include <fmt/compile.h>

   int main() {
     using namespace fmt::literals;
     constexpr size_t n = fmt::formatted_size("{}"_cf, 42);
     fmt::print("{}\n", n); // prints 2
   }
Thanks `@marksantaniello (Mark Santaniello)
<https://github.com/marksantaniello>`_.
* Fixed handling of invalid UTF-8
(`#3038 <https://github.com/fmtlib/fmt/pull/3038>`_,
`#3044 <https://github.com/fmtlib/fmt/pull/3044>`_,
`#3056 <https://github.com/fmtlib/fmt/pull/3056>`_).
Thanks `@phprus (Vladislav Shchapov) <https://github.com/phprus>`_ and
`@skeeto (Christopher Wellons) <https://github.com/skeeto>`_.
* Improved Unicode support in ``ostream`` overloads of ``print``
(`#2994 <https://github.com/fmtlib/fmt/pull/2994>`_,
`#3001 <https://github.com/fmtlib/fmt/pull/3001>`_,
`#3025 <https://github.com/fmtlib/fmt/pull/3025>`_).
Thanks `@dimztimz (Dimitrij Mijoski) <https://github.com/dimztimz>`_.
* Fixed handling of the sign specifier in localized formatting on systems with
32-bit ``wchar_t`` (`#3041 <https://github.com/fmtlib/fmt/issues/3041>`_).
* Added support for wide streams to ``fmt::streamed``
(`#2994 <https://github.com/fmtlib/fmt/pull/2994>`_).
Thanks `@phprus (Vladislav Shchapov) <https://github.com/phprus>`_.
* Added the ``n`` specifier that disables the output of delimiters when
formatting ranges (`#2981 <https://github.com/fmtlib/fmt/pull/2981>`_,
`#2983 <https://github.com/fmtlib/fmt/pull/2983>`_).
For example (`godbolt <https://godbolt.org/z/roKqGdj8c>`__):
.. code:: c++

   #include <fmt/ranges.h>
   #include <vector>

   int main() {
     auto v = std::vector{1, 2, 3};
     fmt::print("{:n}\n", v); // prints 1, 2, 3
   }
Thanks `@BRevzin (Barry Revzin) <https://github.com/BRevzin>`_.
* Worked around problematic ``std::string_view`` constructors introduced in
C++23 (`#3030 <https://github.com/fmtlib/fmt/issues/3030>`_,
`#3050 <https://github.com/fmtlib/fmt/issues/3050>`_).
Thanks `@strega-nil-ms (nicole mazzuca) <https://github.com/strega-nil-ms>`_.
* Improved handling (exclusion) of recursive ranges
(`#2968 <https://github.com/fmtlib/fmt/issues/2968>`_,
`#2974 <https://github.com/fmtlib/fmt/pull/2974>`_).
Thanks `@Dani-Hub (Daniel Krügler) <https://github.com/Dani-Hub>`_.
* Improved error reporting in format string compilation
(`#3055 <https://github.com/fmtlib/fmt/issues/3055>`_).
* Improved the implementation of
`Dragonbox <https://github.com/jk-jeon/dragonbox>`_, the algorithm used for
the default floating-point formatting
(`#2984 <https://github.com/fmtlib/fmt/pull/2984>`_).
Thanks `@jk-jeon (Junekey Jeon) <https://github.com/jk-jeon>`_.
* Fixed issues with floating-point formatting on exotic platforms.
* Improved the implementation of chrono formatting
(`#3010 <https://github.com/fmtlib/fmt/pull/3010>`_).
Thanks `@phprus (Vladislav Shchapov) <https://github.com/phprus>`_.
* Improved documentation
(`#2966 <https://github.com/fmtlib/fmt/pull/2966>`_,
`#3009 <https://github.com/fmtlib/fmt/pull/3009>`_,
`#3020 <https://github.com/fmtlib/fmt/issues/3020>`_,
`#3037 <https://github.com/fmtlib/fmt/pull/3037>`_).
Thanks `@mwinterb <https://github.com/mwinterb>`_,
`@jcelerier (Jean-Michaël Celerier) <https://github.com/jcelerier>`_
and `@remiburtin (Rémi Burtin) <https://github.com/remiburtin>`_.
* Improved build configuration
(`#2991 <https://github.com/fmtlib/fmt/pull/2991>`_,
`#2995 <https://github.com/fmtlib/fmt/pull/2995>`_,
`#3004 <https://github.com/fmtlib/fmt/issues/3004>`_,
`#3007 <https://github.com/fmtlib/fmt/pull/3007>`_,
`#3040 <https://github.com/fmtlib/fmt/pull/3040>`_).
Thanks `@dimztimz (Dimitrij Mijoski) <https://github.com/dimztimz>`_ and
`@hwhsu1231 (Haowei Hsu) <https://github.com/hwhsu1231>`_.
* Fixed various warnings and compilation issues
(`#2969 <https://github.com/fmtlib/fmt/issues/2969>`_,
`#2971 <https://github.com/fmtlib/fmt/pull/2971>`_,
`#2975 <https://github.com/fmtlib/fmt/issues/2975>`_,
`#2982 <https://github.com/fmtlib/fmt/pull/2982>`_,
`#2985 <https://github.com/fmtlib/fmt/pull/2985>`_,
`#2988 <https://github.com/fmtlib/fmt/issues/2988>`_,
`#3000 <https://github.com/fmtlib/fmt/issues/3000>`_,
`#3006 <https://github.com/fmtlib/fmt/issues/3006>`_,
`#3014 <https://github.com/fmtlib/fmt/issues/3014>`_,
`#3015 <https://github.com/fmtlib/fmt/issues/3015>`_,
`#3021 <https://github.com/fmtlib/fmt/pull/3021>`_,
`#3023 <https://github.com/fmtlib/fmt/issues/3023>`_,
`#3024 <https://github.com/fmtlib/fmt/pull/3024>`_,
`#3029 <https://github.com/fmtlib/fmt/pull/3029>`_,
`#3043 <https://github.com/fmtlib/fmt/pull/3043>`_,
`#3052 <https://github.com/fmtlib/fmt/issues/3052>`_,
`#3053 <https://github.com/fmtlib/fmt/pull/3053>`_,
`#3054 <https://github.com/fmtlib/fmt/pull/3054>`_).
Thanks `@h-friederich (Hannes Friederich) <https://github.com/h-friederich>`_,
`@dimztimz (Dimitrij Mijoski) <https://github.com/dimztimz>`_,
`@olupton (Olli Lupton) <https://github.com/olupton>`_,
`@bernhardmgruber (Bernhard Manfred Gruber)
<https://github.com/bernhardmgruber>`_,
`@phprus (Vladislav Shchapov) <https://github.com/phprus>`_.
9.0.0 - 2022-07-04
------------------
@ -19,7 +145,7 @@
return result;
}
constexpr auto answer = compile_time_itoa(0.42);
constexpr auto answer = compile_time_dtoa(0.42);
works with the default settings.

thirdparty/fmt/README.rst vendored

@ -12,9 +12,6 @@
.. image:: https://github.com/fmtlib/fmt/workflows/windows/badge.svg
:target: https://github.com/fmtlib/fmt/actions?query=workflow%3Awindows
.. image:: https://ci.appveyor.com/api/projects/status/ehjkiefde6gucy1v?svg=true
:target: https://ci.appveyor.com/project/vitaut/fmt
.. image:: https://oss-fuzz-build-logs.storage.googleapis.com/badges/fmt.svg
:alt: fmt is continuously fuzzed at oss-fuzz
:target: https://bugs.chromium.org/p/oss-fuzz/issues/list?\

thirdparty/fmt/include/fmt/chrono.h vendored

@ -203,7 +203,7 @@ To safe_duration_cast(std::chrono::duration<FromRep, FromPeriod> from,
}
const auto min1 =
(std::numeric_limits<IntermediateRep>::min)() / Factor::num;
if (count < min1) {
if (!std::is_unsigned<IntermediateRep>::value && count < min1) {
ec = 1;
return {};
}
@ -1396,7 +1396,8 @@ inline bool isfinite(T) {
// Converts value to Int and checks that it's in the range [0, upper).
template <typename T, typename Int, FMT_ENABLE_IF(std::is_integral<T>::value)>
inline Int to_nonnegative_int(T value, Int upper) {
FMT_ASSERT(value >= 0 && to_unsigned(value) <= to_unsigned(upper),
FMT_ASSERT(std::is_unsigned<Int>::value ||
(value >= 0 && to_unsigned(value) <= to_unsigned(upper)),
"invalid value");
(void)upper;
return static_cast<Int>(value);
@ -1776,7 +1777,7 @@ struct chrono_formatter {
format_to(std::back_inserter(buf), runtime("{:.{}f}"),
std::fmod(val * static_cast<rep>(Period::num) /
static_cast<rep>(Period::den),
60),
static_cast<rep>(60)),
num_fractional_digits);
if (negative) *out++ = '-';
if (buf.size() < 2 || buf[1] == '.') *out++ = '0';
@ -2001,13 +2002,9 @@ template <typename Char, typename Duration>
struct formatter<std::chrono::time_point<std::chrono::system_clock, Duration>,
Char> : formatter<std::tm, Char> {
FMT_CONSTEXPR formatter() {
this->do_parse(default_specs,
default_specs + sizeof(default_specs) / sizeof(Char));
}
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return this->do_parse(ctx.begin(), ctx.end(), true);
basic_string_view<Char> default_specs =
detail::string_literal<Char, '%', 'F', ' ', '%', 'T'>{};
this->do_parse(default_specs.begin(), default_specs.end());
}
template <typename FormatContext>
@ -2015,15 +2012,8 @@ struct formatter<std::chrono::time_point<std::chrono::system_clock, Duration>,
FormatContext& ctx) const -> decltype(ctx.out()) {
return formatter<std::tm, Char>::format(localtime(val), ctx);
}
static constexpr const Char default_specs[] = {'%', 'F', ' ', '%', 'T'};
};
template <typename Char, typename Duration>
constexpr const Char
formatter<std::chrono::time_point<std::chrono::system_clock, Duration>,
Char>::default_specs[];
template <typename Char> struct formatter<std::tm, Char> {
private:
enum class spec {
@ -2035,13 +2025,18 @@ template <typename Char> struct formatter<std::tm, Char> {
basic_string_view<Char> specs;
protected:
template <typename It>
FMT_CONSTEXPR auto do_parse(It begin, It end, bool with_default = false)
-> It {
template <typename It> FMT_CONSTEXPR auto do_parse(It begin, It end) -> It {
if (begin != end && *begin == ':') ++begin;
end = detail::parse_chrono_format(begin, end, detail::tm_format_checker());
if (!with_default || end != begin)
specs = {begin, detail::to_unsigned(end - begin)};
// Replace default spec only if the new spec is not empty.
if (end != begin) specs = {begin, detail::to_unsigned(end - begin)};
return end;
}
public:
FMT_CONSTEXPR auto parse(basic_format_parse_context<Char>& ctx)
-> decltype(ctx.begin()) {
auto end = this->do_parse(ctx.begin(), ctx.end());
// basic_string_view<>::compare isn't constexpr before C++17.
if (specs.size() == 2 && specs[0] == Char('%')) {
if (specs[1] == Char('F'))
@ -2052,12 +2047,6 @@ template <typename Char> struct formatter<std::tm, Char> {
return end;
}
public:
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return this->do_parse(ctx.begin(), ctx.end());
}
template <typename FormatContext>
auto format(const std::tm& tm, FormatContext& ctx) const
-> decltype(ctx.out()) {

thirdparty/fmt/include/fmt/color.h vendored

@ -634,7 +634,7 @@ struct formatter<detail::styled_arg<T>, Char> : formatter<T, Char> {
**Example**::
fmt::print("Elapsed time: {s:.2f} seconds",
fmt::print("Elapsed time: {0:.2f} seconds",
fmt::styled(1.23, fmt::fg(fmt::color::green) |
fmt::bg(fmt::color::blue)));
\endrst

thirdparty/fmt/include/fmt/compile.h vendored

@ -14,7 +14,7 @@ FMT_BEGIN_NAMESPACE
namespace detail {
template <typename Char, typename InputIt>
inline counting_iterator copy_str(InputIt begin, InputIt end,
FMT_CONSTEXPR inline counting_iterator copy_str(InputIt begin, InputIt end,
counting_iterator it) {
return it + (end - begin);
}
@ -341,7 +341,7 @@ constexpr parse_specs_result<T, Char> parse_specs(basic_string_view<Char> str,
next_arg_id);
auto f = formatter<T, Char>();
auto end = f.parse(ctx);
return {f, pos + fmt::detail::to_unsigned(end - str.data()) + 1,
return {f, pos + fmt::detail::to_unsigned(end - str.data()),
next_arg_id == 0 ? manual_indexing_id : ctx.next_arg_id()};
}
@ -397,15 +397,22 @@ constexpr auto parse_replacement_field_then_tail(S format_str) {
return parse_tail<Args, END_POS + 1, NEXT_ID>(
field<char_type, typename field_type<T>::type, ARG_INDEX>(),
format_str);
} else if constexpr (c == ':') {
} else if constexpr (c != ':') {
FMT_THROW(format_error("expected ':'"));
} else {
constexpr auto result = parse_specs<typename field_type<T>::type>(
str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID);
return parse_tail<Args, result.end, result.next_arg_id>(
if constexpr (result.end >= str.size() || str[result.end] != '}') {
FMT_THROW(format_error("expected '}'"));
return 0;
} else {
return parse_tail<Args, result.end + 1, result.next_arg_id>(
spec_field<char_type, typename field_type<T>::type, ARG_INDEX>{
result.fmt},
format_str);
}
}
}
// Compiles a non-empty format string and returns the compiled representation
// or unknown_format() on unrecognized input.
@ -568,7 +575,8 @@ format_to_n_result<OutputIt> format_to_n(OutputIt out, size_t n,
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
size_t formatted_size(const S& format_str, const Args&... args) {
FMT_CONSTEXPR20 size_t formatted_size(const S& format_str,
const Args&... args) {
return fmt::format_to(detail::counting_iterator(), format_str, args...)
.count();
}

thirdparty/fmt/include/fmt/core.h vendored

@ -17,7 +17,7 @@
#include <type_traits>
// The fmt library version in the form major * 10000 + minor * 100 + patch.
#define FMT_VERSION 90000
#define FMT_VERSION 90100
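// For example, 9.1.0 gives 9 * 10000 + 1 * 100 + 0 = 90100.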
#if defined(__clang__) && !defined(__ibmxl__)
# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__)
@ -200,6 +200,9 @@
# endif
#endif
// An inline std::forward replacement.
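// (A plain cast avoids instantiating std::forward in hot paths, which can
// trim compile times.)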
#define FMT_FORWARD(...) static_cast<decltype(__VA_ARGS__)&&>(__VA_ARGS__)
#ifdef _MSC_VER
# define FMT_UNCHECKED_ITERATOR(It) \
using _Unchecked_type = It // Mark iterator as checked.
@ -273,7 +276,8 @@
#ifndef FMT_USE_NONTYPE_TEMPLATE_ARGS
# if defined(__cpp_nontype_template_args) && \
((FMT_GCC_VERSION >= 903 && FMT_CPLUSPLUS >= 201709L) || \
__cpp_nontype_template_args >= 201911L)
__cpp_nontype_template_args >= 201911L) && \
!defined(__NVCOMPILER)
# define FMT_USE_NONTYPE_TEMPLATE_ARGS 1
# else
# define FMT_USE_NONTYPE_TEMPLATE_ARGS 0
@ -402,7 +406,7 @@ template <typename T> auto convert_for_visit(T) -> monostate { return {}; }
template <typename Int>
FMT_CONSTEXPR auto to_unsigned(Int value) ->
typename std::make_unsigned<Int>::type {
FMT_ASSERT(value >= 0, "negative value");
FMT_ASSERT(std::is_unsigned<Int>::value || value >= 0, "negative value");
return static_cast<typename std::make_unsigned<Int>::type>(value);
}
@ -707,8 +711,8 @@ class basic_format_parse_context : private ErrorHandler {
next_arg_id_ = -1;
do_check_arg_id(id);
}
FMT_CONSTEXPR void check_arg_id(basic_string_view<Char>) {}
FMT_CONSTEXPR void check_dynamic_spec(int arg_id);
FMT_CONSTEXPR void on_error(const char* message) {
ErrorHandler::on_error(message);
@ -735,7 +739,8 @@ class compile_parse_context
ErrorHandler eh = {}, int next_arg_id = 0)
: base(format_str, eh, next_arg_id), num_args_(num_args), types_(types) {}
constexpr int num_args() const { return num_args_; }
constexpr auto num_args() const -> int { return num_args_; }
constexpr auto arg_type(int id) const -> type { return types_[id]; }
FMT_CONSTEXPR auto next_arg_id() -> int {
int id = base::next_arg_id();
@ -748,6 +753,11 @@ class compile_parse_context
if (id >= num_args_) this->on_error("argument not found");
}
using base::check_arg_id;
FMT_CONSTEXPR void check_dynamic_spec(int arg_id) {
if (arg_id < num_args_ && types_ && !is_integral_type(types_[arg_id]))
this->on_error("width/precision is not integer");
}
};
FMT_END_DETAIL_NAMESPACE
@ -763,6 +773,15 @@ basic_format_parse_context<Char, ErrorHandler>::do_check_arg_id(int id) {
}
}
template <typename Char, typename ErrorHandler>
FMT_CONSTEXPR void
basic_format_parse_context<Char, ErrorHandler>::check_dynamic_spec(int arg_id) {
if (detail::is_constant_evaluated()) {
using context = detail::compile_parse_context<Char, ErrorHandler>;
static_cast<context*>(this)->check_dynamic_spec(arg_id);
}
}
template <typename Context> class basic_format_arg;
template <typename Context> class basic_format_args;
template <typename Context> class dynamic_format_arg_store;
@ -917,11 +936,11 @@ template <typename T> class buffer {
/** Appends data to the end of the buffer. */
template <typename U> void append(const U* begin, const U* end);
template <typename I> FMT_CONSTEXPR auto operator[](I index) -> T& {
template <typename Idx> FMT_CONSTEXPR auto operator[](Idx index) -> T& {
return ptr_[index];
}
template <typename I>
FMT_CONSTEXPR auto operator[](I index) const -> const T& {
template <typename Idx>
FMT_CONSTEXPR auto operator[](Idx index) const -> const T& {
return ptr_[index];
}
};
@ -1649,6 +1668,11 @@ auto copy_str(InputIt begin, InputIt end, appender out) -> appender {
return out;
}
template <typename Char, typename R, typename OutputIt>
FMT_CONSTEXPR auto copy_str(R&& rng, OutputIt out) -> OutputIt {
return detail::copy_str<Char>(rng.begin(), rng.end(), out);
}
#if FMT_GCC_VERSION && FMT_GCC_VERSION < 500
// A workaround for gcc 4.8 to make void_t work in a SFINAE context.
template <typename... Ts> struct void_t_impl { using type = void; };
@ -1708,7 +1732,7 @@ constexpr auto encode_types() -> unsigned long long {
template <typename Context, typename T>
FMT_CONSTEXPR FMT_INLINE auto make_value(T&& val) -> value<Context> {
const auto& arg = arg_mapper<Context>().map(std::forward<T>(val));
const auto& arg = arg_mapper<Context>().map(FMT_FORWARD(val));
constexpr bool formattable_char =
!std::is_same<decltype(arg), const unformattable_char&>::value;
@ -1875,7 +1899,7 @@ class format_arg_store
data_{detail::make_arg<
is_packed, Context,
detail::mapped_type_constant<remove_cvref_t<T>, Context>::value>(
std::forward<T>(args))...} {
FMT_FORWARD(args))...} {
detail::init_named_args(data_.named_args(), 0, 0, args...);
}
};
@ -1891,7 +1915,7 @@ class format_arg_store
template <typename Context = format_context, typename... Args>
constexpr auto make_format_args(Args&&... args)
-> format_arg_store<Context, remove_cvref_t<Args>...> {
return {std::forward<Args>(args)...};
return {FMT_FORWARD(args)...};
}
/**
@ -2240,11 +2264,14 @@ class dynamic_specs_handler
FMT_CONSTEXPR auto make_arg_ref(int arg_id) -> arg_ref_type {
context_.check_arg_id(arg_id);
context_.check_dynamic_spec(arg_id);
return arg_ref_type(arg_id);
}
FMT_CONSTEXPR auto make_arg_ref(auto_id) -> arg_ref_type {
return arg_ref_type(context_.next_arg_id());
int arg_id = context_.next_arg_id();
context_.check_dynamic_spec(arg_id);
return arg_ref_type(arg_id);
}
FMT_CONSTEXPR auto make_arg_ref(basic_string_view<char_type> arg_id)
@ -2270,12 +2297,15 @@ constexpr auto to_ascii(Char c) -> underlying_t<Char> {
return c;
}
FMT_CONSTEXPR inline auto code_point_length_impl(char c) -> int {
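  // The lead byte's top five bits index a length table: 1 for ASCII, 0 for
  // continuation bytes, 2-4 for multi-byte lead bytes; the string's implicit
  // NUL terminator maps the invalid lead bytes 0xF8-0xFF to 0.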
return "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0\0\0\2\2\2\2\3\3\4"
[static_cast<unsigned char>(c) >> 3];
}
template <typename Char>
FMT_CONSTEXPR auto code_point_length(const Char* begin) -> int {
if (const_check(sizeof(Char) != 1)) return 1;
auto lengths =
"\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0\0\0\2\2\2\2\3\3\4";
int len = lengths[static_cast<unsigned char>(*begin) >> 3];
int len = code_point_length_impl(static_cast<char>(*begin));
// Compute the pointer to the next character early so that the next
// iteration can start working on the next character. Neither Clang
@ -2803,7 +2833,8 @@ FMT_CONSTEXPR auto parse_float_type_spec(const basic_format_specs<Char>& specs,
template <typename ErrorHandler = error_handler>
FMT_CONSTEXPR auto check_cstring_type_spec(presentation_type type,
ErrorHandler&& eh = {}) -> bool {
if (type == presentation_type::none || type == presentation_type::string)
if (type == presentation_type::none || type == presentation_type::string ||
type == presentation_type::debug)
return true;
if (type != presentation_type::pointer) eh.on_error("invalid type specifier");
return false;
@ -2921,7 +2952,10 @@ class format_string_checker {
basic_string_view<Char> format_str, ErrorHandler eh)
: context_(format_str, num_args, types_, eh),
parse_funcs_{&parse_format_specs<Args, parse_context_type>...},
types_{type_constant<Args, char>::value...} {}
types_{
mapped_type_constant<Args,
basic_format_context<Char*, Char>>::value...} {
}
FMT_CONSTEXPR void on_text(const Char*, const Char*) {}
@ -3065,6 +3099,15 @@ struct formatter<T, Char,
return it;
}
template <detail::type U = detail::type_constant<T, Char>::value,
enable_if_t<(U == detail::type::string_type ||
U == detail::type::cstring_type ||
U == detail::type::char_type),
int> = 0>
FMT_CONSTEXPR void set_debug_format() {
specs_.type = presentation_type::debug;
}
template <typename FormatContext>
FMT_CONSTEXPR auto format(const T& val, FormatContext& ctx) const
-> decltype(ctx.out());
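Editor's note: presentation_type::debug backs the '?' format specifier for strings and characters, and set_debug_format is how compound formatters (ranges, tuples) enable it for their elements. Observable public-API behaviour:

    #include <fmt/format.h>

    int main() {
      // '?' selects the debug presentation: quoted, escaped output.
      fmt::print("{:?}\n", "multi\nline");  // prints "multi\nline" with the
                                            // newline escaped, in quotes
      fmt::print("{:?}\n", 'x');            // prints 'x'
    }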
@ -3127,7 +3170,7 @@ template <typename Char, typename... Args> class basic_format_string {
#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409
// Workaround broken conversion on older gcc.
template <typename...> using format_string = string_view;
inline auto runtime(string_view s) -> basic_string_view<char> { return s; }
inline auto runtime(string_view s) -> string_view { return s; }
#else
template <typename... Args>
using format_string = basic_format_string<char, type_identity_t<Args>...>;

View File

@ -1337,7 +1337,7 @@ template <typename T> decimal_fp<T> to_decimal(T x) noexcept {
if (r < deltai) {
// Exclude the right endpoint if necessary.
if (r == 0 && z_mul.is_integer && !include_right_endpoint) {
if (r == 0 && (z_mul.is_integer & !include_right_endpoint)) {
--ret_value.significand;
r = float_info<T>::big_divisor;
goto small_divisor_case_label;
@ -1346,27 +1346,12 @@ template <typename T> decimal_fp<T> to_decimal(T x) noexcept {
goto small_divisor_case_label;
} else {
// r == deltai; compare fractional parts.
const carrier_uint two_fl = two_fc - 1;
if (!include_left_endpoint ||
exponent < float_info<T>::case_fc_pm_half_lower_threshold ||
exponent > float_info<T>::divisibility_check_by_5_threshold) {
// If the left endpoint is not included, the condition for
// success is z^(f) < delta^(f) (odd parity).
// Otherwise, the inequalities on exponent ensure that
// x is not an integer, so if z^(f) >= delta^(f) (even parity), we in fact
// have strict inequality.
if (!cache_accessor<T>::compute_mul_parity(two_fl, cache, beta).parity) {
goto small_divisor_case_label;
}
} else {
const typename cache_accessor<T>::compute_mul_parity_result x_mul =
cache_accessor<T>::compute_mul_parity(two_fl, cache, beta);
if (!x_mul.parity && !x_mul.is_integer) {
cache_accessor<T>::compute_mul_parity(two_fc - 1, cache, beta);
if (!(x_mul.parity | (x_mul.is_integer & include_left_endpoint)))
goto small_divisor_case_label;
}
}
}
ret_value.exponent = minus_k + float_info<T>::kappa + 1;
// We may need to remove trailing zeros.
@ -1404,7 +1389,7 @@ small_divisor_case_label:
// or equivalently, when y is an integer.
if (y_mul.parity != approx_y_parity)
--ret_value.significand;
else if (y_mul.is_integer && ret_value.significand % 2 != 0)
else if (y_mul.is_integer & (ret_value.significand % 2 != 0))
--ret_value.significand;
return ret_value;
}
@ -1488,17 +1473,13 @@ FMT_FUNC std::string vformat(string_view fmt, format_args args) {
return to_string(buffer);
}
#ifdef _WIN32
namespace detail {
#ifdef _WIN32
using dword = conditional_t<sizeof(long) == 4, unsigned long, unsigned>;
extern "C" __declspec(dllimport) int __stdcall WriteConsoleW( //
void*, const void*, dword, dword*, void*);
} // namespace detail
#endif
namespace detail {
FMT_FUNC void print(std::FILE* f, string_view text) {
#ifdef _WIN32
FMT_FUNC bool write_console(std::FILE* f, string_view text) {
auto fd = _fileno(f);
if (_isatty(fd)) {
detail::utf8_to_utf16 u16(string_view(text.data(), text.size()));
@ -1506,11 +1487,20 @@ FMT_FUNC void print(std::FILE* f, string_view text) {
if (detail::WriteConsoleW(reinterpret_cast<void*>(_get_osfhandle(fd)),
u16.c_str(), static_cast<uint32_t>(u16.size()),
&written, nullptr)) {
return;
return true;
}
// Fallback to fwrite on failure. It can happen if the output has been
// redirected to NUL.
}
// We return false if the file descriptor was not a TTY, or it was but
// WriteConsoleW failed, which can happen if the output has been redirected to
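PLACEHOLDER_SKIP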
// NUL. In both cases when we return false, we should attempt to do regular
// write via fwrite or std::ostream::write.
return false;
}
#endif
FMT_FUNC void print(std::FILE* f, string_view text) {
#ifdef _WIN32
if (write_console(f, text)) return;
#endif
detail::fwrite_fully(text.data(), 1, text.size(), f);
}

View File

@ -249,6 +249,18 @@ FMT_CONSTEXPR inline void abort_fuzzing_if(bool condition) {
#endif
}
template <typename CharT, CharT... C> struct string_literal {
static constexpr CharT value[sizeof...(C)] = {C...};
constexpr operator basic_string_view<CharT>() const {
return {value, sizeof...(C)};
}
};
#if FMT_CPLUSPLUS < 201703L
template <typename CharT, CharT... C>
constexpr CharT string_literal<CharT, C...>::value[sizeof...(C)];
#endif
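// (editor's note) A sketch of what string_literal provides: a constexpr
// basic_string_view over its character pack (detail API, internal use only):
//   constexpr fmt::basic_string_view<char> sep =
//       fmt::detail::string_literal<char, ',', ' '>{};
//   static_assert(sep.size() == 2);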
template <typename Streambuf> class formatbuf : public Streambuf {
private:
using char_type = typename Streambuf::char_type;
@ -287,7 +299,8 @@ FMT_CONSTEXPR20 auto bit_cast(const From& from) -> To {
if (is_constant_evaluated()) return std::bit_cast<To>(from);
#endif
auto to = To();
std::memcpy(&to, &from, sizeof(to));
// The cast suppresses a bogus -Wclass-memaccess on GCC.
std::memcpy(static_cast<void*>(&to), &from, sizeof(to));
return to;
}
@ -366,10 +379,12 @@ class uint128_fallback {
}
FMT_CONSTEXPR auto operator>>(int shift) const -> uint128_fallback {
if (shift == 64) return {0, hi_};
if (shift > 64) return uint128_fallback(0, hi_) >> (shift - 64);
return {hi_ >> shift, (hi_ << (64 - shift)) | (lo_ >> shift)};
}
FMT_CONSTEXPR auto operator<<(int shift) const -> uint128_fallback {
if (shift == 64) return {lo_, 0};
if (shift > 64) return uint128_fallback(lo_, 0) << (shift - 64);
return {hi_ << shift | (lo_ >> (64 - shift)), (lo_ << shift)};
}
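  // (editor's note) The new shift > 64 branches matter because shifting the
  // 64-bit halves by a negative count (64 - shift < 0) is undefined
  // behaviour. Illustration with the internal type (detail API):
  //   fmt::detail::uint128_fallback x(1);  // hi_ = 0, lo_ = 1
  //   auto y = x << 100;                   // well defined after this change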
FMT_CONSTEXPR auto operator>>=(int shift) -> uint128_fallback& {
@ -389,11 +404,11 @@ class uint128_fallback {
hi_ += (lo_ < n ? 1 : 0);
return *this;
}
#if FMT_HAS_BUILTIN(__builtin_addcll)
#if FMT_HAS_BUILTIN(__builtin_addcll) && !defined(__ibmxl__)
unsigned long long carry;
lo_ = __builtin_addcll(lo_, n, 0, &carry);
hi_ += carry;
#elif FMT_HAS_BUILTIN(__builtin_ia32_addcarryx_u64)
#elif FMT_HAS_BUILTIN(__builtin_ia32_addcarryx_u64) && !defined(__ibmxl__)
unsigned long long result;
auto carry = __builtin_ia32_addcarryx_u64(0, lo_, n, &result);
lo_ = result;
@ -592,19 +607,23 @@ FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e)
constexpr const int shiftc[] = {0, 18, 12, 6, 0};
constexpr const int shifte[] = {0, 6, 4, 2, 0};
int len = code_point_length(s);
const char* next = s + len;
int len = code_point_length_impl(*s);
// Compute the pointer to the next character early so that the next
// iteration can start working on the next character. Neither Clang
// nor GCC figure out this reordering on their own.
const char* next = s + len + !len;
using uchar = unsigned char;
// Assume a four-byte character and load four bytes. Unused bits are
// shifted out.
*c = uint32_t(s[0] & masks[len]) << 18;
*c |= uint32_t(s[1] & 0x3f) << 12;
*c |= uint32_t(s[2] & 0x3f) << 6;
*c |= uint32_t(s[3] & 0x3f) << 0;
*c = uint32_t(uchar(s[0]) & masks[len]) << 18;
*c |= uint32_t(uchar(s[1]) & 0x3f) << 12;
*c |= uint32_t(uchar(s[2]) & 0x3f) << 6;
*c |= uint32_t(uchar(s[3]) & 0x3f) << 0;
*c >>= shiftc[len];
// Accumulate the various error conditions.
using uchar = unsigned char;
*e = (*c < mins[len]) << 6; // non-canonical encoding
*e |= ((*c >> 11) == 0x1b) << 7; // surrogate half?
*e |= (*c > 0x10FFFF) << 8; // out of range?
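Editor's note: routing the byte loads through unsigned char is the substantive fix here; plain char is signed on most targets, so bytes at or above 0x80 would sign-extend before the mask is applied and corrupt the decoded code point. The companion "len + !len" change makes an invalid lead byte (len == 0) advance the pointer by one instead of zero.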
@ -628,8 +647,8 @@ FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) {
auto error = 0;
auto end = utf8_decode(buf_ptr, &cp, &error);
bool result = f(error ? invalid_code_point : cp,
string_view(ptr, to_unsigned(end - buf_ptr)));
return result ? end : nullptr;
string_view(ptr, error ? 1 : to_unsigned(end - buf_ptr)));
return result ? (error ? buf_ptr + 1 : end) : nullptr;
};
auto p = s.data();
const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars.
@ -919,8 +938,11 @@ struct is_contiguous<basic_memory_buffer<T, SIZE, Allocator>> : std::true_type {
};
namespace detail {
#ifdef _WIN32
FMT_API bool write_console(std::FILE* f, string_view text);
#endif
FMT_API void print(std::FILE*, string_view);
}
} // namespace detail
/** A formatting error such as invalid format string. */
FMT_CLASS_API
@ -1213,7 +1235,7 @@ FMT_CONSTEXPR20 auto format_decimal(Char* out, UInt value, int size)
template <typename Char, typename UInt, typename Iterator,
FMT_ENABLE_IF(!std::is_pointer<remove_cvref_t<Iterator>>::value)>
inline auto format_decimal(Iterator out, UInt value, int size)
FMT_CONSTEXPR inline auto format_decimal(Iterator out, UInt value, int size)
-> format_decimal_result<Iterator> {
// Buffer is large enough to hold all digits (digits10 + 1).
Char buffer[digits10<UInt>() + 1];
@ -1274,8 +1296,6 @@ template <> struct float_info<float> {
static const int small_divisor = 10;
static const int min_k = -31;
static const int max_k = 46;
static const int divisibility_check_by_5_threshold = 39;
static const int case_fc_pm_half_lower_threshold = -1;
static const int shorter_interval_tie_lower_threshold = -35;
static const int shorter_interval_tie_upper_threshold = -35;
};
@ -1288,8 +1308,6 @@ template <> struct float_info<double> {
static const int small_divisor = 100;
static const int min_k = -292;
static const int max_k = 326;
static const int divisibility_check_by_5_threshold = 86;
static const int case_fc_pm_half_lower_threshold = -2;
static const int shorter_interval_tie_lower_threshold = -77;
static const int shorter_interval_tie_upper_threshold = -77;
};
@ -1543,7 +1561,10 @@ FMT_CONSTEXPR inline fp get_cached_power(int min_exponent,
const int dec_exp_step = 8;
index = (index - first_dec_exp - 1) / dec_exp_step + 1;
pow10_exponent = first_dec_exp + index * dec_exp_step;
return {data::pow10_significands[index], data::pow10_exponents[index]};
// Using *(x + index) instead of x[index] avoids an issue with some compilers
// using the EDG frontend (e.g. nvhpc/22.3 in C++17 mode).
return {*(data::pow10_significands + index),
*(data::pow10_exponents + index)};
}
#ifndef _MSC_VER
@ -1729,7 +1750,7 @@ inline auto find_escape(const char* begin, const char* end)
/* Use the hidden visibility as a workaround for a GCC bug (#1973). */ \
/* Use a macro-like name to avoid shadowing warnings. */ \
struct FMT_GCC_VISIBILITY_HIDDEN FMT_COMPILE_STRING : base { \
using char_type = fmt::remove_cvref_t<decltype(s[0])>; \
using char_type FMT_MAYBE_UNUSED = fmt::remove_cvref_t<decltype(s[0])>; \
FMT_MAYBE_UNUSED FMT_CONSTEXPR explicit \
operator fmt::basic_string_view<char_type>() const { \
return fmt::detail_exported::compile_string_to_view<char_type>(s); \
@ -1981,7 +2002,10 @@ auto write_int_localized(OutputIt out, UInt value, unsigned prefix,
grouping.count_separators(num_digits));
return write_padded<align::right>(
out, specs, size, size, [&](reserve_iterator<OutputIt> it) {
if (prefix != 0) *it++ = static_cast<Char>(prefix);
if (prefix != 0) {
char sign = static_cast<char>(prefix);
*it++ = static_cast<Char>(sign);
}
return grouping.apply(it, string_view(digits, to_unsigned(num_digits)));
});
}
@ -2123,29 +2147,30 @@ class counting_iterator {
FMT_UNCHECKED_ITERATOR(counting_iterator);
struct value_type {
template <typename T> void operator=(const T&) {}
template <typename T> FMT_CONSTEXPR void operator=(const T&) {}
};
counting_iterator() : count_(0) {}
FMT_CONSTEXPR counting_iterator() : count_(0) {}
size_t count() const { return count_; }
FMT_CONSTEXPR size_t count() const { return count_; }
counting_iterator& operator++() {
FMT_CONSTEXPR counting_iterator& operator++() {
++count_;
return *this;
}
counting_iterator operator++(int) {
FMT_CONSTEXPR counting_iterator operator++(int) {
auto it = *this;
++*this;
return it;
}
friend counting_iterator operator+(counting_iterator it, difference_type n) {
FMT_CONSTEXPR friend counting_iterator operator+(counting_iterator it,
difference_type n) {
it.count_ += static_cast<size_t>(n);
return it;
}
value_type operator*() const { return {}; }
FMT_CONSTEXPR value_type operator*() const { return {}; }
};
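Editor's note: counting_iterator is the output iterator behind fmt::formatted_size, which measures output without materializing it; marking its members FMT_CONSTEXPR is what allows that measurement in constant-evaluated contexts. A sketch, under the assumption of C++20 and the FMT_COMPILE fast path:

    #include <fmt/compile.h>

    constexpr auto n = fmt::formatted_size(FMT_COMPILE("{}"), 42000);
    static_assert(n == 5);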
template <typename Char, typename OutputIt>
@ -2991,7 +3016,7 @@ FMT_CONSTEXPR20 inline void format_dragon(basic_fp<uint128_t> value,
upper = &upper_store;
}
}
bool even = (value.f & 1) == 0;
int even = static_cast<int>((value.f & 1) == 0);
if (!upper) upper = &lower;
if ((flags & dragon::fixup) != 0) {
if (add_compare(numerator, *upper, denominator) + even <= 0) {

View File

@ -10,6 +10,12 @@
#include <fstream>
#include <ostream>
#if defined(_WIN32) && defined(__GLIBCXX__)
# include <ext/stdio_filebuf.h>
# include <ext/stdio_sync_filebuf.h>
#elif defined(_WIN32) && defined(_LIBCPP_VERSION)
# include <__std_stream>
#endif
#include "format.h"
@ -51,43 +57,50 @@ struct is_streamable<
(std::is_convertible<T, int>::value && !std::is_enum<T>::value)>>
: std::false_type {};
template <typename Char> FILE* get_file(std::basic_filebuf<Char>&) {
return nullptr;
}
struct dummy_filebuf {
FILE* _Myfile;
};
template <typename T, typename U = int> struct ms_filebuf {
using type = dummy_filebuf;
};
template <typename T> struct ms_filebuf<T, decltype(T::_Myfile, 0)> {
using type = T;
};
using filebuf_type = ms_filebuf<std::filebuf>::type;
FILE* get_file(filebuf_type& buf);
// Generate a unique explicit instantiation in every translation unit using a tag
// type in an anonymous namespace.
namespace {
struct filebuf_access_tag {};
struct file_access_tag {};
} // namespace
template <typename Tag, typename FileMemberPtr, FileMemberPtr file>
class filebuf_access {
friend FILE* get_file(filebuf_type& buf) { return buf.*file; }
template <class Tag, class BufType, FILE* BufType::*FileMemberPtr>
class file_access {
friend auto get_file(BufType& obj) -> FILE* { return obj.*FileMemberPtr; }
};
template class filebuf_access<filebuf_access_tag,
decltype(&filebuf_type::_Myfile),
&filebuf_type::_Myfile>;
inline bool write(std::filebuf& buf, fmt::string_view data) {
FILE* f = get_file(buf);
if (!f) return false;
print(f, data);
return true;
#if FMT_MSC_VERSION
template class file_access<file_access_tag, std::filebuf,
&std::filebuf::_Myfile>;
auto get_file(std::filebuf&) -> FILE*;
#elif defined(_WIN32) && defined(_LIBCPP_VERSION)
template class file_access<file_access_tag, std::__stdoutbuf<char>,
&std::__stdoutbuf<char>::__file_>;
auto get_file(std::__stdoutbuf<char>&) -> FILE*;
#endif
inline bool write_ostream_unicode(std::ostream& os, fmt::string_view data) {
#if FMT_MSC_VERSION
if (auto* buf = dynamic_cast<std::filebuf*>(os.rdbuf()))
if (FILE* f = get_file(*buf)) return write_console(f, data);
#elif defined(_WIN32) && defined(__GLIBCXX__)
auto* rdbuf = os.rdbuf();
FILE* c_file;
if (auto* fbuf = dynamic_cast<__gnu_cxx::stdio_sync_filebuf<char>*>(rdbuf))
c_file = fbuf->file();
else if (auto* fbuf = dynamic_cast<__gnu_cxx::stdio_filebuf<char>*>(rdbuf))
c_file = fbuf->file();
else
return false;
if (c_file) return write_console(c_file, data);
#elif defined(_WIN32) && defined(_LIBCPP_VERSION)
if (auto* buf = dynamic_cast<std::__stdoutbuf<char>*>(os.rdbuf()))
if (FILE* f = get_file(*buf)) return write_console(f, data);
#else
ignore_unused(os, data);
#endif
return false;
}
inline bool write(std::wfilebuf&, fmt::basic_string_view<wchar_t>) {
inline bool write_ostream_unicode(std::wostream&,
fmt::basic_string_view<wchar_t>) {
return false;
}
@ -95,10 +108,6 @@ inline bool write(std::wfilebuf&, fmt::basic_string_view<wchar_t>) {
// It is a separate function rather than a part of vprint to simplify testing.
template <typename Char>
void write_buffer(std::basic_ostream<Char>& os, buffer<Char>& buf) {
if (const_check(FMT_MSC_VERSION)) {
auto filebuf = dynamic_cast<std::basic_filebuf<Char>*>(os.rdbuf());
if (filebuf && write(*filebuf, {buf.data(), buf.size()})) return;
}
const Char* buf_data = buf.data();
using unsigned_streamsize = std::make_unsigned<std::streamsize>::type;
unsigned_streamsize size = buf.size();
@ -130,6 +139,8 @@ template <typename T> struct streamed_view { const T& value; };
// Formats an object of type T that has an overloaded ostream operator<<.
template <typename Char>
struct basic_ostream_formatter : formatter<basic_string_view<Char>, Char> {
void set_debug_format() = delete;
template <typename T, typename OutputIt>
auto format(const T& value, basic_format_context<OutputIt, Char>& ctx) const
-> OutputIt {
@ -142,12 +153,13 @@ struct basic_ostream_formatter : formatter<basic_string_view<Char>, Char> {
using ostream_formatter = basic_ostream_formatter<char>;
template <typename T>
struct formatter<detail::streamed_view<T>> : ostream_formatter {
template <typename T, typename Char>
struct formatter<detail::streamed_view<T>, Char>
: basic_ostream_formatter<Char> {
template <typename OutputIt>
auto format(detail::streamed_view<T> view,
basic_format_context<OutputIt, char>& ctx) const -> OutputIt {
return ostream_formatter::format(view.value, ctx);
basic_format_context<OutputIt, Char>& ctx) const -> OutputIt {
return basic_ostream_formatter<Char>::format(view.value, ctx);
}
};
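Editor's note: streamed_view is what fmt::streamed returns; generalizing the formatter over Char makes it usable from wide formatting contexts as well. A minimal sketch with the public API:

    #include <fmt/ostream.h>
    #include <ostream>

    struct point { int x, y; };
    std::ostream& operator<<(std::ostream& os, const point& p) {
      return os << '(' << p.x << ", " << p.y << ')';
    }

    int main() {
      // Uses the type's operator<< through the streamed_view formatter.
      fmt::print("{}\n", fmt::streamed(point{1, 2}));  // prints (1, 2)
    }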
@ -175,6 +187,13 @@ struct fallback_formatter<T, Char, enable_if_t<is_streamable<T, Char>::value>>
using basic_ostream_formatter<Char>::format;
};
inline void vprint_directly(std::ostream& os, string_view format_str,
format_args args) {
auto buffer = memory_buffer();
detail::vformat_to(buffer, format_str, args);
detail::write_buffer(os, buffer);
}
} // namespace detail
FMT_MODULE_EXPORT template <typename Char>
@ -183,6 +202,7 @@ void vprint(std::basic_ostream<Char>& os,
basic_format_args<buffer_context<type_identity_t<Char>>> args) {
auto buffer = basic_memory_buffer<Char>();
detail::vformat_to(buffer, format_str, args);
if (detail::write_ostream_unicode(os, {buffer.data(), buffer.size()})) return;
detail::write_buffer(os, buffer);
}
@ -197,7 +217,11 @@ void vprint(std::basic_ostream<Char>& os,
*/
FMT_MODULE_EXPORT template <typename... T>
void print(std::ostream& os, format_string<T...> fmt, T&&... args) {
vprint(os, fmt, fmt::make_format_args(args...));
const auto& vargs = fmt::make_format_args(args...);
if (detail::is_utf8())
vprint(os, fmt, vargs);
else
detail::vprint_directly(os, fmt, vargs);
}
FMT_MODULE_EXPORT

View File

@ -270,8 +270,8 @@ template <typename Range>
using uncvref_type = remove_cvref_t<range_reference_type<Range>>;
template <typename Range>
using uncvref_first_type = remove_cvref_t<
decltype(std::declval<range_reference_type<Range>>().first)>;
using uncvref_first_type =
remove_cvref_t<decltype(std::declval<range_reference_type<Range>>().first)>;
template <typename Range>
using uncvref_second_type = remove_cvref_t<
@ -326,18 +326,37 @@ struct formatter<TupleT, Char,
enable_if_t<fmt::is_tuple_like<TupleT>::value &&
fmt::is_tuple_formattable<TupleT, Char>::value>> {
private:
basic_string_view<Char> separator_ = detail::string_literal<Char, ',', ' '>{};
basic_string_view<Char> opening_bracket_ =
detail::string_literal<Char, '('>{};
basic_string_view<Char> closing_bracket_ =
detail::string_literal<Char, ')'>{};
// C++11 generic lambda for format().
template <typename FormatContext> struct format_each {
template <typename T> void operator()(const T& v) {
if (i > 0) out = detail::write_delimiter(out);
if (i > 0) out = detail::copy_str<Char>(separator, out);
out = detail::write_range_entry<Char>(out, v);
++i;
}
int i;
typename FormatContext::iterator& out;
basic_string_view<Char> separator;
};
public:
FMT_CONSTEXPR formatter() {}
FMT_CONSTEXPR void set_separator(basic_string_view<Char> sep) {
separator_ = sep;
}
FMT_CONSTEXPR void set_brackets(basic_string_view<Char> open,
basic_string_view<Char> close) {
opening_bracket_ = open;
closing_bracket_ = close;
}
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
@ -347,9 +366,9 @@ struct formatter<TupleT, Char,
auto format(const TupleT& values, FormatContext& ctx) const
-> decltype(ctx.out()) {
auto out = ctx.out();
*out++ = '(';
detail::for_each(values, format_each<FormatContext>{0, out});
*out++ = ')';
out = detail::copy_str<Char>(opening_bracket_, out);
detail::for_each(values, format_each<FormatContext>{0, out, separator_});
out = detail::copy_str<Char>(closing_bracket_, out);
return out;
}
};
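Editor's note: the tuple formatter now routes brackets and separators through set_brackets/set_separator instead of hard-coded '(', ')' and write_delimiter, matching the range formatter below. Default output is unchanged:

    #include <fmt/ranges.h>
    #include <tuple>

    int main() {
      fmt::print("{}\n", std::tuple<int, char>(1, 'a'));  // prints (1, 'a')
    }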
@ -357,9 +376,8 @@ struct formatter<TupleT, Char,
template <typename T, typename Char> struct is_range {
static constexpr const bool value =
detail::is_range_<T>::value && !detail::is_std_string_like<T>::value &&
!detail::is_map<T>::value &&
!std::is_convertible<T, std::basic_string<Char>>::value &&
!std::is_constructible<detail::std_string_view<Char>, T>::value;
!std::is_convertible<T, detail::std_string_view<Char>>::value;
};
namespace detail {
@ -390,40 +408,88 @@ using range_formatter_type = conditional_t<
template <typename R>
using maybe_const_range =
conditional_t<has_const_begin_end<R>::value, const R, R>;
// Workaround a bug in MSVC 2015 and earlier.
#if !FMT_MSC_VERSION || FMT_MSC_VERSION >= 1910
template <typename R, typename Char>
struct is_formattable_delayed
: disjunction<
is_formattable<uncvref_type<maybe_const_range<R>>, Char>,
has_fallback_formatter<uncvref_type<maybe_const_range<R>>, Char>> {};
#endif
} // namespace detail
template <typename R, typename Char>
struct formatter<
R, Char,
enable_if_t<
conjunction<fmt::is_range<R, Char>
// Workaround a bug in MSVC 2017 and earlier.
#if !FMT_MSC_VERSION || FMT_MSC_VERSION >= 1920
,
disjunction<
is_formattable<detail::uncvref_type<detail::maybe_const_range<R>>,
Char>,
detail::has_fallback_formatter<
detail::uncvref_type<detail::maybe_const_range<R>>, Char>
>
#endif
>::value
>> {
template <typename T, typename Char, typename Enable = void>
struct range_formatter;
using range_type = detail::maybe_const_range<R>;
using formatter_type =
detail::range_formatter_type<Char, detail::uncvref_type<range_type>>;
formatter_type underlying_;
template <typename T, typename Char>
struct range_formatter<
T, Char,
enable_if_t<conjunction<
std::is_same<T, remove_cvref_t<T>>,
disjunction<is_formattable<T, Char>,
detail::has_fallback_formatter<T, Char>>>::value>> {
private:
detail::range_formatter_type<Char, T> underlying_;
bool custom_specs_ = false;
basic_string_view<Char> separator_ = detail::string_literal<Char, ',', ' '>{};
basic_string_view<Char> opening_bracket_ =
detail::string_literal<Char, '['>{};
basic_string_view<Char> closing_bracket_ =
detail::string_literal<Char, ']'>{};
template <class U>
FMT_CONSTEXPR static auto maybe_set_debug_format(U& u, int)
-> decltype(u.set_debug_format()) {
u.set_debug_format();
}
template <class U>
FMT_CONSTEXPR static void maybe_set_debug_format(U&, ...) {}
FMT_CONSTEXPR void maybe_set_debug_format() {
maybe_set_debug_format(underlying_, 0);
}
public:
FMT_CONSTEXPR range_formatter() {}
FMT_CONSTEXPR auto underlying() -> detail::range_formatter_type<Char, T>& {
return underlying_;
}
FMT_CONSTEXPR void set_separator(basic_string_view<Char> sep) {
separator_ = sep;
}
FMT_CONSTEXPR void set_brackets(basic_string_view<Char> open,
basic_string_view<Char> close) {
opening_bracket_ = open;
closing_bracket_ = close;
}
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
auto it = ctx.begin();
auto end = ctx.end();
if (it == end || *it == '}') return it;
if (it == end || *it == '}') {
maybe_set_debug_format();
return it;
}
if (*it == 'n') {
set_brackets({}, {});
++it;
}
if (*it == '}') {
maybe_set_debug_format();
return it;
}
if (*it != ':')
FMT_THROW(format_error("no top-level range formatters supported"));
FMT_THROW(format_error("no other top-level range formatters supported"));
custom_specs_ = true;
++it;
@ -431,75 +497,100 @@ struct formatter<
return underlying_.parse(ctx);
}
template <typename FormatContext>
auto format(range_type& range, FormatContext& ctx) const
-> decltype(ctx.out()) {
Char prefix = detail::is_set<R>::value ? '{' : '[';
Char postfix = detail::is_set<R>::value ? '}' : ']';
template <typename R, class FormatContext>
auto format(R&& range, FormatContext& ctx) const -> decltype(ctx.out()) {
detail::range_mapper<buffer_context<Char>> mapper;
auto out = ctx.out();
*out++ = prefix;
out = detail::copy_str<Char>(opening_bracket_, out);
int i = 0;
auto it = detail::range_begin(range);
auto end = detail::range_end(range);
for (; it != end; ++it) {
if (i > 0) out = detail::write_delimiter(out);
if (custom_specs_) {
if (i > 0) out = detail::copy_str<Char>(separator_, out);
;
ctx.advance_to(out);
out = underlying_.format(mapper.map(*it), ctx);
} else {
out = detail::write_range_entry<Char>(out, *it);
}
++i;
}
*out++ = postfix;
out = detail::copy_str<Char>(closing_bracket_, out);
return out;
}
};
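Editor's note: the rewritten range_formatter adds the two pieces of spec syntax handled in parse() above: 'n' suppresses the brackets, and a spec after a second ':' is applied to every element. A short sketch:

    #include <fmt/ranges.h>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3};
      fmt::print("{}\n", v);      // [1, 2, 3]
      fmt::print("{:n}\n", v);    // 1, 2, 3
      fmt::print("{::#x}\n", v);  // [0x1, 0x2, 0x3]
    }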
template <typename T, typename Char>
struct formatter<
T, Char,
enable_if_t<conjunction<detail::is_map<T>
// Workaround a bug in MSVC 2017 and earlier.
#if !FMT_MSC_VERSION || FMT_MSC_VERSION >= 1920
,
disjunction<
is_formattable<detail::uncvref_first_type<T>, Char>,
detail::has_fallback_formatter<detail::uncvref_first_type<T>, Char>
>,
disjunction<
is_formattable<detail::uncvref_second_type<T>, Char>,
detail::has_fallback_formatter<detail::uncvref_second_type<T>, Char>
>
#endif
>::value
>> {
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
enum class range_format { disabled, map, set, sequence, string, debug_string };
namespace detail {
template <typename T> struct range_format_kind_ {
static constexpr auto value = std::is_same<range_reference_type<T>, T>::value
? range_format::disabled
: is_map<T>::value ? range_format::map
: is_set<T>::value ? range_format::set
: range_format::sequence;
};
template <range_format K, typename R, typename Char, typename Enable = void>
struct range_default_formatter;
template <range_format K>
using range_format_constant = std::integral_constant<range_format, K>;
template <range_format K, typename R, typename Char>
struct range_default_formatter<
K, R, Char,
enable_if_t<(K == range_format::sequence || K == range_format::map ||
K == range_format::set)>> {
using range_type = detail::maybe_const_range<R>;
range_formatter<detail::uncvref_type<range_type>, Char> underlying_;
FMT_CONSTEXPR range_default_formatter() { init(range_format_constant<K>()); }
FMT_CONSTEXPR void init(range_format_constant<range_format::set>) {
underlying_.set_brackets(detail::string_literal<Char, '{'>{},
detail::string_literal<Char, '}'>{});
}
template <
typename FormatContext, typename U,
FMT_ENABLE_IF(
std::is_same<U, conditional_t<detail::has_const_begin_end<T>::value,
const T, T>>::value)>
auto format(U& map, FormatContext& ctx) const -> decltype(ctx.out()) {
auto out = ctx.out();
*out++ = '{';
int i = 0;
for (const auto& item : map) {
if (i > 0) out = detail::write_delimiter(out);
out = detail::write_range_entry<Char>(out, item.first);
*out++ = ':';
*out++ = ' ';
out = detail::write_range_entry<Char>(out, item.second);
++i;
FMT_CONSTEXPR void init(range_format_constant<range_format::map>) {
underlying_.set_brackets(detail::string_literal<Char, '{'>{},
detail::string_literal<Char, '}'>{});
underlying_.underlying().set_brackets({}, {});
underlying_.underlying().set_separator(
detail::string_literal<Char, ':', ' '>{});
}
*out++ = '}';
return out;
FMT_CONSTEXPR void init(range_format_constant<range_format::sequence>) {}
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return underlying_.parse(ctx);
}
template <typename FormatContext>
auto format(range_type& range, FormatContext& ctx) const
-> decltype(ctx.out()) {
return underlying_.format(range, ctx);
}
};
} // namespace detail
template <typename T, typename Char, typename Enable = void>
struct range_format_kind
: conditional_t<
is_range<T, Char>::value, detail::range_format_kind_<T>,
std::integral_constant<range_format, range_format::disabled>> {};
template <typename R, typename Char>
struct formatter<
R, Char,
enable_if_t<conjunction<bool_constant<range_format_kind<R, Char>::value !=
range_format::disabled>
// Workaround a bug in MSVC 2015 and earlier.
#if !FMT_MSC_VERSION || FMT_MSC_VERSION >= 1910
,
detail::is_formattable_delayed<R, Char>
#endif
>::value>>
: detail::range_default_formatter<range_format_kind<R, Char>::value, R,
Char> {
};
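Editor's note: range_format_kind classifies a range once, and range_default_formatter::init then picks brace brackets for sets, and brace brackets plus "key: value" element formatting for maps. Observable behaviour:

    #include <fmt/ranges.h>
    #include <map>
    #include <set>

    int main() {
      fmt::print("{}\n", std::set<int>{1, 2});            // {1, 2}
      fmt::print("{}\n", std::map<int, char>{{1, 'a'}});  // {1: 'a'}
    }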
template <typename Char, typename... T> struct tuple_join_view : detail::view {

View File

@ -57,10 +57,6 @@ inline void write_escaped_path<std::filesystem::path::value_type>(
} // namespace detail
#if !FMT_MSC_VERSION || FMT_MSC_VERSION >= 1920
// For MSVC 2017 and earlier using the partial specialization
// would cause an ambiguity error, therefore we provide it only
// conditionally.
template <typename Char>
struct formatter<std::filesystem::path, Char>
: formatter<basic_string_view<Char>> {
@ -73,7 +69,6 @@ struct formatter<std::filesystem::path, Char>
basic_string_view<Char>(quoted.data(), quoted.size()), ctx);
}
};
#endif
FMT_END_NAMESPACE
#endif
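Editor's note: dropping the MSVC < 2019 guard makes the std::filesystem::path formatter available unconditionally. Basic usage, for reference:

    #include <fmt/std.h>
    #include <filesystem>

    int main() {
      // Written via write_escaped_path, so the output is quoted and escaped.
      fmt::print("{}\n", std::filesystem::path("/tmp/file.txt"));
    }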

View File

@ -9,7 +9,6 @@
#define FMT_XCHAR_H_
#include <cwchar>
#include <tuple>
#include "format.h"
@ -30,9 +29,11 @@ using wmemory_buffer = basic_memory_buffer<wchar_t>;
#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409
// Workaround broken conversion on older gcc.
template <typename... Args> using wformat_string = wstring_view;
inline auto runtime(wstring_view s) -> wstring_view { return s; }
#else
template <typename... Args>
using wformat_string = basic_format_string<wchar_t, type_identity_t<Args>...>;
inline auto runtime(wstring_view s) -> basic_runtime<wchar_t> { return {{s}}; }
#endif
template <> struct is_char<wchar_t> : std::true_type {};
@ -82,20 +83,16 @@ auto vformat(basic_string_view<Char> format_str,
return to_string(buffer);
}
#if !FMT_GCC_VERSION || FMT_GCC_VERSION >= 409
template <typename... Args>
using wformat_string = basic_format_string<wchar_t, type_identity_t<Args>...>;
#endif
template <typename... T>
auto format(wformat_string<T...> fmt, T&&... args) -> std::wstring {
return vformat(fmt, fmt::make_wformat_args(args...));
return vformat(fmt::wstring_view(fmt), fmt::make_wformat_args(args...));
}
// Pass char_t as a default template parameter instead of using
// std::basic_string<char_t<S>> to reduce the symbol size.
template <typename S, typename... Args, typename Char = char_t<S>,
FMT_ENABLE_IF(!std::is_same<Char, char>::value)>
FMT_ENABLE_IF(!std::is_same<Char, char>::value &&
!std::is_same<Char, wchar_t>::value)>
auto format(const S& format_str, Args&&... args) -> std::basic_string<Char> {
return vformat(detail::to_string_view(format_str),
fmt::make_format_args<buffer_context<Char>>(args...));
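Editor's note: the xchar changes mirror the narrow-character API: wformat_string enables compile-time checking of wide format strings, and fmt::runtime(wstring_view) opts back into runtime checking. A sketch:

    #include <fmt/xchar.h>

    int main() {
      std::wstring a = fmt::format(L"{}", 42);                // compile-time checked
      std::wstring b = fmt::format(fmt::runtime(L"{}"), 42);  // runtime checked
    }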

View File

@ -1,43 +0,0 @@
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIGURATION']
platform = os.environ['PLATFORM']
path = os.environ['PATH']
image = os.environ['APPVEYOR_BUILD_WORKER_IMAGE']
jobid = os.environ['APPVEYOR_JOB_ID']
cmake_command = ['cmake', '-DFMT_PEDANTIC=ON', '-DCMAKE_BUILD_TYPE=' + config, '..']
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks
# MinGW config.
path = path.replace(r'C:\Program Files (x86)\Git\bin', '')
os.environ['PATH'] = r'C:\MinGW\bin;' + path
else:
# Add MSBuild 14.0 to PATH as described in
# http://help.appveyor.com/discussions/problems/2229-v140-not-found-on-vs2105rc.
os.environ['PATH'] = r'C:\Program Files (x86)\MSBuild\15.0\Bin;' + path
if image == 'Visual Studio 2019':
generator = 'Visual Studio 16 2019'
if platform == 'x64':
cmake_command.extend(['-A', 'x64'])
else:
if image == 'Visual Studio 2015':
generator = 'Visual Studio 14 2015'
elif image == 'Visual Studio 2017':
generator = 'Visual Studio 15 2017'
if platform == 'x64':
generator += ' Win64'
cmake_command.append('-G' + generator)
build_command = ['cmake', '--build', '.', '--config', config, '--', '/m:4']
test_command = ['ctest', '-C', config]
check_call(cmake_command)
check_call(build_command)
check_call(test_command)

View File

@ -1,31 +0,0 @@
configuration:
- Debug
- Release
clone_depth: 1
image:
- Visual Studio 2015
platform:
- x64
environment:
CTEST_OUTPUT_ON_FAILURE: 1
MSVC_DEFAULT_OPTIONS: ON
BUILD: msvc
before_build:
- mkdir build
- cd build
build_script:
- python ../support/appveyor-build.py
on_failure:
- appveyor PushArtifact Testing/Temporary/LastTest.log
- appveyor AddTest test
# Uncomment this to debug AppVeyor failures.
#on_finish:
# - ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))

View File

@ -183,6 +183,12 @@ def update_site(env):
with rewrite(index) as b:
b.data = b.data.replace(
'doc/latest/index.html#format-string-syntax', 'syntax.html')
# Fix issues in syntax.rst.
index = os.path.join(target_doc_dir, 'syntax.rst')
with rewrite(index) as b:
b.data = b.data.replace(
'..productionlist:: sf\n', '.. productionlist:: sf\n ')
b.data = b.data.replace('Examples:\n', 'Examples::\n')
# Build the docs.
html_dir = os.path.join(env.build_dir, 'html')
if os.path.exists(html_dir):

View File

@ -3,7 +3,7 @@
#define LLHTTP_VERSION_MAJOR 6
#define LLHTTP_VERSION_MINOR 0
#define LLHTTP_VERSION_PATCH 7
#define LLHTTP_VERSION_PATCH 9
#ifndef LLHTTP_STRICT_MODE
# define LLHTTP_STRICT_MODE 0
@ -102,7 +102,8 @@ enum llhttp_lenient_flags {
LENIENT_HEADERS = 0x1,
LENIENT_CHUNKED_LENGTH = 0x2,
LENIENT_KEEP_ALIVE = 0x4,
LENIENT_TRANSFER_ENCODING = 0x8
LENIENT_TRANSFER_ENCODING = 0x8,
LENIENT_VERSION = 0x10
};
typedef enum llhttp_lenient_flags llhttp_lenient_flags_t;
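Editor's note: LENIENT_VERSION is new in llhttp 6.0.9 and relaxes validation of the HTTP version field. A hypothetical opt-in sketch; it assumes an llhttp_set_lenient_version() setter in the style of the existing llhttp_set_lenient_* functions, so verify against the vendored llhttp.h before relying on it:

    llhttp_t parser;
    llhttp_settings_t settings;
    llhttp_settings_init(&settings);
    llhttp_init(&parser, HTTP_BOTH, &settings);
    // Assumed API, mirroring llhttp_set_lenient_headers() and friends:
    llhttp_set_lenient_version(&parser, 1);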

File diff suppressed because it is too large

thirdparty/webp/sharpyuv/sharpyuv.c (new vendored file, 498 lines)

@ -0,0 +1,498 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Sharp RGB to YUV conversion.
//
// Author: Skal (pascal.massimino@gmail.com)
#include "sharpyuv/sharpyuv.h"
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "src/webp/types.h"
#include "src/dsp/cpu.h"
#include "sharpyuv/sharpyuv_dsp.h"
#include "sharpyuv/sharpyuv_gamma.h"
//------------------------------------------------------------------------------
// Sharp RGB->YUV conversion
static const int kNumIterations = 4;
#define YUV_FIX 16 // fixed-point precision for RGB->YUV
static const int kYuvHalf = 1 << (YUV_FIX - 1);
// Max bit depth so that intermediate calculations fit in 16 bits.
static const int kMaxBitDepth = 14;
// Returns the precision shift to use based on the input rgb_bit_depth.
static int GetPrecisionShift(int rgb_bit_depth) {
// Try to add 2 bits of precision if it fits in kMaxBitDepth. Otherwise remove
// bits if needed.
return ((rgb_bit_depth + 2) <= kMaxBitDepth) ? 2
: (kMaxBitDepth - rgb_bit_depth);
}
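// (editor's note) Worked examples: rgb_bit_depth == 8 gives 8 + 2 <= 14, so
// the shift is +2 (two extra precision bits); rgb_bit_depth == 16 gives
// 16 + 2 > 14, so the shift is 14 - 16 == -2, truncating to kMaxBitDepth.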
typedef int16_t fixed_t; // signed type with extra precision for UV
typedef uint16_t fixed_y_t; // unsigned type with extra precision for W
//------------------------------------------------------------------------------
static uint8_t clip_8b(fixed_t v) {
return (!(v & ~0xff)) ? (uint8_t)v : (v < 0) ? 0u : 255u;
}
static uint16_t clip(fixed_t v, int max) {
return (v < 0) ? 0 : (v > max) ? max : (uint16_t)v;
}
static fixed_y_t clip_bit_depth(int y, int bit_depth) {
const int max = (1 << bit_depth) - 1;
return (!(y & ~max)) ? (fixed_y_t)y : (y < 0) ? 0 : max;
}
//------------------------------------------------------------------------------
static int RGBToGray(int64_t r, int64_t g, int64_t b) {
const int64_t luma = 13933 * r + 46871 * g + 4732 * b + kYuvHalf;
return (int)(luma >> YUV_FIX);
}
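// (editor's note) These are the Rec.709 luma coefficients in YUV_FIX
// precision: 13933 + 46871 + 4732 == 65536 == 1 << YUV_FIX; they match
// kRec709FullMatrix in sharpyuv_csp.c.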
static uint32_t ScaleDown(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
int rgb_bit_depth) {
const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth);
const uint32_t A = SharpYuvGammaToLinear(a, bit_depth);
const uint32_t B = SharpYuvGammaToLinear(b, bit_depth);
const uint32_t C = SharpYuvGammaToLinear(c, bit_depth);
const uint32_t D = SharpYuvGammaToLinear(d, bit_depth);
return SharpYuvLinearToGamma((A + B + C + D + 2) >> 2, bit_depth);
}
static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int w,
int rgb_bit_depth) {
const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth);
int i;
for (i = 0; i < w; ++i) {
const uint32_t R = SharpYuvGammaToLinear(src[0 * w + i], bit_depth);
const uint32_t G = SharpYuvGammaToLinear(src[1 * w + i], bit_depth);
const uint32_t B = SharpYuvGammaToLinear(src[2 * w + i], bit_depth);
const uint32_t Y = RGBToGray(R, G, B);
dst[i] = (fixed_y_t)SharpYuvLinearToGamma(Y, bit_depth);
}
}
static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2,
fixed_t* dst, int uv_w, int rgb_bit_depth) {
int i;
for (i = 0; i < uv_w; ++i) {
const int r =
ScaleDown(src1[0 * uv_w + 0], src1[0 * uv_w + 1], src2[0 * uv_w + 0],
src2[0 * uv_w + 1], rgb_bit_depth);
const int g =
ScaleDown(src1[2 * uv_w + 0], src1[2 * uv_w + 1], src2[2 * uv_w + 0],
src2[2 * uv_w + 1], rgb_bit_depth);
const int b =
ScaleDown(src1[4 * uv_w + 0], src1[4 * uv_w + 1], src2[4 * uv_w + 0],
src2[4 * uv_w + 1], rgb_bit_depth);
const int W = RGBToGray(r, g, b);
dst[0 * uv_w] = (fixed_t)(r - W);
dst[1 * uv_w] = (fixed_t)(g - W);
dst[2 * uv_w] = (fixed_t)(b - W);
dst += 1;
src1 += 2;
src2 += 2;
}
}
static void StoreGray(const fixed_y_t* rgb, fixed_y_t* y, int w) {
int i;
assert(w > 0);
for (i = 0; i < w; ++i) {
y[i] = RGBToGray(rgb[0 * w + i], rgb[1 * w + i], rgb[2 * w + i]);
}
}
//------------------------------------------------------------------------------
static WEBP_INLINE fixed_y_t Filter2(int A, int B, int W0, int bit_depth) {
const int v0 = (A * 3 + B + 2) >> 2;
return clip_bit_depth(v0 + W0, bit_depth);
}
//------------------------------------------------------------------------------
static WEBP_INLINE int Shift(int v, int shift) {
return (shift >= 0) ? (v << shift) : (v >> -shift);
}
static void ImportOneRow(const uint8_t* const r_ptr,
const uint8_t* const g_ptr,
const uint8_t* const b_ptr,
int rgb_step,
int rgb_bit_depth,
int pic_width,
fixed_y_t* const dst) {
// Convert the rgb_step from a number of bytes to a number of uint8_t or
// uint16_t values depending on the bit depth.
const int step = (rgb_bit_depth > 8) ? rgb_step / 2 : rgb_step;
int i;
const int w = (pic_width + 1) & ~1;
for (i = 0; i < pic_width; ++i) {
const int off = i * step;
const int shift = GetPrecisionShift(rgb_bit_depth);
if (rgb_bit_depth == 8) {
dst[i + 0 * w] = Shift(r_ptr[off], shift);
dst[i + 1 * w] = Shift(g_ptr[off], shift);
dst[i + 2 * w] = Shift(b_ptr[off], shift);
} else {
dst[i + 0 * w] = Shift(((uint16_t*)r_ptr)[off], shift);
dst[i + 1 * w] = Shift(((uint16_t*)g_ptr)[off], shift);
dst[i + 2 * w] = Shift(((uint16_t*)b_ptr)[off], shift);
}
}
if (pic_width & 1) { // replicate rightmost pixel
dst[pic_width + 0 * w] = dst[pic_width + 0 * w - 1];
dst[pic_width + 1 * w] = dst[pic_width + 1 * w - 1];
dst[pic_width + 2 * w] = dst[pic_width + 2 * w - 1];
}
}
static void InterpolateTwoRows(const fixed_y_t* const best_y,
const fixed_t* prev_uv,
const fixed_t* cur_uv,
const fixed_t* next_uv,
int w,
fixed_y_t* out1,
fixed_y_t* out2,
int rgb_bit_depth) {
const int uv_w = w >> 1;
const int len = (w - 1) >> 1; // length to filter
int k = 3;
const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth);
while (k-- > 0) { // process each R/G/B segments in turn
// special boundary case for i==0
out1[0] = Filter2(cur_uv[0], prev_uv[0], best_y[0], bit_depth);
out2[0] = Filter2(cur_uv[0], next_uv[0], best_y[w], bit_depth);
SharpYuvFilterRow(cur_uv, prev_uv, len, best_y + 0 + 1, out1 + 1,
bit_depth);
SharpYuvFilterRow(cur_uv, next_uv, len, best_y + w + 1, out2 + 1,
bit_depth);
// special boundary case for i == w - 1 when w is even
if (!(w & 1)) {
out1[w - 1] = Filter2(cur_uv[uv_w - 1], prev_uv[uv_w - 1],
best_y[w - 1 + 0], bit_depth);
out2[w - 1] = Filter2(cur_uv[uv_w - 1], next_uv[uv_w - 1],
best_y[w - 1 + w], bit_depth);
}
out1 += w;
out2 += w;
prev_uv += uv_w;
cur_uv += uv_w;
next_uv += uv_w;
}
}
static WEBP_INLINE int RGBToYUVComponent(int r, int g, int b,
const int coeffs[4], int sfix) {
const int srounder = 1 << (YUV_FIX + sfix - 1);
const int luma = coeffs[0] * r + coeffs[1] * g + coeffs[2] * b +
coeffs[3] + srounder;
return (luma >> (YUV_FIX + sfix));
}
static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
uint8_t* y_ptr, int y_stride, uint8_t* u_ptr,
int u_stride, uint8_t* v_ptr, int v_stride,
int rgb_bit_depth,
int yuv_bit_depth, int width, int height,
const SharpYuvConversionMatrix* yuv_matrix) {
int i, j;
const fixed_t* const best_uv_base = best_uv;
const int w = (width + 1) & ~1;
const int h = (height + 1) & ~1;
const int uv_w = w >> 1;
const int uv_h = h >> 1;
const int sfix = GetPrecisionShift(rgb_bit_depth);
const int yuv_max = (1 << yuv_bit_depth) - 1;
for (best_uv = best_uv_base, j = 0; j < height; ++j) {
for (i = 0; i < width; ++i) {
const int off = (i >> 1);
const int W = best_y[i];
const int r = best_uv[off + 0 * uv_w] + W;
const int g = best_uv[off + 1 * uv_w] + W;
const int b = best_uv[off + 2 * uv_w] + W;
const int y = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_y, sfix);
if (yuv_bit_depth <= 8) {
y_ptr[i] = clip_8b(y);
} else {
((uint16_t*)y_ptr)[i] = clip(y, yuv_max);
}
}
best_y += w;
best_uv += (j & 1) * 3 * uv_w;
y_ptr += y_stride;
}
for (best_uv = best_uv_base, j = 0; j < uv_h; ++j) {
for (i = 0; i < uv_w; ++i) {
const int off = i;
// Note r, g and b values here are off by W, but a constant offset on all
// 3 components doesn't change the value of u and v with a YCbCr matrix.
const int r = best_uv[off + 0 * uv_w];
const int g = best_uv[off + 1 * uv_w];
const int b = best_uv[off + 2 * uv_w];
const int u = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_u, sfix);
const int v = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_v, sfix);
if (yuv_bit_depth <= 8) {
u_ptr[i] = clip_8b(u);
v_ptr[i] = clip_8b(v);
} else {
((uint16_t*)u_ptr)[i] = clip(u, yuv_max);
((uint16_t*)v_ptr)[i] = clip(v, yuv_max);
}
}
best_uv += 3 * uv_w;
u_ptr += u_stride;
v_ptr += v_stride;
}
return 1;
}
//------------------------------------------------------------------------------
// Main function
static void* SafeMalloc(uint64_t nmemb, size_t size) {
const uint64_t total_size = nmemb * (uint64_t)size;
if (total_size != (size_t)total_size) return NULL;
return malloc((size_t)total_size);
}
#define SAFE_ALLOC(W, H, T) ((T*)SafeMalloc((W) * (H), sizeof(T)))
static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
const uint8_t* b_ptr, int rgb_step, int rgb_stride,
int rgb_bit_depth, uint8_t* y_ptr, int y_stride,
uint8_t* u_ptr, int u_stride, uint8_t* v_ptr,
int v_stride, int yuv_bit_depth, int width,
int height,
const SharpYuvConversionMatrix* yuv_matrix) {
// we expand the right/bottom border if needed
const int w = (width + 1) & ~1;
const int h = (height + 1) & ~1;
const int uv_w = w >> 1;
const int uv_h = h >> 1;
uint64_t prev_diff_y_sum = ~0;
int j, iter;
// TODO(skal): allocate one big memory chunk. But for now, it's easier
// for valgrind debugging to have several chunks.
fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t); // scratch
fixed_y_t* const best_y_base = SAFE_ALLOC(w, h, fixed_y_t);
fixed_y_t* const target_y_base = SAFE_ALLOC(w, h, fixed_y_t);
fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t);
fixed_t* const best_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
fixed_t* const target_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t);
fixed_y_t* best_y = best_y_base;
fixed_y_t* target_y = target_y_base;
fixed_t* best_uv = best_uv_base;
fixed_t* target_uv = target_uv_base;
const uint64_t diff_y_threshold = (uint64_t)(3.0 * w * h);
int ok;
assert(w > 0);
assert(h > 0);
if (best_y_base == NULL || best_uv_base == NULL ||
target_y_base == NULL || target_uv_base == NULL ||
best_rgb_y == NULL || best_rgb_uv == NULL ||
tmp_buffer == NULL) {
ok = 0;
goto End;
}
// Import RGB samples to W/RGB representation.
for (j = 0; j < height; j += 2) {
const int is_last_row = (j == height - 1);
fixed_y_t* const src1 = tmp_buffer + 0 * w;
fixed_y_t* const src2 = tmp_buffer + 3 * w;
// prepare two rows of input
ImportOneRow(r_ptr, g_ptr, b_ptr, rgb_step, rgb_bit_depth, width,
src1);
if (!is_last_row) {
ImportOneRow(r_ptr + rgb_stride, g_ptr + rgb_stride, b_ptr + rgb_stride,
rgb_step, rgb_bit_depth, width, src2);
} else {
memcpy(src2, src1, 3 * w * sizeof(*src2));
}
StoreGray(src1, best_y + 0, w);
StoreGray(src2, best_y + w, w);
UpdateW(src1, target_y, w, rgb_bit_depth);
UpdateW(src2, target_y + w, w, rgb_bit_depth);
UpdateChroma(src1, src2, target_uv, uv_w, rgb_bit_depth);
memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv));
best_y += 2 * w;
best_uv += 3 * uv_w;
target_y += 2 * w;
target_uv += 3 * uv_w;
r_ptr += 2 * rgb_stride;
g_ptr += 2 * rgb_stride;
b_ptr += 2 * rgb_stride;
}
// Iterate and resolve clipping conflicts.
for (iter = 0; iter < kNumIterations; ++iter) {
const fixed_t* cur_uv = best_uv_base;
const fixed_t* prev_uv = best_uv_base;
uint64_t diff_y_sum = 0;
best_y = best_y_base;
best_uv = best_uv_base;
target_y = target_y_base;
target_uv = target_uv_base;
for (j = 0; j < h; j += 2) {
fixed_y_t* const src1 = tmp_buffer + 0 * w;
fixed_y_t* const src2 = tmp_buffer + 3 * w;
{
const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 3 * uv_w : 0);
InterpolateTwoRows(best_y, prev_uv, cur_uv, next_uv, w,
src1, src2, rgb_bit_depth);
prev_uv = cur_uv;
cur_uv = next_uv;
}
UpdateW(src1, best_rgb_y + 0 * w, w, rgb_bit_depth);
UpdateW(src2, best_rgb_y + 1 * w, w, rgb_bit_depth);
UpdateChroma(src1, src2, best_rgb_uv, uv_w, rgb_bit_depth);
// update two rows of Y and one row of RGB
diff_y_sum +=
SharpYuvUpdateY(target_y, best_rgb_y, best_y, 2 * w,
rgb_bit_depth + GetPrecisionShift(rgb_bit_depth));
SharpYuvUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w);
best_y += 2 * w;
best_uv += 3 * uv_w;
target_y += 2 * w;
target_uv += 3 * uv_w;
}
// test exit condition
if (iter > 0) {
if (diff_y_sum < diff_y_threshold) break;
if (diff_y_sum > prev_diff_y_sum) break;
}
prev_diff_y_sum = diff_y_sum;
}
// final reconstruction
ok = ConvertWRGBToYUV(best_y_base, best_uv_base, y_ptr, y_stride, u_ptr,
u_stride, v_ptr, v_stride, rgb_bit_depth, yuv_bit_depth,
width, height, yuv_matrix);
End:
free(best_y_base);
free(best_uv_base);
free(target_y_base);
free(target_uv_base);
free(best_rgb_y);
free(best_rgb_uv);
free(tmp_buffer);
return ok;
}
#undef SAFE_ALLOC
// Hidden exported init function.
// By default SharpYuvConvert calls it with NULL. If needed, users can declare
// it as extern and call it with a VP8CPUInfo function.
extern void SharpYuvInit(VP8CPUInfo cpu_info_func);
void SharpYuvInit(VP8CPUInfo cpu_info_func) {
static volatile VP8CPUInfo sharpyuv_last_cpuinfo_used =
(VP8CPUInfo)&sharpyuv_last_cpuinfo_used;
const int initialized =
(sharpyuv_last_cpuinfo_used != (VP8CPUInfo)&sharpyuv_last_cpuinfo_used);
if (cpu_info_func == NULL && initialized) return;
if (sharpyuv_last_cpuinfo_used == cpu_info_func) return;
SharpYuvInitDsp(cpu_info_func);
if (!initialized) {
SharpYuvInitGammaTables();
}
sharpyuv_last_cpuinfo_used = cpu_info_func;
}
int SharpYuvConvert(const void* r_ptr, const void* g_ptr,
const void* b_ptr, int rgb_step, int rgb_stride,
int rgb_bit_depth, void* y_ptr, int y_stride,
void* u_ptr, int u_stride, void* v_ptr,
int v_stride, int yuv_bit_depth, int width,
int height, const SharpYuvConversionMatrix* yuv_matrix) {
SharpYuvConversionMatrix scaled_matrix;
const int rgb_max = (1 << rgb_bit_depth) - 1;
const int rgb_round = 1 << (rgb_bit_depth - 1);
const int yuv_max = (1 << yuv_bit_depth) - 1;
const int sfix = GetPrecisionShift(rgb_bit_depth);
if (width < 1 || height < 1 || width == INT_MAX || height == INT_MAX ||
r_ptr == NULL || g_ptr == NULL || b_ptr == NULL || y_ptr == NULL ||
u_ptr == NULL || v_ptr == NULL) {
return 0;
}
if (rgb_bit_depth != 8 && rgb_bit_depth != 10 && rgb_bit_depth != 12 &&
rgb_bit_depth != 16) {
return 0;
}
if (yuv_bit_depth != 8 && yuv_bit_depth != 10 && yuv_bit_depth != 12) {
return 0;
}
if (rgb_bit_depth > 8 && (rgb_step % 2 != 0 || rgb_stride % 2 != 0)) {
// Step/stride should be even for uint16_t buffers.
return 0;
}
if (yuv_bit_depth > 8 &&
(y_stride % 2 != 0 || u_stride % 2 != 0 || v_stride % 2 != 0)) {
// Stride should be even for uint16_t buffers.
return 0;
}
SharpYuvInit(NULL);
// Add scaling factor to go from rgb_bit_depth to yuv_bit_depth, to the
// rgb->yuv conversion matrix.
if (rgb_bit_depth == yuv_bit_depth) {
memcpy(&scaled_matrix, yuv_matrix, sizeof(scaled_matrix));
} else {
int i;
for (i = 0; i < 3; ++i) {
scaled_matrix.rgb_to_y[i] =
(yuv_matrix->rgb_to_y[i] * yuv_max + rgb_round) / rgb_max;
scaled_matrix.rgb_to_u[i] =
(yuv_matrix->rgb_to_u[i] * yuv_max + rgb_round) / rgb_max;
scaled_matrix.rgb_to_v[i] =
(yuv_matrix->rgb_to_v[i] * yuv_max + rgb_round) / rgb_max;
}
}
// Also incorporate precision change scaling.
scaled_matrix.rgb_to_y[3] = Shift(yuv_matrix->rgb_to_y[3], sfix);
scaled_matrix.rgb_to_u[3] = Shift(yuv_matrix->rgb_to_u[3], sfix);
scaled_matrix.rgb_to_v[3] = Shift(yuv_matrix->rgb_to_v[3], sfix);
return DoSharpArgbToYuv(r_ptr, g_ptr, b_ptr, rgb_step, rgb_stride,
rgb_bit_depth, y_ptr, y_stride, u_ptr, u_stride,
v_ptr, v_stride, yuv_bit_depth, width, height,
&scaled_matrix);
}
//------------------------------------------------------------------------------

thirdparty/webp/sharpyuv/sharpyuv.h (new vendored file, 81 lines)

@ -0,0 +1,81 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Sharp RGB to YUV conversion.
#ifndef WEBP_SHARPYUV_SHARPYUV_H_
#define WEBP_SHARPYUV_SHARPYUV_H_
#include <inttypes.h>
#ifdef __cplusplus
extern "C" {
#endif
// SharpYUV API version following the convention from semver.org
#define SHARPYUV_VERSION_MAJOR 0
#define SHARPYUV_VERSION_MINOR 1
#define SHARPYUV_VERSION_PATCH 0
// Version as a uint32_t. The major number is the high 8 bits.
// The minor number is the middle 8 bits. The patch number is the low 16 bits.
#define SHARPYUV_MAKE_VERSION(MAJOR, MINOR, PATCH) \
(((MAJOR) << 24) | ((MINOR) << 16) | (PATCH))
#define SHARPYUV_VERSION \
SHARPYUV_MAKE_VERSION(SHARPYUV_VERSION_MAJOR, SHARPYUV_VERSION_MINOR, \
SHARPYUV_VERSION_PATCH)
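// (editor's note) Example: SHARPYUV_MAKE_VERSION(0, 1, 0) ==
// (0 << 24) | (1 << 16) | 0 == 0x00010000, the SHARPYUV_VERSION of this
// release.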
// RGB to YUV conversion matrix, in 16 bit fixed point.
// y = rgb_to_y[0] * r + rgb_to_y[1] * g + rgb_to_y[2] * b + rgb_to_y[3]
// u = rgb_to_u[0] * r + rgb_to_u[1] * g + rgb_to_u[2] * b + rgb_to_u[3]
// v = rgb_to_v[0] * r + rgb_to_v[1] * g + rgb_to_v[2] * b + rgb_to_v[3]
// Then y, u and v values are divided by 1<<16 and rounded.
typedef struct {
int rgb_to_y[4];
int rgb_to_u[4];
int rgb_to_v[4];
} SharpYuvConversionMatrix;
// Converts RGB to YUV420 using a downsampling algorithm that minimizes
// artefacts caused by chroma subsampling.
// This is slower than standard downsampling (averaging of 4 UV values).
// Assumes that the image will be upsampled using a bilinear filter. If nearest
// neighbor is used instead, the upsampled image might look worse than with
// standard downsampling.
// r_ptr, g_ptr, b_ptr: pointers to the source r, g and b channels. Should point
// to uint8_t buffers if rgb_bit_depth is 8, or uint16_t buffers otherwise.
// rgb_step: distance in bytes between two horizontally adjacent pixels on the
// r, g and b channels. If rgb_bit_depth is > 8, it should be a
// multiple of 2.
// rgb_stride: distance in bytes between two vertically adjacent pixels on the
// r, g, and b channels. If rgb_bit_depth is > 8, it should be a
// multiple of 2.
// rgb_bit_depth: number of bits for each r/g/b value. One of: 8, 10, 12, 16.
// Note: 16 bit input is truncated to 14 bits before conversion to yuv.
// yuv_bit_depth: number of bits for each y/u/v value. One of: 8, 10, 12.
// y_ptr, u_ptr, v_ptr: pointers to the destination y, u and v channels. Should
// point to uint8_t buffers if yuv_bit_depth is 8, or uint16_t buffers
// otherwise.
// y_stride, u_stride, v_stride: distance in bytes between two vertically
// adjacent pixels on the y, u and v channels. If yuv_bit_depth > 8, they
// should be multiples of 2.
// width, height: width and height of the image in pixels
int SharpYuvConvert(const void* r_ptr, const void* g_ptr, const void* b_ptr,
int rgb_step, int rgb_stride, int rgb_bit_depth,
void* y_ptr, int y_stride, void* u_ptr, int u_stride,
void* v_ptr, int v_stride, int yuv_bit_depth, int width,
int height, const SharpYuvConversionMatrix* yuv_matrix);
// TODO(b/194336375): Add YUV444 to YUV420 conversion. Maybe also add 422
// support (it's rarely used in practice, especially for images).
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_SHARPYUV_SHARPYUV_H_
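Editor's note: a usage sketch for the API documented above, converting an 8-bit interleaved RGB buffer to YUV420. Buffer allocation and error handling are elided; only functions declared in this commit are used:

    #include "sharpyuv/sharpyuv.h"
    #include "sharpyuv/sharpyuv_csp.h"

    int ConvertRgbToYuv420(const uint8_t* rgb, int width, int height,
                           uint8_t* y, uint8_t* u, uint8_t* v) {
      const SharpYuvConversionMatrix* matrix =
          SharpYuvGetConversionMatrix(kSharpYuvMatrixWebp);
      return SharpYuvConvert(rgb + 0, rgb + 1, rgb + 2,  // interleaved R, G, B
                             /*rgb_step=*/3, /*rgb_stride=*/3 * width,
                             /*rgb_bit_depth=*/8,
                             y, /*y_stride=*/width,
                             u, /*u_stride=*/(width + 1) / 2,
                             v, /*v_stride=*/(width + 1) / 2,
                             /*yuv_bit_depth=*/8, width, height, matrix);
    }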

thirdparty/webp/sharpyuv/sharpyuv_csp.c (new vendored file, 110 lines)

@ -0,0 +1,110 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Colorspace utilities.
#include "sharpyuv/sharpyuv_csp.h"
#include <assert.h>
#include <math.h>
#include <string.h>
static int ToFixed16(float f) { return (int)floor(f * (1 << 16) + 0.5f); }
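// (editor's note) e.g. ToFixed16(0.5f) == 32768, i.e. 0.5 in 16-bit fixed
// point: floor(0.5 * 65536 + 0.5) == 32768.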
void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
SharpYuvConversionMatrix* matrix) {
const float kr = yuv_color_space->kr;
const float kb = yuv_color_space->kb;
const float kg = 1.0f - kr - kb;
const float cr = 0.5f / (1.0f - kb);
const float cb = 0.5f / (1.0f - kr);
const int shift = yuv_color_space->bit_depth - 8;
const float denom = (float)((1 << yuv_color_space->bit_depth) - 1);
float scale_y = 1.0f;
float add_y = 0.0f;
float scale_u = cr;
float scale_v = cb;
float add_uv = (float)(128 << shift);
assert(yuv_color_space->bit_depth >= 8);
if (yuv_color_space->range == kSharpYuvRangeLimited) {
scale_y *= (219 << shift) / denom;
scale_u *= (224 << shift) / denom;
scale_v *= (224 << shift) / denom;
add_y = (float)(16 << shift);
}
matrix->rgb_to_y[0] = ToFixed16(kr * scale_y);
matrix->rgb_to_y[1] = ToFixed16(kg * scale_y);
matrix->rgb_to_y[2] = ToFixed16(kb * scale_y);
matrix->rgb_to_y[3] = ToFixed16(add_y);
matrix->rgb_to_u[0] = ToFixed16(-kr * scale_u);
matrix->rgb_to_u[1] = ToFixed16(-kg * scale_u);
matrix->rgb_to_u[2] = ToFixed16((1 - kb) * scale_u);
matrix->rgb_to_u[3] = ToFixed16(add_uv);
matrix->rgb_to_v[0] = ToFixed16((1 - kr) * scale_v);
matrix->rgb_to_v[1] = ToFixed16(-kg * scale_v);
matrix->rgb_to_v[2] = ToFixed16(-kb * scale_v);
matrix->rgb_to_v[3] = ToFixed16(add_uv);
}
// Matrices are in YUV_FIX fixed point precision.
// WebP's matrix, similar but not identical to kRec601LimitedMatrix.
static const SharpYuvConversionMatrix kWebpMatrix = {
{16839, 33059, 6420, 16 << 16},
{-9719, -19081, 28800, 128 << 16},
{28800, -24116, -4684, 128 << 16},
};
// Kr=0.2990f Kb=0.1140f bits=8 range=kSharpYuvRangeLimited
static const SharpYuvConversionMatrix kRec601LimitedMatrix = {
{16829, 33039, 6416, 16 << 16},
{-9714, -19071, 28784, 128 << 16},
{28784, -24103, -4681, 128 << 16},
};
// Kr=0.2990f Kb=0.1140f bits=8 range=kSharpYuvRangeFull
static const SharpYuvConversionMatrix kRec601FullMatrix = {
{19595, 38470, 7471, 0},
{-11058, -21710, 32768, 128 << 16},
{32768, -27439, -5329, 128 << 16},
};
// Kr=0.2126f Kb=0.0722f bits=8 range=kSharpYuvRangeLimited
static const SharpYuvConversionMatrix kRec709LimitedMatrix = {
{11966, 40254, 4064, 16 << 16},
{-6596, -22189, 28784, 128 << 16},
{28784, -26145, -2639, 128 << 16},
};
// Kr=0.2126f Kb=0.0722f bits=8 range=kSharpYuvRangeFull
static const SharpYuvConversionMatrix kRec709FullMatrix = {
{13933, 46871, 4732, 0},
{-7509, -25259, 32768, 128 << 16},
{32768, -29763, -3005, 128 << 16},
};
const SharpYuvConversionMatrix* SharpYuvGetConversionMatrix(
SharpYuvMatrixType matrix_type) {
switch (matrix_type) {
case kSharpYuvMatrixWebp:
return &kWebpMatrix;
case kSharpYuvMatrixRec601Limited:
return &kRec601LimitedMatrix;
case kSharpYuvMatrixRec601Full:
return &kRec601FullMatrix;
case kSharpYuvMatrixRec709Limited:
return &kRec709LimitedMatrix;
case kSharpYuvMatrixRec709Full:
return &kRec709FullMatrix;
case kSharpYuvMatrixNum:
return NULL;
}
return NULL;
}
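
Reader-added sketch tying ToFixed16() to the precomputed tables above: feeding the Rec.601 limited-range constants back through SharpYuvComputeConversionMatrix() should reproduce kRec601LimitedMatrix (e.g. 0.2990f * (219 / 255.) * 65536 rounds to 16829):

#include "sharpyuv/sharpyuv_csp.h"

static void CheckRec601Limited(void) {
  const SharpYuvColorSpace rec601 = {/*kr=*/0.2990f, /*kb=*/0.1140f,
                                     /*bit_depth=*/8, kSharpYuvRangeLimited};
  SharpYuvConversionMatrix m;
  SharpYuvComputeConversionMatrix(&rec601, &m);
  // Expect m.rgb_to_y == {16829, 33039, 6416, 16 << 16}, etc.
}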

thirdparty/webp/sharpyuv/sharpyuv_csp.h vendored Normal file
@ -0,0 +1,59 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Colorspace utilities.
#ifndef WEBP_SHARPYUV_SHARPYUV_CSP_H_
#define WEBP_SHARPYUV_SHARPYUV_CSP_H_
#include "sharpyuv/sharpyuv.h"
#ifdef __cplusplus
extern "C" {
#endif
// Range of YUV values.
typedef enum {
kSharpYuvRangeFull, // YUV values between [0;255] (for 8 bit)
kSharpYuvRangeLimited // Y in [16;235], YUV in [16;240] (for 8 bit)
} SharpYuvRange;
// Constants that define a YUV color space.
typedef struct {
// Kr and Kb are defined such that:
// Y = Kr * r + Kg * g + Kb * b where Kg = 1 - Kr - Kb.
float kr;
float kb;
int bit_depth; // 8, 10 or 12
SharpYuvRange range;
} SharpYuvColorSpace;
// Fills in 'matrix' for the given YUVColorSpace.
void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
SharpYuvConversionMatrix* matrix);
// Enums for precomputed conversion matrices.
typedef enum {
kSharpYuvMatrixWebp = 0,
kSharpYuvMatrixRec601Limited,
kSharpYuvMatrixRec601Full,
kSharpYuvMatrixRec709Limited,
kSharpYuvMatrixRec709Full,
kSharpYuvMatrixNum
} SharpYuvMatrixType;
// Returns a pointer to a matrix for one of the predefined colorspaces.
const SharpYuvConversionMatrix* SharpYuvGetConversionMatrix(
SharpYuvMatrixType matrix_type);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_SHARPYUV_SHARPYUV_CSP_H_

thirdparty/webp/sharpyuv/sharpyuv_dsp.c vendored Normal file
@ -0,0 +1,102 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical functions for Sharp YUV.
//
// Author: Skal (pascal.massimino@gmail.com)
#include "sharpyuv/sharpyuv_dsp.h"
#include <assert.h>
#include <stdlib.h>
#include "src/dsp/cpu.h"
//-----------------------------------------------------------------------------
#if !WEBP_NEON_OMIT_C_CODE
static uint16_t clip(int v, int max) {
return (v < 0) ? 0 : (v > max) ? max : (uint16_t)v;
}
static uint64_t SharpYuvUpdateY_C(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len, int bit_depth) {
uint64_t diff = 0;
int i;
const int max_y = (1 << bit_depth) - 1;
for (i = 0; i < len; ++i) {
const int diff_y = ref[i] - src[i];
const int new_y = (int)dst[i] + diff_y;
dst[i] = clip(new_y, max_y);
diff += (uint64_t)abs(diff_y);
}
return diff;
}
static void SharpYuvUpdateRGB_C(const int16_t* ref, const int16_t* src,
int16_t* dst, int len) {
int i;
for (i = 0; i < len; ++i) {
const int diff_uv = ref[i] - src[i];
dst[i] += diff_uv;
}
}
static void SharpYuvFilterRow_C(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out,
int bit_depth) {
int i;
const int max_y = (1 << bit_depth) - 1;
for (i = 0; i < len; ++i, ++A, ++B) {
const int v0 = (A[0] * 9 + A[1] * 3 + B[0] * 3 + B[1] + 8) >> 4;
const int v1 = (A[1] * 9 + A[0] * 3 + B[1] * 3 + B[0] + 8) >> 4;
out[2 * i + 0] = clip(best_y[2 * i + 0] + v0, max_y);
out[2 * i + 1] = clip(best_y[2 * i + 1] + v1, max_y);
}
}
#endif // !WEBP_NEON_OMIT_C_CODE
//-----------------------------------------------------------------------------
uint64_t (*SharpYuvUpdateY)(const uint16_t* src, const uint16_t* ref,
uint16_t* dst, int len, int bit_depth);
void (*SharpYuvUpdateRGB)(const int16_t* src, const int16_t* ref, int16_t* dst,
int len);
void (*SharpYuvFilterRow)(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out,
int bit_depth);
extern void InitSharpYuvSSE2(void);
extern void InitSharpYuvNEON(void);
void SharpYuvInitDsp(VP8CPUInfo cpu_info_func) {
(void)cpu_info_func;
#if !WEBP_NEON_OMIT_C_CODE
SharpYuvUpdateY = SharpYuvUpdateY_C;
SharpYuvUpdateRGB = SharpYuvUpdateRGB_C;
SharpYuvFilterRow = SharpYuvFilterRow_C;
#endif
#if defined(WEBP_HAVE_SSE2)
if (cpu_info_func == NULL || cpu_info_func(kSSE2)) {
InitSharpYuvSSE2();
}
#endif // WEBP_HAVE_SSE2
#if defined(WEBP_HAVE_NEON)
if (WEBP_NEON_OMIT_C_CODE || cpu_info_func == NULL || cpu_info_func(kNEON)) {
InitSharpYuvNEON();
}
#endif // WEBP_HAVE_NEON
assert(SharpYuvUpdateY != NULL);
assert(SharpYuvUpdateRGB != NULL);
assert(SharpYuvFilterRow != NULL);
}
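
Reader-added note on the dispatch pattern above: the exported symbols are function pointers that SharpYuvInitDsp() fills in once, picking SIMD variants according to cpu_info_func (a NULL probe unconditionally enables the compiled-in paths). A minimal driver, assuming the headers shown in this commit:

#include <stdint.h>
#include "sharpyuv/sharpyuv_dsp.h"  // also declares VP8GetCPUInfo via cpu.h

static void RunUpdateYOnce(void) {
  uint16_t ref[16] = {0}, src[16] = {0}, dst[16] = {0};
  SharpYuvInitDsp(VP8GetCPUInfo);
  // The asserts above guarantee the pointers are non-NULL after init.
  (void)SharpYuvUpdateY(ref, src, dst, /*len=*/16, /*bit_depth=*/10);
}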

thirdparty/webp/sharpyuv/sharpyuv_dsp.h vendored Normal file
@ -0,0 +1,29 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical functions for Sharp YUV.
#ifndef WEBP_SHARPYUV_SHARPYUV_DSP_H_
#define WEBP_SHARPYUV_SHARPYUV_DSP_H_
#include <stdint.h>
#include "src/dsp/cpu.h"
extern uint64_t (*SharpYuvUpdateY)(const uint16_t* src, const uint16_t* ref,
uint16_t* dst, int len, int bit_depth);
extern void (*SharpYuvUpdateRGB)(const int16_t* src, const int16_t* ref,
int16_t* dst, int len);
extern void (*SharpYuvFilterRow)(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out,
int bit_depth);
void SharpYuvInitDsp(VP8CPUInfo cpu_info_func);
#endif // WEBP_SHARPYUV_SHARPYUV_DSP_H_

thirdparty/webp/sharpyuv/sharpyuv_gamma.c vendored Normal file
@ -0,0 +1,114 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Gamma correction utilities.
#include "sharpyuv/sharpyuv_gamma.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include "src/webp/types.h"
// Gamma correction compensates loss of resolution during chroma subsampling.
// Size of pre-computed table for converting from gamma to linear.
#define GAMMA_TO_LINEAR_TAB_BITS 10
#define GAMMA_TO_LINEAR_TAB_SIZE (1 << GAMMA_TO_LINEAR_TAB_BITS)
static uint32_t kGammaToLinearTabS[GAMMA_TO_LINEAR_TAB_SIZE + 2];
#define LINEAR_TO_GAMMA_TAB_BITS 9
#define LINEAR_TO_GAMMA_TAB_SIZE (1 << LINEAR_TO_GAMMA_TAB_BITS)
static uint32_t kLinearToGammaTabS[LINEAR_TO_GAMMA_TAB_SIZE + 2];
static const double kGammaF = 1. / 0.45;
#define GAMMA_TO_LINEAR_BITS 16
static volatile int kGammaTablesSOk = 0;
void SharpYuvInitGammaTables(void) {
assert(GAMMA_TO_LINEAR_BITS <= 16);
if (!kGammaTablesSOk) {
int v;
const double a = 0.09929682680944;
const double thresh = 0.018053968510807;
const double final_scale = 1 << GAMMA_TO_LINEAR_BITS;
// Precompute gamma to linear table.
{
const double norm = 1. / GAMMA_TO_LINEAR_TAB_SIZE;
const double a_rec = 1. / (1. + a);
for (v = 0; v <= GAMMA_TO_LINEAR_TAB_SIZE; ++v) {
const double g = norm * v;
double value;
if (g <= thresh * 4.5) {
value = g / 4.5;
} else {
value = pow(a_rec * (g + a), kGammaF);
}
kGammaToLinearTabS[v] = (uint32_t)(value * final_scale + .5);
}
// prevent small rounding errors from causing a read overflow:
kGammaToLinearTabS[GAMMA_TO_LINEAR_TAB_SIZE + 1] =
kGammaToLinearTabS[GAMMA_TO_LINEAR_TAB_SIZE];
}
// Precompute linear to gamma table.
{
const double scale = 1. / LINEAR_TO_GAMMA_TAB_SIZE;
for (v = 0; v <= LINEAR_TO_GAMMA_TAB_SIZE; ++v) {
const double g = scale * v;
double value;
if (g <= thresh) {
value = 4.5 * g;
} else {
value = (1. + a) * pow(g, 1. / kGammaF) - a;
}
kLinearToGammaTabS[v] =
(uint32_t)(final_scale * value + 0.5);
}
// prevent small rounding errors from causing a read overflow:
kLinearToGammaTabS[LINEAR_TO_GAMMA_TAB_SIZE + 1] =
kLinearToGammaTabS[LINEAR_TO_GAMMA_TAB_SIZE];
}
kGammaTablesSOk = 1;
}
}
static WEBP_INLINE int Shift(int v, int shift) {
return (shift >= 0) ? (v << shift) : (v >> -shift);
}
static WEBP_INLINE uint32_t FixedPointInterpolation(int v, uint32_t* tab,
int tab_pos_shift_right,
int tab_value_shift) {
const uint32_t tab_pos = Shift(v, -tab_pos_shift_right);
// fractional part, in 'tab_pos_shift' fixed-point precision
const uint32_t x = v - (tab_pos << tab_pos_shift_right); // fractional part
// v0 / v1 are in kGammaToLinearBits fixed-point precision (range [0..1])
const uint32_t v0 = Shift(tab[tab_pos + 0], tab_value_shift);
const uint32_t v1 = Shift(tab[tab_pos + 1], tab_value_shift);
// Final interpolation.
const uint32_t v2 = (v1 - v0) * x; // note: v1 >= v0.
const int half =
(tab_pos_shift_right > 0) ? 1 << (tab_pos_shift_right - 1) : 0;
const uint32_t result = v0 + ((v2 + half) >> tab_pos_shift_right);
return result;
}
uint32_t SharpYuvGammaToLinear(uint16_t v, int bit_depth) {
const int shift = GAMMA_TO_LINEAR_TAB_BITS - bit_depth;
if (shift > 0) {
return kGammaToLinearTabS[v << shift];
}
return FixedPointInterpolation(v, kGammaToLinearTabS, -shift, 0);
}
uint16_t SharpYuvLinearToGamma(uint32_t value, int bit_depth) {
return FixedPointInterpolation(
value, kLinearToGammaTabS,
(GAMMA_TO_LINEAR_BITS - LINEAR_TO_GAMMA_TAB_BITS),
bit_depth - GAMMA_TO_LINEAR_BITS);
}

thirdparty/webp/sharpyuv/sharpyuv_gamma.h vendored Normal file
@ -0,0 +1,35 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Gamma correction utilities.
#ifndef WEBP_SHARPYUV_SHARPYUV_GAMMA_H_
#define WEBP_SHARPYUV_SHARPYUV_GAMMA_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
// Initializes precomputed tables. Must be called once before calling
// SharpYuvGammaToLinear or SharpYuvLinearToGamma.
void SharpYuvInitGammaTables(void);
// Converts a gamma color value on 'bit_depth' bits to a 16 bit linear value.
uint32_t SharpYuvGammaToLinear(uint16_t v, int bit_depth);
// Converts a 16 bit linear color value to a gamma value on 'bit_depth' bits.
uint16_t SharpYuvLinearToGamma(uint32_t value, int bit_depth);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_SHARPYUV_SHARPYUV_GAMMA_H_
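
Reader-added usage sketch: the constants in sharpyuv_gamma.c (4.5 linear slope, exponent 1 / 0.45) are the BT.709/BT.601 transfer characteristic, and the two conversions round-trip up to table-interpolation error. The init call must come first, per the comment above:

#include <stdint.h>
#include "sharpyuv/sharpyuv_gamma.h"

static uint16_t RoundTrip8Bit(uint16_t gamma_value) {
  SharpYuvInitGammaTables();
  // 8-bit gamma sample -> 16-bit fixed-point linear -> back to 8-bit gamma.
  const uint32_t linear = SharpYuvGammaToLinear(gamma_value, /*bit_depth=*/8);
  return SharpYuvLinearToGamma(linear, /*bit_depth=*/8);  // ~= gamma_value
}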

thirdparty/webp/sharpyuv/sharpyuv_neon.c vendored Normal file
@ -0,0 +1,182 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical functions for Sharp YUV.
//
// Author: Skal (pascal.massimino@gmail.com)
#include "sharpyuv/sharpyuv_dsp.h"
#if defined(WEBP_USE_NEON)
#include <assert.h>
#include <stdlib.h>
#include <arm_neon.h>
#endif
extern void InitSharpYuvNEON(void);
#if defined(WEBP_USE_NEON)
static uint16_t clip_NEON(int v, int max) {
return (v < 0) ? 0 : (v > max) ? max : (uint16_t)v;
}
static uint64_t SharpYuvUpdateY_NEON(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len, int bit_depth) {
const int max_y = (1 << bit_depth) - 1;
int i;
const int16x8_t zero = vdupq_n_s16(0);
const int16x8_t max = vdupq_n_s16(max_y);
uint64x2_t sum = vdupq_n_u64(0);
uint64_t diff;
for (i = 0; i + 8 <= len; i += 8) {
const int16x8_t A = vreinterpretq_s16_u16(vld1q_u16(ref + i));
const int16x8_t B = vreinterpretq_s16_u16(vld1q_u16(src + i));
const int16x8_t C = vreinterpretq_s16_u16(vld1q_u16(dst + i));
const int16x8_t D = vsubq_s16(A, B); // diff_y
const int16x8_t F = vaddq_s16(C, D); // new_y
const uint16x8_t H =
vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(F, max), zero));
const int16x8_t I = vabsq_s16(D); // abs(diff_y)
vst1q_u16(dst + i, H);
sum = vpadalq_u32(sum, vpaddlq_u16(vreinterpretq_u16_s16(I)));
}
diff = vgetq_lane_u64(sum, 0) + vgetq_lane_u64(sum, 1);
for (; i < len; ++i) {
const int diff_y = ref[i] - src[i];
const int new_y = (int)(dst[i]) + diff_y;
dst[i] = clip_NEON(new_y, max_y);
diff += (uint64_t)(abs(diff_y));
}
return diff;
}
static void SharpYuvUpdateRGB_NEON(const int16_t* ref, const int16_t* src,
int16_t* dst, int len) {
int i;
for (i = 0; i + 8 <= len; i += 8) {
const int16x8_t A = vld1q_s16(ref + i);
const int16x8_t B = vld1q_s16(src + i);
const int16x8_t C = vld1q_s16(dst + i);
const int16x8_t D = vsubq_s16(A, B); // diff_uv
const int16x8_t E = vaddq_s16(C, D); // new_uv
vst1q_s16(dst + i, E);
}
for (; i < len; ++i) {
const int diff_uv = ref[i] - src[i];
dst[i] += diff_uv;
}
}
static void SharpYuvFilterRow16_NEON(const int16_t* A, const int16_t* B,
int len, const uint16_t* best_y,
uint16_t* out, int bit_depth) {
const int max_y = (1 << bit_depth) - 1;
int i;
const int16x8_t max = vdupq_n_s16(max_y);
const int16x8_t zero = vdupq_n_s16(0);
for (i = 0; i + 8 <= len; i += 8) {
const int16x8_t a0 = vld1q_s16(A + i + 0);
const int16x8_t a1 = vld1q_s16(A + i + 1);
const int16x8_t b0 = vld1q_s16(B + i + 0);
const int16x8_t b1 = vld1q_s16(B + i + 1);
const int16x8_t a0b1 = vaddq_s16(a0, b1);
const int16x8_t a1b0 = vaddq_s16(a1, b0);
const int16x8_t a0a1b0b1 = vaddq_s16(a0b1, a1b0); // A0+A1+B0+B1
const int16x8_t a0b1_2 = vaddq_s16(a0b1, a0b1); // 2*(A0+B1)
const int16x8_t a1b0_2 = vaddq_s16(a1b0, a1b0); // 2*(A1+B0)
const int16x8_t c0 = vshrq_n_s16(vaddq_s16(a0b1_2, a0a1b0b1), 3);
const int16x8_t c1 = vshrq_n_s16(vaddq_s16(a1b0_2, a0a1b0b1), 3);
const int16x8_t e0 = vrhaddq_s16(c1, a0);
const int16x8_t e1 = vrhaddq_s16(c0, a1);
const int16x8x2_t f = vzipq_s16(e0, e1);
const int16x8_t g0 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 0));
const int16x8_t g1 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 8));
const int16x8_t h0 = vaddq_s16(g0, f.val[0]);
const int16x8_t h1 = vaddq_s16(g1, f.val[1]);
const int16x8_t i0 = vmaxq_s16(vminq_s16(h0, max), zero);
const int16x8_t i1 = vmaxq_s16(vminq_s16(h1, max), zero);
vst1q_u16(out + 2 * i + 0, vreinterpretq_u16_s16(i0));
vst1q_u16(out + 2 * i + 8, vreinterpretq_u16_s16(i1));
}
for (; i < len; ++i) {
const int a0b1 = A[i + 0] + B[i + 1];
const int a1b0 = A[i + 1] + B[i + 0];
const int a0a1b0b1 = a0b1 + a1b0 + 8;
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
out[2 * i + 0] = clip_NEON(best_y[2 * i + 0] + v0, max_y);
out[2 * i + 1] = clip_NEON(best_y[2 * i + 1] + v1, max_y);
}
}
static void SharpYuvFilterRow32_NEON(const int16_t* A, const int16_t* B,
int len, const uint16_t* best_y,
uint16_t* out, int bit_depth) {
const int max_y = (1 << bit_depth) - 1;
int i;
const uint16x8_t max = vdupq_n_u16(max_y);
for (i = 0; i + 4 <= len; i += 4) {
const int16x4_t a0 = vld1_s16(A + i + 0);
const int16x4_t a1 = vld1_s16(A + i + 1);
const int16x4_t b0 = vld1_s16(B + i + 0);
const int16x4_t b1 = vld1_s16(B + i + 1);
const int32x4_t a0b1 = vaddl_s16(a0, b1);
const int32x4_t a1b0 = vaddl_s16(a1, b0);
const int32x4_t a0a1b0b1 = vaddq_s32(a0b1, a1b0); // A0+A1+B0+B1
const int32x4_t a0b1_2 = vaddq_s32(a0b1, a0b1); // 2*(A0+B1)
const int32x4_t a1b0_2 = vaddq_s32(a1b0, a1b0); // 2*(A1+B0)
const int32x4_t c0 = vshrq_n_s32(vaddq_s32(a0b1_2, a0a1b0b1), 3);
const int32x4_t c1 = vshrq_n_s32(vaddq_s32(a1b0_2, a0a1b0b1), 3);
const int32x4_t e0 = vrhaddq_s32(c1, vmovl_s16(a0));
const int32x4_t e1 = vrhaddq_s32(c0, vmovl_s16(a1));
const int32x4x2_t f = vzipq_s32(e0, e1);
const int16x8_t g = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i));
const int32x4_t h0 = vaddw_s16(f.val[0], vget_low_s16(g));
const int32x4_t h1 = vaddw_s16(f.val[1], vget_high_s16(g));
const uint16x8_t i_16 = vcombine_u16(vqmovun_s32(h0), vqmovun_s32(h1));
const uint16x8_t i_clamped = vminq_u16(i_16, max);
vst1q_u16(out + 2 * i + 0, i_clamped);
}
for (; i < len; ++i) {
const int a0b1 = A[i + 0] + B[i + 1];
const int a1b0 = A[i + 1] + B[i + 0];
const int a0a1b0b1 = a0b1 + a1b0 + 8;
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
out[2 * i + 0] = clip_NEON(best_y[2 * i + 0] + v0, max_y);
out[2 * i + 1] = clip_NEON(best_y[2 * i + 1] + v1, max_y);
}
}
static void SharpYuvFilterRow_NEON(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out,
int bit_depth) {
if (bit_depth <= 10) {
SharpYuvFilterRow16_NEON(A, B, len, best_y, out, bit_depth);
} else {
SharpYuvFilterRow32_NEON(A, B, len, best_y, out, bit_depth);
}
}
//------------------------------------------------------------------------------
WEBP_TSAN_IGNORE_FUNCTION void InitSharpYuvNEON(void) {
SharpYuvUpdateY = SharpYuvUpdateY_NEON;
SharpYuvUpdateRGB = SharpYuvUpdateRGB_NEON;
SharpYuvFilterRow = SharpYuvFilterRow_NEON;
}
#else // !WEBP_USE_NEON
void InitSharpYuvNEON(void) {}
#endif // WEBP_USE_NEON
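
(Reader-added rationale for the 10-bit cutoff in SharpYuvFilterRow_NEON above; my arithmetic, not an upstream comment. The filter weights 9 + 3 + 3 + 1 sum to 16, so each output is bounded by roughly 16 * max_y + 8 before the >> 4: at bit_depth == 10 that is 16 * 1023 + 8 = 16376, which still fits a signed 16-bit lane, while at 12 bits it is 16 * 4095 + 8 = 65528, which does not; hence the widening 32-bit variant.)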

thirdparty/webp/sharpyuv/sharpyuv_sse2.c vendored Normal file
@ -0,0 +1,204 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical functions for Sharp YUV.
//
// Author: Skal (pascal.massimino@gmail.com)
#include "sharpyuv/sharpyuv_dsp.h"
#if defined(WEBP_USE_SSE2)
#include <stdlib.h>
#include <emmintrin.h>
#endif
extern void InitSharpYuvSSE2(void);
#if defined(WEBP_USE_SSE2)
static uint16_t clip_SSE2(int v, int max) {
return (v < 0) ? 0 : (v > max) ? max : (uint16_t)v;
}
static uint64_t SharpYuvUpdateY_SSE2(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len, int bit_depth) {
const int max_y = (1 << bit_depth) - 1;
uint64_t diff = 0;
uint32_t tmp[4];
int i;
const __m128i zero = _mm_setzero_si128();
const __m128i max = _mm_set1_epi16(max_y);
const __m128i one = _mm_set1_epi16(1);
__m128i sum = zero;
for (i = 0; i + 8 <= len; i += 8) {
const __m128i A = _mm_loadu_si128((const __m128i*)(ref + i));
const __m128i B = _mm_loadu_si128((const __m128i*)(src + i));
const __m128i C = _mm_loadu_si128((const __m128i*)(dst + i));
const __m128i D = _mm_sub_epi16(A, B); // diff_y
const __m128i E = _mm_cmpgt_epi16(zero, D); // sign (-1 or 0)
const __m128i F = _mm_add_epi16(C, D); // new_y
const __m128i G = _mm_or_si128(E, one); // -1 or 1
const __m128i H = _mm_max_epi16(_mm_min_epi16(F, max), zero);
const __m128i I = _mm_madd_epi16(D, G); // sum(abs(...))
_mm_storeu_si128((__m128i*)(dst + i), H);
sum = _mm_add_epi32(sum, I);
}
_mm_storeu_si128((__m128i*)tmp, sum);
diff = tmp[3] + tmp[2] + tmp[1] + tmp[0];
for (; i < len; ++i) {
const int diff_y = ref[i] - src[i];
const int new_y = (int)dst[i] + diff_y;
dst[i] = clip_SSE2(new_y, max_y);
diff += (uint64_t)abs(diff_y);
}
return diff;
}
static void SharpYuvUpdateRGB_SSE2(const int16_t* ref, const int16_t* src,
int16_t* dst, int len) {
int i = 0;
for (i = 0; i + 8 <= len; i += 8) {
const __m128i A = _mm_loadu_si128((const __m128i*)(ref + i));
const __m128i B = _mm_loadu_si128((const __m128i*)(src + i));
const __m128i C = _mm_loadu_si128((const __m128i*)(dst + i));
const __m128i D = _mm_sub_epi16(A, B); // diff_uv
const __m128i E = _mm_add_epi16(C, D); // new_uv
_mm_storeu_si128((__m128i*)(dst + i), E);
}
for (; i < len; ++i) {
const int diff_uv = ref[i] - src[i];
dst[i] += diff_uv;
}
}
static void SharpYuvFilterRow16_SSE2(const int16_t* A, const int16_t* B,
int len, const uint16_t* best_y,
uint16_t* out, int bit_depth) {
const int max_y = (1 << bit_depth) - 1;
int i;
const __m128i kCst8 = _mm_set1_epi16(8);
const __m128i max = _mm_set1_epi16(max_y);
const __m128i zero = _mm_setzero_si128();
for (i = 0; i + 8 <= len; i += 8) {
const __m128i a0 = _mm_loadu_si128((const __m128i*)(A + i + 0));
const __m128i a1 = _mm_loadu_si128((const __m128i*)(A + i + 1));
const __m128i b0 = _mm_loadu_si128((const __m128i*)(B + i + 0));
const __m128i b1 = _mm_loadu_si128((const __m128i*)(B + i + 1));
const __m128i a0b1 = _mm_add_epi16(a0, b1);
const __m128i a1b0 = _mm_add_epi16(a1, b0);
const __m128i a0a1b0b1 = _mm_add_epi16(a0b1, a1b0); // A0+A1+B0+B1
const __m128i a0a1b0b1_8 = _mm_add_epi16(a0a1b0b1, kCst8);
const __m128i a0b1_2 = _mm_add_epi16(a0b1, a0b1); // 2*(A0+B1)
const __m128i a1b0_2 = _mm_add_epi16(a1b0, a1b0); // 2*(A1+B0)
const __m128i c0 = _mm_srai_epi16(_mm_add_epi16(a0b1_2, a0a1b0b1_8), 3);
const __m128i c1 = _mm_srai_epi16(_mm_add_epi16(a1b0_2, a0a1b0b1_8), 3);
const __m128i d0 = _mm_add_epi16(c1, a0);
const __m128i d1 = _mm_add_epi16(c0, a1);
const __m128i e0 = _mm_srai_epi16(d0, 1);
const __m128i e1 = _mm_srai_epi16(d1, 1);
const __m128i f0 = _mm_unpacklo_epi16(e0, e1);
const __m128i f1 = _mm_unpackhi_epi16(e0, e1);
const __m128i g0 = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 0));
const __m128i g1 = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 8));
const __m128i h0 = _mm_add_epi16(g0, f0);
const __m128i h1 = _mm_add_epi16(g1, f1);
const __m128i i0 = _mm_max_epi16(_mm_min_epi16(h0, max), zero);
const __m128i i1 = _mm_max_epi16(_mm_min_epi16(h1, max), zero);
_mm_storeu_si128((__m128i*)(out + 2 * i + 0), i0);
_mm_storeu_si128((__m128i*)(out + 2 * i + 8), i1);
}
for (; i < len; ++i) {
// (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 =
// = (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4
// We reuse the common sub-expressions.
const int a0b1 = A[i + 0] + B[i + 1];
const int a1b0 = A[i + 1] + B[i + 0];
const int a0a1b0b1 = a0b1 + a1b0 + 8;
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
out[2 * i + 0] = clip_SSE2(best_y[2 * i + 0] + v0, max_y);
out[2 * i + 1] = clip_SSE2(best_y[2 * i + 1] + v1, max_y);
}
}
static WEBP_INLINE __m128i s16_to_s32(__m128i in) {
return _mm_srai_epi32(_mm_unpacklo_epi16(in, in), 16);
}
static void SharpYuvFilterRow32_SSE2(const int16_t* A, const int16_t* B,
int len, const uint16_t* best_y,
uint16_t* out, int bit_depth) {
const int max_y = (1 << bit_depth) - 1;
int i;
const __m128i kCst8 = _mm_set1_epi32(8);
const __m128i max = _mm_set1_epi16(max_y);
const __m128i zero = _mm_setzero_si128();
for (i = 0; i + 4 <= len; i += 4) {
const __m128i a0 = s16_to_s32(_mm_loadl_epi64((const __m128i*)(A + i + 0)));
const __m128i a1 = s16_to_s32(_mm_loadl_epi64((const __m128i*)(A + i + 1)));
const __m128i b0 = s16_to_s32(_mm_loadl_epi64((const __m128i*)(B + i + 0)));
const __m128i b1 = s16_to_s32(_mm_loadl_epi64((const __m128i*)(B + i + 1)));
const __m128i a0b1 = _mm_add_epi32(a0, b1);
const __m128i a1b0 = _mm_add_epi32(a1, b0);
const __m128i a0a1b0b1 = _mm_add_epi32(a0b1, a1b0); // A0+A1+B0+B1
const __m128i a0a1b0b1_8 = _mm_add_epi32(a0a1b0b1, kCst8);
const __m128i a0b1_2 = _mm_add_epi32(a0b1, a0b1); // 2*(A0+B1)
const __m128i a1b0_2 = _mm_add_epi32(a1b0, a1b0); // 2*(A1+B0)
const __m128i c0 = _mm_srai_epi32(_mm_add_epi32(a0b1_2, a0a1b0b1_8), 3);
const __m128i c1 = _mm_srai_epi32(_mm_add_epi32(a1b0_2, a0a1b0b1_8), 3);
const __m128i d0 = _mm_add_epi32(c1, a0);
const __m128i d1 = _mm_add_epi32(c0, a1);
const __m128i e0 = _mm_srai_epi32(d0, 1);
const __m128i e1 = _mm_srai_epi32(d1, 1);
const __m128i f0 = _mm_unpacklo_epi32(e0, e1);
const __m128i f1 = _mm_unpackhi_epi32(e0, e1);
const __m128i g = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 0));
const __m128i h_16 = _mm_add_epi16(g, _mm_packs_epi32(f0, f1));
const __m128i final = _mm_max_epi16(_mm_min_epi16(h_16, max), zero);
_mm_storeu_si128((__m128i*)(out + 2 * i + 0), final);
}
for (; i < len; ++i) {
// (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 =
// = (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4
// We reuse the common sub-expressions.
const int a0b1 = A[i + 0] + B[i + 1];
const int a1b0 = A[i + 1] + B[i + 0];
const int a0a1b0b1 = a0b1 + a1b0 + 8;
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
out[2 * i + 0] = clip_SSE2(best_y[2 * i + 0] + v0, max_y);
out[2 * i + 1] = clip_SSE2(best_y[2 * i + 1] + v1, max_y);
}
}
static void SharpYuvFilterRow_SSE2(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out,
int bit_depth) {
if (bit_depth <= 10) {
SharpYuvFilterRow16_SSE2(A, B, len, best_y, out, bit_depth);
} else {
SharpYuvFilterRow32_SSE2(A, B, len, best_y, out, bit_depth);
}
}
//------------------------------------------------------------------------------
extern void InitSharpYuvSSE2(void);
WEBP_TSAN_IGNORE_FUNCTION void InitSharpYuvSSE2(void) {
SharpYuvUpdateY = SharpYuvUpdateY_SSE2;
SharpYuvUpdateRGB = SharpYuvUpdateRGB_SSE2;
SharpYuvFilterRow = SharpYuvFilterRow_SSE2;
}
#else // !WEBP_USE_SSE2
void InitSharpYuvSSE2(void) {}
#endif // WEBP_USE_SSE2
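
Reader-added gloss on the D/G pairing in SharpYuvUpdateY_SSE2 above: SSE2 lacks a 16-bit absolute-value instruction, so the code multiplies each difference by its own sign (G is -1 or +1) inside _mm_madd_epi16, which takes |D| and sums adjacent pairs into 32-bit lanes in one step. A scalar equivalent of one madd pair:

#include <stdint.h>

static int32_t MaddAbsPair(int16_t d0, int16_t d1) {
  const int32_t g0 = (d0 < 0) ? -1 : 1;
  const int32_t g1 = (d1 < 0) ? -1 : 1;
  return d0 * g0 + d1 * g1;  // == abs(d0) + abs(d1), widened to 32 bits
}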

@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 1
#define DEC_MIN_VERSION 2
#define DEC_REV_VERSION 2
#define DEC_REV_VERSION 4
// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
// Constraints are: We need to store one 16x16 block of luma samples (y),

@ -178,7 +178,7 @@ static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) {
//------------------------------------------------------------------------------
// Decodes the next Huffman code from bit-stream.
// FillBitWindow(br) needs to be called at minimum every second call
// VP8LFillBitWindow(br) needs to be called at minimum every second call
// to ReadSymbol, in order to pre-fetch enough bits.
static WEBP_INLINE int ReadSymbol(const HuffmanCode* table,
VP8LBitReader* const br) {
@ -321,7 +321,7 @@ static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
// The first code is either 1 bit or 8 bit code.
int symbol = VP8LReadBits(br, (first_symbol_len_code == 0) ? 1 : 8);
code_lengths[symbol] = 1;
// The second code (if present), is always 8 bit long.
// The second code (if present), is always 8 bits long.
if (num_symbols == 2) {
symbol = VP8LReadBits(br, 8);
code_lengths[symbol] = 1;
@ -1281,7 +1281,7 @@ static int ExpandColorMap(int num_colors, VP8LTransform* const transform) {
uint8_t* const new_data = (uint8_t*)new_color_map;
new_color_map[0] = transform->data_[0];
for (i = 4; i < 4 * num_colors; ++i) {
// Equivalent to AddPixelEq(), on a byte-basis.
// Equivalent to VP8LAddPixels(), on a byte-basis.
new_data[i] = (data[i] + new_data[i - 4]) & 0xff;
}
for (; i < 4 * final_num_colors; ++i) {

@ -25,7 +25,7 @@
#define DMUX_MAJ_VERSION 1
#define DMUX_MIN_VERSION 2
#define DMUX_REV_VERSION 2
#define DMUX_REV_VERSION 4
typedef struct {
size_t start_; // start location of the data
@ -614,7 +614,6 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
while (f != NULL) {
const int cur_frame_set = f->frame_num_;
int frame_count = 0;
// Check frame properties.
for (; f != NULL && f->frame_num_ == cur_frame_set; f = f->next_) {
@ -649,8 +648,6 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
dmux->canvas_width_, dmux->canvas_height_)) {
return 0;
}
++frame_count;
}
}
return 1;

@ -83,7 +83,7 @@ static void ApplyAlphaMultiply_NEON(uint8_t* rgba, int alpha_first,
static int DispatchAlpha_NEON(const uint8_t* WEBP_RESTRICT alpha,
int alpha_stride, int width, int height,
uint8_t* WEBP_RESTRICT dst, int dst_stride) {
uint32_t alpha_mask = 0xffffffffu;
uint32_t alpha_mask = 0xffu;
uint8x8_t mask8 = vdup_n_u8(0xff);
uint32_t tmp[2];
int i, j;
@ -107,6 +107,7 @@ static int DispatchAlpha_NEON(const uint8_t* WEBP_RESTRICT alpha,
dst += dst_stride;
}
vst1_u8((uint8_t*)tmp, mask8);
alpha_mask *= 0x01010101;
alpha_mask &= tmp[0];
alpha_mask &= tmp[1];
return (alpha_mask != 0xffffffffu);
@ -135,7 +136,7 @@ static void DispatchAlphaToGreen_NEON(const uint8_t* WEBP_RESTRICT alpha,
static int ExtractAlpha_NEON(const uint8_t* WEBP_RESTRICT argb, int argb_stride,
int width, int height,
uint8_t* WEBP_RESTRICT alpha, int alpha_stride) {
uint32_t alpha_mask = 0xffffffffu;
uint32_t alpha_mask = 0xffu;
uint8x8_t mask8 = vdup_n_u8(0xff);
uint32_t tmp[2];
int i, j;
@ -157,6 +158,7 @@ static int ExtractAlpha_NEON(const uint8_t* WEBP_RESTRICT argb, int argb_stride,
alpha += alpha_stride;
}
vst1_u8((uint8_t*)tmp, mask8);
alpha_mask *= 0x01010101;
alpha_mask &= tmp[0];
alpha_mask &= tmp[1];
return (alpha_mask == 0xffffffffu);
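
(Reader's reading of the two NEON alpha hunks above, inferred from the code shown rather than stated upstream: the scalar tail of these functions ANDs individual 8-bit alpha values into alpha_mask, touching only its low byte, so seeding the mask with 0xffffffffu left the top three bytes permanently 0xff. Seeding with 0xffu and broadcasting via alpha_mask *= 0x01010101 just before folding in the vector mask tmp[] keeps all four bytes meaningful for the final comparison.)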

@ -11,7 +11,7 @@
//
// Author: Christian Duvivier (cduvivier@google.com)
#include "src/dsp/dsp.h"
#include "src/dsp/cpu.h"
#if defined(WEBP_HAVE_NEON_RTCD)
#include <stdio.h>

thirdparty/webp/src/dsp/cpu.h vendored Normal file
@ -0,0 +1,254 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// CPU detection functions and macros.
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DSP_CPU_H_
#define WEBP_DSP_CPU_H_
#ifdef HAVE_CONFIG_H
#include "src/webp/config.h"
#endif
#include "src/webp/types.h"
#if defined(__GNUC__)
#define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__)
#define LOCAL_GCC_PREREQ(maj, min) (LOCAL_GCC_VERSION >= (((maj) << 8) | (min)))
#else
#define LOCAL_GCC_VERSION 0
#define LOCAL_GCC_PREREQ(maj, min) 0
#endif
#if defined(__clang__)
#define LOCAL_CLANG_VERSION ((__clang_major__ << 8) | __clang_minor__)
#define LOCAL_CLANG_PREREQ(maj, min) \
(LOCAL_CLANG_VERSION >= (((maj) << 8) | (min)))
#else
#define LOCAL_CLANG_VERSION 0
#define LOCAL_CLANG_PREREQ(maj, min) 0
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#if !defined(HAVE_CONFIG_H)
#if defined(_MSC_VER) && _MSC_VER > 1310 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1500 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE41 // Visual C++ SSE4.1 targets
#endif
#endif
// WEBP_HAVE_* are used to indicate the presence of the instruction set in dsp
// files without intrinsics, allowing the corresponding Init() to be called.
// Files containing intrinsics will need to be built targeting the instruction
// set, so they should succeed on one of the earlier tests.
#if (defined(__SSE2__) || defined(WEBP_MSC_SSE2)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_SSE2))
#define WEBP_USE_SSE2
#endif
#if defined(WEBP_USE_SSE2) && !defined(WEBP_HAVE_SSE2)
#define WEBP_HAVE_SSE2
#endif
#if (defined(__SSE4_1__) || defined(WEBP_MSC_SSE41)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_SSE41))
#define WEBP_USE_SSE41
#endif
#if defined(WEBP_USE_SSE41) && !defined(WEBP_HAVE_SSE41)
#define WEBP_HAVE_SSE41
#endif
#undef WEBP_MSC_SSE41
#undef WEBP_MSC_SSE2
// The intrinsics currently cause compiler errors with arm-nacl-gcc and the
// inline assembly would need to be modified for use with Native Client.
#if ((defined(__ARM_NEON__) || defined(__aarch64__)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_NEON))) && \
!defined(__native_client__)
#define WEBP_USE_NEON
#endif
#if !defined(WEBP_USE_NEON) && defined(__ANDROID__) && \
defined(__ARM_ARCH_7A__) && defined(HAVE_CPU_FEATURES_H)
#define WEBP_ANDROID_NEON // Android targets that may have NEON
#define WEBP_USE_NEON
#endif
// Note: ARM64 is supported in Visual Studio 2017, but requires the direct
// inclusion of arm64_neon.h; Visual Studio 2019 includes this file in
// arm_neon.h. Compile errors were seen with Visual Studio 2019 16.4 with
// vtbl4_u8(); a fix was made in 16.6.
#if defined(_MSC_VER) && ((_MSC_VER >= 1700 && defined(_M_ARM)) || \
(_MSC_VER >= 1926 && defined(_M_ARM64)))
#define WEBP_USE_NEON
#define WEBP_USE_INTRINSICS
#endif
#if defined(WEBP_USE_NEON) && !defined(WEBP_HAVE_NEON)
#define WEBP_HAVE_NEON
#endif
#if defined(__mips__) && !defined(__mips64) && defined(__mips_isa_rev) && \
(__mips_isa_rev >= 1) && (__mips_isa_rev < 6)
#define WEBP_USE_MIPS32
#if (__mips_isa_rev >= 2)
#define WEBP_USE_MIPS32_R2
#if defined(__mips_dspr2) || (defined(__mips_dsp_rev) && __mips_dsp_rev >= 2)
#define WEBP_USE_MIPS_DSP_R2
#endif
#endif
#endif
#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
#define WEBP_USE_MSA
#endif
#ifndef WEBP_DSP_OMIT_C_CODE
#define WEBP_DSP_OMIT_C_CODE 1
#endif
#if defined(WEBP_USE_NEON) && WEBP_DSP_OMIT_C_CODE
#define WEBP_NEON_OMIT_C_CODE 1
#else
#define WEBP_NEON_OMIT_C_CODE 0
#endif
#if !(LOCAL_CLANG_PREREQ(3, 8) || LOCAL_GCC_PREREQ(4, 8) || \
defined(__aarch64__))
#define WEBP_NEON_WORK_AROUND_GCC 1
#else
#define WEBP_NEON_WORK_AROUND_GCC 0
#endif
// This macro prevents thread_sanitizer from reporting known concurrent writes.
#define WEBP_TSAN_IGNORE_FUNCTION
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#undef WEBP_TSAN_IGNORE_FUNCTION
#define WEBP_TSAN_IGNORE_FUNCTION __attribute__((no_sanitize_thread))
#endif
#endif
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define WEBP_MSAN
#endif
#endif
#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
#include <pthread.h> // NOLINT
#define WEBP_DSP_INIT(func) \
do { \
static volatile VP8CPUInfo func##_last_cpuinfo_used = \
(VP8CPUInfo)&func##_last_cpuinfo_used; \
static pthread_mutex_t func##_lock = PTHREAD_MUTEX_INITIALIZER; \
if (pthread_mutex_lock(&func##_lock)) break; \
if (func##_last_cpuinfo_used != VP8GetCPUInfo) func(); \
func##_last_cpuinfo_used = VP8GetCPUInfo; \
(void)pthread_mutex_unlock(&func##_lock); \
} while (0)
#else // !(defined(WEBP_USE_THREAD) && !defined(_WIN32))
#define WEBP_DSP_INIT(func) \
do { \
static volatile VP8CPUInfo func##_last_cpuinfo_used = \
(VP8CPUInfo)&func##_last_cpuinfo_used; \
if (func##_last_cpuinfo_used == VP8GetCPUInfo) break; \
func(); \
func##_last_cpuinfo_used = VP8GetCPUInfo; \
} while (0)
#endif // defined(WEBP_USE_THREAD) && !defined(_WIN32)
// Defines an Init + helper function that control multiple initialization of
// function pointers / tables.
/* Usage:
WEBP_DSP_INIT_FUNC(InitFunc) {
...function body
}
*/
#define WEBP_DSP_INIT_FUNC(name) \
static WEBP_TSAN_IGNORE_FUNCTION void name##_body(void); \
WEBP_TSAN_IGNORE_FUNCTION void name(void) { WEBP_DSP_INIT(name##_body); } \
static WEBP_TSAN_IGNORE_FUNCTION void name##_body(void)
#define WEBP_UBSAN_IGNORE_UNDEF
#define WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW
#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(no_sanitize)
// This macro prevents the undefined behavior sanitizer from reporting
// failures. This is only meant to silence unaligned loads on platforms that
// are known to support them.
#undef WEBP_UBSAN_IGNORE_UNDEF
#define WEBP_UBSAN_IGNORE_UNDEF __attribute__((no_sanitize("undefined")))
// This macro prevents the undefined behavior sanitizer from reporting
// failures related to unsigned integer overflows. This is only meant to
// silence cases where this well defined behavior is expected.
#undef WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW
#define WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW \
__attribute__((no_sanitize("unsigned-integer-overflow")))
#endif
#endif
// If 'ptr' is NULL, returns NULL. Otherwise returns 'ptr + off'.
// Prevents undefined behavior sanitizer nullptr-with-nonzero-offset warning.
#if !defined(WEBP_OFFSET_PTR)
#define WEBP_OFFSET_PTR(ptr, off) (((ptr) == NULL) ? NULL : ((ptr) + (off)))
#endif
// Regularize the definition of WEBP_SWAP_16BIT_CSP (backward compatibility)
#if !defined(WEBP_SWAP_16BIT_CSP)
#define WEBP_SWAP_16BIT_CSP 0
#endif
// some endian fix (e.g.: mips-gcc doesn't define __BIG_ENDIAN__)
#if !defined(WORDS_BIGENDIAN) && \
(defined(__BIG_ENDIAN__) || defined(_M_PPC) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)))
#define WORDS_BIGENDIAN
#endif
typedef enum {
kSSE2,
kSSE3,
kSlowSSSE3, // special feature for slow SSSE3 architectures
kSSE4_1,
kAVX,
kAVX2,
kNEON,
kMIPS32,
kMIPSdspR2,
kMSA
} CPUFeature;
#ifdef __cplusplus
extern "C" {
#endif
// returns true if the CPU supports the feature.
typedef int (*VP8CPUInfo)(CPUFeature feature);
WEBP_EXTERN VP8CPUInfo VP8GetCPUInfo;
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_DSP_CPU_H_
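
Reader-added usage sketch for the hook declared above; the NULL guard mirrors how the init code later in this commit calls it:

#include "src/dsp/cpu.h"

static int HasSse41(void) {
  // VP8GetCPUInfo may be replaced by the embedder or compiled out to NULL.
  return (VP8GetCPUInfo != NULL) && VP8GetCPUInfo(kSSE4_1);
}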

@ -18,6 +18,7 @@
#include "src/webp/config.h"
#endif
#include "src/dsp/cpu.h"
#include "src/webp/types.h"
#ifdef __cplusplus
@ -43,225 +44,6 @@ extern "C" {
#define WEBP_RESTRICT
#endif
//------------------------------------------------------------------------------
// CPU detection
#if defined(__GNUC__)
# define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__)
# define LOCAL_GCC_PREREQ(maj, min) \
(LOCAL_GCC_VERSION >= (((maj) << 8) | (min)))
#else
# define LOCAL_GCC_VERSION 0
# define LOCAL_GCC_PREREQ(maj, min) 0
#endif
#if defined(__clang__)
# define LOCAL_CLANG_VERSION ((__clang_major__ << 8) | __clang_minor__)
# define LOCAL_CLANG_PREREQ(maj, min) \
(LOCAL_CLANG_VERSION >= (((maj) << 8) | (min)))
#else
# define LOCAL_CLANG_VERSION 0
# define LOCAL_CLANG_PREREQ(maj, min) 0
#endif
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
#if !defined(HAVE_CONFIG_H)
#if defined(_MSC_VER) && _MSC_VER > 1310 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1500 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE41 // Visual C++ SSE4.1 targets
#endif
#endif
// WEBP_HAVE_* are used to indicate the presence of the instruction set in dsp
// files without intrinsics, allowing the corresponding Init() to be called.
// Files containing intrinsics will need to be built targeting the instruction
// set so should succeed on one of the earlier tests.
#if (defined(__SSE2__) || defined(WEBP_MSC_SSE2)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_SSE2))
#define WEBP_USE_SSE2
#endif
#if defined(WEBP_USE_SSE2) && !defined(WEBP_HAVE_SSE2)
#define WEBP_HAVE_SSE2
#endif
#if (defined(__SSE4_1__) || defined(WEBP_MSC_SSE41)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_SSE41))
#define WEBP_USE_SSE41
#endif
#if defined(WEBP_USE_SSE41) && !defined(WEBP_HAVE_SSE41)
#define WEBP_HAVE_SSE41
#endif
#undef WEBP_MSC_SSE41
#undef WEBP_MSC_SSE2
// The intrinsics currently cause compiler errors with arm-nacl-gcc and the
// inline assembly would need to be modified for use with Native Client.
#if ((defined(__ARM_NEON__) || defined(__aarch64__)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_NEON))) && \
!defined(__native_client__)
#define WEBP_USE_NEON
#endif
#if !defined(WEBP_USE_NEON) && defined(__ANDROID__) && \
defined(__ARM_ARCH_7A__) && defined(HAVE_CPU_FEATURES_H)
#define WEBP_ANDROID_NEON // Android targets that may have NEON
#define WEBP_USE_NEON
#endif
// Note: ARM64 is supported in Visual Studio 2017, but requires the direct
// inclusion of arm64_neon.h; Visual Studio 2019 includes this file in
// arm_neon.h.
#if defined(_MSC_VER) && \
((_MSC_VER >= 1700 && defined(_M_ARM)) || \
(_MSC_VER >= 1920 && defined(_M_ARM64)))
#define WEBP_USE_NEON
#define WEBP_USE_INTRINSICS
#endif
#if defined(WEBP_USE_NEON) && !defined(WEBP_HAVE_NEON)
#define WEBP_HAVE_NEON
#endif
#if defined(__mips__) && !defined(__mips64) && \
defined(__mips_isa_rev) && (__mips_isa_rev >= 1) && (__mips_isa_rev < 6)
#define WEBP_USE_MIPS32
#if (__mips_isa_rev >= 2)
#define WEBP_USE_MIPS32_R2
#if defined(__mips_dspr2) || (defined(__mips_dsp_rev) && __mips_dsp_rev >= 2)
#define WEBP_USE_MIPS_DSP_R2
#endif
#endif
#endif
#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
#define WEBP_USE_MSA
#endif
#ifndef WEBP_DSP_OMIT_C_CODE
#define WEBP_DSP_OMIT_C_CODE 1
#endif
#if defined(WEBP_USE_NEON) && WEBP_DSP_OMIT_C_CODE
#define WEBP_NEON_OMIT_C_CODE 1
#else
#define WEBP_NEON_OMIT_C_CODE 0
#endif
#if !(LOCAL_CLANG_PREREQ(3,8) || LOCAL_GCC_PREREQ(4,8) || defined(__aarch64__))
#define WEBP_NEON_WORK_AROUND_GCC 1
#else
#define WEBP_NEON_WORK_AROUND_GCC 0
#endif
// This macro prevents thread_sanitizer from reporting known concurrent writes.
#define WEBP_TSAN_IGNORE_FUNCTION
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#undef WEBP_TSAN_IGNORE_FUNCTION
#define WEBP_TSAN_IGNORE_FUNCTION __attribute__((no_sanitize_thread))
#endif
#endif
#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
#include <pthread.h> // NOLINT
#define WEBP_DSP_INIT(func) do { \
static volatile VP8CPUInfo func ## _last_cpuinfo_used = \
(VP8CPUInfo)&func ## _last_cpuinfo_used; \
static pthread_mutex_t func ## _lock = PTHREAD_MUTEX_INITIALIZER; \
if (pthread_mutex_lock(&func ## _lock)) break; \
if (func ## _last_cpuinfo_used != VP8GetCPUInfo) func(); \
func ## _last_cpuinfo_used = VP8GetCPUInfo; \
(void)pthread_mutex_unlock(&func ## _lock); \
} while (0)
#else // !(defined(WEBP_USE_THREAD) && !defined(_WIN32))
#define WEBP_DSP_INIT(func) do { \
static volatile VP8CPUInfo func ## _last_cpuinfo_used = \
(VP8CPUInfo)&func ## _last_cpuinfo_used; \
if (func ## _last_cpuinfo_used == VP8GetCPUInfo) break; \
func(); \
func ## _last_cpuinfo_used = VP8GetCPUInfo; \
} while (0)
#endif // defined(WEBP_USE_THREAD) && !defined(_WIN32)
// Defines an Init + helper function that control multiple initialization of
// function pointers / tables.
/* Usage:
WEBP_DSP_INIT_FUNC(InitFunc) {
...function body
}
*/
#define WEBP_DSP_INIT_FUNC(name) \
static WEBP_TSAN_IGNORE_FUNCTION void name ## _body(void); \
WEBP_TSAN_IGNORE_FUNCTION void name(void) { \
WEBP_DSP_INIT(name ## _body); \
} \
static WEBP_TSAN_IGNORE_FUNCTION void name ## _body(void)
#define WEBP_UBSAN_IGNORE_UNDEF
#define WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW
#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(no_sanitize)
// This macro prevents the undefined behavior sanitizer from reporting
// failures. This is only meant to silence unaligned loads on platforms that
// are known to support them.
#undef WEBP_UBSAN_IGNORE_UNDEF
#define WEBP_UBSAN_IGNORE_UNDEF \
__attribute__((no_sanitize("undefined")))
// This macro prevents the undefined behavior sanitizer from reporting
// failures related to unsigned integer overflows. This is only meant to
// silence cases where this well defined behavior is expected.
#undef WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW
#define WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW \
__attribute__((no_sanitize("unsigned-integer-overflow")))
#endif
#endif
// If 'ptr' is NULL, returns NULL. Otherwise returns 'ptr + off'.
// Prevents undefined behavior sanitizer nullptr-with-nonzero-offset warning.
#if !defined(WEBP_OFFSET_PTR)
#define WEBP_OFFSET_PTR(ptr, off) (((ptr) == NULL) ? NULL : ((ptr) + (off)))
#endif
// Regularize the definition of WEBP_SWAP_16BIT_CSP (backward compatibility)
#if !defined(WEBP_SWAP_16BIT_CSP)
#define WEBP_SWAP_16BIT_CSP 0
#endif
// some endian fix (e.g.: mips-gcc doesn't define __BIG_ENDIAN__)
#if !defined(WORDS_BIGENDIAN) && \
(defined(__BIG_ENDIAN__) || defined(_M_PPC) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)))
#define WORDS_BIGENDIAN
#endif
typedef enum {
kSSE2,
kSSE3,
kSlowSSSE3, // special feature for slow SSSE3 architectures
kSSE4_1,
kAVX,
kAVX2,
kNEON,
kMIPS32,
kMIPSdspR2,
kMSA
} CPUFeature;
// returns true if the CPU supports the feature.
typedef int (*VP8CPUInfo)(CPUFeature feature);
WEBP_EXTERN VP8CPUInfo VP8GetCPUInfo;
//------------------------------------------------------------------------------
// Init stub generator
@ -550,15 +332,6 @@ extern void WebPConvertARGBToUV_C(const uint32_t* argb, uint8_t* u, uint8_t* v,
extern void WebPConvertRGBA32ToUV_C(const uint16_t* rgb,
uint8_t* u, uint8_t* v, int width);
// utilities for accurate RGB->YUV conversion
extern uint64_t (*WebPSharpYUVUpdateY)(const uint16_t* src, const uint16_t* ref,
uint16_t* dst, int len);
extern void (*WebPSharpYUVUpdateRGB)(const int16_t* src, const int16_t* ref,
int16_t* dst, int len);
extern void (*WebPSharpYUVFilterRow)(const int16_t* A, const int16_t* B,
int len,
const uint16_t* best_y, uint16_t* out);
// Must be called before using the above.
void WebPInitConvertARGBToYUV(void);
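
(Reader's note: the large block removed from this header above is the CPU-detection section, which reappears lightly reformatted as the new src/dsp/cpu.h earlier in this commit; dsp.h now gets it through the added #include "src/dsp/cpu.h". The removed WebPSharpYUV* pointer declarations have likewise moved, renamed SharpYuv*, into sharpyuv/sharpyuv_dsp.h.)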

@ -182,8 +182,8 @@ extern VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16];
// -----------------------------------------------------------------------------
// Huffman-cost related functions.
typedef double (*VP8LCostFunc)(const uint32_t* population, int length);
typedef double (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y,
typedef float (*VP8LCostFunc)(const uint32_t* population, int length);
typedef float (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y,
int length);
typedef float (*VP8LCombinedShannonEntropyFunc)(const int X[256],
const int Y[256]);
@ -198,7 +198,7 @@ typedef struct { // small struct to hold counters
} VP8LStreaks;
typedef struct { // small struct to hold bit entropy results
double entropy; // entropy
float entropy; // entropy
uint32_t sum; // sum of the population
int nonzeros; // number of non-zero elements in the population
uint32_t max_val; // maximum value in the population

@ -402,7 +402,7 @@ static float FastLog2Slow_C(uint32_t v) {
// Compute the combined Shannon entropy for distribution {X} and {X+Y}
static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) {
int i;
double retval = 0.;
float retval = 0.f;
int sumX = 0, sumXY = 0;
for (i = 0; i < 256; ++i) {
const int x = X[i];
@ -418,7 +418,7 @@ static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) {
}
}
retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
return (float)retval;
return retval;
}
void VP8LBitEntropyInit(VP8LBitEntropy* const entropy) {
@ -636,17 +636,17 @@ void VP8LBundleColorMap_C(const uint8_t* const row, int width, int xbits,
//------------------------------------------------------------------------------
static double ExtraCost_C(const uint32_t* population, int length) {
static float ExtraCost_C(const uint32_t* population, int length) {
int i;
double cost = 0.;
float cost = 0.f;
for (i = 2; i < length - 2; ++i) cost += (i >> 1) * population[i + 2];
return cost;
}
static double ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y,
static float ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y,
int length) {
int i;
double cost = 0.;
float cost = 0.f;
for (i = 2; i < length - 2; ++i) {
const int xy = X[i + 2] + Y[i + 2];
cost += (i >> 1) * xy;

@ -103,8 +103,8 @@ static float FastLog2Slow_MIPS32(uint32_t v) {
// cost += i * *(pop + 1);
// pop += 2;
// }
// return (double)cost;
static double ExtraCost_MIPS32(const uint32_t* const population, int length) {
// return (float)cost;
static float ExtraCost_MIPS32(const uint32_t* const population, int length) {
int i, temp0, temp1;
const uint32_t* pop = &population[4];
const uint32_t* const LoopEnd = &population[length];
@ -130,7 +130,7 @@ static double ExtraCost_MIPS32(const uint32_t* const population, int length) {
: "memory", "hi", "lo"
);
return (double)((int64_t)temp0 << 32 | temp1);
return (float)((int64_t)temp0 << 32 | temp1);
}
// C version of this function:
@ -148,8 +148,8 @@ static double ExtraCost_MIPS32(const uint32_t* const population, int length) {
// pX += 2;
// pY += 2;
// }
// return (double)cost;
static double ExtraCostCombined_MIPS32(const uint32_t* const X,
// return (float)cost;
static float ExtraCostCombined_MIPS32(const uint32_t* const X,
const uint32_t* const Y, int length) {
int i, temp0, temp1, temp2, temp3;
const uint32_t* pX = &X[4];
@ -183,7 +183,7 @@ static double ExtraCostCombined_MIPS32(const uint32_t* const X,
: "memory", "hi", "lo"
);
return (double)((int64_t)temp0 << 32 | temp1);
return (float)((int64_t)temp0 << 32 | temp1);
}
#define HUFFMAN_COST_PASS \
@ -347,24 +347,24 @@ static void GetCombinedEntropyUnrefined_MIPS32(const uint32_t X[],
static void AddVector_MIPS32(const uint32_t* pa, const uint32_t* pb,
uint32_t* pout, int size) {
uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
const uint32_t end = ((size) / 4) * 4;
const int end = ((size) / 4) * 4;
const uint32_t* const LoopEnd = pa + end;
int i;
ASM_START
ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout)
ASM_END_0
for (i = end; i < size; ++i) pout[i] = pa[i] + pb[i];
for (i = 0; i < size - end; ++i) pout[i] = pa[i] + pb[i];
}
static void AddVectorEq_MIPS32(const uint32_t* pa, uint32_t* pout, int size) {
uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
const uint32_t end = ((size) / 4) * 4;
const int end = ((size) / 4) * 4;
const uint32_t* const LoopEnd = pa + end;
int i;
ASM_START
ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout)
ASM_END_1
for (i = end; i < size; ++i) pout[i] += pa[i];
for (i = 0; i < size - end; ++i) pout[i] += pa[i];
}
#undef ASM_END_1

@ -239,7 +239,7 @@ static void AddVectorEq_SSE2(const uint32_t* a, uint32_t* out, int size) {
static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) {
int i;
double retval = 0.;
float retval = 0.f;
int sumX = 0, sumXY = 0;
const __m128i zero = _mm_setzero_si128();
@ -273,7 +273,7 @@ static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) {
}
}
retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
return (float)retval;
return retval;
}
#else

@ -194,50 +194,6 @@ void WebPConvertRGBA32ToUV_C(const uint16_t* rgb,
//-----------------------------------------------------------------------------
#if !WEBP_NEON_OMIT_C_CODE
#define MAX_Y ((1 << 10) - 1) // 10b precision over 16b-arithmetic
static uint16_t clip_y(int v) {
return (v < 0) ? 0 : (v > MAX_Y) ? MAX_Y : (uint16_t)v;
}
static uint64_t SharpYUVUpdateY_C(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len) {
uint64_t diff = 0;
int i;
for (i = 0; i < len; ++i) {
const int diff_y = ref[i] - src[i];
const int new_y = (int)dst[i] + diff_y;
dst[i] = clip_y(new_y);
diff += (uint64_t)abs(diff_y);
}
return diff;
}
static void SharpYUVUpdateRGB_C(const int16_t* ref, const int16_t* src,
int16_t* dst, int len) {
int i;
for (i = 0; i < len; ++i) {
const int diff_uv = ref[i] - src[i];
dst[i] += diff_uv;
}
}
static void SharpYUVFilterRow_C(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out) {
int i;
for (i = 0; i < len; ++i, ++A, ++B) {
const int v0 = (A[0] * 9 + A[1] * 3 + B[0] * 3 + B[1] + 8) >> 4;
const int v1 = (A[1] * 9 + A[0] * 3 + B[1] * 3 + B[0] + 8) >> 4;
out[2 * i + 0] = clip_y(best_y[2 * i + 0] + v0);
out[2 * i + 1] = clip_y(best_y[2 * i + 1] + v1);
}
}
#endif // !WEBP_NEON_OMIT_C_CODE
#undef MAX_Y
//-----------------------------------------------------------------------------
void (*WebPConvertRGB24ToY)(const uint8_t* rgb, uint8_t* y, int width);
void (*WebPConvertBGR24ToY)(const uint8_t* bgr, uint8_t* y, int width);
void (*WebPConvertRGBA32ToUV)(const uint16_t* rgb,
@ -247,18 +203,9 @@ void (*WebPConvertARGBToY)(const uint32_t* argb, uint8_t* y, int width);
void (*WebPConvertARGBToUV)(const uint32_t* argb, uint8_t* u, uint8_t* v,
int src_width, int do_store);
uint64_t (*WebPSharpYUVUpdateY)(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len);
void (*WebPSharpYUVUpdateRGB)(const int16_t* ref, const int16_t* src,
int16_t* dst, int len);
void (*WebPSharpYUVFilterRow)(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out);
extern void WebPInitConvertARGBToYUVSSE2(void);
extern void WebPInitConvertARGBToYUVSSE41(void);
extern void WebPInitConvertARGBToYUVNEON(void);
extern void WebPInitSharpYUVSSE2(void);
extern void WebPInitSharpYUVNEON(void);
WEBP_DSP_INIT_FUNC(WebPInitConvertARGBToYUV) {
WebPConvertARGBToY = ConvertARGBToY_C;
@ -269,17 +216,10 @@ WEBP_DSP_INIT_FUNC(WebPInitConvertARGBToYUV) {
WebPConvertRGBA32ToUV = WebPConvertRGBA32ToUV_C;
#if !WEBP_NEON_OMIT_C_CODE
WebPSharpYUVUpdateY = SharpYUVUpdateY_C;
WebPSharpYUVUpdateRGB = SharpYUVUpdateRGB_C;
WebPSharpYUVFilterRow = SharpYUVFilterRow_C;
#endif
if (VP8GetCPUInfo != NULL) {
#if defined(WEBP_HAVE_SSE2)
if (VP8GetCPUInfo(kSSE2)) {
WebPInitConvertARGBToYUVSSE2();
WebPInitSharpYUVSSE2();
}
#endif // WEBP_HAVE_SSE2
#if defined(WEBP_HAVE_SSE41)
@ -293,7 +233,6 @@ WEBP_DSP_INIT_FUNC(WebPInitConvertARGBToYUV) {
if (WEBP_NEON_OMIT_C_CODE ||
(VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) {
WebPInitConvertARGBToYUVNEON();
WebPInitSharpYUVNEON();
}
#endif // WEBP_HAVE_NEON
@ -302,7 +241,4 @@ WEBP_DSP_INIT_FUNC(WebPInitConvertARGBToYUV) {
assert(WebPConvertRGB24ToY != NULL);
assert(WebPConvertBGR24ToY != NULL);
assert(WebPConvertRGBA32ToUV != NULL);
assert(WebPSharpYUVUpdateY != NULL);
assert(WebPSharpYUVUpdateRGB != NULL);
assert(WebPSharpYUVFilterRow != NULL);
}
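
(Reader's note: this file's hunks and the NEON/SSE2 ones that follow remove the old Sharp-YUV kernels from src/dsp, where they were hard-wired to 10-bit precision via MAX_Y; their replacements are the sharpyuv_dsp.c, sharpyuv_neon.c and sharpyuv_sse2.c files added above, which take an explicit bit_depth argument instead.)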

@ -173,116 +173,8 @@ WEBP_TSAN_IGNORE_FUNCTION void WebPInitConvertARGBToYUVNEON(void) {
WebPConvertRGBA32ToUV = ConvertRGBA32ToUV_NEON;
}
//------------------------------------------------------------------------------
#define MAX_Y ((1 << 10) - 1) // 10b precision over 16b-arithmetic
static uint16_t clip_y_NEON(int v) {
return (v < 0) ? 0 : (v > MAX_Y) ? MAX_Y : (uint16_t)v;
}
static uint64_t SharpYUVUpdateY_NEON(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len) {
int i;
const int16x8_t zero = vdupq_n_s16(0);
const int16x8_t max = vdupq_n_s16(MAX_Y);
uint64x2_t sum = vdupq_n_u64(0);
uint64_t diff;
for (i = 0; i + 8 <= len; i += 8) {
const int16x8_t A = vreinterpretq_s16_u16(vld1q_u16(ref + i));
const int16x8_t B = vreinterpretq_s16_u16(vld1q_u16(src + i));
const int16x8_t C = vreinterpretq_s16_u16(vld1q_u16(dst + i));
const int16x8_t D = vsubq_s16(A, B); // diff_y
const int16x8_t F = vaddq_s16(C, D); // new_y
const uint16x8_t H =
vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(F, max), zero));
const int16x8_t I = vabsq_s16(D); // abs(diff_y)
vst1q_u16(dst + i, H);
sum = vpadalq_u32(sum, vpaddlq_u16(vreinterpretq_u16_s16(I)));
}
diff = vgetq_lane_u64(sum, 0) + vgetq_lane_u64(sum, 1);
for (; i < len; ++i) {
const int diff_y = ref[i] - src[i];
const int new_y = (int)(dst[i]) + diff_y;
dst[i] = clip_y_NEON(new_y);
diff += (uint64_t)(abs(diff_y));
}
return diff;
}
static void SharpYUVUpdateRGB_NEON(const int16_t* ref, const int16_t* src,
int16_t* dst, int len) {
int i;
for (i = 0; i + 8 <= len; i += 8) {
const int16x8_t A = vld1q_s16(ref + i);
const int16x8_t B = vld1q_s16(src + i);
const int16x8_t C = vld1q_s16(dst + i);
const int16x8_t D = vsubq_s16(A, B); // diff_uv
const int16x8_t E = vaddq_s16(C, D); // new_uv
vst1q_s16(dst + i, E);
}
for (; i < len; ++i) {
const int diff_uv = ref[i] - src[i];
dst[i] += diff_uv;
}
}
static void SharpYUVFilterRow_NEON(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out) {
int i;
const int16x8_t max = vdupq_n_s16(MAX_Y);
const int16x8_t zero = vdupq_n_s16(0);
for (i = 0; i + 8 <= len; i += 8) {
const int16x8_t a0 = vld1q_s16(A + i + 0);
const int16x8_t a1 = vld1q_s16(A + i + 1);
const int16x8_t b0 = vld1q_s16(B + i + 0);
const int16x8_t b1 = vld1q_s16(B + i + 1);
const int16x8_t a0b1 = vaddq_s16(a0, b1);
const int16x8_t a1b0 = vaddq_s16(a1, b0);
const int16x8_t a0a1b0b1 = vaddq_s16(a0b1, a1b0); // A0+A1+B0+B1
const int16x8_t a0b1_2 = vaddq_s16(a0b1, a0b1); // 2*(A0+B1)
const int16x8_t a1b0_2 = vaddq_s16(a1b0, a1b0); // 2*(A1+B0)
const int16x8_t c0 = vshrq_n_s16(vaddq_s16(a0b1_2, a0a1b0b1), 3);
const int16x8_t c1 = vshrq_n_s16(vaddq_s16(a1b0_2, a0a1b0b1), 3);
const int16x8_t d0 = vaddq_s16(c1, a0);
const int16x8_t d1 = vaddq_s16(c0, a1);
const int16x8_t e0 = vrshrq_n_s16(d0, 1);
const int16x8_t e1 = vrshrq_n_s16(d1, 1);
const int16x8x2_t f = vzipq_s16(e0, e1);
const int16x8_t g0 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 0));
const int16x8_t g1 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 8));
const int16x8_t h0 = vaddq_s16(g0, f.val[0]);
const int16x8_t h1 = vaddq_s16(g1, f.val[1]);
const int16x8_t i0 = vmaxq_s16(vminq_s16(h0, max), zero);
const int16x8_t i1 = vmaxq_s16(vminq_s16(h1, max), zero);
vst1q_u16(out + 2 * i + 0, vreinterpretq_u16_s16(i0));
vst1q_u16(out + 2 * i + 8, vreinterpretq_u16_s16(i1));
}
for (; i < len; ++i) {
const int a0b1 = A[i + 0] + B[i + 1];
const int a1b0 = A[i + 1] + B[i + 0];
const int a0a1b0b1 = a0b1 + a1b0 + 8;
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
out[2 * i + 0] = clip_y_NEON(best_y[2 * i + 0] + v0);
out[2 * i + 1] = clip_y_NEON(best_y[2 * i + 1] + v1);
}
}
#undef MAX_Y
//------------------------------------------------------------------------------
extern void WebPInitSharpYUVNEON(void);
WEBP_TSAN_IGNORE_FUNCTION void WebPInitSharpYUVNEON(void) {
WebPSharpYUVUpdateY = SharpYUVUpdateY_NEON;
WebPSharpYUVUpdateRGB = SharpYUVUpdateRGB_NEON;
WebPSharpYUVFilterRow = SharpYUVFilterRow_NEON;
}
#else // !WEBP_USE_NEON
WEBP_DSP_INIT_STUB(WebPInitConvertARGBToYUVNEON)
WEBP_DSP_INIT_STUB(WebPInitSharpYUVNEON)
#endif // WEBP_USE_NEON

View File

@ -747,128 +747,9 @@ WEBP_TSAN_IGNORE_FUNCTION void WebPInitConvertARGBToYUVSSE2(void) {
WebPConvertRGBA32ToUV = ConvertRGBA32ToUV_SSE2;
}
//------------------------------------------------------------------------------
#define MAX_Y ((1 << 10) - 1) // 10b precision over 16b-arithmetic
static uint16_t clip_y(int v) {
return (v < 0) ? 0 : (v > MAX_Y) ? MAX_Y : (uint16_t)v;
}
static uint64_t SharpYUVUpdateY_SSE2(const uint16_t* ref, const uint16_t* src,
uint16_t* dst, int len) {
uint64_t diff = 0;
uint32_t tmp[4];
int i;
const __m128i zero = _mm_setzero_si128();
const __m128i max = _mm_set1_epi16(MAX_Y);
const __m128i one = _mm_set1_epi16(1);
__m128i sum = zero;
for (i = 0; i + 8 <= len; i += 8) {
const __m128i A = _mm_loadu_si128((const __m128i*)(ref + i));
const __m128i B = _mm_loadu_si128((const __m128i*)(src + i));
const __m128i C = _mm_loadu_si128((const __m128i*)(dst + i));
const __m128i D = _mm_sub_epi16(A, B); // diff_y
const __m128i E = _mm_cmpgt_epi16(zero, D); // sign (-1 or 0)
const __m128i F = _mm_add_epi16(C, D); // new_y
const __m128i G = _mm_or_si128(E, one); // -1 or 1
const __m128i H = _mm_max_epi16(_mm_min_epi16(F, max), zero);
const __m128i I = _mm_madd_epi16(D, G); // sum(abs(...))
_mm_storeu_si128((__m128i*)(dst + i), H);
sum = _mm_add_epi32(sum, I);
}
_mm_storeu_si128((__m128i*)tmp, sum);
diff = tmp[3] + tmp[2] + tmp[1] + tmp[0];
for (; i < len; ++i) {
const int diff_y = ref[i] - src[i];
const int new_y = (int)dst[i] + diff_y;
dst[i] = clip_y(new_y);
diff += (uint64_t)abs(diff_y);
}
return diff;
}
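The E/G/I steps above deserve a note: SSE2 (pre-SSSE3) has no 16-bit absolute value, so the loop synthesizes it from a sign mask, and _mm_madd_epi16 both multiplies and pairwise-widens the sums to 32 bits. One lane, in scalar form:

  const int d = -7;                 // one lane of D = ref - src
  const int e = (d < 0) ? -1 : 0;   // _mm_cmpgt_epi16(zero, D)
  const int g = e | 1;              // -1 if negative, else +1
  const int abs_d = d * g;          // == 7, i.e. |d|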
static void SharpYUVUpdateRGB_SSE2(const int16_t* ref, const int16_t* src,
int16_t* dst, int len) {
int i = 0;
for (i = 0; i + 8 <= len; i += 8) {
const __m128i A = _mm_loadu_si128((const __m128i*)(ref + i));
const __m128i B = _mm_loadu_si128((const __m128i*)(src + i));
const __m128i C = _mm_loadu_si128((const __m128i*)(dst + i));
const __m128i D = _mm_sub_epi16(A, B); // diff_uv
const __m128i E = _mm_add_epi16(C, D); // new_uv
_mm_storeu_si128((__m128i*)(dst + i), E);
}
for (; i < len; ++i) {
const int diff_uv = ref[i] - src[i];
dst[i] += diff_uv;
}
}
static void SharpYUVFilterRow_SSE2(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out) {
int i;
const __m128i kCst8 = _mm_set1_epi16(8);
const __m128i max = _mm_set1_epi16(MAX_Y);
const __m128i zero = _mm_setzero_si128();
for (i = 0; i + 8 <= len; i += 8) {
const __m128i a0 = _mm_loadu_si128((const __m128i*)(A + i + 0));
const __m128i a1 = _mm_loadu_si128((const __m128i*)(A + i + 1));
const __m128i b0 = _mm_loadu_si128((const __m128i*)(B + i + 0));
const __m128i b1 = _mm_loadu_si128((const __m128i*)(B + i + 1));
const __m128i a0b1 = _mm_add_epi16(a0, b1);
const __m128i a1b0 = _mm_add_epi16(a1, b0);
const __m128i a0a1b0b1 = _mm_add_epi16(a0b1, a1b0); // A0+A1+B0+B1
const __m128i a0a1b0b1_8 = _mm_add_epi16(a0a1b0b1, kCst8);
const __m128i a0b1_2 = _mm_add_epi16(a0b1, a0b1); // 2*(A0+B1)
const __m128i a1b0_2 = _mm_add_epi16(a1b0, a1b0); // 2*(A1+B0)
const __m128i c0 = _mm_srai_epi16(_mm_add_epi16(a0b1_2, a0a1b0b1_8), 3);
const __m128i c1 = _mm_srai_epi16(_mm_add_epi16(a1b0_2, a0a1b0b1_8), 3);
const __m128i d0 = _mm_add_epi16(c1, a0);
const __m128i d1 = _mm_add_epi16(c0, a1);
const __m128i e0 = _mm_srai_epi16(d0, 1);
const __m128i e1 = _mm_srai_epi16(d1, 1);
const __m128i f0 = _mm_unpacklo_epi16(e0, e1);
const __m128i f1 = _mm_unpackhi_epi16(e0, e1);
const __m128i g0 = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 0));
const __m128i g1 = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 8));
const __m128i h0 = _mm_add_epi16(g0, f0);
const __m128i h1 = _mm_add_epi16(g1, f1);
const __m128i i0 = _mm_max_epi16(_mm_min_epi16(h0, max), zero);
const __m128i i1 = _mm_max_epi16(_mm_min_epi16(h1, max), zero);
_mm_storeu_si128((__m128i*)(out + 2 * i + 0), i0);
_mm_storeu_si128((__m128i*)(out + 2 * i + 8), i1);
}
for (; i < len; ++i) {
// (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 =
// = (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4
// We reuse the common sub-expressions.
const int a0b1 = A[i + 0] + B[i + 1];
const int a1b0 = A[i + 1] + B[i + 0];
const int a0a1b0b1 = a0b1 + a1b0 + 8;
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
out[2 * i + 0] = clip_y(best_y[2 * i + 0] + v0);
out[2 * i + 1] = clip_y(best_y[2 * i + 1] + v1);
}
}
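The identity in the scalar tail's comment checks out numerically; with A0 = 100, A1 = 20, B0 = 40, B1 = 4:

  direct: (9*100 + 3*20 + 3*40 + 4 + 8) >> 4 = 1092 >> 4 = 68
  reused: a0b1 = 104, a1b0 = 60, a0a1b0b1 = 104 + 60 + 8 = 172
          v0 = (8*100 + 2*60 + 172) >> 4 = 1092 >> 4 = 68

so factoring out the shared sums changes nothing but the operation count.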
#undef MAX_Y
//------------------------------------------------------------------------------
extern void WebPInitSharpYUVSSE2(void);
WEBP_TSAN_IGNORE_FUNCTION void WebPInitSharpYUVSSE2(void) {
WebPSharpYUVUpdateY = SharpYUVUpdateY_SSE2;
WebPSharpYUVUpdateRGB = SharpYUVUpdateRGB_SSE2;
WebPSharpYUVFilterRow = SharpYUVFilterRow_SSE2;
}
#else // !WEBP_USE_SSE2
WEBP_DSP_INIT_STUB(WebPInitSamplersSSE2)
WEBP_DSP_INIT_STUB(WebPInitConvertARGBToYUVSSE2)
WEBP_DSP_INIT_STUB(WebPInitSharpYUVSSE2)
#endif // WEBP_USE_SSE2

View File

@ -86,7 +86,7 @@ static int EncodeLossless(const uint8_t* const data, int width, int height,
// a decoder bug related to alpha with color cache.
// See: https://code.google.com/p/webp/issues/detail?id=239
// Need to re-enable this later.
ok = (VP8LEncodeStream(&config, &picture, bw, 0 /*use_cache*/) == VP8_ENC_OK);
ok = VP8LEncodeStream(&config, &picture, bw, /*use_cache=*/0);
WebPPictureFree(&picture);
ok = ok && !bw->error_;
if (!ok) {

View File

@ -15,10 +15,11 @@
//
#include <assert.h>
#include <float.h>
#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/dsp/lossless_common.h"
#include "src/utils/color_cache_utils.h"
#include "src/utils/utils.h"
@ -30,15 +31,15 @@ extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs,
const PixOrCopy v);
typedef struct {
double alpha_[VALUES_IN_BYTE];
double red_[VALUES_IN_BYTE];
double blue_[VALUES_IN_BYTE];
double distance_[NUM_DISTANCE_CODES];
double* literal_;
float alpha_[VALUES_IN_BYTE];
float red_[VALUES_IN_BYTE];
float blue_[VALUES_IN_BYTE];
float distance_[NUM_DISTANCE_CODES];
float* literal_;
} CostModel;
static void ConvertPopulationCountTableToBitEstimates(
int num_symbols, const uint32_t population_counts[], double output[]) {
int num_symbols, const uint32_t population_counts[], float output[]) {
uint32_t sum = 0;
int nonzeros = 0;
int i;
@ -51,7 +52,7 @@ static void ConvertPopulationCountTableToBitEstimates(
if (nonzeros <= 1) {
memset(output, 0, num_symbols * sizeof(*output));
} else {
const double logsum = VP8LFastLog2(sum);
const float logsum = VP8LFastLog2(sum);
for (i = 0; i < num_symbols; ++i) {
output[i] = logsum - VP8LFastLog2(population_counts[i]);
}
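The loop assigns each nonzero symbol its ideal Shannon code length, log2(sum) - log2(count) = -log2(p). For instance, with population_counts = {8, 8, 16, 32} (sum = 64), the estimates are 3, 3, 2 and 1 bits, which is exactly what an optimal entropy coder would spend on those probabilities.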
@ -75,8 +76,8 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
}
ConvertPopulationCountTableToBitEstimates(
VP8LHistogramNumCodes(histo->palette_code_bits_),
histo->literal_, m->literal_);
VP8LHistogramNumCodes(histo->palette_code_bits_), histo->literal_,
m->literal_);
ConvertPopulationCountTableToBitEstimates(
VALUES_IN_BYTE, histo->red_, m->red_);
ConvertPopulationCountTableToBitEstimates(
@ -92,26 +93,26 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
return ok;
}
static WEBP_INLINE double GetLiteralCost(const CostModel* const m, uint32_t v) {
static WEBP_INLINE float GetLiteralCost(const CostModel* const m, uint32_t v) {
return m->alpha_[v >> 24] +
m->red_[(v >> 16) & 0xff] +
m->literal_[(v >> 8) & 0xff] +
m->blue_[v & 0xff];
}
static WEBP_INLINE double GetCacheCost(const CostModel* const m, uint32_t idx) {
static WEBP_INLINE float GetCacheCost(const CostModel* const m, uint32_t idx) {
const int literal_idx = VALUES_IN_BYTE + NUM_LENGTH_CODES + idx;
return m->literal_[literal_idx];
}
static WEBP_INLINE double GetLengthCost(const CostModel* const m,
static WEBP_INLINE float GetLengthCost(const CostModel* const m,
uint32_t length) {
int code, extra_bits;
VP8LPrefixEncodeBits(length, &code, &extra_bits);
return m->literal_[VALUES_IN_BYTE + code] + extra_bits;
}
static WEBP_INLINE double GetDistanceCost(const CostModel* const m,
static WEBP_INLINE float GetDistanceCost(const CostModel* const m,
uint32_t distance) {
int code, extra_bits;
VP8LPrefixEncodeBits(distance, &code, &extra_bits);
@ -122,20 +123,20 @@ static WEBP_INLINE void AddSingleLiteralWithCostModel(
const uint32_t* const argb, VP8LColorCache* const hashers,
const CostModel* const cost_model, int idx, int use_color_cache,
float prev_cost, float* const cost, uint16_t* const dist_array) {
double cost_val = prev_cost;
float cost_val = prev_cost;
const uint32_t color = argb[idx];
const int ix = use_color_cache ? VP8LColorCacheContains(hashers, color) : -1;
if (ix >= 0) {
// use_color_cache is true and hashers contains color
const double mul0 = 0.68;
const float mul0 = 0.68f;
cost_val += GetCacheCost(cost_model, ix) * mul0;
} else {
const double mul1 = 0.82;
const float mul1 = 0.82f;
if (use_color_cache) VP8LColorCacheInsert(hashers, color);
cost_val += GetLiteralCost(cost_model, color) * mul1;
}
if (cost[idx] > cost_val) {
cost[idx] = (float)cost_val;
cost[idx] = cost_val;
dist_array[idx] = 1; // only one is inserted.
}
}
@ -172,7 +173,7 @@ struct CostInterval {
// The GetLengthCost(cost_model, k) are cached in a CostCacheInterval.
typedef struct {
double cost_;
float cost_;
int start_;
int end_; // Exclusive.
} CostCacheInterval;
@ -187,7 +188,7 @@ typedef struct {
int count_; // The number of stored intervals.
CostCacheInterval* cache_intervals_;
size_t cache_intervals_size_;
double cost_cache_[MAX_LENGTH]; // Contains the GetLengthCost(cost_model, k).
float cost_cache_[MAX_LENGTH]; // Contains the GetLengthCost(cost_model, k).
float* costs_;
uint16_t* dist_array_;
// Most of the time, we only need a few intervals -> use a free-list, to avoid
@ -262,10 +263,13 @@ static int CostManagerInit(CostManager* const manager,
CostManagerInitFreeList(manager);
// Fill in the cost_cache_.
manager->cache_intervals_size_ = 1;
manager->cost_cache_[0] = GetLengthCost(cost_model, 0);
for (i = 1; i < cost_cache_size; ++i) {
// Has to be done in two passes due to a GCC bug on i686
// related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
for (i = 0; i < cost_cache_size; ++i) {
manager->cost_cache_[i] = GetLengthCost(cost_model, i);
}
manager->cache_intervals_size_ = 1;
for (i = 1; i < cost_cache_size; ++i) {
// Get the number of bound intervals.
if (manager->cost_cache_[i] != manager->cost_cache_[i - 1]) {
++manager->cache_intervals_size_;
@ -294,7 +298,7 @@ static int CostManagerInit(CostManager* const manager,
cur->end_ = 1;
cur->cost_ = manager->cost_cache_[0];
for (i = 1; i < cost_cache_size; ++i) {
const double cost_val = manager->cost_cache_[i];
const float cost_val = manager->cost_cache_[i];
if (cost_val != cur->cost_) {
++cur;
// Initialize an interval.
@ -303,6 +307,8 @@ static int CostManagerInit(CostManager* const manager,
}
cur->end_ = i + 1;
}
assert((size_t)(cur - manager->cache_intervals_) + 1 ==
manager->cache_intervals_size_);
}
manager->costs_ = (float*)WebPSafeMalloc(pix_count, sizeof(*manager->costs_));
@ -311,7 +317,7 @@ static int CostManagerInit(CostManager* const manager,
return 0;
}
// Set the initial costs_ high for every pixel as we will keep the minimum.
for (i = 0; i < pix_count; ++i) manager->costs_[i] = 1e38f;
for (i = 0; i < pix_count; ++i) manager->costs_[i] = FLT_MAX;
return 1;
}
@ -457,7 +463,7 @@ static WEBP_INLINE void InsertInterval(CostManager* const manager,
// If handling the interval or one of its subintervals becomes too heavy, its
// contribution is added to the costs right away.
static WEBP_INLINE void PushInterval(CostManager* const manager,
double distance_cost, int position,
float distance_cost, int position,
int len) {
size_t i;
CostInterval* interval = manager->head_;
@ -474,7 +480,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager,
const int k = j - position;
float cost_tmp;
assert(k >= 0 && k < MAX_LENGTH);
cost_tmp = (float)(distance_cost + manager->cost_cache_[k]);
cost_tmp = distance_cost + manager->cost_cache_[k];
if (manager->costs_[j] > cost_tmp) {
manager->costs_[j] = cost_tmp;
@ -492,7 +498,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager,
const int end = position + (cost_cache_intervals[i].end_ > len
? len
: cost_cache_intervals[i].end_);
const float cost = (float)(distance_cost + cost_cache_intervals[i].cost_);
const float cost = distance_cost + cost_cache_intervals[i].cost_;
for (; interval != NULL && interval->start_ < end;
interval = interval_next) {
@ -570,22 +576,21 @@ static int BackwardReferencesHashChainDistanceOnly(
const int pix_count = xsize * ysize;
const int use_color_cache = (cache_bits > 0);
const size_t literal_array_size =
sizeof(double) * (NUM_LITERAL_CODES + NUM_LENGTH_CODES +
((cache_bits > 0) ? (1 << cache_bits) : 0));
sizeof(float) * (VP8LHistogramNumCodes(cache_bits));
const size_t cost_model_size = sizeof(CostModel) + literal_array_size;
CostModel* const cost_model =
(CostModel*)WebPSafeCalloc(1ULL, cost_model_size);
VP8LColorCache hashers;
CostManager* cost_manager =
(CostManager*)WebPSafeMalloc(1ULL, sizeof(*cost_manager));
(CostManager*)WebPSafeCalloc(1ULL, sizeof(*cost_manager));
int offset_prev = -1, len_prev = -1;
double offset_cost = -1;
float offset_cost = -1.f;
int first_offset_is_constant = -1; // initialized with 'impossible' value
int reach = 0;
if (cost_model == NULL || cost_manager == NULL) goto Error;
cost_model->literal_ = (double*)(cost_model + 1);
cost_model->literal_ = (float*)(cost_model + 1);
if (use_color_cache) {
cc_init = VP8LColorCacheInit(&hashers, cache_bits);
if (!cc_init) goto Error;

View File

@ -10,6 +10,8 @@
// Author: Jyrki Alakuijala (jyrki@google.com)
//
#include "src/enc/backward_references_enc.h"
#include <assert.h>
#include <float.h>
#include <math.h>
@ -17,10 +19,11 @@
#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/color_cache_utils.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#define MIN_BLOCK_SIZE 256 // minimum block size for backward references
@ -255,10 +258,13 @@ static WEBP_INLINE int MaxFindCopyLength(int len) {
int VP8LHashChainFill(VP8LHashChain* const p, int quality,
const uint32_t* const argb, int xsize, int ysize,
int low_effort) {
int low_effort, const WebPPicture* const pic,
int percent_range, int* const percent) {
const int size = xsize * ysize;
const int iter_max = GetMaxItersForQuality(quality);
const uint32_t window_size = GetWindowSizeForHashChain(quality, xsize);
int remaining_percent = percent_range;
int percent_start = *percent;
int pos;
int argb_comp;
uint32_t base_position;
@ -276,7 +282,13 @@ int VP8LHashChainFill(VP8LHashChain* const p, int quality,
hash_to_first_index =
(int32_t*)WebPSafeMalloc(HASH_SIZE, sizeof(*hash_to_first_index));
if (hash_to_first_index == NULL) return 0;
if (hash_to_first_index == NULL) {
WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
return 0;
}
percent_range = remaining_percent / 2;
remaining_percent -= percent_range;
// Set the int32_t array to -1.
memset(hash_to_first_index, 0xff, HASH_SIZE * sizeof(*hash_to_first_index));
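The 0xff fill works because memset writes bytes and an all-ones pattern is -1 in two's complement for int32_t; a quick standalone check:

  int32_t v;
  memset(&v, 0xff, sizeof(v));   // every byte 0xff -> 0xffffffff
  assert(v == -1);               // all hash buckets marked "empty" in one call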
@ -323,12 +335,22 @@ int VP8LHashChainFill(VP8LHashChain* const p, int quality,
hash_to_first_index[hash_code] = pos++;
argb_comp = argb_comp_next;
}
if (!WebPReportProgress(
pic, percent_start + percent_range * pos / (size - 2), percent)) {
WebPSafeFree(hash_to_first_index);
return 0;
}
}
// Process the penultimate pixel.
chain[pos] = hash_to_first_index[GetPixPairHash64(argb + pos)];
WebPSafeFree(hash_to_first_index);
percent_start += percent_range;
if (!WebPReportProgress(pic, percent_start, percent)) return 0;
percent_range = remaining_percent;
// Find the best match interval at each pixel, defined by an offset to the
// pixel and a length. The right-most pixel cannot match anything to the right
// (hence a best length of 0) and the left-most pixel nothing to the left
@ -417,8 +439,17 @@ int VP8LHashChainFill(VP8LHashChain* const p, int quality,
max_base_position = base_position;
}
}
if (!WebPReportProgress(pic,
percent_start + percent_range *
(size - 2 - base_position) /
(size - 2),
percent)) {
return 0;
}
return 1;
}
return WebPReportProgress(pic, percent_start + percent_range, percent);
}
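The progress plumbing threaded through this function follows one pattern: each phase takes a slice of percent_range, reports pro-rata, and bails out if WebPReportProgress returns 0 (the user hook canceled). A sketch of that shape, with the halving split taken from the code above (DoTwoPhases is illustrative):

  static int DoTwoPhases(const WebPPicture* pic, int percent_range,
                         int* percent) {
    int percent_start = *percent;
    const int phase1 = percent_range / 2;      // first phase's budget
    // ... phase 1 work, reporting pos/size pro-rata as above ...
    if (!WebPReportProgress(pic, percent_start + phase1, percent)) {
      return 0;  // hook aborted; pic->error_code is already set
    }
    percent_start += phase1;
    // ... phase 2 work ...
    return WebPReportProgress(pic, percent_start + (percent_range - phase1),
                              percent);
  }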
static WEBP_INLINE void AddSingleLiteral(uint32_t pixel, int use_color_cache,
@ -728,7 +759,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
int* const best_cache_bits) {
int i;
const int cache_bits_max = (quality <= 25) ? 0 : *best_cache_bits;
double entropy_min = MAX_ENTROPY;
float entropy_min = MAX_ENTROPY;
int cc_init[MAX_COLOR_CACHE_BITS + 1] = { 0 };
VP8LColorCache hashers[MAX_COLOR_CACHE_BITS + 1];
VP8LRefsCursor c = VP8LRefsCursorInit(refs);
@ -813,7 +844,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
}
for (i = 0; i <= cache_bits_max; ++i) {
const double entropy = VP8LHistogramEstimateBits(histos[i]);
const float entropy = VP8LHistogramEstimateBits(histos[i]);
if (i == 0 || entropy < entropy_min) {
entropy_min = entropy;
*best_cache_bits = i;
@ -890,7 +921,7 @@ static int GetBackwardReferences(int width, int height,
int i, lz77_type;
// Index 0 is for a color cache, index 1 for no cache (if needed).
int lz77_types_best[2] = {0, 0};
double bit_costs_best[2] = {DBL_MAX, DBL_MAX};
float bit_costs_best[2] = {FLT_MAX, FLT_MAX};
VP8LHashChain hash_chain_box;
VP8LBackwardRefs* const refs_tmp = &refs[do_no_cache ? 2 : 1];
int status = 0;
@ -902,7 +933,7 @@ static int GetBackwardReferences(int width, int height,
for (lz77_type = 1; lz77_types_to_try;
lz77_types_to_try &= ~lz77_type, lz77_type <<= 1) {
int res = 0;
double bit_cost = 0.;
float bit_cost = 0.f;
if ((lz77_types_to_try & lz77_type) == 0) continue;
switch (lz77_type) {
case kLZ77RLE:
@ -976,17 +1007,18 @@ static int GetBackwardReferences(int width, int height,
const VP8LHashChain* const hash_chain_tmp =
(lz77_types_best[i] == kLZ77Standard) ? hash_chain : &hash_chain_box;
const int cache_bits = (i == 1) ? 0 : *cache_bits_best;
if (VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits,
float bit_cost_trace;
if (!VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits,
hash_chain_tmp, &refs[i],
refs_tmp)) {
double bit_cost_trace;
goto Error;
}
VP8LHistogramCreate(histo, refs_tmp, cache_bits);
bit_cost_trace = VP8LHistogramEstimateBits(histo);
if (bit_cost_trace < bit_costs_best[i]) {
BackwardRefsSwap(refs_tmp, &refs[i]);
}
}
}
BackwardReferences2DLocality(width, &refs[i]);
@ -1006,25 +1038,31 @@ Error:
return status;
}
WebPEncodingError VP8LGetBackwardReferences(
int VP8LGetBackwardReferences(
int width, int height, const uint32_t* const argb, int quality,
int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache,
const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs,
int* const cache_bits_best) {
int* const cache_bits_best, const WebPPicture* const pic, int percent_range,
int* const percent) {
if (low_effort) {
VP8LBackwardRefs* refs_best;
*cache_bits_best = cache_bits_max;
refs_best = GetBackwardReferencesLowEffort(
width, height, argb, cache_bits_best, hash_chain, refs);
if (refs_best == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
if (refs_best == NULL) {
WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
return 0;
}
// Set it in first position.
BackwardRefsSwap(refs_best, &refs[0]);
} else {
if (!GetBackwardReferences(width, height, argb, quality, lz77_types_to_try,
cache_bits_max, do_no_cache, hash_chain, refs,
cache_bits_best)) {
return VP8_ENC_ERROR_OUT_OF_MEMORY;
WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
return 0;
}
}
return VP8_ENC_OK;
return WebPReportProgress(pic, *percent + percent_range, percent);
}
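The signature change above is part of a wider convention switch: these entry points now return an int (0 on failure) and store the reason in pic->error_code via WebPEncodingSetError, instead of returning a WebPEncodingError directly. An illustrative caller under the new convention:

  if (!VP8LGetBackwardReferences(width, height, argb, quality, low_effort,
                                 lz77_types_to_try, cache_bits_max,
                                 do_no_cache, hash_chain, refs,
                                 &cache_bits_best, pic, percent_range,
                                 &percent)) {
    return pic->error_code;  // e.g. OUT_OF_MEMORY or USER_ABORT
  }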

View File

@ -134,10 +134,11 @@ struct VP8LHashChain {
// Must be called first, to set size.
int VP8LHashChainInit(VP8LHashChain* const p, int size);
// Pre-compute the best matches for argb.
// Pre-compute the best matches for argb. pic and percent are for progress.
int VP8LHashChainFill(VP8LHashChain* const p, int quality,
const uint32_t* const argb, int xsize, int ysize,
int low_effort);
int low_effort, const WebPPicture* const pic,
int percent_range, int* const percent);
void VP8LHashChainClear(VP8LHashChain* const p); // release memory
static WEBP_INLINE int VP8LHashChainFindOffset(const VP8LHashChain* const p,
@ -227,11 +228,14 @@ enum VP8LLZ77Type {
// VP8LBackwardRefs is put in the first element, the best value with no-cache in
// the second element.
// In both cases, the last element is used as temporary internally.
WebPEncodingError VP8LGetBackwardReferences(
// pic and percent are for progress.
// Returns false in case of error (stored in pic->error_code).
int VP8LGetBackwardReferences(
int width, int height, const uint32_t* const argb, int quality,
int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache,
const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs,
int* const cache_bits_best);
int* const cache_bits_best, const WebPPicture* const pic, int percent_range,
int* const percent);
#ifdef __cplusplus
}

View File

@ -13,15 +13,17 @@
#include "src/webp/config.h"
#endif
#include <float.h>
#include <math.h>
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"
#define MAX_COST 1.e38
#define MAX_BIT_COST FLT_MAX
// Number of partitions for the three dominant (literal, red and blue) symbol
// costs.
@ -228,8 +230,8 @@ void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
// -----------------------------------------------------------------------------
// Entropy-related functions.
static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
double mix;
static WEBP_INLINE float BitsEntropyRefine(const VP8LBitEntropy* entropy) {
float mix;
if (entropy->nonzeros < 5) {
if (entropy->nonzeros <= 1) {
return 0;
@ -238,65 +240,65 @@ static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
// Let's mix in a bit of entropy to favor good clustering when
// distributions of these are combined.
if (entropy->nonzeros == 2) {
return 0.99 * entropy->sum + 0.01 * entropy->entropy;
return 0.99f * entropy->sum + 0.01f * entropy->entropy;
}
// No matter what the entropy says, we cannot be better than min_limit
// with Huffman coding. I am mixing a bit of entropy into the
// min_limit since it produces much better (~0.5 %) compression results
// perhaps because of better entropy clustering.
if (entropy->nonzeros == 3) {
mix = 0.95;
mix = 0.95f;
} else {
mix = 0.7; // nonzeros == 4.
mix = 0.7f; // nonzeros == 4.
}
} else {
mix = 0.627;
mix = 0.627f;
}
{
double min_limit = 2 * entropy->sum - entropy->max_val;
min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy;
float min_limit = 2.f * entropy->sum - entropy->max_val;
min_limit = mix * min_limit + (1.f - mix) * entropy->entropy;
return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
}
}
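Concretely, the blend above returns max(entropy, mix*min_limit + (1-mix)*entropy). With illustrative values sum = 100, max_val = 60, entropy = 90 and nonzeros >= 5 (mix = 0.627):

  min_limit = 2*100 - 60           = 140
  blended   = 0.627*140 + 0.373*90 = 121.35
  returned  = max(90, 121.35)      = 121.35

so a distribution a Huffman code cannot realize has its cost pulled up to the feasible bound.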
double VP8LBitsEntropy(const uint32_t* const array, int n) {
float VP8LBitsEntropy(const uint32_t* const array, int n) {
VP8LBitEntropy entropy;
VP8LBitsEntropyUnrefined(array, n, &entropy);
return BitsEntropyRefine(&entropy);
}
static double InitialHuffmanCost(void) {
static float InitialHuffmanCost(void) {
// Small bias because Huffman code length is typically not stored in
// full length.
static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
static const double kSmallBias = 9.1;
static const float kSmallBias = 9.1f;
return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
}
// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
static double FinalHuffmanCost(const VP8LStreaks* const stats) {
static float FinalHuffmanCost(const VP8LStreaks* const stats) {
// The constants in this function are experimental and got rounded from
// their original values in 1/8 when switched to 1/1024.
double retval = InitialHuffmanCost();
float retval = InitialHuffmanCost();
// First coefficient: Many zeros in the histogram are covered efficiently
// by a run-length encode. Originally 2/8.
retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1];
retval += stats->counts[0] * 1.5625f + 0.234375f * stats->streaks[0][1];
// Second coefficient: Constant values are encoded less efficiently, but still
// RLE'ed. Originally 6/8.
retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1];
retval += stats->counts[1] * 2.578125f + 0.703125f * stats->streaks[1][1];
// 0s are usually encoded more efficiently than non-0s.
// Originally 15/8.
retval += 1.796875 * stats->streaks[0][0];
retval += 1.796875f * stats->streaks[0][0];
// Originally 26/8.
retval += 3.28125 * stats->streaks[1][0];
retval += 3.28125f * stats->streaks[1][0];
return retval;
}
// Get the symbol entropy for the distribution 'population'.
// Set 'trivial_sym', if there's only one symbol present in the distribution.
static double PopulationCost(const uint32_t* const population, int length,
static float PopulationCost(const uint32_t* const population, int length,
uint32_t* const trivial_sym,
uint8_t* const is_used) {
VP8LBitEntropy bit_entropy;
@ -314,10 +316,9 @@ static double PopulationCost(const uint32_t* const population, int length,
// trivial_at_end is 1 if the two histograms only have one element that is
// non-zero: both the zero-th one, or both the last one.
static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
const uint32_t* const Y,
int length, int is_X_used,
int is_Y_used,
static WEBP_INLINE float GetCombinedEntropy(const uint32_t* const X,
const uint32_t* const Y, int length,
int is_X_used, int is_Y_used,
int trivial_at_end) {
VP8LStreaks stats;
if (trivial_at_end) {
@ -356,7 +357,7 @@ static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
}
// Estimates the Entropy + Huffman + other block overhead size cost.
double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
float VP8LHistogramEstimateBits(VP8LHistogram* const p) {
return
PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_),
NULL, &p->is_used_[0])
@ -373,8 +374,7 @@ double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
const VP8LHistogram* const b,
double cost_threshold,
double* cost) {
float cost_threshold, float* cost) {
const int palette_code_bits = a->palette_code_bits_;
int trivial_at_end = 0;
assert(a->palette_code_bits_ == b->palette_code_bits_);
@ -439,12 +439,11 @@ static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a,
// Since the previous score passed is 'cost_threshold', we only need to compare
// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out
// early.
static double HistogramAddEval(const VP8LHistogram* const a,
static float HistogramAddEval(const VP8LHistogram* const a,
const VP8LHistogram* const b,
VP8LHistogram* const out,
double cost_threshold) {
double cost = 0;
const double sum_cost = a->bit_cost_ + b->bit_cost_;
VP8LHistogram* const out, float cost_threshold) {
float cost = 0;
const float sum_cost = a->bit_cost_ + b->bit_cost_;
cost_threshold += sum_cost;
if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) {
@ -459,10 +458,10 @@ static double HistogramAddEval(const VP8LHistogram* const a,
// Same as HistogramAddEval(), except that the resulting histogram
// is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit
// the term C(b) which is constant over all the evaluations.
static double HistogramAddThresh(const VP8LHistogram* const a,
static float HistogramAddThresh(const VP8LHistogram* const a,
const VP8LHistogram* const b,
double cost_threshold) {
double cost;
float cost_threshold) {
float cost;
assert(a != NULL && b != NULL);
cost = -a->bit_cost_;
GetCombinedHistogramEntropy(a, b, cost_threshold, &cost);
@ -473,24 +472,22 @@ static double HistogramAddThresh(const VP8LHistogram* const a,
// The structure to keep track of cost range for the three dominant entropy
// symbols.
// TODO(skal): Evaluate if float can be used here instead of double for
// representing the entropy costs.
typedef struct {
double literal_max_;
double literal_min_;
double red_max_;
double red_min_;
double blue_max_;
double blue_min_;
float literal_max_;
float literal_min_;
float red_max_;
float red_min_;
float blue_max_;
float blue_min_;
} DominantCostRange;
static void DominantCostRangeInit(DominantCostRange* const c) {
c->literal_max_ = 0.;
c->literal_min_ = MAX_COST;
c->literal_min_ = MAX_BIT_COST;
c->red_max_ = 0.;
c->red_min_ = MAX_COST;
c->red_min_ = MAX_BIT_COST;
c->blue_max_ = 0.;
c->blue_min_ = MAX_COST;
c->blue_min_ = MAX_BIT_COST;
}
static void UpdateDominantCostRange(
@ -505,10 +502,9 @@ static void UpdateDominantCostRange(
static void UpdateHistogramCost(VP8LHistogram* const h) {
uint32_t alpha_sym, red_sym, blue_sym;
const double alpha_cost =
PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym,
&h->is_used_[3]);
const double distance_cost =
const float alpha_cost =
PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, &h->is_used_[3]);
const float distance_cost =
PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +
VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
@ -529,10 +525,10 @@ static void UpdateHistogramCost(VP8LHistogram* const h) {
}
}
static int GetBinIdForEntropy(double min, double max, double val) {
const double range = max - min;
static int GetBinIdForEntropy(float min, float max, float val) {
const float range = max - min;
if (range > 0.) {
const double delta = val - min;
const float delta = val - min;
return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
} else {
return 0;
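GetBinIdForEntropy linearly quantizes val within [min, max]; the 1e-6 nudge keeps val == max inside the top bin. Assuming NUM_PARTITIONS is 4 (its value in this file), with min = 0 and max = 10:

  val = 5  -> (int)((4 - 1e-6) * 0.5) = (int)1.9999995 = bin 1
  val = 10 -> (int)((4 - 1e-6) * 1.0) = (int)3.999996  = bin 3, not 4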
@ -641,15 +637,11 @@ static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
// Merges some histograms with same bin_id together if it's advantageous.
// Sets the remaining histograms to NULL.
static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
int* num_used,
const uint16_t* const clusters,
uint16_t* const cluster_mappings,
VP8LHistogram* cur_combo,
const uint16_t* const bin_map,
int num_bins,
double combine_cost_factor,
int low_effort) {
static void HistogramCombineEntropyBin(
VP8LHistogramSet* const image_histo, int* num_used,
const uint16_t* const clusters, uint16_t* const cluster_mappings,
VP8LHistogram* cur_combo, const uint16_t* const bin_map, int num_bins,
float combine_cost_factor, int low_effort) {
VP8LHistogram** const histograms = image_histo->histograms;
int idx;
struct {
@ -679,11 +671,10 @@ static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
cluster_mappings[clusters[idx]] = clusters[first];
} else {
// try to merge #idx into #first (both share the same bin_id)
const double bit_cost = histograms[idx]->bit_cost_;
const double bit_cost_thresh = -bit_cost * combine_cost_factor;
const double curr_cost_diff =
HistogramAddEval(histograms[first], histograms[idx],
cur_combo, bit_cost_thresh);
const float bit_cost = histograms[idx]->bit_cost_;
const float bit_cost_thresh = -bit_cost * combine_cost_factor;
const float curr_cost_diff = HistogramAddEval(
histograms[first], histograms[idx], cur_combo, bit_cost_thresh);
if (curr_cost_diff < bit_cost_thresh) {
// Try to merge two histograms only if the combo is a trivial one or
// the two candidate histograms are already non-trivial.
@ -731,8 +722,8 @@ static uint32_t MyRand(uint32_t* const seed) {
typedef struct {
int idx1;
int idx2;
double cost_diff;
double cost_combo;
float cost_diff;
float cost_combo;
} HistogramPair;
typedef struct {
@ -787,10 +778,9 @@ static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
// Update the cost diff and combo of a pair of histograms. This needs to be
// called when the histograms have been merged with a third one.
static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
const VP8LHistogram* const h2,
double threshold,
const VP8LHistogram* const h2, float threshold,
HistogramPair* const pair) {
const double sum_cost = h1->bit_cost_ + h2->bit_cost_;
const float sum_cost = h1->bit_cost_ + h2->bit_cost_;
pair->cost_combo = 0.;
GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
pair->cost_diff = pair->cost_combo - sum_cost;
@ -799,9 +789,9 @@ static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
// Create a pair from indices "idx1" and "idx2" provided its cost
// is inferior to "threshold", a negative entropy.
// It returns the cost of the pair, or 0. if it superior to threshold.
static double HistoQueuePush(HistoQueue* const histo_queue,
static float HistoQueuePush(HistoQueue* const histo_queue,
VP8LHistogram** const histograms, int idx1,
int idx2, double threshold) {
int idx2, float threshold) {
const VP8LHistogram* h1;
const VP8LHistogram* h2;
HistogramPair pair;
@ -945,8 +935,8 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
++tries_with_no_success < num_tries_no_success;
++iter) {
int* mapping_index;
double best_cost =
(histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff;
float best_cost =
(histo_queue.size == 0) ? 0.f : histo_queue.queue[0].cost_diff;
int best_idx1 = -1, best_idx2 = 1;
const uint32_t rand_range = (*num_used - 1) * (*num_used);
// (*num_used) / 2 was chosen empirically. Less means faster but worse
@ -955,7 +945,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
// Pick random samples.
for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
double curr_cost;
float curr_cost;
// Choose two different histograms at random and try to combine them.
const uint32_t tmp = MyRand(&seed) % rand_range;
uint32_t idx1 = tmp / (*num_used - 1);
@ -1057,7 +1047,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
if (out_size > 1) {
for (i = 0; i < in_size; ++i) {
int best_out = 0;
double best_bits = MAX_COST;
float best_bits = MAX_BIT_COST;
int k;
if (in_histo[i] == NULL) {
// Arbitrarily set to the previous value if unused to help future LZ77.
@ -1065,7 +1055,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
continue;
}
for (k = 0; k < out_size; ++k) {
double cur_bits;
float cur_bits;
cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
if (k == 0 || cur_bits < best_bits) {
best_bits = cur_bits;
@ -1093,13 +1083,13 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
}
}
static double GetCombineCostFactor(int histo_size, int quality) {
double combine_cost_factor = 0.16;
static float GetCombineCostFactor(int histo_size, int quality) {
float combine_cost_factor = 0.16f;
if (quality < 90) {
if (histo_size > 256) combine_cost_factor /= 2.;
if (histo_size > 512) combine_cost_factor /= 2.;
if (histo_size > 1024) combine_cost_factor /= 2.;
if (quality <= 50) combine_cost_factor /= 2.;
if (histo_size > 256) combine_cost_factor /= 2.f;
if (histo_size > 512) combine_cost_factor /= 2.f;
if (histo_size > 1024) combine_cost_factor /= 2.f;
if (quality <= 50) combine_cost_factor /= 2.f;
}
return combine_cost_factor;
}
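Worked through for a 600-entry histogram set at quality 40: the factor halves for histo_size > 256 and again for > 512 (but not > 1024), then once more for quality <= 50, giving 0.16 / 8 = 0.02.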
@ -1169,13 +1159,13 @@ static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {
}
int VP8LGetHistoImageSymbols(int xsize, int ysize,
const VP8LBackwardRefs* const refs,
int quality, int low_effort,
int histogram_bits, int cache_bits,
const VP8LBackwardRefs* const refs, int quality,
int low_effort, int histogram_bits, int cache_bits,
VP8LHistogramSet* const image_histo,
VP8LHistogram* const tmp_histo,
uint16_t* const histogram_symbols) {
int ok = 0;
uint16_t* const histogram_symbols,
const WebPPicture* const pic, int percent_range,
int* const percent) {
const int histo_xsize =
histogram_bits ? VP8LSubSampleSize(xsize, histogram_bits) : 1;
const int histo_ysize =
@ -1192,7 +1182,10 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
WebPSafeMalloc(2 * image_histo_raw_size, sizeof(map_tmp));
uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;
int num_used = image_histo_raw_size;
if (orig_histo == NULL || map_tmp == NULL) goto Error;
if (orig_histo == NULL || map_tmp == NULL) {
WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
// Construct the histograms from backward references.
HistogramBuild(xsize, histogram_bits, refs, orig_histo);
@ -1206,16 +1199,15 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
if (entropy_combine) {
uint16_t* const bin_map = map_tmp;
const double combine_cost_factor =
const float combine_cost_factor =
GetCombineCostFactor(image_histo_raw_size, quality);
const uint32_t num_clusters = num_used;
HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
// Collapse histograms with similar entropy.
HistogramCombineEntropyBin(image_histo, &num_used, histogram_symbols,
cluster_mappings, tmp_histo, bin_map,
entropy_combine_num_bins, combine_cost_factor,
low_effort);
HistogramCombineEntropyBin(
image_histo, &num_used, histogram_symbols, cluster_mappings, tmp_histo,
bin_map, entropy_combine_num_bins, combine_cost_factor, low_effort);
OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,
map_tmp, histogram_symbols);
}
@ -1229,11 +1221,13 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
int do_greedy;
if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,
&do_greedy)) {
WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
if (do_greedy) {
RemoveEmptyHistograms(image_histo);
if (!HistogramCombineGreedy(image_histo, &num_used)) {
WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
}
@ -1243,10 +1237,12 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
RemoveEmptyHistograms(image_histo);
HistogramRemap(orig_histo, image_histo, histogram_symbols);
ok = 1;
if (!WebPReportProgress(pic, *percent + percent_range, percent)) {
goto Error;
}
Error:
VP8LFreeHistogramSet(orig_histo);
WebPSafeFree(map_tmp);
return ok;
return (pic->error_code == VP8_ENC_OK);
}

View File

@ -40,10 +40,10 @@ typedef struct {
int palette_code_bits_;
uint32_t trivial_symbol_; // True, if histograms for Red, Blue & Alpha
// literal symbols are single valued.
double bit_cost_; // cached value of bit cost.
double literal_cost_; // Cached values of dominant entropy costs:
double red_cost_; // literal, red & blue.
double blue_cost_;
float bit_cost_; // cached value of bit cost.
float literal_cost_; // Cached values of dominant entropy costs:
float red_cost_; // literal, red & blue.
float blue_cost_;
uint8_t is_used_[5]; // 5 for literal, red, blue, alpha, distance
} VP8LHistogram;
@ -105,21 +105,23 @@ static WEBP_INLINE int VP8LHistogramNumCodes(int palette_code_bits) {
((palette_code_bits > 0) ? (1 << palette_code_bits) : 0);
}
// Builds the histogram image.
// Builds the histogram image. pic and percent are for progress.
// Returns false in case of error (stored in pic->error_code).
int VP8LGetHistoImageSymbols(int xsize, int ysize,
const VP8LBackwardRefs* const refs,
int quality, int low_effort,
int histogram_bits, int cache_bits,
const VP8LBackwardRefs* const refs, int quality,
int low_effort, int histogram_bits, int cache_bits,
VP8LHistogramSet* const image_histo,
VP8LHistogram* const tmp_histo,
uint16_t* const histogram_symbols);
uint16_t* const histogram_symbols,
const WebPPicture* const pic, int percent_range,
int* const percent);
// Returns the entropy for the symbols in the input array.
double VP8LBitsEntropy(const uint32_t* const array, int n);
float VP8LBitsEntropy(const uint32_t* const array, int n);
// Estimate how many bits the combined entropy of literals and distance
// approximately maps to.
double VP8LHistogramEstimateBits(VP8LHistogram* const p);
float VP8LHistogramEstimateBits(VP8LHistogram* const p);
#ifdef __cplusplus
}

View File

@ -15,12 +15,19 @@
#include <stdlib.h>
#include <math.h>
#include "sharpyuv/sharpyuv.h"
#include "sharpyuv/sharpyuv_csp.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/random_utils.h"
#include "src/utils/utils.h"
#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/yuv.h"
#include "src/dsp/cpu.h"
#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
#include <pthread.h>
#endif
// Comment out to disable gamma-compression during RGB->U/V averaging
#define USE_GAMMA_COMPRESSION
@ -76,16 +83,16 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
#if defined(USE_GAMMA_COMPRESSION)
// gamma-compensates loss of resolution during chroma subsampling
#define kGamma 0.80 // for now we use a different gamma value than kGammaF
#define kGammaFix 12 // fixed-point precision for linear values
#define kGammaScale ((1 << kGammaFix) - 1)
#define kGammaTabFix 7 // fixed-point fractional bits precision
#define kGammaTabScale (1 << kGammaTabFix)
#define kGammaTabRounder (kGammaTabScale >> 1)
#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix))
// Gamma correction compensates loss of resolution during chroma subsampling.
#define GAMMA_FIX 12 // fixed-point precision for linear values
#define GAMMA_TAB_FIX 7 // fixed-point fractional bits precision
#define GAMMA_TAB_SIZE (1 << (GAMMA_FIX - GAMMA_TAB_FIX))
static const double kGamma = 0.80;
static const int kGammaScale = ((1 << GAMMA_FIX) - 1);
static const int kGammaTabScale = (1 << GAMMA_TAB_FIX);
static const int kGammaTabRounder = (1 << GAMMA_TAB_FIX >> 1);
static int kLinearToGammaTab[kGammaTabSize + 1];
static int kLinearToGammaTab[GAMMA_TAB_SIZE + 1];
static uint16_t kGammaToLinearTab[256];
static volatile int kGammaTablesOk = 0;
static void InitGammaTables(void);
@ -93,13 +100,13 @@ static void InitGammaTables(void);
WEBP_DSP_INIT_FUNC(InitGammaTables) {
if (!kGammaTablesOk) {
int v;
const double scale = (double)(1 << kGammaTabFix) / kGammaScale;
const double scale = (double)(1 << GAMMA_TAB_FIX) / kGammaScale;
const double norm = 1. / 255.;
for (v = 0; v <= 255; ++v) {
kGammaToLinearTab[v] =
(uint16_t)(pow(norm * v, kGamma) * kGammaScale + .5);
}
for (v = 0; v <= kGammaTabSize; ++v) {
for (v = 0; v <= GAMMA_TAB_SIZE; ++v) {
kLinearToGammaTab[v] = (int)(255. * pow(scale * v, 1. / kGamma) + .5);
}
kGammaTablesOk = 1;
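With GAMMA_FIX = 12 and GAMMA_TAB_FIX = 7 as defined above, kGammaScale is (1 << 12) - 1 = 4095 and GAMMA_TAB_SIZE is 1 << 5 = 32, so the pair of tables maps 8-bit values into 12-bit linear light and back through a 33-entry interpolated table. The endpoints are exact: kGammaToLinearTab[0] = 0 and kGammaToLinearTab[255] = pow(1.0, 0.8) * 4095 + .5 = 4095.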
@ -111,12 +118,12 @@ static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
}
static WEBP_INLINE int Interpolate(int v) {
const int tab_pos = v >> (kGammaTabFix + 2); // integer part
const int tab_pos = v >> (GAMMA_TAB_FIX + 2); // integer part
const int x = v & ((kGammaTabScale << 2) - 1); // fractional part
const int v0 = kLinearToGammaTab[tab_pos];
const int v1 = kLinearToGammaTab[tab_pos + 1];
const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x); // interpolate
assert(tab_pos + 1 < kGammaTabSize + 1);
assert(tab_pos + 1 < GAMMA_TAB_SIZE + 1);
return y;
}
@ -124,7 +131,7 @@ static WEBP_INLINE int Interpolate(int v) {
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
const int y = Interpolate(base_value << shift); // final uplifted value
return (y + kGammaTabRounder) >> kGammaTabFix; // descale
return (y + kGammaTabRounder) >> GAMMA_TAB_FIX; // descale
}
#else
@ -158,415 +165,41 @@ static int RGBToV(int r, int g, int b, VP8Random* const rg) {
//------------------------------------------------------------------------------
// Sharp RGB->YUV conversion
static const int kNumIterations = 4;
static const int kMinDimensionIterativeConversion = 4;
// We could use SFIX=0 and only uint8_t for fixed_y_t, but it produces some
// banding sometimes. Better use extra precision.
#define SFIX 2 // fixed-point precision of RGB and Y/W
typedef int16_t fixed_t; // signed type with extra SFIX precision for UV
typedef uint16_t fixed_y_t; // unsigned type with extra SFIX precision for W
#define SHALF (1 << SFIX >> 1)
#define MAX_Y_T ((256 << SFIX) - 1)
#define SROUNDER (1 << (YUV_FIX + SFIX - 1))
#if defined(USE_GAMMA_COMPRESSION)
// We use tables of different size and precision for the Rec709 / BT2020
// transfer function.
#define kGammaF (1./0.45)
static uint32_t kLinearToGammaTabS[kGammaTabSize + 2];
#define GAMMA_TO_LINEAR_BITS 14
static uint32_t kGammaToLinearTabS[MAX_Y_T + 1]; // size scales with Y_FIX
static volatile int kGammaTablesSOk = 0;
static void InitGammaTablesS(void);
WEBP_DSP_INIT_FUNC(InitGammaTablesS) {
assert(2 * GAMMA_TO_LINEAR_BITS < 32); // we use uint32_t intermediate values
if (!kGammaTablesSOk) {
int v;
const double norm = 1. / MAX_Y_T;
const double scale = 1. / kGammaTabSize;
const double a = 0.09929682680944;
const double thresh = 0.018053968510807;
const double final_scale = 1 << GAMMA_TO_LINEAR_BITS;
for (v = 0; v <= MAX_Y_T; ++v) {
const double g = norm * v;
double value;
if (g <= thresh * 4.5) {
value = g / 4.5;
} else {
const double a_rec = 1. / (1. + a);
value = pow(a_rec * (g + a), kGammaF);
}
kGammaToLinearTabS[v] = (uint32_t)(value * final_scale + .5);
}
for (v = 0; v <= kGammaTabSize; ++v) {
const double g = scale * v;
double value;
if (g <= thresh) {
value = 4.5 * g;
} else {
value = (1. + a) * pow(g, 1. / kGammaF) - a;
}
// we already incorporate the 1/2 rounding constant here
kLinearToGammaTabS[v] =
(uint32_t)(MAX_Y_T * value) + (1 << GAMMA_TO_LINEAR_BITS >> 1);
}
// to prevent small rounding errors from causing a read overflow:
kLinearToGammaTabS[kGammaTabSize + 1] = kLinearToGammaTabS[kGammaTabSize];
kGammaTablesSOk = 1;
}
}
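The tables above sample a Rec. 709 / BT.2020-style transfer curve: linear below the threshold, a 0.45 power law above it. A compact sketch of the pair of functions being tabulated, using the same constants (illustration only):

  static double GammaToLinear709(double g) {   // g in [0, 1]
    const double a = 0.09929682680944;
    const double thresh = 0.018053968510807;
    return (g <= thresh * 4.5) ? g / 4.5
                               : pow((g + a) / (1. + a), 1. / 0.45);
  }
  static double LinearToGamma709(double v) {   // v in [0, 1]
    const double a = 0.09929682680944;
    const double thresh = 0.018053968510807;
    return (v <= thresh) ? 4.5 * v
                         : (1. + a) * pow(v, 0.45) - a;
  }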
// return value has a fixed-point precision of GAMMA_TO_LINEAR_BITS
static WEBP_INLINE uint32_t GammaToLinearS(int v) {
return kGammaToLinearTabS[v];
}
static WEBP_INLINE uint32_t LinearToGammaS(uint32_t value) {
// 'value' is in GAMMA_TO_LINEAR_BITS fractional precision
const uint32_t v = value * kGammaTabSize;
const uint32_t tab_pos = v >> GAMMA_TO_LINEAR_BITS;
// fractional part, in GAMMA_TO_LINEAR_BITS fixed-point precision
const uint32_t x = v - (tab_pos << GAMMA_TO_LINEAR_BITS); // fractional part
// v0 / v1 are in GAMMA_TO_LINEAR_BITS fixed-point precision (range [0..1])
const uint32_t v0 = kLinearToGammaTabS[tab_pos + 0];
const uint32_t v1 = kLinearToGammaTabS[tab_pos + 1];
// Final interpolation. Note that rounding is already included.
const uint32_t v2 = (v1 - v0) * x; // note: v1 >= v0.
const uint32_t result = v0 + (v2 >> GAMMA_TO_LINEAR_BITS);
return result;
}
#else
static void InitGammaTablesS(void) {}
static WEBP_INLINE uint32_t GammaToLinearS(int v) {
return (v << GAMMA_TO_LINEAR_BITS) / MAX_Y_T;
}
static WEBP_INLINE uint32_t LinearToGammaS(uint32_t value) {
return (MAX_Y_T * value) >> GAMMA_TO_LINEAR_BITS;
}
#endif // USE_GAMMA_COMPRESSION
//------------------------------------------------------------------------------
static uint8_t clip_8b(fixed_t v) {
return (!(v & ~0xff)) ? (uint8_t)v : (v < 0) ? 0u : 255u;
}
static fixed_y_t clip_y(int y) {
return (!(y & ~MAX_Y_T)) ? (fixed_y_t)y : (y < 0) ? 0 : MAX_Y_T;
}
//------------------------------------------------------------------------------
static int RGBToGray(int r, int g, int b) {
const int luma = 13933 * r + 46871 * g + 4732 * b + YUV_HALF;
return (luma >> YUV_FIX);
}
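The three weights are the BT.709 luma coefficients in YUV_FIX (16-bit) fixed point, rounded so they sum exactly to one: 13933/65536 = 0.2126, 46871/65536 = 0.7152, 4732/65536 = 0.0722, and 13933 + 46871 + 4732 = 65536. Adding YUV_HALF before the shift makes the descale round to nearest.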
static uint32_t ScaleDown(int a, int b, int c, int d) {
const uint32_t A = GammaToLinearS(a);
const uint32_t B = GammaToLinearS(b);
const uint32_t C = GammaToLinearS(c);
const uint32_t D = GammaToLinearS(d);
return LinearToGammaS((A + B + C + D + 2) >> 2);
}
static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int w) {
int i;
for (i = 0; i < w; ++i) {
const uint32_t R = GammaToLinearS(src[0 * w + i]);
const uint32_t G = GammaToLinearS(src[1 * w + i]);
const uint32_t B = GammaToLinearS(src[2 * w + i]);
const uint32_t Y = RGBToGray(R, G, B);
dst[i] = (fixed_y_t)LinearToGammaS(Y);
}
}
static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2,
fixed_t* dst, int uv_w) {
int i;
for (i = 0; i < uv_w; ++i) {
const int r = ScaleDown(src1[0 * uv_w + 0], src1[0 * uv_w + 1],
src2[0 * uv_w + 0], src2[0 * uv_w + 1]);
const int g = ScaleDown(src1[2 * uv_w + 0], src1[2 * uv_w + 1],
src2[2 * uv_w + 0], src2[2 * uv_w + 1]);
const int b = ScaleDown(src1[4 * uv_w + 0], src1[4 * uv_w + 1],
src2[4 * uv_w + 0], src2[4 * uv_w + 1]);
const int W = RGBToGray(r, g, b);
dst[0 * uv_w] = (fixed_t)(r - W);
dst[1 * uv_w] = (fixed_t)(g - W);
dst[2 * uv_w] = (fixed_t)(b - W);
dst += 1;
src1 += 2;
src2 += 2;
}
}
static void StoreGray(const fixed_y_t* rgb, fixed_y_t* y, int w) {
int i;
for (i = 0; i < w; ++i) {
y[i] = RGBToGray(rgb[0 * w + i], rgb[1 * w + i], rgb[2 * w + i]);
}
}
//------------------------------------------------------------------------------
static WEBP_INLINE fixed_y_t Filter2(int A, int B, int W0) {
const int v0 = (A * 3 + B + 2) >> 2;
return clip_y(v0 + W0);
}
//------------------------------------------------------------------------------
static WEBP_INLINE fixed_y_t UpLift(uint8_t a) { // 8bit -> SFIX
return ((fixed_y_t)a << SFIX) | SHALF;
}
static void ImportOneRow(const uint8_t* const r_ptr,
const uint8_t* const g_ptr,
const uint8_t* const b_ptr,
int step,
int pic_width,
fixed_y_t* const dst) {
int i;
const int w = (pic_width + 1) & ~1;
for (i = 0; i < pic_width; ++i) {
const int off = i * step;
dst[i + 0 * w] = UpLift(r_ptr[off]);
dst[i + 1 * w] = UpLift(g_ptr[off]);
dst[i + 2 * w] = UpLift(b_ptr[off]);
}
if (pic_width & 1) { // replicate rightmost pixel
dst[pic_width + 0 * w] = dst[pic_width + 0 * w - 1];
dst[pic_width + 1 * w] = dst[pic_width + 1 * w - 1];
dst[pic_width + 2 * w] = dst[pic_width + 2 * w - 1];
}
}
static void InterpolateTwoRows(const fixed_y_t* const best_y,
const fixed_t* prev_uv,
const fixed_t* cur_uv,
const fixed_t* next_uv,
int w,
fixed_y_t* out1,
fixed_y_t* out2) {
const int uv_w = w >> 1;
const int len = (w - 1) >> 1; // length to filter
int k = 3;
while (k-- > 0) { // process each R/G/B segment in turn
// special boundary case for i==0
out1[0] = Filter2(cur_uv[0], prev_uv[0], best_y[0]);
out2[0] = Filter2(cur_uv[0], next_uv[0], best_y[w]);
WebPSharpYUVFilterRow(cur_uv, prev_uv, len, best_y + 0 + 1, out1 + 1);
WebPSharpYUVFilterRow(cur_uv, next_uv, len, best_y + w + 1, out2 + 1);
// special boundary case for i == w - 1 when w is even
if (!(w & 1)) {
out1[w - 1] = Filter2(cur_uv[uv_w - 1], prev_uv[uv_w - 1],
best_y[w - 1 + 0]);
out2[w - 1] = Filter2(cur_uv[uv_w - 1], next_uv[uv_w - 1],
best_y[w - 1 + w]);
}
out1 += w;
out2 += w;
prev_uv += uv_w;
cur_uv += uv_w;
next_uv += uv_w;
}
}
static WEBP_INLINE uint8_t ConvertRGBToY(int r, int g, int b) {
const int luma = 16839 * r + 33059 * g + 6420 * b + SROUNDER;
return clip_8b(16 + (luma >> (YUV_FIX + SFIX)));
}
static WEBP_INLINE uint8_t ConvertRGBToU(int r, int g, int b) {
const int u = -9719 * r - 19081 * g + 28800 * b + SROUNDER;
return clip_8b(128 + (u >> (YUV_FIX + SFIX)));
}
static WEBP_INLINE uint8_t ConvertRGBToV(int r, int g, int b) {
const int v = +28800 * r - 24116 * g - 4684 * b + SROUNDER;
return clip_8b(128 + (v >> (YUV_FIX + SFIX)));
}
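All three converters share one fixed-point idiom: fold in half an output step (SROUNDER = 1 << (YUV_FIX + SFIX - 1)) before the final shift so it rounds to nearest instead of truncating. In isolation, with YUV_FIX + SFIX = 18:

  const int x = (5 << 18) / 2;          // the value 2.5 in Q18, i.e. 655360
  const int r = (x + (1 << 17)) >> 18;  // == 3; plain x >> 18 would give 2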
static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
WebPPicture* const picture) {
int i, j;
uint8_t* dst_y = picture->y;
uint8_t* dst_u = picture->u;
uint8_t* dst_v = picture->v;
const fixed_t* const best_uv_base = best_uv;
const int w = (picture->width + 1) & ~1;
const int h = (picture->height + 1) & ~1;
const int uv_w = w >> 1;
const int uv_h = h >> 1;
for (best_uv = best_uv_base, j = 0; j < picture->height; ++j) {
for (i = 0; i < picture->width; ++i) {
const int off = (i >> 1);
const int W = best_y[i];
const int r = best_uv[off + 0 * uv_w] + W;
const int g = best_uv[off + 1 * uv_w] + W;
const int b = best_uv[off + 2 * uv_w] + W;
dst_y[i] = ConvertRGBToY(r, g, b);
}
best_y += w;
best_uv += (j & 1) * 3 * uv_w;
dst_y += picture->y_stride;
}
for (best_uv = best_uv_base, j = 0; j < uv_h; ++j) {
for (i = 0; i < uv_w; ++i) {
const int off = i;
const int r = best_uv[off + 0 * uv_w];
const int g = best_uv[off + 1 * uv_w];
const int b = best_uv[off + 2 * uv_w];
dst_u[i] = ConvertRGBToU(r, g, b);
dst_v[i] = ConvertRGBToV(r, g, b);
}
best_uv += 3 * uv_w;
dst_u += picture->uv_stride;
dst_v += picture->uv_stride;
}
return 1;
}
//------------------------------------------------------------------------------
// Main function
#define SAFE_ALLOC(W, H, T) ((T*)WebPSafeMalloc((W) * (H), sizeof(T)))
extern void SharpYuvInit(VP8CPUInfo cpu_info_func);
static void SafeInitSharpYuv(void) {
#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
static pthread_mutex_t initsharpyuv_lock = PTHREAD_MUTEX_INITIALIZER;
if (pthread_mutex_lock(&initsharpyuv_lock)) return;
#endif
SharpYuvInit(VP8GetCPUInfo);
#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
(void)pthread_mutex_unlock(&initsharpyuv_lock);
#endif
}
static int PreprocessARGB(const uint8_t* r_ptr,
const uint8_t* g_ptr,
const uint8_t* b_ptr,
int step, int rgb_stride,
WebPPicture* const picture) {
// we expand the right/bottom border if needed
const int w = (picture->width + 1) & ~1;
const int h = (picture->height + 1) & ~1;
const int uv_w = w >> 1;
const int uv_h = h >> 1;
uint64_t prev_diff_y_sum = ~0;
int j, iter;
// TODO(skal): allocate one big memory chunk. But for now, it's easier
// for valgrind debugging to have several chunks.
fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t); // scratch
fixed_y_t* const best_y_base = SAFE_ALLOC(w, h, fixed_y_t);
fixed_y_t* const target_y_base = SAFE_ALLOC(w, h, fixed_y_t);
fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t);
fixed_t* const best_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
fixed_t* const target_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t);
fixed_y_t* best_y = best_y_base;
fixed_y_t* target_y = target_y_base;
fixed_t* best_uv = best_uv_base;
fixed_t* target_uv = target_uv_base;
const uint64_t diff_y_threshold = (uint64_t)(3.0 * w * h);
int ok;
if (best_y_base == NULL || best_uv_base == NULL ||
target_y_base == NULL || target_uv_base == NULL ||
best_rgb_y == NULL || best_rgb_uv == NULL ||
tmp_buffer == NULL) {
ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto End;
const int ok = SharpYuvConvert(
r_ptr, g_ptr, b_ptr, step, rgb_stride, /*rgb_bit_depth=*/8,
picture->y, picture->y_stride, picture->u, picture->uv_stride, picture->v,
picture->uv_stride, /*yuv_bit_depth=*/8, picture->width,
picture->height, SharpYuvGetConversionMatrix(kSharpYuvMatrixWebp));
if (!ok) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
}
assert(picture->width >= kMinDimensionIterativeConversion);
assert(picture->height >= kMinDimensionIterativeConversion);
WebPInitConvertARGBToYUV();
// Import RGB samples to W/RGB representation.
for (j = 0; j < picture->height; j += 2) {
const int is_last_row = (j == picture->height - 1);
fixed_y_t* const src1 = tmp_buffer + 0 * w;
fixed_y_t* const src2 = tmp_buffer + 3 * w;
// prepare two rows of input
ImportOneRow(r_ptr, g_ptr, b_ptr, step, picture->width, src1);
if (!is_last_row) {
ImportOneRow(r_ptr + rgb_stride, g_ptr + rgb_stride, b_ptr + rgb_stride,
step, picture->width, src2);
} else {
memcpy(src2, src1, 3 * w * sizeof(*src2));
}
StoreGray(src1, best_y + 0, w);
StoreGray(src2, best_y + w, w);
UpdateW(src1, target_y, w);
UpdateW(src2, target_y + w, w);
UpdateChroma(src1, src2, target_uv, uv_w);
memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv));
best_y += 2 * w;
best_uv += 3 * uv_w;
target_y += 2 * w;
target_uv += 3 * uv_w;
r_ptr += 2 * rgb_stride;
g_ptr += 2 * rgb_stride;
b_ptr += 2 * rgb_stride;
}
// Iterate and resolve clipping conflicts.
for (iter = 0; iter < kNumIterations; ++iter) {
const fixed_t* cur_uv = best_uv_base;
const fixed_t* prev_uv = best_uv_base;
uint64_t diff_y_sum = 0;
best_y = best_y_base;
best_uv = best_uv_base;
target_y = target_y_base;
target_uv = target_uv_base;
for (j = 0; j < h; j += 2) {
fixed_y_t* const src1 = tmp_buffer + 0 * w;
fixed_y_t* const src2 = tmp_buffer + 3 * w;
{
const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 3 * uv_w : 0);
InterpolateTwoRows(best_y, prev_uv, cur_uv, next_uv, w, src1, src2);
prev_uv = cur_uv;
cur_uv = next_uv;
}
UpdateW(src1, best_rgb_y + 0 * w, w);
UpdateW(src2, best_rgb_y + 1 * w, w);
UpdateChroma(src1, src2, best_rgb_uv, uv_w);
// update two rows of Y and one row of RGB
diff_y_sum += WebPSharpYUVUpdateY(target_y, best_rgb_y, best_y, 2 * w);
WebPSharpYUVUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w);
best_y += 2 * w;
best_uv += 3 * uv_w;
target_y += 2 * w;
target_uv += 3 * uv_w;
}
// test exit condition
if (iter > 0) {
if (diff_y_sum < diff_y_threshold) break;
if (diff_y_sum > prev_diff_y_sum) break;
}
prev_diff_y_sum = diff_y_sum;
}
// final reconstruction
ok = ConvertWRGBToYUV(best_y_base, best_uv_base, picture);
End:
WebPSafeFree(best_y_base);
WebPSafeFree(best_uv_base);
WebPSafeFree(target_y_base);
WebPSafeFree(target_uv_base);
WebPSafeFree(best_rgb_y);
WebPSafeFree(best_rgb_uv);
WebPSafeFree(tmp_buffer);
return ok;
}
#undef SAFE_ALLOC
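/* Shape of the iterative conversion above, as read from the code (not
 * normative):
 *   1. import RGB pairs of rows into the W/RGB working planes;
 *   2. for up to kNumIterations: re-interpolate chroma, re-derive W and
 *      RGB, then nudge Y (WebPSharpYUVUpdateY) and UV
 *      (WebPSharpYUVUpdateRGB) toward their targets, stopping early once
 *      the summed luma error stalls or falls under ~3 per pixel
 *      (diff_y_threshold);
 *   3. convert the converged planes to the final Y/U/V buffers.
 */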
//------------------------------------------------------------------------------
// "Fast" regular RGB->YUV
@ -591,8 +224,8 @@ static const int kAlphaFix = 19;
// and constant are adjusted very tightly to fit 32b arithmetic.
// In particular, they use the fact that the operands for 'v / a' are actually
// derived as v = (a0.p0 + a1.p1 + a2.p2 + a3.p3) and a = a0 + a1 + a2 + a3
// with ai in [0..255] and pi in [0..1<<kGammaFix). The constraint to avoid
// overflow is: kGammaFix + kAlphaFix <= 31.
// with ai in [0..255] and pi in [0..1<<GAMMA_FIX). The constraint to avoid
// overflow is: GAMMA_FIX + kAlphaFix <= 31.
static const uint32_t kInvAlpha[4 * 0xff + 1] = {
0, /* alpha = 0 */
524288, 262144, 174762, 131072, 104857, 87381, 74898, 65536,
@ -818,11 +451,20 @@ static WEBP_INLINE void AccumulateRGB(const uint8_t* const r_ptr,
dst[0] = SUM4(r_ptr + j, step);
dst[1] = SUM4(g_ptr + j, step);
dst[2] = SUM4(b_ptr + j, step);
// MemorySanitizer may raise false positives with data that passes through
// RGBA32PackedToPlanar_16b_SSE41() due to incorrect modeling of shuffles.
// See https://crbug.com/webp/573.
#ifdef WEBP_MSAN
dst[3] = 0;
#endif
}
if (width & 1) {
dst[0] = SUM2(r_ptr + j);
dst[1] = SUM2(g_ptr + j);
dst[2] = SUM2(b_ptr + j);
#ifdef WEBP_MSAN
dst[3] = 0;
#endif
}
}
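/* Tie-in with the kInvAlpha table above (a sketch; exact rounding in the
 * library may differ): with kAlphaFix == 19, kInvAlpha[a] is roughly
 * (1 << 19) / a (e.g. kInvAlpha[2] == 262144), so the 'v / a' described
 * before the table becomes a multiply-and-shift with no runtime divide.
 */
#if 0  /* illustration only */
static WEBP_INLINE uint32_t DivideByAlpha(uint32_t v, uint32_t a) {
  return (uint32_t)(((uint64_t)v * kInvAlpha[a]) >> 19);  // ~ v / a
}
#endif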
@ -863,18 +505,18 @@ static int ImportYUVAFromRGBA(const uint8_t* r_ptr,
use_iterative_conversion = 0;
}
if (!WebPPictureAllocYUVA(picture, width, height)) {
if (!WebPPictureAllocYUVA(picture)) {
return 0;
}
if (has_alpha) {
assert(step == 4);
#if defined(USE_GAMMA_COMPRESSION) && defined(USE_INVERSE_ALPHA_TABLE)
assert(kAlphaFix + kGammaFix <= 31);
assert(kAlphaFix + GAMMA_FIX <= 31);
#endif
}
if (use_iterative_conversion) {
InitGammaTablesS();
SafeInitSharpYuv();
if (!PreprocessARGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture)) {
return 0;
}
@ -1044,7 +686,7 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
// Allocate a new argb buffer (discarding the previous one).
if (!WebPPictureAllocARGB(picture, picture->width, picture->height)) return 0;
if (!WebPPictureAllocARGB(picture)) return 0;
picture->use_argb = 1;
// Convert
@ -1106,6 +748,8 @@ static int Import(WebPPicture* const picture,
const int width = picture->width;
const int height = picture->height;
if (abs(rgb_stride) < (import_alpha ? 4 : 3) * width) return 0;
if (!picture->use_argb) {
const uint8_t* a_ptr = import_alpha ? rgb + 3 : NULL;
return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
@ -1163,24 +807,24 @@ static int Import(WebPPicture* const picture,
#if !defined(WEBP_REDUCE_CSP)
int WebPPictureImportBGR(WebPPicture* picture,
const uint8_t* rgb, int rgb_stride) {
return (picture != NULL && rgb != NULL)
? Import(picture, rgb, rgb_stride, 3, 1, 0)
const uint8_t* bgr, int bgr_stride) {
return (picture != NULL && bgr != NULL)
? Import(picture, bgr, bgr_stride, 3, 1, 0)
: 0;
}
int WebPPictureImportBGRA(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return (picture != NULL && rgba != NULL)
? Import(picture, rgba, rgba_stride, 4, 1, 1)
const uint8_t* bgra, int bgra_stride) {
return (picture != NULL && bgra != NULL)
? Import(picture, bgra, bgra_stride, 4, 1, 1)
: 0;
}
int WebPPictureImportBGRX(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return (picture != NULL && rgba != NULL)
? Import(picture, rgba, rgba_stride, 4, 1, 0)
const uint8_t* bgrx, int bgrx_stride) {
return (picture != NULL && bgrx != NULL)
? Import(picture, bgrx, bgrx_stride, 4, 1, 0)
: 0;
}
@ -1201,9 +845,9 @@ int WebPPictureImportRGBA(WebPPicture* picture,
}
int WebPPictureImportRGBX(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return (picture != NULL && rgba != NULL)
? Import(picture, rgba, rgba_stride, 4, 0, 0)
const uint8_t* rgbx, int rgbx_stride) {
return (picture != NULL && rgbx != NULL)
? Import(picture, rgbx, rgbx_stride, 4, 0, 0)
: 0;
}

View File

@ -45,6 +45,22 @@ int WebPPictureInitInternal(WebPPicture* picture, int version) {
//------------------------------------------------------------------------------
int WebPValidatePicture(const WebPPicture* const picture) {
if (picture == NULL) return 0;
if (picture->width <= 0 || picture->height <= 0) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
}
if (picture->width <= 0 || picture->width / 4 > INT_MAX / 4 ||
picture->height <= 0 || picture->height / 4 > INT_MAX / 4) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
}
if (picture->colorspace != WEBP_YUV420 &&
picture->colorspace != WEBP_YUV420A) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
return 1;
}
static void WebPPictureResetBufferARGB(WebPPicture* const picture) {
picture->memory_argb_ = NULL;
picture->argb = NULL;
@ -63,18 +79,17 @@ void WebPPictureResetBuffers(WebPPicture* const picture) {
WebPPictureResetBufferYUVA(picture);
}
int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
int WebPPictureAllocARGB(WebPPicture* const picture) {
void* memory;
const int width = picture->width;
const int height = picture->height;
const uint64_t argb_size = (uint64_t)width * height;
assert(picture != NULL);
if (!WebPValidatePicture(picture)) return 0;
WebPSafeFree(picture->memory_argb_);
WebPPictureResetBufferARGB(picture);
if (width <= 0 || height <= 0) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
}
// allocate a new buffer.
memory = WebPSafeMalloc(argb_size + WEBP_ALIGN_CST, sizeof(*picture->argb));
if (memory == NULL) {
@ -86,10 +101,10 @@ int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
return 1;
}
int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
const WebPEncCSP uv_csp =
(WebPEncCSP)((int)picture->colorspace & WEBP_CSP_UV_MASK);
int WebPPictureAllocYUVA(WebPPicture* const picture) {
const int has_alpha = (int)picture->colorspace & WEBP_CSP_ALPHA_BIT;
const int width = picture->width;
const int height = picture->height;
const int y_stride = width;
const int uv_width = (int)(((int64_t)width + 1) >> 1);
const int uv_height = (int)(((int64_t)height + 1) >> 1);
@ -98,15 +113,11 @@ int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
uint64_t y_size, uv_size, a_size, total_size;
uint8_t* mem;
assert(picture != NULL);
if (!WebPValidatePicture(picture)) return 0;
WebPSafeFree(picture->memory_);
WebPPictureResetBufferYUVA(picture);
if (uv_csp != WEBP_YUV420) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
// alpha
a_width = has_alpha ? width : 0;
a_stride = a_width;
@ -152,15 +163,12 @@ int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
int WebPPictureAlloc(WebPPicture* picture) {
if (picture != NULL) {
const int width = picture->width;
const int height = picture->height;
WebPPictureFree(picture); // erase previous buffer
if (!picture->use_argb) {
return WebPPictureAllocYUVA(picture, width, height);
return WebPPictureAllocYUVA(picture);
} else {
return WebPPictureAllocARGB(picture, width, height);
return WebPPictureAllocARGB(picture);
}
}
return 1;
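/* With the new signatures the allocators read dimensions straight from the
 * picture. Typical caller flow, using only the public API (a sketch):
 */
#if 0  /* illustration only */
WebPPicture pic;
if (WebPPictureInit(&pic)) {
  pic.width = 640;
  pic.height = 480;
  pic.use_argb = 1;              // select the ARGB buffer over YUVA
  if (WebPPictureAlloc(&pic)) {  // validates, then allocates
    /* ... fill pic.argb, encode, ... */
    WebPPictureFree(&pic);
  }
}
#endif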

View File

@ -13,14 +13,15 @@
#include "src/webp/encode.h"
#if !defined(WEBP_REDUCE_SIZE)
#include <assert.h>
#include <stdlib.h>
#include "src/enc/vp8i_enc.h"
#if !defined(WEBP_REDUCE_SIZE)
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"
#endif // !defined(WEBP_REDUCE_SIZE)
#define HALVE(x) (((x) + 1) >> 1)
@ -56,6 +57,7 @@ static int AdjustAndCheckRectangle(const WebPPicture* const pic,
return 1;
}
#if !defined(WEBP_REDUCE_SIZE)
int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
if (src == NULL || dst == NULL) return 0;
if (src == dst) return 1;
@ -81,6 +83,7 @@ int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
}
return 1;
}
#endif // !defined(WEBP_REDUCE_SIZE)
int WebPPictureIsView(const WebPPicture* picture) {
if (picture == NULL) return 0;
@ -120,6 +123,7 @@ int WebPPictureView(const WebPPicture* src,
return 1;
}
#if !defined(WEBP_REDUCE_SIZE)
//------------------------------------------------------------------------------
// Picture cropping
@ -198,34 +202,34 @@ static void AlphaMultiplyY(WebPPicture* const pic, int inverse) {
}
}
int WebPPictureRescale(WebPPicture* pic, int width, int height) {
int WebPPictureRescale(WebPPicture* picture, int width, int height) {
WebPPicture tmp;
int prev_width, prev_height;
rescaler_t* work;
if (pic == NULL) return 0;
prev_width = pic->width;
prev_height = pic->height;
if (picture == NULL) return 0;
prev_width = picture->width;
prev_height = picture->height;
if (!WebPRescalerGetScaledDimensions(
prev_width, prev_height, &width, &height)) {
return 0;
}
PictureGrabSpecs(pic, &tmp);
PictureGrabSpecs(picture, &tmp);
tmp.width = width;
tmp.height = height;
if (!WebPPictureAlloc(&tmp)) return 0;
if (!pic->use_argb) {
if (!picture->use_argb) {
work = (rescaler_t*)WebPSafeMalloc(2ULL * width, sizeof(*work));
if (work == NULL) {
WebPPictureFree(&tmp);
return 0;
}
// If present, we need to rescale alpha first (for AlphaMultiplyY).
if (pic->a != NULL) {
if (picture->a != NULL) {
WebPInitAlphaProcessing();
if (!RescalePlane(pic->a, prev_width, prev_height, pic->a_stride,
if (!RescalePlane(picture->a, prev_width, prev_height, picture->a_stride,
tmp.a, width, height, tmp.a_stride, work, 1)) {
return 0;
}
@ -233,17 +237,15 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
// We take transparency into account on the luma plane only. That's not
// totally exact blending, but still is a good approximation.
AlphaMultiplyY(pic, 0);
if (!RescalePlane(pic->y, prev_width, prev_height, pic->y_stride,
AlphaMultiplyY(picture, 0);
if (!RescalePlane(picture->y, prev_width, prev_height, picture->y_stride,
tmp.y, width, height, tmp.y_stride, work, 1) ||
!RescalePlane(pic->u,
HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
tmp.u,
HALVE(width), HALVE(height), tmp.uv_stride, work, 1) ||
!RescalePlane(pic->v,
HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
tmp.v,
HALVE(width), HALVE(height), tmp.uv_stride, work, 1)) {
!RescalePlane(picture->u, HALVE(prev_width), HALVE(prev_height),
picture->uv_stride, tmp.u, HALVE(width), HALVE(height),
tmp.uv_stride, work, 1) ||
!RescalePlane(picture->v, HALVE(prev_width), HALVE(prev_height),
picture->uv_stride, tmp.v, HALVE(width), HALVE(height),
tmp.uv_stride, work, 1)) {
return 0;
}
AlphaMultiplyY(&tmp, 1);
@ -257,18 +259,17 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
// weighting first (black-matting), scale the RGB values, and remove
// the premultiplication afterward (while preserving the alpha channel).
WebPInitAlphaProcessing();
AlphaMultiplyARGB(pic, 0);
if (!RescalePlane((const uint8_t*)pic->argb, prev_width, prev_height,
pic->argb_stride * 4,
(uint8_t*)tmp.argb, width, height,
tmp.argb_stride * 4, work, 4)) {
AlphaMultiplyARGB(picture, 0);
if (!RescalePlane((const uint8_t*)picture->argb, prev_width, prev_height,
picture->argb_stride * 4, (uint8_t*)tmp.argb, width,
height, tmp.argb_stride * 4, work, 4)) {
return 0;
}
AlphaMultiplyARGB(&tmp, 1);
}
WebPPictureFree(pic);
WebPPictureFree(picture);
WebPSafeFree(work);
*pic = tmp;
*picture = tmp;
return 1;
}
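// Usage note (per the public header): passing 0 for one dimension keeps
// the aspect ratio, e.g. WebPPictureRescale(&pic, 320, 0) scales to a
// width of 320 and a proportional height.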
@ -280,23 +281,6 @@ int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
return 0;
}
int WebPPictureIsView(const WebPPicture* picture) {
(void)picture;
return 0;
}
int WebPPictureView(const WebPPicture* src,
int left, int top, int width, int height,
WebPPicture* dst) {
(void)src;
(void)left;
(void)top;
(void)width;
(void)height;
(void)dst;
return 0;
}
int WebPPictureCrop(WebPPicture* pic,
int left, int top, int width, int height) {
(void)pic;

View File

@ -190,27 +190,28 @@ static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
return (0xff000000u | (r << 16) | (g << 8) | b);
}
void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
void WebPBlendAlpha(WebPPicture* picture, uint32_t background_rgb) {
const int red = (background_rgb >> 16) & 0xff;
const int green = (background_rgb >> 8) & 0xff;
const int blue = (background_rgb >> 0) & 0xff;
int x, y;
if (pic == NULL) return;
if (!pic->use_argb) {
const int uv_width = (pic->width >> 1); // omit last pixel during u/v loop
if (picture == NULL) return;
if (!picture->use_argb) {
// omit last pixel during u/v loop
const int uv_width = (picture->width >> 1);
const int Y0 = VP8RGBToY(red, green, blue, YUV_HALF);
// VP8RGBToU/V expects the u/v values summed over four pixels
const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
uint8_t* y_ptr = pic->y;
uint8_t* u_ptr = pic->u;
uint8_t* v_ptr = pic->v;
uint8_t* a_ptr = pic->a;
const int has_alpha = picture->colorspace & WEBP_CSP_ALPHA_BIT;
uint8_t* y_ptr = picture->y;
uint8_t* u_ptr = picture->u;
uint8_t* v_ptr = picture->v;
uint8_t* a_ptr = picture->a;
if (!has_alpha || a_ptr == NULL) return; // nothing to do
for (y = 0; y < pic->height; ++y) {
for (y = 0; y < picture->height; ++y) {
// Luma blending
for (x = 0; x < pic->width; ++x) {
for (x = 0; x < picture->width; ++x) {
const uint8_t alpha = a_ptr[x];
if (alpha < 0xff) {
y_ptr[x] = BLEND(Y0, y_ptr[x], alpha);
@ -219,7 +220,7 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
// Chroma blending every even line
if ((y & 1) == 0) {
uint8_t* const a_ptr2 =
(y + 1 == pic->height) ? a_ptr : a_ptr + pic->a_stride;
(y + 1 == picture->height) ? a_ptr : a_ptr + picture->a_stride;
for (x = 0; x < uv_width; ++x) {
// Average four alpha values into a single blending weight.
// TODO(skal): might lead to visible contouring. Can we do better?
@ -229,24 +230,24 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
}
if (pic->width & 1) { // rightmost pixel
if (picture->width & 1) { // rightmost pixel
const uint32_t alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
}
} else {
u_ptr += pic->uv_stride;
v_ptr += pic->uv_stride;
u_ptr += picture->uv_stride;
v_ptr += picture->uv_stride;
}
memset(a_ptr, 0xff, pic->width); // reset alpha value to opaque
a_ptr += pic->a_stride;
y_ptr += pic->y_stride;
memset(a_ptr, 0xff, picture->width); // reset alpha value to opaque
a_ptr += picture->a_stride;
y_ptr += picture->y_stride;
}
} else {
uint32_t* argb = pic->argb;
uint32_t* argb = picture->argb;
const uint32_t background = MakeARGB32(red, green, blue);
for (y = 0; y < pic->height; ++y) {
for (x = 0; x < pic->width; ++x) {
for (y = 0; y < picture->height; ++y) {
for (x = 0; x < picture->width; ++x) {
const int alpha = (argb[x] >> 24) & 0xff;
if (alpha != 0xff) {
if (alpha > 0) {
@ -262,7 +263,7 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
}
}
}
argb += pic->argb_stride;
argb += picture->argb_stride;
}
}
}

View File

@ -16,6 +16,7 @@
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/enc/vp8i_enc.h"
#include "src/enc/vp8li_enc.h"
#define MAX_DIFF_COST (1e30f)
@ -31,10 +32,10 @@ static WEBP_INLINE int GetMin(int a, int b) { return (a > b) ? b : a; }
// Methods to calculate Entropy (Shannon).
static float PredictionCostSpatial(const int counts[256], int weight_0,
double exp_val) {
float exp_val) {
const int significant_symbols = 256 >> 4;
const double exp_decay_factor = 0.6;
double bits = weight_0 * counts[0];
const float exp_decay_factor = 0.6f;
float bits = (float)weight_0 * counts[0];
int i;
for (i = 1; i < significant_symbols; ++i) {
bits += exp_val * (counts[i] + counts[256 - i]);
@ -46,9 +47,9 @@ static float PredictionCostSpatial(const int counts[256], int weight_0,
static float PredictionCostSpatialHistogram(const int accumulated[4][256],
const int tile[4][256]) {
int i;
double retval = 0;
float retval = 0.f;
for (i = 0; i < 4; ++i) {
const double kExpValue = 0.94;
const float kExpValue = 0.94f;
retval += PredictionCostSpatial(tile[i], 1, kExpValue);
retval += VP8LCombinedShannonEntropy(tile[i], accumulated[i]);
}
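// Reading note: counts[] indexes residuals modulo 256, so counts[i] and
// counts[256 - i] pair the +i and -i residuals, and the exp_val decay
// (factor 0.6 per step) rewards residuals clustered near zero. The
// double -> float changes in these hunks keep this cost path in single
// precision, with explicit (float) casts at the narrowing points.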
@ -472,12 +473,15 @@ static void CopyImageWithPrediction(int width, int height,
// with respect to predictions. If near_lossless_quality < 100, applies
// near lossless processing, shaving off more bits of residuals for lower
// qualities.
void VP8LResidualImage(int width, int height, int bits, int low_effort,
int VP8LResidualImage(int width, int height, int bits, int low_effort,
uint32_t* const argb, uint32_t* const argb_scratch,
uint32_t* const image, int near_lossless_quality,
int exact, int used_subtract_green) {
int exact, int used_subtract_green,
const WebPPicture* const pic, int percent_range,
int* const percent) {
const int tiles_per_row = VP8LSubSampleSize(width, bits);
const int tiles_per_col = VP8LSubSampleSize(height, bits);
int percent_start = *percent;
int tile_y;
int histo[4][256];
const int max_quantization = 1 << VP8LNearLosslessBits(near_lossless_quality);
@ -491,17 +495,24 @@ void VP8LResidualImage(int width, int height, int bits, int low_effort,
for (tile_y = 0; tile_y < tiles_per_col; ++tile_y) {
int tile_x;
for (tile_x = 0; tile_x < tiles_per_row; ++tile_x) {
const int pred = GetBestPredictorForTile(width, height, tile_x, tile_y,
bits, histo, argb_scratch, argb, max_quantization, exact,
used_subtract_green, image);
const int pred = GetBestPredictorForTile(
width, height, tile_x, tile_y, bits, histo, argb_scratch, argb,
max_quantization, exact, used_subtract_green, image);
image[tile_y * tiles_per_row + tile_x] = ARGB_BLACK | (pred << 8);
}
if (!WebPReportProgress(
pic, percent_start + percent_range * tile_y / tiles_per_col,
percent)) {
return 0;
}
}
}
CopyImageWithPrediction(width, height, bits, image, argb_scratch, argb,
low_effort, max_quantization, exact,
used_subtract_green);
return WebPReportProgress(pic, percent_start + percent_range, percent);
}
//------------------------------------------------------------------------------
@ -532,7 +543,7 @@ static float PredictionCostCrossColor(const int accumulated[256],
const int counts[256]) {
// Favor low entropy, locally and globally.
// Favor small absolute values for PredictionCostSpatial
static const double kExpValue = 2.4;
static const float kExpValue = 2.4f;
return VP8LCombinedShannonEntropy(counts, accumulated) +
PredictionCostSpatial(counts, 3, kExpValue);
}
@ -714,11 +725,14 @@ static void CopyTileWithColorTransform(int xsize, int ysize,
}
}
void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
uint32_t* const argb, uint32_t* image) {
int VP8LColorSpaceTransform(int width, int height, int bits, int quality,
uint32_t* const argb, uint32_t* image,
const WebPPicture* const pic, int percent_range,
int* const percent) {
const int max_tile_size = 1 << bits;
const int tile_xsize = VP8LSubSampleSize(width, bits);
const int tile_ysize = VP8LSubSampleSize(height, bits);
int percent_start = *percent;
int accumulated_red_histo[256] = { 0 };
int accumulated_blue_histo[256] = { 0 };
int tile_x, tile_y;
@ -768,5 +782,11 @@ void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
}
}
}
if (!WebPReportProgress(
pic, percent_start + percent_range * tile_y / tile_ysize,
percent)) {
return 0;
}
}
return 1;
}
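/* Both transforms now thread 'pic'/'percent' through WebPReportProgress(),
 * which calls the user's hook and aborts when it returns 0. A minimal hook
 * via the public WebPProgressHook API (a sketch; needs <stdio.h>):
 */
#if 0  /* illustration only */
static int PrintProgress(int percent, const WebPPicture* picture) {
  (void)picture;
  fprintf(stderr, "\rencoding: %d%%", percent);
  return 1;  // return 0 to cancel (encoder reports VP8_ENC_ERROR_USER_ABORT)
}
/* ... pic.progress_hook = PrintProgress; before calling WebPEncode() ... */
#endif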

View File

@ -533,7 +533,8 @@ static void InitScore(VP8ModeScore* const rd) {
rd->score = MAX_COST;
}
static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
static void CopyScore(VP8ModeScore* WEBP_RESTRICT const dst,
const VP8ModeScore* WEBP_RESTRICT const src) {
dst->D = src->D;
dst->SD = src->SD;
dst->R = src->R;
@ -542,7 +543,8 @@ static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
dst->score = src->score;
}
static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
static void AddScore(VP8ModeScore* WEBP_RESTRICT const dst,
const VP8ModeScore* WEBP_RESTRICT const src) {
dst->D += src->D;
dst->SD += src->SD;
dst->R += src->R;
@ -588,10 +590,10 @@ static WEBP_INLINE score_t RDScoreTrellis(int lambda, score_t rate,
// Coefficient type.
enum { TYPE_I16_AC = 0, TYPE_I16_DC = 1, TYPE_CHROMA_A = 2, TYPE_I4_AC = 3 };
static int TrellisQuantizeBlock(const VP8Encoder* const enc,
static int TrellisQuantizeBlock(const VP8Encoder* WEBP_RESTRICT const enc,
int16_t in[16], int16_t out[16],
int ctx0, int coeff_type,
const VP8Matrix* const mtx,
const VP8Matrix* WEBP_RESTRICT const mtx,
int lambda) {
const ProbaArray* const probas = enc->proba_.coeffs_[coeff_type];
CostArrayPtr const costs =
@ -767,9 +769,9 @@ static int TrellisQuantizeBlock(const VP8Encoder* const enc,
// all at once. Output is the reconstructed block in *yuv_out, and the
// quantized levels in *levels.
static int ReconstructIntra16(VP8EncIterator* const it,
VP8ModeScore* const rd,
uint8_t* const yuv_out,
static int ReconstructIntra16(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd,
uint8_t* WEBP_RESTRICT const yuv_out,
int mode) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
@ -819,10 +821,10 @@ static int ReconstructIntra16(VP8EncIterator* const it,
return nz;
}
static int ReconstructIntra4(VP8EncIterator* const it,
static int ReconstructIntra4(VP8EncIterator* WEBP_RESTRICT const it,
int16_t levels[16],
const uint8_t* const src,
uint8_t* const yuv_out,
const uint8_t* WEBP_RESTRICT const src,
uint8_t* WEBP_RESTRICT const yuv_out,
int mode) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
@ -855,7 +857,8 @@ static int ReconstructIntra4(VP8EncIterator* const it,
// Quantize as usual, but also compute and return the quantization error.
// Error is already divided by DSHIFT.
static int QuantizeSingle(int16_t* const v, const VP8Matrix* const mtx) {
static int QuantizeSingle(int16_t* WEBP_RESTRICT const v,
const VP8Matrix* WEBP_RESTRICT const mtx) {
int V = *v;
const int sign = (V < 0);
if (sign) V = -V;
@ -869,9 +872,10 @@ static int QuantizeSingle(int16_t* const v, const VP8Matrix* const mtx) {
return (sign ? -V : V) >> DSCALE;
}
static void CorrectDCValues(const VP8EncIterator* const it,
const VP8Matrix* const mtx,
int16_t tmp[][16], VP8ModeScore* const rd) {
static void CorrectDCValues(const VP8EncIterator* WEBP_RESTRICT const it,
const VP8Matrix* WEBP_RESTRICT const mtx,
int16_t tmp[][16],
VP8ModeScore* WEBP_RESTRICT const rd) {
// | top[0] | top[1]
// --------+--------+---------
// left[0] | tmp[0] tmp[1] <-> err0 err1
@ -902,8 +906,8 @@ static void CorrectDCValues(const VP8EncIterator* const it,
}
}
static void StoreDiffusionErrors(VP8EncIterator* const it,
const VP8ModeScore* const rd) {
static void StoreDiffusionErrors(VP8EncIterator* WEBP_RESTRICT const it,
const VP8ModeScore* WEBP_RESTRICT const rd) {
int ch;
for (ch = 0; ch <= 1; ++ch) {
int8_t* const top = it->top_derr_[it->x_][ch];
@ -922,8 +926,9 @@ static void StoreDiffusionErrors(VP8EncIterator* const it,
//------------------------------------------------------------------------------
static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
uint8_t* const yuv_out, int mode) {
static int ReconstructUV(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd,
uint8_t* WEBP_RESTRICT const yuv_out, int mode) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
@ -994,7 +999,8 @@ static void SwapOut(VP8EncIterator* const it) {
SwapPtr(&it->yuv_out_, &it->yuv_out2_);
}
static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
static void PickBestIntra16(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT rd) {
const int kNumBlocks = 16;
VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_i16_;
@ -1054,7 +1060,7 @@ static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
//------------------------------------------------------------------------------
// return the cost array corresponding to the surrounding prediction modes.
static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
static const uint16_t* GetCostModeI4(VP8EncIterator* WEBP_RESTRICT const it,
const uint8_t modes[16]) {
const int preds_w = it->enc_->preds_w_;
const int x = (it->i4_ & 3), y = it->i4_ >> 2;
@ -1063,7 +1069,8 @@ static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
return VP8FixedCostsI4[top][left];
}
static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
static int PickBestIntra4(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd) {
const VP8Encoder* const enc = it->enc_;
const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_i4_;
@ -1159,7 +1166,8 @@ static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
//------------------------------------------------------------------------------
static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
static void PickBestUV(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd) {
const int kNumBlocks = 8;
const VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_uv_;
@ -1211,7 +1219,8 @@ static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
//------------------------------------------------------------------------------
// Final reconstruction and quantization.
static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
static void SimpleQuantize(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd) {
const VP8Encoder* const enc = it->enc_;
const int is_i16 = (it->mb_->type_ == 1);
int nz = 0;
@ -1236,9 +1245,9 @@ static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
}
// Refine intra16/intra4 sub-modes based on distortion only (not rate).
static void RefineUsingDistortion(VP8EncIterator* const it,
static void RefineUsingDistortion(VP8EncIterator* WEBP_RESTRICT const it,
int try_both_modes, int refine_uv_mode,
VP8ModeScore* const rd) {
VP8ModeScore* WEBP_RESTRICT const rd) {
score_t best_score = MAX_COST;
int nz = 0;
int mode;
@ -1352,7 +1361,8 @@ static void RefineUsingDistortion(VP8EncIterator* const it,
//------------------------------------------------------------------------------
// Entry point
int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
int VP8Decimate(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd,
VP8RDLevel rd_opt) {
int is_skipped;
const int method = it->enc_->method_;

View File

@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define ENC_MAJ_VERSION 1
#define ENC_MIN_VERSION 2
#define ENC_REV_VERSION 2
#define ENC_REV_VERSION 4
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
@ -470,7 +470,8 @@ int VP8EncAnalyze(VP8Encoder* const enc);
// Sets up segment's quantization values, base_quant_ and filter strengths.
void VP8SetSegmentParams(VP8Encoder* const enc, float quality);
// Pick best modes and fills the levels. Returns true if skipped.
int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
int VP8Decimate(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd,
VP8RDLevel rd_opt);
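// WEBP_RESTRICT, added throughout these hunks, expands to the compiler's
// no-alias qualifier where available, promising the optimizer that 'it'
// and 'rd' never overlap. Roughly (a sketch; the real definition in the
// webp headers is compiler-conditional):
//   #if defined(__GNUC__) || defined(_MSC_VER)
//   #define WEBP_RESTRICT __restrict
//   #else
//   #define WEBP_RESTRICT
//   #endif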
// in alpha.c
@ -490,19 +491,24 @@ int VP8FilterStrengthFromDelta(int sharpness, int delta);
// misc utils for picture_*.c:
// Returns true if 'picture' is non-NULL and dimensions/colorspace are within
// their valid ranges. If returning false, the 'error_code' in 'picture' is
// updated.
int WebPValidatePicture(const WebPPicture* const picture);
// Remove reference to the ARGB/YUVA buffer (doesn't free anything).
void WebPPictureResetBuffers(WebPPicture* const picture);
// Allocates ARGB buffer of given dimension (previous one is always free'd).
// Preserves the YUV(A) buffer. Returns false in case of error (invalid param,
// out-of-memory).
int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height);
// Allocates ARGB buffer according to set width/height (previous one is
// always free'd). Preserves the YUV(A) buffer. Returns false in case of error
// (invalid param, out-of-memory).
int WebPPictureAllocARGB(WebPPicture* const picture);
// Allocates YUVA buffer of given dimension (previous one is always free'd).
// Uses picture->csp to determine whether an alpha buffer is needed.
// Allocates YUVA buffer according to set width/height (previous one is always
// free'd). Uses picture->csp to determine whether an alpha buffer is needed.
// Preserves the ARGB buffer.
// Returns false in case of error (invalid param, out-of-memory).
int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height);
int WebPPictureAllocYUVA(WebPPicture* const picture);
// Replace samples that are fully transparent by 'color' to help compressibility
// (no guarantee, though). Assumes pic->use_argb is true.

File diff suppressed because it is too large

View File

@ -89,9 +89,10 @@ int VP8LEncodeImage(const WebPConfig* const config,
// Encodes the main image stream using the supplied bit writer.
// If 'use_cache' is false, disables the use of color cache.
WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
const WebPPicture* const picture,
VP8LBitWriter* const bw, int use_cache);
// Returns false in case of error (stored in picture->error_code).
int VP8LEncodeStream(const WebPConfig* const config,
const WebPPicture* const picture, VP8LBitWriter* const bw,
int use_cache);
#if (WEBP_NEAR_LOSSLESS == 1)
// in near_lossless.c
@ -103,13 +104,18 @@ int VP8ApplyNearLossless(const WebPPicture* const picture, int quality,
//------------------------------------------------------------------------------
// Image transforms in predictor.c.
void VP8LResidualImage(int width, int height, int bits, int low_effort,
// pic and percent are for progress.
// Returns false in case of error (stored in pic->error_code).
int VP8LResidualImage(int width, int height, int bits, int low_effort,
uint32_t* const argb, uint32_t* const argb_scratch,
uint32_t* const image, int near_lossless, int exact,
int used_subtract_green);
int used_subtract_green, const WebPPicture* const pic,
int percent_range, int* const percent);
void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
uint32_t* const argb, uint32_t* image);
int VP8LColorSpaceTransform(int width, int height, int bits, int quality,
uint32_t* const argb, uint32_t* image,
const WebPPicture* const pic, int percent_range,
int* const percent);
//------------------------------------------------------------------------------

View File

@ -336,9 +336,7 @@ int WebPEncode(const WebPConfig* config, WebPPicture* pic) {
if (!WebPValidateConfig(config)) {
return WebPEncodingSetError(pic, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
if (pic->width <= 0 || pic->height <= 0) {
return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
}
if (!WebPValidatePicture(pic)) return 0;
if (pic->width > WEBP_MAX_DIMENSION || pic->height > WEBP_MAX_DIMENSION) {
return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
}

View File

@ -70,6 +70,7 @@ void WebPMuxDelete(WebPMux* mux) {
err = ChunkAssignData(&chunk, data, copy_data, tag); \
if (err == WEBP_MUX_OK) { \
err = ChunkSetHead(&chunk, (LIST)); \
if (err != WEBP_MUX_OK) ChunkRelease(&chunk); \
} \
return err; \
}
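// The added ChunkRelease() closes a leak: if ChunkSetHead() fails, the
// locally built chunk still owns the (possibly copied) payload and nothing
// else will ever free it. Releasing on the error path keeps ownership with
// either the list or the caller, never neither.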

View File

@ -29,7 +29,7 @@ extern "C" {
#define MUX_MAJ_VERSION 1
#define MUX_MIN_VERSION 2
#define MUX_REV_VERSION 2
#define MUX_REV_VERSION 4
// Chunk object.
typedef struct WebPChunk WebPChunk;

View File

@ -155,17 +155,18 @@ WebPMuxError ChunkSetHead(WebPChunk* const chunk,
WebPMuxError ChunkAppend(WebPChunk* const chunk,
WebPChunk*** const chunk_list) {
WebPMuxError err;
assert(chunk_list != NULL && *chunk_list != NULL);
if (**chunk_list == NULL) {
ChunkSetHead(chunk, *chunk_list);
err = ChunkSetHead(chunk, *chunk_list);
} else {
WebPChunk* last_chunk = **chunk_list;
while (last_chunk->next_ != NULL) last_chunk = last_chunk->next_;
ChunkSetHead(chunk, &last_chunk->next_);
*chunk_list = &last_chunk->next_;
err = ChunkSetHead(chunk, &last_chunk->next_);
if (err == WEBP_MUX_OK) *chunk_list = &last_chunk->next_;
}
return WEBP_MUX_OK;
return err;
}
//------------------------------------------------------------------------------

View File

@ -441,7 +441,7 @@ WEBP_EXTERN int WebPPictureCrop(WebPPicture* picture,
// the original dimension will be lost). Picture 'dst' need not be initialized
// with WebPPictureInit() if it is different from 'src', since its content will
// be overwritten.
// Returns false in case of memory allocation error or invalid parameters.
// Returns false in case of invalid parameters.
WEBP_EXTERN int WebPPictureView(const WebPPicture* src,
int left, int top, int width, int height,
WebPPicture* dst);
@ -455,7 +455,7 @@ WEBP_EXTERN int WebPPictureIsView(const WebPPicture* picture);
// dimension will be calculated preserving the aspect ratio.
// No gamma correction is applied.
// Returns false in case of error (invalid parameter or insufficient memory).
WEBP_EXTERN int WebPPictureRescale(WebPPicture* pic, int width, int height);
WEBP_EXTERN int WebPPictureRescale(WebPPicture* picture, int width, int height);
// Colorspace conversion function to import RGB samples.
// Previous buffer will be free'd, if any.
@ -526,7 +526,7 @@ WEBP_EXTERN int WebPPictureHasTransparency(const WebPPicture* picture);
// Remove the transparency information (if present) by blending the color with
// the background color 'background_rgb' (specified as 24bit RGB triplet).
// After this call, all alpha values are reset to 0xff.
WEBP_EXTERN void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb);
WEBP_EXTERN void WebPBlendAlpha(WebPPicture* picture, uint32_t background_rgb);
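// Example (a sketch): flatten transparency onto white before a lossy
// encode so no alpha plane needs to be stored:
//   WebPBlendAlpha(&picture, 0xffffffu);  // background as 24-bit RGB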
//------------------------------------------------------------------------------
// Main call