[add] first
This commit is contained in:
41
Libraries/external/baselib/Include/Baselib.h
vendored
Normal file
41
Libraries/external/baselib/Include/Baselib.h
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
#pragma once

// Baselib umbrella header: performs platform/architecture detection, then
// defines the linkage and symbol-visibility macros used by every other
// baselib header, and finally pulls in the core type/macro/assert headers.

#include "Internal/PlatformDetection.h"
#include "Internal/ArchitectureDetection.h"
#include "Internal/PlatformEnvironment.h"

// BASELIB_INLINE_NAMESPACE wraps the public interface in an inline namespace
// so several baselib versions can coexist in one binary. Inline namespaces are
// a C++ feature, so reject the option when compiling C.
#ifdef BASELIB_INLINE_NAMESPACE
#ifndef __cplusplus
#error "BASELIB_INLINE_NAMESPACE is not available when compiling C code"
#endif

#define BASELIB_CPP_INTERFACE inline namespace BASELIB_INLINE_NAMESPACE
#define BASELIB_C_INTERFACE BASELIB_CPP_INTERFACE
#else
#define BASELIB_CPP_INTERFACE extern "C++"
#define BASELIB_C_INTERFACE extern "C"
#endif

// Symbol visibility: consumers of the dynamic library import symbols, the
// library itself exports them, and static builds need no decoration.
// IMPORTED_SYMBOL / EXPORTED_SYMBOL come from the platform headers above.
#if defined(BASELIB_USE_DYNAMICLIBRARY)
#define BASELIB_API IMPORTED_SYMBOL
#elif defined(BASELIB_DYNAMICLIBRARY)
#define BASELIB_API EXPORTED_SYMBOL
#else
#define BASELIB_API
#endif

// BASELIB_BINDING_GENERATION is set by the bindings generator and by BindingsExposedInlineImplementations.cpp
// in order to selectively provide symbols bindings can link to for some of our inline implementations.
#ifdef BASELIB_BINDING_GENERATION
#define BASELIB_INLINE_API BASELIB_API
#define BASELIB_FORCEINLINE_API BASELIB_API
#else
#define BASELIB_INLINE_API static inline
#define BASELIB_FORCEINLINE_API static COMPILER_FORCEINLINE
#endif

#include "Internal/BasicTypes.h"
#include "Internal/CoreMacros.h"
#include "Internal/Assert.h"
|
||||
21
Libraries/external/baselib/Include/C/Baselib_Alignment.h
vendored
Normal file
21
Libraries/external/baselib/Include/C/Baselib_Alignment.h
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
#pragma once

// BASELIB_ALIGN_OF / BASELIB_ALIGN_AS: portable alignment query / alignment
// request macros. Both may be overridden by defining them before including
// this header (hence the #ifndef guards).

#ifndef BASELIB_ALIGN_OF
#if defined(__cplusplus) // We assume C++11 support (also, note that Msvc has correct version numbers on this attribute as opt-in)
#define BASELIB_ALIGN_OF(TYPE_) alignof(TYPE_)

// As of gcc 8 + clang 8, alignof and _Alignof return the ABI alignment of a type, as opposed to the preferred alignment.
// __alignof still returns the preferred alignment.
// Also see:
// https://gcc.gnu.org/gcc-8/porting_to.html#alignof
// https://releases.llvm.org/8.0.0/tools/clang/docs/ReleaseNotes.html#modified-compiler-flags
//
// BUGFIX: the check below previously read `STDC_VERSION >= 201112L`.
// STDC_VERSION (without the double underscores) is never defined, so the
// preprocessor evaluated it as 0 and the C11 _Alignof branch was unreachable.
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L // C11 or later: use the standard _Alignof operator
#define BASELIB_ALIGN_OF(TYPE_) _Alignof(TYPE_)
#else
// Pre-C11 C: fall back to the compiler-specific extension.
#define BASELIB_ALIGN_OF(TYPE_) COMPILER_ALIGN_OF(TYPE_)
#endif
#endif

#ifndef BASELIB_ALIGN_AS
#define BASELIB_ALIGN_AS(ALIGNMENT_) COMPILER_ALIGN_AS(ALIGNMENT_)
#endif
|
||||
411
Libraries/external/baselib/Include/C/Baselib_Atomic.h
vendored
Normal file
411
Libraries/external/baselib/Include/C/Baselib_Atomic.h
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
#pragma once
|
||||
|
||||
// This API is not type safe. For a type safe version use Baselib_Atomic_TypeSafe.h (C) or Atomic.h (C++)
|
||||
//
|
||||
// Atomics closely mimic C11/C++11 implementation, with the following differences:
|
||||
//
|
||||
// *) C API: as Visual Studio C compiler doesn't support _Generic we can't have a single named function operating on different types, or
|
||||
// selecting different implementations depending on memory order.
|
||||
// This leads to having to explicitly specify type size and ordering in the function name, for example
|
||||
// 'Baselib_atomic_load_32_acquire' instead of 'Baselib_atomic_load' as one would have available in in C11 or equivalent in C++11.
|
||||
//
|
||||
|
||||
// not type specific
|
||||
// ----------------------------------------------------------------------------------------------------------------------------------------
|
||||
static FORCE_INLINE void Baselib_atomic_thread_fence_relaxed(void);
|
||||
static FORCE_INLINE void Baselib_atomic_thread_fence_acquire(void);
|
||||
static FORCE_INLINE void Baselib_atomic_thread_fence_release(void);
|
||||
static FORCE_INLINE void Baselib_atomic_thread_fence_acq_rel(void);
|
||||
static FORCE_INLINE void Baselib_atomic_thread_fence_seq_cst(void);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_load_8_relaxed_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_8_acquire_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_8_seq_cst_v(const void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_8_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_8_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_8_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_8_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_8_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_8_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_8_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_8_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_8_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_8_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_8_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_8_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_8_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_8_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_8_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_8_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_8_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_8_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_8_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_8_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_8_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_8_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_8_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
// 16-bit declarations
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
static FORCE_INLINE void Baselib_atomic_load_16_relaxed_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_16_acquire_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_16_seq_cst_v(const void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_16_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_16_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_16_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_16_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_16_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_16_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_16_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_16_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_16_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_16_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_16_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_16_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_16_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_16_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_16_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_16_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_16_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_16_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_16_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_16_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_16_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_16_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_16_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
// 32-bit declarations
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
static FORCE_INLINE void Baselib_atomic_load_32_relaxed_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_32_acquire_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_32_seq_cst_v(const void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_32_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_32_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_32_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_32_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_32_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_32_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_32_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_32_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_32_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_32_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_32_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_32_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_32_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_32_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_32_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_32_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_32_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_32_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_32_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_32_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_32_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_32_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_32_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
// 64-bit declarations
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
static FORCE_INLINE void Baselib_atomic_load_64_relaxed_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_64_acquire_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_64_seq_cst_v(const void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_64_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_64_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_64_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_64_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_64_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_64_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_64_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_64_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_64_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_64_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_64_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_64_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_64_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_64_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_64_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_64_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_64_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_64_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_64_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_64_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_64_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_64_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_64_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
// 128-bit declarations
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
#if PLATFORM_ARCH_64
|
||||
|
||||
// commented out const:
|
||||
// 128bit loads are guranteed to not change obj but may need a store to confirm atomicity
|
||||
static FORCE_INLINE void Baselib_atomic_load_128_relaxed_v(/* const */ void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_128_acquire_v(/* const */ void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_128_seq_cst_v(/* const */ void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_128_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_128_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_128_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_128_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_128_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_128_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_128_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_128_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
#endif
|
||||
|
||||
// ptr declarations
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
static FORCE_INLINE void Baselib_atomic_load_ptr_relaxed_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_ptr_acquire_v(const void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_ptr_seq_cst_v(const void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_ptr_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_ptr_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_ptr_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
// ptr2x declarations
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
// commented out const:
|
||||
// 128bit loads are guranteed to not change obj but may need a store to confirm atomicity
|
||||
static FORCE_INLINE void Baselib_atomic_load_ptr2x_relaxed_v(/* const */ void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_ptr2x_acquire_v(/* const */ void* obj, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_load_ptr2x_seq_cst_v(/* const */ void* obj, void* result);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_store_ptr2x_relaxed_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_ptr2x_release_v(void* obj, const void* value);
|
||||
static FORCE_INLINE void Baselib_atomic_store_ptr2x_seq_cst_v(void* obj, const void* value);
|
||||
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_relaxed_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_acquire_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_release_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_acq_rel_v(void* obj, const void* value, void* result);
|
||||
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_seq_cst_v(void* obj, const void* value, void* result);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_relaxed_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acquire_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acquire_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_release_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acq_rel_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_seq_cst_acquire_v(void* obj, void* expected, const void* value);
|
||||
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
|
||||
|
||||
// Compiler Specific Implementation
|
||||
// ----------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
#if PLATFORM_CUSTOM_ATOMICS
|
||||
// Platform header does not know where macro header lives and likely needs it.
|
||||
#include "../../Include/C/Baselib_Atomic_Macros.h"
|
||||
#include "C/Baselib_Atomic_Platform.inl.h"
|
||||
#elif COMPILER_CLANG || COMPILER_GCC
|
||||
#include "Internal/Compiler/Baselib_Atomic_Gcc.h"
|
||||
#elif COMPILER_MSVC
|
||||
#include "Internal/Compiler/Baselib_Atomic_Msvc.h"
|
||||
#endif
|
||||
151
Libraries/external/baselib/Include/C/Baselib_Atomic_LLSC.h
vendored
Normal file
151
Libraries/external/baselib/Include/C/Baselib_Atomic_LLSC.h
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
#pragma once
|
||||
|
||||
// In computer science, load-link and store-conditional (LL/SC) are a pair of instructions used in multithreading to achieve synchronization.
|
||||
// Load-link returns the current value of a memory location, while a subsequent store-conditional to the same memory location will store a new
|
||||
// value only if no updates have occurred to that location since the load-link. Together, this implements a lock-free atomic read-modify-write operation.
|
||||
//
|
||||
// Comparison of LL/SC and compare-and-swap
|
||||
// If any updates have occurred, the store-conditional is guaranteed to fail, even if the value read by the load-link has since been restored.
|
||||
// As such, an LL/SC pair is stronger than a read followed by a compare-and-swap (CAS), which will not detect updates if the old value has been restored
|
||||
// (see ABA problem).
|
||||
//
|
||||
// "Load-link/store-conditional", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Load-link/store-conditional&oldid=916413430
|
||||
//
|
||||
|
||||
//
|
||||
// Baselib_atomic_llsc_break
|
||||
//
|
||||
// This is has no functional effect, but can improve performance on some Arm architectures
|
||||
//
|
||||
// Example:
|
||||
// Baselib_atomic_llsc_32_relaxed_relaxed_v(&obj, &expected, &value, { if (expected == 0) { Baselib_atomic_llsc_break(); break; } );
|
||||
//
|
||||
#define Baselib_atomic_llsc_break() detail_Baselib_atomic_llsc_break()
|
||||
|
||||
//
|
||||
// Baselib_atomic_llsc_<int_type>_<load order>_<store_order>_v(obj, expected, value, code)
|
||||
//
|
||||
// int_type - 8, 16, 32, 64, ptr, ptr2x, 128 (128 available only on 64-bit architectures)
|
||||
// load_order - relaxed, acquire, seq_cst
|
||||
// store_order - relaxed, release, seq_cst
|
||||
//
|
||||
// obj - address to memory to store 'value' into.
|
||||
// Must be cache-line size aligned and sized. Any update of this memory between the LL/SC pair results in unpredictable behaviour.
|
||||
// expected - address to memory to load 'obj' into.
|
||||
// Loaded by LL. Any updates of this memory between the LL/SC pair results in unpredictable behaviour.
|
||||
// value - address to memory containing value to store into 'obj':
|
||||
// Stored by SC to 'obj' memory on success, otherwise 'code' is repeated.
|
||||
// code - code executed between the LL/SC pair.
|
||||
//
|
||||
// Notes on Arm optimized clang implementation:
|
||||
// Armv7A and Armv8A architectures are enabled by default. Newer architectures will be enabled once tested and verified compliant.
|
||||
// Specifically, the configuration of the exclusive access global/local monitors such as ERG (Exclusives Reservation Granule) size may vary on other platforms.
|
||||
// See Arm Synchronization Primitives: http://infocenter.arm.com/help/topic/com.arm.doc.dht0008a/DHT0008A_arm_synchronization_primitives.pdf
|
||||
// chapter 1.2 "Exclusive accesses" for more detailed information.
|
||||
//
|
||||
// Notes on default implementation (platforms/architectures not listed in the Arm clang notes)
|
||||
// Atomic load and compare_exchange intrinsics emulates LL/SC capability.
|
||||
// The values of 'expected' and 'obj' value to determine if SC should succeed and store 'value'.
|
||||
//
|
||||
// Example:
|
||||
// struct Data { BASELIB_ALIGN_AS(PLATFORM_CACHE_LINE_SIZE) int32_t obj = 0; } data;
|
||||
// int32_t expected = 1, value = 2;
|
||||
// Baselib_atomic_llsc_32_relaxed_relaxed_v(&data.obj, &expected, &value, { if (expected == 0) value = 3; } );
|
||||
// <-- obj is now 3
|
||||
//
|
||||
#define Baselib_atomic_llsc_8_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_8_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_8_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, relaxed, release)
|
||||
#define Baselib_atomic_llsc_8_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, acquire, release)
|
||||
#define Baselib_atomic_llsc_8_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, seq_cst, seq_cst)
|
||||
|
||||
#define Baselib_atomic_llsc_16_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_16_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_16_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, relaxed, release)
|
||||
#define Baselib_atomic_llsc_16_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, acquire, release)
|
||||
#define Baselib_atomic_llsc_16_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, seq_cst, seq_cst)
|
||||
|
||||
#define Baselib_atomic_llsc_32_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_32_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_32_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, relaxed, release)
|
||||
#define Baselib_atomic_llsc_32_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, acquire, release)
|
||||
#define Baselib_atomic_llsc_32_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, seq_cst, seq_cst)
|
||||
|
||||
#define Baselib_atomic_llsc_64_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_64_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_64_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, relaxed, release)
|
||||
#define Baselib_atomic_llsc_64_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, acquire, release)
|
||||
#define Baselib_atomic_llsc_64_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, seq_cst, seq_cst)
|
||||
|
||||
#define Baselib_atomic_llsc_ptr_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_ptr_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_ptr_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, relaxed, release)
|
||||
#define Baselib_atomic_llsc_ptr_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, acquire, release)
|
||||
#define Baselib_atomic_llsc_ptr_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, seq_cst, seq_cst)
|
||||
|
||||
#if PLATFORM_ARCH_64
|
||||
|
||||
#define Baselib_atomic_llsc_ptr2x_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_ptr2x_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_ptr2x_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, release)
|
||||
#define Baselib_atomic_llsc_ptr2x_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, release)
|
||||
#define Baselib_atomic_llsc_ptr2x_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, seq_cst, seq_cst)
|
||||
|
||||
#define Baselib_atomic_llsc_128_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, relaxed)
|
||||
#define Baselib_atomic_llsc_128_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, relaxed)
|
||||
#define Baselib_atomic_llsc_128_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, release)
|
||||
#define Baselib_atomic_llsc_128_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, release)
|
||||
#define Baselib_atomic_llsc_128_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, seq_cst, seq_cst)
|
||||
|
||||
#else // PLATFORM_ARCH_64
|
||||
|
||||
#define Baselib_atomic_llsc_ptr2x_relaxed_relaxed_v(obj, expected, value, code) Baselib_atomic_llsc_64_relaxed_relaxed_v(obj, expected, value, code)
|
||||
#define Baselib_atomic_llsc_ptr2x_acquire_relaxed_v(obj, expected, value, code) Baselib_atomic_llsc_64_acquire_relaxed_v(obj, expected, value, code)
|
||||
#define Baselib_atomic_llsc_ptr2x_relaxed_release_v(obj, expected, value, code) Baselib_atomic_llsc_64_relaxed_release_v(obj, expected, value, code)
|
||||
#define Baselib_atomic_llsc_ptr2x_acquire_release_v(obj, expected, value, code) Baselib_atomic_llsc_64_acquire_release_v(obj, expected, value, code)
|
||||
#define Baselib_atomic_llsc_ptr2x_seq_cst_seq_cst_v(obj, expected, value, code) Baselib_atomic_llsc_64_seq_cst_seq_cst_v(obj, expected, value, code)
|
||||
|
||||
#endif
|
||||
|
||||
// Enable LLSC native support for supported compilers and architectures/profiles
|
||||
#ifndef PLATFORM_LLSC_NATIVE_SUPPORT
|
||||
#if (COMPILER_CLANG) && ((__ARM_ARCH >= 7) && (__ARM_ARCH < 9) && (__ARM_ARCH_PROFILE == 'A'))
|
||||
#define PLATFORM_LLSC_NATIVE_SUPPORT 1
|
||||
#else
|
||||
#define PLATFORM_LLSC_NATIVE_SUPPORT 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if PLATFORM_LLSC_NATIVE_SUPPORT
|
||||
// Arm specific implementation of LLSC macros
|
||||
#include "Internal/Compiler/Baselib_Atomic_LLSC_Gcc.inl.h"
|
||||
#else
|
||||
// Generic implementation of LLSC macros
|
||||
#include "Baselib_Atomic.h"
|
||||
|
||||
// LLSC exlusive state access break implementation (nop)
|
||||
#define detail_Baselib_atomic_llsc_break()
|
||||
|
||||
// LLSC implementation using load/cmp_xcgh
|
||||
#define detail_Baselib_atomic_llsc_cmpxchg_v(obj, expected, value, code, size, loadbarrier, storebarrier) \
|
||||
do { \
|
||||
Baselib_atomic_load_##size##_##loadbarrier##_v(obj, expected); \
|
||||
do { \
|
||||
code; \
|
||||
} while (!Baselib_atomic_compare_exchange_weak_##size##_##storebarrier##_##loadbarrier##_v(obj, expected, value)); \
|
||||
} while (false)
|
||||
|
||||
#define detail_Baselib_atomic_llsc_relaxed_relaxed_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, relaxed, relaxed)
|
||||
#define detail_Baselib_atomic_llsc_acquire_relaxed_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, acquire, acquire)
|
||||
#define detail_Baselib_atomic_llsc_relaxed_release_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, relaxed, release)
|
||||
#define detail_Baselib_atomic_llsc_acquire_release_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, acquire, acq_rel)
|
||||
#define detail_Baselib_atomic_llsc_seq_cst_seq_cst_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, seq_cst, seq_cst)
|
||||
|
||||
#define detail_Baselib_atomic_llsc_v(obj, expected, value, code, size, loadbarrier, storebarrier) \
|
||||
detail_Baselib_atomic_llsc_##loadbarrier##_##storebarrier##_v(obj, expected, value, code, size)
|
||||
|
||||
#define detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, loadbarrier, storebarrier) \
|
||||
detail_Baselib_atomic_llsc_v(obj, expected, value, code, 128, loadbarrier, storebarrier)
|
||||
|
||||
#endif // PLATFORM_LLSC_NATIVE_SUPPORT
|
||||
167
Libraries/external/baselib/Include/C/Baselib_Atomic_Macros.h
vendored
Normal file
167
Libraries/external/baselib/Include/C/Baselib_Atomic_Macros.h
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_Alignment.h"
|
||||
|
||||
//
|
||||
// order - relaxed, acquire, release, acq_rel, seq_cst
|
||||
//
|
||||
// MACRO_(order, ...)
|
||||
//
|
||||
#define Baselib_Atomic_FOR_EACH_MEMORY_ORDER(MACRO_, ...) \
|
||||
DETAIL__Baselib_Atomic_EVAL(MACRO_(relaxed, __VA_ARGS__)) \
|
||||
DETAIL__Baselib_Atomic_EVAL(MACRO_(acquire, __VA_ARGS__)) \
|
||||
DETAIL__Baselib_Atomic_EVAL(MACRO_(release, __VA_ARGS__)) \
|
||||
DETAIL__Baselib_Atomic_EVAL(MACRO_(acq_rel, __VA_ARGS__)) \
|
||||
DETAIL__Baselib_Atomic_EVAL(MACRO_(seq_cst, __VA_ARGS__))
|
||||
|
||||
//
|
||||
// operation - load, store, fetch_add, fetch_and, fetch_or, fetch_xor, exchange, compare_exchange_weak, compare_exchange_strong
|
||||
// order - relaxed, acquire, release, acq_rel, seq_cst
|
||||
// order_success - relaxed, acquire, release, acq_rel, seq_cst
|
||||
// order_failure - relaxed, acquire, seq_cst
|
||||
//
|
||||
// LOAD_MACRO_(operation, order, ...)
|
||||
// STORE_MACRO_(operation, order, ...)
|
||||
// ADD_MACRO_(operation, order, ...)
|
||||
// AND_MACRO_(operation, order, ...)
|
||||
// OR_MACRO_(operation, order, ...)
|
||||
// XOR_MACRO_(operation, order, ...)
|
||||
// XCHG_MACRO_(operation, order, ...)
|
||||
// CMP_XCHG_WEAK_MACRO_(operation, order_success, order_failure, ...)
|
||||
// CMP_XCHG_STRONG_MACRO_(operation, order_success, order_failure, ...)
|
||||
//
|
||||
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ...) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_MEMORY_ORDER(LOAD_MACRO_, load, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_STORE_MEMORY_ORDER(STORE_MACRO_, store, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(ADD_MACRO_, fetch_add, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(AND_MACRO_, fetch_and, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(OR_MACRO_, fetch_or, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(XOR_MACRO_, fetch_xor, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(XCHG_MACRO_, exchange, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_CMP_XCHG_MEMORY_ORDER(CMP_XCHG_WEAK_MACRO_, compare_exchange_weak, __VA_ARGS__) \
|
||||
DETAIL__Baselib_Atomic_FOR_EACH_CMP_XCHG_MEMORY_ORDER(CMP_XCHG_STRONG_MACRO_, compare_exchange_strong, __VA_ARGS__)
|
||||
|
||||
//
|
||||
// LOAD_MACRO_(operation, order, ...)
|
||||
// STORE_MACRO_(operation, order, ...)
|
||||
// LOAD_STORE_MACRO_(operation, order, ...)
|
||||
// CMP_XCHG_MACRO_(operation, order_success, order_failure, ...)
|
||||
//
|
||||
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER2(LOAD_MACRO_, STORE_MACRO_, LOAD_STORE_MACRO_, CMP_XCHG_MACRO_, ...) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER( \
|
||||
LOAD_MACRO_, \
|
||||
STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
CMP_XCHG_MACRO_, \
|
||||
CMP_XCHG_MACRO_, \
|
||||
__VA_ARGS__)
|
||||
|
||||
//
|
||||
// operation - load, store, fetch_add, fetch_and, fetch_or, fetch_xor, exchange, compare_exchange_weak, compare_exchange_strong
|
||||
// order - relaxed, acquire, release, acq_rel, seq_cst
|
||||
// order_success - relaxed, acquire, release, acq_rel, seq_cst
|
||||
// order_failure - relaxed, acquire, seq_cst
|
||||
// id - 8, 16, 32, 64
|
||||
// bits - 8, 16, 32, 64
|
||||
// int_type - int8_t, int16_t, int32_t, int64_t
|
||||
//
|
||||
// LOAD_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// STORE_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// ADD_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// AND_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// OR_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// XOR_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// XCHG_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// CMP_XCHG_WEAK_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
|
||||
// CMP_XCHG_STRONG_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
|
||||
//
|
||||
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ...) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 8, 8, int8_t __VA_ARGS__) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 16, 16, int16_t, __VA_ARGS__) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 32, 32, int32_t, __VA_ARGS__) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 64, 64, int64_t, __VA_ARGS__)
|
||||
|
||||
//
|
||||
// operation - load, store, fetch_add, fetch_and, fetch_or, fetch_xor, exchange, compare_exchange_weak, compare_exchange_strong
|
||||
// order - relaxed, acquire, release, acq_rel, seq_cst
|
||||
// order_success - relaxed, acquire, release, acq_rel, seq_cst
|
||||
// order_failure - relaxed, acquire, seq_cst
|
||||
// id - 8, 16, 32, 64, ptr
|
||||
// bits - 8, 16, 32, 64
|
||||
// int_type - int8_t, int16_t, int32_t, int64_t, intptr_t
|
||||
//
|
||||
// LOAD_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// STORE_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// ADD_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// AND_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// OR_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// XOR_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// XCHG_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// CMP_XCHG_WEAK_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
|
||||
// CMP_XCHG_STRONG_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
|
||||
//
|
||||
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ...) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, __VA_ARGS__) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ptr, DETAIL__Baselib_Atomic_PTR_SIZE, intptr_t, __VA_ARGS__)
|
||||
|
||||
//
|
||||
// LOAD_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// STORE_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// LOAD_STORE_MACRO_(operation, order, id, bits, int_type, ...)
|
||||
// CMP_XCHG_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
|
||||
//
|
||||
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE2(LOAD_MACRO_, STORE_MACRO_, LOAD_STORE_MACRO_, CMP_XCHG_MACRO_, ...) \
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE( \
|
||||
LOAD_MACRO_, \
|
||||
STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
LOAD_STORE_MACRO_, \
|
||||
CMP_XCHG_MACRO_, \
|
||||
CMP_XCHG_MACRO_, \
|
||||
__VA_ARGS__)
|
||||
|
||||
//
|
||||
// Implementation details
|
||||
// ----------------------------------------------------------------------------------
|
||||
#if PLATFORM_ARCH_64
|
||||
#define DETAIL__Baselib_Atomic_PTR_SIZE 64
|
||||
#else
|
||||
#define DETAIL__Baselib_Atomic_PTR_SIZE 32
|
||||
#endif
|
||||
|
||||
// Expands to its arguments unchanged; wrapping an expression in this macro
// forces an additional macro-expansion pass over it (standard EVAL idiom).
#define DETAIL__Baselib_Atomic_EVAL(...) __VA_ARGS__
|
||||
|
||||
// Applies APPLY_(OPERATION_, order, ...) once for each memory order that is
// valid for an atomic load: relaxed, acquire, seq_cst.
#define DETAIL__Baselib_Atomic_FOR_EACH_LOAD_MEMORY_ORDER(APPLY_, OPERATION_, ...) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acquire, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, seq_cst, __VA_ARGS__))
|
||||
|
||||
// Applies APPLY_(OPERATION_, order, ...) once for each memory order that is
// valid for an atomic store: relaxed, release, seq_cst.
#define DETAIL__Baselib_Atomic_FOR_EACH_STORE_MEMORY_ORDER(APPLY_, OPERATION_, ...) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, release, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, seq_cst, __VA_ARGS__))
|
||||
|
||||
// Applies APPLY_(OPERATION_, order, ...) once for each memory order valid for
// a read-modify-write operation: relaxed, acquire, release, acq_rel, seq_cst.
#define DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(APPLY_, OPERATION_, ...) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acquire, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, release, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acq_rel, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, seq_cst, __VA_ARGS__))
|
||||
|
||||
// Applies APPLY_(OPERATION_, order_success, order_failure, ...) once for each
// supported (success, failure) memory-order pair of a compare-exchange.
// The failure order is never stronger than the success order and never a
// release-type order.
#define DETAIL__Baselib_Atomic_FOR_EACH_CMP_XCHG_MEMORY_ORDER(APPLY_, OPERATION_, ...) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, relaxed, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acquire, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acquire, acquire, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, release, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acq_rel, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, acq_rel, acquire, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, seq_cst, relaxed, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, seq_cst, acquire, __VA_ARGS__)) \
    DETAIL__Baselib_Atomic_EVAL(APPLY_(OPERATION_, seq_cst, seq_cst, __VA_ARGS__))
|
||||
343
Libraries/external/baselib/Include/C/Baselib_Atomic_TypeSafe.h
vendored
Normal file
343
Libraries/external/baselib/Include/C/Baselib_Atomic_TypeSafe.h
vendored
Normal file
@@ -0,0 +1,343 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_Atomic.h"
|
||||
|
||||
// TypeSafe version of baselib atomics "C" API
|
||||
|
||||
// 8-bit declarations
// ----------------------------------------------------------------------------------------------------------------------------------------
// Type-safe wrappers over the untyped Baselib_atomic_*_8_*_v operations; the
// definitions matching these prototypes are generated by the detail_* macros
// at the end of this header.

// Atomic load: relaxed / acquire / seq_cst.
static FORCE_INLINE int8_t Baselib_atomic_load_8_relaxed(const int8_t* obj);
static FORCE_INLINE int8_t Baselib_atomic_load_8_acquire(const int8_t* obj);
static FORCE_INLINE int8_t Baselib_atomic_load_8_seq_cst(const int8_t* obj);

// Atomic store: relaxed / release / seq_cst.
static FORCE_INLINE void Baselib_atomic_store_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE void Baselib_atomic_store_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE void Baselib_atomic_store_8_seq_cst(int8_t* obj, int8_t value);

// Atomic read-modify-write (fetch_add / fetch_and / fetch_or / fetch_xor /
// exchange) in the orders relaxed / acquire / release / acq_rel / seq_cst.
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_seq_cst(int8_t* obj, int8_t value);

static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_seq_cst(int8_t* obj, int8_t value);

static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_seq_cst(int8_t* obj, int8_t value);

static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_seq_cst(int8_t* obj, int8_t value);

static FORCE_INLINE int8_t Baselib_atomic_exchange_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_seq_cst(int8_t* obj, int8_t value);

// Atomic compare-exchange (weak, then strong), one variant per supported
// (success, failure) memory-order pair.
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_relaxed_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_release_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_seq_cst(int8_t* obj, int8_t* expected, int8_t value);

static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_relaxed_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_release_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_seq_cst(int8_t* obj, int8_t* expected, int8_t value);
|
||||
|
||||
// 16-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
// Type-safe wrappers over the untyped Baselib_atomic_*_16_*_v operations;
// definitions are generated by the detail_* macros at the end of this header.

// Atomic load: relaxed / acquire / seq_cst.
static FORCE_INLINE int16_t Baselib_atomic_load_16_relaxed(const int16_t* obj);
static FORCE_INLINE int16_t Baselib_atomic_load_16_acquire(const int16_t* obj);
static FORCE_INLINE int16_t Baselib_atomic_load_16_seq_cst(const int16_t* obj);

// Atomic store: relaxed / release / seq_cst.
static FORCE_INLINE void Baselib_atomic_store_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE void Baselib_atomic_store_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE void Baselib_atomic_store_16_seq_cst(int16_t* obj, int16_t value);

// Atomic read-modify-write (fetch_add / fetch_and / fetch_or / fetch_xor /
// exchange) in the orders relaxed / acquire / release / acq_rel / seq_cst.
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_seq_cst(int16_t* obj, int16_t value);

static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_seq_cst(int16_t* obj, int16_t value);

static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_seq_cst(int16_t* obj, int16_t value);

static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_seq_cst(int16_t* obj, int16_t value);

static FORCE_INLINE int16_t Baselib_atomic_exchange_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_seq_cst(int16_t* obj, int16_t value);

// Atomic compare-exchange (weak, then strong), one variant per supported
// (success, failure) memory-order pair.
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_relaxed_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_release_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_seq_cst(int16_t* obj, int16_t* expected, int16_t value);

static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_relaxed_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_release_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_seq_cst(int16_t* obj, int16_t* expected, int16_t value);
|
||||
|
||||
// 32-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
// Type-safe wrappers over the untyped Baselib_atomic_*_32_*_v operations;
// definitions are generated by the detail_* macros at the end of this header.

// Atomic load: relaxed / acquire / seq_cst.
static FORCE_INLINE int32_t Baselib_atomic_load_32_relaxed(const int32_t* obj);
static FORCE_INLINE int32_t Baselib_atomic_load_32_acquire(const int32_t* obj);
static FORCE_INLINE int32_t Baselib_atomic_load_32_seq_cst(const int32_t* obj);

// Atomic store: relaxed / release / seq_cst.
static FORCE_INLINE void Baselib_atomic_store_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE void Baselib_atomic_store_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE void Baselib_atomic_store_32_seq_cst(int32_t* obj, int32_t value);

// Atomic read-modify-write (fetch_add / fetch_and / fetch_or / fetch_xor /
// exchange) in the orders relaxed / acquire / release / acq_rel / seq_cst.
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_seq_cst(int32_t* obj, int32_t value);

static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_seq_cst(int32_t* obj, int32_t value);

static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_seq_cst(int32_t* obj, int32_t value);

static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_seq_cst(int32_t* obj, int32_t value);

static FORCE_INLINE int32_t Baselib_atomic_exchange_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_seq_cst(int32_t* obj, int32_t value);

// Atomic compare-exchange (weak, then strong), one variant per supported
// (success, failure) memory-order pair.
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_release_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_seq_cst(int32_t* obj, int32_t* expected, int32_t value);

static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_relaxed_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_release_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_seq_cst(int32_t* obj, int32_t* expected, int32_t value);
|
||||
|
||||
// 64-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
// Type-safe wrappers over the untyped Baselib_atomic_*_64_*_v operations;
// definitions are generated by the detail_* macros at the end of this header.

// Atomic load: relaxed / acquire / seq_cst.
static FORCE_INLINE int64_t Baselib_atomic_load_64_relaxed(const int64_t* obj);
static FORCE_INLINE int64_t Baselib_atomic_load_64_acquire(const int64_t* obj);
static FORCE_INLINE int64_t Baselib_atomic_load_64_seq_cst(const int64_t* obj);

// Atomic store: relaxed / release / seq_cst.
static FORCE_INLINE void Baselib_atomic_store_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE void Baselib_atomic_store_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE void Baselib_atomic_store_64_seq_cst(int64_t* obj, int64_t value);

// Atomic read-modify-write (fetch_add / fetch_and / fetch_or / fetch_xor /
// exchange) in the orders relaxed / acquire / release / acq_rel / seq_cst.
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_seq_cst(int64_t* obj, int64_t value);

static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_seq_cst(int64_t* obj, int64_t value);

static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_seq_cst(int64_t* obj, int64_t value);

static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_seq_cst(int64_t* obj, int64_t value);

static FORCE_INLINE int64_t Baselib_atomic_exchange_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_seq_cst(int64_t* obj, int64_t value);

// Atomic compare-exchange (weak, then strong), one variant per supported
// (success, failure) memory-order pair.
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_release_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_seq_cst(int64_t* obj, int64_t* expected, int64_t value);

static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_relaxed_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_release_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_seq_cst(int64_t* obj, int64_t* expected, int64_t value);
|
||||
|
||||
// ptr declarations
// ------------------------------------------------------------------------------------------------------------------------------
// Pointer-sized variants using intptr_t; generated from the same detail_*
// macros with id "ptr" and bits DETAIL__Baselib_Atomic_PTR_SIZE.

// Atomic load: relaxed / acquire / seq_cst.
static FORCE_INLINE intptr_t Baselib_atomic_load_ptr_relaxed(const intptr_t* obj);
static FORCE_INLINE intptr_t Baselib_atomic_load_ptr_acquire(const intptr_t* obj);
static FORCE_INLINE intptr_t Baselib_atomic_load_ptr_seq_cst(const intptr_t* obj);

// Atomic store: relaxed / release / seq_cst.
static FORCE_INLINE void Baselib_atomic_store_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE void Baselib_atomic_store_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE void Baselib_atomic_store_ptr_seq_cst(intptr_t* obj, intptr_t value);

// Atomic read-modify-write (fetch_add / fetch_and / fetch_or / fetch_xor /
// exchange) in the orders relaxed / acquire / release / acq_rel / seq_cst.
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_seq_cst(intptr_t* obj, intptr_t value);

static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_seq_cst(intptr_t* obj, intptr_t value);

static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_seq_cst(intptr_t* obj, intptr_t value);

static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_seq_cst(intptr_t* obj, intptr_t value);

static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_seq_cst(intptr_t* obj, intptr_t value);

// Atomic compare-exchange (weak, then strong), one variant per supported
// (success, failure) memory-order pair.
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_relaxed_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_release_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_seq_cst(intptr_t* obj, intptr_t* expected, intptr_t value);

static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_relaxed_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_release_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_seq_cst(intptr_t* obj, intptr_t* expected, intptr_t value);
|
||||
|
||||
// Macro Implementation
|
||||
// ------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
#define detail_LOAD(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE int_type Baselib_atomic_##op##_##id##_##order(const int_type* obj) \
|
||||
{ \
|
||||
int_type result; \
|
||||
Baselib_atomic_##op##_##bits##_##order##_v(obj, &result); \
|
||||
return result; \
|
||||
}
|
||||
|
||||
#define detail_STORE(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order(int_type* obj, int_type value) \
|
||||
{ \
|
||||
Baselib_atomic_##op##_##bits##_##order##_v(obj, &value); \
|
||||
}
|
||||
|
||||
#define detail_LOAD_STORE(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE int_type Baselib_atomic_##op##_##id##_##order(int_type* obj, int_type value) \
|
||||
{ \
|
||||
int_type result; \
|
||||
Baselib_atomic_##op##_##bits##_##order##_v(obj, &value, &result); \
|
||||
return result; \
|
||||
}
|
||||
|
||||
#define detail_CMP_XCHG(op, order1, order2, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2(int_type* obj, int_type* expected, int_type value) \
|
||||
{ \
|
||||
return Baselib_atomic_##op##_##bits##_##order1##_##order2##_v(obj, expected, &value); \
|
||||
}
|
||||
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE2(detail_LOAD, detail_STORE, detail_LOAD_STORE, detail_CMP_XCHG);
|
||||
|
||||
#undef detail_LOAD
|
||||
#undef detail_STORE
|
||||
#undef detail_LOAD_STORE
|
||||
#undef detail_CMP_XCHG
|
||||
81
Libraries/external/baselib/Include/C/Baselib_CappedSemaphore.h
vendored
Normal file
81
Libraries/external/baselib/Include/C/Baselib_CappedSemaphore.h
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_CappedSemaphore
|
||||
|
||||
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
|
||||
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
|
||||
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
|
||||
// decremented, or toggled) depending on programmer-defined conditions.
|
||||
//
|
||||
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
|
||||
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
|
||||
// resource becomes available.
|
||||
//
|
||||
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
|
||||
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#include "Internal/Baselib_CappedSemaphore_FutexBased.inl.h"
|
||||
#else
|
||||
#include "Internal/Baselib_CappedSemaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Creates a capped counting semaphore synchronization primitive.
|
||||
//
|
||||
// Cap is the number of tokens that can be held by the semaphore when there is no contention.
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
//
|
||||
// For optimal performance, the returned Baselib_CappedSemaphore should be stored at a cache aligned memory location.
|
||||
//
|
||||
// \returns A struct representing a semaphore instance. Use Baselib_CappedSemaphore_Free to free the semaphore.
|
||||
BASELIB_INLINE_API Baselib_CappedSemaphore Baselib_CappedSemaphore_Create(uint16_t cap);
|
||||
|
||||
// Try to consume a token and return immediately.
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// \returns true if token was consumed. false if not.
|
||||
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryAcquire(Baselib_CappedSemaphore* semaphore);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
BASELIB_INLINE_API void Baselib_CappedSemaphore_Acquire(Baselib_CappedSemaphore* semaphore);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
|
||||
// while Acquire may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \param timeoutInMilliseconds Time to wait for token to become available in milliseconds.
|
||||
//
|
||||
// \returns true if token was consumed. false if timeout was reached.
|
||||
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryTimedAcquire(Baselib_CappedSemaphore* semaphore, const uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Submit tokens to the semaphore.
|
||||
//
|
||||
// If threads are waiting an equal amount of tokens are consumed before this function return.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// \returns number of submitted tokens.
|
||||
BASELIB_INLINE_API uint16_t Baselib_CappedSemaphore_Release(Baselib_CappedSemaphore* semaphore, const uint16_t count);
|
||||
|
||||
// Sets the semaphore token count to zero and release all waiting threads.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// \returns number of released threads.
|
||||
BASELIB_INLINE_API uint32_t Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(Baselib_CappedSemaphore* semaphore);
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr result in a no-op.
|
||||
BASELIB_INLINE_API void Baselib_CappedSemaphore_Free(Baselib_CappedSemaphore* semaphore);
|
||||
61
Libraries/external/baselib/Include/C/Baselib_CountdownTimer.h
vendored
Normal file
61
Libraries/external/baselib/Include/C/Baselib_CountdownTimer.h
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_Timer.h"
|
||||
#include <math.h>
|
||||
|
||||
typedef struct Baselib_CountdownTimer
|
||||
{
|
||||
Baselib_Timer_Ticks startTimeInTicks;
|
||||
Baselib_Timer_Ticks timeoutInTicks;
|
||||
} Baselib_CountdownTimer;
|
||||
|
||||
BASELIB_INLINE_API Baselib_Timer_Ticks Detail_MillisecondsToTicks(double milliseconds)
|
||||
{
|
||||
return (Baselib_Timer_Ticks)(milliseconds * Baselib_NanosecondsPerMillisecond / Baselib_Timer_TickToNanosecondsConversionFactor);
|
||||
}
|
||||
|
||||
BASELIB_INLINE_API double Detail_TicksToMilliseconds(Baselib_Timer_Ticks ticks)
|
||||
{
|
||||
return ticks * Baselib_Timer_TickToNanosecondsConversionFactor / Baselib_NanosecondsPerMillisecond;
|
||||
}
|
||||
|
||||
// Create and start a countdown timer
|
||||
BASELIB_INLINE_API Baselib_CountdownTimer Baselib_CountdownTimer_StartMs(uint32_t timeoutInMilliseconds)
|
||||
{
|
||||
const Baselib_CountdownTimer timer = {Baselib_Timer_GetHighPrecisionTimerTicks(), Detail_MillisecondsToTicks(timeoutInMilliseconds)};
|
||||
return timer;
|
||||
}
|
||||
|
||||
BASELIB_INLINE_API Baselib_CountdownTimer Baselib_CountdownTimer_StartTicks(Baselib_Timer_Ticks timeoutInTicks)
|
||||
{
|
||||
const Baselib_CountdownTimer timer = {Baselib_Timer_GetHighPrecisionTimerTicks(), timeoutInTicks};
|
||||
return timer;
|
||||
}
|
||||
|
||||
// Get the number of ticks left before countdown expires.
|
||||
//
|
||||
// This function is guaranteed to return zero once timeout expired.
|
||||
// It is also guaranteed that this function will not return zero until timeout expires.
|
||||
BASELIB_INLINE_API Baselib_Timer_Ticks Baselib_CountdownTimer_GetTimeLeftInTicks(Baselib_CountdownTimer timer)
|
||||
{
|
||||
const Baselib_Timer_Ticks then = timer.startTimeInTicks;
|
||||
const Baselib_Timer_Ticks now = Baselib_Timer_GetHighPrecisionTimerTicks();
|
||||
const Baselib_Timer_Ticks timeLeft = timer.timeoutInTicks - (now - then);
|
||||
return timeLeft <= timer.timeoutInTicks ? timeLeft : 0;
|
||||
}
|
||||
|
||||
// Get the number of milliseconds left before countdown expires.
|
||||
//
|
||||
// This function is guaranteed to return zero once timeout expired.
|
||||
// It is also guaranteed that this function will not return zero until timeout expires.
|
||||
BASELIB_INLINE_API uint32_t Baselib_CountdownTimer_GetTimeLeftInMilliseconds(Baselib_CountdownTimer timer)
|
||||
{
|
||||
const Baselib_Timer_Ticks timeLeft = Baselib_CountdownTimer_GetTimeLeftInTicks(timer);
|
||||
return (uint32_t)ceil(Detail_TicksToMilliseconds(timeLeft));
|
||||
}
|
||||
|
||||
// Check if timout has been reached.
|
||||
BASELIB_INLINE_API bool Baselib_CountdownTimer_TimeoutExpired(Baselib_CountdownTimer timer)
|
||||
{
|
||||
return Baselib_CountdownTimer_GetTimeLeftInTicks(timer) == 0;
|
||||
}
|
||||
21
Libraries/external/baselib/Include/C/Baselib_Debug.h
vendored
Normal file
21
Libraries/external/baselib/Include/C/Baselib_Debug.h
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
#pragma once
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Generates breakpoint exception (interrupt) the same way as normal breakpoint would.
|
||||
//
|
||||
// If debugger is attached, this will break into the debugger.
|
||||
// If debugger is not attached, application will crash, unless breakpoint exception is handled.
|
||||
// Breakpoint exception can be handled on some platforms by using signal(SIGTRAP, ...) or AddVectoredExceptionHandler.
|
||||
// Platforms can override default compiler implementation by providing BASELIB_DEBUG_TRAP.
|
||||
#define Baselib_Debug_Break() BASELIB_DEBUG_TRAP()
|
||||
|
||||
// \returns true if debugger is attached
|
||||
BASELIB_API bool Baselib_Debug_IsDebuggerAttached(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
128
Libraries/external/baselib/Include/C/Baselib_DynamicLibrary.h
vendored
Normal file
128
Libraries/external/baselib/Include/C/Baselib_DynamicLibrary.h
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib Dynamic Library.
|
||||
|
||||
// In computing, a dynamic linker is the part of an operating system that loads and links
|
||||
// the shared libraries needed by an executable when it is executed (at "run time"),
|
||||
// by copying the content of libraries from persistent storage to RAM, filling jump tables and
|
||||
// relocating pointers. The specific operating system and executable format determine how
|
||||
// the dynamic linker functions and how it is implemented.
|
||||
//
|
||||
// "Dynamic linker", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Dynamic_linker&oldid=935827444
|
||||
//
|
||||
// Platform specific gotchas:
|
||||
// - On Posix/Darwin based platforms, if executable/library has import entries,
|
||||
// as for importing functions from .so's/.dylib's at executable/library open time,
|
||||
// Baselib_DynamicLibrary_GetFunction is able to return them as well.
|
||||
// This is because of ELF/Mach-O format limitations.
|
||||
// - On Posix/Darwin based platforms, to be able to query symbols in an executable
|
||||
// they must be made visible via --external-dynamic and -external_dynamic flags respectively.
|
||||
// Some linkers have an option to make specific symbols visible.
|
||||
// - Emscripten limitations are detailed in
|
||||
// https://github.com/emscripten-core/emscripten/wiki/Linking
|
||||
// - On some platforms dynamic linker doesn't load downstream dependencies.
|
||||
// For example if library A imports a symbol from library B,
|
||||
// and this is passed to the compiler/linker at compilation step,
|
||||
// on most platforms it will generate load entries inside library A to load library B,
|
||||
// so if you load library A then library B will be loaded for you by the dynamic linker.
|
||||
// But on some platforms, you have to load library B first, and then library A.
|
||||
|
||||
#include "Baselib_ErrorState.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
typedef struct Baselib_DynamicLibrary_Handle { intptr_t handle; } Baselib_DynamicLibrary_Handle;
|
||||
// values in range from 0 inclusive to -5 are valid handles on some platforms
|
||||
static const Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_Handle_Invalid = { -100 };
|
||||
|
||||
#include <C/Baselib_DynamicLibrary.inl.h>
|
||||
|
||||
// Open a dynamic library.
|
||||
//
|
||||
// Dynamic libraries are reference counted, so if the same library is loaded again
|
||||
// with Baselib_DynamicLibrary_OpenUtf8/Baselib_DynamicLibrary_OpenUtf16, the same file handle is returned.
|
||||
// It is also possible to load two different libraries containing two different functions that have the same name.
|
||||
//
|
||||
// Please note that additional error information should be retrieved via error state explain and be presented to the end user.
|
||||
// This is needed to improve ergonomics of debugging library loading issues.
|
||||
//
|
||||
// \param pathnameUtf8 Library file to be opened.
|
||||
// If relative pathname is provided, platform library search rules are applied (if any).
|
||||
// If nullptr is passed, Baselib_ErrorCode_InvalidArgument will be risen.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_FailedToOpenDynamicLibrary: Unable to open requested dynamic library.
|
||||
// - Baselib_ErrorCode_NotSupported: This feature is not supported on the current platform.
|
||||
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_OpenUtf8(
|
||||
const char* pathnameUtf8,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Open a dynamic library.
|
||||
// Functionally identical to Baselib_DynamicLibrary_OpenUtf8, but accepts UTF-16 path instead.
|
||||
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_OpenUtf16(
|
||||
const baselib_char16_t* pathnameUtf16,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Return a handle that can be used to query functions in the program's scope.
|
||||
// Must be closed via Baselib_DynamicLibrary_Close.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_NotSupported: This feature is not supported on the current platform.
|
||||
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_OpenProgramHandle(
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Convert native handle into baselib handle without changing the dynamic library ref counter.
|
||||
//
|
||||
// Provided handle should be closed either via Baselib_DynamicLibrary_Close or other means.
|
||||
// The caller is responsible for closing the handle once done with it.
|
||||
// Other corresponding resources should be closed by other means.
|
||||
//
|
||||
// \param handle Platform defined native handle.
|
||||
// \param type Platform defined native handle type from Baselib_DynamicLibrary_NativeHandleType enum.
|
||||
// If unsupported type is passed, will return Baselib_DynamicLibrary_Handle_Invalid.
|
||||
//
|
||||
// \returns Baselib_DynamicLibrary_Handle handle.
|
||||
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_FromNativeHandle(
|
||||
uint64_t handle,
|
||||
uint32_t type,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Lookup a function in a dynamic library.
|
||||
//
|
||||
// \param handle Library handle.
|
||||
// If Baselib_DynamicLibrary_Handle_Invalid is passed, Baselib_ErrorCode_InvalidArgument will be risen.
|
||||
// \param functionName Function name to look for.
|
||||
// If nullptr is passed, Baselib_ErrorCode_InvalidArgument will be risen.
|
||||
//
|
||||
// \returns pointer to the function (can be NULL for symbols mapped to NULL).
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_FunctionNotFound: Requested function was not found.
|
||||
BASELIB_API void* Baselib_DynamicLibrary_GetFunction(
|
||||
Baselib_DynamicLibrary_Handle handle,
|
||||
const char* functionName,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Close a dynamic library.
|
||||
//
|
||||
// Decreases reference counter, if it becomes zero, closes the library.
|
||||
// If system api will return an error during this operation, the process will be aborted.
|
||||
//
|
||||
// \param handle Library handle.
|
||||
// If Baselib_DynamicLibrary_Handle_Invalid is passed, function is no-op.
|
||||
BASELIB_API void Baselib_DynamicLibrary_Close(
|
||||
Baselib_DynamicLibrary_Handle handle
|
||||
);
|
||||
|
||||
#if __cplusplus
|
||||
}
|
||||
#endif
|
||||
60
Libraries/external/baselib/Include/C/Baselib_ErrorCode.h
vendored
Normal file
60
Libraries/external/baselib/Include/C/Baselib_ErrorCode.h
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
#pragma once
|
||||
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// All possible baselib error codes.
|
||||
typedef enum Baselib_ErrorCode
|
||||
{
|
||||
Baselib_ErrorCode_Success = 0x00000000,
|
||||
|
||||
// Common
|
||||
Baselib_ErrorCode_OutOfMemory = 0x01000000,
|
||||
Baselib_ErrorCode_OutOfSystemResources,
|
||||
Baselib_ErrorCode_InvalidAddressRange,
|
||||
// nativeErrorCode contains name of invalid argument
|
||||
Baselib_ErrorCode_InvalidArgument,
|
||||
Baselib_ErrorCode_InvalidBufferSize,
|
||||
Baselib_ErrorCode_InvalidState,
|
||||
Baselib_ErrorCode_NotSupported,
|
||||
Baselib_ErrorCode_Timeout,
|
||||
|
||||
// Memory
|
||||
Baselib_ErrorCode_UnsupportedAlignment = 0x02000000,
|
||||
Baselib_ErrorCode_InvalidPageSize,
|
||||
Baselib_ErrorCode_InvalidPageCount,
|
||||
Baselib_ErrorCode_UnsupportedPageState,
|
||||
|
||||
// Thread
|
||||
Baselib_ErrorCode_ThreadCannotJoinSelf = 0x03000000,
|
||||
|
||||
// Socket
|
||||
Baselib_ErrorCode_NetworkInitializationError = 0x04000000,
|
||||
Baselib_ErrorCode_AddressInUse,
|
||||
// Risen in case if destination cannot be reached or requested address for bind was not local.
|
||||
Baselib_ErrorCode_AddressUnreachable,
|
||||
Baselib_ErrorCode_AddressFamilyNotSupported,
|
||||
Baselib_ErrorCode_Disconnected,
|
||||
|
||||
// FileIO
|
||||
Baselib_ErrorCode_InvalidPathname = 0x05000000,
|
||||
Baselib_ErrorCode_RequestedAccessIsNotAllowed,
|
||||
Baselib_ErrorCode_IOError,
|
||||
|
||||
// DynamicLibrary
|
||||
Baselib_ErrorCode_FailedToOpenDynamicLibrary = 0x06000000,
|
||||
Baselib_ErrorCode_FunctionNotFound,
|
||||
|
||||
// An error that was not anticipated by the baselib authors.
|
||||
// Occurrence of this error is preceeded by a debug assertion.
|
||||
Baselib_ErrorCode_UnexpectedError = 0xFFFFFFFF,
|
||||
} Baselib_ErrorCode;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_ErrorCode);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
134
Libraries/external/baselib/Include/C/Baselib_ErrorState.h
vendored
Normal file
134
Libraries/external/baselib/Include/C/Baselib_ErrorState.h
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_ErrorCode.h"
|
||||
#include "Baselib_SourceLocation.h"
|
||||
#include <assert.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Native error code type.
|
||||
typedef enum Baselib_ErrorState_NativeErrorCodeType_t
|
||||
{
|
||||
// Native error code is not present.
|
||||
Baselib_ErrorState_NativeErrorCodeType_None = 0,
|
||||
|
||||
// All platform error codes types must be bigger or equal to this value.
|
||||
Baselib_ErrorState_NativeErrorCodeType_PlatformDefined,
|
||||
} Baselib_ErrorState_NativeErrorCodeType_t;
|
||||
typedef uint8_t Baselib_ErrorState_NativeErrorCodeType;
|
||||
|
||||
// Extra information type.
|
||||
typedef enum Baselib_ErrorState_ExtraInformationType_t
|
||||
{
|
||||
// Extra information is not present.
|
||||
Baselib_ErrorState_ExtraInformationType_None = 0,
|
||||
|
||||
// Extra information is a pointer of const char* type.
|
||||
// Pointer guaranteed to be valid for lifetime of the program (static strings, buffers, etc).
|
||||
Baselib_ErrorState_ExtraInformationType_StaticString,
|
||||
|
||||
// Extra information is a generation counter to ErrorState internal static buffer.
|
||||
Baselib_ErrorState_ExtraInformationType_GenerationCounter,
|
||||
} Baselib_ErrorState_ExtraInformationType_t;
|
||||
typedef uint8_t Baselib_ErrorState_ExtraInformationType;
|
||||
|
||||
// Baselib error information.
|
||||
//
|
||||
// All functions that expect a pointer to a error state object will *not* allow to pass a nullptr for it
|
||||
// If an error state with code other than Success is passed, the function is guaranteed to early out.
|
||||
// Note that even if an error state is expected, there might be no full argument validation. For details check documentation of individual functions.
|
||||
typedef struct Baselib_ErrorState
|
||||
{
|
||||
Baselib_SourceLocation sourceLocation;
|
||||
uint64_t nativeErrorCode;
|
||||
uint64_t extraInformation;
|
||||
Baselib_ErrorCode code;
|
||||
Baselib_ErrorState_NativeErrorCodeType nativeErrorCodeType;
|
||||
Baselib_ErrorState_ExtraInformationType extraInformationType;
|
||||
} Baselib_ErrorState;
|
||||
|
||||
// Creates a new error state object that is initialized to Baselib_ErrorCode_Success.
|
||||
static inline Baselib_ErrorState Baselib_ErrorState_Create(void)
|
||||
{
|
||||
Baselib_ErrorState errorState = {
|
||||
{ NULL, NULL, 0 },
|
||||
0,
|
||||
0,
|
||||
Baselib_ErrorCode_Success,
|
||||
Baselib_ErrorState_NativeErrorCodeType_None,
|
||||
Baselib_ErrorState_ExtraInformationType_None
|
||||
};
|
||||
return errorState;
|
||||
}
|
||||
|
||||
// Resets an existing error state to success and passes it on. Passes nullptr directly on.
|
||||
static inline Baselib_ErrorState* Baselib_ErrorState_Reset(Baselib_ErrorState* errorState)
|
||||
{
|
||||
if (errorState)
|
||||
errorState->code = Baselib_ErrorCode_Success;
|
||||
return errorState;
|
||||
}
|
||||
|
||||
static inline bool Baselib_ErrorState_ErrorRaised(const Baselib_ErrorState* errorState)
|
||||
{
|
||||
BaselibAssert(errorState);
|
||||
return errorState->code != Baselib_ErrorCode_Success;
|
||||
}
|
||||
|
||||
static inline void Baselib_ErrorState_RaiseError(
|
||||
Baselib_ErrorState* errorState,
|
||||
Baselib_ErrorCode errorCode,
|
||||
Baselib_ErrorState_NativeErrorCodeType nativeErrorCodeType,
|
||||
uint64_t nativeErrorCode,
|
||||
Baselib_ErrorState_ExtraInformationType extraInformationType,
|
||||
uint64_t extraInformation,
|
||||
Baselib_SourceLocation sourceLocation
|
||||
)
|
||||
{
|
||||
if (!errorState)
|
||||
return;
|
||||
if (errorState->code != Baselib_ErrorCode_Success)
|
||||
return;
|
||||
errorState->sourceLocation = sourceLocation;
|
||||
errorState->nativeErrorCode = nativeErrorCode;
|
||||
errorState->extraInformation = extraInformation;
|
||||
errorState->code = errorCode;
|
||||
errorState->nativeErrorCodeType = nativeErrorCodeType;
|
||||
errorState->extraInformationType = extraInformationType;
|
||||
}
|
||||
|
||||
typedef enum Baselib_ErrorState_ExplainVerbosity
|
||||
{
|
||||
// Include error type with platform specific value (if specified).
|
||||
Baselib_ErrorState_ExplainVerbosity_ErrorType = 0,
|
||||
// Include error type with platform specific value (if specified),
|
||||
// source location (subject to BASELIB_ENABLE_SOURCELOCATION define) and an error explanation if available.
|
||||
Baselib_ErrorState_ExplainVerbosity_ErrorType_SourceLocation_Explanation = 1,
|
||||
} Baselib_ErrorState_ExplainVerbosity;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_ErrorState_ExplainVerbosity);
|
||||
|
||||
// Writes a null terminated string containing native error code value and explanation if possible.
|
||||
//
|
||||
// \param errorState Error state to explain. If null an empty string will be written into buffer.
|
||||
// \param buffer Buffer to write explanation into.
|
||||
// If nullptr is passed, nothing will be written but function will still return correct amount of bytes.
|
||||
// \param bufferLen Length of buffer in bytes.
|
||||
// If 0 is passed, behaviour is the same as passing nullptr as buffer.
|
||||
// \param verbosity Verbosity level of the explanation string.
|
||||
//
|
||||
// \returns the number of characters that would have been written if buffer had been sufficiently large, including the terminating null character.
|
||||
BASELIB_API uint32_t Baselib_ErrorState_Explain(
|
||||
const Baselib_ErrorState* errorState,
|
||||
char buffer[],
|
||||
uint32_t bufferLen,
|
||||
Baselib_ErrorState_ExplainVerbosity verbosity
|
||||
);
|
||||
|
||||
#include <C/Baselib_ErrorState.inl.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
88
Libraries/external/baselib/Include/C/Baselib_EventSemaphore.h
vendored
Normal file
88
Libraries/external/baselib/Include/C/Baselib_EventSemaphore.h
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_EventSemaphore
|
||||
|
||||
// In computer science, an event (also called event semaphore) is a type of synchronization mechanism that is used to indicate to waiting processes when a
|
||||
// particular condition has become true.
|
||||
// An event is an abstract data type with a boolean state and the following operations:
|
||||
// * wait - when executed, causes the suspension of the executing process until the state of the event is set to true. If the state is already set to true has no effect.
|
||||
// * set - sets the event's state to true, release all waiting processes.
|
||||
// * clear - sets the event's state to false.
|
||||
//
|
||||
// "Event (synchronization primitive)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Event_(synchronization_primitive)&oldid=781517732
|
||||
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#include "Internal/Baselib_EventSemaphore_FutexBased.inl.h"
|
||||
#else
|
||||
#include "Internal/Baselib_EventSemaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Creates an event semaphore synchronization primitive. Initial state of event is unset.
|
||||
//
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
//
|
||||
// For optimal performance, the returned Baselib_EventSemaphore should be stored at a cache aligned memory location.
|
||||
//
|
||||
// \returns A struct representing a semaphore instance. Use Baselib_EventSemaphore_Free to free the semaphore.
|
||||
BASELIB_INLINE_API Baselib_EventSemaphore Baselib_EventSemaphore_Create(void);
|
||||
|
||||
// Try to acquire semaphore.
|
||||
//
|
||||
// When semaphore is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// \returns true if event is set, false other wise.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryAcquire(Baselib_EventSemaphore* semaphore);
|
||||
|
||||
// Acquire semaphore.
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
BASELIB_INLINE_API void Baselib_EventSemaphore_Acquire(Baselib_EventSemaphore* semaphore);
|
||||
|
||||
// Try to acquire semaphore.
|
||||
//
|
||||
// If event is set this function return true, otherwise the thread will wait for event to be set or for release to be called.
|
||||
//
|
||||
// When semaphore is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
|
||||
// while Acquire may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \returns true if semaphore was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryTimedAcquire(Baselib_EventSemaphore* semaphore, const uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Sets the event
|
||||
//
|
||||
// Setting the event will cause all waiting threads to wakeup. And will let all future acquiring threads through until Baselib_EventSemaphore_Reset is called.
|
||||
// It is guaranteed that any thread waiting previously on the EventSemaphore will be woken up, even if the semaphore is immediately reset. (no lock stealing)
|
||||
//
|
||||
// Guaranteed to emit a release barrier.
|
||||
BASELIB_INLINE_API void Baselib_EventSemaphore_Set(Baselib_EventSemaphore* semaphore);
|
||||
|
||||
// Reset event
|
||||
//
|
||||
// Resetting the event will cause all future acquiring threads to enter a wait state.
|
||||
// Has no effect if the EventSemaphore is already in a reset state.
|
||||
//
|
||||
// Guaranteed to emit a release barrier.
|
||||
BASELIB_INLINE_API void Baselib_EventSemaphore_Reset(Baselib_EventSemaphore* semaphore);
|
||||
|
||||
// Reset event and release all waiting threads
|
||||
//
|
||||
// Resetting the event will cause all future acquiring threads to enter a wait state.
|
||||
// If there were any threads waiting (i.e. the EventSemaphore was already in a release state) they will be released.
|
||||
//
|
||||
// Guaranteed to emit a release barrier.
|
||||
BASELIB_INLINE_API void Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(Baselib_EventSemaphore* semaphore);
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, calling free may trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr result in a no-op
|
||||
BASELIB_INLINE_API void Baselib_EventSemaphore_Free(Baselib_EventSemaphore* semaphore);
|
||||
400
Libraries/external/baselib/Include/C/Baselib_FileIO.h
vendored
Normal file
400
Libraries/external/baselib/Include/C/Baselib_FileIO.h
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib FileIO
|
||||
//
|
||||
// This is a file reading abstraction api heavily influenced by next-gen async API's like io_uring, windows register I/O, etc.
|
||||
// This api allows for platform independent async file reading.
|
||||
|
||||
#include "Baselib_ErrorState.h"
|
||||
#include "Baselib_Memory.h"
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Event queue handle.
|
||||
typedef struct Baselib_FileIO_EventQueue {void* handle;} Baselib_FileIO_EventQueue;
|
||||
|
||||
// Async file handle.
|
||||
typedef struct Baselib_FileIO_AsyncFile {void* handle;} Baselib_FileIO_AsyncFile;
|
||||
|
||||
// Sync file handle.
|
||||
typedef struct Baselib_FileIO_SyncFile {void* handle;} Baselib_FileIO_SyncFile;
|
||||
|
||||
// Event queue handle invalid constant.
|
||||
static const Baselib_FileIO_EventQueue Baselib_FileIO_EventQueue_Invalid = { NULL };
|
||||
|
||||
// Async file handle invalid constant.
|
||||
static const Baselib_FileIO_AsyncFile Baselib_FileIO_AsyncFile_Invalid = { NULL };
|
||||
|
||||
// Sync file handle invalid constant.
|
||||
static const Baselib_FileIO_SyncFile Baselib_FileIO_SyncFile_Invalid = { (void*)-1 };
|
||||
|
||||
typedef enum Baselib_FileIO_OpenFlags_t
|
||||
{
|
||||
// Allows read access to the file.
|
||||
Baselib_FileIO_OpenFlags_Read = 0x01,
|
||||
// Allows write access to the file.
|
||||
Baselib_FileIO_OpenFlags_Write = 0x02,
|
||||
// Opens existing file without changes or creates 0 size file if file doesn't exist.
|
||||
// On some platforms open will implicitly add write flag if required by native API's.
|
||||
Baselib_FileIO_OpenFlags_OpenAlways = 0x04,
|
||||
// Always creates 0 size file.
|
||||
// On some platforms open will implicitly add write flag if required by native API's.
|
||||
Baselib_FileIO_OpenFlags_CreateAlways = 0x08,
|
||||
} Baselib_FileIO_OpenFlags_t;
|
||||
typedef uint32_t Baselib_FileIO_OpenFlags;
|
||||
|
||||
// File IO read request.
|
||||
typedef struct Baselib_FileIO_ReadRequest
|
||||
{
|
||||
// Offset in a file to read from.
|
||||
// If offset+size is pointing pass EOF, will read up to EOF bytes.
|
||||
// If offset is pointing pass EOF, will read 0 bytes.
|
||||
uint64_t offset;
|
||||
// Buffer to read to, must be available for duration of operation.
|
||||
void* buffer;
|
||||
// Size of requested read.
|
||||
// If 0 is passed will read 0 bytes and raise no error.
|
||||
uint64_t size;
|
||||
} Baselib_FileIO_ReadRequest;
|
||||
|
||||
// File IO priorities.
|
||||
// First we process all requests with high priority, then with normal priority.
|
||||
// There's no round-robin, and high priority can starve normal priority.
|
||||
typedef enum Baselib_FileIO_Priority
|
||||
{
|
||||
Baselib_FileIO_Priority_Normal = 0,
|
||||
Baselib_FileIO_Priority_High = 1
|
||||
} Baselib_FileIO_Priority;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_FileIO_Priority);
|
||||
|
||||
typedef enum Baselib_FileIO_EventQueue_ResultType
|
||||
{
|
||||
// Upon receiving this event, please call the provided callback with provided data argument.
|
||||
Baselib_FileIO_EventQueue_Callback = 1,
|
||||
// Result of open file operation.
|
||||
Baselib_FileIO_EventQueue_OpenFile = 2,
|
||||
// Result of read file operation.
|
||||
Baselib_FileIO_EventQueue_ReadFile = 3,
|
||||
// Result of close file operation.
|
||||
Baselib_FileIO_EventQueue_CloseFile = 4
|
||||
} Baselib_FileIO_EventQueue_ResultType;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_FileIO_EventQueue_ResultType);
|
||||
|
||||
typedef void (*EventQueueCallback)(uint64_t userdata);
|
||||
|
||||
typedef struct Baselib_FileIO_EventQueue_Result_Callback
|
||||
{
|
||||
// Please invoke this callback with userdata from the event.
|
||||
EventQueueCallback callback;
|
||||
} Baselib_FileIO_EventQueue_Result_Callback;
|
||||
|
||||
typedef struct Baselib_FileIO_EventQueue_Result_OpenFile
|
||||
{
|
||||
// Size of the file as seen on during open.
|
||||
uint64_t fileSize;
|
||||
} Baselib_FileIO_EventQueue_Result_OpenFile;
|
||||
|
||||
typedef struct Baselib_FileIO_EventQueue_Result_ReadFile
|
||||
{
|
||||
// Bytes transferred during read.
|
||||
uint64_t bytesTransferred;
|
||||
} Baselib_FileIO_EventQueue_Result_ReadFile;
|
||||
|
||||
// Event queue result.
|
||||
typedef struct Baselib_FileIO_EventQueue_Result
|
||||
{
|
||||
// Event type.
|
||||
Baselib_FileIO_EventQueue_ResultType type;
|
||||
// Userdata as provided to the request.
|
||||
uint64_t userdata;
|
||||
// Error state of the operation.
|
||||
Baselib_ErrorState errorState;
|
||||
union
|
||||
{
|
||||
Baselib_FileIO_EventQueue_Result_Callback callback;
|
||||
Baselib_FileIO_EventQueue_Result_OpenFile openFile;
|
||||
Baselib_FileIO_EventQueue_Result_ReadFile readFile;
|
||||
};
|
||||
} Baselib_FileIO_EventQueue_Result;
|
||||
|
||||
// Creates event queue.
|
||||
//
|
||||
// \returns Event queue.
|
||||
BASELIB_API Baselib_FileIO_EventQueue Baselib_FileIO_EventQueue_Create(void);
|
||||
|
||||
// Frees event queue.
|
||||
//
|
||||
// \param eq event queue to free.
|
||||
BASELIB_API void Baselib_FileIO_EventQueue_Free(
|
||||
Baselib_FileIO_EventQueue eq
|
||||
);
|
||||
|
||||
// Dequeue events from event queue.
|
||||
//
|
||||
// \param eq Event queue to dequeue from.
|
||||
// \param results Results array to dequeue elements into.
|
||||
// If null will return 0.
|
||||
// \param count Amount of elements in results array.
|
||||
// If equals 0 will return 0.
|
||||
// \param timeoutInMilliseconds If no elements are present in the queue,
|
||||
// waits for any elements to be appear for specified amount of time.
|
||||
// If 0 is passed, wait is omitted.
|
||||
// If elements are present, dequeues up-to-count elements, and wait is omitted.
|
||||
//
|
||||
// File operations errors are reported via Baselib_FileIO_EventQueue_Result::errorState
|
||||
// Possible error codes:
|
||||
// - InvalidPathname: Requested pathname is invalid (not found, a directory, etc).
|
||||
// - RequestedAccessIsNotAllowed: Access to requested pathname is not allowed.
|
||||
// - IOError: IO error occured.
|
||||
//
|
||||
// \returns Amount of results filled.
|
||||
BASELIB_API uint64_t Baselib_FileIO_EventQueue_Dequeue(
|
||||
Baselib_FileIO_EventQueue eq,
|
||||
Baselib_FileIO_EventQueue_Result results[],
|
||||
uint64_t count,
|
||||
uint32_t timeoutInMilliseconds // 0 will return immediately
|
||||
);
|
||||
|
||||
// Request dequeue to shutdown
|
||||
//
|
||||
// \param eq Event queue to shutdown.
|
||||
// \param threadCount Number of threads to signal termination
|
||||
//
|
||||
// An empty queue will hang in Baselib_FileIO_EventQueue_Dequeue for as long as the timeout lasts.
|
||||
// This function can be used to exit such a condition
|
||||
BASELIB_API void Baselib_FileIO_EventQueue_Shutdown(
|
||||
Baselib_FileIO_EventQueue eq,
|
||||
uint32_t threadCount
|
||||
);
|
||||
|
||||
// Asynchronously opens a file.
|
||||
//
|
||||
// \param eq Event queue to associate file with.
|
||||
// File can only be associated with one event queue,
|
||||
// but one event queue can be associated with multiple files.
|
||||
// If invalid event queue is passed, will return invalid file handle.
|
||||
// \param pathname Platform defined pathname of a file.
|
||||
// Can be freed after this function returns.
|
||||
// If null is passed will return invalid file handle.
|
||||
// \param userdata Userdata to be set in the completion event.
|
||||
// \param priority Priority for file opening operation.
|
||||
//
|
||||
// Please note errors are reported via Baselib_FileIO_EventQueue_Result::errorState
|
||||
// Possible error codes:
|
||||
// - InvalidPathname: Requested pathname is invalid (not found, a directory, etc).
|
||||
// - RequestedAccessIsNotAllowed: Access to requested pathname is not allowed.
|
||||
// - IOError: IO error occured.
|
||||
//
|
||||
// \returns Async file handle, which can be used immediately for scheduling other operations.
|
||||
// In case if file opening fails, all scheduled operations will fail as well.
|
||||
// In case if invalid arguments are passed, might return invalid file handle (see args descriptions).
|
||||
BASELIB_API Baselib_FileIO_AsyncFile Baselib_FileIO_AsyncOpen(
|
||||
Baselib_FileIO_EventQueue eq,
|
||||
const char* pathname,
|
||||
uint64_t userdata,
|
||||
Baselib_FileIO_Priority priority
|
||||
);
|
||||
|
||||
// Asynchronously reads data from a file.
|
||||
//
|
||||
// Note scheduling reads on closed file is undefined.
|
||||
//
|
||||
// \param file Async file to read from.
|
||||
// If invalid file handle is passed, will no-op.
|
||||
// If file handle was already closed, behavior is undefined.
|
||||
// \param requests Requests to schedule.
|
||||
// If more than 1 provided,
|
||||
// will provide completion event per individual request in the array.
|
||||
// If null is passed, will no-op.
|
||||
// \param count Amount of requests in requests array.
|
||||
// If 0 is passed, will no-op.
|
||||
// \param userdata Userdata to be set in the completion event(s).
|
||||
// \param priority Priority for file reading operation(s).
|
||||
//
|
||||
// Please note errors are reported via Baselib_FileIO_EventQueue_Result::errorState
|
||||
// If file is invalid handle, error can not be reported because event queue is not known.
|
||||
// Possible error codes:
|
||||
// - IOError: IO error occured.
|
||||
BASELIB_API void Baselib_FileIO_AsyncRead(
|
||||
Baselib_FileIO_AsyncFile file,
|
||||
Baselib_FileIO_ReadRequest requests[],
|
||||
uint64_t count,
|
||||
uint64_t userdata,
|
||||
Baselib_FileIO_Priority priority
|
||||
);
|
||||
|
||||
// Asynchronously closes a file.
|
||||
//
|
||||
// Will wait for all pending operations to complete,
|
||||
// after that will close a file and put a completion event.
|
||||
//
|
||||
// \param file Async file to close.
|
||||
// If invalid file handle is passed, will no-op.
|
||||
//
|
||||
// Please note errors are reported via Baselib_FileIO_EventQueue_Result::errorState
|
||||
// If file is invalid handle, error can not be reported because event queue is not known.
|
||||
// Possible error codes:
|
||||
// - IOError: IO error occured.
|
||||
BASELIB_API void Baselib_FileIO_AsyncClose(
|
||||
Baselib_FileIO_AsyncFile file
|
||||
);
|
||||
|
||||
// Synchronously opens a file.
|
||||
//
|
||||
// Will try use the most open access permissions options that are available for each platform.
|
||||
// Meaning it might be possible for other process to write to file opened via this API.
|
||||
// On most platforms file can be simultaneously opened with different open flags.
|
||||
// If you require more strict options, or platform specific access configuration, please use Baselib_FileIO_SyncFileFromNativeHandle.
|
||||
//
|
||||
// \param pathname Platform defined pathname to open.
|
||||
// \param openFlags Open flags.
|
||||
// If file is created because one of Create flags is passed, it will have size of 0 bytes.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - RequestedAccessIsNotAllowed: Request access is not allowed.
|
||||
// - IOError: Generic IO error occured.
|
||||
//
|
||||
// \returns SyncFile handle.
|
||||
BASELIB_API Baselib_FileIO_SyncFile Baselib_FileIO_SyncOpen(
|
||||
const char* pathname,
|
||||
Baselib_FileIO_OpenFlags openFlags,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Transfer ownership of native handle to Baselib_FileIO_SyncFile handle.
|
||||
//
|
||||
// This function transfers ownership, meaning you don't need to close native handle yourself,
|
||||
// instead returned SyncFile must closed via Baselib_FileIO_SyncClose.
|
||||
// Implementations might cache information about the file state,
|
||||
// so native handle shouldn't be used after transfering ownership.
|
||||
//
|
||||
// \param handle Platform defined native handle.
|
||||
// If invalid native handle is passed, will return Baselib_FileIO_SyncFile_Invalid.
|
||||
// \param type Platform defined native handle type from Baselib_FileIO_NativeHandleType enum.
|
||||
// If unsupported type is passed, will return Baselib_FileIO_SyncFile_Invalid.
|
||||
//
|
||||
// \returns SyncFile handle.
|
||||
BASELIB_API Baselib_FileIO_SyncFile Baselib_FileIO_SyncFileFromNativeHandle(
|
||||
uint64_t handle,
|
||||
uint32_t type
|
||||
);
|
||||
|
||||
// Synchronously reads data from a file.
|
||||
//
|
||||
// \param file File to read from.
|
||||
// If invalid file handle is passed, will raise InvalidArgument error and return 0.
|
||||
// \param offset Offset in the file to read data at.
|
||||
// If offset+size goes past end-of-file (EOF), function will read until EOF.
|
||||
// If offset points past EOF, will return 0.
|
||||
// \param buffer Pointer to data to read into.
|
||||
// \param size Size of data to read.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - IOError: Generic IO error occured.
|
||||
//
|
||||
// \returns Amount of bytes read.
|
||||
BASELIB_API uint64_t Baselib_FileIO_SyncRead(
|
||||
Baselib_FileIO_SyncFile file,
|
||||
uint64_t offset,
|
||||
void* buffer,
|
||||
uint64_t size,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Synchronously writes data to a file.
|
||||
//
|
||||
// \param file File to write to.
|
||||
// If invalid file handle is passed, will raise InvalidArgument error and return 0.
|
||||
// \param offset Offset in the file to write data at.
|
||||
// If offset+size goes past end-of-file (EOF), then file will be resized.
|
||||
// \param buffer Pointer to data to write.
|
||||
// \param size Size of data to write.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - IOError: Generic IO error occured.
|
||||
//
|
||||
// \returns Amount of bytes written.
|
||||
BASELIB_API uint64_t Baselib_FileIO_SyncWrite(
|
||||
Baselib_FileIO_SyncFile file,
|
||||
uint64_t offset,
|
||||
const void* buffer,
|
||||
uint64_t size,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Synchronously flushes file buffers.
|
||||
//
|
||||
// Operating system might buffer some write operations.
|
||||
// Flushing buffers is required to guarantee (best effort) writing data to disk.
|
||||
//
|
||||
// \param file File to flush.
|
||||
// If invalid file handle is passed, will no-op.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - IOError: Generic IO error occured.
|
||||
BASELIB_API void Baselib_FileIO_SyncFlush(
|
||||
Baselib_FileIO_SyncFile file,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Synchronously changes file size.
|
||||
//
|
||||
// \param file File to get size of.
|
||||
// If invalid file handle is passed, will raise invalid argument error.
|
||||
// \param size New file size.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - IOError: Generic IO error occured.
|
||||
//
|
||||
// \returns File size.
|
||||
BASELIB_API void Baselib_FileIO_SyncSetFileSize(
|
||||
Baselib_FileIO_SyncFile file,
|
||||
uint64_t size,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Synchronously retrieves file size.
|
||||
//
|
||||
// \param file File to get size of.
|
||||
// If invalid file handle is passed, will return 0.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - IOError: Generic IO error occured.
|
||||
//
|
||||
// \returns File size.
|
||||
BASELIB_API uint64_t Baselib_FileIO_SyncGetFileSize(
|
||||
Baselib_FileIO_SyncFile file,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Synchronously closes a file.
|
||||
//
|
||||
// Close does not guarantee that the data was written to disk,
|
||||
// Please use Baselib_FileIO_SyncFlush to guarantee (best effort) that data was written to disk.
|
||||
//
|
||||
// \param file File to close.
|
||||
// If invalid file handle is passed, will no-op.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: Invalid argument was passed.
|
||||
// - IOError: Generic IO error occured.
|
||||
BASELIB_API void Baselib_FileIO_SyncClose(
|
||||
Baselib_FileIO_SyncFile file,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
#include <C/Baselib_FileIO.inl.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
74
Libraries/external/baselib/Include/C/Baselib_HighCapacitySemaphore.h
vendored
Normal file
74
Libraries/external/baselib/Include/C/Baselib_HighCapacitySemaphore.h
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_HighCapacitySemaphore
|
||||
// This semaphore is similar to Baselib_Semaphore but allows for a far greater token count for a price of a bit slower performance.
|
||||
// This semaphore is usable for counting resources.
|
||||
|
||||
// This is the max number of tokens guaranteed to be held by the semaphore at
|
||||
// any given point in time. Tokens submitted that exceed this value may silently be discarded.
|
||||
static const int64_t Baselib_HighCapacitySemaphore_MaxGuaranteedCount = UINT64_C(1) << 61;
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#include "Internal/Baselib_HighCapacitySemaphore_FutexBased.inl.h"
|
||||
#else
|
||||
#include "Internal/Baselib_HighCapacitySemaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Creates a counting semaphore synchronization primitive.
|
||||
//
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
//
|
||||
// For optimal performance, the returned Baselib_HighCapacitySemaphore should be stored at a cache aligned memory location.
|
||||
//
|
||||
// \returns A struct representing a semaphore instance. Use Baselib_HighCapacitySemaphore_Free to free the semaphore.
|
||||
BASELIB_INLINE_API Baselib_HighCapacitySemaphore Baselib_HighCapacitySemaphore_Create(void);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
// Returns if token was consumed or was woken up by Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads.
|
||||
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Acquire(Baselib_HighCapacitySemaphore* semaphore);
|
||||
|
||||
// Try to consume a token and return immediately.
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// \returns true if token was consumed. false if not.
|
||||
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryAcquire(Baselib_HighCapacitySemaphore* semaphore);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
|
||||
// while Acquire may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \param timeout Time to wait for token to become available.
|
||||
//
|
||||
// \returns true if token was consumed or was woken up by Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads. false if timeout was reached.
|
||||
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryTimedAcquire(Baselib_HighCapacitySemaphore* semaphore, const uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Submit tokens to the semaphore.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
|
||||
// If count reach `Baselib_HighCapacitySemaphore_MaxGuaranteedCount` this function may silently discard any overflow.
|
||||
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Release(Baselib_HighCapacitySemaphore* semaphore, const uint32_t count);
|
||||
|
||||
// If threads are waiting on Baselib_HighCapacitySemaphore_Acquire / Baselib_HighCapacitySemaphore_TryTimedAcquire,
|
||||
// releases enough tokens to wake them up. Otherwise consumes all available tokens.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// \returns number of released threads.
|
||||
BASELIB_INLINE_API uint64_t Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(Baselib_HighCapacitySemaphore* semaphore);
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr result in a no-op
|
||||
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Free(Baselib_HighCapacitySemaphore* semaphore);
|
||||
69
Libraries/external/baselib/Include/C/Baselib_Lock.h
vendored
Normal file
69
Libraries/external/baselib/Include/C/Baselib_Lock.h
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
#pragma once
|
||||
|
||||
// In computer science, a lock or mutex (from mutual exclusion) is a synchronization mechanism for enforcing limits on access to a resource in an environment
|
||||
// where there are many threads of execution. A lock is designed to enforce a mutual exclusion concurrency control policy.
|
||||
//
|
||||
// "Lock (computer science)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Lock_(computer_science)&oldid=875674239
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#include "Internal/Baselib_Lock_FutexBased.inl.h"
|
||||
#else
|
||||
#include "Internal/Baselib_Lock_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Creates a lock synchronization primitive.
|
||||
//
|
||||
// If there are not enough system resources to create a lock, process abort is triggered.
|
||||
//
|
||||
// For optimal performance, the returned Baselib_Lock should be stored at a cache aligned memory location.
|
||||
//
|
||||
// \returns A struct representing a lock instance. Use Baselib_Lock_Free to free the lock.
|
||||
BASELIB_INLINE_API Baselib_Lock Baselib_Lock_Create(void);
|
||||
|
||||
// Try to acquire lock and return immediately.
|
||||
//
|
||||
// If lock is held, either by this or another thread, then lock is not acquired and function return false.
|
||||
//
|
||||
// If successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// \returns true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_Lock_TryAcquire(Baselib_Lock* lock);
|
||||
|
||||
// Acquire lock.
|
||||
//
|
||||
// If lock is held, either by this or another thread, then the function wait for lock to be released.
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
BASELIB_INLINE_API void Baselib_Lock_Acquire(Baselib_Lock* lock);
|
||||
|
||||
// Try to acquire lock.
|
||||
//
|
||||
// If lock is held, either by this or another thread, then the function wait for timeoutInMilliseconds for lock to be released.
|
||||
//
|
||||
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
|
||||
// while Acquire may enter the kernel and cause a context switch.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \returns true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_Lock_TryTimedAcquire(Baselib_Lock* lock, uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Release lock and make it available to other threads.
|
||||
//
|
||||
// This function can be called from any thread, not only the thread that acquired the lock.
|
||||
// If no lock was previously held calling this function result in a no-op.
|
||||
//
|
||||
// When the lock is released this function is guaranteed to emit a release barrier.
|
||||
BASELIB_INLINE_API void Baselib_Lock_Release(Baselib_Lock* lock);
|
||||
|
||||
// Reclaim resources and memory held by lock.
|
||||
//
|
||||
// If threads are waiting on the lock, calling free may trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr result in a no-op
|
||||
BASELIB_INLINE_API void Baselib_Lock_Free(Baselib_Lock* lock);
|
||||
196
Libraries/external/baselib/Include/C/Baselib_Memory.h
vendored
Normal file
196
Libraries/external/baselib/Include/C/Baselib_Memory.h
vendored
Normal file
@@ -0,0 +1,196 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_ErrorState.h"
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Max alignment that can be passed to Baselib_Memory_AlignedAlloc and Baselib_Memory_AlignedReallocate functions
|
||||
static const size_t Baselib_Memory_MaxAlignment = 64 * 1024;
|
||||
|
||||
// We can't handle platform varying constants in the C# bindings right now.
|
||||
#if !defined(BASELIB_BINDING_GENERATION)
|
||||
|
||||
// Minimum guaranteed alignment for Baselib_Memory_Allocate/Baselib_Memory_AlignedAlloc in bytes.
|
||||
//
|
||||
// Guaranteed to be at least 8.
|
||||
// Note that on some platforms it is possible to overwrite the internally used allocator in which case this guarantee may no longer be upheld.
|
||||
static const size_t Baselib_Memory_MinGuaranteedAlignment = PLATFORM_MEMORY_MALLOC_MIN_ALIGNMENT;
|
||||
|
||||
#else
|
||||
|
||||
// Minimum guaranteed alignment for Baselib_Memory_Allocate/Baselib_Memory_AlignedAlloc in bytes.
|
||||
//
|
||||
// Guaranteed to be at least 8.
|
||||
// Note that on some platforms it is possible to overwrite the internally used allocator in which case this guarantee may no longer be upheld.
|
||||
static const size_t Baselib_Memory_MinGuaranteedAlignment = 8;
|
||||
|
||||
#endif // !defined(BASELIB_BINDING_GENERATION)
|
||||
|
||||
// Information about available pages sizes.
|
||||
//
|
||||
// Page sizes do not reflect necessarily hardware ("physical") page sizes, but rather "virtual" page sizes that the OS is dealing with.
|
||||
// I.e. a virtual page may refer to several hardware pages, but the OS exposes only a single state for this group of pages.
|
||||
typedef struct Baselib_Memory_PageSizeInfo
|
||||
{
|
||||
// Commonly used page size on this platform.
|
||||
uint64_t defaultPageSize;
|
||||
|
||||
// pageSizesLen valid page sizes, ordered from small to large.
|
||||
uint64_t pageSizes[6];
|
||||
uint64_t pageSizesLen;
|
||||
} Baselib_Memory_PageSizeInfo;
|
||||
|
||||
typedef struct Baselib_Memory_PageAllocation
|
||||
{
|
||||
void* ptr;
|
||||
uint64_t pageSize;
|
||||
uint64_t pageCount;
|
||||
} Baselib_Memory_PageAllocation;
|
||||
|
||||
static const Baselib_Memory_PageAllocation Baselib_Memory_PageAllocation_Invalid = {0, 0, 0};
|
||||
|
||||
// Fills out a Baselib_Memory_PageSizeInfo struct.
|
||||
//
|
||||
// \param outPagesSizeInfo: Pointer to page size info struct. Passing 'nullptr' will return immediately.
|
||||
BASELIB_API void Baselib_Memory_GetPageSizeInfo(Baselib_Memory_PageSizeInfo* outPagesSizeInfo);
|
||||
|
||||
// Allocates memory using a system allocator like malloc.
|
||||
//
|
||||
// Allocation failures or invalid alignments will trigger process abort.
|
||||
//
|
||||
// \param size Size of the allocation. Zero is valid.
|
||||
// \returns Unique pointer to allocation. At least aligned to by Baselib_Memory_MinGuaranteedAlignment bytes.
|
||||
// This is true for zero sized allocations as well.
|
||||
BASELIB_API void* Baselib_Memory_Allocate(size_t size);
|
||||
|
||||
// Reallocates memory previously allocated by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
|
||||
//
|
||||
// Allocation failures or invalid alignments will trigger process abort.
|
||||
//
|
||||
// \param ptr Pointer previously returned by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
|
||||
// Reallocating an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_Allocate or
|
||||
// Baselib_Memory_Reallocate leads to undefined behavior.
|
||||
// Passing `nullptr` yield the same result as calling Baselib_Memory_Allocate.
|
||||
// \param size Size of the allocation. No special restrictions apply, zero is valid.
|
||||
// \returns Unique pointer to allocation. At least aligned to by Baselib_Memory_MinGuaranteedAlignment bytes.
|
||||
// This is true for zero sized allocations as well.
|
||||
BASELIB_API void* Baselib_Memory_Reallocate(void* ptr, size_t newSize);
|
||||
|
||||
// Frees memory allocated by Baselib_Memory_Allocate Baselib_Memory_Reallocate.
|
||||
//
|
||||
// \param ptr Pointer previously returned by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
|
||||
// Freeing an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_Allocate or Baselib_Memory_Reallocate leads to undefined behavior.
|
||||
// Passing `nullptr` result in a no-op.
|
||||
BASELIB_API void Baselib_Memory_Free(void* ptr);
|
||||
|
||||
// Allocates memory using a system allocator like malloc and guarantees that the returned pointer is aligned to the specified alignment.
|
||||
//
|
||||
// Allocation failures or invalid alignments will trigger process abort.
|
||||
//
|
||||
// \param size Size of the allocation. No special restrictions (like multiples of alignment) apply, zero is valid.
|
||||
// \param alignment Needs to be a power of two which is also a multiple of of pointer size (i.e. sizeof(void*)) but less or equal to Baselib_Memory_MaxAlignment.
|
||||
// Any alignment smaller than Baselib_Memory_MinGuaranteedAlignment, will be clamped to Baselib_Memory_MinGuaranteedAlignment.
|
||||
// \returns Unique pointer to aligned allocation. This is true for zero sized allocations as well.
|
||||
BASELIB_API void* Baselib_Memory_AlignedAllocate(size_t size, size_t alignment);
|
||||
|
||||
// Reallocates memory previously allocated by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
|
||||
//
|
||||
// Allocation failures or invalid alignments will trigger process abort.
|
||||
//
|
||||
// \param ptr Pointer previously returned by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
|
||||
// Reallocating an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_AlignedAllocate or
|
||||
// Baselib_Memory_AlignedReallocate leads to undefined behavior.
|
||||
// Passing `nullptr` yield the same result as calling Baselib_Memory_AlignedAllocate.
|
||||
// \param size Size of the allocation. No special restrictions apply, zero is valid.
|
||||
// \param alignment Needs to be a power of two which is also a multiple of of pointer size (i.e. sizeof(void*)) but less or equal to Baselib_Memory_MaxAlignment.
|
||||
// Any alignment smaller than Baselib_Memory_MinGuaranteedAlignment, will be clamped to Baselib_Memory_MinGuaranteedAlignment.
|
||||
// \returns Unique pointer to aligned allocation. This is true for zero sized allocations as well.
|
||||
BASELIB_API void* Baselib_Memory_AlignedReallocate(void* ptr, size_t newSize, size_t alignment);
|
||||
|
||||
// Frees memory allocated by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
|
||||
//
|
||||
// \param ptr Pointer previously returned by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
|
||||
// Freeing an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate leads to undefined behavior.
|
||||
// Passing `nullptr` results in a no-op.
|
||||
BASELIB_API void Baselib_Memory_AlignedFree(void* ptr);
|
||||
|
||||
|
||||
// Page state options
typedef enum Baselib_Memory_PageState
{
    // Pages are in a reserved state and any access will cause a seg-fault/access violation.
    // On some platforms that support this state this may be just a hint to the OS and there is no guarantee pages in this state behave differently from Baselib_Memory_PageState_NoAccess.
    // The Baselib implementation does a best effort and tries to ensure as best as possible that pages in this state are not committed.
    Baselib_Memory_PageState_Reserved = 0x00,

    // This is a no access page and will cause a seg-fault/access violation when accessed.
    Baselib_Memory_PageState_NoAccess = 0x01,
    // The memory can only be read.
    Baselib_Memory_PageState_ReadOnly = 0x02,
    // The memory can be read and written.
    Baselib_Memory_PageState_ReadWrite = 0x04,

    // The memory can be used to execute code and can be read.
    Baselib_Memory_PageState_ReadOnly_Executable = 0x10 | Baselib_Memory_PageState_ReadOnly,
    // The memory can be used to execute code and can be both read and written.
    Baselib_Memory_PageState_ReadWrite_Executable = 0x10 | Baselib_Memory_PageState_ReadWrite,
} Baselib_Memory_PageState;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_Memory_PageState);
|
||||
|
||||
// Allocates a given number of memory pages and guarantees that the returned pointer is aligned to specified multiple of the page size.
|
||||
//
|
||||
// Large alignments may lead to a significantly higher use of virtual address space than the amount of memory requested.
|
||||
// This may result in an aligned page allocation to fail where a less/non-aligned allocation would succeed.
|
||||
// Note that this is especially common in 32bit applications but a platform may impose additional restrictions on the size of its virtual address space.
|
||||
// Whether a page allocation is pure virtual address space or already committed memory depends on the platform and passed page state flag.
|
||||
//
|
||||
// \param pageCount Number of pages requested (each will have pageSize size)
|
||||
// \param alignmentInMultipleOfPageSize Specified alignment in multiple of page sizes (a value of 1 implies alignment to page size).
|
||||
// Value needs to be larger than zero and a power of two, otherwise UnsupportedAlignment will be raised.
|
||||
// \param pageState: In which state the pages should be. Certain values may raise UnsupportedPageState on certain platforms.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidPageSize: Page size doesn't match any of the available page sizes (see Baselib_Memory_GetPageSizeInfo).
|
||||
// - Baselib_ErrorCode_InvalidPageCount: Requested number of pages is zero.
|
||||
// - Baselib_ErrorCode_UnsupportedAlignment: Requested alignment is invalid.
|
||||
// - Baselib_ErrorCode_UnsupportedPageState: The underlying system doesn't support the requested page state (see Baselib_Memory_PageState).
|
||||
// - Baselib_ErrorCode_OutOfMemory: If there is not enough continuous address space available, or physical memory space when acquiring committed memory.
|
||||
//
|
||||
// \returns Page allocation info or Baselib_Memory_PageAllocation_Invalid in case of an error.
|
||||
BASELIB_API Baselib_Memory_PageAllocation Baselib_Memory_AllocatePages(uint64_t pageSize, uint64_t pageCount, uint64_t alignmentInMultipleOfPageSize, Baselib_Memory_PageState pageState, Baselib_ErrorState* errorState);
|
||||
|
||||
// Releases pages previously allocated by Baselib_Memory_AllocatePages.
|
||||
//
|
||||
// A single call of ReleasePages must encompass all pages that were originally allocated with a single call of AllocatePages.
|
||||
// Passing a Baselib_Memory_PageAllocation with a nullptr or a zero page count results in a no-op.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidAddressRange: Address range was detected to not match a valid allocation.
|
||||
// CAUTION: Not all platforms are able to detect this and may either raise an error or cause undefined behavior.
|
||||
// Note to implementors: Raising the error is strongly preferred as it helps identifying issues in user code.
|
||||
// - Baselib_ErrorCode_InvalidPageSize: If page size doesn't match a previous allocation at `pageAllocation.ptr`.
|
||||
//
|
||||
// Implementation note:
|
||||
// We could be able to allow granular ReleasePages call, but even then only in the _allocation granularity_ which might be different from the page size.
|
||||
// (e.g. windows page size 4k allocation granularity 64k)
|
||||
BASELIB_API void Baselib_Memory_ReleasePages(Baselib_Memory_PageAllocation pageAllocation, Baselib_ErrorState* errorState);
|
||||
|
||||
// Modifies the page state property of an already allocated virtual address range.
|
||||
//
|
||||
// It is possible to modify only some of the pages allocated by Baselib_Memory_AllocatePages.
|
||||
// Passing `nullptr` or a zero page count results in a no-op.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidAddressRange: Address range is not covered by a valid allocation.
|
||||
// Platforms that emulate page allocations (e.g. Emscripten) are not able to present this error and will pass the function call silently.
|
||||
// - Baselib_ErrorCode_InvalidPageSize: If page size doesn't match the previous allocation at `addressOfFirstPage`.
|
||||
// - Baselib_ErrorCode_UnsupportedPageState: The underlying system doesn't support the requested page state (see Baselib_Memory_PageState).
|
||||
BASELIB_API void Baselib_Memory_SetPageState(void* addressOfFirstPage, uint64_t pageSize, uint64_t pageCount, Baselib_Memory_PageState pageState, Baselib_ErrorState* errorState);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
100
Libraries/external/baselib/Include/C/Baselib_NetworkAddress.h
vendored
Normal file
100
Libraries/external/baselib/Include/C/Baselib_NetworkAddress.h
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib Network Address
|
||||
|
||||
#include "Baselib_ErrorState.h"
|
||||
#include "Baselib_Alignment.h"
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Address family.
typedef enum Baselib_NetworkAddress_Family
{
    // No valid address family set.
    Baselib_NetworkAddress_Family_Invalid = 0,
    // IPv4 address.
    Baselib_NetworkAddress_Family_IPv4 = 1,
    // IPv6 address.
    Baselib_NetworkAddress_Family_IPv6 = 2
} Baselib_NetworkAddress_Family;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_NetworkAddress_Family);
|
||||
|
||||
// Fixed size address structure, large enough to hold IPv4 and IPv6 addresses.
|
||||
typedef struct Baselib_NetworkAddress
|
||||
{
|
||||
union
|
||||
{
|
||||
uint8_t data[16];
|
||||
uint8_t ipv6[16]; // in network byte order
|
||||
uint8_t ipv4[4]; // in network byte order
|
||||
};
|
||||
BASELIB_ALIGN_AS(2) uint8_t port[2]; // in network byte order
|
||||
uint8_t family;
|
||||
uint8_t _padding; // Explicit padding to allow for deterministic bitwise compare.
|
||||
|
||||
// Scope zone index for IPv6 (ignored for IPv4)
|
||||
// Defaults to zero if not specified.
|
||||
// Note that unlike the other fields in this struct, this is *not* in network byte order!
|
||||
uint32_t ipv6_scope_id;
|
||||
} Baselib_NetworkAddress;
|
||||
|
||||
// Max length of any string representing an IP address
|
||||
static const uint32_t Baselib_NetworkAddress_IpMaxStringLength = 46;
|
||||
|
||||
// Binary encode string representation of an address.
|
||||
//
|
||||
// Neither port nor scope id are parsed from the ip string.
|
||||
// dstAddress->ipv6_scope_id is set to zero and needs to be manually set if required.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument - One or more of the input parameters are invalid
|
||||
BASELIB_API void Baselib_NetworkAddress_Encode(
|
||||
Baselib_NetworkAddress* dstAddress,
|
||||
Baselib_NetworkAddress_Family family,
|
||||
const char ip[],
|
||||
uint16_t port,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Decode binary representation of an address.
|
||||
//
|
||||
// family, ipAddressBuffer, and port are all optional arguments.
|
||||
// Passing zero as ipAddressBufferLen is the same as passing nullptr as ipAddressBuffer.
|
||||
// Port and IPv6 scope id are not encoded to ipAddressBuffer.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument - srcAddress is null or otherwise invalid.
|
||||
// - Baselib_ErrorCode_InvalidBufferSize - ipAddressBuffer is too small to hold decoded ip address.
|
||||
BASELIB_API void Baselib_NetworkAddress_Decode(
|
||||
const Baselib_NetworkAddress* srcAddress,
|
||||
Baselib_NetworkAddress_Family* family,
|
||||
char ipAddressBuffer[],
|
||||
uint32_t ipAddressBufferLen,
|
||||
uint16_t* port,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Returns zero initialized network address struct
|
||||
static inline Baselib_NetworkAddress Baselib_NetworkAddress_Empty(void)
|
||||
{
|
||||
Baselib_NetworkAddress address;
|
||||
memset(&address, 0, sizeof(address));
|
||||
return address;
|
||||
}
|
||||
|
||||
typedef enum Baselib_NetworkAddress_AddressReuse
{
    // The address/port this socket binds to must not be shared with other sockets.
    Baselib_NetworkAddress_AddressReuse_DoNotAllow = 0,

    // Allow multiple sockets to be bound to the same address/port.
    // All sockets bound to the same address/port need to have this flag set.
    Baselib_NetworkAddress_AddressReuse_Allow = 1,
} Baselib_NetworkAddress_AddressReuse;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_NetworkAddress_AddressReuse);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
14
Libraries/external/baselib/Include/C/Baselib_Process.h
vendored
Normal file
14
Libraries/external/baselib/Include/C/Baselib_Process.h
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_ErrorCode.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
BASELIB_API COMPILER_NORETURN void Baselib_Process_Abort(Baselib_ErrorCode error);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
76
Libraries/external/baselib/Include/C/Baselib_ReentrantLock.h
vendored
Normal file
76
Libraries/external/baselib/Include/C/Baselib_ReentrantLock.h
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_ReentrantLock
|
||||
|
||||
// In computer science, the reentrant mutex (recursive mutex, recursive lock) is particular type of mutual exclusion (mutex) device that may be locked multiple
|
||||
// times by the same process/thread, without causing a deadlock.
|
||||
|
||||
// While any attempt to perform the "lock" operation on an ordinary mutex (lock) would either fail or block when the mutex is already locked, on a recursive
|
||||
// mutex this operation will succeed if and only if the locking thread is the one that already holds the lock. Typically, a recursive mutex tracks the number
|
||||
// of times it has been locked, and requires equally many unlock operations to be performed before other threads may lock it.
|
||||
//
|
||||
// "Reentrant mutex", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Reentrant_mutex&oldid=818566928
|
||||
|
||||
#include "Internal/Baselib_ReentrantLock.inl.h"
|
||||
|
||||
// Creates a reentrant lock synchronization primitive.
|
||||
//
|
||||
// If there are not enough system resources to create a lock, process abort is triggered.
|
||||
//
|
||||
// For optimal performance, the returned Baselib_ReentrantLock should be stored at a cache aligned memory location.
|
||||
//
|
||||
// \returns A struct representing a lock instance. Use Baselib_ReentrantLock_Free to free the lock.
|
||||
BASELIB_INLINE_API Baselib_ReentrantLock Baselib_ReentrantLock_Create(void);
|
||||
|
||||
|
||||
// Try to acquire lock and return immediately.
|
||||
// If lock is already acquired by the current thread this function increase the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
|
||||
// to be made before the lock is released.
|
||||
//
|
||||
// When lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// \returns true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryAcquire(Baselib_ReentrantLock* lock);
|
||||
|
||||
// Acquire lock.
|
||||
//
|
||||
// If lock is already acquired by the current thread this function increase the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
|
||||
// to be made before the lock is released.
|
||||
// If lock is held by another thread, this function wait for lock to be released.
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
BASELIB_INLINE_API void Baselib_ReentrantLock_Acquire(Baselib_ReentrantLock* lock);
|
||||
|
||||
// Acquire lock.
|
||||
// If lock is already acquired by the current thread this function increase the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
|
||||
// to be made before the lock is released.
|
||||
// If lock is held by another thread, this function wait for timeoutInMilliseconds for lock to be released.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
|
||||
// while Acquire may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \returns true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryTimedAcquire(Baselib_ReentrantLock* lock, uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Release lock.
|
||||
// If lock count is still higher than zero after the release operation then lock remain in a locked state.
|
||||
// If lock count reach zero the lock is unlocked and made available to other threads
|
||||
//
|
||||
// When the lock is released this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Calling this function from a thread that doesn't own the lock result triggers an assert in debug and causes undefined behavior in release builds.
|
||||
BASELIB_INLINE_API void Baselib_ReentrantLock_Release(Baselib_ReentrantLock* lock);
|
||||
|
||||
// Reclaim resources and memory held by lock.
|
||||
//
|
||||
// If threads are waiting on the lock, calling free may trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr results in a no-op.
|
||||
BASELIB_INLINE_API void Baselib_ReentrantLock_Free(Baselib_ReentrantLock* lock);
|
||||
391
Libraries/external/baselib/Include/C/Baselib_RegisteredNetwork.h
vendored
Normal file
391
Libraries/external/baselib/Include/C/Baselib_RegisteredNetwork.h
vendored
Normal file
@@ -0,0 +1,391 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_ErrorState.h"
|
||||
#include "Baselib_Memory.h"
|
||||
#include "Baselib_NetworkAddress.h"
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Network buffers
|
||||
|
||||
// Implementation defined internal buffer id.
|
||||
typedef void* Baselib_RegisteredNetwork_Buffer_Id;
|
||||
static const Baselib_RegisteredNetwork_Buffer_Id Baselib_RegisteredNetwork_Buffer_Id_Invalid = 0;
|
||||
|
||||
// Network buffer structure.
|
||||
// One buffer can contain multiple packets and endpoints.
|
||||
typedef struct Baselib_RegisteredNetwork_Buffer
|
||||
{
|
||||
Baselib_RegisteredNetwork_Buffer_Id id;
|
||||
Baselib_Memory_PageAllocation allocation;
|
||||
} Baselib_RegisteredNetwork_Buffer;
|
||||
|
||||
// Create a network buffer from a set of previously allocated memory pages.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidAddressRange: if pageAllocation is invalid
|
||||
//
|
||||
// \returns A network buffer. If registration fails, then buffer id is set to Baselib_RegisteredNetwork_Buffer_Id_Invalid.
|
||||
BASELIB_API Baselib_RegisteredNetwork_Buffer Baselib_RegisteredNetwork_Buffer_Register(
|
||||
Baselib_Memory_PageAllocation pageAllocation,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Deregister network buffer. Disassociate memory pages and buffer representation.
|
||||
//
|
||||
// Allocated pages will stay allocated and can now be used for something else.
|
||||
// Passing an invalid buffer results in a no-op.
|
||||
BASELIB_API void Baselib_RegisteredNetwork_Buffer_Deregister(
|
||||
Baselib_RegisteredNetwork_Buffer buffer
|
||||
);
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Network buffers slices
|
||||
|
||||
// Slice of a network buffer.
|
||||
typedef struct Baselib_RegisteredNetwork_BufferSlice
|
||||
{
|
||||
Baselib_RegisteredNetwork_Buffer_Id id;
|
||||
void* data; // data of the slice
|
||||
uint32_t size; // size of the slice in bytes
|
||||
uint32_t offset; // offset in main buffer
|
||||
} Baselib_RegisteredNetwork_BufferSlice;
|
||||
|
||||
// Creates slice from network buffer
|
||||
//
|
||||
// \param buffer Buffer to create slice from.
|
||||
// \param offset Offset in buffer in bytes.
|
||||
// \param size Size of the slice in bytes.
|
||||
BASELIB_API Baselib_RegisteredNetwork_BufferSlice Baselib_RegisteredNetwork_BufferSlice_Create(
|
||||
Baselib_RegisteredNetwork_Buffer buffer,
|
||||
uint32_t offset,
|
||||
uint32_t size
|
||||
);
|
||||
|
||||
// Create empty slice that doesn't point to anything
|
||||
//
|
||||
// Guaranteed to reference Baselib_RegisteredNetwork_Buffer_Id_Invalid and have all other values zeroed out.
|
||||
BASELIB_API Baselib_RegisteredNetwork_BufferSlice Baselib_RegisteredNetwork_BufferSlice_Empty(void);
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Network endpoints are platform defined representation (sockaddr_in-like) of network address (family, ip, port).
|
||||
|
||||
typedef struct Baselib_RegisteredNetwork_Endpoint { Baselib_RegisteredNetwork_BufferSlice slice; } Baselib_RegisteredNetwork_Endpoint;
|
||||
|
||||
static const uint32_t Baselib_RegisteredNetwork_Endpoint_MaxSize = 28; // in bytes
|
||||
|
||||
// Place network address into the network buffer.
|
||||
//
|
||||
// Destination must be able to accommodate Baselib_RegisteredNetwork_Endpoint_MaxSize bytes.
|
||||
//
|
||||
// \param srcAddress Network address to use, pass nullptr to create an empty endpoint.
|
||||
// \param dstSlice Where to write encoded data.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if dstSlice is invalid
|
||||
// - InvalidBufferSize: if dstSlice is smaller than Baselib_RegisteredNetwork_Endpoint_MaxSize
|
||||
//
|
||||
// \returns Endpoint or Endpoint_Empty in case of failure.
|
||||
BASELIB_API Baselib_RegisteredNetwork_Endpoint Baselib_RegisteredNetwork_Endpoint_Create(
|
||||
const Baselib_NetworkAddress* srcAddress,
|
||||
Baselib_RegisteredNetwork_BufferSlice dstSlice,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Return empty endpoint that doesn't point to anything
|
||||
//
|
||||
// Guaranteed to contain Baselib_RegisteredNetwork_BufferSlice_Empty
|
||||
BASELIB_API Baselib_RegisteredNetwork_Endpoint Baselib_RegisteredNetwork_Endpoint_Empty(void);
|
||||
|
||||
// Decode endpoint.
|
||||
//
|
||||
// \param endpoint Endpoint to be converted.
|
||||
// \param dstAddress Pointer to address to write data to.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if endpoint is invalid or dstAddress is null
|
||||
BASELIB_API void Baselib_RegisteredNetwork_Endpoint_GetNetworkAddress(
|
||||
Baselib_RegisteredNetwork_Endpoint endpoint,
|
||||
Baselib_NetworkAddress* dstAddress,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Request & Completion
|
||||
|
||||
// Send/receive request.
|
||||
typedef struct Baselib_RegisteredNetwork_Request
|
||||
{
|
||||
Baselib_RegisteredNetwork_BufferSlice payload;
|
||||
|
||||
// for sending: remote address to which the payload is sent (required for UDP)
|
||||
// for receiving: address from which the data was sent (optional)
|
||||
Baselib_RegisteredNetwork_Endpoint remoteEndpoint;
|
||||
|
||||
// TODO: Not support yet. (We would also need to support this in Baselib_Socket first)
|
||||
// for sending: unused
|
||||
// for receiving: local address on which the data was received (optional)
|
||||
//Baselib_RegisteredNetwork_Endpoint localEndpoint;
|
||||
|
||||
void* requestUserdata;
|
||||
} Baselib_RegisteredNetwork_Request;
|
||||
|
||||
// Success or failure of a Baselib_RegisteredNetwork_CompletionResult.
typedef enum Baselib_RegisteredNetwork_CompletionStatus
{
    // Networking request failed.
    Baselib_RegisteredNetwork_CompletionStatus_Failed = 0,
    // Networking request successfully finished.
    Baselib_RegisteredNetwork_CompletionStatus_Success = 1,
} Baselib_RegisteredNetwork_CompletionStatus;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_RegisteredNetwork_CompletionStatus);
|
||||
|
||||
// Result of a previously scheduled send/receive
|
||||
//
|
||||
// When a networking request is completed, this is placed into an internal completion queue.
|
||||
typedef struct Baselib_RegisteredNetwork_CompletionResult
|
||||
{
|
||||
Baselib_RegisteredNetwork_CompletionStatus status;
|
||||
uint32_t bytesTransferred;
|
||||
void* requestUserdata;
|
||||
} Baselib_RegisteredNetwork_CompletionResult;
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// UDP connectionless socket.
|
||||
|
||||
typedef struct Baselib_RegisteredNetwork_Socket_UDP { struct Baselib_RegisteredNetwork_Socket_UDP_Impl* handle; } Baselib_RegisteredNetwork_Socket_UDP;
|
||||
static const Baselib_RegisteredNetwork_Socket_UDP Baselib_RegisteredNetwork_Socket_UDP_Invalid = { NULL };
|
||||
|
||||
// Creates an UDP socket with internal request and completion queues.
|
||||
//
|
||||
// \param bindAddress Address to bind socket to, in connectionless UDP every socket has to be bound.
|
||||
// \param endpointReuse Allows multiple sockets to be bound to the same address/port if set to AddressReuse_Allow,
|
||||
// All sockets bound to the same address/port need to have this flag set.
|
||||
// \param sendQueueSize Send queue size in amount of entries.
|
||||
// \param recvQueueSize Receive queue size in amount of entries.
|
||||
//
|
||||
// Known issues (behavior may change in the future):
|
||||
// - Some platforms do not support sending zero sized UDP packets.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if bindAddress pointer is null or incompatible or both sendQueueSize and recvQueueSize are zero
|
||||
// - EndpointInUse: endpoint is already in use
|
||||
// - AddressFamilyNotSupported: if the requested address family is not available.
|
||||
// - OutOfSystemResources: if network session limit was exceeded
|
||||
//
|
||||
// \returns A UDP socket. If socket creation fails, socket holds a Baselib_RegisteredNetwork_Socket_UDP_InvalidHandle.
|
||||
BASELIB_API Baselib_RegisteredNetwork_Socket_UDP Baselib_RegisteredNetwork_Socket_UDP_Create(
|
||||
const Baselib_NetworkAddress* bindAddress,
|
||||
Baselib_NetworkAddress_AddressReuse endpointReuse,
|
||||
uint32_t sendQueueSize,
|
||||
uint32_t recvQueueSize,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Schedules receive requests.
|
||||
//
|
||||
// \param socket Socket to be used.
|
||||
// \param requests Array of pointers to requests. No-op if null.
|
||||
// Request objects can be freed after the function call.
|
||||
// \param requestsCount Amount of requests in the array. No-op if zero.
|
||||
//
|
||||
// If requests is null or requestsCount is zero, this operation is a no-op.
|
||||
// Note that actual receiving may be deferred until you call Baselib_RegisteredNetwork_Socket_UDP_ProcessRecv.
|
||||
// UDP message data that doesn't fit a message buffer is silently discarded.
|
||||
//
|
||||
// Known issues (behavior may change in the future):
|
||||
// - Some platforms do not support receiving zero sized UDP packets.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns The number of scheduled items. If scheduling fails this function return zero.
|
||||
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_ScheduleRecv(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
const Baselib_RegisteredNetwork_Request* requests,
|
||||
uint32_t requestsCount,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Schedules send requests.
|
||||
//
|
||||
// \param socket Socket to be used.
|
||||
// \param requests Array of pointers to requests. No-op if null.
|
||||
// Request objects can be freed after the function call.
|
||||
// \param requestsCount Amount of requests in the array. No-op if zero.
|
||||
//
|
||||
// If requests is null or requestsCount is zero, this operation is a no-op.
|
||||
// Note that actual receiving may be deferred until you call Baselib_RegisteredNetwork_Socket_UDP_ProcessSend.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns The number of scheduled items. If scheduling fails this function return zero.
|
||||
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_ScheduleSend(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
const Baselib_RegisteredNetwork_Request* requests,
|
||||
uint32_t requestsCount,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Status of processing send/recv.
|
||||
typedef enum Baselib_RegisteredNetwork_ProcessStatus
|
||||
{
|
||||
// No further items to process.
|
||||
//
|
||||
// Note that this does not imply that all requests have been fully processed at any moment in time.
|
||||
Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately = 0,
|
||||
|
||||
// deprecated, same as Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately
|
||||
Baselib_RegisteredNetwork_ProcessStatus_Done
|
||||
COMPILER_DEPRECATED_ENUM_VALUE("Use Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately instead (equivalent)") = 0,
|
||||
|
||||
// Should call again, there is more workload to process.
|
||||
Baselib_RegisteredNetwork_ProcessStatus_Pending = 1,
|
||||
} Baselib_RegisteredNetwork_ProcessStatus;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_RegisteredNetwork_ProcessStatus);
|
||||
|
||||
// Processes the receive queue on a socket.
|
||||
//
|
||||
// Needs to be called periodically to ensure requests are processed.
|
||||
// You should call this in a loop until either your time budget is exceeded or the function returns Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately.
|
||||
//
|
||||
// Platforms emulating RIO behavior with sockets, perform one receive per call until there are no more receive requests in the queue.
|
||||
// Requests failed due to empty socket receive buffer are requeued and processed at the next call to Baselib_RegisteredNetwork_Socket_UDP_ProcessRecv.
|
||||
// In that case Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately is returned since an immediate retry will not have any effect.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns Baselib_RegisteredNetwork_ProcessStatus_Pending if there is more workload to process immediately, Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately if otherwise
|
||||
BASELIB_API Baselib_RegisteredNetwork_ProcessStatus Baselib_RegisteredNetwork_Socket_UDP_ProcessRecv(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Processes the send queue on a socket.
|
||||
//
|
||||
// Needs to be called periodically to ensure requests are processed.
|
||||
// You should call this in a loop until either your time budget is exceeded or the function returns Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately.
|
||||
//
|
||||
// Platforms emulating RIO behavior with sockets, perform one send per call until there are no more send requests in the queue.
|
||||
// Requests failed due to full socket send buffer are requeued and processed at the next call to Baselib_RegisteredNetwork_Socket_UDP_ProcessSend.
|
||||
// In that case Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately is returned since an immediate retry will not have any effect.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns Baselib_RegisteredNetwork_ProcessStatus_Pending if there is more workload to process immediately, Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately if otherwise
|
||||
BASELIB_API Baselib_RegisteredNetwork_ProcessStatus Baselib_RegisteredNetwork_Socket_UDP_ProcessSend(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Status of a recv/send completion queue.
typedef enum Baselib_RegisteredNetwork_CompletionQueueStatus
{
    // No results are ready for dequeuing.
    Baselib_RegisteredNetwork_CompletionQueueStatus_NoResultsAvailable = 0,
    // Results are available for dequeuing.
    Baselib_RegisteredNetwork_CompletionQueueStatus_ResultsAvailable = 1,
} Baselib_RegisteredNetwork_CompletionQueueStatus;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_RegisteredNetwork_CompletionQueueStatus);
|
||||
|
||||
// Wait until results appears for a previously scheduled receive.
|
||||
//
|
||||
// \param timeoutInMilliseconds Wait timeout.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns Baselib_RegisteredNetwork_CompletionQueueStatus_ResultsAvailable if results are available for dequeue, Baselib_RegisteredNetwork_CompletionQueueStatus_NoResultsAvailable otherwise
|
||||
BASELIB_API Baselib_RegisteredNetwork_CompletionQueueStatus Baselib_RegisteredNetwork_Socket_UDP_WaitForCompletedRecv(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
uint32_t timeoutInMilliseconds,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Wait until results appears for a previously scheduled send.
|
||||
//
|
||||
// \param timeoutInMilliseconds Wait timeout.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns Baselib_RegisteredNetwork_CompletionQueueStatus_ResultsAvailable if results are available for dequeue, Baselib_RegisteredNetwork_CompletionQueueStatus_NoResultsAvailable otherwise
|
||||
BASELIB_API Baselib_RegisteredNetwork_CompletionQueueStatus Baselib_RegisteredNetwork_Socket_UDP_WaitForCompletedSend(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
uint32_t timeoutInMilliseconds,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Dequeue receive result.
|
||||
//
|
||||
// \param results Results array. No-op if null.
|
||||
// \param resultsCount Amount of elements in results array. No-op if zero.
|
||||
//
|
||||
// If you're calling this method on multiple threads for the same completion queue in parallel, it may spuriously return 0.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns number of dequeued entries
|
||||
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_DequeueRecv(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
Baselib_RegisteredNetwork_CompletionResult results[],
|
||||
uint32_t resultsCount,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Dequeue send result.
|
||||
//
|
||||
// \param results Results array. No-op if null.
|
||||
// \param resultsCount Amount of elements in results array. No-op if zero.
|
||||
//
|
||||
// If you're calling this method on multiple threads for the same completion queue in parallel, it may spuriously return 0.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid
|
||||
//
|
||||
// \returns number of dequeued entries
|
||||
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_DequeueSend(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
Baselib_RegisteredNetwork_CompletionResult results[],
|
||||
uint32_t resultsCount,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Get bind address of udp socket.
|
||||
//
|
||||
// \param socket Socket to be used.
|
||||
// \param dstAddress Pointer to address to write data to.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - InvalidArgument: if socket is invalid or if dstAddress is null
|
||||
BASELIB_API void Baselib_RegisteredNetwork_Socket_UDP_GetNetworkAddress(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket,
|
||||
Baselib_NetworkAddress* dstAddress,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Closes UDP socket.
|
||||
//
|
||||
// Passing an invalid socket handle results in a no-op.
|
||||
//
|
||||
// \param socket Socket to be closed.
|
||||
BASELIB_API void Baselib_RegisteredNetwork_Socket_UDP_Close(
|
||||
Baselib_RegisteredNetwork_Socket_UDP socket
|
||||
);
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
84
Libraries/external/baselib/Include/C/Baselib_Semaphore.h
vendored
Normal file
84
Libraries/external/baselib/Include/C/Baselib_Semaphore.h
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_Semaphore
|
||||
|
||||
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
|
||||
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
|
||||
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
|
||||
// decremented, or toggled) depending on programmer-defined conditions.
|
||||
//
|
||||
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
|
||||
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
|
||||
// resource becomes available.
|
||||
//
|
||||
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
|
||||
|
||||
// Maximum number of tokens the semaphore is guaranteed to hold at any given
// point in time. Tokens submitted beyond this value may be silently discarded.
static const int32_t Baselib_Semaphore_MaxGuaranteedCount = UINT16_MAX;
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#include "Internal/Baselib_Semaphore_FutexBased.inl.h"
|
||||
#else
|
||||
#include "Internal/Baselib_Semaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Creates a counting semaphore synchronization primitive.
|
||||
//
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
//
|
||||
// For optimal performance, the returned Baselib_Semaphore should be stored at a cache aligned memory location.
|
||||
//
|
||||
// \returns A struct representing a semaphore instance. Use Baselib_Semaphore_Free to free the semaphore.
|
||||
BASELIB_INLINE_API Baselib_Semaphore Baselib_Semaphore_Create(void);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
// Returns if token was consumed or was woken up by Baselib_Semaphore_ResetAndReleaseWaitingThreads.
|
||||
BASELIB_INLINE_API void Baselib_Semaphore_Acquire(Baselib_Semaphore* semaphore);
|
||||
|
||||
// Try to consume a token and return immediately.
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// \returns true if token was consumed. false if not.
|
||||
BASELIB_INLINE_API bool Baselib_Semaphore_TryAcquire(Baselib_Semaphore* semaphore);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
|
||||
// while Acquire may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \param timeout Time to wait for token to become available.
|
||||
//
|
||||
// \returns true if token was consumed or was woken up by Baselib_Semaphore_ResetAndReleaseWaitingThreads. false if timeout was reached.
|
||||
BASELIB_INLINE_API bool Baselib_Semaphore_TryTimedAcquire(Baselib_Semaphore* semaphore, const uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Submit tokens to the semaphore.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
|
||||
// If count reach `Baselib_Semaphore_MaxGuaranteedCount` this function may silently discard any overflow.
|
||||
BASELIB_INLINE_API void Baselib_Semaphore_Release(Baselib_Semaphore* semaphore, const uint16_t count);
|
||||
|
||||
// If threads are waiting on Baselib_Semaphore_Acquire / Baselib_Semaphore_TryTimedAcquire,
|
||||
// releases enough tokens to wake them up. Otherwise consumes all available tokens.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// \returns number of released threads.
|
||||
BASELIB_INLINE_API uint32_t Baselib_Semaphore_ResetAndReleaseWaitingThreads(Baselib_Semaphore* semaphore);
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr result in a no-op
|
||||
BASELIB_INLINE_API void Baselib_Semaphore_Free(Baselib_Semaphore* semaphore);
|
||||
274
Libraries/external/baselib/Include/C/Baselib_Socket.h
vendored
Normal file
274
Libraries/external/baselib/Include/C/Baselib_Socket.h
vendored
Normal file
@@ -0,0 +1,274 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib Socket
|
||||
//
|
||||
// This is a socket platform abstraction api heavily influenced by non-blocking Berkeley Sockets.
|
||||
// Berkeley Sockets look like they behave in similar fashion on all platforms, but there are a lot of small differences.
|
||||
// Compared to Berkeley Sockets this API is somewhat more high level and doesn't provide as fine grained control.
|
||||
#include "Baselib_ErrorState.h"
|
||||
#include "Baselib_NetworkAddress.h"
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Socket handle, referring to one specific socket instance.
typedef struct Baselib_Socket_Handle
{
    intptr_t handle;
} Baselib_Socket_Handle;

// Sentinel value representing "no socket".
static const Baselib_Socket_Handle Baselib_Socket_Handle_Invalid = { -1 };
|
||||
|
||||
// Transport protocol used by a socket.
typedef enum Baselib_Socket_Protocol
{
    Baselib_Socket_Protocol_UDP = 1,
    Baselib_Socket_Protocol_TCP = 2,
} Baselib_Socket_Protocol;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_Socket_Protocol);
|
||||
|
||||
// Socket message. Used to send or receive data in message based protocols such as UDP.
|
||||
typedef struct Baselib_Socket_Message
|
||||
{
|
||||
Baselib_NetworkAddress* address;
|
||||
void* data;
|
||||
uint32_t dataLen;
|
||||
} Baselib_Socket_Message;
|
||||
|
||||
// Create a socket.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: if context, family or protocol is invalid or unknown.
|
||||
// - Baselib_ErrorCode_AddressFamilyNotSupported: if the requested address family is not available.
|
||||
BASELIB_API Baselib_Socket_Handle Baselib_Socket_Create(
|
||||
Baselib_NetworkAddress_Family family,
|
||||
Baselib_Socket_Protocol protocol,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Bind socket to a local address and port.
|
||||
//
|
||||
// Bind can only be called once per socket.
|
||||
// Address can either be a specific interface ip address, or, if the encoded ip is
// nullptr / "0.0.0.0" / "::" (same as INADDR_ANY), the socket will bind to all interfaces.
|
||||
//
|
||||
// \param addressReuse A set of sockets can be bound to the same address port combination if all
|
||||
// sockets are bound with this flag set to AddressReuse_Allow, similar to
|
||||
// SO_REUSEADDR+SO_REUSEPORT.
|
||||
// Please note that setting this flag to false doesn't mean anyone is forbidden
|
||||
// to binding to the same ip/port combo, or in other words it does NOT use
|
||||
// SO_EXCLUSIVEADDRUSE where it's available.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid open socket. Address pointer is null or incompatible.
|
||||
// - Baselib_ErrorCode_AddressInUse: Address or port is already bound by another socket, or the system is out of ephemeral ports.
|
||||
// - Baselib_ErrorCode_AddressUnreachable: Address doesn't map to any known interface.
|
||||
BASELIB_API void Baselib_Socket_Bind(
|
||||
Baselib_Socket_Handle socket,
|
||||
const Baselib_NetworkAddress* address,
|
||||
Baselib_NetworkAddress_AddressReuse addressReuse,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Connect a socket to a remote address.
|
||||
//
|
||||
// Note that this function initiates an asynchronous connection. You must call
|
||||
// Baselib_Socket_Poll with Baselib_Socket_PollEvents.requestedEvents =
|
||||
// Baselib_Socket_PollEvents_Connected to wait for the connection to finish.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket. Address pointer is null or incompatible.
|
||||
// - Baselib_ErrorCode_AddressUnreachable: Unable to establish a connection with peer.
|
||||
BASELIB_API void Baselib_Socket_TCP_Connect(
|
||||
Baselib_Socket_Handle socket,
|
||||
const Baselib_NetworkAddress* address,
|
||||
Baselib_NetworkAddress_AddressReuse addressReuse,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Bitmask of events to be used in Baselib_Socket_Poll.
typedef enum Baselib_Socket_PollEvents
{
    Baselib_Socket_PollEvents_Readable  = 1,
    Baselib_Socket_PollEvents_Writable  = 2,
    // Note: Connected cannot be set at the same time as Readable and Writable.
    Baselib_Socket_PollEvents_Connected = 4,
} Baselib_Socket_PollEvents;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_Socket_PollEvents);
|
||||
|
||||
// Socket entry to be passed into Baselib_Socket_Poll.
|
||||
//
|
||||
// Note that the name `Fd` does not refer to the fact that these are file
|
||||
// descriptors (they are sockets), but rather the fact that nearly every socket
|
||||
// API calls this struct "pollfd".
|
||||
typedef struct Baselib_Socket_PollFd
|
||||
{
|
||||
Baselib_Socket_Handle handle;
|
||||
Baselib_Socket_PollEvents requestedEvents;
|
||||
Baselib_Socket_PollEvents resultEvents;
|
||||
Baselib_ErrorState* errorState;
|
||||
} Baselib_Socket_PollFd;
|
||||
|
||||
// Helper method to construct a Baselib_Socket_PollFd. Use of this method is not
|
||||
// necessary, you may fill out the struct yourself if desired.
|
||||
static inline Baselib_Socket_PollFd Baselib_Socket_PollFd_New(Baselib_Socket_Handle handle, Baselib_Socket_PollEvents events, Baselib_ErrorState* errorState)
|
||||
{
|
||||
Baselib_Socket_PollFd result;
|
||||
result.handle = handle;
|
||||
result.requestedEvents = events;
|
||||
result.resultEvents = (Baselib_Socket_PollEvents)0;
|
||||
result.errorState = errorState;
|
||||
return result;
|
||||
}
|
||||
|
||||
// Wait for a socket being readable, writable, or an error occurs. Specific
|
||||
// events that occurred will be set in sockets[i].resultEvents. Errors
|
||||
// associated with particular sockets will be reported in sockets[i].errorState.
|
||||
//
|
||||
// It is valid to have sockets[i].errorState to point to the same ErrorState as
|
||||
// the outer parameter errorState - or, more generally, you may alias whatever
|
||||
// error states within sockets[i].errorState and the parameter errorState.
|
||||
//
|
||||
// If timeoutInMilliseconds==0, Poll() will not block. There is no option to
|
||||
// wait indefinitely.
|
||||
//
|
||||
// Possible error codes on the outer parameter errorState:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Sockets list is null. An individual socket handle is invalid.
|
||||
//
|
||||
// Possible error codes on sockets[i].errorState:
|
||||
// - Baselib_ErrorCode_AddressUnreachable: Asynchronous Connect() failed.
|
||||
// - Baselib_ErrorCode_Disconnected: Socket has been disconnected, or asynchronous Connect() failed (apple devices).
|
||||
BASELIB_API void Baselib_Socket_Poll(
|
||||
Baselib_Socket_PollFd* sockets,
|
||||
uint32_t socketsCount,
|
||||
uint32_t timeoutInMilliseconds,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Get address of locally bound socket.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid bound socket. Address pointer is null.
|
||||
BASELIB_API void Baselib_Socket_GetAddress(
|
||||
Baselib_Socket_Handle socket,
|
||||
Baselib_NetworkAddress* address,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Configure a TCP server socket to begin listening for incoming connections.
|
||||
// The maximum queue size is used for each platform.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket.
|
||||
// - Baselib_ErrorCode_AddressInUse: Another socket is already listening on the same port, or the system is out of ephemeral ports.
|
||||
BASELIB_API void Baselib_Socket_TCP_Listen(
|
||||
Baselib_Socket_Handle socket,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Accept an incoming TCP connection to this server socket. When there are no
|
||||
// incoming connections, this returns Baselib_Socket_Handle_Invalid and does not
|
||||
// raise an error.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket.
|
||||
BASELIB_API Baselib_Socket_Handle Baselib_Socket_TCP_Accept(
|
||||
Baselib_Socket_Handle socket,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Send messages to unconnected destinations.
|
||||
//
|
||||
// Socket does not need to be bound before calling SendMessages.
|
||||
// When sending multiple messages an error may be raised after some of the messages were submitted.
|
||||
//
|
||||
// If the socket is not already bound to a port SendMessages will implicitly bind the socket before issuing the send operation.
|
||||
//
|
||||
// Warning: This function may not fail when called with a TCP socket, as it may
|
||||
// simply ignore the address parameter, and send to whatever the socket is
|
||||
// connected to. However, as there is no way to retrieve the actual number of
|
||||
// bytes sent with this API, its use in this manner is strongly discouraged.
|
||||
//
|
||||
// Known issues (behavior may change in the future):
|
||||
// Some platforms do not support sending zero sized UDP packets.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_AddressUnreachable: Message destination is known to not be reachable from this machine.
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket. Messages is `NULL` or a message has an invalid or incompatible destination.
|
||||
// - Baselib_ErrorCode_InvalidBufferSize: Message payload exceeds max message size.
|
||||
//
|
||||
// \returns The number of messages successfully sent. This number may be lower than messageCount if send buffer is full or an error was raised. Reported error will be about last message tried to send.
|
||||
BASELIB_API uint32_t Baselib_Socket_UDP_Send(
|
||||
Baselib_Socket_Handle socket,
|
||||
Baselib_Socket_Message messages[],
|
||||
uint32_t messagesCount,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Send a message to the connected peer.
|
||||
//
|
||||
// \returns The possibly-zero length of the message actually sent, which may be less than `dataLen`.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket. Socket validity is not checked if dataLen==0.
|
||||
// - Baselib_ErrorCode_Disconnected: Socket has been disconnected.
|
||||
BASELIB_API uint32_t Baselib_Socket_TCP_Send(
|
||||
Baselib_Socket_Handle socket,
|
||||
void* data,
|
||||
uint32_t dataLen,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Receive messages from unconnected sources.
|
||||
//
|
||||
// UDP message data that doesn't fit a message buffer is silently discarded.
|
||||
//
|
||||
// Warning: This function may not fail when called with a TCP socket, as it may
|
||||
// simply ignore the address parameter, and receive from whatever the socket is
|
||||
// connected to. However, as there is no way to retrieve the actual number of
|
||||
// bytes received with this API, its use in this manner is strongly discouraged.
|
||||
//
|
||||
// Known issues (behavior may change in the future):
|
||||
// If the socket is not bound to a port RecvMessages will return zero without raising an error.
|
||||
// Some platforms do not support receiving zero-sized UDP packets.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket. Or messages is `NULL`.
|
||||
//
|
||||
// \returns The number of messages successfully received. This number may be lower than messageCount if recv buffer is empty or an error was raised. Reported error will be about last message tried to receive.
|
||||
BASELIB_API uint32_t Baselib_Socket_UDP_Recv(
|
||||
Baselib_Socket_Handle socket,
|
||||
Baselib_Socket_Message messages[],
|
||||
uint32_t messagesCount,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Receive a message from a connected source. Note that this method differs from
|
||||
// traditional socket APIs in that it is valid to return 0, this means that no
|
||||
// data were received. Disconnection is detected by errorState being
|
||||
// Baselib_ErrorCode_Disconnected.
|
||||
//
|
||||
// This function may or may not work when passed a UDP socket. Graceful error
|
||||
// handling of this case is omitted due to performance reasons.
|
||||
//
|
||||
// \returns The length of the message actually received, which may be less than `dataLen` or even zero.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket.
|
||||
// - Baselib_ErrorCode_Disconnected: Socket has been disconnected.
|
||||
BASELIB_API uint32_t Baselib_Socket_TCP_Recv(
|
||||
Baselib_Socket_Handle socket,
|
||||
void* data,
|
||||
uint32_t dataLen,
|
||||
Baselib_ErrorState* errorState
|
||||
);
|
||||
|
||||
// Close socket.
|
||||
//
|
||||
// Closing an already closed socket results in a no-op.
|
||||
BASELIB_API void Baselib_Socket_Close(
|
||||
Baselib_Socket_Handle socket
|
||||
);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
33
Libraries/external/baselib/Include/C/Baselib_SourceLocation.h
vendored
Normal file
33
Libraries/external/baselib/Include/C/Baselib_SourceLocation.h
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
#pragma once
|
||||
|
||||
#ifndef BASELIB_ENABLE_SOURCELOCATION
|
||||
#ifdef NDEBUG
|
||||
#define BASELIB_ENABLE_SOURCELOCATION 0
|
||||
#else
|
||||
#define BASELIB_ENABLE_SOURCELOCATION 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Human-readable information about the original location of a piece of source code.
typedef struct Baselib_SourceLocation
{
    const char* file;     // source file path (__FILE__)
    const char* function; // enclosing function name (__func__)
    uint32_t lineNumber;  // line number (__LINE__)
} Baselib_SourceLocation;
|
||||
|
||||
// Macro to create source location in-place for the current line of code.
|
||||
#if BASELIB_ENABLE_SOURCELOCATION
|
||||
#define BASELIB_SOURCELOCATION Baselib_SourceLocation { __FILE__, __func__, __LINE__ }
|
||||
#else
|
||||
#define BASELIB_SOURCELOCATION Baselib_SourceLocation { NULL, NULL, 0 }
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
9
Libraries/external/baselib/Include/C/Baselib_StaticAssert.h
vendored
Normal file
9
Libraries/external/baselib/Include/C/Baselib_StaticAssert.h
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
// C99 compatible static_assert
|
||||
// Use static_assert in all C++ code directly.
|
||||
#ifdef __cplusplus
|
||||
#define BASELIB_STATIC_ASSERT(EXPR_, MSG_) static_assert(EXPR_, MSG_)
|
||||
#else
|
||||
#define BASELIB_STATIC_ASSERT(EXPR_, MSG_) COMPILER_C_STATIC_ASSERT(EXPR_, MSG_)
|
||||
#endif
|
||||
63
Libraries/external/baselib/Include/C/Baselib_SystemFutex.h
vendored
Normal file
63
Libraries/external/baselib/Include/C/Baselib_SystemFutex.h
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_SystemFutex
|
||||
|
||||
// In computing, a futex (short for "fast userspace mutex") is a kernel system call that programmers can use to implement basic locking, or as a building block
|
||||
// for higher-level locking abstractions such as semaphores and POSIX mutexes or condition variables.
|
||||
//
|
||||
// A futex consists of a kernelspace wait queue that is attached to an atomic integer in userspace. Multiple processes or threads operate on the integer
|
||||
// entirely in userspace (using atomic operations to avoid interfering with one another), and only resort to relatively expensive system calls to request
|
||||
// operations on the wait queue (for example to wake up waiting processes, or to put the current process on the wait queue). A properly programmed futex-based
|
||||
// lock will not use system calls except when the lock is contended; since most operations do not require arbitration between processes, this will not happen
|
||||
// in most cases.
|
||||
//
|
||||
// "Futex", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Futex&oldid=850172014
|
||||
|
||||
#include "Baselib_WakeupFallbackStrategy.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Determines if the platform has access to a kernel level futex api
|
||||
//
|
||||
// If native support is not present the futex will fallback to an emulated futex setup.
|
||||
//
|
||||
// Notes on the emulation:
|
||||
// * It uses a single synchronization primitive to multiplex all potential addresses. This means there will be
|
||||
// additional contention as well as spurious wakeups compared to a native implementation.
|
||||
// * While the fallback implementation is not something that should be used in production it can still provide value
|
||||
// when bringing up new platforms or to test features built on top of the futex api.
|
||||
// Reports whether this platform provides a native (kernel-level) futex implementation
// (true) or whether baselib falls back to the emulated futex described above (false).
// PLATFORM_FUTEX_NATIVE_SUPPORT is supplied by the platform detection headers.
BASELIB_INLINE_API bool Baselib_SystemFutex_NativeSupport(void) { return PLATFORM_FUTEX_NATIVE_SUPPORT == 1; }
|
||||
|
||||
// Wait for notification.
|
||||
//
|
||||
// Address will be checked atomically against expected before entering wait. This can be used to guarantee there are no lost wakeups.
|
||||
// Note: When notified the thread always wake up regardless if the expectation match the value at address or not.
|
||||
//
|
||||
// | Problem this solves
|
||||
// | Thread 1: checks condition and determine we should enter wait
|
||||
// | Thread 2: change condition and notify waiting threads
|
||||
// | Thread 1: enters waiting state
|
||||
// |
|
||||
// | With a futex the two Thread 1 operations become a single op.
|
||||
//
|
||||
// Spurious Wakeup - This function is subject to spurious wakeups.
|
||||
//
|
||||
// \param address Any address that can be read from both user and kernel space.
|
||||
// \param expected What address points to will be checked against this value. If the values don't match thread will not enter a waiting state.
|
||||
// \param timeoutInMilliseconds A timeout indicating to the kernel when to wake the thread. Regardless of being notified or not.
|
||||
BASELIB_API void Baselib_SystemFutex_Wait(int32_t* address, int32_t expected, uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Notify threads waiting on a specific address.
|
||||
//
|
||||
// \param address Any address that can be read from both user and kernel space
|
||||
// \param count Number of waiting threads to wakeup.
|
||||
// \param wakeupFallbackStrategy Platforms that don't support waking up a specific number of threads will use this strategy.
|
||||
BASELIB_API void Baselib_SystemFutex_Notify(int32_t* address, uint32_t count, Baselib_WakeupFallbackStrategy wakeupFallbackStrategy);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
81
Libraries/external/baselib/Include/C/Baselib_SystemSemaphore.h
vendored
Normal file
81
Libraries/external/baselib/Include/C/Baselib_SystemSemaphore.h
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_SystemSemaphore
|
||||
|
||||
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
|
||||
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
|
||||
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
|
||||
// decremented, or toggled) depending on programmer-defined conditions.
|
||||
//
|
||||
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
|
||||
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
|
||||
// resource becomes available.
|
||||
//
|
||||
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Opaque handle to a system-level semaphore instance.
typedef struct Baselib_SystemSemaphore_Handle
{
    void* handle;
} Baselib_SystemSemaphore_Handle;
|
||||
|
||||
// Upper bound on the number of tokens that can be made available on a semaphore.
enum { Baselib_SystemSemaphore_MaxCount = INT32_MAX };
|
||||
|
||||
// Creates a counting semaphore synchronization primitive.
|
||||
//
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
//
|
||||
// \returns A handle to a semaphore instance. Use Baselib_SystemSemaphore_Free to free the semaphore.
|
||||
BASELIB_API Baselib_SystemSemaphore_Handle Baselib_SystemSemaphore_Create(void);
|
||||
|
||||
// Creates a counting semaphore synchronization primitive given a memory buffer.
|
||||
//
|
||||
// Semaphore is created in-place in semaphoreData, which must be at least Baselib_SystemSemaphore_PlatformSize in size.
|
||||
// (Some platforms don't support in-place creation and this function then works the same as Baselib_SystemSemaphore_Create() )
|
||||
//
|
||||
// \returns A handle to a semaphore instance. Use Baselib_Semaphore_FreeInplace to free the semaphore.
|
||||
BASELIB_API Baselib_SystemSemaphore_Handle Baselib_SystemSemaphore_CreateInplace(void* semaphoreData);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
BASELIB_API void Baselib_SystemSemaphore_Acquire(Baselib_SystemSemaphore_Handle semaphore);
|
||||
|
||||
// Try to consume a token and return immediately.
|
||||
//
|
||||
// \returns true if token was consumed. false if not.
|
||||
BASELIB_API bool Baselib_SystemSemaphore_TryAcquire(Baselib_SystemSemaphore_Handle semaphore);
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// \param timeout Time to wait for token to become available.
|
||||
//
|
||||
// \returns true if token was consumed. false if timeout was reached.
|
||||
BASELIB_API bool Baselib_SystemSemaphore_TryTimedAcquire(Baselib_SystemSemaphore_Handle semaphore, uint32_t timeoutInMilliseconds);
|
||||
|
||||
// Submit tokens to the semaphore.
|
||||
//
|
||||
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
|
||||
// If count reach `Baselib_SystemSemaphore_MaxCount` this function silently discard any overflow.
|
||||
// Note that hitting max count may inflict a heavy performance penalty.
|
||||
BASELIB_API void Baselib_SystemSemaphore_Release(Baselib_SystemSemaphore_Handle semaphore, uint32_t count);
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, calling free may cause process abort.
|
||||
BASELIB_API void Baselib_SystemSemaphore_Free(Baselib_SystemSemaphore_Handle semaphore);
|
||||
|
||||
// Reclaim resources held by the semaphore created using Baselib_SystemSemaphore_CreateInplace
|
||||
//
|
||||
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
|
||||
// Must not be used to free a semaphore created with Baselib_Semaphore_Create
|
||||
BASELIB_API void Baselib_SystemSemaphore_FreeInplace(Baselib_SystemSemaphore_Handle semaphore);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
107
Libraries/external/baselib/Include/C/Baselib_Thread.h
vendored
Normal file
107
Libraries/external/baselib/Include/C/Baselib_Thread.h
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
#pragma once
|
||||
|
||||
#include "Baselib_Timer.h"
|
||||
#include "Baselib_ErrorState.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Unique thread id that can be used to compare different threads or stored for bookkeeping etc..
|
||||
typedef intptr_t Baselib_Thread_Id;
|
||||
|
||||
// Baselib_Thread_Id that is guaranteed not to represent a thread
|
||||
static const Baselib_Thread_Id Baselib_Thread_InvalidId = 0;
|
||||
|
||||
// Max number of characters for thread names internal to baselib. Used for the name field in Baselib_Thread_Config.
|
||||
// In practice thread implementation on some platforms support even fewer characters for names
|
||||
static const size_t Baselib_Thread_MaxThreadNameLength = 64;
|
||||
|
||||
// Yields the execution context of the current thread to other threads, potentially causing a context switch.
|
||||
//
|
||||
// The operating system may decide to not switch to any other thread.
|
||||
BASELIB_API void Baselib_Thread_YieldExecution(void);
|
||||
|
||||
// Return the thread id of the current thread, i.e. the thread that is calling this function
|
||||
BASELIB_API Baselib_Thread_Id Baselib_Thread_GetCurrentThreadId(void);
|
||||
|
||||
|
||||
// We currently do not allow creating threads from C# bindings,
|
||||
// since there is currently no accessible way to inform the garbage collector about new baselib threads.
|
||||
// I.e. any managed allocation on a baselib thread created from C# would never be garbage collected!
|
||||
#ifndef BASELIB_BINDING_GENERATION
|
||||
|
||||
// The minimum guaranteed number of max concurrent threads that works on all platforms.
|
||||
//
|
||||
// This only applies if all the threads are created with Baselib.
|
||||
// In practice, it might not be possible to create this many threads either. If memory is exhausted,
|
||||
// by for example creating threads with very large stacks, that might translate to a lower limit in practice.
|
||||
// Note that on many platforms the actual limit is way higher.
|
||||
static const int Baselib_Thread_MinGuaranteedMaxConcurrentThreads = 64;
|
||||
|
||||
typedef struct Baselib_Thread Baselib_Thread;
|
||||
|
||||
typedef void (*Baselib_Thread_EntryPointFunction)(void* arg);
|
||||
|
||||
typedef struct Baselib_Thread_Config
|
||||
{
|
||||
// Nullterminated name of the created thread (optional)
|
||||
// Useful exclusively for debugging - which tooling it is shown by and how it can be queried is platform dependent.
|
||||
// Truncated to Baselib_Thread_MaxThreadNameLength number of characters and copied to an internal buffer
|
||||
const char* name;
|
||||
|
||||
// The minimum size in bytes to allocate for the thread stack. (optional)
|
||||
// If not set, a platform/system specific default stack size will be used.
|
||||
// If the value set does not conform to platform specific minimum values or alignment requirements,
|
||||
// the actual stack size used will be bigger than what was requested.
|
||||
uint64_t stackSize;
|
||||
|
||||
// Required, this is set by calling Baselib_Thread_ConfigCreate with a valid entry point function.
|
||||
Baselib_Thread_EntryPointFunction entryPoint;
|
||||
|
||||
// Argument to the entry point function, does only need to be set if entryPoint takes an argument.
|
||||
void* entryPointArgument;
|
||||
} Baselib_Thread_Config;
|
||||
|
||||
// Creates and starts a new thread.
|
||||
//
|
||||
// On some platforms the thread name is not set until the thread has begun executing, which is not guaranteed
|
||||
// to have happened when the creation function returns. There is typically a platform specific limit on the length of
|
||||
// the thread name. If config.name is longer than this limit, the name will be automatically truncated.
|
||||
//
|
||||
// \param config A pointer to a config object. entryPoint needs to be a valid function pointer, all other properties can be zero/null.
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: config.entryPoint is null
|
||||
// - Baselib_ErrorCode_OutOfSystemResources: there is not enough memory to create a thread with that stack size or the system limit of number of concurrent threads has been reached
|
||||
BASELIB_API Baselib_Thread* Baselib_Thread_Create(Baselib_Thread_Config config, Baselib_ErrorState* errorState);
|
||||
|
||||
|
||||
// Waits until a thread has finished its execution.
|
||||
//
|
||||
// Also frees its resources.
|
||||
// If called and completed successfully, no Baselib_Thread function can be called again on the same Baselib_Thread.
|
||||
//
|
||||
// \param thread A pointer to a thread object.
|
||||
// \param timeoutInMilliseconds Time to wait for the thread to finish
|
||||
//
|
||||
// Possible error codes:
|
||||
// - Baselib_ErrorCode_InvalidArgument: thread is null
|
||||
// - Baselib_ErrorCode_ThreadCannotJoinSelf: the thread parameter points to the current thread, i.e. the thread that is calling this function
|
||||
// - Baselib_ErrorCode_Timeout: timeout is reached before the thread has finished
|
||||
BASELIB_API void Baselib_Thread_Join(Baselib_Thread* thread, uint32_t timeoutInMilliseconds, Baselib_ErrorState* errorState);
|
||||
|
||||
// Return the thread id of the thread given as argument
|
||||
//
|
||||
// \param thread A pointer to a thread object.
|
||||
BASELIB_API Baselib_Thread_Id Baselib_Thread_GetId(Baselib_Thread* thread);
|
||||
|
||||
// Returns true if there is support in baselib for threads on this platform, otherwise false.
|
||||
BASELIB_API bool Baselib_Thread_SupportsThreads(void);
|
||||
|
||||
#endif // !BASELIB_BINDING_GENERATION
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
58
Libraries/external/baselib/Include/C/Baselib_ThreadLocalStorage.h
vendored
Normal file
58
Libraries/external/baselib/Include/C/Baselib_ThreadLocalStorage.h
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
#pragma once
|
||||
|
||||
// Baselib_ThreadLocalStorage
|
||||
|
||||
// Thread-local storage (TLS) is a computer programming method that uses static or global memory local to a thread.
|
||||
//
|
||||
// TLS is used in some places where ordinary, single-threaded programs would use global variables, but where this would be inappropriate
|
||||
// in multithreaded cases. An example of such situations is where functions use a global variable to set an error condition
|
||||
// (for example the global variable errno used by many functions of the C library). If errno were a global variable,
|
||||
// a call of a system function on one thread may overwrite the value previously set by a call of a system function on a different thread,
|
||||
// possibly before following code on that different thread could check for the error condition. The solution is to have errno be a variable
|
||||
// that looks like it is global, but in fact exists once per thread—i.e., it lives in thread-local storage. A second use case would be
|
||||
// multiple threads accumulating information into a global variable. To avoid a race condition, every access to this global variable would
|
||||
// have to be protected by a mutex. Alternatively, each thread might accumulate into a thread-local variable (that, by definition,
|
||||
// cannot be read from or written to from other threads, implying that there can be no race conditions). Threads then only have to synchronise
|
||||
// a final accumulation from their own thread-local variable into a single, truly global variable.
|
||||
//
|
||||
// Many systems impose restrictions on the size of the thread-local memory block, in fact often rather tight limits.
|
||||
// On the other hand, if a system can provide at least a memory address (pointer) sized variable thread-local, then this allows the use of
|
||||
// arbitrarily sized memory blocks in a thread-local manner, by allocating such a memory block dynamically and storing the memory address of
|
||||
// that block in the thread-local variable.
|
||||
//
|
||||
// "Thread-local storage", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Thread-local_storage&oldid=860347814
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// It's guaranteed that we can allocate at least Baselib_TLS_MinimumGuaranteedSlots values on all platforms.
|
||||
static const uint32_t Baselib_TLS_MinimumGuaranteedSlots = 100;
|
||||
|
||||
// Thread Local Storage slot handle.
|
||||
typedef uintptr_t Baselib_TLS_Handle;
|
||||
|
||||
// Allocates a new Thread Local Storage slot. In case of an error, abort with Baselib_ErrorCode_OutOfSystemResources will be triggered.
|
||||
// On some platforms this might be fiber local storage.
|
||||
//
|
||||
// The value of a newly created Thread Local Storage slot is guaranteed to be zero on all threads.
|
||||
BASELIB_API Baselib_TLS_Handle Baselib_TLS_Alloc(void);
|
||||
|
||||
// Frees provided Thread Local Storage slot.
|
||||
BASELIB_API void Baselib_TLS_Free(Baselib_TLS_Handle handle);
|
||||
|
||||
// Sets value to Thread Local Storage slot.
|
||||
BASELIB_FORCEINLINE_API void Baselib_TLS_Set(Baselib_TLS_Handle handle, uintptr_t value);
|
||||
|
||||
// Gets value from Thread Local Storage slot.
|
||||
//
|
||||
// If called on just initialized variable, guaranteed to return 0.
|
||||
BASELIB_FORCEINLINE_API uintptr_t Baselib_TLS_Get(Baselib_TLS_Handle handle);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
|
||||
#include <C/Baselib_ThreadLocalStorage.inl.h>
|
||||
85
Libraries/external/baselib/Include/C/Baselib_Timer.h
vendored
Normal file
85
Libraries/external/baselib/Include/C/Baselib_Timer.h
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
#pragma once
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Time conversion factors.
|
||||
//
|
||||
// (not an enum since Int32 can't represent Baselib_NanosecondsPerMinute)
|
||||
static const uint64_t Baselib_SecondsPerMinute = 60ULL;
|
||||
static const uint64_t Baselib_MillisecondsPerSecond = 1000ULL;
|
||||
static const uint64_t Baselib_MillisecondsPerMinute = 60ULL * 1000ULL;
|
||||
static const uint64_t Baselib_MicrosecondsPerMillisecond = 1000ULL;
|
||||
static const uint64_t Baselib_MicrosecondsPerSecond = 1000ULL * 1000ULL;
|
||||
static const uint64_t Baselib_MicrosecondsPerMinute = 60ULL * 1000ULL * 1000ULL;
|
||||
static const uint64_t Baselib_NanosecondsPerMicrosecond = 1000ULL;
|
||||
static const uint64_t Baselib_NanosecondsPerMillisecond = 1000ULL * 1000ULL;
|
||||
static const uint64_t Baselib_NanosecondsPerSecond = 1000ULL * 1000ULL * 1000ULL;
|
||||
static const uint64_t Baselib_NanosecondsPerMinute = 60ULL * 1000ULL * 1000ULL * 1000ULL;
|
||||
|
||||
// Timer specific representation of time progression
|
||||
typedef uint64_t Baselib_Timer_Ticks;
|
||||
|
||||
// Baselib_Timer_Ticks are guaranteed to be more granular than this constant.
|
||||
static const uint64_t Baselib_Timer_MaxNumberOfNanosecondsPerTick = 1000ULL;
|
||||
|
||||
// Baselib_Timer_Ticks are guaranteed to be less granular than this constant.
|
||||
static const double Baselib_Timer_MinNumberOfNanosecondsPerTick = 0.01;
|
||||
|
||||
// Defines the conversion ratio from Baselib_Timer_Ticks to nanoseconds as a fraction.
|
||||
typedef struct Baselib_Timer_TickToNanosecondConversionRatio
|
||||
{
|
||||
uint64_t ticksToNanosecondsNumerator;
|
||||
uint64_t ticksToNanosecondsDenominator;
|
||||
} Baselib_Timer_TickToNanosecondConversionRatio;
|
||||
|
||||
// Returns the conversion ratio between ticks and nanoseconds.
|
||||
//
|
||||
// The conversion factor is guaranteed to be constant for the entire application for its entire lifetime.
|
||||
// However, it may be different on every start of the application.
|
||||
//
|
||||
// \returns The conversion factor from ticks to nanoseconds as an integer fraction.
|
||||
BASELIB_API Baselib_Timer_TickToNanosecondConversionRatio Baselib_Timer_GetTicksToNanosecondsConversionRatio(void);
|
||||
|
||||
// The fraction of Baselib_Timer_GetTicksToNanosecondsConversionRatio as a precomputed double value. It is subject to precision loss.
|
||||
//
|
||||
// Attention:
|
||||
// This value is determined during static initialization of baselib. As such it should not be used if it is not guaranteed that baselib is fully loaded.
|
||||
// Prefer Baselib_Timer_GetTicksToNanosecondsConversionRatio when in doubt.
|
||||
extern BASELIB_API const double Baselib_Timer_TickToNanosecondsConversionFactor;
|
||||
|
||||
// Get the current tick count of the high precision timer.
|
||||
//
|
||||
// Accuracy:
|
||||
// It is assumed that the accuracy corresponds to the granularity of Baselib_Timer_Ticks (which is determined by Baselib_Timer_GetTicksToNanosecondsConversionRatio).
|
||||
// However, there are no strict guarantees on the accuracy of the timer.
|
||||
//
|
||||
// Monotony:
|
||||
// ATTENTION: On some platforms this clock is suspended during application/device sleep states.
|
||||
// The timer is not susceptible to wall clock time changes by the user.
|
||||
// Different threads are guaranteed to be on the same timeline.
|
||||
//
|
||||
// Known issues:
|
||||
// * Some web browsers impose Spectre mitigation which can introduce jitter in this timer.
|
||||
// * Some web browsers may have different timelines per thread/webworker if they are not spawned on startup (this is a bug according to newest W3C specification)
|
||||
//
|
||||
// \returns Current tick value of the high precision timer.
|
||||
BASELIB_API Baselib_Timer_Ticks Baselib_Timer_GetHighPrecisionTimerTicks(void);
|
||||
|
||||
// This function will wait for at least the requested amount of time before returning.
|
||||
//
|
||||
// Unlike some implementations of 'sleep', passing 0 does NOT guarantee a thread yield and may return immediately! Use the corresponding functionality in Baselib_Thread instead.
|
||||
//
|
||||
// \param timeInMilliseconds Time to wait in milliseconds
|
||||
BASELIB_API void Baselib_Timer_WaitForAtLeast(uint32_t timeInMilliseconds);
|
||||
|
||||
// Time since application startup in seconds.
|
||||
//
|
||||
// Disregarding potential rounding errors, all threads are naturally on the same timeline (i.e. time since process start).
|
||||
BASELIB_API double Baselib_Timer_GetTimeSinceStartupInSeconds(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
33
Libraries/external/baselib/Include/C/Baselib_WakeupFallbackStrategy.h
vendored
Normal file
33
Libraries/external/baselib/Include/C/Baselib_WakeupFallbackStrategy.h
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
#pragma once
|
||||
|
||||
#include "Internal/Baselib_EnumSizeCheck.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
// Can be used to control the wakeup behavior on platforms that don't support waking up a specific number of threads.
|
||||
// Syscalls don't come for free so you need to weigh the cost of doing multiple syscalls against the cost of having lots of context switches.
|
||||
//
|
||||
// There are however two easy cases.
|
||||
// * When you only want to notify one thread use Baselib_WakeupFallbackStrategy_OneByOne.
|
||||
// * When you want to wakeup all threads use Baselib_WakeupFallbackStrategy_All
|
||||
//
|
||||
// For the not so easy cases.
|
||||
// * Use Baselib_WakeupFallbackStrategy_OneByOne when wake count is low, or significantly lower than the number of waiting threads.
|
||||
// * Use Baselib_WakeupFallbackStrategy_All if wake count is high.
|
||||
typedef enum Baselib_WakeupFallbackStrategy
|
||||
{
|
||||
// Do one syscall for each waiting thread or notification.
|
||||
Baselib_WakeupFallbackStrategy_OneByOne,
|
||||
|
||||
// Do a single syscall to wake all waiting threads.
|
||||
Baselib_WakeupFallbackStrategy_All,
|
||||
} Baselib_WakeupFallbackStrategy;
|
||||
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_WakeupFallbackStrategy);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
152
Libraries/external/baselib/Include/C/Internal/Baselib_CappedSemaphore_FutexBased.inl.h
vendored
Normal file
152
Libraries/external/baselib/Include/C/Internal/Baselib_CappedSemaphore_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,152 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemFutex.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "Only use this implementation on top of a proper futex, in all other situations us Baselib_CappedSemaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Layout note: 'wakeups' and 'count' are deliberately spaced out onto different
// cache lines. The idea is that threads waking up from sleep should not have to
// touch the cache line where 'count' is stored, and only touch 'wakeups'.
// The only exception to that rule is when an acquire hits a timeout.
typedef struct Baselib_CappedSemaphore
{
    // Published-but-unconsumed wakeup tokens; also the futex word waiters sleep on.
    int32_t wakeups;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
    // Available tokens when positive; when negative, -count is the number of waiting threads.
    int32_t count;
    // Maximum token count; constant after creation.
    const int32_t cap;
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t) * 2]; // Having cap on the same cacheline is fine since it is a constant.
} Baselib_CappedSemaphore;
|
||||
|
||||
BASELIB_STATIC_ASSERT(sizeof(Baselib_CappedSemaphore) == PLATFORM_CACHE_LINE_SIZE * 2, "Baselib_CappedSemaphore (Futex) size should match 2*cacheline size (128bytes)");
|
||||
BASELIB_STATIC_ASSERT(offsetof(Baselib_CappedSemaphore, wakeups) ==
|
||||
(offsetof(Baselib_CappedSemaphore, count) - PLATFORM_CACHE_LINE_SIZE), "Baselib_CappedSemaphore (futex) wakeups and count shouldnt share cacheline");
|
||||
|
||||
|
||||
// Creates a capped semaphore with zero tokens available and the given maximum
// token count. The returned value must be stored before use; it may be freely
// copied until then.
BASELIB_INLINE_API Baselib_CappedSemaphore Baselib_CappedSemaphore_Create(const uint16_t cap)
{
    const Baselib_CappedSemaphore result = { 0, {0}, 0, cap, {0} };
    return result;
}
|
||||
|
||||
// Attempts to atomically consume one published wakeup token.
// Returns true on success, false if no wakeup token was available.
BASELIB_INLINE_API bool Detail_Baselib_CappedSemaphore_ConsumeWakeup(Baselib_CappedSemaphore* semaphore)
{
    // The CAS updates 'pending' with the current value on failure, so the loop
    // re-evaluates against fresh state each iteration.
    for (int32_t pending = Baselib_atomic_load_32_relaxed(&semaphore->wakeups); pending > 0;)
    {
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->wakeups, &pending, pending - 1))
            return true;
    }
    return false;
}
|
||||
|
||||
// Tries to take one token without blocking.
// Returns true if a token was taken, false if none were available.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryAcquire(Baselib_CappedSemaphore* semaphore)
{
    // The CAS refreshes 'available' on failure, so each retry sees current state.
    for (int32_t available = Baselib_atomic_load_32_relaxed(&semaphore->count); available > 0;)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &available, available - 1))
            return true;
    }
    return false;
}
|
||||
|
||||
// Acquires a token, blocking until one becomes available.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Acquire(Baselib_CappedSemaphore* semaphore)
{
    // Fast path: a positive previous count means a token was available and we
    // took it; no waiting required.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;

    // Slow path: count is now non-positive, registering us as a waiter
    // (releasers read -count as the number of waiting threads). Sleep on the
    // wakeups futex word until a releaser publishes a wakeup we can consume.
    // The wait compares against 0, so it returns immediately if wakeups were
    // already posted; spurious wakeups are handled by re-checking in the loop.
    while (!Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore))
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, UINT32_MAX);
    }
}
|
||||
|
||||
// Tries to acquire a token, waiting up to timeoutInMilliseconds for one to become available.
// Returns true if a token was acquired, false if the timeout was reached.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryTimedAcquire(Baselib_CappedSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: a positive previous count means a token was available.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;

    // A wakeup may already have been published by a concurrent release.
    if (Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore))
        return true;

    // Sleep on the wakeups futex word, re-checking for a published wakeup each
    // time we wake (including spurious wakeups), until the countdown expires.
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, timeLeft);
        if (Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);

    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            // Successfully handing our token back means we timed out cleanly.
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore));
    return true;
}
|
||||
|
||||
// Releases up to _count tokens, clamped so available tokens never exceed the cap.
// Returns the number of tokens actually released (0 if already at the cap).
BASELIB_INLINE_API uint16_t Baselib_CappedSemaphore_Release(Baselib_CappedSemaphore* semaphore, const uint16_t _count)
{
    int32_t count = _count;
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    do
    {
        // Already at the cap: nothing can be released.
        if (previousCount == semaphore->cap)
            return 0;

        // Clamp so the stored count never exceeds the cap. The CAS below
        // refreshes previousCount on failure, so the clamp is re-evaluated.
        if (previousCount + count > semaphore->cap)
            count = semaphore->cap - previousCount;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->count, &previousCount, previousCount + count));

    // A negative previous count encodes the number of waiting threads. Publish
    // one wakeup per released token (capped at the waiter count) and notify the
    // futex word so sleepers can consume them.
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
        Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
    }
    return count;
}
|
||||
|
||||
// Drains all tokens and wakes every waiting thread.
// Returns the number of threads that were woken up.
BASELIB_INLINE_API uint32_t Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(Baselib_CappedSemaphore* semaphore)
{
    const int32_t previous = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    if (OPTIMIZER_LIKELY(previous >= 0))
        return 0;
    // A negative count encodes the number of waiting threads; publish one
    // wakeup per waiter and notify them all in a single syscall.
    const int32_t numWaiting = -previous;
    Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, numWaiting);
    Baselib_SystemFutex_Notify(&semaphore->wakeups, numWaiting, Baselib_WakeupFallbackStrategy_All);
    return numWaiting;
}
|
||||
|
||||
// Asserts that the semaphore has no waiting threads; a null pointer is a no-op.
// The futex-based semaphore owns no system resources, so nothing is reclaimed.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Free(Baselib_CappedSemaphore* semaphore)
{
    if (!semaphore)
        return;
    const int32_t remainingCount = Baselib_atomic_load_32_seq_cst(&semaphore->count);
    BaselibAssert(remainingCount >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
}
|
||||
122
Libraries/external/baselib/Include/C/Internal/Baselib_CappedSemaphore_SemaphoreBased.inl.h
vendored
Normal file
122
Libraries/external/baselib/Include/C/Internal/Baselib_CappedSemaphore_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemSemaphore.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "It's highly recommended to use Baselib_CappedSemaphore_FutexBased.inl.h on platforms which has native semaphore support"
|
||||
#endif
|
||||
|
||||
typedef struct Baselib_CappedSemaphore
{
    // Handle to the system semaphore created in-place in _systemSemaphoreData.
    Baselib_SystemSemaphore_Handle handle;
    // Available tokens when positive; when negative, -count is the number of waiting threads.
    int32_t count;
    // Maximum token count; constant after creation.
    const int32_t cap;
    // Make the capped semaphore take a full cache line so that if the user cacheline aligned semaphore,
    // llsc operations on count will not spuriously fail.
    char _cachelineSpacer[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t) * 2 - sizeof(Baselib_SystemSemaphore_Handle)];
    // Backing storage for the in-place system semaphore (see Baselib_SystemSemaphore_CreateInplace).
    char _systemSemaphoreData[Baselib_SystemSemaphore_PlatformSize];
} Baselib_CappedSemaphore;
|
||||
|
||||
BASELIB_STATIC_ASSERT((offsetof(Baselib_CappedSemaphore, count) + PLATFORM_CACHE_LINE_SIZE - sizeof(Baselib_SystemSemaphore_Handle)) ==
|
||||
offsetof(Baselib_CappedSemaphore, _systemSemaphoreData), "count and internalData must not share cacheline");
|
||||
|
||||
// Creates a capped semaphore with zero tokens available and the given maximum
// token count, constructing the backing system semaphore in-place.
BASELIB_INLINE_API Baselib_CappedSemaphore Baselib_CappedSemaphore_Create(uint16_t cap)
{
    Baselib_CappedSemaphore result = {{0}, 0, cap, {0}, {0}};
    result.handle = Baselib_SystemSemaphore_CreateInplace(&result._systemSemaphoreData);
    return result;
}
|
||||
|
||||
// Acquires a token, blocking on the system semaphore until one becomes available.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Acquire(Baselib_CappedSemaphore* semaphore)
{
    // A positive previous count means we took a token on the fast path;
    // otherwise we are registered as a waiter and must block.
    const int32_t tokensBefore = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_UNLIKELY(tokensBefore <= 0))
        Baselib_SystemSemaphore_Acquire(semaphore->handle);
}
|
||||
|
||||
// Tries to take one token without blocking.
// Returns true if a token was taken, false if none were available.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryAcquire(Baselib_CappedSemaphore* semaphore)
{
    // The CAS refreshes 'available' on failure, so each retry sees current state.
    for (int32_t available = Baselib_atomic_load_32_relaxed(&semaphore->count); available > 0;)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &available, available - 1))
            return true;
    }
    return false;
}
|
||||
|
||||
// Tries to acquire a token, waiting up to timeoutInMilliseconds for one to become available.
// Returns true if a token was acquired, false if the timeout was reached.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryTimedAcquire(Baselib_CappedSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: a positive previous count means a token was available.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;

    // Block on the system semaphore for a releaser to hand us a token.
    if (OPTIMIZER_LIKELY(Baselib_SystemSemaphore_TryTimedAcquire(semaphore->handle, timeoutInMilliseconds)))
        return true;

    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            // Successfully handing our token back means we timed out cleanly.
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Baselib_SystemSemaphore_TryAcquire(semaphore->handle));
    return true;
}
|
||||
|
||||
// Releases up to _count tokens, clamped so available tokens never exceed the cap.
// Returns the number of tokens actually released (0 if already at the cap).
BASELIB_INLINE_API uint16_t Baselib_CappedSemaphore_Release(Baselib_CappedSemaphore* semaphore, const uint16_t _count)
{
    int32_t count = _count;
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    do
    {
        // Already at the cap: nothing can be released.
        if (previousCount == semaphore->cap)
            return 0;

        // Clamp so the stored count never exceeds the cap. The CAS below
        // refreshes previousCount on failure, so the clamp is re-evaluated.
        if (previousCount + count > semaphore->cap)
            count = semaphore->cap - previousCount;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->count, &previousCount, previousCount + count));

    // A negative previous count encodes the number of waiting threads; wake one
    // waiter per released token via the system semaphore.
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
    }
    return count;
}
|
||||
|
||||
// Drains all tokens and wakes every waiting thread.
// Returns the number of threads that were woken up.
BASELIB_INLINE_API uint32_t Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(Baselib_CappedSemaphore* semaphore)
{
    const int32_t previous = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    if (OPTIMIZER_UNLIKELY(previous < 0))
    {
        // A negative count encodes the number of waiting threads.
        const int32_t numWaiting = -previous;
        Baselib_SystemSemaphore_Release(semaphore->handle, numWaiting);
        return numWaiting;
    }
    return 0;
}
|
||||
|
||||
// Reclaims the in-place system semaphore; a null pointer is a no-op.
// Asserts that no threads are still waiting before freeing.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Free(Baselib_CappedSemaphore* semaphore)
{
    if (!semaphore)
        return;
    const int32_t remainingCount = Baselib_atomic_load_32_seq_cst(&semaphore->count);
    BaselibAssert(remainingCount >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
    Baselib_SystemSemaphore_FreeInplace(semaphore->handle);
}
|
||||
7
Libraries/external/baselib/Include/C/Internal/Baselib_EnumSizeCheck.h
vendored
Normal file
7
Libraries/external/baselib/Include/C/Internal/Baselib_EnumSizeCheck.h
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_StaticAssert.h"
|
||||
|
||||
#define BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(_enumType) \
|
||||
BASELIB_STATIC_ASSERT(sizeof(_enumType) == 4, \
|
||||
"Baselib assumes that sizeof any enum type is exactly 4 bytes, there might be ABI compatibility problems if violated");
|
||||
198
Libraries/external/baselib/Include/C/Internal/Baselib_EventSemaphore_FutexBased.inl.h
vendored
Normal file
198
Libraries/external/baselib/Include/C/Internal/Baselib_EventSemaphore_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemFutex.h"
|
||||
|
||||
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "Only use this implementation on top of a proper futex, in all other situations us Baselib_EventSemaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
typedef struct Baselib_EventSemaphore
{
    // Packed state word: the two high bits encode Set/Reset/ResetNoWaitingThreads,
    // the remaining bits hold a generation counter (see the state-machine notes below).
    int32_t state;
    // Pad to a full cache line to avoid false sharing with adjacent data.
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
} Baselib_EventSemaphore;
|
||||
|
||||
BASELIB_STATIC_ASSERT(sizeof(Baselib_EventSemaphore) == PLATFORM_CACHE_LINE_SIZE, "Baselib_EventSemaphore size should match cacheline size (64bytes)");
|
||||
|
||||
// The futex based event semaphore is in one of *three* states:
|
||||
// * ResetNoWaitingThreads: EventSemaphore blocks threads, but there aren't any blocked yet
|
||||
// * Reset: EventSemaphore blocks threads and there are some already
|
||||
// * Set: EventSemaphore is not blocking any acquiring threads
|
||||
//
|
||||
// The ResetNoWaitingThreads state is an optimization that allows us to avoid the (comparatively) costly futex notification syscalls.
|
||||
//
|
||||
// In addition, there is a generation counter baked into the state variable in order to prevent lock stealing.
|
||||
// -> Any change in the state during acquire (other than going from ResetNoWaitingThreads to Reset) means that the thread can continue
|
||||
// (since in this case either it was set on the current generation or the generation was changed which implies an earlier release operation)
|
||||
//
|
||||
// Allowed state transitions:
|
||||
// ResetNoWaitingThreads-Gen(X) -> Reset-Gen(X) == Acquire/TryTimedAcquire if no thread was waiting already
|
||||
// ResetNoWaitingThreads-Gen(X) -> Set-Gen(X) == Set but no thread was waiting
|
||||
// Reset-Gen(X) -> Set-Get(X+1) == Set if threads were waiting
|
||||
// Set-Get(X) -> ResetNoWaitingThreads-Gen(X) == Reset/ResetAndReleaseWaitingThreads
|
||||
// Reset-Gen(X) -> ResetNoWaitingThreads-Gen(X+1) == ResetAndReleaseWaitingThreads if threads were waiting
|
||||
//
|
||||
// Note how any state transition from Reset requires increasing the generation counter.
|
||||
|
||||
// State flags stored in the top two bits of Baselib_EventSemaphore::state.
// The remaining 30 low bits hold the generation counter (selected by GenMask).
enum
{
    // Implicit state: both flag bits clear means "reset, no waiting threads".
    //Detail_Baselib_EventSemaphore_ResetNoWaitingThreads = 0,
    Detail_Baselib_EventSemaphore_Set = (uint32_t)1 << 30,
    Detail_Baselib_EventSemaphore_Reset = (uint32_t)2 << 30,
    // Mask selecting the generation-counter bits (everything below the flag bits).
    Detail_Baselib_EventSemaphore_GenMask = ~((uint32_t)(1 | 2) << 30)
};
|
||||
|
||||
// Extracts the generation-counter portion of a state word (strips the flag bits).
static FORCE_INLINE uint32_t Detail_Baselib_EventSemaphore_Generation(int32_t state)
{
    return state & Detail_Baselib_EventSemaphore_GenMask;
}
|
||||
|
||||
// If Detail_Baselib_EventSemaphore_ResetNoWaitingThreads is set, sets Detail_Baselib_EventSemaphore_Reset flag.
|
||||
// Returns last known state of the semaphore.
|
||||
// Does nothing if state changed while this function runs (that includes generation changes while attempting to set the ResetState!)
|
||||
static FORCE_INLINE uint32_t Detail_Baselib_EventSemaphore_TransitionFrom_ResetNoWaitingThreadsState_To_ResetState(Baselib_EventSemaphore* semaphore)
|
||||
{
|
||||
int32_t state = Baselib_atomic_load_32_acquire(&semaphore->state);
|
||||
const int32_t resetState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Reset;
|
||||
const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
|
||||
while (state == resetNoWaitingThreadsState)
|
||||
{
|
||||
if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->state, &state, resetState))
|
||||
return resetState;
|
||||
}
|
||||
return state;
|
||||
}
|
||||
|
||||
// Creates an event semaphore in the initial ResetNoWaitingThreads state
// (all bits zero: generation 0, no flags).
BASELIB_INLINE_API Baselib_EventSemaphore Baselib_EventSemaphore_Create(void)
{
    const Baselib_EventSemaphore initialState = { 0, {0} };
    return initialState;
}
|
||||
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryAcquire(Baselib_EventSemaphore* semaphore)
|
||||
{
|
||||
const int32_t state = Baselib_atomic_load_32_acquire(&semaphore->state);
|
||||
return state & Detail_Baselib_EventSemaphore_Set ? true : false;
|
||||
}
|
||||
|
||||
// Blocks the calling thread until the semaphore is set.
BASELIB_INLINE_API void Baselib_EventSemaphore_Acquire(Baselib_EventSemaphore* semaphore)
{
    // Register as a (potential) waiter: ResetNoWaitingThreads -> Reset, and fetch the state.
    const int32_t state = Detail_Baselib_EventSemaphore_TransitionFrom_ResetNoWaitingThreadsState_To_ResetState(semaphore);
    if (state & Detail_Baselib_EventSemaphore_Set)
        return;
    do
    {
        // State is now in Detail_Baselib_EventSemaphore_Reset-Gen(X).
        Baselib_SystemFutex_Wait(&semaphore->state, state, UINT32_MAX);
        // If the state has changed in any way, it is now in either of
        // Set-Gen(X), Set-Gen(X+n), ResetNoWaitingThreads-Gen(X+n) or Reset(X+n). (with n>0)
        // Any such change implies a release operation happened, so we may proceed
        // (the generation counter prevents lock stealing across Set/Reset cycles).
        if (state != Baselib_atomic_load_32_relaxed(&semaphore->state))
            return;
    }
    while (true);
}
|
||||
|
||||
// Blocks the calling thread until the semaphore is set or the timeout expires.
// Returns true if acquired, false on timeout.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryTimedAcquire(Baselib_EventSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Register as a (potential) waiter: ResetNoWaitingThreads -> Reset, and fetch the state.
    const int32_t state = Detail_Baselib_EventSemaphore_TransitionFrom_ResetNoWaitingThreadsState_To_ResetState(semaphore);
    if (state & Detail_Baselib_EventSemaphore_Set)
        return true;
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        // State is now in Detail_Baselib_EventSemaphore_Reset-Gen(X).
        Baselib_SystemFutex_Wait(&semaphore->state, state, timeLeft);
        // If the state has changed in any way, it is now in either of
        // Set-Gen(X), Set-Gen(X+n), ResetNoWaitingThreads-Gen(X+n) or Reset(X+n). (with n>0)
        if (state != Baselib_atomic_load_32_relaxed(&semaphore->state))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);

    // The EventSemaphore looks now like there are still threads waiting even if there *might* be none!
    // This is not an issue however, since it merely means that Set/ResetAndReleaseWaitingThreads will do a potentially redundant futex notification.

    return false;
}
|
||||
|
||||
// Puts the semaphore back into the non-signaled state.
// Only the transition Set-Gen(X) -> ResetNoWaitingThreads-Gen(X) is performed here;
// if the semaphore is observed in any other state, a concurrent operation already
// changed it and only the release fence is issued.
BASELIB_INLINE_API void Baselib_EventSemaphore_Reset(Baselib_EventSemaphore* semaphore)
{
    int32_t state = Baselib_atomic_load_32_relaxed(&semaphore->state);
    const int32_t setState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Set;
    while (state == setState)
    {
        const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, resetNoWaitingThreadsState))
            return;
    }
    // Not in the Set state (or a racer won): still provide release semantics to the caller.
    Baselib_atomic_thread_fence_release();
}
|
||||
|
||||
// Signals the semaphore, releasing all current waiters and letting subsequent
// acquires succeed until the next Reset.
BASELIB_INLINE_API void Baselib_EventSemaphore_Set(Baselib_EventSemaphore* semaphore)
{
    int32_t state = Baselib_atomic_load_32_relaxed(&semaphore->state);
    const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
    const int32_t resetState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Reset;

    // If there is no thread waiting on the semaphore, there is no need to wake & increase the generation count.
    // Just set it to Set if it isn't already.
    while (state == resetNoWaitingThreadsState)
    {
        const int32_t setState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Set;
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, setState))
            return;
    }
    // If this is not the case however, we do exactly that, increase the generation & wake all threads.
    while (state == resetState)
    {
        const int32_t nextGenSetState = Detail_Baselib_EventSemaphore_Generation(state + 1) | Detail_Baselib_EventSemaphore_Set;
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, nextGenSetState))
        {
            Baselib_SystemFutex_Notify(&semaphore->state, UINT32_MAX, Baselib_WakeupFallbackStrategy_All);
            return;
        }
    }
    // EventSemaphore was already in set state.
    Baselib_atomic_thread_fence_release();
}
|
||||
|
||||
// Releases all currently waiting threads and leaves the semaphore non-signaled.
BASELIB_INLINE_API void Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(Baselib_EventSemaphore* semaphore)
{
    // Note that doing a Baselib_EventSemaphore_Set & Baselib_EventSemaphore_Reset has the same observable effects, just slightly slower.

    int32_t state = Baselib_atomic_load_32_relaxed(&semaphore->state);
    const int32_t setState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Set;
    const int32_t resetState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Reset;

    // If there is no thread waiting on the semaphore, there is no need to wake & increase the generation count.
    // Just set it to ResetNoWaitingThreads if it isn't already.
    while (state == setState)
    {
        const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, resetNoWaitingThreadsState))
            return;
    }
    // If this is not the case however, we do exactly that, increase the generation & wake all threads.
    while (state == resetState)
    {
        const int32_t nextGenPendingResetState = Detail_Baselib_EventSemaphore_Generation(state + 1);
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->state, &state, nextGenPendingResetState))
        {
            Baselib_SystemFutex_Notify(&semaphore->state, UINT32_MAX, Baselib_WakeupFallbackStrategy_All);
            return;
        }
    }

    // EventSemaphore was already in ResetNoWaiting threads state.
    Baselib_atomic_thread_fence_release();
}
|
||||
|
||||
// Frees the semaphore. The futex-based variant owns no external resources,
// so this is intentionally a no-op; it exists for interface parity with the
// semaphore-based implementation.
BASELIB_INLINE_API void Baselib_EventSemaphore_Free(Baselib_EventSemaphore* semaphore)
{
}
|
||||
211
Libraries/external/baselib/Include/C/Internal/Baselib_EventSemaphore_SemaphoreBased.inl.h
vendored
Normal file
211
Libraries/external/baselib/Include/C/Internal/Baselib_EventSemaphore_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemSemaphore.h"
|
||||
#include "../Baselib_StaticAssert.h"
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "It's highly recommended to use Baselib_EventSemaphore_FutexBased.inl.h on platforms which has native semaphore support"
|
||||
#endif
|
||||
|
||||
// Combined state of the semaphore-based event semaphore.
// The two 32-bit halves can be updated independently (32bit cmpex) or, when both
// must change together, as one word through stateInt64 (64bit cmpex).
typedef union BASELIB_ALIGN_AS (8) Detail_Baselib_EventSemaphore_State
{
    struct
    {
        // Can be changed without checking for changes in numWaitingForSetInProgress (use 32bit cmpex)
        int32_t numWaitingForSetAndStateFlags;
        // Typically not changed without checking numWaitingForSetAndStateFlags (use 64bit cmpex)
        int32_t numWaitingForSetInProgress;
    } parts;
    // Both halves viewed as a single word for 64bit atomic operations.
    int64_t stateInt64;
} Detail_Baselib_EventSemaphore_State;
|
||||
|
||||
// Flag bits stored in the top two bits of numWaitingForSetAndStateFlags;
// the low 30 bits hold the count of threads waiting for Set.
enum
{
    // If this flag is set, threads are still waking up from a previous Set or ResetAndReleaseWaitingThreads call.
    // While this is set, any thread entering an Acquire method (that doesn't see Detail_Baselib_EventSemaphore_SetFlag),
    // will wait until it is cleared before proceeding with normal operations.
    Detail_Baselib_EventSemaphore_SetInProgressFlag = (uint32_t)1 << 30,

    // If this flag is set, threads acquiring the semaphore succeed immediately.
    Detail_Baselib_EventSemaphore_SetFlag = (uint32_t)2 << 30,

    // Mask selecting the waiting-thread-count bits (everything below the flag bits).
    Detail_Baselib_EventSemaphore_NumWaitingForSetMask = ~((uint32_t)(1 | 2) << 30)
};
|
||||
|
||||
// Semaphore-based event semaphore for platforms without native futex support.
// The trailing char arrays hold in-place storage for the two system semaphores;
// spacers keep the hot state word and each semaphore's data on separate cache lines.
typedef struct Baselib_EventSemaphore
{
    Detail_Baselib_EventSemaphore_State state;
    Baselib_SystemSemaphore_Handle setSemaphore;
    Baselib_SystemSemaphore_Handle setInProgressSemaphore;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - 2 * sizeof(Baselib_SystemSemaphore_Handle) - sizeof(Detail_Baselib_EventSemaphore_State)];
    char _systemSemaphoreDataSemaphore[Baselib_SystemSemaphore_PlatformSize];
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - Baselib_SystemSemaphore_PlatformSize];
    char _systemSemaphoreDataInProgressSemaphore[Baselib_SystemSemaphore_PlatformSize];
} Baselib_EventSemaphore;

BASELIB_STATIC_ASSERT((offsetof(Baselib_EventSemaphore, state) + PLATFORM_CACHE_LINE_SIZE) ==
    offsetof(Baselib_EventSemaphore, _systemSemaphoreDataSemaphore), "state and _systemSemaphoreDataSemaphore must not share cacheline");

BASELIB_STATIC_ASSERT((offsetof(Baselib_EventSemaphore, _systemSemaphoreDataSemaphore) + PLATFORM_CACHE_LINE_SIZE) ==
    offsetof(Baselib_EventSemaphore, _systemSemaphoreDataInProgressSemaphore), "_systemSemaphoreDataSemaphore and _systemSemaphoreDataInProgressSemaphore must not share cacheline");
|
||||
|
||||
// How (Timed)Acquire works for the SemaphoreBased EventSemaphore:
|
||||
//
|
||||
// If there is a set pending (Detail_Baselib_EventSemaphore_SetInProgressFlag is set),
|
||||
// it means that not all threads from the previous wakeup call (either via Set or ResetAndReleaseWaitingThreads) have been woken up.
|
||||
// If we would just continue, we might steal the wakeup tokens of those threads! So instead we wait until they are done.
|
||||
//
|
||||
// This is different from the FutexBased version, however there is no way for a user to distinguish that from
|
||||
// a "regular (but lengthy)" preemption at the start of the function.
|
||||
// Meaning that we don't care how often the semaphore got set and reset in the meantime!
|
||||
//
|
||||
//
|
||||
// Invariants:
|
||||
//
|
||||
// Allowed flag state transitions:
|
||||
// 0 -> Set | SetInProgress
|
||||
// Set | SetInProgress <-> Set
|
||||
// Set | SetInProgress <-> SetInProgress
|
||||
// Set -> 0
|
||||
// SetInProgress -> 0
|
||||
//
|
||||
// Additionally:
|
||||
// * numWaitingForSetInProgress can only grow if SetInProgress is set.
|
||||
// * numWaitingForSet can only grow if Set is set
|
||||
|
||||
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif

// Out-of-line slow paths, entered only when the semaphore was not observed in the Set state.
BASELIB_API void Detail_Baselib_EventSemaphore_SemaphoreBased_AcquireNonSet(int32_t initialNumWaitingForSetAndStateFlags, Baselib_EventSemaphore* semaphore);
// Returns true if acquired, false on timeout.
COMPILER_WARN_UNUSED_RESULT
BASELIB_API bool Detail_Baselib_EventSemaphore_SemaphoreBased_TryTimedAcquireNonSet(int32_t initialNumWaitingForSetAndStateFlags, Baselib_EventSemaphore* semaphore, uint32_t timeoutInMilliseconds);

#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif
|
||||
|
||||
|
||||
static FORCE_INLINE bool Detail_Baselib_EventSemaphore_IsSet(int32_t numWaitingForSetAndStateFlags)
|
||||
{
|
||||
return (numWaitingForSetAndStateFlags & Detail_Baselib_EventSemaphore_SetFlag) ? true : false;
|
||||
}
|
||||
|
||||
static FORCE_INLINE bool Detail_Baselib_EventSemaphore_IsSetInProgress(int32_t numWaitingForSetAndStateFlags)
|
||||
{
|
||||
return (numWaitingForSetAndStateFlags & Detail_Baselib_EventSemaphore_SetInProgressFlag) ? true : false;
|
||||
}
|
||||
|
||||
// Extracts the waiting-thread count (strips the flag bits).
static FORCE_INLINE int32_t Detail_Baselib_EventSemaphore_GetWaitingForSetCount(int32_t numWaitingForSetAndStateFlags)
{
    const int32_t waiterCount = numWaitingForSetAndStateFlags & Detail_Baselib_EventSemaphore_NumWaitingForSetMask;
    return waiterCount;
}
|
||||
|
||||
// Changes WaitingForSet count without affecting state flags
|
||||
static FORCE_INLINE int32_t Detail_Baselib_EventSemaphore_SetWaitingForSetCount(int32_t currentNumWaitingForSetAndStateFlags, int32_t newNumWaitingForSet)
|
||||
{
|
||||
return newNumWaitingForSet | (currentNumWaitingForSetAndStateFlags & (~Detail_Baselib_EventSemaphore_NumWaitingForSetMask));
|
||||
}
|
||||
|
||||
// Creates an event semaphore in the reset (non-signaled) state.
// The two system semaphores are created in-place inside the struct's trailing storage.
// NOTE(review): the struct is returned by value after CreateInplace; this assumes the
// handles remain valid for the copied storage (or that callers place the result at its
// final address) — confirm against Baselib_SystemSemaphore_CreateInplace's contract.
BASELIB_INLINE_API Baselib_EventSemaphore Baselib_EventSemaphore_Create(void)
{
    Baselib_EventSemaphore semaphore = {{{0, 0}}, {0}, {0}, {0}, {0}, {0}, {0}};

    semaphore.setSemaphore = Baselib_SystemSemaphore_CreateInplace(semaphore._systemSemaphoreDataSemaphore);
    semaphore.setInProgressSemaphore = Baselib_SystemSemaphore_CreateInplace(semaphore._systemSemaphoreDataInProgressSemaphore);
    return semaphore;
}
|
||||
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryAcquire(Baselib_EventSemaphore* semaphore)
|
||||
{
|
||||
const int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_acquire(&semaphore->state.parts.numWaitingForSetAndStateFlags);
|
||||
return Detail_Baselib_EventSemaphore_IsSet(numWaitingForSetAndStateFlags);
|
||||
}
|
||||
|
||||
// Blocks the calling thread until the semaphore is set.
BASELIB_INLINE_API void Baselib_EventSemaphore_Acquire(Baselib_EventSemaphore* semaphore)
{
    const int32_t stateFlags = Baselib_atomic_load_32_acquire(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    // Fast path: already set, nothing to do.
    if (Detail_Baselib_EventSemaphore_IsSet(stateFlags))
        return;
    // Slow path: register as a waiter and block.
    Detail_Baselib_EventSemaphore_SemaphoreBased_AcquireNonSet(stateFlags, semaphore);
}
|
||||
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryTimedAcquire(Baselib_EventSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
|
||||
{
|
||||
const int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_acquire(&semaphore->state.parts.numWaitingForSetAndStateFlags);
|
||||
if (!Detail_Baselib_EventSemaphore_IsSet(numWaitingForSetAndStateFlags))
|
||||
return Detail_Baselib_EventSemaphore_SemaphoreBased_TryTimedAcquireNonSet(numWaitingForSetAndStateFlags, semaphore, timeoutInMilliseconds);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Puts the semaphore back into the non-signaled state by clearing the Set flag.
// The CAS loop preserves the waiter count and the SetInProgress flag.
BASELIB_INLINE_API void Baselib_EventSemaphore_Reset(Baselib_EventSemaphore* semaphore)
{
    int32_t resetNumWaitingForSetAndStateFlags;
    int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_relaxed(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    do
    {
        resetNumWaitingForSetAndStateFlags = numWaitingForSetAndStateFlags & (~Detail_Baselib_EventSemaphore_SetFlag);
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(
        &semaphore->state.parts.numWaitingForSetAndStateFlags,
        &numWaitingForSetAndStateFlags,
        resetNumWaitingForSetAndStateFlags));
}
|
||||
|
||||
// Signals the semaphore. If threads are waiting, marks a wakeup as in progress and
// releases one system-semaphore token per waiter (unless a wakeup is already in
// progress, in which case the slow path owns the release).
BASELIB_INLINE_API void Baselib_EventSemaphore_Set(Baselib_EventSemaphore* semaphore)
{
    int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_relaxed(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    int32_t numWaitingForSetAndStateFlagsSet, numWaitingForSet;

    do
    {
        numWaitingForSetAndStateFlagsSet = numWaitingForSetAndStateFlags | Detail_Baselib_EventSemaphore_SetFlag;
        numWaitingForSet = Detail_Baselib_EventSemaphore_GetWaitingForSetCount(numWaitingForSetAndStateFlags);
        BaselibAssert(numWaitingForSet >= 0, "There needs to be always a non-negative amount of threads waiting for Set");
        if (numWaitingForSet)
            numWaitingForSetAndStateFlagsSet |= Detail_Baselib_EventSemaphore_SetInProgressFlag;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(
        &semaphore->state.parts.numWaitingForSetAndStateFlags,
        &numWaitingForSetAndStateFlags,
        numWaitingForSetAndStateFlagsSet));

    // Only release tokens if no earlier wakeup is still in progress (that wakeup's
    // consumers will handle the handoff) and there is at least one waiter.
    if (!Detail_Baselib_EventSemaphore_IsSetInProgress(numWaitingForSetAndStateFlags) && numWaitingForSet)
        Baselib_SystemSemaphore_Release(semaphore->setSemaphore, numWaitingForSet);
}
|
||||
|
||||
// Releases all currently waiting threads while leaving the semaphore non-signaled.
BASELIB_INLINE_API void Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(Baselib_EventSemaphore* semaphore)
{
    // Note that doing a Baselib_EventSemaphore_Set & Baselib_EventSemaphore_Reset has the same observable effects, just slightly slower.

    int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_relaxed(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    int32_t resetNumWaitingForSetAndStateFlags, numWaitingForSet;
    do
    {
        // Clear the Set flag; if waiters exist, mark their wakeup as in progress.
        resetNumWaitingForSetAndStateFlags = numWaitingForSetAndStateFlags & (~Detail_Baselib_EventSemaphore_SetFlag);
        numWaitingForSet = Detail_Baselib_EventSemaphore_GetWaitingForSetCount(numWaitingForSetAndStateFlags);
        BaselibAssert(numWaitingForSet >= 0, "There needs to be always a non-negative amount of threads waiting for Set");
        if (numWaitingForSet)
            resetNumWaitingForSetAndStateFlags |= Detail_Baselib_EventSemaphore_SetInProgressFlag;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(
        &semaphore->state.parts.numWaitingForSetAndStateFlags,
        &numWaitingForSetAndStateFlags,
        resetNumWaitingForSetAndStateFlags));

    // Only release tokens if no earlier wakeup is still in progress and there are waiters.
    if (!Detail_Baselib_EventSemaphore_IsSetInProgress(numWaitingForSetAndStateFlags) && numWaitingForSet)
        Baselib_SystemSemaphore_Release(semaphore->setSemaphore, numWaitingForSet);
}
|
||||
|
||||
// Frees the in-place system semaphores created by Baselib_EventSemaphore_Create.
// Safe to call with NULL (no-op). Presumably must not race with threads still
// waiting on the semaphore — confirm against the system-semaphore contract.
BASELIB_INLINE_API void Baselib_EventSemaphore_Free(Baselib_EventSemaphore* semaphore)
{
    if (!semaphore)
        return;

    Baselib_SystemSemaphore_FreeInplace(semaphore->setSemaphore);
    Baselib_SystemSemaphore_FreeInplace(semaphore->setInProgressSemaphore);
}
|
||||
150
Libraries/external/baselib/Include/C/Internal/Baselib_HighCapacitySemaphore_FutexBased.inl.h
vendored
Normal file
150
Libraries/external/baselib/Include/C/Internal/Baselib_HighCapacitySemaphore_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemFutex.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "Only use this implementation on top of a proper futex, in all other situations us Baselib_HighCapacitySemaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Space out to different cache lines.
// the idea here is that threads waking up from sleep should not have to
// access the cache line where count is stored, and only touch wakeups.
// the only exception to that rule is if we hit a timeout.
typedef struct Baselib_HighCapacitySemaphore
{
    int32_t wakeups;
    // NOTE(review): spacer is sized with sizeof(int64_t) although 'wakeups' is int32_t;
    // implicit alignment padding before 'count' appears to restore the intended layout,
    // but there is no static assert here — confirm the sizing is intentional.
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int64_t)];
    int64_t count;
    char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(int64_t)];
} Baselib_HighCapacitySemaphore;
|
||||
|
||||
// Creates a semaphore with zero tokens and zero pending wakeups.
BASELIB_INLINE_API Baselib_HighCapacitySemaphore Baselib_HighCapacitySemaphore_Create(void)
{
    const Baselib_HighCapacitySemaphore initial = {0, {0}, 0, {0}};
    return initial;
}
||||
|
||||
// Attempts to consume one pending wakeup token. Returns false if none are available.
BASELIB_INLINE_API bool Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(Baselib_HighCapacitySemaphore* semaphore)
{
    int32_t available = Baselib_atomic_load_32_relaxed(&semaphore->wakeups);
    while (available > 0)
    {
        const bool consumed = Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->wakeups, &available, available - 1);
        if (consumed)
            return true;
    }
    return false;
}
|
||||
|
||||
// Non-blocking acquire: takes a token only while the count is positive.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryAcquire(Baselib_HighCapacitySemaphore* semaphore)
{
    int64_t observedCount = Baselib_atomic_load_64_relaxed(&semaphore->count);
    while (observedCount > 0)
    {
        const bool acquired = Baselib_atomic_compare_exchange_weak_64_acquire_relaxed(&semaphore->count, &observedCount, observedCount - 1);
        if (acquired)
            return true;
    }
    return false;
}
|
||||
|
||||
// Blocks the calling thread until a token can be acquired.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Acquire(Baselib_HighCapacitySemaphore* semaphore)
{
    // Fast path: decrement count; a positive previous value means we took a token.
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;

    // Slow path: count went non-positive, so we are registered as a waiter.
    // Sleep on the wakeups futex word until a wakeup token can be consumed.
    while (!Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(semaphore))
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, UINT32_MAX);
    }
}
|
||||
|
||||
// Blocks until a token is acquired or the timeout expires.
// Returns true if acquired, false on timeout.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryTimedAcquire(Baselib_HighCapacitySemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: decrement count; a positive previous value means we took a token.
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;

    // Slow path: wait for a wakeup token, re-arming the futex wait until the deadline.
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, timeLeft);
        if (Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(semaphore))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);

    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int64_t count = Baselib_atomic_load_64_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a fully memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(semaphore));
    return true;
}
|
||||
|
||||
// Releases _count tokens, waking up to that many blocked threads.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Release(Baselib_HighCapacitySemaphore* semaphore, const uint32_t _count)
{
    const int64_t count = _count;
    int64_t previousCount = Baselib_atomic_fetch_add_64_release(&semaphore->count, count);

    // This should only be possible if millions of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", (int32_t)previousCount, (int32_t)count);

    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        // A negative previous count is the number of threads currently waiting;
        // grant them wakeup tokens and notify the futex.
        const int64_t waitingThreads = -previousCount;
        const int64_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        BaselibAssert(threadsToWakeup <= INT32_MAX);
        Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, (int32_t)threadsToWakeup);
        Baselib_SystemFutex_Notify(&semaphore->wakeups, (int32_t)threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
        return;
    }

    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_HighCapacitySemaphore_MaxGuaranteedCount * 2))
    {
        const int64_t maxCount = Baselib_HighCapacitySemaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
|
||||
|
||||
// Drops all tokens and wakes all currently waiting threads.
// Returns the number of threads that were woken up.
BASELIB_INLINE_API uint64_t Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(Baselib_HighCapacitySemaphore* semaphore)
{
    const int64_t count = Baselib_atomic_exchange_64_release(&semaphore->count, 0);
    // Non-negative count means no waiters: nothing to wake.
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    const int64_t threadsToWakeup = -count;
    BaselibAssert(threadsToWakeup <= INT32_MAX);
    Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, (int32_t)threadsToWakeup);
    Baselib_SystemFutex_Notify(&semaphore->wakeups, (int32_t)threadsToWakeup, Baselib_WakeupFallbackStrategy_All);
    return threadsToWakeup;
}
|
||||
|
||||
// Frees the semaphore. The futex-based variant owns no external resources; this only
// asserts that no thread is still waiting (negative count). Safe to call with NULL.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Free(Baselib_HighCapacitySemaphore* semaphore)
{
    if (!semaphore)
        return;
    const int64_t count = Baselib_atomic_load_64_seq_cst(&semaphore->count);
    BaselibAssert(count >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
}
|
||||
126
Libraries/external/baselib/Include/C/Internal/Baselib_HighCapacitySemaphore_SemaphoreBased.inl.h
vendored
Normal file
126
Libraries/external/baselib/Include/C/Internal/Baselib_HighCapacitySemaphore_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemSemaphore.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "It's highly recommended to use Baselib_HighCapacitySemaphore_FutexBased.inl.h on platforms which has native semaphore support"
|
||||
#endif
|
||||
|
||||
// Semaphore-based high-capacity (64-bit count) semaphore for platforms without
// native futex support. The trailing char array holds in-place storage for the
// system semaphore; the spacer keeps it off the hot count's cache line.
typedef struct Baselib_HighCapacitySemaphore
{
    int64_t count;
    Baselib_SystemSemaphore_Handle handle;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int64_t) - sizeof(Baselib_SystemSemaphore_Handle)];
    char _systemSemaphoreData[Baselib_SystemSemaphore_PlatformSize];
} Baselib_HighCapacitySemaphore;

BASELIB_STATIC_ASSERT((offsetof(Baselib_HighCapacitySemaphore, count) + PLATFORM_CACHE_LINE_SIZE) ==
    offsetof(Baselib_HighCapacitySemaphore, _systemSemaphoreData), "count and internalData must not share cacheline");
||||
|
||||
// Creates a semaphore with zero tokens; the system semaphore is created in-place
// inside the struct's trailing storage.
// NOTE(review): the struct is returned by value after CreateInplace; this assumes the
// handle remains valid for the copied storage — confirm against the CreateInplace contract.
BASELIB_INLINE_API Baselib_HighCapacitySemaphore Baselib_HighCapacitySemaphore_Create(void)
{
    Baselib_HighCapacitySemaphore semaphore = {0, {0}, {0}, {0}};
    semaphore.handle = Baselib_SystemSemaphore_CreateInplace(&semaphore._systemSemaphoreData);
    return semaphore;
}
|
||||
|
||||
// Non-blocking acquire: takes a token only while the count is positive.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryAcquire(Baselib_HighCapacitySemaphore* semaphore)
{
    int64_t observedCount = Baselib_atomic_load_64_relaxed(&semaphore->count);
    while (observedCount > 0)
    {
        const bool acquired = Baselib_atomic_compare_exchange_weak_64_acquire_relaxed(&semaphore->count, &observedCount, observedCount - 1);
        if (acquired)
            return true;
    }
    return false;
}
|
||||
|
||||
// Blocks the calling thread until a token can be acquired.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Acquire(Baselib_HighCapacitySemaphore* semaphore)
{
    // Fast path: decrement count; a positive previous value means we took a token.
    const int64_t tokensBefore = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(tokensBefore > 0))
        return;

    // Slow path: we are registered as a waiter - block on the system semaphore.
    Baselib_SystemSemaphore_Acquire(semaphore->handle);
}
|
||||
|
||||
// Blocks until a token is acquired or the timeout expires.
// Returns true if acquired, false on timeout.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryTimedAcquire(Baselib_HighCapacitySemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: decrement count; a positive previous value means we took a token.
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;

    // Slow path: block on the system semaphore with a deadline.
    if (OPTIMIZER_LIKELY(Baselib_SystemSemaphore_TryTimedAcquire(semaphore->handle, timeoutInMilliseconds)))
        return true;

    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int64_t count = Baselib_atomic_load_64_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a fully memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Baselib_SystemSemaphore_TryAcquire(semaphore->handle));
    return true;
}
|
||||
|
||||
// Releases _count tokens, waking up to that many blocked threads.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Release(Baselib_HighCapacitySemaphore* semaphore, const uint32_t _count)
{
    const int64_t count = _count;
    int64_t previousCount = Baselib_atomic_fetch_add_64_release(&semaphore->count, count);

    // This should only be possible if millions of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", (int32_t)previousCount, (int32_t)count);

    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        // A negative previous count is the number of threads currently waiting;
        // release that many (capped at count) system-semaphore tokens.
        const int64_t waitingThreads = -previousCount;
        const int64_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        BaselibAssert(threadsToWakeup <= (int64_t)UINT32_MAX);
        Baselib_SystemSemaphore_Release(semaphore->handle, (uint32_t)threadsToWakeup);
        return;
    }

    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_HighCapacitySemaphore_MaxGuaranteedCount * 2))
    {
        const int64_t maxCount = Baselib_HighCapacitySemaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
|
||||
|
||||
// Resets the token count to zero and wakes every thread currently blocked in
// an acquire. Returns how many threads were woken.
BASELIB_INLINE_API uint64_t Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(Baselib_HighCapacitySemaphore* semaphore)
{
    const int64_t previousCount = Baselib_atomic_exchange_64_release(&semaphore->count, 0);
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        // A negative count records the number of sleeping waiters.
        const int64_t numWaiters = -previousCount;
        BaselibAssert(numWaiters <= (int64_t)UINT32_MAX);
        Baselib_SystemSemaphore_Release(semaphore->handle, (uint32_t)numWaiters);
        return numWaiters;
    }
    return 0;
}
|
||||
|
||||
// Destroys the semaphore. Passing NULL is a no-op. Destruction while threads
// are still waiting is a programming error (asserted).
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Free(Baselib_HighCapacitySemaphore* semaphore)
{
    if (semaphore)
    {
        const int64_t remainingCount = Baselib_atomic_load_64_seq_cst(&semaphore->count);
        BaselibAssert(remainingCount >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
        Baselib_SystemSemaphore_FreeInplace(semaphore->handle);
    }
}
|
||||
92
Libraries/external/baselib/Include/C/Internal/Baselib_Lock_FutexBased.inl.h
vendored
Normal file
92
Libraries/external/baselib/Include/C/Internal/Baselib_Lock_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemFutex.h"
|
||||
|
||||
// Futex-based lock state machine.
enum Detail_Baselib_Lock_State
{
    Detail_Baselib_Lock_UNLOCKED  = 0, // no owner
    Detail_Baselib_Lock_LOCKED    = 1, // owned, no waiters announced
    Detail_Baselib_Lock_CONTENDED = 2, // owned, at least one thread may be sleeping on the futex
};
|
||||
// Futex-based lock. The state word doubles as the futex address; padding
// keeps the lock on its own cache line to avoid false sharing.
typedef struct Baselib_Lock
{
    int32_t state; // one of Detail_Baselib_Lock_State
    char _cachelineSpacer[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
} Baselib_Lock;
|
||||
|
||||
// Creates a lock in the unlocked state.
BASELIB_INLINE_API Baselib_Lock Baselib_Lock_Create(void)
{
    const Baselib_Lock newLock = {Detail_Baselib_Lock_UNLOCKED, {0}};
    return newLock;
}
|
||||
|
||||
// Attempts to take the lock without blocking. Returns true on success.
// The weak CAS may fail spuriously, so we retry only while the observed state
// is still UNLOCKED; any other observed state means the lock is held.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryAcquire(Baselib_Lock* lock)
{
    int32_t previousState = Detail_Baselib_Lock_UNLOCKED;
    do
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&lock->state, &previousState, Detail_Baselib_Lock_LOCKED))
            return true;
    }
    while (previousState == Detail_Baselib_Lock_UNLOCKED);
    return false;
}
|
||||
|
||||
// Acquires the lock, sleeping on the futex if it is already held.
BASELIB_INLINE_API void Baselib_Lock_Acquire(Baselib_Lock* lock)
{
    // Advance the state by one: UNLOCKED -> LOCKED (we now own the lock) or
    // LOCKED -> CONTENDED (announce that a waiter exists). If the state is
    // already CONTENDED there is nothing to announce, so leave the loop.
    int32_t previousState = Detail_Baselib_Lock_UNLOCKED;
    do
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&lock->state, &previousState, previousState + 1))
            break;
    }
    while (previousState != Detail_Baselib_Lock_CONTENDED);

    // If we did not transition from UNLOCKED we must wait. On each wakeup we
    // force the state to CONTENDED; seeing UNLOCKED as the prior value means
    // the lock was free and is now ours (marked contended conservatively).
    while (OPTIMIZER_LIKELY(previousState != Detail_Baselib_Lock_UNLOCKED))
    {
        Baselib_SystemFutex_Wait(&lock->state, Detail_Baselib_Lock_CONTENDED, UINT32_MAX);
        previousState = Baselib_atomic_exchange_32_relaxed(&lock->state, Detail_Baselib_Lock_CONTENDED);
    }
}
|
||||
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_Lock_TryTimedAcquire(Baselib_Lock* lock, const uint32_t timeoutInMilliseconds)
|
||||
{
|
||||
int32_t previousState = Detail_Baselib_Lock_UNLOCKED;
|
||||
do
|
||||
{
|
||||
if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&lock->state, &previousState, previousState + 1))
|
||||
break;
|
||||
}
|
||||
while (previousState != Detail_Baselib_Lock_CONTENDED);
|
||||
|
||||
if (OPTIMIZER_LIKELY(previousState == Detail_Baselib_Lock_UNLOCKED))
|
||||
return true;
|
||||
|
||||
uint32_t timeLeft = timeoutInMilliseconds;
|
||||
const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
|
||||
do
|
||||
{
|
||||
Baselib_SystemFutex_Wait(&lock->state, Detail_Baselib_Lock_CONTENDED, timeoutInMilliseconds);
|
||||
const int32_t previousState = Baselib_atomic_exchange_32_relaxed(&lock->state, Detail_Baselib_Lock_CONTENDED);
|
||||
if (previousState == Detail_Baselib_Lock_UNLOCKED)
|
||||
return true;
|
||||
timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
|
||||
}
|
||||
while (timeLeft);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Releases the lock. If the state indicated contention, wake one waiter.
BASELIB_INLINE_API void Baselib_Lock_Release(Baselib_Lock* lock)
{
    const int32_t stateBeforeRelease = Baselib_atomic_exchange_32_release(&lock->state, Detail_Baselib_Lock_UNLOCKED);
    if (stateBeforeRelease == Detail_Baselib_Lock_CONTENDED)
        Baselib_SystemFutex_Notify(&lock->state, 1, Baselib_WakeupFallbackStrategy_OneByOne);
}
|
||||
|
||||
// Nothing to free: the futex-based lock owns no external resources.
// NULL is implicitly accepted since the pointer is never dereferenced.
BASELIB_INLINE_API void Baselib_Lock_Free(Baselib_Lock* lock)
{
}
|
||||
46
Libraries/external/baselib/Include/C/Internal/Baselib_Lock_SemaphoreBased.inl.h
vendored
Normal file
46
Libraries/external/baselib/Include/C/Internal/Baselib_Lock_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_CappedSemaphore.h"
|
||||
|
||||
// Semaphore-based lock: a capped semaphore holding at most one token.
// Token present == unlocked; token absent == locked.
typedef struct Baselib_Lock
{
    Baselib_CappedSemaphore semaphore;
} Baselib_Lock;
|
||||
|
||||
// Creates an unlocked lock. The capped semaphore starts empty, so the single
// token representing the unlocked state is posted here.
BASELIB_INLINE_API Baselib_Lock Baselib_Lock_Create(void)
{
    Baselib_Lock lock = { Baselib_CappedSemaphore_Create(1) };
    uint16_t submittedTokens = Baselib_CappedSemaphore_Release(&lock.semaphore, 1);
    BaselibAssert(submittedTokens == 1, "CappedSemaphore was unable to accept our token");
    return lock;
}
|
||||
|
||||
// Acquires the lock by taking the semaphore's single token, blocking if
// another thread currently holds it.
BASELIB_INLINE_API void Baselib_Lock_Acquire(Baselib_Lock* lock)
{
    Baselib_CappedSemaphore_Acquire(&lock->semaphore);
}
|
||||
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_Lock_TryAcquire(Baselib_Lock* lock)
|
||||
{
|
||||
return Baselib_CappedSemaphore_TryAcquire(&lock->semaphore);
|
||||
}
|
||||
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
BASELIB_INLINE_API bool Baselib_Lock_TryTimedAcquire(Baselib_Lock* lock, const uint32_t timeoutInMilliseconds)
|
||||
{
|
||||
return Baselib_CappedSemaphore_TryTimedAcquire(&lock->semaphore, timeoutInMilliseconds);
|
||||
}
|
||||
|
||||
// Releases the lock by returning the single token to the capped semaphore.
BASELIB_INLINE_API void Baselib_Lock_Release(Baselib_Lock* lock)
{
    Baselib_CappedSemaphore_Release(&lock->semaphore, 1);
}
|
||||
|
||||
// Destroys the lock's underlying semaphore. NULL is a no-op.
BASELIB_INLINE_API void Baselib_Lock_Free(Baselib_Lock* lock)
{
    if (lock)
        Baselib_CappedSemaphore_Free(&lock->semaphore);
}
|
||||
93
Libraries/external/baselib/Include/C/Internal/Baselib_ReentrantLock.inl.h
vendored
Normal file
93
Libraries/external/baselib/Include/C/Internal/Baselib_ReentrantLock.inl.h
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_Lock.h"
|
||||
#include "../Baselib_StaticAssert.h"
|
||||
#include "../Baselib_Alignment.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
// Recursive mutex: a non-recursive Baselib_Lock plus owner/depth bookkeeping.
// owner and count are written only while holding the underlying lock; owner
// is additionally read with a relaxed atomic load by prospective acquirers.
typedef struct Baselib_ReentrantLock
{
    Baselib_Lock lock;       // provides the actual mutual exclusion
    Baselib_Thread_Id owner; // holder's thread id, or Baselib_Thread_InvalidId
    int32_t count;           // recursion depth; 0 == unlocked
} Baselib_ReentrantLock;

// The relaxed atomic loads of owner/count require naturally aligned fields.
BASELIB_STATIC_ASSERT((BASELIB_ALIGN_OF(Baselib_ReentrantLock) + offsetof(Baselib_ReentrantLock, owner)) % sizeof(Baselib_Thread_Id) == 0, "Baselib_ReentrantLock::owner is not aligned for atomic use");
BASELIB_STATIC_ASSERT((BASELIB_ALIGN_OF(Baselib_ReentrantLock) + offsetof(Baselib_ReentrantLock, count)) % sizeof(int32_t) == 0, "Baselib_ReentrantLock::count is not aligned for atomic use");
|
||||
|
||||
// Creates a reentrant lock: unlocked, unowned, recursion depth zero.
BASELIB_INLINE_API Baselib_ReentrantLock Baselib_ReentrantLock_Create(void)
{
    const Baselib_ReentrantLock newLock = {Baselib_Lock_Create(), Baselib_Thread_InvalidId, 0};
    return newLock;
}
|
||||
|
||||
// Attempts to take the lock without blocking. Returns true on success
// (including re-entry by the current owner).
// The relaxed load of owner is sufficient here: a thread can only observe its
// own id in owner if it wrote it itself while holding the lock, so the
// re-entry fast path cannot race with another thread's ownership.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryAcquire(Baselib_ReentrantLock* lock)
{
    const Baselib_Thread_Id currentThreadId = Baselib_Thread_GetCurrentThreadId();
    const Baselib_Thread_Id lockOwner = Baselib_atomic_load_ptr_relaxed(&lock->owner);
    if (OPTIMIZER_LIKELY(currentThreadId != lockOwner))
    {
        if (!Baselib_Lock_TryAcquire(&lock->lock))
            return false;
        // We hold the underlying lock exclusively; plain writes are fine.
        lock->owner = currentThreadId;
        lock->count = 1;
        return true;
    }
    // Re-entry by the owning thread: just bump the recursion depth.
    lock->count++;
    return true;
}
|
||||
|
||||
// Acquires the lock, blocking if another thread holds it. Re-entry by the
// current owner only increments the recursion depth (see TryAcquire for why
// the relaxed owner load is safe).
BASELIB_INLINE_API void Baselib_ReentrantLock_Acquire(Baselib_ReentrantLock* lock)
{
    const Baselib_Thread_Id currentThreadId = Baselib_Thread_GetCurrentThreadId();
    const Baselib_Thread_Id lockOwner = Baselib_atomic_load_ptr_relaxed(&lock->owner);
    if (OPTIMIZER_LIKELY(currentThreadId != lockOwner))
    {
        Baselib_Lock_Acquire(&lock->lock);
        // We hold the underlying lock exclusively; plain writes are fine.
        lock->owner = currentThreadId;
        lock->count = 1;
        return;
    }
    // Re-entry by the owning thread: just bump the recursion depth.
    lock->count++;
}
|
||||
|
||||
// Acquires the lock, blocking for at most timeoutInMilliseconds.
// Returns true if acquired (including owner re-entry), false on timeout.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryTimedAcquire(Baselib_ReentrantLock* lock, const uint32_t timeoutInMilliseconds)
{
    const Baselib_Thread_Id currentThreadId = Baselib_Thread_GetCurrentThreadId();
    const Baselib_Thread_Id lockOwner = Baselib_atomic_load_ptr_relaxed(&lock->owner);
    if (OPTIMIZER_LIKELY(currentThreadId != lockOwner))
    {
        if (!Baselib_Lock_TryTimedAcquire(&lock->lock, timeoutInMilliseconds))
            return false;
        // We hold the underlying lock exclusively; plain writes are fine.
        lock->owner = currentThreadId;
        lock->count = 1;
        return true;
    }
    // Re-entry by the owning thread: just bump the recursion depth.
    lock->count++;
    return true;
}
|
||||
|
||||
// Releases one level of recursion; the final release clears the owner and
// unlocks the underlying lock. Only the owning thread may call this
// (asserted). A release with no matching acquire (count == 0) is ignored.
BASELIB_INLINE_API void Baselib_ReentrantLock_Release(Baselib_ReentrantLock* lock)
{
    if (lock->count > 0)
    {
        BaselibAssert(Baselib_atomic_load_ptr_relaxed(&lock->owner) == Baselib_Thread_GetCurrentThreadId(), "A recursive lock can only be unlocked by the locking thread");
        if (OPTIMIZER_LIKELY(lock->count == 1))
        {
            // Clear ownership before releasing the underlying lock so the
            // next owner never observes our stale id/depth.
            lock->owner = Baselib_Thread_InvalidId;
            lock->count = 0;
            Baselib_Lock_Release(&lock->lock);
            return;
        }
        lock->count--;
    }
}
|
||||
|
||||
// Destroys the underlying lock. NULL is a no-op.
BASELIB_INLINE_API void Baselib_ReentrantLock_Free(Baselib_ReentrantLock* lock)
{
    if (lock)
        Baselib_Lock_Free(&lock->lock);
}
|
||||
152
Libraries/external/baselib/Include/C/Internal/Baselib_Semaphore_FutexBased.inl.h
vendored
Normal file
152
Libraries/external/baselib/Include/C/Internal/Baselib_Semaphore_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,152 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_CountdownTimer.h"
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemFutex.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "Only use this implementation on top of a proper futex, in all other situations us Baselib_Semaphore_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
|
||||
// Space out to different cache lines.
// the idea here is that threads waking up from sleep should not have to
// access the cache line where count is stored, and only touch wakeups.
// the only exception to that rule is if we hit a timeout.
typedef struct Baselib_Semaphore
{
    int32_t wakeups; // tokens handed to sleeping waiters; also the futex word threads sleep on
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
    int32_t count;   // available tokens; when negative, the magnitude is the number of waiting threads
    char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
} Baselib_Semaphore;

BASELIB_STATIC_ASSERT(sizeof(Baselib_Semaphore) == PLATFORM_CACHE_LINE_SIZE * 2, "Baselib_Semaphore (Futex) size should match 2*cacheline size (128bytes)");
BASELIB_STATIC_ASSERT(offsetof(Baselib_Semaphore, wakeups) ==
    (offsetof(Baselib_Semaphore, count) - PLATFORM_CACHE_LINE_SIZE), "Baselib_Semaphore (Futex) wakeups and count shouldnt share cacheline");
|
||||
|
||||
// Creates a semaphore with zero tokens and zero pending wakeups.
BASELIB_INLINE_API Baselib_Semaphore Baselib_Semaphore_Create(void)
{
    const Baselib_Semaphore newSemaphore = {0, {0}, 0, {0}};
    return newSemaphore;
}
|
||||
|
||||
// Attempts to consume one wakeup token. Returns true on success, false if
// none are available. The weak CAS may fail spuriously; on failure it reloads
// the current value into previousCount, so the loop retries while tokens
// remain.
BASELIB_INLINE_API bool Detail_Baselib_Semaphore_ConsumeWakeup(Baselib_Semaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->wakeups);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->wakeups, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
|
||||
|
||||
// Non-blocking acquire: consume one token iff count is positive.
// Unlike Acquire, this never drives count negative, so a failed attempt does
// not register this thread as a waiter.
BASELIB_INLINE_API bool Baselib_Semaphore_TryAcquire(Baselib_Semaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
|
||||
|
||||
// Acquires one token, sleeping on the wakeups futex until one is granted.
BASELIB_INLINE_API void Baselib_Semaphore_Acquire(Baselib_Semaphore* semaphore)
{
    // Fast path: a token was available and the fetch_add consumed it.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;

    // Slow path: count went non-positive, registering us as a waiter; wait
    // until a releasing thread posts a wakeup we can consume.
    while (!Detail_Baselib_Semaphore_ConsumeWakeup(semaphore))
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, UINT32_MAX);
    }
}
|
||||
|
||||
// Acquires one token, blocking for at most timeoutInMilliseconds.
// Returns true if a token was acquired, false on timeout.
BASELIB_INLINE_API bool Baselib_Semaphore_TryTimedAcquire(Baselib_Semaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: a token was available and the fetch_add consumed it.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;

    // Wait for a wakeup, re-waiting with the remaining time after spurious
    // or losing wakeups.
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, timeLeft);
        if (Detail_Baselib_Semaphore_ConsumeWakeup(semaphore))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);

    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Detail_Baselib_Semaphore_ConsumeWakeup(semaphore));
    return true;
}
|
||||
|
||||
// Releases _count tokens, waking up to that many waiting threads by posting
// wakeups and notifying the futex. Clamps count to avoid overflow.
BASELIB_INLINE_API void Baselib_Semaphore_Release(Baselib_Semaphore* semaphore, const uint16_t _count)
{
    const int32_t count = _count;
    int32_t previousCount = Baselib_atomic_fetch_add_32_release(&semaphore->count, count);

    // This should only be possible if thousands of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", previousCount, count);

    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        // -previousCount threads are waiting; wake at most 'count' of them by
        // granting that many wakeup tokens and notifying the futex.
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
        Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
        return;
    }

    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_Semaphore_MaxGuaranteedCount * 2))
    {
        const int32_t maxCount = Baselib_Semaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
|
||||
|
||||
// Resets the token count to zero and wakes all currently waiting threads.
// Returns the number of threads woken.
BASELIB_INLINE_API uint32_t Baselib_Semaphore_ResetAndReleaseWaitingThreads(Baselib_Semaphore* semaphore)
{
    const int32_t count = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // A negative count records the number of sleeping waiters.
    const int32_t threadsToWakeup = -count;
    Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
    Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_All);
    return threadsToWakeup;
}
|
||||
|
||||
// Destroys the semaphore (no OS resources to release in the futex variant).
// NULL is a no-op; destruction with waiters present is asserted against.
BASELIB_INLINE_API void Baselib_Semaphore_Free(Baselib_Semaphore* semaphore)
{
    if (semaphore)
    {
        const int32_t remainingCount = Baselib_atomic_load_32_seq_cst(&semaphore->count);
        BaselibAssert(remainingCount >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
    }
}
|
||||
126
Libraries/external/baselib/Include/C/Internal/Baselib_Semaphore_SemaphoreBased.inl.h
vendored
Normal file
126
Libraries/external/baselib/Include/C/Internal/Baselib_Semaphore_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Baselib_Atomic_TypeSafe.h"
|
||||
#include "../Baselib_SystemSemaphore.h"
|
||||
#include "../Baselib_Thread.h"
|
||||
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#error "It's highly recommended to use Baselib_Semaphore_FutexBased.inl.h on platforms which has native semaphore support"
|
||||
#endif
|
||||
|
||||
// System-semaphore-backed semaphore for platforms without a native futex.
// The backing system semaphore is constructed in place in the trailing
// storage; padding keeps count off the system semaphore's cache line.
typedef struct Baselib_Semaphore
{
    Baselib_SystemSemaphore_Handle handle; // handle to the in-place system semaphore below
    int32_t count;                         // available tokens; when negative, the magnitude is the number of waiting threads
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t) - sizeof(Baselib_SystemSemaphore_Handle)];
    char _systemSemaphoreData[Baselib_SystemSemaphore_PlatformSize];
} Baselib_Semaphore;

BASELIB_STATIC_ASSERT((offsetof(Baselib_Semaphore, count) + PLATFORM_CACHE_LINE_SIZE - sizeof(Baselib_SystemSemaphore_Handle)) ==
    offsetof(Baselib_Semaphore, _systemSemaphoreData), "count and internalData must not share cacheline");
|
||||
|
||||
// Creates a semaphore with zero tokens; the system semaphore is constructed
// in place inside the returned struct's trailing storage.
// NOTE(review): the handle is created against the local's _systemSemaphoreData
// and the struct is then returned by value — presumably
// Baselib_SystemSemaphore_CreateInplace yields a handle that stays valid
// after the copy; confirm against its documentation.
BASELIB_INLINE_API Baselib_Semaphore Baselib_Semaphore_Create(void)
{
    Baselib_Semaphore semaphore = {{0}, 0, {0}, {0}};
    semaphore.handle = Baselib_SystemSemaphore_CreateInplace(&semaphore._systemSemaphoreData);
    return semaphore;
}
|
||||
|
||||
// Non-blocking acquire: consume one token iff count is positive.
// Never drives count negative, so a failed attempt does not register this
// thread as a waiter.
BASELIB_INLINE_API bool Baselib_Semaphore_TryAcquire(Baselib_Semaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
|
||||
|
||||
// Acquires one token, blocking on the system semaphore if none are available.
BASELIB_INLINE_API void Baselib_Semaphore_Acquire(Baselib_Semaphore* semaphore)
{
    // Fast path: a token was available and the fetch_add consumed it.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;

    // Slow path: count went non-positive, registering us as a waiter.
    Baselib_SystemSemaphore_Acquire(semaphore->handle);
}
|
||||
|
||||
// Acquires one token, blocking for at most timeoutInMilliseconds.
// Returns true if a token was acquired, false on timeout.
BASELIB_INLINE_API bool Baselib_Semaphore_TryTimedAcquire(Baselib_Semaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: a token was available and the fetch_add consumed it.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;

    if (OPTIMIZER_LIKELY(Baselib_SystemSemaphore_TryTimedAcquire(semaphore->handle, timeoutInMilliseconds)))
        return true;

    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Baselib_SystemSemaphore_TryAcquire(semaphore->handle));
    return true;
}
|
||||
|
||||
// Releases _count tokens, waking up to that many waiting threads via the
// system semaphore. Clamps count to avoid overflow.
BASELIB_INLINE_API void Baselib_Semaphore_Release(Baselib_Semaphore* semaphore, const uint16_t _count)
{
    const int32_t count = _count;
    int32_t previousCount = Baselib_atomic_fetch_add_32_release(&semaphore->count, count);

    // This should only be possible if thousands of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", previousCount, count);

    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        // -previousCount threads are waiting; wake at most 'count' of them.
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
        return;
    }

    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_Semaphore_MaxGuaranteedCount * 2))
    {
        const int32_t maxCount = Baselib_Semaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
|
||||
|
||||
// Resets the token count to zero and wakes all currently waiting threads.
// Returns the number of threads woken.
BASELIB_INLINE_API uint32_t Baselib_Semaphore_ResetAndReleaseWaitingThreads(Baselib_Semaphore* semaphore)
{
    const int32_t count = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // A negative count records the number of sleeping waiters.
    const int32_t threadsToWakeup = -count;

    Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
    return threadsToWakeup;
}
|
||||
|
||||
// Destroys the semaphore and its in-place system semaphore. NULL is a no-op;
// destruction with waiters present is asserted against.
BASELIB_INLINE_API void Baselib_Semaphore_Free(Baselib_Semaphore* semaphore)
{
    if (semaphore)
    {
        const int32_t remainingCount = Baselib_atomic_load_32_seq_cst(&semaphore->count);
        BaselibAssert(remainingCount >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
        Baselib_SystemSemaphore_FreeInplace(semaphore->handle);
    }
}
|
||||
194
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_Gcc.h
vendored
Normal file
194
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_Gcc.h
vendored
Normal file
@@ -0,0 +1,194 @@
|
||||
#pragma once

// GCC/Clang implementation of the Baselib atomic operations, generated by
// expanding the per-op macros below over every memory order and type via the
// Baselib_Atomic_FOR_EACH_* X-macros.

#include "../../../C/Baselib_Atomic.h"
#include "../../../C/Baselib_Atomic_Macros.h"
#include "Baselib_Atomic_Gcc_Apple_LLVM_Patch.h"

#if COMPILER_GCC && ((__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 7))
    #pragma message "GNUC: " PP_STRINGIZE(__GNUC__) " GNUC_MINOR: " PP_STRINGIZE(__GNUC_MINOR__)
    #error "GCC is too old and/or missing compatible atomic built-in functions" PP_STRINGIZE(__GNUC__)
#endif

// Map Baselib memory-order names to the compiler's __ATOMIC_* constants.
#define detail_intrinsic_relaxed __ATOMIC_RELAXED
#define detail_intrinsic_acquire __ATOMIC_ACQUIRE
#define detail_intrinsic_release __ATOMIC_RELEASE
#define detail_intrinsic_acq_rel __ATOMIC_ACQ_REL
#define detail_intrinsic_seq_cst __ATOMIC_SEQ_CST

// Patch gcc and clang intrinsics to achieve a sequentially consistent barrier.
// As of writing Clang 9, GCC 9 none of them produce a seq cst barrier for load-store operations.
// To fix this we switch load store to be acquire release with a full final barrier.
#define detail_ldst_intrinsic_relaxed detail_intrinsic_relaxed
#define detail_ldst_intrinsic_acquire detail_intrinsic_acquire
#define detail_ldst_intrinsic_release detail_intrinsic_release
#define detail_ldst_intrinsic_acq_rel detail_intrinsic_acq_rel
#define detail_ldst_intrinsic_seq_cst detail_intrinsic_seq_cst

#if defined(__aarch64__)
    // On AArch64, seq_cst load-store ops are demoted to acq_rel and followed
    // by an explicit full fence (see comment above).
    #undef detail_ldst_intrinsic_seq_cst
    #define detail_ldst_intrinsic_seq_cst __ATOMIC_ACQ_REL
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_relaxed
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_acquire
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_release
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_acq_rel
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_seq_cst __extension__({__atomic_thread_fence (__ATOMIC_SEQ_CST); });
#else
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_relaxed
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_acquire
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_release
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_acq_rel
    #define detail_AARCH64_SEQCST_PATCH_BARRIER_seq_cst
#endif

// Expands to Baselib_atomic_thread_fence_<order>().
#define detail_THREAD_FENCE(order, ...)                                      \
static FORCE_INLINE void Baselib_atomic_thread_fence_##order(void)           \
{                                                                            \
    __extension__({__atomic_thread_fence (detail_intrinsic_##order); });     \
}                                                                            \

// Expands to Baselib_atomic_<op>_<id>_<order>_v(obj, result) for loads.
#define detail_LOAD(op, order, id , bits, int_type, ...)                                                \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(const void* obj, void* result)        \
{                                                                                                       \
    __extension__({ __atomic_load((int_type*)obj, (int_type*)result, detail_intrinsic_##order); });     \
}

// Same as detail_LOAD but with a non-const obj (used for the 128-bit/ptr2x
// variants below).
#define detail_LOAD_NOT_CONST(op, order, id , bits, int_type, ...)                                      \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result)              \
{                                                                                                       \
    __extension__({ __atomic_load((int_type*)obj, (int_type*)result, detail_intrinsic_##order); });     \
}

#define detail_STORE(op, order, id , bits, int_type, ...)                                               \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value)         \
{                                                                                                       \
    __extension__({ __atomic_store((int_type*)obj, (int_type*)value, detail_intrinsic_##order); });     \
}

// Read-modify-write ops (add/and/or/xor); result receives the previous value.
#define detail_ALU(op, order, id , bits, int_type, ...)                                                     \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
{                                                                                                           \
    *(int_type*)result = __extension__({ __atomic_##op((int_type*)obj, *(int_type*)value, detail_ldst_intrinsic_##order); });\
    detail_AARCH64_SEQCST_PATCH_BARRIER_##order;                                                            \
}

#define detail_XCHG(op, order, id , bits, int_type, ...)                                                    \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
{                                                                                                           \
    __extension__({ __atomic_exchange((int_type*)obj, (int_type*)value, (int_type*)result, detail_ldst_intrinsic_##order); });\
    detail_AARCH64_SEQCST_PATCH_BARRIER_##order;                                                            \
}

// Weak compare-exchange: 4th builtin argument '1' selects the weak variant,
// which may fail spuriously.
#define detail_CMP_XCHG_WEAK(op, order1, order2, id , bits, int_type, ...)                                  \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
{                                                                                                           \
    detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value);   \
    bool result = __extension__({ __atomic_compare_exchange(                                                \
        (int_type*)obj,                                                                                     \
        (int_type*)expected,                                                                                \
        (int_type*)value,                                                                                   \
        1,                                                                                                  \
        detail_ldst_intrinsic_##order1,                                                                     \
        detail_ldst_intrinsic_##order2);                                                                    \
    });                                                                                                     \
    if (result) { detail_AARCH64_SEQCST_PATCH_BARRIER_##order1; }                                           \
    else { detail_AARCH64_SEQCST_PATCH_BARRIER_##order2;}                                                   \
    return result;                                                                                          \
}

// Strong compare-exchange: 4th builtin argument '0' selects the strong
// variant (no spurious failure).
#define detail_CMP_XCHG_STRONG(op, order1, order2, id , bits, int_type, ...)                                \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
{                                                                                                           \
    detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value); \
    bool result = __extension__ ({ __atomic_compare_exchange(                                               \
        (int_type*)obj,                                                                                     \
        (int_type*)expected,                                                                                \
        (int_type*)value,                                                                                   \
        0,                                                                                                  \
        detail_ldst_intrinsic_##order1,                                                                     \
        detail_ldst_intrinsic_##order2);                                                                    \
    });                                                                                                     \
    if (result) { detail_AARCH64_SEQCST_PATCH_BARRIER_##order1; }                                           \
    else { detail_AARCH64_SEQCST_PATCH_BARRIER_##order2;}                                                   \
    return result;                                                                                          \
}

// Placeholder for op/type combinations that have no implementation.
#define detail_NOT_SUPPORTED(...)

Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
    detail_THREAD_FENCE
)

Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE(
    detail_LOAD,            // load
    detail_STORE,           // store
    detail_ALU,             // add
    detail_ALU,             // and
    detail_ALU,             // or
    detail_ALU,             // xor
    detail_XCHG,            // exchange
    detail_CMP_XCHG_WEAK,   // compare_exchange_weak
    detail_CMP_XCHG_STRONG, // compare_exchange_strong
)

#if PLATFORM_ARCH_64

// 128-bit and double-pointer-width operations (64-bit targets).
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
    detail_LOAD_NOT_CONST,  // load
    detail_STORE,           // store
    detail_NOT_SUPPORTED,   // add
    detail_NOT_SUPPORTED,   // and
    detail_NOT_SUPPORTED,   // or
    detail_NOT_SUPPORTED,   // xor
    detail_XCHG,            // exchange
    detail_CMP_XCHG_WEAK,   // compare_exchange_weak
    detail_CMP_XCHG_STRONG, // compare_exchange_strong
    128, 128, __int128      // type information
)

Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
    detail_LOAD_NOT_CONST,  // load
    detail_STORE,           // store
    detail_NOT_SUPPORTED,   // add
    detail_NOT_SUPPORTED,   // and
    detail_NOT_SUPPORTED,   // or
    detail_NOT_SUPPORTED,   // xor
    detail_XCHG,            // exchange
    detail_CMP_XCHG_WEAK,   // compare_exchange_weak
    detail_CMP_XCHG_STRONG, // compare_exchange_strong
    ptr2x, 128, __int128    // type information
)
#else

// Double-pointer-width operations map to 64-bit on 32-bit targets.
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
    detail_LOAD_NOT_CONST,  // load
    detail_STORE,           // store
    detail_NOT_SUPPORTED,   // add
    detail_NOT_SUPPORTED,   // and
    detail_NOT_SUPPORTED,   // or
    detail_NOT_SUPPORTED,   // xor
    detail_XCHG,            // exchange
    detail_CMP_XCHG_WEAK,   // compare_exchange_weak
    detail_CMP_XCHG_STRONG, // compare_exchange_strong
    ptr2x, 64, int64_t      // type information
)

#endif

#undef detail_intrinsic_relaxed
#undef detail_intrinsic_acquire
#undef detail_intrinsic_release
#undef detail_intrinsic_acq_rel
#undef detail_intrinsic_seq_cst

#undef detail_THREAD_FENCE
#undef detail_LOAD
#undef detail_LOAD_NOT_CONST
#undef detail_STORE
#undef detail_ALU
#undef detail_XCHG
#undef detail_CMP_XCHG_WEAK
#undef detail_CMP_XCHG_STRONG
#undef detail_NOT_SUPPORTED

#include "Baselib_Atomic_Gcc_Apple_LLVM_Patch_PostInclude.h"
|
||||
142
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_Gcc_Apple_LLVM_Patch.h
vendored
Normal file
142
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_Gcc_Apple_LLVM_Patch.h
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
#pragma once
|
||||
|
||||
#if PLATFORM_USE_APPLE_LLVM_ATOMIC_CMPXCHG_128_PATCH
|
||||
|
||||
//
|
||||
// Patch for Apple LLVM version 8.x.x (clang-800.0.38 - clang-900.0.37) intrinsic 128-bit __atomic_compare_exchange implementation (debug, using opt level -O0).
|
||||
// Note that this patch is only in effect on tvOS/iOS AArch64 debug builds for Apple LLVM version 8.x.x. Arm32 verified working without patch.
|
||||
//
|
||||
// Problem:
|
||||
// For the above builds, the __atomic_compare_exchange asm expasion used SUBS/SBCS to compare the pair of "obj" and "expected" values.
|
||||
// SUBS/SBCS does not provide sufficient NZCV flags for comparing two 64-bit values.
|
||||
// The result is erraneous comparison of "obj" and "expected". Some examples:
|
||||
//
|
||||
// -- fails (lo != lo && hi == hi)
|
||||
// obj.lo = 5;
|
||||
// obj.hi = 10;
|
||||
// expected.lo = 3;
|
||||
// expected.hi = 10;
|
||||
//
|
||||
// -- works (expected.lo < 0)
|
||||
// obj.lo = 5;
|
||||
// obj.hi = 20;
|
||||
// expected.lo = -3;
|
||||
// expected.hi = 20;
|
||||
//
|
||||
// -- fails (obj.lo < 0 && hi == hi)
|
||||
// obj.lo = -5;
|
||||
// obj.hi = 30;
|
||||
// expected.lo = 3;
|
||||
// expected.hi = 30;
|
||||
//
|
||||
// -- fails (expected.lo < 0 && obj.hi+1 == expected.hi)
|
||||
// obj.lo = 5;
|
||||
// obj.hi = 3;
|
||||
// expected.lo = -3;
|
||||
// expected.hi = 2;
|
||||
//
|
||||
// Solution: Inline assembly replacement of __atomic_compare_exchange using the same approach as in release mode
|
||||
//
|
||||
// Note: This patch should be removed in it's entirety once we require Apple LLVM version 9 (clang-900.0.37) or higher for building.
|
||||
//
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ld_instr, st_instr, barrier_instr) \
|
||||
{ \
|
||||
register bool result asm ("w0"); \
|
||||
asm volatile \
|
||||
( \
|
||||
" ldp x12, x13, [%x4] ; load expected \n" \
|
||||
" ldp x10, x11, [%x5] ; load value \n" \
|
||||
" " #ld_instr " x9, x8, [%x3] ; load obj \n" \
|
||||
" eor x13, x8, x13 ; compare to expected \n" \
|
||||
" eor x12, x9, x12 \n" \
|
||||
" orr x12, x12, x13 \n" \
|
||||
" cbnz x12, 0f ; not equal = no store \n" \
|
||||
" " #st_instr " w12, x10, x11, [%x0] ; try store \n" \
|
||||
" cbnz w12, 1f \n" \
|
||||
" orr w0, wzr, #0x1 ; success, result in w0 \n" \
|
||||
" b 2f \n" \
|
||||
"0: ; no store \n" \
|
||||
" clrex \n" \
|
||||
"1: ; failed store \n" \
|
||||
" movz w0, #0 \n" \
|
||||
"2: ; store expected, fail \n" \
|
||||
" tbnz w0, #0, 3f \n" \
|
||||
" stp x9, x8, [%x1] \n" \
|
||||
"3: \n" \
|
||||
" " #barrier_instr " \n" \
|
||||
\
|
||||
: "+r" (obj), "+r" (expected), "=r" (result) \
|
||||
: "r" (obj), "r" (expected), "r" (value) \
|
||||
: "x8", "x9", "x10", "x11", "x12", "x13", "cc", "memory"); \
|
||||
\
|
||||
return result != 0; \
|
||||
}
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_relaxed_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldxp, stxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_release_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldxp, stlxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, dmb ish)
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, dmb ish)
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_seq_cst(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, dmb ish)
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ld_instr, st_instr, barrier_instr) \
|
||||
{ \
|
||||
register bool result asm ("w0"); \
|
||||
asm volatile \
|
||||
( \
|
||||
" ldp x10, x11, [%x4] ; load expected \n" \
|
||||
" ldp x12, x13, [%x5] ; load value \n" \
|
||||
"0: \n" \
|
||||
" " #ld_instr " x9, x8, [%x3] ; load obj (ldxp/ldaxp) \n" \
|
||||
" eor x14, x8, x11 ; compare to expected \n" \
|
||||
" eor x15, x9, x10 \n" \
|
||||
" orr x14, x15, x14 \n" \
|
||||
" cbnz x14, 1f ; not equal = no store \n" \
|
||||
" " #st_instr " w14, x12, x13, [%x0] ; try store (stxp/stlxp) \n" \
|
||||
" cbnz w14, 0b ; retry or store result in w0 \n" \
|
||||
" orr w0, wzr, #0x1 \n" \
|
||||
" b 2f \n" \
|
||||
"1: ; no store \n" \
|
||||
" movz w0, #0 \n" \
|
||||
" clrex \n" \
|
||||
"2: ; store expected on fail \n" \
|
||||
" tbnz w0, #0, 3f \n" \
|
||||
" stp x9, x8, [%x1] \n" \
|
||||
"3: \n" \
|
||||
" " #barrier_instr " \n" \
|
||||
\
|
||||
: "+r" (obj), "+r" (expected), "=r" (result) \
|
||||
: "r" (obj), "r" (expected), "r" (value) \
|
||||
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "cc", "memory"); \
|
||||
\
|
||||
return result != 0; \
|
||||
}
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_relaxed_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldxp, stxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_release_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldxp, stlxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, )
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, dmb ish)
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, dmb ish)
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_seq_cst(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, dmb ish)
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value) \
|
||||
if(sizeof(int_type) == 16) \
|
||||
detail_APPLE_LLVM_CMP_XCHG_WEAK_128_##order1##_##order2(obj, expected, value);
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value) \
|
||||
if(sizeof(int_type) == 16) \
|
||||
detail_APPLE_LLVM_CMP_XCHG_STRONG_128_##order1##_##order2(obj, expected, value);
|
||||
|
||||
#else // PLATFORM_USE_APPLE_LLVM_ATOMIC_CMPXCHG_128_PATCH
|
||||
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH(...)
|
||||
#define detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH(...)
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,30 @@
|
||||
#pragma once
|
||||
|
||||
#if PLATFORM_USE_APPLE_LLVM_ATOMIC_CMPXCHG_128_PATCH
|
||||
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_relaxed_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_acquire
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_release_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_acquire
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_acquire
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_seq_cst
|
||||
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_relaxed_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_acquire
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_release_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_acquire
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_relaxed
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_acquire
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_seq_cst
|
||||
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH
|
||||
#undef detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH
|
||||
|
||||
#endif
|
||||
40
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_LLSC_Gcc.inl.h
vendored
Normal file
40
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_LLSC_Gcc.inl.h
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
#pragma once
|
||||
|
||||
// Arm exlusive state access break implementation
|
||||
#define detail_Baselib_atomic_llsc_break() __builtin_arm_clrex()
|
||||
|
||||
// Arm exlusive LLSC implementation using intrinsics.
|
||||
#define detail_Baselib_atomic_llsc_arm_ts(obj, expected, value, code, ll_instr, sc_instr, load_barrier, store_barrier) \
|
||||
do { \
|
||||
do { \
|
||||
*expected = __builtin_arm_##ll_instr(obj); \
|
||||
load_barrier; \
|
||||
code; \
|
||||
} while (__builtin_arm_##sc_instr(*value, obj)); \
|
||||
store_barrier; \
|
||||
} while (false)
|
||||
|
||||
#define detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ll_instr, sc_instr, loadbarrier, storebarrier) \
|
||||
detail_Baselib_atomic_llsc_arm_ts((int_type*)((void*)obj), \
|
||||
(int_type*)((void*)expected), \
|
||||
(int_type*)((void*)value), \
|
||||
code, ll_instr, sc_instr, loadbarrier, storebarrier)
|
||||
|
||||
#define detail_Baselib_atomic_llsc_relaxed_relaxed_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, ,)
|
||||
#if PLATFORM_ARCH_64
|
||||
#define detail_Baselib_atomic_llsc_acquire_relaxed_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldaex, strex, ,)
|
||||
#define detail_Baselib_atomic_llsc_relaxed_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, stlex, ,)
|
||||
#define detail_Baselib_atomic_llsc_acquire_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldaex, stlex, ,)
|
||||
#define detail_Baselib_atomic_llsc_seq_cst_seq_cst_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldaex, stlex, , __builtin_arm_dmb(11) )
|
||||
#else
|
||||
#define detail_Baselib_atomic_llsc_acquire_relaxed_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, __builtin_arm_dmb(11), )
|
||||
#define detail_Baselib_atomic_llsc_relaxed_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, ,__builtin_arm_dmb(11) )
|
||||
#define detail_Baselib_atomic_llsc_acquire_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, __builtin_arm_dmb(11) , __builtin_arm_dmb(11) )
|
||||
#define detail_Baselib_atomic_llsc_seq_cst_seq_cst_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, __builtin_arm_dmb(11) , __builtin_arm_dmb(11) )
|
||||
#endif
|
||||
|
||||
#define detail_Baselib_atomic_llsc_v(obj, expected, value, code, size, loadbarrier, storebarrier) \
|
||||
detail_Baselib_atomic_llsc_##loadbarrier##_##storebarrier##_v(obj, expected, value, code, int##size##_t)
|
||||
|
||||
#define detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, loadbarrier, storebarrier) \
|
||||
detail_Baselib_atomic_llsc_##loadbarrier##_##storebarrier##_v(obj, expected, value, code, __int128)
|
||||
358
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_Msvc.h
vendored
Normal file
358
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_Msvc.h
vendored
Normal file
@@ -0,0 +1,358 @@
|
||||
#pragma once
|
||||
|
||||
#include "../../../C/Baselib_Atomic.h"
|
||||
#include "../../../C/Baselib_Atomic_Macros.h"
|
||||
|
||||
#include "Baselib_Atomic_MsvcIntrinsics.h"
|
||||
|
||||
#define detail_relaxed_relaxed(...) __VA_ARGS__
|
||||
#define detail_relaxed_acquire(...)
|
||||
#define detail_relaxed_release(...)
|
||||
#define detail_relaxed_acq_rel(...)
|
||||
#define detail_relaxed_seq_cst(...)
|
||||
#define detail_acquire_relaxed(...)
|
||||
#define detail_acquire_acquire(...) __VA_ARGS__
|
||||
#define detail_acquire_release(...)
|
||||
#define detail_acquire_acq_rel(...)
|
||||
#define detail_acquire_seq_cst(...)
|
||||
#define detail_release_relaxed(...)
|
||||
#define detail_release_acquire(...)
|
||||
#define detail_release_release(...) __VA_ARGS__
|
||||
#define detail_release_acq_rel(...)
|
||||
#define detail_release_seq_cst(...)
|
||||
#define detail_acq_rel_relaxed(...)
|
||||
#define detail_acq_rel_acquire(...)
|
||||
#define detail_acq_rel_release(...)
|
||||
#define detail_acq_rel_acq_rel(...) __VA_ARGS__
|
||||
#define detail_acq_rel_seq_cst(...)
|
||||
#define detail_seq_cst_relaxed(...)
|
||||
#define detail_seq_cst_acquire(...)
|
||||
#define detail_seq_cst_release(...)
|
||||
#define detail_seq_cst_acq_rel(...)
|
||||
#define detail_seq_cst_seq_cst(...) __VA_ARGS__
|
||||
|
||||
|
||||
#define detail_relaxed(memory_order, ...) detail_relaxed_##memory_order(__VA_ARGS__)
|
||||
#define detail_acquire(memory_order, ...) detail_acquire_##memory_order(__VA_ARGS__)
|
||||
#define detail_release(memory_order, ...) detail_release_##memory_order(__VA_ARGS__)
|
||||
#define detail_acq_rel(memory_order, ...) detail_acq_rel_##memory_order(__VA_ARGS__)
|
||||
#define detail_seq_cst(memory_order, ...) detail_seq_cst_##memory_order(__VA_ARGS__)
|
||||
|
||||
// Intel
|
||||
// ------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
#if defined(_M_IX86) || defined(_M_X64)
|
||||
|
||||
#define detail_intrinsic_relaxed
|
||||
#define detail_intrinsic_acquire
|
||||
#define detail_intrinsic_release
|
||||
#define detail_intrinsic_acq_rel
|
||||
#define detail_intrinsic_seq_cst
|
||||
|
||||
#if defined(_M_X64)
|
||||
|
||||
#define detail_THREAD_FENCE(order, ...) \
|
||||
static COMPILER_FORCEINLINE void Baselib_atomic_thread_fence_##order() \
|
||||
{ \
|
||||
detail_acquire(order, _ReadWriteBarrier()); \
|
||||
detail_release(order, _ReadWriteBarrier()); \
|
||||
detail_acq_rel(order, _ReadWriteBarrier()); \
|
||||
detail_seq_cst(order, __faststorefence()); \
|
||||
}
|
||||
|
||||
#else // #defined(_M_IX86)
|
||||
|
||||
#define detail_THREAD_FENCE(order, ...) \
|
||||
static COMPILER_FORCEINLINE void Baselib_atomic_thread_fence_##order() \
|
||||
{ \
|
||||
detail_acquire(order, _ReadWriteBarrier()); \
|
||||
detail_release(order, _ReadWriteBarrier()); \
|
||||
detail_acq_rel(order, _ReadWriteBarrier()); \
|
||||
detail_seq_cst(order, _ReadWriteBarrier(); __int32 temp = 0; _InterlockedExchange32(&temp, 0); _ReadWriteBarrier()); \
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#define detail_LOAD_BITS_8(obj, result) *(__int8*)result = *(const volatile __int8*)obj
|
||||
#define detail_LOAD_BITS_16(obj, result) *(__int16*)result = *(const volatile __int16*)obj
|
||||
#define detail_LOAD_BITS_32(obj, result) *(__int32*)result = *(const volatile __int32*)obj
|
||||
#if PLATFORM_ARCH_64
|
||||
#define detail_LOAD_BITS_64(obj, result) *(__int64*)result = *(const volatile __int64*)obj
|
||||
#else
|
||||
// x86 32-bit load/store 64-bit integer.
|
||||
// - SSE2 enabled yields (identical to __mm_store/load):
|
||||
// movsd xmm0, QWORD PTR unsigned __int64 obj
|
||||
// movsd QWORD PTR unsigned __int64 result, xmm0
|
||||
// - No SSE2 enabled yields:
|
||||
// fld QWORD PTR unsigned __int64 obj
|
||||
// fstp QWORD PTR unsigned __int64 result
|
||||
// Link comparing various implementations: https://godbolt.org/z/T3zW5M
|
||||
#define detail_LOAD_BITS_64(obj, result) *(double*)result = *(const volatile double*)obj
|
||||
#endif
|
||||
|
||||
#define detail_LOAD(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(const void* obj, void* result) \
|
||||
{ \
|
||||
detail_LOAD_BITS_##bits(obj, result); \
|
||||
detail_acquire(order, _ReadWriteBarrier()); \
|
||||
detail_seq_cst(order, _ReadWriteBarrier()); \
|
||||
}
|
||||
|
||||
#define detail_LOAD_NOT_CONST(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
|
||||
{ \
|
||||
detail_LOAD_BITS_##bits(obj, result); \
|
||||
detail_acquire(order, _ReadWriteBarrier()); \
|
||||
detail_seq_cst(order, _ReadWriteBarrier()); \
|
||||
}
|
||||
|
||||
#define detail_STORE_BITS_8(obj, value) *(volatile __int8*)obj = *(const __int8*)value
|
||||
#define detail_STORE_BITS_16(obj, value) *(volatile __int16*)obj = *(const __int16*)value
|
||||
#define detail_STORE_BITS_32(obj, value) *(volatile __int32*)obj = *(const __int32*)value
|
||||
#if PLATFORM_ARCH_64
|
||||
#define detail_STORE_BITS_64(obj, value) *(volatile __int64*)obj = *(const __int64*)value
|
||||
#else
|
||||
#define detail_STORE_BITS_64(obj, value) *(volatile double*)obj = *(double*)value
|
||||
#endif
|
||||
|
||||
#define detail_STORE(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
|
||||
{ \
|
||||
detail_relaxed(order, detail_STORE_BITS_##bits(obj, value)); \
|
||||
detail_release(order, detail_STORE_BITS_##bits(obj, value); _ReadWriteBarrier()); \
|
||||
detail_seq_cst(order, _InterlockedExchange##bits((__int##bits*)obj, *(const __int##bits*)value)); \
|
||||
}
|
||||
|
||||
// ARM
|
||||
// ------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
#elif defined(_M_ARM) || defined(_M_ARM64)
|
||||
|
||||
#define detail_intrinsic_relaxed _nf
|
||||
#define detail_intrinsic_acquire _acq
|
||||
#define detail_intrinsic_release _rel
|
||||
#define detail_intrinsic_acq_rel
|
||||
#define detail_intrinsic_seq_cst
|
||||
|
||||
#define detail_THREAD_FENCE(order, ...) \
|
||||
static COMPILER_FORCEINLINE void Baselib_atomic_thread_fence_##order() \
|
||||
{ \
|
||||
detail_acquire(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
detail_release(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
detail_acq_rel(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
}
|
||||
|
||||
#define detail_LOAD(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(const void* obj, void* result) \
|
||||
{ \
|
||||
*(__int##bits*)result = __iso_volatile_load##bits((const __int##bits*)obj); \
|
||||
detail_acquire(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
}
|
||||
|
||||
#define detail_LOAD_NOT_CONST(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
|
||||
{ \
|
||||
*(__int##bits*)result = __iso_volatile_load##bits((const __int##bits*)obj); \
|
||||
detail_acquire(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
}
|
||||
|
||||
#define detail_STORE(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
|
||||
{ \
|
||||
detail_release(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
__iso_volatile_store##bits((__int##bits*) obj, *(const __int##bits*)value); \
|
||||
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
// Common
|
||||
// ------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
#define detail_intrinsic_exchange _InterlockedExchange
|
||||
#define detail_intrinsic_fetch_add _InterlockedExchangeAdd
|
||||
#define detail_intrinsic_fetch_and _InterlockedAnd
|
||||
#define detail_intrinsic_fetch_or _InterlockedOr
|
||||
#define detail_intrinsic_fetch_xor _InterlockedXor
|
||||
|
||||
#define detail_LOAD_STORE(op, order, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
|
||||
{ \
|
||||
*(__int##bits##*)result = PP_CONCAT(detail_intrinsic_##op, bits, detail_intrinsic_##order)((__int##bits##*)obj, *(const __int##bits##*)value); \
|
||||
}
|
||||
|
||||
#define detail_CMP_XCHG(op, order1, order2, id , bits, int_type, ...) \
|
||||
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
|
||||
{ \
|
||||
__int##bits cmp = *(__int##bits##*)expected; \
|
||||
__int##bits result = PP_CONCAT(_InterlockedCompareExchange, bits, detail_intrinsic_##order1)((__int##bits##*)obj, *(__int##bits##*)value, cmp); \
|
||||
return result == cmp ? true : (*(__int##bits##*)expected = result, false); \
|
||||
}
|
||||
|
||||
#define detail_NOT_SUPPORTED(...)
|
||||
|
||||
// Setup implementation
|
||||
// ------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
|
||||
detail_THREAD_FENCE
|
||||
)
|
||||
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE(
|
||||
detail_LOAD, // load
|
||||
detail_STORE, // store
|
||||
detail_LOAD_STORE, // add
|
||||
detail_LOAD_STORE, // and
|
||||
detail_LOAD_STORE, // or
|
||||
detail_LOAD_STORE, // xor
|
||||
detail_LOAD_STORE, // exchange
|
||||
detail_CMP_XCHG, // compare_exchange_weak
|
||||
detail_CMP_XCHG // compare_exchange_strong
|
||||
)
|
||||
|
||||
#if PLATFORM_ARCH_64
|
||||
|
||||
// 128-bit implementation
|
||||
// There are more efficient ways of doing load, store and exchange on Arm64. Unfortunately MSVC doesn't provide intrinsics for those. The specific
|
||||
// instructions needed to perform atomic load, store and exchange are also not available on MSVC.
|
||||
// Hence we fallback to cmpxchg for all atomic ops.
|
||||
// ------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
#define detail_LOAD128(op, order, id, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
|
||||
{ \
|
||||
Baselib_atomic_compare_exchange_weak_128_##order##_##order##_v((void*)obj, result, result); \
|
||||
}
|
||||
|
||||
#define detail_STORE128(op, order, id, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
|
||||
{ \
|
||||
uint64_t comparand[2] = { ((volatile uint64_t*)obj)[0], ((volatile uint64_t*)obj)[1] }; \
|
||||
while(!Baselib_atomic_compare_exchange_weak_128_##order##_relaxed_v(obj, comparand, value)) \
|
||||
; \
|
||||
}
|
||||
|
||||
#define detail_XCHG128(op, order, id, ...) \
|
||||
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
|
||||
{ \
|
||||
((uint64_t*)result)[0] = ((volatile uint64_t*)obj)[0]; \
|
||||
((uint64_t*)result)[1] = ((volatile uint64_t*)obj)[1]; \
|
||||
while(!Baselib_atomic_compare_exchange_weak_128_##order##_relaxed_v(obj, result, value)) \
|
||||
; \
|
||||
}
|
||||
|
||||
#define detail_CMP_XCHG128(op, order1, order2, id, ...) \
|
||||
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
|
||||
{ \
|
||||
return PP_CONCAT(_InterlockedCompareExchange128, detail_intrinsic_##order1)( \
|
||||
(__int64*)obj, \
|
||||
((const __int64*)value)[1], \
|
||||
((const __int64*)value)[0], \
|
||||
(__int64*)expected \
|
||||
) == 1; \
|
||||
}
|
||||
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
|
||||
detail_LOAD128, // load
|
||||
detail_STORE128, // store
|
||||
detail_NOT_SUPPORTED, // add
|
||||
detail_NOT_SUPPORTED, // and
|
||||
detail_NOT_SUPPORTED, // or
|
||||
detail_NOT_SUPPORTED, // xor
|
||||
detail_XCHG128, // exchange
|
||||
detail_CMP_XCHG128, // compare_exchange_weak
|
||||
detail_CMP_XCHG128, // compare_exchange_strong
|
||||
128
|
||||
)
|
||||
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
|
||||
detail_LOAD128, // load
|
||||
detail_STORE128, // store
|
||||
detail_NOT_SUPPORTED, // add
|
||||
detail_NOT_SUPPORTED, // and
|
||||
detail_NOT_SUPPORTED, // or
|
||||
detail_NOT_SUPPORTED, // xor
|
||||
detail_XCHG128, // exchange
|
||||
detail_CMP_XCHG128, // compare_exchange_weak
|
||||
detail_CMP_XCHG128, // compare_exchange_strong
|
||||
ptr2x
|
||||
)
|
||||
|
||||
#undef detail_LOAD128
|
||||
#undef detail_STORE128
|
||||
#undef detail_XCHG128
|
||||
#undef detail_CMP_XCHG128
|
||||
|
||||
#else
|
||||
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
|
||||
detail_LOAD_NOT_CONST, // load
|
||||
detail_STORE, // store
|
||||
detail_NOT_SUPPORTED, // add
|
||||
detail_NOT_SUPPORTED, // and
|
||||
detail_NOT_SUPPORTED, // or
|
||||
detail_NOT_SUPPORTED, // xor
|
||||
detail_LOAD_STORE, // exchange
|
||||
detail_CMP_XCHG, // compare_exchange_weak
|
||||
detail_CMP_XCHG, // compare_exchange_strong
|
||||
ptr2x, 64, int64_t
|
||||
)
|
||||
|
||||
#endif
|
||||
|
||||
#undef detail_THREAD_FENCE
|
||||
#undef detail_LOAD
|
||||
#undef detail_LOAD_NOT_CONST
|
||||
#undef detail_STORE
|
||||
#undef detail_LOAD_STORE
|
||||
#undef detail_CMP_XCHG
|
||||
#undef detail_NOT_SUPPORTED
|
||||
|
||||
#undef detail_LOAD_BITS_8
|
||||
#undef detail_LOAD_BITS_16
|
||||
#undef detail_LOAD_BITS_32
|
||||
#undef detail_LOAD_BITS_64
|
||||
#undef detail_STORE_BITS_8
|
||||
#undef detail_STORE_BITS_16
|
||||
#undef detail_STORE_BITS_32
|
||||
#undef detail_STORE_BITS_64
|
||||
|
||||
#undef detail_intrinsic_exchange
|
||||
#undef detail_intrinsic_fetch_add
|
||||
#undef detail_intrinsic_fetch_and
|
||||
#undef detail_intrinsic_fetch_or
|
||||
#undef detail_intrinsic_fetch_xor
|
||||
|
||||
#undef detail_relaxed_relaxed
|
||||
#undef detail_relaxed_acquire
|
||||
#undef detail_relaxed_release
|
||||
#undef detail_relaxed_acq_rel
|
||||
#undef detail_relaxed_seq_cst
|
||||
#undef detail_acquire_relaxed
|
||||
#undef detail_acquire_acquire
|
||||
#undef detail_acquire_release
|
||||
#undef detail_acquire_acq_rel
|
||||
#undef detail_acquire_seq_cst
|
||||
#undef detail_release_relaxed
|
||||
#undef detail_release_acquire
|
||||
#undef detail_release_release
|
||||
#undef detail_release_acq_rel
|
||||
#undef detail_release_seq_cst
|
||||
#undef detail_acq_rel_relaxed
|
||||
#undef detail_acq_rel_acquire
|
||||
#undef detail_acq_rel_release
|
||||
#undef detail_acq_rel_acq_rel
|
||||
#undef detail_acq_rel_seq_cst
|
||||
#undef detail_seq_cst_relaxed
|
||||
#undef detail_seq_cst_acquire
|
||||
#undef detail_seq_cst_release
|
||||
#undef detail_seq_cst_acq_rel
|
||||
#undef detail_seq_cst_seq_cst
|
||||
|
||||
#undef detail_relaxed
|
||||
#undef detail_acquire
|
||||
#undef detail_release
|
||||
#undef detail_acq_rel
|
||||
#undef detail_seq_cst
|
||||
58
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_MsvcIntrinsics.h
vendored
Normal file
58
Libraries/external/baselib/Include/C/Internal/Compiler/Baselib_Atomic_MsvcIntrinsics.h
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
#pragma once
|
||||
|
||||
#include <intrin.h>
|
||||
|
||||
#ifndef _ARM_BARRIER_ISH
|
||||
#define _ARM_BARRIER_ISH 0xB
|
||||
#endif
|
||||
|
||||
#define _InterlockedCompareExchange32(obj, value, exp) _InterlockedCompareExchange((long*)obj, value, exp)
|
||||
#define _InterlockedCompareExchange32_nf(obj, value, exp) _InterlockedCompareExchange_nf((long*)obj, value, exp)
|
||||
#define _InterlockedCompareExchange32_acq(obj, value, exp) _InterlockedCompareExchange_acq((long*)obj, value, exp)
|
||||
#define _InterlockedCompareExchange32_rel(obj, value, exp) _InterlockedCompareExchange_rel((long*)obj, value, exp)
|
||||
#define _InterlockedExchange32(obj, value) _InterlockedExchange((long*)obj, value)
|
||||
#define _InterlockedExchange32_nf(obj, value) _InterlockedExchange_nf((long*)obj, value)
|
||||
#define _InterlockedExchange32_acq(obj, value) _InterlockedExchange_acq((long*)obj, value)
|
||||
#define _InterlockedExchange32_rel(obj, value) _InterlockedExchange_rel((long*)obj, value)
|
||||
#define _InterlockedExchangeAdd32(obj, value) _InterlockedExchangeAdd((long*)obj, value)
|
||||
#define _InterlockedExchangeAdd32_nf(obj, value) _InterlockedExchangeAdd_nf((long*)obj, value)
|
||||
#define _InterlockedExchangeAdd32_acq(obj, value) _InterlockedExchangeAdd_acq((long*)obj, value)
|
||||
#define _InterlockedExchangeAdd32_rel(obj, value) _InterlockedExchangeAdd_rel((long*)obj, value)
|
||||
#define _InterlockedAnd32(obj, value) _InterlockedAnd((long*)obj, value)
|
||||
#define _InterlockedAnd32_nf(obj, value) _InterlockedAnd_nf((long*)obj, value)
|
||||
#define _InterlockedAnd32_acq(obj, value) _InterlockedAnd_acq((long*)obj, value)
|
||||
#define _InterlockedAnd32_rel(obj, value) _InterlockedAnd_rel((long*)obj, value)
|
||||
#define _InterlockedOr32(obj, value) _InterlockedOr((long*)obj, value)
|
||||
#define _InterlockedOr32_nf(obj, value) _InterlockedOr_nf((long*)obj, value)
|
||||
#define _InterlockedOr32_acq(obj, value) _InterlockedOr_acq((long*)obj, value)
|
||||
#define _InterlockedOr32_rel(obj, value) _InterlockedOr_rel((long*)obj, value)
|
||||
#define _InterlockedXor32(obj, value) _InterlockedXor((long*)obj, value)
|
||||
#define _InterlockedXor32_nf(obj, value) _InterlockedXor_nf((long*)obj, value)
|
||||
#define _InterlockedXor32_acq(obj, value) _InterlockedXor_acq((long*)obj, value)
|
||||
#define _InterlockedXor32_rel(obj, value) _InterlockedXor_rel((long*)obj, value)
|
||||
|
||||
// Use cmp_xchg on x86 to emulate 64 bit exchange and alu ops
|
||||
#if defined(_M_IX86)
|
||||
|
||||
#undef _InterlockedExchange64
|
||||
#undef _InterlockedExchangeAdd64
|
||||
#undef _InterlockedOr64
|
||||
#undef _InterlockedAnd64
|
||||
#undef _InterlockedXor64
|
||||
|
||||
#define detail_CAS_OP(_name, ...) \
|
||||
static __forceinline __int64 _name(__int64* obj, __int64 value) \
|
||||
{ \
|
||||
__int64 p1, p2 = *obj; \
|
||||
do { p1 = p2; p2 = _InterlockedCompareExchange64(obj, (__VA_ARGS__), p1); } while (p1 != p2); \
|
||||
return p1; \
|
||||
}
|
||||
|
||||
detail_CAS_OP(_InterlockedExchange64, value);
|
||||
detail_CAS_OP(_InterlockedExchangeAdd64, p1 + value);
|
||||
detail_CAS_OP(_InterlockedOr64, p1 | value);
|
||||
detail_CAS_OP(_InterlockedAnd64, p1 & value);
|
||||
detail_CAS_OP(_InterlockedXor64, p1 ^ value);
|
||||
#undef detail_CAS_OP
|
||||
|
||||
#endif
|
||||
281
Libraries/external/baselib/Include/Cpp/Algorithm.h
vendored
Normal file
281
Libraries/external/baselib/Include/Cpp/Algorithm.h
vendored
Normal file
@@ -0,0 +1,281 @@
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
#include <limits>
|
||||
#include "Internal/TypeTraits.h"
|
||||
#include "Internal/Algorithm.inl.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
namespace Algorithm
|
||||
{
|
||||
// Index of the most significant bit in a 32bit mask. Returns -1 if no bits are set.
|
||||
inline int HighestBit(uint32_t value);
|
||||
// Index of the most significant bit in a 32bit mask of size_t value. Returns -1 if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
|
||||
inline int HighestBit(T value) { return HighestBit(static_cast<uint32_t>(value)); }
|
||||
|
||||
// Index of the most significant bit in a 64bit mask. Returns -1 if no bits are set.
|
||||
inline int HighestBit(uint64_t value);
|
||||
// Index of the most significant bit in a 64bit mask of size_t value. Returns -1 if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
|
||||
inline int HighestBit(T value) { return HighestBit(static_cast<uint64_t>(value)); }
|
||||
|
||||
// Index of the most significant bit in a 32bit mask. Unspecified result if no bits are set.
|
||||
inline int HighestBitNonZero(uint32_t value);
|
||||
// Index of the most significant bit in a 32bit mask of size_t value. Unspecified result if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
|
||||
inline int HighestBitNonZero(T value) { return HighestBitNonZero(static_cast<uint32_t>(value)); }
|
||||
|
||||
// Index of the most significant bit in a 64bit mask. Unspecified result if no bits are set.
|
||||
inline int HighestBitNonZero(uint64_t value);
|
||||
// Index of the most significant bit in a 64bit mask of size_t value. Unspecified result if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
|
||||
inline int HighestBitNonZero(T value) { return HighestBitNonZero(static_cast<uint64_t>(value)); }
|
||||
|
||||
// Index of the least significant bit in a 32bit mask. Returns -1 if no bits are set.
|
||||
inline int LowestBit(uint32_t value);
|
||||
// Index of the least significant bit in a 32bit mask of size_t value. Returns -1 if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
|
||||
inline int LowestBit(T value) { return LowestBit(static_cast<uint32_t>(value)); }
|
||||
|
||||
// Index of the least significant bit in a 64bit mask. Returns -1 if no bits are set.
|
||||
inline int LowestBit(uint64_t value);
|
||||
// Index of the least significant bit in a 64bit mask of size_t value. Returns -1 if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
|
||||
inline int LowestBit(T value) { return LowestBit(static_cast<uint64_t>(value)); }
|
||||
|
||||
// Index of the least significant bit in a 32bit mask. Unspecified result if no bits are set.
|
||||
inline int LowestBitNonZero(uint32_t value);
|
||||
// Index of the least significant bit in a 32bit mask of size_t value. Unspecified result if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
|
||||
inline int LowestBitNonZero(T value) { return LowestBitNonZero(static_cast<uint32_t>(value)); }
|
||||
|
||||
// Index of the least significant bit in a 64bit mask. Unspecified result if no bits are set.
|
||||
inline int LowestBitNonZero(uint64_t value);
|
||||
// Index of the least significant bit in a 64bit mask of size_t value. Unspecified result if no bits are set.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
|
||||
inline int LowestBitNonZero(T value) { return LowestBitNonZero(static_cast<uint64_t>(value)); }
|
||||
|
||||
// Returns number of set bits in a 64 bit mask.
|
||||
inline int BitsInMask(uint64_t mask);
|
||||
// Returns number of set bits in a 32 bit mask.
|
||||
inline int BitsInMask(uint32_t mask);
|
||||
// Returns number of set bits in a 16 bit mask.
|
||||
inline int BitsInMask(uint16_t mask);
|
||||
// Returns number os set bits in a 8 bit mask.
|
||||
inline int BitsInMask(uint8_t mask);
|
||||
|
||||
// Number of set bits (population count) in an array of known size.
|
||||
// Using Robert Harley and David Seal's algorithm from Hacker's Delight,
|
||||
// variant that does 4 words in a loop iteration.
|
||||
// http://www.hackersdelight.org/revisions.pdf
|
||||
// http://www.hackersdelight.org/HDcode/newCode/pop_arrayHS.cc
|
||||
template<typename WordT, int WordCount>
|
||||
inline int BitsInArray(const WordT* data)
|
||||
{
|
||||
#define HarleySealCSAStep(h, l, a, b, c) {\
|
||||
WordT u = a ^ b; \
|
||||
h = (a & b) | (u & c); l = u ^ c; \
|
||||
}
|
||||
WordT ones, twos, twosA, twosB, fours;
|
||||
|
||||
int i = 0;
|
||||
int tot = 0;
|
||||
twos = ones = 0;
|
||||
for (; i <= WordCount - 4; i = i + 4)
|
||||
{
|
||||
HarleySealCSAStep(twosA, ones, ones, data[i], data[i + 1])
|
||||
HarleySealCSAStep(twosB, ones, ones, data[i + 2], data[i + 3])
|
||||
HarleySealCSAStep(fours, twos, twos, twosA, twosB)
|
||||
tot = tot + BitsInMask(fours);
|
||||
}
|
||||
tot = 4 * tot + 2 * BitsInMask(twos) + BitsInMask(ones);
|
||||
|
||||
for (; i < WordCount; i++) // Simply add in the last
|
||||
tot = tot + BitsInMask(data[i]); // 0 to 3 elements.
|
||||
|
||||
return tot;
|
||||
#undef HarleySealCSAStep
|
||||
}
|
||||
|
||||
// Checks if one integers is a multiple of another.
|
||||
template<typename T>
|
||||
constexpr inline bool AreIntegersMultiple(T a, T b)
|
||||
{
|
||||
static_assert(std::is_integral<T>::value, "AreIntegersMultiple requires integral types.");
|
||||
return a != 0 && b != 0 && // if at least one integer is 0, consider false (avoid div by 0 of the following modulo)
|
||||
((a % b) == 0 || (b % a) == 0);
|
||||
}
|
||||
|
||||
// Checks if value is a power-of-two.
|
||||
template<typename T>
|
||||
constexpr inline bool IsPowerOfTwo(T value)
|
||||
{
|
||||
static_assert(std::is_integral<T>::value, "IsPowerOfTwo works only with an integral type.");
|
||||
using T_unsigned = typename std::make_unsigned<T>::type;
|
||||
return (static_cast<T_unsigned>(value) & (static_cast<T_unsigned>(value) - 1)) == 0;
|
||||
}
|
||||
|
||||
// Returns the next power-of-two of a 32bit number or the current value if it is a power two.
|
||||
constexpr inline uint32_t CeilPowerOfTwo(uint32_t value)
|
||||
{
|
||||
return detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(value - 1, 16),
|
||||
8),
|
||||
4),
|
||||
2),
|
||||
1) + 1;
|
||||
}
|
||||
|
||||
// Returns the next power-of-two of a 32bit number of size_t value, or the current value if it is a power two.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
|
||||
constexpr inline uint32_t CeilPowerOfTwo(T value) { return CeilPowerOfTwo(static_cast<uint32_t>(value)); }
|
||||
|
||||
// Returns the next power-of-two of a 64bit number or the current value if it is a power two.
|
||||
constexpr inline uint64_t CeilPowerOfTwo(uint64_t value)
|
||||
{
|
||||
return detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(
|
||||
detail::LogicalOrRShiftOp(value - 1, 32),
|
||||
16),
|
||||
8),
|
||||
4),
|
||||
2),
|
||||
1) + 1;
|
||||
}
|
||||
|
||||
// Returns the next power-of-two of a 64bit number of size_t value, or the current value if it is a power two.
|
||||
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
|
||||
constexpr inline uint64_t CeilPowerOfTwo(T value) { return CeilPowerOfTwo(static_cast<uint64_t>(value)); }
|
||||
|
||||
// Returns the closest power-of-two of a 32bit number.
|
||||
template<typename T>
|
||||
constexpr inline T RoundPowerOfTwo(T value)
|
||||
{
|
||||
static_assert(std::is_unsigned<T>::value, "RoundPowerOfTwo works only with an unsigned integral type.");
|
||||
return (value - (CeilPowerOfTwo(value) >> 1) < CeilPowerOfTwo(value) - value) ? CeilPowerOfTwo(value) >> 1 : CeilPowerOfTwo(value);
|
||||
}
|
||||
|
||||
// Returns the next value aligned to `alignment`, or the current value if it is already aligned.
|
||||
// `alignment` is required to be a power of two value or the result is undefined. Zero `alignment` returns zero.
|
||||
template<typename T>
|
||||
constexpr inline T CeilAligned(T value, uint64_t alignment)
|
||||
{
|
||||
static_assert(std::is_integral<T>::value, "CeilAligned works only with an integral type.");
|
||||
return static_cast<T>((static_cast<typename std::make_unsigned<T>::type>(value) + alignment - 1) & ~(alignment - 1));
|
||||
}
|
||||
|
||||
// Returns true if addition of two given operands leads to an integer overflow.
|
||||
template<typename T>
|
||||
constexpr inline bool DoesAdditionOverflow(T a, T b)
|
||||
{
|
||||
static_assert(std::is_unsigned<T>::value, "Overflow checks apply only work on unsigned integral types.");
|
||||
return std::numeric_limits<T>::max() - a < b;
|
||||
}
|
||||
|
||||
// Returns true if multiplication of two given operands leads to an integer overflow.
|
||||
template<typename T>
|
||||
constexpr inline bool DoesMultiplicationOverflow(T a, T b)
|
||||
{
|
||||
static_assert(std::is_unsigned<T>::value, "Overflow checks apply only work on unsigned integral types.");
|
||||
return b != 0 && std::numeric_limits<T>::max() / b < a;
|
||||
}
|
||||
|
||||
// Clamp
|
||||
//
|
||||
// This function can be used with different types - `value` vs. `lo`, `hi`.
|
||||
// If `lo` if larger than `hi` this function has undefined bahavior.
|
||||
//
|
||||
// Return: clamped `value` of the same type as `lo`, `hi`.
|
||||
//
|
||||
COMPILER_WARNINGS_PUSH
|
||||
#if COMPILER_MSVC
|
||||
COMPILER_WARNINGS_DISABLE(4756)
|
||||
#endif
|
||||
template<typename RT, typename VT, typename std::enable_if<
|
||||
baselib::is_of_same_signedness<RT, VT>::value
|
||||
|| !std::is_integral<RT>::value
|
||||
|| !std::is_integral<VT>::value
|
||||
, bool>::type = 0>
|
||||
inline RT Clamp(VT value, RT lo, RT hi)
|
||||
{
|
||||
if (value < lo) return lo;
|
||||
if (value > hi) return hi;
|
||||
return static_cast<RT>(value);
|
||||
}
|
||||
|
||||
COMPILER_WARNINGS_POP
|
||||
|
||||
template<typename RT, typename VT, typename std::enable_if<
|
||||
std::is_integral<RT>::value && std::is_unsigned<RT>::value &&
|
||||
std::is_integral<VT>::value && std::is_signed<VT>::value
|
||||
, bool>::type = 0>
|
||||
inline RT Clamp(VT value, RT lo, RT hi)
|
||||
{
|
||||
if (value < 0)
|
||||
return lo;
|
||||
using UnsignedVT = typename std::make_unsigned<VT>::type;
|
||||
return Clamp(static_cast<UnsignedVT>(value), lo, hi);
|
||||
}
|
||||
|
||||
template<typename RT, typename VT, typename std::enable_if<
|
||||
std::is_integral<RT>::value && std::is_signed<RT>::value &&
|
||||
std::is_integral<VT>::value && std::is_unsigned<VT>::value
|
||||
, bool>::type = 0>
|
||||
inline RT Clamp(VT value, RT lo, RT hi)
|
||||
{
|
||||
if (hi < 0)
|
||||
return hi;
|
||||
if (lo < 0)
|
||||
lo = 0;
|
||||
using UnsignedRT = typename std::make_unsigned<RT>::type;
|
||||
return static_cast<RT>(Clamp(value, static_cast<UnsignedRT>(lo), static_cast<UnsignedRT>(hi)));
|
||||
}
|
||||
|
||||
// Clamp `value` by lowest and highest value of RT.
|
||||
//
|
||||
// Return: clamped `value` of the type RT.
|
||||
//
|
||||
template<typename RT, typename VT, typename std::enable_if<
|
||||
!(std::numeric_limits<RT>::has_infinity && std::numeric_limits<VT>::has_infinity)
|
||||
, bool>::type = 0>
|
||||
inline RT ClampToType(VT value)
|
||||
{
|
||||
return Clamp(value, std::numeric_limits<RT>::lowest(), std::numeric_limits<RT>::max());
|
||||
}
|
||||
|
||||
// Clamp `value` by lowest and highest value of RT.
|
||||
//
|
||||
// This function is guaranteed to only return infinity values if the source value was already an infinity number.
|
||||
//
|
||||
// Return: clamped `value` of the type RT.
|
||||
//
|
||||
template<typename RT, typename VT, typename std::enable_if<
|
||||
(std::numeric_limits<RT>::has_infinity && std::numeric_limits<VT>::has_infinity)
|
||||
, bool>::type = 0>
|
||||
inline RT ClampToType(VT value)
|
||||
{
|
||||
if (value == std::numeric_limits<VT>::infinity() || value == -std::numeric_limits<VT>::infinity())
|
||||
return static_cast<RT>(value);
|
||||
return Clamp(value, std::numeric_limits<RT>::lowest(), std::numeric_limits<RT>::max());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if COMPILER_MSVC
|
||||
#include "Internal/Compiler/Msvc/AlgorithmMsvc.inl.h"
|
||||
#elif COMPILER_GCC || COMPILER_CLANG
|
||||
#include "Internal/Compiler/ClangOrGcc/AlgorithmClangOrGcc.inl.h"
|
||||
#else
|
||||
#error "Unknown Compiler"
|
||||
#endif
|
||||
449
Libraries/external/baselib/Include/Cpp/Atomic.h
vendored
Normal file
449
Libraries/external/baselib/Include/Cpp/Atomic.h
vendored
Normal file
@@ -0,0 +1,449 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Atomic.h"
|
||||
#include "Internal/TypeTraits.h"
|
||||
|
||||
// Note that aligning by type is not possible with the C compatible COMPILER_ALIGN_AS as MSVC's own alignment attribute does not allow evaluation of sizeof
|
||||
#define ALIGN_ATOMIC(TYPE_) alignas(sizeof(TYPE_))
|
||||
#define ALIGNED_ATOMIC(TYPE_) ALIGN_ATOMIC(TYPE_) TYPE_
|
||||
|
||||
// Atomic interface that sticks closely to std::atomic
|
||||
// Major differences:
|
||||
// * free functions that operate on types other than baselib::atomic
|
||||
// * baselib::atomic allows access to its internal value
|
||||
// * no zero initialization on baselib::atomic
|
||||
// * no single parameter versions of compare_exchange
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
enum memory_order_relaxed_t { memory_order_relaxed = 0 }; // Equal to std::memory_order_relaxed
|
||||
enum memory_order_acquire_t { memory_order_acquire = 2 }; // Equal to std::memory_order_acquire
|
||||
enum memory_order_release_t { memory_order_release = 3 }; // Equal to std::memory_order_release
|
||||
enum memory_order_acq_rel_t { memory_order_acq_rel = 4 }; // Equal to std::memory_order_acq_rel
|
||||
enum memory_order_seq_cst_t { memory_order_seq_cst = 5 }; // Equal to std::memory_order_seq_cst
|
||||
|
||||
namespace detail
|
||||
{
|
||||
template<typename T, typename ... Rest>
|
||||
struct is_any : std::false_type {};
|
||||
|
||||
template<typename T, typename First>
|
||||
struct is_any<T, First> : std::is_same<T, First> {};
|
||||
|
||||
template<typename T, typename First, typename ... Rest>
|
||||
struct is_any<T, First, Rest...>
|
||||
: std::integral_constant<bool, std::is_same<T, First>::value || is_any<T, Rest...>::value>
|
||||
{};
|
||||
|
||||
#define TEST_ATOMICS_PREREQUISITES(_TYPE) \
|
||||
static_assert(baselib::is_trivially_copyable<_TYPE>::value, "atomic operation operands needs to be trivially copyable"); \
|
||||
static_assert(sizeof(_TYPE) <= sizeof(void*) * 2, "atomic operation operands need to be smaller or equal than two pointers");
|
||||
|
||||
template<typename T> static inline T fail();
|
||||
|
||||
template<typename T, typename MemoryOrder, typename ... AllowedMemoryOrders> static inline T fail_prerequisites()
|
||||
{
|
||||
TEST_ATOMICS_PREREQUISITES(T);
|
||||
static_assert(is_any<MemoryOrder, AllowedMemoryOrders...>::value, "the specified memory ordering is invalid for this atomic operation");
|
||||
return fail<T>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure> static inline T fail_prerequisites_cmpxchg()
|
||||
{
|
||||
TEST_ATOMICS_PREREQUISITES(T);
|
||||
static_assert(
|
||||
// fail: relaxed, success: relaxed/acquire/release/seq_cst
|
||||
(std::is_same<MemoryOrderFailure, baselib::memory_order_relaxed_t>::value &&
|
||||
is_any<MemoryOrderSuccess, baselib::memory_order_relaxed_t, baselib::memory_order_acquire_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>::value) ||
|
||||
// fail: acquire, success acquire/release/seq_cst
|
||||
(std::is_same<MemoryOrderFailure, baselib::memory_order_relaxed_t>::value &&
|
||||
is_any<MemoryOrderSuccess, baselib::memory_order_acquire_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>::value) ||
|
||||
// fail: seq_cst, success: seq_cst
|
||||
(std::is_same<MemoryOrderSuccess, baselib::memory_order_seq_cst_t>::value && std::is_same<MemoryOrderFailure, baselib::memory_order_seq_cst_t>::value),
|
||||
"the specified combination of memory ordering is invalid for compare exchange operations");
|
||||
return fail<T>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder> static inline T fail_prerequisites_alu()
|
||||
{
|
||||
static_assert(std::is_integral<T>::value, "operands of arithmetic atomic operations need to be integral");
|
||||
return fail_prerequisites<T, MemoryOrder,
|
||||
baselib::memory_order_relaxed_t,
|
||||
baselib::memory_order_acquire_t,
|
||||
baselib::memory_order_release_t,
|
||||
baselib::memory_order_acq_rel_t,
|
||||
baselib::memory_order_seq_cst_t>();
|
||||
}
|
||||
}
|
||||
|
||||
// MACRO generated impl
|
||||
// re-directs to Baselib_atomic_ API
|
||||
// ----------------------------------------------------------------------------------------------------------------------------------
|
||||
#define detail_THREAD_FENCE(order, ...) \
|
||||
static FORCE_INLINE void atomic_thread_fence(memory_order_##order##_t order) \
|
||||
{ \
|
||||
return Baselib_atomic_thread_fence_##order(); \
|
||||
}
|
||||
|
||||
#define detail_LOAD(op, order, id, bits, ...) \
|
||||
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
|
||||
static FORCE_INLINE T atomic_load_explicit(const T& obj, memory_order_##order##_t order) \
|
||||
{ \
|
||||
T ret; \
|
||||
Baselib_atomic_load_##id##_##order##_v(&obj, &ret); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
#define detail_LOAD128(op, order, id, bits, ...) \
|
||||
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
|
||||
static FORCE_INLINE T atomic_load_explicit(const T& obj, memory_order_##order##_t order) \
|
||||
{ \
|
||||
T ret; \
|
||||
Baselib_atomic_load_##id##_##order##_v(const_cast<T*>(&obj), &ret); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
#define detail_STORE(op, order, id, bits, ...) \
|
||||
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
|
||||
static FORCE_INLINE void atomic_store_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order)\
|
||||
{ \
|
||||
return Baselib_atomic_store_##id##_##order##_v(&obj, &value); \
|
||||
}
|
||||
|
||||
#define detail_LOAD_STORE(op, order, id, bits, ...) \
|
||||
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
|
||||
static FORCE_INLINE T atomic_##op##_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order) \
|
||||
{ \
|
||||
T ret; \
|
||||
Baselib_atomic_##op##_##id##_##order##_v(&obj, &value, &ret); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
#define detail_ALU(op, order, id, bits, ...) \
|
||||
template<typename T, typename std::enable_if<baselib::is_integral_of_size<T, bits/8>::value, int>::type = 0> \
|
||||
static FORCE_INLINE T atomic_##op##_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order) \
|
||||
{ \
|
||||
T ret; \
|
||||
Baselib_atomic_##op##_##id##_##order##_v(&obj, &value, &ret); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
#define detail_CMP_XCHG(op, order1, order2, id, bits, ...) \
|
||||
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
|
||||
static FORCE_INLINE bool atomic_##op##_explicit(T& obj, \
|
||||
typename std::common_type<T>::type& expected, \
|
||||
typename std::common_type<T>::type desired, \
|
||||
memory_order_##order1##_t order_success, \
|
||||
memory_order_##order2##_t order_failure) \
|
||||
{ \
|
||||
return Baselib_atomic_##op##_##id##_##order1##_##order2##_v(&obj, &expected, &desired); \
|
||||
}
|
||||
|
||||
#define detail_NOT_SUPPORTED(...)
|
||||
|
||||
Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
|
||||
detail_THREAD_FENCE
|
||||
)
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(
|
||||
detail_LOAD, // load
|
||||
detail_STORE, // store
|
||||
detail_ALU, // add
|
||||
detail_ALU, // and
|
||||
detail_ALU, // or
|
||||
detail_ALU, // xor
|
||||
detail_LOAD_STORE, // exchange
|
||||
detail_CMP_XCHG, // compare_exchange_weak
|
||||
detail_CMP_XCHG // compare_exchange_strong
|
||||
)
|
||||
|
||||
#if PLATFORM_ARCH_64
|
||||
// 128bit atomics
|
||||
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
|
||||
detail_LOAD128, // load
|
||||
detail_STORE, // store
|
||||
detail_NOT_SUPPORTED, // add
|
||||
detail_NOT_SUPPORTED, // and
|
||||
detail_NOT_SUPPORTED, // or
|
||||
detail_NOT_SUPPORTED, // xor
|
||||
detail_LOAD_STORE, // exchange
|
||||
detail_CMP_XCHG, // compare_exchange_weak
|
||||
detail_CMP_XCHG, // compare_exchange_strong
|
||||
128, 128)
|
||||
#endif
|
||||
|
||||
#undef detail_THREAD_FENCE
|
||||
#undef detail_LOAD128
|
||||
#undef detail_LOAD
|
||||
#undef detail_STORE
|
||||
#undef detail_LOAD_STORE
|
||||
#undef detail_ALU
|
||||
#undef detail_CMP_XCHG
|
||||
#undef detail_NOT_SUPPORTED
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_fetch_sub_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
return atomic_fetch_add_explicit(obj, 0 - value, order);
|
||||
}
|
||||
|
||||
// API documentation and default fallback for non-matching types
|
||||
// ----------------------------------------------------------------------------------------------------------------------
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_load_explicit(const T& obj, MemoryOrder order)
|
||||
{
|
||||
return detail::fail_prerequisites<T, MemoryOrder, baselib::memory_order_relaxed_t, baselib::memory_order_acquire_t, baselib::memory_order_seq_cst_t>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE void atomic_store_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
detail::fail_prerequisites<T, MemoryOrder, baselib::memory_order_relaxed_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_fetch_add_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
return detail::fail_prerequisites_alu<T, MemoryOrder>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_fetch_and_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
return detail::fail_prerequisites_alu<T, MemoryOrder>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_fetch_or_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
return detail::fail_prerequisites_alu<T, MemoryOrder>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_fetch_xor_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
return detail::fail_prerequisites_alu<T, MemoryOrder>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrder>
|
||||
static FORCE_INLINE T atomic_exchange_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
|
||||
{
|
||||
return detail::fail_prerequisites<T, MemoryOrder>();
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure>
|
||||
static FORCE_INLINE bool atomic_compare_exchange_weak_explicit(T& obj,
|
||||
typename std::common_type<T>::type& expected,
|
||||
typename std::common_type<T>::type desired,
|
||||
MemoryOrderSuccess order_success,
|
||||
MemoryOrderFailure order_failure)
|
||||
{
|
||||
detail::fail_prerequisites_cmpxchg<T, MemoryOrderSuccess, MemoryOrderFailure>();
|
||||
return false;
|
||||
}
|
||||
|
||||
template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure>
|
||||
static FORCE_INLINE bool atomic_compare_exchange_strong_explicit(T& obj,
|
||||
typename std::common_type<T>::type& expected,
|
||||
typename std::common_type<T>::type desired,
|
||||
MemoryOrderSuccess order_success,
|
||||
MemoryOrderFailure order_failure)
|
||||
{
|
||||
detail::fail_prerequisites_cmpxchg<T, MemoryOrderSuccess, MemoryOrderFailure>();
|
||||
return false;
|
||||
}
|
||||
|
||||
// default memory order (memory_order_seq_cst)
|
||||
// ----------------------------------------------------------------------------------------------------------------------
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_load(const T& obj)
|
||||
{
|
||||
return atomic_load_explicit(obj, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE void atomic_store(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_store_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_fetch_add(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_fetch_add_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_fetch_sub(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_fetch_sub_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_fetch_and(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_fetch_and_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_fetch_or(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_fetch_or_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_fetch_xor(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_fetch_xor_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE T atomic_exchange(T& obj, typename std::common_type<T>::type value)
|
||||
{
|
||||
return atomic_exchange_explicit(obj, value, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE bool atomic_compare_exchange_weak(T& obj,
|
||||
typename std::common_type<T>::type& expected,
|
||||
typename std::common_type<T>::type desired)
|
||||
{
|
||||
return atomic_compare_exchange_weak_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static FORCE_INLINE bool atomic_compare_exchange_strong(T& obj,
|
||||
typename std::common_type<T>::type& expected,
|
||||
typename std::common_type<T>::type desired)
|
||||
{
|
||||
return atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
|
||||
}
|
||||
|
||||
// Common base for all atomic specializations. Wraps a suitably aligned object
// of type T and exposes a std::atomic-like interface implemented on top of the
// baselib atomic_*_explicit free functions. All operations default to
// sequentially consistent ordering; a weaker ordering can be requested per
// call via the tag parameter.
template<typename T>
struct atomic_common
{
    using value_type = T;

    // Compile-time prerequisite checks for T (see macro definition).
    TEST_ATOMICS_PREREQUISITES(T);

    // The wrapped value, aligned so the platform can operate on it atomically.
    ALIGNED_ATOMIC(T) obj;

    // Default construction leaves the value uninitialized (mirrors std::atomic).
    FORCE_INLINE atomic_common() = default;

    // Initializes atomic with a given value. Initialization is not atomic!
    FORCE_INLINE atomic_common(T value)
    {
        obj = value;
    }

    // Implicit read/write, both sequentially consistent.
    FORCE_INLINE operator T() const { return atomic_load_explicit(obj, memory_order_seq_cst); }
    FORCE_INLINE T operator=(T value) { atomic_store_explicit(obj, value, memory_order_seq_cst); return value; }

    // Reads and returns the current value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T load(TMemoryOrder order = memory_order_seq_cst) const
    {
        return atomic_load_explicit(obj, order);
    }

    // Stores `value`. (Returning the void call result is intentional, valid C++.)
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE void store(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_store_explicit(obj, value, order);
    }

    // Atomically replaces the value and returns the previously held value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T exchange(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_exchange_explicit(obj, value, order);
    }

    // Weak compare-and-swap with caller-chosen success/failure orderings.
    // Mirrors std::atomic::compare_exchange_weak semantics (may fail
    // spuriously; intended for retry loops).
    template<typename TMemoryOrderSuccess, typename TMemoryOrderFailure>
    FORCE_INLINE bool compare_exchange_weak(T& expected, T desired, TMemoryOrderSuccess order_success, TMemoryOrderFailure order_failure)
    {
        return atomic_compare_exchange_weak_explicit(obj, expected, desired, order_success, order_failure);
    }

    // Weak compare-and-swap, sequentially consistent for both outcomes.
    FORCE_INLINE bool compare_exchange_weak(T& expected, T desired)
    {
        return atomic_compare_exchange_weak_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
    }

    // Strong compare-and-swap with caller-chosen success/failure orderings.
    template<typename TMemoryOrderSuccess, typename TMemoryOrderFailure>
    FORCE_INLINE bool compare_exchange_strong(T& expected, T desired, TMemoryOrderSuccess order_success, TMemoryOrderFailure order_failure)
    {
        return atomic_compare_exchange_strong_explicit(obj, expected, desired, order_success, order_failure);
    }

    // Strong compare-and-swap, sequentially consistent for both outcomes.
    FORCE_INLINE bool compare_exchange_strong(T& expected, T desired)
    {
        return atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
    }
};
|
||||
|
||||
// Primary template; only the two explicit specializations below (integral /
// non-integral, selected by `atomic` via std::is_integral) are ever used.
template<typename T, bool IsIntegral>
struct atomic_base {};
|
||||
|
||||
// Atomic type for integral types.
// Extends atomic_common with arithmetic/bitwise read-modify-write operations;
// all fetch_* calls return the value held *before* the operation.
template<typename T>
struct atomic_base<T, true> : atomic_common<T>
{
    using atomic_common<T>::atomic_common;

    // Atomically adds `value`; returns the previous value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T fetch_add(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_fetch_add_explicit(atomic_common<T>::obj, value, order);
    }

    // Atomically subtracts `value`; returns the previous value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T fetch_sub(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_fetch_sub_explicit(atomic_common<T>::obj, value, order);
    }

    // Atomically bitwise-ANDs `value`; returns the previous value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T fetch_and(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_fetch_and_explicit(atomic_common<T>::obj, value, order);
    }

    // Atomically bitwise-ORs `value`; returns the previous value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T fetch_or(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_fetch_or_explicit(atomic_common<T>::obj, value, order);
    }

    // Atomically bitwise-XORs `value`; returns the previous value.
    template<typename TMemoryOrder = memory_order_seq_cst_t>
    FORCE_INLINE T fetch_xor(T value, TMemoryOrder order = memory_order_seq_cst)
    {
        return atomic_fetch_xor_explicit(atomic_common<T>::obj, value, order);
    }

    // Increment/decrement and compound assignment, all sequentially consistent.
    // Note: the returned value is reconstructed from the fetched (previous)
    // value rather than re-read, matching std::atomic operator semantics.
    FORCE_INLINE T operator++(int) { return atomic_fetch_add_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst); }
    FORCE_INLINE T operator--(int) { return atomic_fetch_sub_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst); }
    FORCE_INLINE T operator++() { return atomic_fetch_add_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst) + T(1); }
    FORCE_INLINE T operator--() { return atomic_fetch_sub_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst) - T(1); }
    FORCE_INLINE T operator+=(T value) { return atomic_fetch_add_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) + value; }
    FORCE_INLINE T operator-=(T value) { return atomic_fetch_sub_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) - value; }
    FORCE_INLINE T operator&=(T value) { return atomic_fetch_and_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) & value; }
    FORCE_INLINE T operator|=(T value) { return atomic_fetch_or_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) | value; }
    FORCE_INLINE T operator^=(T value) { return atomic_fetch_xor_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) ^ value; }
};
|
||||
|
||||
// Atomic type for non-integral types.
// Provides only the common interface (load/store/exchange/compare-exchange);
// arithmetic and bitwise fetch operations are not available for these types.
template<typename T>
struct atomic_base<T, false> : atomic_common<T>
{
    using atomic_common<T>::atomic_common;
};
|
||||
|
||||
// User-facing atomic type. Dispatches to the integral specialization of
// atomic_base (which adds fetch_add/fetch_sub and bitwise ops) when T is an
// integral type, and to the plain common interface otherwise.
template<typename T>
struct atomic : atomic_base<T, std::is_integral<T>::value>
{
    using atomic_base<T, std::is_integral<T>::value>::atomic_base;
};
|
||||
|
||||
#undef TEST_ATOMICS_PREREQUISITES
|
||||
}
|
||||
}
|
||||
98
Libraries/external/baselib/Include/Cpp/Barrier.h
vendored
Normal file
98
Libraries/external/baselib/Include/Cpp/Barrier.h
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
#pragma once
|
||||
|
||||
#include "Atomic.h"
|
||||
#include "Semaphore.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In parallel computing, a barrier is a type of synchronization
|
||||
// method. A barrier for a group of threads or processes in the source
|
||||
// code means any thread/process must stop at this point and cannot
|
||||
// proceed until all other threads/processes reach this barrier.
|
||||
//
|
||||
// "Barrier (computer science)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/wiki/Barrier_(computer_science)
|
||||
//
|
||||
// For optimal performance, baselib::Barrier should be stored at a
|
||||
// cache aligned memory location.
|
||||
class Barrier
|
||||
{
|
||||
public:
|
||||
// non-copyable
|
||||
Barrier(const Barrier& other) = delete;
|
||||
Barrier& operator=(const Barrier& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
Barrier(Barrier&& other) = delete;
|
||||
Barrier& operator=(Barrier&& other) = delete;
|
||||
|
||||
// Creates a barrier with a set number of threads to synchronize.
|
||||
// Once a set of threads enter a Barrier, the *same* set of threads
|
||||
// must continue to use the Barrier - i.e. no additional threads may
|
||||
// enter any of the Acquires. For example, it is *not* allowed to
|
||||
// create a Barrier with threads_num=10, then send 30 threads into
|
||||
// barrier.Acquire() with the expectation 3 batches of 10 will be
|
||||
// released. However, once it is guaranteed that all threads have
|
||||
// exited all of the Acquire invocations, it is okay to reuse the
|
||||
// same barrier object with a different set of threads - for
|
||||
// example, after Join() has been called on all participating
|
||||
// threads and a new batch of threads is launched.
|
||||
//
|
||||
// \param threads_num Wait for this number of threads before letting all proceed.
|
||||
explicit Barrier(uint16_t threads_num)
|
||||
: m_threshold(threads_num), m_count(0)
|
||||
{
|
||||
}
|
||||
|
||||
// Block the current thread until the specified number of threads
|
||||
// also reach this `Acquire()`.
|
||||
void Acquire()
|
||||
{
|
||||
// If there is two Barrier::Acquire calls in a row, when the
|
||||
// first Acquire releases, one thread may jump out of the gate
|
||||
// so fast that it reaches the next Acquire and steals *another*
|
||||
// semaphore slot, continuing past the *second* Acquire, before
|
||||
// all threads have even left the first Acquire. So, we instead
|
||||
// construct two semaphores and alternate between them to
|
||||
// prevent this.
|
||||
|
||||
uint16_t previous_value = m_count.fetch_add(1, memory_order_relaxed);
|
||||
BaselibAssert(previous_value < m_threshold * 2);
|
||||
|
||||
// If count is in range [0, m_threshold), use semaphore A.
|
||||
// If count is in range [m_threshold, m_threshold * 2), use semaphore B.
|
||||
bool useSemaphoreB = previous_value >= m_threshold;
|
||||
Semaphore& semaphore = useSemaphoreB ? m_semaphoreB : m_semaphoreA;
|
||||
|
||||
// If (count % m_threshold) == (m_threshold - 1), then we're the last thread in the group, release the semaphore.
|
||||
bool do_release = previous_value % m_threshold == m_threshold - 1;
|
||||
|
||||
if (do_release)
|
||||
{
|
||||
if (previous_value == m_threshold * 2 - 1)
|
||||
{
|
||||
// Note this needs to happen before the Release to avoid
|
||||
// a race condition (if this thread yields right before
|
||||
// the Release, but after the add, the invariant of
|
||||
// previous_value < m_threshold * 2 may break for
|
||||
// another thread)
|
||||
m_count.fetch_sub(m_threshold * 2, memory_order_relaxed);
|
||||
}
|
||||
semaphore.Release(m_threshold - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
semaphore.Acquire();
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
Semaphore m_semaphoreA;
|
||||
Semaphore m_semaphoreB;
|
||||
uint16_t m_threshold;
|
||||
atomic<uint16_t> m_count;
|
||||
};
|
||||
}
|
||||
}
|
||||
31
Libraries/external/baselib/Include/Cpp/Baselib_DynamicLibrary.h
vendored
Normal file
31
Libraries/external/baselib/Include/Cpp/Baselib_DynamicLibrary.h
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_DynamicLibrary.h"
|
||||
|
||||
// Overload alias for Baselib_DynamicLibrary_OpenUtf8.
//
// \param pathnameUtf8 UTF-8 encoded path of the dynamic library to open.
// \param errorState   Error reporting per the underlying C API's conventions.
// \returns the handle produced by Baselib_DynamicLibrary_OpenUtf8.
static inline Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_Open(
    const char* pathnameUtf8,
    Baselib_ErrorState* errorState
)
{
    return Baselib_DynamicLibrary_OpenUtf8(pathnameUtf8, errorState);
}
|
||||
|
||||
// Overload alias for Baselib_DynamicLibrary_OpenUtf16.
//
// \param pathnameUtf16 UTF-16 encoded path of the dynamic library to open.
// \param errorState    Error reporting per the underlying C API's conventions.
// \returns the handle produced by Baselib_DynamicLibrary_OpenUtf16.
static inline Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_Open(
    const baselib_char16_t* pathnameUtf16,
    Baselib_ErrorState* errorState
)
{
    return Baselib_DynamicLibrary_OpenUtf16(pathnameUtf16, errorState);
}
|
||||
|
||||
// Two library handles are considered equal when their underlying
// `handle` values match.
static inline bool operator==(const Baselib_DynamicLibrary_Handle& a, const Baselib_DynamicLibrary_Handle& b)
{
    return a.handle == b.handle;
}
|
||||
|
||||
// Inequality counterpart of operator== above, comparing the underlying
// `handle` values.
static inline bool operator!=(const Baselib_DynamicLibrary_Handle& a, const Baselib_DynamicLibrary_Handle& b)
{
    return a.handle != b.handle;
}
|
||||
50
Libraries/external/baselib/Include/Cpp/BinarySemaphore.h
vendored
Normal file
50
Libraries/external/baselib/Include/Cpp/BinarySemaphore.h
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
#pragma once
|
||||
|
||||
#include "CappedSemaphore.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
|
||||
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
|
||||
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
|
||||
// decremented, or toggled) depending on programmer-defined conditions.
|
||||
//
|
||||
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
|
||||
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
|
||||
// resource becomes available.
|
||||
//
|
||||
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
|
||||
//
|
||||
// For optimal performance, baselib::BinarySemaphore should be stored at a cache aligned memory location.
|
||||
class BinarySemaphore : private CappedSemaphore
{
public:

    // Creates a binary semaphore synchronization primitive.
    // Binary means the semaphore can at any given time have at most one token available for consumption.
    //
    // This is just an API facade for CappedSemaphore(1)
    //
    // If there are not enough system resources to create a semaphore, process abort is triggered.
    BinarySemaphore() : CappedSemaphore(1) {}

    // Re-exported unchanged from CappedSemaphore (see that class for docs).
    using CappedSemaphore::Acquire;
    using CappedSemaphore::TryAcquire;
    using CappedSemaphore::TryTimedAcquire;

    // Submit token to the semaphore.
    // If threads are waiting the token is consumed before this function return.
    //
    // When successful this function is guaranteed to emit a release barrier.
    //
    // \returns true if a token was submitted, false otherwise (meaning the BinarySemaphore already has a token)
    inline bool Release()
    {
        // CappedSemaphore::Release reports how many tokens were actually
        // submitted; with a cap of one that is either 0 or 1.
        return CappedSemaphore::Release(1) == 1;
    }
};
|
||||
}
|
||||
}
|
||||
112
Libraries/external/baselib/Include/Cpp/CappedSemaphore.h
vendored
Normal file
112
Libraries/external/baselib/Include/Cpp/CappedSemaphore.h
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_CappedSemaphore.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
|
||||
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
|
||||
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
|
||||
// decremented, or toggled) depending on programmer-defined conditions.
|
||||
//
|
||||
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
|
||||
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
|
||||
// resource becomes available.
|
||||
//
|
||||
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
|
||||
//
|
||||
// For optimal performance, baselib::CappedSemaphore should be stored at a cache aligned memory location.
|
||||
class CappedSemaphore
{
public:
    // non-copyable
    CappedSemaphore(const CappedSemaphore& other) = delete;
    CappedSemaphore& operator=(const CappedSemaphore& other) = delete;

    // non-movable (strictly speaking not needed but listed to signal intent)
    CappedSemaphore(CappedSemaphore&& other) = delete;
    CappedSemaphore& operator=(CappedSemaphore&& other) = delete;

    // Creates a capped counting semaphore synchronization primitive.
    // Cap is the number of tokens that can be held by the semaphore when there is no contention.
    //
    // If there are not enough system resources to create a semaphore, process abort is triggered.
    //
    // NOTE(review): constructor is not `explicit`, so a uint16_t converts
    // implicitly to CappedSemaphore — confirm whether that is intended.
    CappedSemaphore(const uint16_t cap) : m_CappedSemaphoreData(Baselib_CappedSemaphore_Create(cap))
    {
    }

    // Reclaim resources and memory held by the semaphore.
    //
    // If threads are waiting on the semaphore, destructor will trigger an assert and may cause process abort.
    ~CappedSemaphore()
    {
        Baselib_CappedSemaphore_Free(&m_CappedSemaphoreData);
    }

    // Wait for semaphore token to become available
    //
    // This function is guaranteed to emit an acquire barrier.
    inline void Acquire()
    {
        // Returning the void call result is intentional, valid C++.
        return Baselib_CappedSemaphore_Acquire(&m_CappedSemaphoreData);
    }

    // Try to consume a token and return immediately.
    //
    // When successful this function is guaranteed to emit an acquire barrier.
    //
    // Return: true if token was consumed. false if not.
    inline bool TryAcquire()
    {
        return Baselib_CappedSemaphore_TryAcquire(&m_CappedSemaphoreData);
    }

    // Wait for semaphore token to become available
    //
    // When successful this function is guaranteed to emit an acquire barrier.
    //
    // TryTimedAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
    // while TryTimedAcquire with a zero timeout may enter the kernel and cause a context switch.
    //
    // Timeout passed to this function may be subject to system clock resolution.
    // If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
    //
    // Arguments:
    // - timeout: Time to wait for token to become available.
    //
    // Return: true if token was consumed. false if timeout was reached.
    inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
    {
        return Baselib_CappedSemaphore_TryTimedAcquire(&m_CappedSemaphoreData, timeoutInMilliseconds.count());
    }

    // Submit tokens to the semaphore.
    // If threads are waiting an equal amount of tokens are consumed before this function return.
    //
    // When successful this function is guaranteed to emit a release barrier.
    //
    // \returns number of submitted tokens.
    inline uint16_t Release(const uint16_t count)
    {
        return Baselib_CappedSemaphore_Release(&m_CappedSemaphoreData, count);
    }

    // Sets the semaphore token count to zero and release all waiting threads.
    //
    // When successful this function is guaranteed to emit a release barrier.
    //
    // Return: number of released threads.
    inline uint32_t ResetAndReleaseWaitingThreads()
    {
        return Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(&m_CappedSemaphoreData);
    }

private:
    // Underlying C API state; all methods forward to Baselib_CappedSemaphore_*.
    Baselib_CappedSemaphore m_CappedSemaphoreData;
};
|
||||
}
|
||||
}
|
||||
96
Libraries/external/baselib/Include/Cpp/ConditionVariable.h
vendored
Normal file
96
Libraries/external/baselib/Include/Cpp/ConditionVariable.h
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
#pragma once
|
||||
|
||||
#include "Time.h"
#include "Lock.h"
#include <cstdint>
#include <limits> // std::numeric_limits, used by ConditionVariable::NotifyAll

#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/ConditionVariableData_FutexBased.inl.h"
#else
#include "Internal/ConditionVariableData_SemaphoreBased.inl.h"
#endif
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Conceptually a condition variable is a queue of threads, associated with a monitor, on which a thread may wait for some condition to become true.
|
||||
//
|
||||
// Thus each condition variable c is associated with an assertion Pc. While a thread is waiting on a condition variable, that thread is not considered
|
||||
// to occupy the monitor, and so other threads may enter the monitor to change the monitor's state. In most types of monitors, these other threads may
|
||||
// signal the condition variable c to indicate that assertion Pc is true in the current state.
|
||||
//
|
||||
// "Monitor (synchronization)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Monitor_(synchronization)&oldid=914426020#Condition_variables_2
|
||||
//
|
||||
// For optimal performance, baselib::ConditionVariable should be stored at a cache aligned memory location.
|
||||
class ConditionVariable
{
public:
    // non-copyable
    ConditionVariable(const ConditionVariable& other) = delete;
    ConditionVariable& operator=(const ConditionVariable& other) = delete;

    // non-movable (strictly speaking not needed but listed to signal intent)
    ConditionVariable(ConditionVariable&& other) = delete;
    ConditionVariable& operator=(ConditionVariable&& other) = delete;

    // Creates a condition variable synchronization primitive.
    // Only a reference to `lock` is stored; the caller must guarantee it
    // outlives this object.
    ConditionVariable(Lock& lock) : m_Lock(lock)
    {}

    // Reclaim resources and memory held by the condition variable.
    //
    // If threads are waiting on the condition variable, destructor will trigger an assert and may cause process abort.
    ~ConditionVariable()
    {
        BaselibAssert(!m_Data.HasWaiters(), "Destruction is not allowed when there are still threads waiting on the condition variable.");
        NotifyAll();
    }

    // Wait for the condition variable to become available.
    //
    // The lock must have been previously acquired.
    // For the duration of the wait the lock is released and then re-acquired upon exit.
    // This function is guaranteed to emit an acquire barrier.
    //
    // Implementation is in the platform-specific .inl.h included at the
    // bottom of this header (futex-based or semaphore-based).
    inline void Wait();

    // Wait for the condition variable to become available.
    //
    // The lock must have been previously acquired.
    // For the duration of the wait the lock is released and then re-acquired upon exit.
    // This function is guaranteed to emit an acquire barrier.
    //
    // TimedWait with a zero timeout is guaranteed to be a user space operation.
    //
    // \param timeoutInMilliseconds Time to wait for condition variable to become available.
    // \returns true if the condition variable is available, false if timeout was reached.
    inline bool TimedWait(const timeout_ms timeoutInMilliseconds);

    // Wake up threads waiting for the condition variable.
    //
    // This function is guaranteed to emit a release barrier.
    //
    // \param count At most, `count` waiting threads will be notified, but never more than there are currently waiting.
    inline void Notify(uint16_t count);

    // Wake up all threads waiting for the condition variable.
    //
    // This function is guaranteed to emit a release barrier.
    inline void NotifyAll()
    {
        // uint16_t max exceeds any possible waiter count, so this notifies everyone.
        Notify(std::numeric_limits<uint16_t>::max());
    }

private:
    Lock& m_Lock;                          // user-supplied lock associated with this condition variable
    detail::ConditionVariableData m_Data;  // platform-specific waiter state
};
|
||||
}
|
||||
}
|
||||
|
||||
#if PLATFORM_FUTEX_NATIVE_SUPPORT
|
||||
#include "Internal/ConditionVariable_FutexBased.inl.h"
|
||||
#else
|
||||
#include "Internal/ConditionVariable_SemaphoreBased.inl.h"
|
||||
#endif
|
||||
70
Libraries/external/baselib/Include/Cpp/CountdownTimer.h
vendored
Normal file
70
Libraries/external/baselib/Include/Cpp/CountdownTimer.h
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_CountdownTimer.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Thin RAII-free value wrapper around the Baselib_CountdownTimer C API:
// start a countdown and query how much of the timeout remains.
class CountdownTimer
{
public:
    //
    // Create a countdown timer that already expired.
    //
    // Guaranteed to not sample the system timer.
    //
    static CountdownTimer InitializeExpired()
    {
        return CountdownTimer();
    }

    //
    // Create and start a countdown timer.
    //
    static CountdownTimer StartNew(const high_precision_clock::duration timeout)
    {
        return CountdownTimer(timeout);
    }

    //
    // Get time left before timeout expires.
    //
    // This function is guaranteed to return zero once timeout expired.
    // It is also guaranteed that this function will not return zero until timeout expires.
    // Return the time left as a high precision duration.
    //
    high_precision_clock::duration GetTimeLeft() const
    {
        return high_precision_clock::duration_from_ticks(Baselib_CountdownTimer_GetTimeLeftInTicks(m_CountdownTimer));
    }

    //
    // Get time left before timeout expires.
    //
    // This function is guaranteed to return zero once timeout expired.
    // It is also guaranteed that this function will not return zero until timeout expires.
    // Return the time left as a millisecond integer duration.
    //
    timeout_ms GetTimeLeftInMilliseconds() const
    {
        return timeout_ms(Baselib_CountdownTimer_GetTimeLeftInMilliseconds(m_CountdownTimer));
    }

    //
    // Check if timeout has been reached.
    //
    bool TimeoutExpired() const
    {
        return Baselib_CountdownTimer_TimeoutExpired(m_CountdownTimer);
    }

private:
    // Already-expired timer: zero start tick and zero timeout.
    CountdownTimer() : m_CountdownTimer{0, 0} {}
    // Running timer: samples the clock now; the timeout is converted to ticks
    // rounding up (per ticks_from_duration_roundup) so it never expires early.
    CountdownTimer(const high_precision_clock::duration timeout) : m_CountdownTimer(Baselib_CountdownTimer_StartTicks(high_precision_clock::ticks_from_duration_roundup(timeout))) {}

    Baselib_CountdownTimer m_CountdownTimer;
};
|
||||
}
|
||||
}
|
||||
121
Libraries/external/baselib/Include/Cpp/EventSemaphore.h
vendored
Normal file
121
Libraries/external/baselib/Include/Cpp/EventSemaphore.h
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_EventSemaphore.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, an event (also called event semaphore) is a type of synchronization mechanism that is used to indicate to waiting processes when a
|
||||
// particular condition has become true.
|
||||
// An event is an abstract data type with a boolean state and the following operations:
|
||||
// * wait - when executed, causes the suspension of the executing process until the state of the event is set to true. If the state is already set to true has no effect.
|
||||
// * set - sets the event's state to true, release all waiting processes.
|
||||
// * clear - sets the event's state to false.
|
||||
//
|
||||
// "Event (synchronization primitive)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Event_(synchronization_primitive)&oldid=781517732
|
||||
//
|
||||
// For optimal performance, baselib::EventSemaphore should be stored at a cache aligned memory location.
|
||||
class EventSemaphore
{
public:
    // non-copyable
    EventSemaphore(const EventSemaphore& other) = delete;
    EventSemaphore& operator=(const EventSemaphore& other) = delete;

    // non-movable (strictly speaking not needed but listed to signal intent)
    EventSemaphore(EventSemaphore&& other) = delete;
    EventSemaphore& operator=(EventSemaphore&& other) = delete;

    // Creates an event semaphore synchronization primitive. Initial state of event is unset.
    //
    // If there are not enough system resources to create a semaphore, process abort is triggered.
    EventSemaphore() : m_EventSemaphoreData(Baselib_EventSemaphore_Create())
    {
    }

    // Reclaim resources and memory held by the semaphore.
    // If threads are waiting on the semaphore, calling free may trigger an assert and may cause process abort.
    ~EventSemaphore()
    {
        Baselib_EventSemaphore_Free(&m_EventSemaphoreData);
    }

    // Try to acquire semaphore.
    //
    // When semaphore is acquired this function is guaranteed to emit an acquire barrier.
    //
    // \returns true if event is set, false otherwise.
    COMPILER_WARN_UNUSED_RESULT
    inline bool TryAcquire()
    {
        return Baselib_EventSemaphore_TryAcquire(&m_EventSemaphoreData);
    }

    // Acquire semaphore.
    //
    // This function is guaranteed to emit an acquire barrier.
    inline void Acquire()
    {
        // Returning the void call result is intentional, valid C++.
        return Baselib_EventSemaphore_Acquire(&m_EventSemaphoreData);
    }

    // Try to acquire semaphore.
    //
    // If event is set this function return true, otherwise the thread will wait for event to be set or for release to be called.
    //
    // When semaphore is acquired this function is guaranteed to emit an acquire barrier.
    //
    // Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
    // while Acquire may enter the kernel and cause a context switch.
    //
    // Timeout passed to this function may be subject to system clock resolution.
    // If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
    //
    // \returns true if semaphore was acquired.
    COMPILER_WARN_UNUSED_RESULT
    inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
    {
        return Baselib_EventSemaphore_TryTimedAcquire(&m_EventSemaphoreData, timeoutInMilliseconds.count());
    }

    // Sets the event
    //
    // Setting the event will cause all waiting threads to wakeup. And will let all future acquiring threads through until Baselib_EventSemaphore_Reset is called.
    // It is guaranteed that any thread waiting previously on the EventSemaphore will be woken up, even if the semaphore is immediately reset. (no lock stealing)
    //
    // Guaranteed to emit a release barrier.
    inline void Set()
    {
        return Baselib_EventSemaphore_Set(&m_EventSemaphoreData);
    }

    // Reset event
    //
    // Resetting the event will cause all future acquiring threads to enter a wait state.
    // Has no effect if the EventSemaphore is already in a reset state.
    //
    // Guaranteed to emit a release barrier.
    inline void Reset()
    {
        return Baselib_EventSemaphore_Reset(&m_EventSemaphoreData);
    }

    // Reset event and release all waiting threads
    //
    // Resetting the event will cause all future acquiring threads to enter a wait state.
    // If there were any threads waiting (i.e. the EventSemaphore was already in a release state) they will be released.
    //
    // Guaranteed to emit a release barrier.
    inline void ResetAndRelease()
    {
        return Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(&m_EventSemaphoreData);
    }

private:
    // Underlying C API state; all methods forward to Baselib_EventSemaphore_*.
    Baselib_EventSemaphore m_EventSemaphoreData;
};
|
||||
}
|
||||
}
|
||||
104
Libraries/external/baselib/Include/Cpp/HighCapacitySemaphore.h
vendored
Normal file
104
Libraries/external/baselib/Include/Cpp/HighCapacitySemaphore.h
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_HighCapacitySemaphore.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// baselib::HighCapacitySemaphore is similar to baselib::Semaphore but allows for far greater token count.
|
||||
// It is suitable to be used as resource counting semaphore.
|
||||
class HighCapacitySemaphore
|
||||
{
|
||||
public:
|
||||
// non-copyable
|
||||
HighCapacitySemaphore(const HighCapacitySemaphore& other) = delete;
|
||||
HighCapacitySemaphore& operator=(const HighCapacitySemaphore& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
HighCapacitySemaphore(HighCapacitySemaphore&& other) = delete;
|
||||
HighCapacitySemaphore& operator=(HighCapacitySemaphore&& other) = delete;
|
||||
|
||||
// This is the max number of tokens guaranteed to be held by the semaphore at
|
||||
// any given point in time. Tokens submitted that exceed this value may silently
|
||||
// be discarded.
|
||||
enum : int64_t { MaxGuaranteedCount = Baselib_HighCapacitySemaphore_MaxGuaranteedCount };
|
||||
|
||||
// Creates a counting semaphore synchronization primitive.
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
HighCapacitySemaphore() : m_SemaphoreData(Baselib_HighCapacitySemaphore_Create())
|
||||
{
|
||||
}
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, destructor will trigger an assert and may cause process abort.
|
||||
~HighCapacitySemaphore()
|
||||
{
|
||||
Baselib_HighCapacitySemaphore_Free(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
inline void Acquire()
|
||||
{
|
||||
return Baselib_HighCapacitySemaphore_Acquire(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
// Try to consume a token and return immediately.
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Return: true if token was consumed. false if not.
|
||||
inline bool TryAcquire()
|
||||
{
|
||||
return Baselib_HighCapacitySemaphore_TryAcquire(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// TryAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
|
||||
// while Acquire with a zero timeout may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// Arguments:
|
||||
// - timeout: Time to wait for token to become available.
|
||||
//
|
||||
// Return: true if token was consumed. false if timeout was reached.
|
||||
inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
|
||||
{
|
||||
return Baselib_HighCapacitySemaphore_TryTimedAcquire(&m_SemaphoreData, timeoutInMilliseconds.count());
|
||||
}
|
||||
|
||||
// Submit tokens to the semaphore.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
|
||||
// If count reach `Baselib_HighCapacitySemaphore_MaxGuaranteedCount` this function may silently discard any overflow.
|
||||
inline void Release(uint32_t count)
|
||||
{
|
||||
return Baselib_HighCapacitySemaphore_Release(&m_SemaphoreData, count);
|
||||
}
|
||||
|
||||
// Sets the semaphore token count to zero and release all waiting threads.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Return: number of released threads.
|
||||
inline uint64_t ResetAndReleaseWaitingThreads()
|
||||
{
|
||||
return Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
private:
|
||||
Baselib_HighCapacitySemaphore m_SemaphoreData;
|
||||
};
|
||||
}
|
||||
}
|
||||
16
Libraries/external/baselib/Include/Cpp/Internal/Algorithm.inl.h
vendored
Normal file
16
Libraries/external/baselib/Include/Cpp/Internal/Algorithm.inl.h
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
#pragma once
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
namespace Algorithm
|
||||
{
|
||||
namespace detail
|
||||
{
|
||||
template<typename T>
|
||||
static FORCE_INLINE constexpr T LogicalOrRShiftOp(T value, int shift) { return value | (value >> shift); }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
63
Libraries/external/baselib/Include/Cpp/Internal/Compiler/ClangOrGcc/AlgorithmClangOrGcc.inl.h
vendored
Normal file
63
Libraries/external/baselib/Include/Cpp/Internal/Compiler/ClangOrGcc/AlgorithmClangOrGcc.inl.h
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
#pragma once
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
namespace Algorithm
|
||||
{
|
||||
inline int HighestBitNonZero(uint32_t value)
|
||||
{
|
||||
return 31 - __builtin_clz(value);
|
||||
}
|
||||
|
||||
inline int HighestBitNonZero(uint64_t value)
|
||||
{
|
||||
#if PLATFORM_ARCH_64
|
||||
return 63 - __builtin_clzll(value);
|
||||
#else
|
||||
return (value & 0xffffffff00000000ULL) ? (63 - __builtin_clz((uint32_t)(value >> 32))) : (31 - __builtin_clz((uint32_t)value));
|
||||
#endif
|
||||
}
|
||||
|
||||
inline int HighestBit(uint32_t value)
|
||||
{
|
||||
return value == 0 ? -1 : HighestBitNonZero(value);
|
||||
}
|
||||
|
||||
inline int HighestBit(uint64_t value)
|
||||
{
|
||||
return value == 0 ? -1 : HighestBitNonZero(value);
|
||||
}
|
||||
|
||||
inline int LowestBitNonZero(uint32_t value)
|
||||
{
|
||||
return __builtin_ctz(value);
|
||||
}
|
||||
|
||||
inline int LowestBitNonZero(uint64_t value)
|
||||
{
|
||||
#if PLATFORM_ARCH_64
|
||||
return __builtin_ctzll(value);
|
||||
#else
|
||||
return (value & 0x00000000ffffffffULL) ? __builtin_ctz((uint32_t)(value)) : (32 + __builtin_ctz((uint32_t)(value >> 32)));
|
||||
#endif
|
||||
}
|
||||
|
||||
inline int LowestBit(uint32_t value)
|
||||
{
|
||||
return value == 0 ? -1 : LowestBitNonZero(value);
|
||||
}
|
||||
|
||||
inline int LowestBit(uint64_t value)
|
||||
{
|
||||
return value == 0 ? -1 : LowestBitNonZero(value);
|
||||
}
|
||||
|
||||
inline int BitsInMask(uint64_t mask) { return __builtin_popcountll(mask); }
|
||||
inline int BitsInMask(uint32_t mask) { return __builtin_popcount(mask); }
|
||||
inline int BitsInMask(uint16_t mask) { return BitsInMask((uint32_t)mask); }
|
||||
inline int BitsInMask(uint8_t mask) { return BitsInMask((uint32_t)mask); }
|
||||
}
|
||||
}
|
||||
}
|
||||
131
Libraries/external/baselib/Include/Cpp/Internal/Compiler/Msvc/AlgorithmMsvc.inl.h
vendored
Normal file
131
Libraries/external/baselib/Include/Cpp/Internal/Compiler/Msvc/AlgorithmMsvc.inl.h
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
#pragma once

#include <intrin.h>

#pragma intrinsic(_BitScanReverse)
#if PLATFORM_ARCH_64
#pragma intrinsic(_BitScanReverse64)
#endif

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        namespace Algorithm
        {
            // Index of the highest set bit, or -1 when no bit is set.
            inline int HighestBit(uint32_t value)
            {
                unsigned long index;
                if (_BitScanReverse(&index, value))
                    return (int)index;
                return -1;
            }

            inline int HighestBit(uint64_t value)
            {
#if PLATFORM_ARCH_64
                unsigned long index;
                if (_BitScanReverse64(&index, value))
                    return (int)index;
                return -1;
#else
                // 32-bit target: scan the high word first, then the low word.
                unsigned long index;
                if (_BitScanReverse(&index, (uint32_t)(value >> 32)))
                    return (int)(32 + index);
                if (_BitScanReverse(&index, (uint32_t)value))
                    return (int)index;
                return -1;
#endif
            }

            // Index of the highest set bit. Result is undefined for value == 0
            // (_BitScanReverse leaves the index unset when no bit is found).
            inline int HighestBitNonZero(uint32_t value)
            {
                unsigned long index = 0;
                _BitScanReverse(&index, value);
                return (int)index;
            }

            inline int HighestBitNonZero(uint64_t value)
            {
#if PLATFORM_ARCH_64
                unsigned long index = 0;
                _BitScanReverse64(&index, value);
                return (int)index;
#else
                unsigned long index;
                if (_BitScanReverse(&index, (uint32_t)(value >> 32)))
                    return (int)(32 + index);
                _BitScanReverse(&index, (uint32_t)value);
                return (int)index;
#endif
            }

            // Index of the lowest set bit, or -1 when no bit is set.
            inline int LowestBit(uint32_t value)
            {
                unsigned long index;
                if (_BitScanForward(&index, value))
                    return (int)index;
                return -1;
            }

            inline int LowestBit(uint64_t value)
            {
#if PLATFORM_ARCH_64
                unsigned long index;
                if (_BitScanForward64(&index, value))
                    return (int)index;
                return -1;
#else
                // 32-bit target: scan the low word first, then the high word.
                unsigned long index;
                if (_BitScanForward(&index, (uint32_t)value))
                    return (int)index;
                if (_BitScanForward(&index, (uint32_t)(value >> 32)))
                    return (int)(32 + index);
                return -1;
#endif
            }

            // Index of the lowest set bit. Result is undefined for value == 0.
            inline int LowestBitNonZero(uint32_t value)
            {
                unsigned long index = 0;
                _BitScanForward(&index, value);
                return (int)index;
            }

            inline int LowestBitNonZero(uint64_t value)
            {
#if PLATFORM_ARCH_64
                unsigned long index = 0;
                _BitScanForward64(&index, value);
                return (int)index;
#else
                unsigned long index;
                if (_BitScanForward(&index, (uint32_t)value))
                    return (int)index;
                _BitScanForward(&index, (uint32_t)(value >> 32));
                return (int)(32 + index);
#endif
            }

            // __popcnt/__popcnt16/__popcnt64 were introduced as part of SSE4a
            // See https://en.wikipedia.org/wiki/SSE4#POPCNT_and_LZCNT
            // To check this accurately, we would need to check cpuid which itself is not for free.
            // However, compiling for some hardware, MSVC defines __AVX__ which is a superset of SSE4 so we can use that.
            // (as of writing there's no equivalent __SSE4__)
#if defined(__AVX__)
            // BUGFIX: the narrower overloads must be declared before the uint64_t
            // overload. Previously the 32-bit (non-_AMD64_) uint64_t overload called
            // BitsInMask((uint32_t)...) while only the uint64_t overload was visible,
            // so unqualified lookup resolved the call to itself via implicit
            // conversion — infinite recursion.
            inline int BitsInMask(uint32_t value) { return (int)__popcnt(value); }
            inline int BitsInMask(uint16_t value) { return (int)__popcnt16(value); }
            inline int BitsInMask(uint8_t value) { return BitsInMask((uint16_t)value); }
#ifdef _AMD64_
            inline int BitsInMask(uint64_t value) { return (int)__popcnt64(value); }
#else
            inline int BitsInMask(uint64_t value) { return BitsInMask((uint32_t)value) + BitsInMask((uint32_t)(value >> 32)); }
#endif

            // Todo: Consider using VCNT instruction on arm (NEON)
#else
            // SWAR population count fallback.
            // From http://www-graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
            inline int BitsInMask(uint64_t value)
            {
                value = value - ((value >> 1) & (uint64_t) ~(uint64_t)0 / 3);
                value = (value & (uint64_t) ~(uint64_t)0 / 15 * 3) + ((value >> 2) & (uint64_t) ~(uint64_t)0 / 15 * 3);
                value = (value + (value >> 4)) & (uint64_t) ~(uint64_t)0 / 255 * 15;
                return (uint64_t)(value * ((uint64_t) ~(uint64_t)0 / 255)) >> (sizeof(uint64_t) - 1) * 8;
            }

            inline int BitsInMask(uint32_t value)
            {
                // From http://www-graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
                value = value - ((value >> 1) & 0x55555555);
                value = (value & 0x33333333) + ((value >> 2) & 0x33333333);
                return (((value + (value >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
            }

            inline int BitsInMask(uint16_t value) { return BitsInMask((uint32_t)value); }
            inline int BitsInMask(uint8_t value) { return BitsInMask((uint32_t)value); }
#endif
        }
    }
}
|
||||
38
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariableData_FutexBased.inl.h
vendored
Normal file
38
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariableData_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
#pragma once

#include "../Atomic.h"

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        namespace detail
        {
            // Shared state for the futex-based ConditionVariable implementation.
            struct ConditionVariableData
            {
                atomic<int32_t> waiters;    // threads currently blocked in Wait/TimedWait
                atomic<int32_t> wakeups;    // pending wakeup tokens published by Notify

                ConditionVariableData() : waiters(0), wakeups(0) {}

                // True when at least one thread is registered as waiting.
                inline bool HasWaiters() const
                {
                    return waiters.load(memory_order_acquire) > 0;
                }

                // Attempts to consume one pending wakeup token via CAS.
                // Returns true when a token was consumed (with acquire ordering),
                // false when no token is available.
                inline bool TryConsumeWakeup()
                {
                    for (int32_t count = wakeups.load(memory_order_relaxed); count > 0;)
                    {
                        // On failure compare_exchange_weak reloads `count`, so the
                        // loop re-evaluates against the fresh value.
                        if (wakeups.compare_exchange_weak(count, count - 1, memory_order_acquire, memory_order_relaxed))
                            return true;
                    }
                    return false;
                }
            };
        }
    }
}
|
||||
26
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariableData_SemaphoreBased.inl.h
vendored
Normal file
26
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariableData_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#pragma once

#include "../Atomic.h"
#include "../Semaphore.h"

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        namespace detail
        {
            // Shared state for the semaphore-based ConditionVariable implementation.
            struct ConditionVariableData
            {
                Semaphore semaphore;        // wakeup tokens are delivered through this semaphore
                atomic<uint32_t> waiters;   // threads currently blocked in Wait/TimedWait

                ConditionVariableData() : semaphore(), waiters(0) {}

                // True when at least one thread is registered as waiting.
                inline bool HasWaiters() const
                {
                    return waiters.load(memory_order_acquire) > 0;
                }
            };
        }
    }
}
|
||||
86
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariable_FutexBased.inl.h
vendored
Normal file
86
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariable_FutexBased.inl.h
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
#pragma once

#include <limits>   // fix: std::numeric_limits is used below but <limits> was never included

#include "../CountdownTimer.h"
#include "../../C/Baselib_SystemFutex.h"
#include "../../C/Baselib_Thread.h"

#if !PLATFORM_FUTEX_NATIVE_SUPPORT
#error "Only use this implementation on top of a proper futex, in all other situations use ConditionVariable_SemaphoreBased.inl.h"
#endif

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        // Blocks until notified. The associated lock must be held on entry; it is
        // released while waiting and re-acquired before returning.
        inline void ConditionVariable::Wait()
        {
            m_Data.waiters.fetch_add(1, memory_order_relaxed);
            m_Lock.Release();
            while (!m_Data.TryConsumeWakeup())
            {
                Baselib_SystemFutex_Wait(&m_Data.wakeups.obj, 0, std::numeric_limits<uint32_t>::max());
            }
            m_Lock.Acquire();
        }

        // Blocks until notified or until the timeout elapses.
        // Returns true when a wakeup was consumed, false on timeout.
        // The associated lock is released while waiting and re-acquired before returning.
        inline bool ConditionVariable::TimedWait(const timeout_ms timeoutInMilliseconds)
        {
            m_Data.waiters.fetch_add(1, memory_order_relaxed);
            m_Lock.Release();

            uint32_t timeLeft = timeoutInMilliseconds.count();
            auto timer = CountdownTimer::StartNew(timeoutInMilliseconds);
            do
            {
                Baselib_SystemFutex_Wait(&m_Data.wakeups.obj, 0, timeLeft);
                if (m_Data.TryConsumeWakeup())
                {
                    m_Lock.Acquire();
                    return true;
                }
                timeLeft = timer.GetTimeLeftInMilliseconds().count();
            }
            while (timeLeft);

            // Timed out: deregister ourselves as a waiter. If Notify already consumed
            // our waiter slot (waiters reached 0) a wakeup token is owed to us, so
            // spin/yield until we can consume it and report success instead.
            do
            {
                int32_t waiters = m_Data.waiters.load(memory_order_relaxed);
                while (waiters > 0)
                {
                    if (m_Data.waiters.compare_exchange_weak(waiters, waiters - 1, memory_order_relaxed, memory_order_relaxed))
                    {
                        m_Lock.Acquire();
                        return false;
                    }
                }
                Baselib_Thread_YieldExecution();
            }
            while (!m_Data.TryConsumeWakeup());

            m_Lock.Acquire();
            return true;
        }

        // Wakes up to `count` waiting threads.
        inline void ConditionVariable::Notify(uint16_t count)
        {
            int32_t waitingThreads = m_Data.waiters.load(memory_order_acquire);
            do
            {
                int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
                if (threadsToWakeup == 0)
                {
                    // No one to wake: still emit a release barrier so Notify keeps
                    // its documented release-barrier guarantee.
                    atomic_thread_fence(memory_order_release);
                    return;
                }

                if (m_Data.waiters.compare_exchange_weak(waitingThreads, waitingThreads - threadsToWakeup, memory_order_relaxed, memory_order_relaxed))
                {
                    m_Data.wakeups.fetch_add(threadsToWakeup, memory_order_release);
                    Baselib_SystemFutex_Notify(&m_Data.wakeups.obj, threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
                    return;
                }
            }
            while (waitingThreads > 0);
        }
    }
}
|
||||
61
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariable_SemaphoreBased.inl.h
vendored
Normal file
61
Libraries/external/baselib/Include/Cpp/Internal/ConditionVariable_SemaphoreBased.inl.h
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
#pragma once

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        // Blocks until notified. The associated lock must be held on entry; it is
        // released while waiting and re-acquired before returning.
        inline void ConditionVariable::Wait()
        {
            // Register as a waiter before releasing the lock so Notify can see us.
            m_Data.waiters.fetch_add(1, memory_order_relaxed);
            m_Lock.Release();
            m_Data.semaphore.Acquire();
            m_Lock.Acquire();
        }

        // Blocks until notified or until the timeout elapses.
        // Returns true when a wakeup was consumed, false on timeout.
        // The associated lock is released while waiting and re-acquired before returning.
        inline bool ConditionVariable::TimedWait(const timeout_ms timeoutInMilliseconds)
        {
            m_Data.waiters.fetch_add(1, memory_order_relaxed);
            m_Lock.Release();

            bool acquired = m_Data.semaphore.TryTimedAcquire(timeoutInMilliseconds);

            if (acquired)
            {
                m_Lock.Acquire();
                return true;
            }

            // Timed out: deregister ourselves as a waiter. If Notify already consumed
            // our waiter slot (waiters reached 0), a semaphore token is owed to us;
            // yield until we can take it and report success instead of timeout.
            do
            {
                uint32_t waiters = m_Data.waiters.load(memory_order_relaxed);
                while (waiters > 0)
                {
                    // On CAS failure `waiters` is reloaded, so the inner loop retries
                    // against the fresh value.
                    if (m_Data.waiters.compare_exchange_weak(waiters, waiters - 1, memory_order_relaxed, memory_order_relaxed))
                    {
                        m_Lock.Acquire();
                        return false;
                    }
                }
                // NOTE(review): relies on Baselib_Thread_YieldExecution being declared
                // via an include pulled in elsewhere (Baselib_Thread.h is not included
                // by this file) — confirm against the including header.
                Baselib_Thread_YieldExecution();
            }
            while (!m_Data.semaphore.TryAcquire());

            m_Lock.Acquire();
            return true;
        }

        // Wakes up to `count` waiting threads by claiming their waiter slots and
        // releasing one semaphore token per claimed slot.
        inline void ConditionVariable::Notify(uint16_t count)
        {
            uint32_t waitingThreads, threadsToWakeup;
            do
            {
                waitingThreads = m_Data.waiters.load(memory_order_acquire);
                threadsToWakeup = count < waitingThreads ? count : waitingThreads;
                if (threadsToWakeup == 0)
                    return;
            }
            while (!m_Data.waiters.compare_exchange_weak(waitingThreads, waitingThreads - threadsToWakeup, memory_order_relaxed, memory_order_relaxed));
            m_Data.semaphore.Release(threadsToWakeup);
        }
    }
}
|
||||
22
Libraries/external/baselib/Include/Cpp/Internal/TypeTraits.h
vendored
Normal file
22
Libraries/external/baselib/Include/Cpp/Internal/TypeTraits.h
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// workaround for missing std::is_trivially_copyable
|
||||
// this can't be put inside compiler env due to __GLIBCXX__ not being set at that point
|
||||
#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20150623) || (COMPILER_GCC && __GNUC__ < 5)
|
||||
template<typename T> struct is_trivially_copyable : std::has_trivial_copy_constructor<T> {};
|
||||
#else
|
||||
template<typename T> struct is_trivially_copyable : std::is_trivially_copyable<T> {};
|
||||
#endif
|
||||
|
||||
template<typename T, size_t S> struct is_trivial_of_size : std::integral_constant<bool, is_trivially_copyable<T>::value && (sizeof(T) == S)> {};
|
||||
template<typename T, size_t S> struct is_integral_of_size : std::integral_constant<bool, std::is_integral<T>::value && (sizeof(T) == S)> {};
|
||||
|
||||
template<typename T, typename T2> struct is_of_same_signedness : std::integral_constant<bool, std::is_signed<T>::value == std::is_signed<T2>::value> {};
|
||||
}
|
||||
}
|
||||
129
Libraries/external/baselib/Include/Cpp/Internal/heap_allocator.inl.h
vendored
Normal file
129
Libraries/external/baselib/Include/Cpp/Internal/heap_allocator.inl.h
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
#pragma once

#include "../../C/Baselib_Memory.h"

// Internal, to enable override of default C Api implementation for unit-tests
#ifndef detail_BASELIB_HEAP_ALLOCATOR_TEST_IMPL
#define detail_BASELIB_HEAP_ALLOCATOR_TEST_IMPL 0
#endif

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        namespace detail
        {
            // Production implementation: forwards directly to the Baselib_Memory C API.
            struct heap_allocator_impl
            {
                static constexpr auto Baselib_Memory_Allocate = ::Baselib_Memory_Allocate;
                static constexpr auto Baselib_Memory_Reallocate = ::Baselib_Memory_Reallocate;
                static constexpr auto Baselib_Memory_Free = ::Baselib_Memory_Free;
                static constexpr auto Baselib_Memory_AlignedAllocate = ::Baselib_Memory_AlignedAllocate;
                static constexpr auto Baselib_Memory_AlignedReallocate = ::Baselib_Memory_AlignedReallocate;
                static constexpr auto Baselib_Memory_AlignedFree = ::Baselib_Memory_AlignedFree;
            };

            // Unit-test implementation: definitions are supplied by the test code.
            struct heap_allocator_impl_test
            {
                static void* Baselib_Memory_Allocate(size_t);
                static void* Baselib_Memory_Reallocate(void*, size_t);
                static void Baselib_Memory_Free(void*);
                static void* Baselib_Memory_AlignedAllocate(size_t, size_t);
                static void* Baselib_Memory_AlignedReallocate(void*, size_t, size_t);
                static void Baselib_Memory_AlignedFree(void*);
            };

            // Heap allocator with a compile-time alignment guarantee.
            template<uint32_t alignment>
            class heap_allocator
            {
                // Use the test implementation when detail_BASELIB_HEAP_ALLOCATOR_TEST_IMPL is true, otherwise Baselib_Memory_*.
                using BaseImpl = typename std::conditional<detail_BASELIB_HEAP_ALLOCATOR_TEST_IMPL, heap_allocator_impl_test, heap_allocator_impl>::type;

                // Path for alignment requirements <= Baselib_Memory_MinGuaranteedAlignment.
                struct MinAlignedImpl
                {
                    static void* allocate(size_t size, Baselib_ErrorState *errorState)
                    {
                        UNUSED(errorState);
                        return BaseImpl::Baselib_Memory_Allocate(size);
                    }

                    static void* reallocate(void* ptr, size_t oldSize, size_t newSize, Baselib_ErrorState *errorState)
                    {
                        UNUSED(errorState);
                        UNUSED(oldSize);
                        return BaseImpl::Baselib_Memory_Reallocate(ptr, newSize);
                    }

                    static bool deallocate(void* ptr, size_t size, Baselib_ErrorState *errorState)
                    {
                        UNUSED(errorState);
                        UNUSED(size);
                        BaseImpl::Baselib_Memory_Free(ptr);
                        return true;
                    }
                };

                // Path for alignment requirements > Baselib_Memory_MinGuaranteedAlignment.
                struct AlignedImpl
                {
                    static void* allocate(size_t size, Baselib_ErrorState *errorState)
                    {
                        UNUSED(errorState);
                        return BaseImpl::Baselib_Memory_AlignedAllocate(size, alignment);
                    }

                    static void* reallocate(void* ptr, size_t oldSize, size_t newSize, Baselib_ErrorState *errorState)
                    {
                        UNUSED(errorState);
                        UNUSED(oldSize);
                        return BaseImpl::Baselib_Memory_AlignedReallocate(ptr, newSize, alignment);
                    }

                    static bool deallocate(void* ptr, size_t size, Baselib_ErrorState *errorState)
                    {
                        UNUSED(errorState);
                        UNUSED(size);
                        BaseImpl::Baselib_Memory_AlignedFree(ptr);
                        return true;
                    }
                };

                // Round `size` up to the next multiple of `alignment` (power of two).
                static FORCE_INLINE constexpr size_t AlignedSize(size_t size)
                {
                    return (size + alignment - 1) & ~(alignment - 1);
                }

            public:
                static constexpr size_t max_alignment = Baselib_Memory_MaxAlignment;

                // Smallest aligned size that holds `size` bytes without waste.
                static constexpr size_t optimal_size(size_t size)
                {
                    return AlignedSize(size);
                }

                // Select the aligned path only when the default guarantee is insufficient.
                using Impl = typename std::conditional<(alignment > Baselib_Memory_MinGuaranteedAlignment), AlignedImpl, MinAlignedImpl>::type;

                static void* allocate(size_t size, Baselib_ErrorState* error_state_ptr)
                {
                    return Impl::allocate(size, error_state_ptr);
                }

                static void* reallocate(void* ptr, size_t old_size, size_t new_size, Baselib_ErrorState* error_state_ptr)
                {
                    return Impl::reallocate(ptr, old_size, new_size, error_state_ptr);
                }

                static bool deallocate(void* ptr, size_t size, Baselib_ErrorState* error_state_ptr)
                {
                    return Impl::deallocate(ptr, size, error_state_ptr);
                }
            };
        }
    }
}

#undef detail_BASELIB_HEAP_ALLOCATOR_TEST_IMPL
|
||||
95
Libraries/external/baselib/Include/Cpp/Internal/page_allocator.inl.h
vendored
Normal file
95
Libraries/external/baselib/Include/Cpp/Internal/page_allocator.inl.h
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
#pragma once

#include "../../C/Baselib_Memory.h"
#include "../../Cpp/Algorithm.h"

// Internal, to enable override of default C Api implementation for unit-tests
#ifndef detail_BASELIB_PAGE_ALLOCATOR_TEST_IMPL
#define detail_BASELIB_PAGE_ALLOCATOR_TEST_IMPL 0
#endif

namespace baselib
{
    BASELIB_CPP_INTERFACE
    {
        namespace detail
        {
            // Production implementation: forwards directly to the Baselib_Memory paging C API.
            struct page_allocator_impl
            {
                static constexpr auto Baselib_Memory_AllocatePages = ::Baselib_Memory_AllocatePages;
                static constexpr auto Baselib_Memory_ReleasePages = ::Baselib_Memory_ReleasePages;
                static constexpr auto Baselib_Memory_SetPageState = ::Baselib_Memory_SetPageState;
            };

            // Unit-test implementation: definitions are supplied by the test code.
            struct page_allocator_impl_test
            {
                static Baselib_Memory_PageAllocation Baselib_Memory_AllocatePages(uint64_t pageSize, uint64_t pageCount, uint64_t alignmentInMultipleOfPageSize, Baselib_Memory_PageState pageState, Baselib_ErrorState* errorState);
                static void Baselib_Memory_ReleasePages(Baselib_Memory_PageAllocation pageAllocation, Baselib_ErrorState* errorState);
                static void Baselib_Memory_SetPageState(void* addressOfFirstPage, uint64_t pageSize, uint64_t pageCount, Baselib_Memory_PageState pageState, Baselib_ErrorState* errorState);
            };

            // C++-side mirror of Baselib_Memory_PageState; executable states also
            // carry the matching access bits.
            typedef enum Memory_PageState : int
            {
                Memory_PageState_Reserved = Baselib_Memory_PageState_Reserved,
                Memory_PageState_NoAccess = Baselib_Memory_PageState_NoAccess,
                Memory_PageState_ReadOnly = Baselib_Memory_PageState_ReadOnly,
                Memory_PageState_ReadWrite = Baselib_Memory_PageState_ReadWrite,
                Memory_PageState_ReadOnly_Executable = Baselib_Memory_PageState_ReadOnly_Executable | Baselib_Memory_PageState_ReadOnly,
                Memory_PageState_ReadWrite_Executable = Baselib_Memory_PageState_ReadWrite_Executable | Baselib_Memory_PageState_ReadWrite,
            } Memory_PageState;

            // Page-granular allocator with a compile-time alignment guarantee.
            template<uint32_t alignment>
            class page_allocator
            {
                // Use the test implementation when detail_BASELIB_PAGE_ALLOCATOR_TEST_IMPL is true.
                using Impl = typename std::conditional<detail_BASELIB_PAGE_ALLOCATOR_TEST_IMPL, page_allocator_impl_test, page_allocator_impl>::type;

                const size_t m_PageSize;
                const size_t m_PageSizeAligned;

                // Number of pages needed to hold `size` bytes (rounded up).
                FORCE_INLINE constexpr size_t PagedCountFromSize(size_t size) const
                {
                    return (size + (m_PageSize - 1)) / m_PageSize;
                }

                // Query the platform's default page size.
                FORCE_INLINE size_t DefaultPageSize() const
                {
                    Baselib_Memory_PageSizeInfo info;
                    Baselib_Memory_GetPageSizeInfo(&info);
                    return static_cast<size_t>(info.defaultPageSize);
                }

            public:
                page_allocator() : page_allocator(DefaultPageSize()) {}
                page_allocator(size_t page_size) : m_PageSize(page_size), m_PageSizeAligned(page_size > alignment ? page_size : alignment) {}

                // Allocate pages covering `size` bytes in the given page state.
                // Failure details are reported through error_state_ptr.
                void* allocate(size_t size, int state, Baselib_ErrorState *error_state_ptr) const
                {
                    const Baselib_Memory_PageAllocation allocation = Impl::Baselib_Memory_AllocatePages(m_PageSize, PagedCountFromSize(size), m_PageSizeAligned / m_PageSize, (Baselib_Memory_PageState)state, error_state_ptr);
                    return allocation.ptr;
                }

                // Release pages previously obtained from allocate().
                bool deallocate(void* ptr, size_t size, Baselib_ErrorState *error_state_ptr) const
                {
                    Impl::Baselib_Memory_ReleasePages({ptr, m_PageSize, PagedCountFromSize(size)}, error_state_ptr);
                    return error_state_ptr->code == Baselib_ErrorCode_Success;
                }

                // Smallest page-aligned size that holds `size` bytes without waste.
                constexpr size_t optimal_size(size_t size) const
                {
                    return (size + m_PageSizeAligned - 1) & ~(m_PageSizeAligned - 1);
                }

                // Change protection state of the pages covering [ptr, ptr+size).
                bool set_page_state(void* ptr, size_t size, int state, Baselib_ErrorState *error_state_ptr) const
                {
                    Impl::Baselib_Memory_SetPageState(ptr, m_PageSize, PagedCountFromSize(size), (Baselib_Memory_PageState)state, error_state_ptr);
                    return error_state_ptr->code == Baselib_ErrorCode_Success;
                }
            };
        }
    }
}

#undef detail_BASELIB_PAGE_ALLOCATOR_TEST_IMPL
|
||||
365
Libraries/external/baselib/Include/Cpp/Internal/tlsf_allocator.inl.h
vendored
Normal file
365
Libraries/external/baselib/Include/Cpp/Internal/tlsf_allocator.inl.h
vendored
Normal file
@@ -0,0 +1,365 @@
|
||||
#pragma once
|
||||
|
||||
#include "../Lock.h"
|
||||
#include "../mpmc_node_queue.h"
|
||||
#include "../Algorithm.h"
|
||||
#include <algorithm>
|
||||
#include <type_traits>
|
||||
#include <cstring>
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
namespace detail
|
||||
{
|
||||
template<class Allocator>
|
||||
class tlsf_block_allocator
|
||||
{
|
||||
baselib::Lock m_CapacityLock;
|
||||
ALIGNED_ATOMIC(size_t) m_Capacity;
|
||||
baselib::mpmc_node_queue<baselib::mpmc_node> m_FreeBlocks;
|
||||
|
||||
struct Segment
|
||||
{
|
||||
uintptr_t data;
|
||||
size_t size;
|
||||
Segment *next;
|
||||
} *m_Segments;
|
||||
|
||||
void LinkSegment(Segment* segment, const size_t block_size, size_t block_count)
|
||||
{
|
||||
uintptr_t nodeData = segment->data;
|
||||
baselib::mpmc_node* firstNode = reinterpret_cast<baselib::mpmc_node*>(nodeData);
|
||||
baselib::mpmc_node* node = firstNode;
|
||||
for (size_t i = 0; i < block_count; ++i)
|
||||
{
|
||||
node = reinterpret_cast<baselib::mpmc_node*>(nodeData);
|
||||
nodeData += block_size;
|
||||
node->next.obj = reinterpret_cast<baselib::mpmc_node*>(nodeData);
|
||||
}
|
||||
m_FreeBlocks.push_back(firstNode, node);
|
||||
}
|
||||
|
||||
bool ExpandCapacity(size_t size, size_t block_size, Allocator& allocator)
|
||||
{
|
||||
if (size == 0)
|
||||
return true;
|
||||
|
||||
// Align to underlying allocator alignment. Size requested must also be of at least block_size
|
||||
block_size = baselib::Algorithm::CeilAligned(block_size, alignment);
|
||||
size = std::max(baselib::Algorithm::CeilAligned(size, alignment), block_size);
|
||||
|
||||
// Consider base allocator optimal size from required size. I.e if higher than size requested, expand using optimal size.
|
||||
const size_t minSize = size + sizeof(Segment);
|
||||
const size_t optimalSize = allocator.optimal_size(minSize);
|
||||
const size_t segment_size = std::max(optimalSize, minSize);
|
||||
const size_t block_count = size / block_size;
|
||||
|
||||
// Allocate one memory block that contains block data and Segment info.
|
||||
uintptr_t segmentMemory = reinterpret_cast<uintptr_t>(allocator.allocate(segment_size));
|
||||
if (segmentMemory == 0)
|
||||
return false;
|
||||
|
||||
// Store data ptr and size information in segment header
|
||||
Segment* segment = reinterpret_cast<Segment*>(segmentMemory + size);
|
||||
segment->data = segmentMemory;
|
||||
segment->size = segment_size;
|
||||
|
||||
// Link segment to existing segments and add capacity.
|
||||
// This function is in the scope of a locked `m_CapacityLock` which has an implicit acquire (lock) release (unlock) barrier.
|
||||
// Order of m_Segments and m_Capacity is irrelevant. Calling `allocate` from other threads may result in a successful allocation but
|
||||
// that is not a problem since this process repeats in the case of being called from `allocate` and container is pre-emtped.
|
||||
// The side effect of not
|
||||
segment->next = m_Segments;
|
||||
m_Segments = segment;
|
||||
LinkSegment(segment, block_size, block_count);
|
||||
baselib::atomic_fetch_add_explicit(m_Capacity, block_size * block_count, baselib::memory_order_relaxed);
|
||||
return true;
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr uint32_t alignment = Allocator::alignment;
|
||||
|
||||
// non-copyable
|
||||
tlsf_block_allocator(const tlsf_block_allocator& other) = delete;
|
||||
tlsf_block_allocator& operator=(const tlsf_block_allocator& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
tlsf_block_allocator(tlsf_block_allocator&& other) = delete;
|
||||
tlsf_block_allocator& operator=(tlsf_block_allocator&& other) = delete;
|
||||
|
||||
tlsf_block_allocator() : m_CapacityLock(), m_Capacity(0), m_FreeBlocks(), m_Segments(nullptr) {}
|
||||
|
||||
void* allocate()
|
||||
{
|
||||
return m_FreeBlocks.try_pop_front();
|
||||
}
|
||||
|
||||
bool deallocate(void* ptr)
|
||||
{
|
||||
m_FreeBlocks.push_back(reinterpret_cast<baselib::mpmc_node*>(ptr));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool deallocate(void* ptr_first, void* ptr_last)
|
||||
{
|
||||
m_FreeBlocks.push_back(reinterpret_cast<baselib::mpmc_node*>(ptr_first), reinterpret_cast<baselib::mpmc_node*>(ptr_last));
|
||||
return true;
|
||||
}
|
||||
|
||||
void deallocate_segments(Allocator& allocator)
|
||||
{
|
||||
Segment *segment = m_Segments;
|
||||
while (segment)
|
||||
{
|
||||
Segment *nextSegment = segment->next;
|
||||
allocator.deallocate(reinterpret_cast<void *>(segment->data), segment->size);
|
||||
segment = nextSegment;
|
||||
}
|
||||
}
|
||||
|
||||
// Drop all bookkeeping without touching the segment memory itself: forget the
// segment list, zero the capacity counter and rebuild the free-block queue.
// NOTE(review): assumes the segment memory has already been released elsewhere
// (e.g. deallocate_segments or the base allocator's deallocate_all) — confirm
// at call sites before relying on this.
void reset_segments()
{
    if (m_Segments)
    {
        m_Segments = nullptr;
        m_Capacity = 0;
        // Destroy + placement-new returns the queue to a pristine empty state.
        m_FreeBlocks.~mpmc_node_queue<baselib::mpmc_node>();
        new(&m_FreeBlocks) mpmc_node_queue<baselib::mpmc_node>();
    }
}
|
||||
|
||||
// Grow total capacity to at least `capacity` (same unit as m_Capacity, which
// accumulates block_size * block_count — presumably bytes; confirm with
// ExpandCapacity). Blocks of `size` bytes are carved from the new segment.
// Serialized against concurrent growth by m_CapacityLock.
// Returns false only if the expansion itself failed.
bool reserve(size_t size, size_t capacity, Allocator& allocator)
{
    bool result;
    m_CapacityLock.AcquireScoped([&] {
        // Only expand by the missing amount; already-large-enough is a success.
        result = capacity > m_Capacity ? ExpandCapacity(capacity - m_Capacity, size, allocator) : true;
    });
    return result;
}
|
||||
|
||||
// Slow-path growth hook, called when allocate() came back empty.
// Doubles the current capacity (or seeds it with `size` on first use).
// Returns false only if expansion failed; true when expansion succeeded or
// was no longer needed.
bool increase_capacity(size_t size, Allocator& allocator)
{
    bool result = true;
    m_CapacityLock.AcquireScoped([&] {
        // Re-check under the lock: another thread may have expanded already,
        // in which case blocks are available and no work is needed.
        if (m_FreeBlocks.empty())
            result = ExpandCapacity(m_Capacity == 0 ? size : m_Capacity, size, allocator);
    });
    return result;
}
|
||||
|
||||
// Current total capacity. Relaxed atomic read: the value may be stale while
// another thread is concurrently expanding.
size_t capacity() const
{
    return baselib::atomic_load_explicit(m_Capacity, baselib::memory_order_relaxed);
}

// Smallest actually-allocated size for a request of `size` bytes: rounds up
// to the backing allocator's alignment granularity.
static constexpr size_t optimal_size(const size_t size)
{
    return baselib::Algorithm::CeilAligned(size, alignment);
}
|
||||
};
|
||||
|
||||
// Two-level segregated-fit style allocator front-end.
// Requests are routed to a grid of fixed-size block allocators: the first
// dimension is the power-of-two size class between min_size and max_size, the
// second splits each size class into `linear_subdivisions` linear buckets.
// Backing memory is obtained from BaseAllocator (privately inherited so an
// empty base takes no space).
template<size_t min_size, size_t max_size, size_t linear_subdivisions, class BaseAllocator>
class tlsf_allocator : private BaseAllocator
{
    using BlockAllocator = detail::tlsf_block_allocator<BaseAllocator>;

public:
    // Guaranteed alignment of every allocation, inherited from BaseAllocator.
    static constexpr uint32_t alignment = BaseAllocator::alignment;

    // non-copyable
    tlsf_allocator(const tlsf_allocator& other) = delete;
    tlsf_allocator& operator=(const tlsf_allocator& other) = delete;

    // non-movable (strictly speaking not needed but listed to signal intent)
    tlsf_allocator(tlsf_allocator&& other) = delete;
    tlsf_allocator& operator=(tlsf_allocator&& other) = delete;

    tlsf_allocator() : m_Allocators() {}
    // Destructor releases all backing segments (but does not reset per-bucket
    // bookkeeping — the object is going away anyway).
    ~tlsf_allocator() { DeallocateSegmentsImpl(); }

    // Allocate without growing: returns nullptr if the bucket for `size` has
    // no free block right now.
    void* try_allocate(size_t size)
    {
        return getAllocator(size).allocate();
    }

    // Allocate, growing the bucket's capacity as needed.
    // Returns nullptr only when capacity expansion fails (e.g. BaseAllocator
    // is out of memory).
    void* allocate(size_t size)
    {
        BlockAllocator& allocator = getAllocator(size);
        do
        {
            void* p;
            // Fast path: pop a free block from the bucket.
            if (OPTIMIZER_LIKELY(p = allocator.allocate()))
                return p;
            // Slow path: grow the bucket, then retry the pop (another thread
            // may consume the new blocks first, hence the loop).
            if (!allocator.increase_capacity(AllocatorSize(size), static_cast<BaseAllocator&>(*this)))
                return nullptr;
        }
        while (true);
    }

    // Reallocate without growing capacity; may return nullptr on bucket miss.
    void* try_reallocate(void* ptr, size_t old_size, size_t new_size)
    {
        return ReallocateImpl<true>(ptr, old_size, new_size);
    }

    // Reallocate, growing capacity as needed.
    void* reallocate(void* ptr, size_t old_size, size_t new_size)
    {
        return ReallocateImpl<false>(ptr, old_size, new_size);
    }

    // Return a block to the bucket selected by `size`.
    // `size` must match the size used to allocate `ptr` (same bucket).
    // nullptr is accepted and treated as success.
    bool deallocate(void* ptr, size_t size)
    {
        return ptr == nullptr ? true : getAllocator(size).deallocate(ptr);
    }

    // Release all backing segments and reset every bucket to empty.
    // NOTE(review): not safe to run concurrently with allocate/deallocate;
    // the fences order this against prior/subsequent use from other threads,
    // but outstanding pointers become invalid — confirm caller contract.
    void deallocate_all()
    {
        atomic_thread_fence(memory_order_acquire);
        DeallocateSegmentsImpl();
        for (auto& pow2Allocators : m_Allocators)
            for (auto& blockAllocator : pow2Allocators)
                blockAllocator.reset_segments();
        atomic_thread_fence(memory_order_release);
    }

    // Return a pre-linked chain [ptr_first .. ptr_last] (all from the same
    // `size` bucket, linked via batch_deallocate_link) in one operation.
    // Unlike deallocate(), a null endpoint reports failure.
    bool batch_deallocate(void* ptr_first, void* ptr_last, size_t size)
    {
        return ((ptr_first == nullptr) || (ptr_last == nullptr)) ? false : getAllocator(size).deallocate(ptr_first, ptr_last);
    }

    // Link two blocks for a later batch_deallocate by writing the intrusive
    // queue `next` pointer into the block's storage.
    void batch_deallocate_link(void* ptr, void* ptr_next)
    {
        reinterpret_cast<baselib::mpmc_node*>(ptr)->next = reinterpret_cast<baselib::mpmc_node*>(ptr_next);
    }

    // Pre-grow the bucket for `size` to at least `capacity`.
    bool reserve(size_t size, size_t capacity)
    {
        return getAllocator(size).reserve(AllocatorSize(size), capacity, static_cast<BaseAllocator&>(*this));
    }

    // Current capacity of the bucket that serves `size`.
    size_t capacity(size_t size)
    {
        return getAllocator(size).capacity();
    }

    // The actual block size a request of `size` bytes would consume
    // (bucket rounding plus alignment); 0 maps to 0.
    static constexpr size_t optimal_size(const size_t size)
    {
        return size == 0 ? 0 : BlockAllocator::optimal_size(AllocatorSize(size));
    }

private:
    // Constexpr helpers usable in the member-constant initializers below
    // (kept C++11-compatible: single-expression recursion).
    struct CompileTime
    {
        // Floor of log2(value); Log2Base(1) == 0.
        static constexpr size_t Log2Base(size_t value, size_t offset) { return (value > 1) ? Log2Base(value >> (size_t)1, offset + 1) : offset; }
        static constexpr size_t Log2Base(size_t value) { return Log2Base(value, 0); }
        static constexpr size_t Max(size_t a, size_t b) { return a > b ? a : b; }
    };

    // Effective minimum block size: must at least fit an intrusive pointer,
    // cover one unit per linear subdivision, and honor the base alignment.
    static constexpr size_t m_MinSize = CompileTime::Max(min_size, CompileTime::Max(CompileTime::Max(sizeof(void*), linear_subdivisions), alignment));
    static constexpr size_t m_MinSizePow2 = baselib::Algorithm::CeilPowerOfTwo(m_MinSize);
    static constexpr size_t m_MaxSizePow2 = baselib::Algorithm::CeilPowerOfTwo(CompileTime::Max(max_size, m_MinSize));
    // OR-mask that clamps tiny sizes so AllocatorSizeLog2 never goes below the
    // smallest size class.
    static constexpr size_t m_MinSizeMask = static_cast<size_t>(1) << CompileTime::Log2Base(m_MinSizePow2 - 1);
    // Number of power-of-two size classes (first grid dimension).
    static constexpr size_t m_AllocatorCount = (CompileTime::Log2Base(m_MaxSizePow2) - CompileTime::Log2Base(m_MinSizePow2)) + 1;
    static constexpr size_t m_AllocatorBaseOffsetLog2 = CompileTime::Log2Base(m_MinSizePow2) - 1;
    static constexpr size_t m_LinearSubdivisionsLog2 = CompileTime::Log2Base(linear_subdivisions);

    // Index (log2) of the size class serving `size`, clamped to the minimum class.
    static constexpr size_t AllocatorSizeLog2(size_t size) { return baselib::Algorithm::HighestBitNonZero(size | m_MinSizeMask); }
    // Linear sub-bucket index within a size class: the bits of `size` below
    // 2^sizeLog2, shifted down to the subdivision granularity.
    static constexpr size_t LinearAllocatorSizeLog2(size_t size, size_t sizeLog2) { return (size & ((size_t)1 << sizeLog2) - 1) >> (sizeLog2 - m_LinearSubdivisionsLog2); }

    // The AllocatorSize/getAllocator overloads below are selected at compile
    // time via enable_if on the template configuration; the distinct `value`
    // defaults (1/2, 3/4, ...) keep the signatures unique so they can coexist.

    // Single size class, no subdivisions: every request maps to one size.
    template<int value = ((m_AllocatorCount == 1 && linear_subdivisions == 1) ? 1 : 2), typename std::enable_if<(value == 1), int>::type = 0>
    static constexpr FORCE_INLINE size_t AllocatorSize(size_t size)
    {
        return m_MinSizePow2;
    }

    // Multiple size classes, no subdivisions: round up to the next power of two.
    template<int value = ((m_AllocatorCount != 1 && linear_subdivisions == 1) ? 3 : 4), typename std::enable_if<(value == 3), int>::type = 0>
    static constexpr FORCE_INLINE size_t AllocatorSize(size_t size)
    {
        return (size_t)1 << (AllocatorSizeLog2(size - 1) + 1);
    }

    // With subdivisions: round up to the next linear sub-bucket boundary.
    template<int value = (linear_subdivisions == 1) ? 0 : 1, typename std::enable_if<(value), int>::type = 0>
    static FORCE_INLINE size_t AllocatorSize(size_t size)
    {
        const size_t subDivSize = ((size_t)1 << baselib::Algorithm::HighestBitNonZero(size)) >> m_LinearSubdivisionsLog2;
        return (size - 1 & ~(subDivSize - 1)) + subDivSize;
    }

    // Single bucket overall.
    template<int value = ((m_AllocatorCount == 1 && linear_subdivisions == 1) ? 1 : 2), typename std::enable_if<(value == 1), int>::type = 0>
    BlockAllocator& getAllocator(size_t)
    {
        return m_Allocators[0][0];
    }

    // Power-of-two classes only.
    template<int value = ((m_AllocatorCount != 1 && linear_subdivisions == 1) ? 3 : 4), typename std::enable_if<(value == 3), int>::type = 0>
    BlockAllocator& getAllocator(const size_t size)
    {
        return m_Allocators[AllocatorSizeLog2(size - 1) - m_AllocatorBaseOffsetLog2][0];
    }

    // One size class with linear subdivisions.
    template<int value = ((m_AllocatorCount == 1 && linear_subdivisions != 1) ? 5 : 6), typename std::enable_if<(value == 5), int>::type = 0>
    BlockAllocator& getAllocator(size_t size)
    {
        --size;
        return m_Allocators[0][LinearAllocatorSizeLog2(size, AllocatorSizeLog2(size))];
    }

    // Full two-level lookup: size class, then linear sub-bucket.
    template<int value = ((m_AllocatorCount != 1 && linear_subdivisions != 1) ? 7 : 8), typename std::enable_if<(value == 7), int>::type = 0>
    BlockAllocator& getAllocator(size_t size)
    {
        --size;
        const size_t sizeLog2 = AllocatorSizeLog2(size);
        return m_Allocators[sizeLog2 - m_AllocatorBaseOffsetLog2][LinearAllocatorSizeLog2(size, sizeLog2)];
    }

    // SFINAE trait: true when T has a member `void deallocate_all()`.
    template<typename T> struct has_deallocate_all
    {
        template<typename U, void (U::*)()> struct Check;
        template<typename U> static constexpr bool test(Check<U, &U::deallocate_all> *) { return true; }
        template<typename U> static constexpr bool test(...) { return false; }
        static constexpr bool value = test<T>(nullptr);
    };

    // If the base allocator can drop everything at once, prefer that...
    template<bool value = has_deallocate_all<BaseAllocator>::value, typename std::enable_if<(value), int>::type = 0>
    void DeallocateSegmentsImpl()
    {
        BaseAllocator::deallocate_all();
    }

    // ...otherwise free each bucket's segments individually.
    template<bool value = has_deallocate_all<BaseAllocator>::value, typename std::enable_if<(!value), int>::type = 0>
    void DeallocateSegmentsImpl()
    {
        for (auto& pow2Allocators : m_Allocators)
            for (auto& blockAllocator : pow2Allocators)
                blockAllocator.deallocate_segments(static_cast<BaseAllocator&>(*this));
    }

    // Shared body for reallocate/try_reallocate; `use_try_allocate` selects
    // whether capacity growth is allowed.
    template<bool use_try_allocate>
    void* ReallocateImpl(void* ptr, size_t old_size, size_t new_size)
    {
        // Realloc of nullptr degenerates to a plain allocation.
        if (ptr == nullptr)
            return use_try_allocate ? try_allocate(new_size) : allocate(new_size);

        BlockAllocator& oldAllocator = getAllocator(old_size);
        BlockAllocator& newAllocator = getAllocator(new_size);
        // Same bucket means same underlying block size: nothing to move.
        if (&oldAllocator == &newAllocator)
            return ptr;

        void* newPtr = newAllocator.allocate();
        if ((!use_try_allocate) && (newPtr == nullptr))
            newPtr = allocate(new_size);

        if (newPtr)
        {
            // Copy the overlapping portion, then release the old block.
            std::memcpy(newPtr, ptr, std::min(new_size, old_size));
            oldAllocator.deallocate(ptr);
        }
        // On failure the old block is left untouched and still owned by the caller.
        return newPtr;
    }

    // [size class][linear subdivision] grid of fixed-size block allocators.
    BlockAllocator m_Allocators[m_AllocatorCount][linear_subdivisions];
};
|
||||
}
|
||||
}
|
||||
}
|
||||
172
Libraries/external/baselib/Include/Cpp/Lock.h
vendored
Normal file
172
Libraries/external/baselib/Include/Cpp/Lock.h
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Lock.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a lock or mutex (from mutual exclusion) is a synchronization mechanism for enforcing limits on access to a resource in an environment
|
||||
// where there are many threads of execution. A lock is designed to enforce a mutual exclusion concurrency control policy.
|
||||
//
|
||||
// "Lock (computer science)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Lock_(computer_science)&oldid=875674239
|
||||
class Lock
|
||||
{
|
||||
public:
|
||||
// non-copyable
|
||||
Lock(const Lock& other) = delete;
|
||||
Lock& operator=(const Lock& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
Lock(Lock&& other) = delete;
|
||||
Lock& operator=(Lock&& other) = delete;
|
||||
|
||||
// Creates a lock synchronization primitive.
|
||||
// If there are not enough system resources to create a lock, process abort is triggered.
|
||||
Lock() : m_LockData(Baselib_Lock_Create())
|
||||
{
|
||||
}
|
||||
|
||||
// Reclaim resources and memory held by lock.
|
||||
// If threads are waiting on the lock, calling free may trigger an assert and may cause process abort.
|
||||
~Lock()
|
||||
{
|
||||
Baselib_Lock_Free(&m_LockData);
|
||||
}
|
||||
|
||||
// Acquire lock.
|
||||
//
|
||||
// If lock is held, either by this or another thread, then the function wait for lock to be released.
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
inline void Acquire()
|
||||
{
|
||||
return Baselib_Lock_Acquire(&m_LockData);
|
||||
}
|
||||
|
||||
// Try to acquire lock and return immediately.
|
||||
// If lock is held, either by this or another thread, then lock is not acquired and function return false.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
FORCE_INLINE bool TryAcquire()
|
||||
{
|
||||
return Baselib_Lock_TryAcquire(&m_LockData);
|
||||
}
|
||||
|
||||
// Try to acquire lock.
|
||||
// If lock is held, either by this or another thread, then the function wait for timeoutInMilliseconds for lock to be released.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// TryAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
|
||||
// while TryAcquire with zero timeout may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
FORCE_INLINE bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
|
||||
{
|
||||
return Baselib_Lock_TryTimedAcquire(&m_LockData, timeoutInMilliseconds.count());
|
||||
}
|
||||
|
||||
// Release lock and make it available to other threads.
|
||||
//
|
||||
// This function can be called from any thread, not only the thread that acquired the lock.
|
||||
// If no lock was previously held calling this function result in a no-op.
|
||||
//
|
||||
// When the lock is released this function is guaranteed to emit a release barrier.
|
||||
FORCE_INLINE void Release()
|
||||
{
|
||||
return Baselib_Lock_Release(&m_LockData);
|
||||
}
|
||||
|
||||
// Acquire lock and invoke user defined function.
|
||||
// If lock is held, either by this or another thread, then the function wait for lock to be released.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Example usage:
|
||||
// lock.AcquireScoped([] {
|
||||
// enteredCriticalSection++;
|
||||
// });
|
||||
template<class FunctionType>
|
||||
FORCE_INLINE void AcquireScoped(const FunctionType& func)
|
||||
{
|
||||
ReleaseOnDestroy releaseScope(*this);
|
||||
Acquire();
|
||||
func();
|
||||
}
|
||||
|
||||
// Try to acquire lock and invoke user defined function.
|
||||
// If lock is held, either by this or another thread, then lock is not acquired and function return false.
|
||||
// On failure to obtain lock the user defined function is not invoked.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Example usage:
|
||||
// lock.TryAcquireScoped([] {
|
||||
// enteredCriticalSection++;
|
||||
// });
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
template<class FunctionType>
|
||||
FORCE_INLINE bool TryAcquireScoped(const FunctionType& func)
|
||||
{
|
||||
if (TryAcquire())
|
||||
{
|
||||
ReleaseOnDestroy releaseScope(*this);
|
||||
func();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Try to acquire lock and invoke user defined function.
|
||||
// If lock is held, either by this or another thread, then the function wait for timeoutInMilliseconds for lock to be released.
|
||||
// On failure to obtain lock the user defined function is not invoked.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// Example usage:
|
||||
// bool lockAcquired = lock.TryTimedAcquireScoped(std::chrono::minutes(1), [] {
|
||||
// enteredCriticalSection++;
|
||||
// });
|
||||
// assert(lockAcquired);
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
template<class FunctionType>
|
||||
FORCE_INLINE bool TryTimedAcquireScoped(const timeout_ms timeoutInMilliseconds, const FunctionType& func)
|
||||
{
|
||||
if (TryTimedAcquire(timeoutInMilliseconds))
|
||||
{
|
||||
ReleaseOnDestroy releaseScope(*this);
|
||||
func();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
class ReleaseOnDestroy
|
||||
{
|
||||
public:
|
||||
FORCE_INLINE ReleaseOnDestroy(Lock& lockReference) : m_LockReference(lockReference) {}
|
||||
FORCE_INLINE ~ReleaseOnDestroy() { m_LockReference.Release(); }
|
||||
private:
|
||||
Lock& m_LockReference;
|
||||
};
|
||||
|
||||
Baselib_Lock m_LockData;
|
||||
};
|
||||
}
|
||||
}
|
||||
186
Libraries/external/baselib/Include/Cpp/ReentrantLock.h
vendored
Normal file
186
Libraries/external/baselib/Include/Cpp/ReentrantLock.h
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_ReentrantLock.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, the reentrant mutex (recursive mutex, recursive lock) is particular type of mutual exclusion (mutex) device that may be locked multiple
|
||||
// times by the same process/thread, without causing a deadlock.
|
||||
|
||||
// While any attempt to perform the "lock" operation on an ordinary mutex (lock) would either fail or block when the mutex is already locked, on a recursive
|
||||
// mutex this operation will succeed if and only if the locking thread is the one that already holds the lock. Typically, a recursive mutex tracks the number
|
||||
// of times it has been locked, and requires equally many unlock operations to be performed before other threads may lock it.
|
||||
//
|
||||
// "Reentrant mutex", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Reentrant_mutex&oldid=818566928
|
||||
//
|
||||
// For optimal performance, baselib::ReentrantLock should be stored at a cache aligned memory location.
|
||||
class ReentrantLock
|
||||
{
|
||||
public:
|
||||
// non-copyable
|
||||
ReentrantLock(const ReentrantLock& other) = delete;
|
||||
ReentrantLock& operator=(const ReentrantLock& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
ReentrantLock(ReentrantLock&& other) = delete;
|
||||
ReentrantLock& operator=(ReentrantLock&& other) = delete;
|
||||
|
||||
// Creates a reentrant lock synchronization primitive.
|
||||
// If there are not enough system resources to create a lock, process abort is triggered.
|
||||
ReentrantLock() : m_ReentrantLockData(Baselib_ReentrantLock_Create())
|
||||
{
|
||||
}
|
||||
|
||||
// Reclaim resources and memory held by lock.
|
||||
//
|
||||
// If threads are waiting on the lock, calling free may trigger an assert and may cause process abort.
|
||||
// Calling this function with a nullptr result in a no-op
|
||||
~ReentrantLock()
|
||||
{
|
||||
Baselib_ReentrantLock_Free(&m_ReentrantLockData);
|
||||
}
|
||||
|
||||
// Acquire lock.
|
||||
//
|
||||
// If lock is already acquired by the current thread this function increase the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
|
||||
// to be made before the lock is released.
|
||||
// If lock is held by another thread, this function wait for lock to be released.
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
inline void Acquire()
|
||||
{
|
||||
return Baselib_ReentrantLock_Acquire(&m_ReentrantLockData);
|
||||
}
|
||||
|
||||
// Try to acquire lock and return immediately.
|
||||
// If lock is already acquired by the current thread this function increase the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
|
||||
// to be made before the lock is released.
|
||||
//
|
||||
// When lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
FORCE_INLINE bool TryAcquire()
|
||||
{
|
||||
return Baselib_ReentrantLock_TryAcquire(&m_ReentrantLockData);
|
||||
}
|
||||
|
||||
// Try to acquire lock.
|
||||
// If lock is already acquired by the current thread this function increase the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
|
||||
// to be made before the lock is released.
|
||||
// If lock is held by another thread, this function wait for timeoutInMilliseconds for lock to be released.
|
||||
//
|
||||
// When lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// TryAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
|
||||
// while TryAcquire with zero timeout may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
FORCE_INLINE bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
|
||||
{
|
||||
return Baselib_ReentrantLock_TryTimedAcquire(&m_ReentrantLockData, timeoutInMilliseconds.count());
|
||||
}
|
||||
|
||||
// Release lock.
|
||||
// If lock count is still higher than zero after the release operation then lock remain in a locked state.
|
||||
// If lock count reach zero the lock is unlocked and made available to other threads
|
||||
//
|
||||
// When the lock is released this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Calling this function from a thread that doesn't own the lock triggers an assert in debug and causes undefined behavior in release builds.
|
||||
FORCE_INLINE void Release()
|
||||
{
|
||||
return Baselib_ReentrantLock_Release(&m_ReentrantLockData);
|
||||
}
|
||||
|
||||
// Acquire lock and invoke user defined function.
|
||||
// If lock is held by another thread, this function wait for lock to be released.
|
||||
//
|
||||
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Example usage:
|
||||
// lock.AcquireScoped([] {
|
||||
// enteredCriticalSection++;
|
||||
// });
|
||||
template<class FunctionType>
|
||||
FORCE_INLINE void AcquireScoped(const FunctionType& func)
|
||||
{
|
||||
ReleaseOnDestroy releaseScope(*this);
|
||||
Acquire();
|
||||
func();
|
||||
}
|
||||
|
||||
// Try to acquire lock and invoke user defined function.
|
||||
// If lock is held by another thread, this function wait for timeoutInMilliseconds for lock to be released.
|
||||
// On failure to obtain lock the user defined function is not invoked.
|
||||
//
|
||||
// When lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Example usage:
|
||||
// lock.TryAcquireScoped([] {
|
||||
// enteredCriticalSection++;
|
||||
// });
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
template<class FunctionType>
|
||||
FORCE_INLINE bool TryAcquireScoped(const FunctionType& func)
|
||||
{
|
||||
if (TryAcquire())
|
||||
{
|
||||
ReleaseOnDestroy releaseScope(*this);
|
||||
func();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Try to acquire lock and invoke user defined function.
|
||||
// If lock is held by another thread, this function wait for timeoutInMilliseconds for lock to be released.
|
||||
// On failure to obtain lock the user defined function is not invoked.
|
||||
//
|
||||
// When lock is acquired this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// Example usage:
|
||||
// bool lockAcquired = lock.TryTimedAcquireScoped(std::chrono::minutes(1), [] {
|
||||
// enteredCriticalSection++;
|
||||
// });
|
||||
// assert(lockAcquired);
|
||||
//
|
||||
// Return: true if lock was acquired.
|
||||
template<class FunctionType>
|
||||
FORCE_INLINE bool TryTimedAcquireScoped(const timeout_ms timeoutInMilliseconds, const FunctionType& func)
|
||||
{
|
||||
if (TryTimedAcquire(timeoutInMilliseconds))
|
||||
{
|
||||
ReleaseOnDestroy releaseScope(*this);
|
||||
func();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
class ReleaseOnDestroy
|
||||
{
|
||||
public:
|
||||
FORCE_INLINE ReleaseOnDestroy(ReentrantLock& lockReference) : m_LockReference(lockReference) {}
|
||||
FORCE_INLINE ~ReleaseOnDestroy() { m_LockReference.Release(); }
|
||||
private:
|
||||
ReentrantLock& m_LockReference;
|
||||
};
|
||||
|
||||
Baselib_ReentrantLock m_ReentrantLockData;
|
||||
};
|
||||
}
|
||||
}
|
||||
115
Libraries/external/baselib/Include/Cpp/Semaphore.h
vendored
Normal file
115
Libraries/external/baselib/Include/Cpp/Semaphore.h
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Semaphore.h"
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
|
||||
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
|
||||
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
|
||||
// decremented, or toggled) depending on programmer-defined conditions.
|
||||
//
|
||||
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
|
||||
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
|
||||
// resource becomes available.
|
||||
//
|
||||
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
|
||||
//
|
||||
// For optimal performance, baselib::Semaphore should be stored at a cache aligned memory location.
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
// non-copyable
|
||||
Semaphore(const Semaphore& other) = delete;
|
||||
Semaphore& operator=(const Semaphore& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
Semaphore(Semaphore&& other) = delete;
|
||||
Semaphore& operator=(Semaphore&& other) = delete;
|
||||
|
||||
// This is the max number of tokens guaranteed to be held by the semaphore at
|
||||
// any given point in time. Tokens submitted that exceed this value may silently
|
||||
// be discarded.
|
||||
enum { MaxGuaranteedCount = Baselib_Semaphore_MaxGuaranteedCount };
|
||||
|
||||
// Creates a counting semaphore synchronization primitive.
|
||||
// If there are not enough system resources to create a semaphore, process abort is triggered.
|
||||
Semaphore() : m_SemaphoreData(Baselib_Semaphore_Create())
|
||||
{
|
||||
}
|
||||
|
||||
// Reclaim resources and memory held by the semaphore.
|
||||
//
|
||||
// If threads are waiting on the semaphore, destructor will trigger an assert and may cause process abort.
|
||||
~Semaphore()
|
||||
{
|
||||
Baselib_Semaphore_Free(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// This function is guaranteed to emit an acquire barrier.
|
||||
inline void Acquire()
|
||||
{
|
||||
return Baselib_Semaphore_Acquire(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
// Try to consume a token and return immediately.
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// Return: true if token was consumed. false if not.
|
||||
inline bool TryAcquire()
|
||||
{
|
||||
return Baselib_Semaphore_TryAcquire(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
// Wait for semaphore token to become available
|
||||
//
|
||||
// When successful this function is guaranteed to emit an acquire barrier.
|
||||
//
|
||||
// TryAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
|
||||
// while Acquire with a zero timeout may enter the kernel and cause a context switch.
|
||||
//
|
||||
// Timeout passed to this function may be subject to system clock resolution.
|
||||
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
|
||||
//
|
||||
// Arguments:
|
||||
// - timeout: Time to wait for token to become available.
|
||||
//
|
||||
// Return: true if token was consumed. false if timeout was reached.
|
||||
inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
|
||||
{
|
||||
return Baselib_Semaphore_TryTimedAcquire(&m_SemaphoreData, timeoutInMilliseconds.count());
|
||||
}
|
||||
|
||||
// Submit tokens to the semaphore.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
|
||||
// If count reach `Baselib_Semaphore_MaxGuaranteedCount` this function may silently discard any overflow.
|
||||
inline void Release(uint16_t count)
|
||||
{
|
||||
return Baselib_Semaphore_Release(&m_SemaphoreData, count);
|
||||
}
|
||||
|
||||
// Sets the semaphore token count to zero and release all waiting threads.
|
||||
//
|
||||
// When successful this function is guaranteed to emit a release barrier.
|
||||
//
|
||||
// Return: number of released threads.
|
||||
inline uint32_t ResetAndReleaseWaitingThreads()
|
||||
{
|
||||
return Baselib_Semaphore_ResetAndReleaseWaitingThreads(&m_SemaphoreData);
|
||||
}
|
||||
|
||||
private:
|
||||
Baselib_Semaphore m_SemaphoreData;
|
||||
};
|
||||
}
|
||||
}
|
||||
39
Libraries/external/baselib/Include/Cpp/Stopwatch.h
vendored
Normal file
39
Libraries/external/baselib/Include/Cpp/Stopwatch.h
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
#pragma once
|
||||
|
||||
#include "Time.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Stopwatch
|
||||
// Simplistic stopwatch tool to take accurate time measurements using Baselib_Timer
|
||||
//
|
||||
// Usage example:
|
||||
// auto watch = Stopwatch::StartNew();
|
||||
// HeavyOperation();
|
||||
// printf("Time passed: %fs", watch.GetElapsedTime().ToSeconds());
|
||||
class Stopwatch
|
||||
{
|
||||
public:
|
||||
static Stopwatch StartNew() { return Stopwatch(); }
|
||||
|
||||
high_precision_clock::duration GetElapsedTime() const
|
||||
{
|
||||
return high_precision_clock::duration_from_ticks(high_precision_clock::now_in_ticks() - m_StartTime);
|
||||
}
|
||||
|
||||
high_precision_clock::duration Restart()
|
||||
{
|
||||
high_precision_clock::duration elapsed = GetElapsedTime();
|
||||
m_StartTime = high_precision_clock::now_in_ticks();
|
||||
return elapsed;
|
||||
}
|
||||
|
||||
private:
|
||||
Stopwatch() : m_StartTime(high_precision_clock::now_in_ticks()) {}
|
||||
|
||||
Baselib_Timer_Ticks m_StartTime;
|
||||
};
|
||||
}
|
||||
}
|
||||
135
Libraries/external/baselib/Include/Cpp/Thread.h
vendored
Normal file
135
Libraries/external/baselib/Include/Cpp/Thread.h
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Thread.h"
|
||||
#include "Time.h"
|
||||
|
||||
#include <memory>
|
||||
#if !COMPILER_SUPPORTS_GENERIC_LAMBDA_EXPRESSIONS
|
||||
#include <functional>
|
||||
#endif
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
/*
|
||||
This class is not supposed to be used as-is.
|
||||
Instead separate thread class should be created to explicitely define thread lifetime.
|
||||
This is useful to avoid having timeout constants all over the codebase.
|
||||
|
||||
class ApplicationThread : public baselib::Thread
|
||||
{
|
||||
public:
|
||||
// Expose base class constructors.
|
||||
using baselib::Thread::Thread;
|
||||
|
||||
void Join()
|
||||
{
|
||||
// Thread must join with-in 10 seconds, or this is an error.
|
||||
// Use application specific methods to report error and/or try again.
|
||||
assert(baselib::Thread::TryJoin(10 * 1000) == true);
|
||||
}
|
||||
};
|
||||
|
||||
*/
|
||||
class BASELIB_API Thread
{
public:
    // Default constructor does nothing, useful when declaring thread as field in classes/structs.
    // No underlying thread exists until the generic constructor is used.
    Thread() = default;

    // Generic Constructor
    //
    // Starts a thread executing f(args...). The callable and its arguments are captured
    // by copy into a wrapper object which is handed to the new thread's entry point.
    // NOTE(review): creation failure is only observable via `thread` staying nullptr —
    // confirm CreateThread's failure contract against its definition.
    template<class FunctionType , class ... Args>
    Thread(FunctionType && f, Args && ... args)
    {
#if COMPILER_SUPPORTS_GENERIC_LAMBDA_EXPRESSIONS
        // This generates cleaner and nicer-to-debug code
        auto wrapped = [ = ] {f(args ...);};
#else
        auto wrapped = std::bind(f, args ...);
#endif
        using Container = decltype(wrapped);

        // Small object optimization: if the wrapper fits in (and is alignable as) a
        // pointer, it is stored directly inside the void* argument passed to the thread
        // entry point instead of being heap-allocated.
        constexpr bool smallObject = (sizeof(Container) <= sizeof(void*)) && (alignof(Container) <= alignof(void*));
        if (smallObject)
        {
            union
            {
                // Sizing the buffer as sizeof(Container) rather than sizeof(void*)
                // avoids placement-new size errors even when this code path is not
                // executed. (Note: this union member shadows the constexpr bool
                // `smallObject` above inside this scope.)
                char buf[sizeof(Container)];
                void* smallObject;
            };
            smallObject = nullptr; // to avoid -Wmaybe-uninitialized
            // We have to move the wrapper into the buffer, otherwise `wrapped`'s
            // destructor would destroy the state the new thread is about to use.
            new(buf) Container(std::move(wrapped));

            thread = CreateThread(ThreadProxySmallObject<Container>, smallObject);
        }
        else
        {
            // Wrapper too large for inline storage: heap-allocate it. Ownership
            // transfers to the new thread (ThreadProxyHeap deletes it) only when
            // creation succeeded; otherwise the unique_ptr cleans up here.
            std::unique_ptr<Container> ptr(new Container(std::move(wrapped)));
            thread = CreateThread(ThreadProxyHeap<Container>, ptr.get());
            if (thread)
                ptr.release();
        }
    }

    // Thread has to be joined before destructor is called
    ~Thread();

    // Non-copyable
    Thread(const Thread&) = delete;
    Thread& operator=(const Thread&) = delete;

    // Movable
    Thread(Thread&& other);
    Thread& operator=(Thread&& other);

    // Return true if threads are supported
    static bool SupportsThreads();

    // Return true if join succeeded within the given timeout (milliseconds)
    COMPILER_WARN_UNUSED_RESULT bool TryJoin(timeout_ms timeout);

    // Yields execution of the calling thread
    static inline void YieldExecution()
    {
        Baselib_Thread_YieldExecution();
    }

    // Returns the id of this object's underlying thread
    inline Baselib_Thread_Id GetId()
    {
        return Baselib_Thread_GetId(thread);
    }

    // Returns the id of the calling thread
    static inline Baselib_Thread_Id GetCurrentId()
    {
        return Baselib_Thread_GetCurrentThreadId();
    }

private:
    // Underlying baselib thread; nullptr when default-constructed, moved-from,
    // or when creation failed.
    Baselib_Thread* thread = nullptr;

    static Baselib_Thread* CreateThread(Baselib_Thread_EntryPointFunction function, void* arg);

    // Entry point for the heap-allocated path: takes ownership of `data` and
    // destroys the wrapper after the callable returns.
    template<class T>
    static void ThreadProxyHeap(void* data)
    {
        std::unique_ptr<T> ptr(reinterpret_cast<T*>(data));
        (*ptr)();
    }

    // Entry point for the small-object path: the wrapper's bytes live inside the
    // `data` pointer value itself, so it is invoked in place and destroyed manually.
    template<class T>
    static void ThreadProxySmallObject(void* data)
    {
        T* ptr = reinterpret_cast<T*>(&data);
        (*ptr)();
        ptr->~T();
    }
};
|
||||
}
|
||||
}
|
||||
103
Libraries/external/baselib/Include/Cpp/ThreadLocalStorage.h
vendored
Normal file
103
Libraries/external/baselib/Include/Cpp/ThreadLocalStorage.h
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_ThreadLocalStorage.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Thread Local Storage provides a variable that can be global but have different value in every thread.
|
||||
// For more details see Baselib_ThreadLocalStorage.
|
||||
// On some platforms this might be fiber local storage.
|
||||
//
|
||||
// Example of usage:
|
||||
// static ThreadLocalStorage<int32_t> threadErrorState;
|
||||
template<typename T>
class ThreadLocalStorage
{
public:
    // by nature of TLS slots, they must be non-copyable, so
    ThreadLocalStorage(const ThreadLocalStorage & other) = delete;
    ThreadLocalStorage& operator=(const ThreadLocalStorage & other) = delete;

    // Allocates a fresh TLS slot. T must fit in a uintptr_t because values are
    // stored by casting to the slot's integer representation.
    ThreadLocalStorage()
    {
        static_assert(sizeof(T) <= sizeof(uintptr_t), "Provided type is too large to be stored in ThreadLocalStorage");
        handle = Baselib_TLS_Alloc();
    }

    // Frees the slot unless it was moved away from this instance.
    ~ThreadLocalStorage()
    {
        if (IsValid())
        {
            Baselib_TLS_Free(handle);
            handle = InvalidTLSHandle;
        }
    }

    // Move constructor: steals the slot from `other`, leaving `other` invalid.
    ThreadLocalStorage(ThreadLocalStorage && other)
    {
        // ensure that we don't leak local handle
        // NOTE(review): `handle` is default-initialized to InvalidTLSHandle before
        // this body runs, so this branch can never be taken here — dead code.
        if (handle != InvalidTLSHandle)
            Baselib_TLS_Free(handle);
        handle = other.handle;
        other.handle = InvalidTLSHandle;
    }

    // Check if variable is valid.
    // The only case when variable might be invalid is if it was moved to some other instance.
    inline bool IsValid() const
    {
        return handle != InvalidTLSHandle;
    }

    // Resets value in all threads (implemented by freeing the slot and allocating a new one).
    void Reset()
    {
        Baselib_TLS_Free(handle);
        handle = Baselib_TLS_Alloc();
    }

    // Sets the calling thread's value for this variable.
    inline T operator=(T value)
    {
        Baselib_TLS_Set(handle, (uintptr_t)value);
        return value;
    }

    // Move assignment: swaps slots so `other`'s destructor releases our old slot.
    inline ThreadLocalStorage<T>& operator=(ThreadLocalStorage&& other)
    {
        // swap values
        Baselib_TLS_Handle t = handle;
        handle = other.handle;
        other.handle = t;
        return *this;
    }

    // Reads the calling thread's value.
    // NOTE(review): the initial per-thread value is whatever Baselib_TLS_Get returns
    // for an unset slot — presumably 0; confirm against Baselib_ThreadLocalStorage.h.
    inline operator T() const
    {
        return (T)Baselib_TLS_Get(handle);
    }

    // Member access through the stored value; only meaningful when T is a pointer type.
    inline T operator->() const
    {
        return (T)Baselib_TLS_Get(handle);
    }

    // Pre-increment of the calling thread's value.
    inline T operator++()
    {
        *this = *this + 1;
        return *this;
    }

    // Pre-decrement of the calling thread's value.
    inline T operator--()
    {
        *this = *this - 1;
        return *this;
    }

private:
    // Underlying TLS slot; InvalidTLSHandle marks a moved-from/invalid instance.
    Baselib_TLS_Handle handle = InvalidTLSHandle;
    static constexpr uintptr_t InvalidTLSHandle = UINTPTR_MAX;
};
|
||||
}
|
||||
}
|
||||
50
Libraries/external/baselib/Include/Cpp/Time.h
vendored
Normal file
50
Libraries/external/baselib/Include/Cpp/Time.h
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Timer.h"
|
||||
#include <chrono>
|
||||
#include <cmath>
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
using timeout_ms = std::chrono::duration<uint32_t, std::milli>;
|
||||
using timeout_us = std::chrono::duration<uint64_t, std::micro>;
|
||||
|
||||
struct high_precision_clock
|
||||
{
|
||||
using duration = std::chrono::duration<double, std::nano>;
|
||||
using time_point = std::chrono::time_point<high_precision_clock, duration>;
|
||||
using rep = duration::rep;
|
||||
using period = duration::period;
|
||||
|
||||
static constexpr bool is_steady = true;
|
||||
|
||||
static time_point now()
|
||||
{
|
||||
return time_point_from_ticks(now_in_ticks());
|
||||
}
|
||||
|
||||
static Baselib_Timer_Ticks now_in_ticks()
|
||||
{
|
||||
return Baselib_Timer_GetHighPrecisionTimerTicks();
|
||||
}
|
||||
|
||||
static duration duration_from_ticks(Baselib_Timer_Ticks ticks)
|
||||
{
|
||||
return duration(ticks * Baselib_Timer_TickToNanosecondsConversionFactor);
|
||||
}
|
||||
|
||||
static Baselib_Timer_Ticks ticks_from_duration_roundup(duration d)
|
||||
{
|
||||
double ticks = d.count() / Baselib_Timer_TickToNanosecondsConversionFactor;
|
||||
return (Baselib_Timer_Ticks)std::ceil(ticks);
|
||||
}
|
||||
|
||||
static time_point time_point_from_ticks(Baselib_Timer_Ticks ticks)
|
||||
{
|
||||
return time_point(duration_from_ticks(ticks));
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
123
Libraries/external/baselib/Include/Cpp/affix_allocator.h
vendored
Normal file
123
Libraries/external/baselib/Include/Cpp/affix_allocator.h
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
#include "Algorithm.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Baselib affix allocator implementation providing optional prefix and suffix memory regions in addition to requested size.
|
||||
//
|
||||
// The affix allocator purpose is to provide memory regions directly adjacent to allocated memory of requested size and alignment.
|
||||
// It is not intended to be a turn-key, general purpose solution, but rather act as a template building block for derived allocators which may extend,
|
||||
// add or ignore methods for specific needs.
|
||||
//
|
||||
// Allocation methods allocate, reallocate and deallocate are using the `Allocator` implementation for memory allocation, as are alignment properties.
|
||||
// As a rule of thumb, Allocator method calls may fail depending on their specific implementation.
|
||||
// What (if any) action is to be taken in such cases is intentionally left to be implemented by the derived class.
|
||||
//
|
||||
// No operations, synchronisation or alignment concept are applied to the prefix or suffix memory.
|
||||
// Prefix memory address is obtained using the `prefix` function and is always allocated memory pointer minus prefix_size (ptr - prefix_size).
|
||||
// Suffix memory address is obtained using the `suffix` function and is always directly adjacent to the end of allocated memory (ptr + size).
|
||||
//
|
||||
// Notes on memory footprint:
|
||||
// Internally allocated memory must be large enough to hold requested allocation size, prefix_size, suffix_size and alignment padding.
|
||||
// The internally allocated size is calculated as follows: size + suffix_size + (prefix_size rounded up to alignment).
|
||||
// If alignment padding is significant, it may be preferable to use a suffix over a prefix to reduce memory footprint.
|
||||
//
|
||||
template<class Allocator, size_t prefix_size, size_t suffix_size>
class affix_allocator : protected Allocator
{
public:
    // Allocated memory is guaranteed to always be aligned to at least the value of `alignment`.
    static constexpr uint32_t alignment = Allocator::alignment;

    // Allocates a memory block large enough to hold `size` number of bytes. Zero size is valid.
    // Internally allocates size + affix and returns the address just past the (aligned) prefix.
    //
    // \returns Address to memory block of allocated memory.
    void* allocate(size_t size)
    {
        return OffsetPtrChecked(Allocator::allocate(size + m_AffixSize), m_PrefixAlignedSize);
    }

    // Reallocates previously allocated or reallocated memory block pointer reference `ptr` from `old_size` to `new_size` number of bytes.
    // Passing `nullptr` in `ptr` yield the same result as calling `allocate`.
    // If `suffix_size` is non-zero, the suffix memory is moved to the new location.
    //
    // \returns Address to memory block of reallocated memory.
    void* reallocate(void* ptr, size_t old_size, size_t new_size)
    {
        return ptr == nullptr ? allocate(new_size) : ReallocateImpl(ptr, old_size, new_size);
    }

    // Deallocates memory block previously allocated or reallocated with `size` pointed to by `ptr`.
    // Passing `nullptr` in `ptr` result in a no-op.
    //
    // \returns Always returns `true` (see notes on operation failure).
    bool deallocate(void* ptr, size_t size)
    {
        return Allocator::deallocate(OffsetPtr(ptr, -m_PrefixAlignedSize), size + m_AffixSize);
    }

    // Calculate optimal allocation of size of `Allocator` allocator given `size`.
    //
    // \returns Optimal size of allocations when allocating memory given `size`.
    constexpr size_t optimal_size(size_t size) const
    {
        return Allocator::optimal_size(size);
    }

    // Get prefix memory block address of allocation pointed to by `ptr`.
    // Memory must be a valid allocation from `allocate` or `reallocate`, or result is undefined.
    //
    // \returns Prefix memory address or nullptr if `prefix_size` is zero.
    void* prefix(void* ptr) const
    {
        return prefix_size == 0 ? nullptr : OffsetPtr(ptr, -static_cast<ptrdiff_t>(prefix_size));
    }

    // Get suffix memory block address of allocation with `size` pointed to by `ptr`.
    // Memory must be a valid allocation from `allocate` or `reallocate`, or result is undefined.
    //
    // \returns Suffix memory address or nullptr if `suffix_size` is zero.
    void* suffix(void* ptr, size_t size) const
    {
        return suffix_size == 0 ? nullptr : OffsetPtr(ptr, size);
    }

private:
    // Rounds `size` up to the next multiple of the allocator's alignment (alignment is
    // assumed to be a power of two).
    static constexpr size_t AlignSize(size_t size) { return (size + Allocator::alignment - 1) & ~(Allocator::alignment - 1); }

    // Offset helpers. Note: negative ptrdiff_t offsets passed as size_t wrap modulo 2^N,
    // so the unsigned addition below still yields the intended backwards offset.
    static FORCE_INLINE constexpr void *OffsetPtrChecked(const void *ptr, size_t offset) { return ptr == nullptr ? nullptr : OffsetPtr(ptr, offset); }
    static FORCE_INLINE constexpr void *OffsetPtr(const void *ptr, size_t offset)
    {
        return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(ptr) + offset);
    }

    // Reallocation path when there is no suffix: a plain forwarded reallocate with the
    // prefix offset applied on the way in and out.
    template<size_t value = suffix_size, typename std::enable_if<value == 0, bool>::type = 0>
    FORCE_INLINE void* ReallocateImpl(void* ptr, size_t old_size, size_t new_size)
    {
        return OffsetPtrChecked(Allocator::reallocate(OffsetPtr(ptr, -m_PrefixAlignedSize), old_size + m_PrefixAlignedSize, new_size + m_PrefixAlignedSize), m_PrefixAlignedSize);
    }

    // Reallocation path with a suffix: the suffix bytes are saved to the stack before
    // reallocation and copied back adjacent to the new end of the user block.
    template<size_t value = suffix_size, typename std::enable_if<value != 0, bool>::type = 0>
    FORCE_INLINE void* ReallocateImpl(void* ptr, size_t old_size, size_t new_size)
    {
        uint8_t tmpSuffix[m_SuffixSize];
        memcpy(tmpSuffix, suffix(ptr, old_size), m_SuffixSize);
        ptr = Allocator::reallocate(OffsetPtr(ptr, -m_PrefixAlignedSize), old_size + m_AffixSize, new_size + m_AffixSize);
        if (ptr)
        {
            ptr = OffsetPtr(ptr, m_PrefixAlignedSize);
            memcpy(suffix(ptr, new_size), tmpSuffix, m_SuffixSize);
        }
        return ptr;
    }

    // Prefix is rounded up to the allocator alignment so the user block stays aligned.
    static constexpr ptrdiff_t m_PrefixAlignedSize = AlignSize(prefix_size);
    static constexpr ptrdiff_t m_SuffixSize = suffix_size;
    static constexpr ptrdiff_t m_AffixSize = m_PrefixAlignedSize + m_SuffixSize;
};
|
||||
}
|
||||
}
|
||||
78
Libraries/external/baselib/Include/Cpp/fallback_allocator.h
vendored
Normal file
78
Libraries/external/baselib/Include/Cpp/fallback_allocator.h
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
#include <algorithm>
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Baselib fallback allocator implementation with baselib allocators method coverage.
|
||||
// If the `Primary` allocator fail to allocate the request it's passed to the `Fallback` Allocator.
|
||||
//
|
||||
// The fallback allocator purpose is to provide a template for implementation using an allocator composition approach.
|
||||
// While providing for the baselib allocators interface(s), it's not intended to be a turn-key, general purpose solution, but rather
|
||||
// act as a template building block for derived allocators which may extend, add or ignore methods for specific needs.
|
||||
//
|
||||
// As a rule of thumb, Both Primary and Secondary allocator method calls may fail depending on their specific implementation.
|
||||
// What (if any) action is to be taken in such cases is intentionally left to be implemented by the derived class.
|
||||
//
|
||||
template<class Primary, class Fallback>
class fallback_allocator : protected Primary, protected Fallback
{
public:
    // Allocations are guaranteed to be aligned to at least `alignment`, which is the
    // weaker of the two allocators' guarantees (the only value both can promise).
    static constexpr unsigned alignment = (Primary::alignment < Fallback::alignment) ? Primary::alignment : Fallback::alignment;

    // Allocates a memory block large enough to hold `size` number of bytes,
    // trying `Primary` first and falling back to `Fallback` on failure.
    //
    // \returns Address of allocated memory or nullptr if both allocators failed.
    void* allocate(size_t size)
    {
        if (void* memory = Primary::allocate(size))
            return memory;
        return Fallback::allocate(size);
    }

    // Reallocates a block previously obtained from this allocator from `old_size`
    // to `new_size` bytes, routed to whichever allocator owns the block.
    // Reallocation fails if ownership of the new allocation can't be preserved.
    //
    // \returns Address of reallocated memory or nullptr if reallocation failed.
    void* reallocate(void* ptr, size_t old_size, size_t new_size)
    {
        return Primary::owns(ptr, old_size)
            ? Primary::reallocate(ptr, old_size, new_size)
            : Fallback::reallocate(ptr, old_size, new_size);
    }

    // Deallocates a block of `size` bytes pointed to by `ptr`, routed to
    // whichever allocator owns the block.
    //
    // \returns True if the operation was successful.
    bool deallocate(void* ptr, size_t size)
    {
        return Primary::owns(ptr, size)
            ? Primary::deallocate(ptr, size)
            : Fallback::deallocate(ptr, size);
    }

    // Calculate optimal allocation size of the primary allocator given `size`.
    //
    // \returns Optimal size of the primary allocator when allocating memory given `size`.
    constexpr size_t optimal_size(size_t size) const
    {
        return Primary::optimal_size(size);
    }

    // Checks ownership of the allocation given `ptr` and `size`.
    // Which of `ptr` and `size` are consulted is implementation defined.
    //
    // \returns True if the primary allocator owns the allocation.
    bool owns(const void* ptr, size_t size) const
    {
        return Primary::owns(ptr, size);
    }
};
|
||||
}
|
||||
}
|
||||
99
Libraries/external/baselib/Include/Cpp/heap_allocator.h
vendored
Normal file
99
Libraries/external/baselib/Include/Cpp/heap_allocator.h
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
#pragma once
|
||||
|
||||
#include "Internal/heap_allocator.inl.h"
|
||||
#include "Algorithm.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Heap allocator implementation providing platform dependent system heap allocation.
|
||||
//
|
||||
// Allocations are guaranteed to be aligned to at least the value of `default_alignment`.
|
||||
// For optimal performance, platform aligned allocation calls are only used when `default_alignment` exceeds platform minimum alignment guarantee.
|
||||
// This allocator is a stateless allocator (empty class).
|
||||
//
|
||||
// Notes on operation failure of allocator methods:
|
||||
// Operation failures will currently trigger process abort by the underlying system.
|
||||
// As a result the heap allocator currently will never return `nullptr`/`false` to signal failure, as is standard behaviour (nor any error state information).
|
||||
//
|
||||
template<uint32_t default_alignment = 8>
|
||||
class heap_allocator
|
||||
{
|
||||
using impl = detail::heap_allocator<default_alignment>;
|
||||
static_assert((default_alignment <= impl::max_alignment), "'default_alignment' exceeded max value");
|
||||
static_assert((default_alignment != 0), "'default_alignment' must not be a zero value");
|
||||
static_assert(::baselib::Algorithm::IsPowerOfTwo(default_alignment), "'default_alignment' must be a power of two value");
|
||||
|
||||
public:
|
||||
// Allocated memory is guaranteed to always be aligned to at least the value of `alignment`.
|
||||
static constexpr uint32_t alignment = default_alignment;
|
||||
|
||||
// Typedefs
|
||||
typedef Baselib_ErrorState error_state;
|
||||
|
||||
// Allocates a memory block large enough to hold `size` number of bytes. Zero size is valid.
|
||||
//
|
||||
// \returns Address to memory block of allocated memory.
|
||||
void* allocate(size_t size) const
|
||||
{
|
||||
error_state result = Baselib_ErrorState_Create();
|
||||
return impl::allocate(size, &result);
|
||||
}
|
||||
|
||||
// Allocates a memory block large enough to hold `size` number of bytes. Zero size is valid.
|
||||
//
|
||||
// \returns Address to memory block of allocated memory.
|
||||
void* allocate(size_t size, error_state *error_state_ptr) const
|
||||
{
|
||||
return impl::allocate(size, error_state_ptr);
|
||||
}
|
||||
|
||||
// Reallocates previously allocated or reallocated memory block pointer reference `ptr` from `old_size` to `new_size` number of bytes.
|
||||
// Passing `nullptr` in `ptr` yield the same result as calling `allocate`.
|
||||
//
|
||||
// \returns Address to memory block of reallocated memory.
|
||||
void* reallocate(void* ptr, size_t old_size, size_t new_size) const
|
||||
{
|
||||
error_state result = Baselib_ErrorState_Create();
|
||||
return impl::reallocate(ptr, old_size, new_size, &result);
|
||||
}
|
||||
|
||||
// Reallocates previously allocated or reallocated memory block pointer reference `ptr` from `old_size` to `new_size` number of bytes.
|
||||
// Passing `nullptr` in `ptr` yield the same result as calling `allocate`.
|
||||
//
|
||||
// \returns Address to memory block of reallocated memory.
|
||||
void* reallocate(void* ptr, size_t old_size, size_t new_size, error_state *error_state_ptr) const
|
||||
{
|
||||
return impl::reallocate(ptr, old_size, new_size, error_state_ptr);
|
||||
}
|
||||
|
||||
// Deallocates memory block previously allocated or reallocated with `size` pointed to by `ptr`.
|
||||
// Passing `nullptr` in `ptr` result in a no-op.
|
||||
//
|
||||
// \returns Always returns `true` (see notes on operation failure).
|
||||
bool deallocate(void* ptr, size_t size) const
|
||||
{
|
||||
error_state result = Baselib_ErrorState_Create();
|
||||
return impl::deallocate(ptr, size, &result);
|
||||
}
|
||||
|
||||
// Deallocates memory block previously allocated or reallocated with `size` pointed to by `ptr`.
|
||||
// Passing `nullptr` in `ptr` result in a no-op.
|
||||
//
|
||||
// \returns Always returns `true` (see notes on operation failure).
|
||||
bool deallocate(void* ptr, size_t size, error_state *error_state_ptr) const
|
||||
{
|
||||
return impl::deallocate(ptr, size, error_state_ptr);
|
||||
}
|
||||
|
||||
// Calculate optimal allocation size given `size`.
|
||||
//
|
||||
// \returns Optimal size when allocating memory given `size`.
|
||||
constexpr size_t optimal_size(size_t size) const
|
||||
{
|
||||
return impl::optimal_size(size);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
238
Libraries/external/baselib/Include/Cpp/mpmc_fixed_queue.h
vendored
Normal file
238
Libraries/external/baselib/Include/Cpp/mpmc_fixed_queue.h
vendored
Normal file
@@ -0,0 +1,238 @@
|
||||
#pragma once
|
||||
|
||||
#include "Atomic.h"
|
||||
#include "heap_allocator.h"
|
||||
#include "../C/Baselib_Memory.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a queue is a collection in which the entities in the collection are kept in order and the principal (or only) operations on the
|
||||
// collection are the addition of entities to the rear terminal position, known as enqueue, and removal of entities from the front terminal position, known
|
||||
// as dequeue. This makes the queue a First-In-First-Out (FIFO) data structure. In a FIFO data structure, the first element added to the queue will be the
|
||||
// first one to be removed. This is equivalent to the requirement that once a new element is added, all elements that were added before have to be removed
|
||||
// before the new element can be removed. Often a peek or front operation is also entered, returning the value of the front element without dequeuing it.
|
||||
// A queue is an example of a linear data structure, or more abstractly a sequential collection.
|
||||
//
|
||||
// "Queue (abstract data type)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Queue_(abstract_data_type)&oldid=878671332
|
||||
//
|
||||
|
||||
// This implementation is a fixed size queue capable of handling multiple concurrent producers and consumers
|
||||
//
|
||||
// Implementation of the queue is lockfree in the sense that one thread always progress. Either by inserting an element or failing to insert an element.
|
||||
// Not though, that the data structure in it self is not lock free. In theory if a thread writing an element gets pre-emptied that thread may block reads
|
||||
// from proceeding past that point until the writer thread wake up and complete it's operation.
|
||||
template<typename value_type, bool cacheline_aligned = true>
|
||||
class mpmc_fixed_queue
|
||||
{
|
||||
public:
|
||||
// Create a new queue instance capable of holding at most `capacity` number of elements.
|
||||
// `buffer` is an optional user defined memory block large enough to hold the queue data structure.
|
||||
// The size required is obtained by `buffer_size`, alignment requirements by `buffer_alignment`.
|
||||
// If `buffer` is not set (default), the queue will internally allocate memory using baselib heap_allocator.
|
||||
mpmc_fixed_queue(uint32_t capacity, void *buffer = nullptr)
|
||||
: m_SlotAllocator()
|
||||
, m_Slot(static_cast<Slot*>(buffer ? buffer : m_SlotAllocator.allocate(buffer_size(capacity))))
|
||||
, m_UserAllocatedSlots(buffer ? nullptr : m_Slot)
|
||||
, m_NumberOfSlots(capacity ? capacity : 2)
|
||||
, m_Capacity(capacity)
|
||||
, m_ReadPos(0)
|
||||
, m_WritePos(0)
|
||||
{
|
||||
// a zero sized queue uses two slots - the first indicating the queue is empty, the other indicating it is full.
|
||||
if (capacity == 0)
|
||||
{
|
||||
m_Slot[0].checksum.store(WriteableChecksum(0), baselib::memory_order_relaxed);
|
||||
m_Slot[1].checksum.store(ReadableChecksumPrevGen(1), baselib::memory_order_relaxed);
|
||||
m_WritePos = 1; // Point at the second slot which indicates a full queue
|
||||
}
|
||||
else
|
||||
{
|
||||
// fill queue with 'writable slots'
|
||||
for (uint32_t pos = 0; pos < capacity; ++pos)
|
||||
m_Slot[pos].checksum.store(WriteableChecksum(pos), baselib::memory_order_relaxed);
|
||||
}
|
||||
|
||||
baselib::atomic_thread_fence(baselib::memory_order_seq_cst);
|
||||
}
|
||||
|
||||
// Destroy queue, guaranteed to also destroy any elements held by the queue.
|
||||
//
|
||||
// If there are other threads currently accessing the queue behavior is undefined.
|
||||
~mpmc_fixed_queue()
|
||||
{
|
||||
for (;;)
|
||||
{
|
||||
const uint32_t pos = m_ReadPos.fetch_add(1, baselib::memory_order_relaxed);
|
||||
Slot& slot = m_Slot[SlotIndex(pos)];
|
||||
if (slot.checksum.load(baselib::memory_order_acquire) != ReadableChecksum(pos))
|
||||
break;
|
||||
slot.value.~value_type();
|
||||
}
|
||||
m_SlotAllocator.deallocate(m_UserAllocatedSlots, buffer_size(static_cast<uint32_t>(m_Capacity)));
|
||||
baselib::atomic_thread_fence(baselib::memory_order_seq_cst);
|
||||
}
|
||||
|
||||
// Try to pop front most element off the queue
|
||||
//
|
||||
// Note that if several push operations are executed in parallel, the one returning first might not have pushed a new head.
|
||||
// Which means that for the user it seems there is a new element in the queue, whereas for the queue the still non-present head will block the removal of any entries.
|
||||
//
|
||||
// \returns true if element was popped, false if queue was empty
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
bool try_pop_front(value_type& value)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
// Load current position and checksum.
|
||||
uint32_t pos = m_ReadPos.load(baselib::memory_order_relaxed);
|
||||
Slot* slot = &m_Slot[SlotIndex(pos)];
|
||||
uint32_t checksum = slot->checksum.load(baselib::memory_order_acquire);
|
||||
|
||||
// As long as it looks like we can read from this slot.
|
||||
while (checksum == ReadableChecksum(pos))
|
||||
{
|
||||
// Try to acquire it and read slot on success.
|
||||
if (m_ReadPos.compare_exchange_weak(pos, pos + 1, baselib::memory_order_relaxed, baselib::memory_order_relaxed))
|
||||
{
|
||||
value = std::move(slot->value);
|
||||
slot->value.~value_type();
|
||||
slot->checksum.store(WriteableChecksumNextGen(pos), baselib::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
// Reload checksum and try again (compare_exchange already reloaded the position)
|
||||
else
|
||||
{
|
||||
slot = &m_Slot[SlotIndex(pos)];
|
||||
checksum = slot->checksum.load(baselib::memory_order_acquire);
|
||||
}
|
||||
}
|
||||
|
||||
// Is queue empty?
|
||||
if (checksum == WriteableChecksum(pos))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Try to append a new element to the end of the queue.
|
||||
//
|
||||
// Note that if several pop operations are executed in parallel, the one returning first might not have popped the head.
|
||||
// Which means that for the user it seems there is a new free slot in the queue, whereas for the queue the still present head will block the addition of new entries.
|
||||
//
|
||||
// \returns true if element was appended, false if queue was full.
|
||||
template<class ... Args>
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
bool try_emplace_back(Args&& ... args)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
// Load current position and checksum.
|
||||
uint32_t pos = m_WritePos.load(baselib::memory_order_relaxed);
|
||||
Slot* slot = &m_Slot[SlotIndex(pos)];
|
||||
uint32_t checksum = slot->checksum.load(baselib::memory_order_acquire);
|
||||
|
||||
// As long as it looks like we can write to this slot.
|
||||
while (checksum == WriteableChecksum(pos))
|
||||
{
|
||||
// Try to acquire it and write slot on success.
|
||||
if (m_WritePos.compare_exchange_weak(pos, pos + 1, baselib::memory_order_relaxed, baselib::memory_order_relaxed))
|
||||
{
|
||||
new(&slot->value) value_type(std::forward<Args>(args)...);
|
||||
slot->checksum.store(ReadableChecksum(pos), baselib::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
// Reload checksum and try again (compare_exchange already reloaded the position)
|
||||
else
|
||||
{
|
||||
slot = &m_Slot[SlotIndex(pos)];
|
||||
checksum = slot->checksum.load(baselib::memory_order_acquire);
|
||||
}
|
||||
}
|
||||
|
||||
// Is queue full?
|
||||
if (checksum == ReadableChecksumPrevGen(pos))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Try to push an element to the end of the queue.
|
||||
//
|
||||
// Note that if several pop operations are executed in parallel, the one returning first might not have popped the head.
|
||||
// Which means that for the user it seems there is a new free slot in the queue, whereas for the queue the still present head will block the addition of new entries.
|
||||
//
|
||||
// \returns true if element was pushed, false if queue was full.
|
||||
COMPILER_WARN_UNUSED_RESULT
|
||||
bool try_push_back(const value_type& value)
|
||||
{
|
||||
return try_emplace_back(value);
|
||||
}
|
||||
|
||||
// Try to push an element (by move) to the end of the queue.
//
// Note that if several pop operations are executed in parallel, the one returning first might not have popped the head.
// Which means that for the user it seems there is a new free slot in the queue, whereas for the queue the still present head will block the addition of new entries.
//
// `value` is only moved from on success; on a full queue it is left untouched.
//
// \returns true if element was pushed, false if queue was full.
COMPILER_WARN_UNUSED_RESULT
bool try_push_back(value_type&& value)
{
    // std::forward<value_type> casts the named parameter back to an rvalue,
    // so the slot is move-constructed (equivalent to std::move here).
    return try_emplace_back(std::forward<value_type>(value));
}
|
||||
|
||||
// \returns the number of elements that can fit in the queue
//          (fixed for the lifetime of the queue; m_Capacity is const).
size_t capacity() const
{
    return m_Capacity;
}
|
||||
|
||||
// Calculate the size in bytes of a memory buffer required to hold `capacity` number of elements.
//
// Each element occupies one alignment-padded Slot (value + checksum).
// NOTE(review): a capacity of zero is rounded up to 2 slots — presumably the
// implementation needs a minimum slot count to operate; confirm against the
// constructor (outside this excerpt).
//
// \returns Buffer size in bytes.
static constexpr size_t buffer_size(uint32_t capacity)
{
    return sizeof(Slot) * (capacity ? capacity : 2);
}
|
||||
|
||||
// Calculate the required alignment for a memory buffer containing `value_type` elements.
//
// SlotAlignment is at least max(alignof(value_type), sizeof(void*)) and, when the
// queue is configured cacheline_aligned, at least PLATFORM_CACHE_LINE_SIZE.
//
// \returns Alignment requirement
static constexpr size_t buffer_alignment()
{
    return SlotAlignment;
}
|
||||
|
||||
private:
    // Checksum scheme: the top bit flags a slot as readable (holds a value); the
    // remaining 31 bits carry the (wrapping) write position the slot was filled for,
    // which filters out slots belonging to other "laps" around the ring.
    static constexpr uint32_t MinTypeAlignment = alignof(value_type) > sizeof(void*) ? alignof(value_type) : sizeof(void*);
    static constexpr uint32_t SlotAlignment = cacheline_aligned && PLATFORM_CACHE_LINE_SIZE > MinTypeAlignment ? PLATFORM_CACHE_LINE_SIZE : MinTypeAlignment;
    static constexpr uint32_t ReadableBit = (uint32_t)1 << 31;
    static constexpr uint32_t WritableMask = ~ReadableBit;
    static constexpr uint32_t WriteableChecksum(uint32_t pos) { return pos & WritableMask; }
    static constexpr uint32_t ReadableChecksum(uint32_t pos) { return pos | ReadableBit; }
    constexpr uint32_t WriteableChecksumNextGen(uint32_t pos) const { return (pos + m_NumberOfSlots) & WritableMask; }
    constexpr uint32_t ReadableChecksumPrevGen(uint32_t pos) const { return (pos - m_NumberOfSlots) | ReadableBit; }

    // Maps a monotonically increasing position onto a ring index.
    constexpr uint32_t SlotIndex(uint32_t pos) const { return pos % m_NumberOfSlots; }

    const baselib::heap_allocator<SlotAlignment> m_SlotAllocator;

    // One queue entry: the stored value plus the checksum encoding readability
    // and generation (see ReadableBit above).
    struct alignas(SlotAlignment) Slot
    {
        value_type value;
        baselib::atomic<uint32_t> checksum;
    };
    Slot *const m_Slot;
    // NOTE(review): presumably the caller-provided buffer (vs. internally allocated
    // storage); ownership handling lives in the constructor/destructor, which are
    // outside this excerpt — confirm there.
    void *const m_UserAllocatedSlots;

    // benchmarks show using uint32_t gives ~3x perf boost on 64bit platforms compared to size_t (uint64_t)
    const uint32_t m_NumberOfSlots;
    const size_t m_Capacity;

    // Read/write cursors on separate cache lines so producers and consumers
    // do not false-share.
    alignas(PLATFORM_CACHE_LINE_SIZE) baselib::atomic<uint32_t> m_ReadPos;
    alignas(PLATFORM_CACHE_LINE_SIZE) baselib::atomic<uint32_t> m_WritePos;
|
||||
};
|
||||
}
|
||||
}
|
||||
17
Libraries/external/baselib/Include/Cpp/mpmc_node.h
vendored
Normal file
17
Libraries/external/baselib/Include/Cpp/mpmc_node.h
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
#pragma once
|
||||
|
||||
#include "Atomic.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// mpmc_node container node class. All nodes used by mpmc_node containers must derive from this class.
// No initialization or other restrictions apply. Inherited class is not accessed by the mpmc_node containers.
class mpmc_node
{
public:
    // Intrusive link to the next node; read and written atomically by the containers.
    baselib::atomic<mpmc_node*> next;
};
|
||||
}
|
||||
}
|
||||
239
Libraries/external/baselib/Include/Cpp/mpmc_node_queue.h
vendored
Normal file
239
Libraries/external/baselib/Include/Cpp/mpmc_node_queue.h
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Memory.h"
|
||||
#include "../C/Baselib_Atomic_LLSC.h"
|
||||
#include "mpmc_node.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a queue is a collection in which the entities in the collection are kept in order and the principal (or only) operations on the
|
||||
// collection are the addition of entities to the rear terminal position, known as enqueue, and removal of entities from the front terminal position, known
|
||||
// as dequeue. This makes the queue a First-In-First-Out (FIFO) data structure. In a FIFO data structure, the first element added to the queue will be the
|
||||
// first one to be removed. This is equivalent to the requirement that once a new element is added, all elements that were added before have to be removed
|
||||
// before the new element can be removed. Often a peek or front operation is also entered, returning the value of the front element without dequeuing it.
|
||||
// A queue is an example of a linear data structure, or more abstractly a sequential collection.
|
||||
//
|
||||
// "Queue (abstract data type)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Queue_(abstract_data_type)&oldid=878671332
|
||||
//
|
||||
|
||||
// This implementation is a lockless node queue capable of handling multiple concurrent producers and consumers
|
||||
//
|
||||
// Node types are required to inherit the mpmc_node class. No data from the inherited class is modified/copied, so no restrictions apply.
|
||||
// The node memory is allocated and destroyed by the user (user owned).
|
||||
// Dequeued nodes may be overwritten/discarded and/or reused.
|
||||
// Dequeued nodes may not be deleted (released from user space memory) while any consumer thread is in the scope of a deque call.
|
||||
//
|
||||
// Notes consumer threads:
|
||||
// While dequeued nodes may be reused and/or overwritten they must however remain in application readable memory (user space memory) until it can be
|
||||
// guaranteed no consumer thread is still processing the node i.e. not within the scope of a dequeue call.
|
||||
// Even though the value is ignored (discarded by version check) any consumer thread may still read the node link information.
|
||||
// Consumer threads are concurrently attempting to dequeue the front in a DCAS loop and the first to succeed will update the queue front and other
|
||||
// threads continue processing the next front node in the queue. Threads are garuanteed to progress dequeuing nodes even if another consumer
|
||||
// thread falls asleep during a dequeue, but may fail to dequeue in the combination of the queue getting pre-emptied and the thread resetting the
|
||||
// state (reload back) falls asleep while swapping the back (between 2x consecutive CAS operations).
|
||||
// This is usually an extremely infrequent occurence due to the combination required (can not happen unless there's exactly one item in the queue).
|
||||
// Producer threads always progress independently.
|
||||
//
|
||||
// Notes on producer threads:
|
||||
// A producer thread swaps the back and writes the link information in two consecutive atomic operations. If a producer thread falls asleep after the
|
||||
// swap and before the link information has been written, the consumer thread(s) will not advance past this point since it doesn't have
|
||||
// the information yet. Therefore the consumer threads calls will yield null until that particular producer thread wakes back up.
|
||||
//
|
||||
template<typename T>
class alignas(sizeof(intptr_t) * 2) mpmc_node_queue
{
public:
    // Create a new queue instance.
    // The front index starts odd (1), which marks the queue as empty/blocked for
    // consumers until the first producer publishes a node (see push_back).
    mpmc_node_queue()
    {
        m_FrontIntPtr = 1;
        m_Front.obj.idx = 1;
        m_Back.obj = 0;
        // Make the non-atomic member initialization above visible before sharing.
        atomic_thread_fence(memory_order_seq_cst);
    }

    // Returns true if queue is empty.
    bool empty() const
    {
        return m_Back.load(memory_order_relaxed) == 0;
    }

    // Push a node to the back of the queue.
    void push_back(T* node)
    {
        node->next.store(0, memory_order_relaxed);
        // Swap in the new back. A non-null previous back means the queue was not
        // empty and we only need to link the old back to the new node.
        if (T* prev = m_Back.exchange(node, memory_order_release))
        {
            prev->next.store(node, memory_order_release);
        }
        else
        {
            // store the new front (reload) and add one which will put idx back to an
            // even number, releasing the consumer threads (ptr is always null and idx odd at this point).
            if (PLATFORM_LLSC_NATIVE_SUPPORT)
            {
                m_FrontPair.ptr.store(node, memory_order_release);
            }
            else
            {
                m_FrontPair.ptr.store(node, memory_order_relaxed);
                m_FrontPair.idx.fetch_add(1, memory_order_release);
            }
        }
    }

    // Push a linked list of nodes to the back of the queue.
    // `first_node` must already be linked (via mpmc_node::next) through to `last_node`.
    void push_back(T* first_node, T* last_node)
    {
        last_node->next.store(0, memory_order_relaxed);
        if (T* prev = m_Back.exchange(last_node, memory_order_release))
        {
            prev->next.store(first_node, memory_order_release);
        }
        else
        {
            // Queue was empty: publish the first node as the new front (see the
            // single-node push_back above for the idx odd/even protocol).
            if (PLATFORM_LLSC_NATIVE_SUPPORT)
            {
                m_FrontPair.ptr.store(first_node, memory_order_release);
            }
            else
            {
                m_FrontPair.ptr.store(first_node, memory_order_relaxed);
                m_FrontPair.idx.fetch_add(1, memory_order_release);
            }
        }
    }

    // Try to pop frontmost node of the queue.
    //
    // Note that if null is returned, there may still be push operations in progress in a producer thread.
    // Use the "empty" function to check if a queue is empty.
    //
    // \returns front node of the queue or null.
    T* try_pop_front()
    {
        T* node, *next;
        if (PLATFORM_LLSC_NATIVE_SUPPORT)
        {
            intptr_t value;
            Baselib_atomic_llsc_ptr_acquire_release_v(&m_Front, &node, &next,
            {
                // If front bit 0 is set, queue back is being reloaded or queue is empty.
                value = reinterpret_cast<intptr_t>(node);
                if (value & 1)
                {
                    Baselib_atomic_llsc_break();
                    return 0;
                }

                // Fetch next node. If zero, node is the current backnode. LLSC Monitor is internally cleared by subsequent cmpxchg.
                if (!(next = static_cast<T*>(node->next.obj)))
                    goto BackNode;
            });
            return node;

        BackNode:
            // - filters obsolete nodes
            // - Exclusive access (re-entrant block): setting bit 0 blocks other consumers.
            T * front = node;
            if (!m_FrontPair.ptr.compare_exchange_strong(front, reinterpret_cast<T*>(value | 1), memory_order_acquire, memory_order_relaxed))
                return 0;

            // - filters incomplete nodes
            // - check if node is back == retrigger new back
            if (!m_Back.compare_exchange_strong(front, 0, memory_order_acquire, memory_order_relaxed))
            {
                // Back progressed or node is incomplete, restore access (clear bit 0) and return 0
                m_FrontIntPtr.fetch_and(~1, memory_order_release);
                return 0;
            }

            // Success, back == front node, back was set to zero above and index / access is restored by producers, so we return the back node.
            // LLSC monitors invalidates any obsolete nodes still in process in other threads.
            return node;
        }
        else
        {
            SequencedFrontPtr front, value;

            // Get front node. The DCAS while operation will update front on retry
            front = m_Front.load(memory_order_acquire);
            do
            {
                // If front idx bit 0 is set, queue back is being reloaded or queue is empty.
                if (front.idx & 1)
                    return 0;

                // Fetch next node. If zero, node is the current backnode
                node = front.ptr;
                if (!(next = static_cast<T*>(node->next.load(memory_order_relaxed))))
                    goto BackNodeDCAS;

                // On success, replace the current with the next node and return node. On fail, retry with updated front.
                // idx advances by 2 so it stays even (bit 0 is the consumer block flag)
                // while still versioning the pointer against ABA.
                value.ptr = next;
                value.idx = front.idx + 2;
            }
            while (!m_Front.compare_exchange_strong(front, value, memory_order_acquire, memory_order_relaxed));
            return node;

        BackNodeDCAS:
            // - filters obsolete nodes
            // - Exclusive access (re-entrant block)
            value.ptr = front.ptr;
            value.idx = front.idx | 1;
            if (!m_Front.compare_exchange_strong(front, value, memory_order_acquire, memory_order_relaxed))
                return 0;

            // - filters incomplete nodes
            // - check if node is back == retrigger new back
            value.ptr = node;
            if (!m_Back.compare_exchange_strong(value.ptr, 0, memory_order_acquire, memory_order_relaxed))
            {
                // Back progressed or node is incomplete, restore access and return 0
                m_FrontPair.idx.fetch_and(~1, memory_order_release);
                return 0;
            }

            // Success, back == front node, back was set to zero above and index / access is restored by producers, so we return the back node.
            // Version check invalidates any obsolete nodes in still in process in other threads.
            return node;
        }
    }

private:
    // Plain (non-atomic) pointer + version pair; the atomic<SequencedFrontPtr> view
    // is used for the double-width (DCAS) operations.
    typedef struct
    {
        T* ptr;
        intptr_t idx;
    } SequencedFrontPtr;

    // Field-wise atomic view of the same pair, used for single-word CAS/fetch ops.
    typedef struct
    {
        atomic<T*> ptr;
        atomic<intptr_t> idx;
    } FrontPair;

    // Space out atomic members to individual cache lines. Required for native LLSC operations on some architectures, others to avoid false sharing
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE];
    // Three views of the same front storage: raw integer (bit ops), field-wise
    // atomics (single-word CAS), and the sequenced pair (DCAS).
    union
    {
        atomic<intptr_t> m_FrontIntPtr;
        FrontPair m_FrontPair;
        atomic<SequencedFrontPtr> m_Front;
    };
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(SequencedFrontPtr)];
    atomic<T*> m_Back;
    char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(T*)];

    // FrontPair is atomic reflections of the SequencedFront fields used for CAS vs DCAS ops. They must match in size and layout.
    // Do note that we can not check layout (offsetof) as the template class is incomplete!
    static_assert(sizeof(mpmc_node_queue::m_FrontPair) == sizeof(mpmc_node_queue::m_Front), "SequencedFrontPtr and FrontPair must be of equal size");

    // Verify mpmc_node is base of T
    static_assert(std::is_base_of<baselib::mpmc_node, T>::value, "Node class/struct used with baselib::mpmc_node_queue must derive from baselib::mpmc_node.");
};
|
||||
}
|
||||
}
|
||||
187
Libraries/external/baselib/Include/Cpp/mpmc_node_stack.h
vendored
Normal file
187
Libraries/external/baselib/Include/Cpp/mpmc_node_stack.h
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Memory.h"
|
||||
#include "../C/Baselib_Atomic_LLSC.h"
|
||||
#include "mpmc_node.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a stack is an abstract data type that serves as a collection of elements, with two principal operations:
|
||||
// * push, which adds an element to the collection, and
|
||||
// * pop, which removes the most recently added element that was not yet removed.
|
||||
// The order in which elements come off a stack gives rise to its alternative name, LIFO (last in, first out).
|
||||
// Additionally, a peek operation may give access to the top without modifying the stack.
|
||||
// The name "stack" for this type of structure comes from the analogy to a set of physical items stacked on top of each other,
|
||||
// which makes it easy to take an item off the top of the stack, while getting to an item deeper in the stack may require taking off multiple other items first.
|
||||
// Considered as a linear data structure, or more abstractly a sequential collection, the push and pop operations occur only at one end of the structure,
|
||||
// referred to as the top of the stack. This makes it possible to implement a stack as a singly linked list and a pointer to the top element.
|
||||
// A stack may be implemented to have a bounded capacity. If the stack is full and does not contain enough space to accept an entity to be pushed,
|
||||
// the stack is then considered to be in an overflow state. The pop operation removes an item from the top of the stack.
|
||||
//
|
||||
// "Stack (abstract data type)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
|
||||
//
|
||||
|
||||
// This implementation is a lockless node stack capable of handling multiple concurrent producers and consumers
|
||||
//
|
||||
// Node types are required to inherit the mpmc_node class. No data from the inherited class is modified/copied, so no restrictions apply.
|
||||
// The node memory is allocated and destroyed by the user (user owned).
|
||||
// Popped nodes may be overwritten/discarded and/or reused.
|
||||
// Popped nodes may not be deleted (released from user space memory) while any consumer thread is in the scope of a pop call.
|
||||
//
|
||||
// Notes consumer threads:
|
||||
// While popped nodes may be reused and/or overwritten they must however remain in application readable memory (user space memory) until it can be
|
||||
// guaranteed no consumer thread is still processing the node i.e. not within the scope of a pop call.
|
||||
// Even though the value is ignored (discarded by version check) any consumer thread may still read the node link information.
|
||||
// Consumer threads are concurrently attempting to pop the top of the stack in a DCAS loop and the first to succeed will update the stack top and other
|
||||
// threads continue processing the next top node in the stack. Threads are garuanteed to progress to pop nodes even if another consumer
|
||||
// thread falls asleep during a pop call
|
||||
//
|
||||
template<typename T>
class alignas(sizeof(intptr_t) * 2) mpmc_node_stack
{
public:
    // Create a new stack instance.
    mpmc_node_stack()
    {
        m_Top.obj.ptr = 0;
        m_Top.obj.idx = 0;
        // Make the non-atomic member initialization above visible before sharing.
        atomic_thread_fence(memory_order_seq_cst);
    }

    // Returns true if stack is empty.
    bool empty() const
    {
        return m_Top.load(memory_order_relaxed).ptr == 0;
    }

    // Push a node to the top of the stack.
    void push_back(T* node)
    {
        SequencedTopPtr newtop;
        newtop.ptr = node;
        if (PLATFORM_LLSC_NATIVE_SUPPORT)
        {
            // LL/SC path: link the node to the current top, then store it as the new top.
            Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &node->next.obj, &newtop, );
        }
        else
        {
            // DCAS path: bump idx on every push to version the pointer (ABA protection).
            SequencedTopPtr top = m_Top.load(memory_order_relaxed);
            do
            {
                node->next.store(top.ptr, memory_order_relaxed);
                newtop.idx = top.idx + 1;
            }
            while (!m_Top.compare_exchange_strong(top, newtop, memory_order_release, memory_order_relaxed));
        }
    }

    // Push a linked list of nodes to the top of the stack.
    // `first_node` must already be linked (via mpmc_node::next) through to `last_node`.
    void push_back(T* first_node, T* last_node)
    {
        SequencedTopPtr newtop;
        newtop.ptr = first_node;
        if (PLATFORM_LLSC_NATIVE_SUPPORT)
        {
            Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &last_node->next.obj, &newtop, );
        }
        else
        {
            SequencedTopPtr top = m_Top.load(memory_order_relaxed);
            do
            {
                last_node->next.store(top.ptr, memory_order_relaxed);
                newtop.idx = top.idx + 1;
            }
            while (!m_Top.compare_exchange_strong(top, newtop, memory_order_release, memory_order_relaxed));
        }
    }

    // Try to pop node from the top of the stack.
    //
    // \returns top node of the stack or null if the stack is empty.
    T* try_pop_back()
    {
        T* node;
        SequencedTopPtr newtop;
        if (PLATFORM_LLSC_NATIVE_SUPPORT)
        {
            Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &node, &newtop,
            {
                if (!node)
                {
                    // Empty stack: clear the LLSC monitor and bail out.
                    Baselib_atomic_llsc_break();
                    break;
                }
                newtop.ptr = static_cast<T*>(node->next.obj);
            });
        }
        else
        {
            SequencedTopPtr top = m_Top.load(memory_order_relaxed);
            do
            {
                node = top.ptr;
                if (!node)
                    break;
                newtop.ptr = static_cast<T*>(node->next.load(memory_order_relaxed));
                newtop.idx = top.idx + 1;
            }
            while (!m_Top.compare_exchange_strong(top, newtop, memory_order_acquire, memory_order_relaxed));
        }
        return node;
    }

    // Try to pop all nodes from the stack.
    //
    // \returns linked list of nodes (head was the top; linked via mpmc_node::next)
    //          or null if the stack is empty.
    T* try_pop_all()
    {
        T* node;
        SequencedTopPtr newtop;
        // The replacement top is always null: the whole chain is detached at once.
        newtop.ptr = 0;
        if (PLATFORM_LLSC_NATIVE_SUPPORT)
        {
            Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &node, &newtop,
            {
                if (!node)
                {
                    Baselib_atomic_llsc_break();
                    break;
                }
            });
        }
        else
        {
            SequencedTopPtr top = m_Top.load(memory_order_relaxed);
            do
            {
                node = top.ptr;
                if (!node)
                    break;
                newtop.idx = top.idx + 1;
            }
            while (!m_Top.compare_exchange_strong(top, newtop, memory_order_acquire, memory_order_relaxed));
        }
        return node;
    }

private:
    // Pointer + version pair; idx versioning protects the DCAS path against ABA.
    typedef struct
    {
        T* ptr;
        intptr_t idx;
    } SequencedTopPtr;

    // Space out atomic members to individual cache lines. Required for native LLSC operations on some architectures, others to avoid false sharing
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE];
    atomic<SequencedTopPtr> m_Top;
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(SequencedTopPtr)];

    // Verify mpmc_node is base of T
    static_assert(std::is_base_of<baselib::mpmc_node, T>::value, "Node class/struct used with baselib::mpmc_node_stack must derive from baselib::mpmc_node.");
};
|
||||
}
|
||||
}
|
||||
17
Libraries/external/baselib/Include/Cpp/mpsc_node.h
vendored
Normal file
17
Libraries/external/baselib/Include/Cpp/mpsc_node.h
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
#pragma once
|
||||
|
||||
#include "Atomic.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// mpsc_node container node class. All nodes used by mpsc_node containers must derive from this class.
// No initialization or other restrictions apply. Inherited class is not accessed by the mpsc_node containers.
class mpsc_node
{
public:
    // Intrusive link to the next node; read and written atomically by the containers.
    atomic<mpsc_node*> next;
};
|
||||
}
|
||||
}
|
||||
134
Libraries/external/baselib/Include/Cpp/mpsc_node_queue.h
vendored
Normal file
134
Libraries/external/baselib/Include/Cpp/mpsc_node_queue.h
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Memory.h"
|
||||
#include "mpsc_node.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a queue is a collection in which the entities in the collection are kept in order and the principal (or only) operations on the
|
||||
// collection are the addition of entities to the rear terminal position, known as enqueue, and removal of entities from the front terminal position, known
|
||||
// as dequeue. This makes the queue a First-In-First-Out (FIFO) data structure. In a FIFO data structure, the first element added to the queue will be the
|
||||
// first one to be removed. This is equivalent to the requirement that once a new element is added, all elements that were added before have to be removed
|
||||
// before the new element can be removed. Often a peek or front operation is also entered, returning the value of the front element without dequeuing it.
|
||||
// A queue is an example of a linear data structure, or more abstractly a sequential collection.
|
||||
//
|
||||
// "Queue (abstract data type)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/w/index.php?title=Queue_(abstract_data_type)&oldid=878671332
|
||||
//
|
||||
|
||||
// This implementation is a lockless node queue capable of handling multiple producers and a single consumer (exclusive access)
|
||||
//
|
||||
// Node types are required to inherit the mpsc_node class. No data from the inherited class is modified/copied, so no restrictions apply.
|
||||
// The node memory is allocated and destroyed by the user (user owned).
|
||||
// Dequeued nodes may be deleted, overwritten/discarded and/or reused.
|
||||
//
|
||||
// Notes consumer threads:
|
||||
// Only one consumer thread will exclusively access the front node. Other consumer threads will always progress, either by failing to dequeue or
|
||||
// successfully dequeuing the next node once the current thread thread opens access. As opposed to the parallel consumer implementation,
|
||||
// this is significantly more performant as no DCAS-operations/loops are involved, but if the consumer thread with current exclusive access falls asleep
|
||||
// when dequeuing, no other threads will successfully dequeue until the thread wakes up.
|
||||
// Producer threads always progress independently.
|
||||
//
|
||||
// Notes on producer threads:
|
||||
// A producer thread swaps the back and writes the link information in two consecutive atomic operations. If a producer thread falls asleep after the
|
||||
// swap and before the link information has been written, the consumer thread(s) will not advance past this point since it doesn't have
|
||||
// the information yet. Therefore the consumer threads calls will yield null until that particular producer thread wakes back up.
|
||||
//
|
||||
template<typename T>
class alignas(sizeof(intptr_t) * 2) mpsc_node_queue
{
public:
    // Create a new queue instance.
    mpsc_node_queue()
    {
        m_Front.obj = 0;
        m_Back.obj = 0;
        // Make the non-atomic member initialization above visible before sharing.
        atomic_thread_fence(memory_order_seq_cst);
    }

    // Returns true if queue is empty.
    bool empty() const
    {
        return m_Back.load(memory_order_relaxed) == 0;
    }

    // Push a node to the back of the queue.
    void push_back(T* node)
    {
        node->next.store(0, memory_order_relaxed);
        // Swap in the new back. A non-null previous back means the queue was not
        // empty and we only need to link the old back to the new node; otherwise
        // publish the node directly as the new front.
        if (T* prev = m_Back.exchange(node, memory_order_release))
            prev->next.store(node, memory_order_release);
        else
            m_Front.store(node, memory_order_release);
    }

    // Push a linked list of nodes to the back of the queue.
    // `first_node` must already be linked (via mpsc_node::next) through to `last_node`.
    void push_back(T* first_node, T* last_node)
    {
        last_node->next.store(0, memory_order_relaxed);
        if (T* prev = m_Back.exchange(last_node, memory_order_release))
            prev->next.store(first_node, memory_order_release);
        else
            m_Front.store(first_node, memory_order_release);
    }

    // Try to pop frontmost node of the queue.
    //
    // Note that if null is returned, there may still be push operations in progress in a producer thread.
    // Use the "empty" function to check if a queue is empty.
    //
    // \returns front node of the queue or null.
    T* try_pop_front()
    {
        T* node, *next, *expected;

        // acquire thread exclusive access of front node, return 0 if fail or queue is empty.
        // Bit 0 of the front pointer doubles as the consumer lock: nodes hold an
        // atomic pointer so they are at least pointer-aligned and a real node
        // address never has bit 0 set. `!(front >> 1)` is true for both 0 (empty)
        // and 1 (empty + locked).
        intptr_t front = m_FrontIntPtr.fetch_or(1, memory_order_acquire);
        if ((front & 1) | !(front >> 1))
            return 0;

        node = (T*)front;
        next = static_cast<T*>(node->next.load(memory_order_relaxed));
        if (!next)
        {
            // Set to zero, assuming we got the head. Exclusive access maintained as only producer can write zero.
            m_Front.store(0, memory_order_release);

            // - filters incomplete nodes
            // - check if node is back == retrigger new back
            expected = node;
            if (!m_Back.compare_exchange_strong(expected, 0, memory_order_acquire, memory_order_relaxed))
            {
                // Back progressed or node is incomplete, reset front ptr and return 0.
                m_Front.store(node, memory_order_release);
                return 0;
            }

            // Successfully got the back, so just return node.
            return node;
        }

        // Store next (clears the bit-0 consumer lock as a side effect) and return node
        m_Front.store(next, memory_order_release);
        return node;
    }

private:
    // Space out atomic members to individual cache lines. Required for native LLSC operations on some architectures, others to avoid false sharing
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE];
    // Two views of the same front storage: typed pointer and raw integer
    // (for the bit-0 lock manipulation in try_pop_front).
    union
    {
        atomic<T*> m_Front;
        atomic<intptr_t> m_FrontIntPtr;
    };
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(T*)];
    atomic<T*> m_Back;
    char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(T*)];

    // Verify mpsc_node is base of T
    static_assert(std::is_base_of<baselib::mpsc_node, T>::value, "Node class/struct used with baselib::mpsc_node_queue must derive from baselib::mpsc_node.");
};
|
||||
}
|
||||
}
|
||||
199
Libraries/external/baselib/Include/Cpp/mpsc_node_stack.h
vendored
Normal file
199
Libraries/external/baselib/Include/Cpp/mpsc_node_stack.h
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
#pragma once
|
||||
|
||||
#include "../C/Baselib_Memory.h"
|
||||
#include "../C/Baselib_Atomic_LLSC.h"
|
||||
#include "mpsc_node.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// In computer science, a stack is an abstract data type that serves as a collection of elements, with two principal operations:
|
||||
// * push, which adds an element to the collection, and
|
||||
// * pop, which removes the most recently added element that was not yet removed.
|
||||
// The order in which elements come off a stack gives rise to its alternative name, LIFO (last in, first out).
|
||||
// Additionally, a peek operation may give access to the top without modifying the stack.
|
||||
// The name "stack" for this type of structure comes from the analogy to a set of physical items stacked on top of each other,
|
||||
// which makes it easy to take an item off the top of the stack, while getting to an item deeper in the stack may require taking off multiple other items first.
|
||||
// Considered as a linear data structure, or more abstractly a sequential collection, the push and pop operations occur only at one end of the structure,
|
||||
// referred to as the top of the stack. This makes it possible to implement a stack as a singly linked list and a pointer to the top element.
|
||||
// A stack may be implemented to have a bounded capacity. If the stack is full and does not contain enough space to accept an entity to be pushed,
|
||||
// the stack is then considered to be in an overflow state. The pop operation removes an item from the top of the stack.
|
||||
//
|
||||
// "Stack (abstract data type)", Wikipedia: The Free Encyclopedia
|
||||
// https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
|
||||
//
|
||||
|
||||
// This implementation is a lockless node stack capable of handling multiple producers and a single consumer (exclusive access)
|
||||
//
|
||||
// Node types are required to inherit the mpsc_node class. No data from the inherited class is modified/copied, so no restrictions apply.
|
||||
// The node memory is allocated and destroyed by the user (user owned).
|
||||
// Popped nodes may be deleted, overwritten/discarded and/or reused.
|
||||
//
|
||||
// Notes consumer threads:
|
||||
// Only one consumer thread will exclusively access the top node. Other consumer threads will always progress, either by failing to pop or
|
||||
// successfully pop the next node once the current thread thread opens access i.e. if the consumer thread with current exclusive access falls asleep
|
||||
// when popping, no other threads will successfully pop until the thread wakes up.
|
||||
// Producer threads always progress independently.
|
||||
//
|
||||
template<typename T>
|
||||
class alignas(sizeof(intptr_t) * 2)mpsc_node_stack
|
||||
{
|
||||
public:
|
||||
// Create a new stack instance.
|
||||
mpsc_node_stack()
|
||||
{
|
||||
m_Top.obj.ptr = 0;
|
||||
m_Top.obj.idx = 0;
|
||||
m_ConsumerLock.obj = false;
|
||||
atomic_thread_fence(memory_order_seq_cst);
|
||||
}
|
||||
|
||||
// Returns true if stack is empty.
|
||||
bool empty() const
|
||||
{
|
||||
return m_Top.load(memory_order_relaxed).ptr == 0;
|
||||
}
|
||||
|
||||
// Push a node to the top of the stack.
|
||||
void push_back(T* node)
|
||||
{
|
||||
SequencedTopPtr newtop;
|
||||
newtop.ptr = node;
|
||||
if (PLATFORM_LLSC_NATIVE_SUPPORT)
|
||||
{
|
||||
Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &node->next.obj, &newtop, );
|
||||
}
|
||||
else
|
||||
{
|
||||
SequencedTopPtr top = m_Top.load(memory_order_relaxed);
|
||||
do
|
||||
{
|
||||
node->next.store(top.ptr, memory_order_relaxed);
|
||||
newtop.idx = top.idx + 1;
|
||||
}
|
||||
while (!m_Top.compare_exchange_strong(top, newtop, memory_order_release, memory_order_relaxed));
|
||||
}
|
||||
}
|
||||
|
||||
// Push a linked list of nodes to the top of the stack.
|
||||
void push_back(T* first_node, T* last_node)
|
||||
{
|
||||
SequencedTopPtr newtop;
|
||||
newtop.ptr = first_node;
|
||||
if (PLATFORM_LLSC_NATIVE_SUPPORT)
|
||||
{
|
||||
Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &last_node->next.obj, &newtop, );
|
||||
}
|
||||
else
|
||||
{
|
||||
SequencedTopPtr top = m_Top.load(memory_order_relaxed);
|
||||
do
|
||||
{
|
||||
last_node->next.store(top.ptr, memory_order_relaxed);
|
||||
newtop.idx = top.idx + 1;
|
||||
}
|
||||
while (!m_Top.compare_exchange_strong(top, newtop, memory_order_release, memory_order_relaxed));
|
||||
}
|
||||
}
|
||||
|
||||
// Try to pop node from the top of the stack.
|
||||
//
|
||||
// Note that if null can be returned if another consumer thread has exclusive read access.
|
||||
// Use the "empty" function to check if a stack is empty.
|
||||
//
|
||||
// \returns top node of the stack or null.
|
||||
T* try_pop_back()
|
||||
{
|
||||
if (m_ConsumerLock.exchange(true, memory_order_acquire))
|
||||
return 0;
|
||||
T* node;
|
||||
SequencedTopPtr newtop;
|
||||
if (PLATFORM_LLSC_NATIVE_SUPPORT)
|
||||
{
|
||||
Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &node, &newtop,
|
||||
{
|
||||
if (!node)
|
||||
{
|
||||
Baselib_atomic_llsc_break();
|
||||
break;
|
||||
}
|
||||
newtop.ptr = static_cast<T*>(node->next.obj);
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
SequencedTopPtr top = m_Top.load(memory_order_relaxed);
|
||||
do
|
||||
{
|
||||
node = top.ptr;
|
||||
if (!node)
|
||||
break;
|
||||
newtop.ptr = static_cast<T*>(node->next.load(memory_order_relaxed));
|
||||
newtop.idx = top.idx + 1;
|
||||
}
|
||||
while (!m_Top.compare_exchange_strong(top, newtop, memory_order_relaxed, memory_order_relaxed));
|
||||
}
|
||||
m_ConsumerLock.store(false, memory_order_release);
|
||||
return node;
|
||||
}
|
||||
|
||||
// Try to pop all nodes from the stack.
|
||||
//
|
||||
// Note that if null can be returned if another consumer thread has exclusive read access.
|
||||
// Use the "empty" function to check if a stack is empty.
|
||||
//
|
||||
// \returns linked list of nodes or null.
|
||||
T* try_pop_all()
|
||||
{
|
||||
if (m_ConsumerLock.exchange(true, memory_order_acquire))
|
||||
return 0;
|
||||
T* node;
|
||||
SequencedTopPtr newtop;
|
||||
newtop.ptr = 0;
|
||||
if (PLATFORM_LLSC_NATIVE_SUPPORT)
|
||||
{
|
||||
Baselib_atomic_llsc_ptr_acquire_release_v(&m_Top, &node, &newtop,
|
||||
{
|
||||
if (!node)
|
||||
{
|
||||
Baselib_atomic_llsc_break();
|
||||
break;
|
||||
}
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
SequencedTopPtr top = m_Top.load(memory_order_relaxed);
|
||||
do
|
||||
{
|
||||
node = top.ptr;
|
||||
if (!node)
|
||||
break;
|
||||
newtop.idx = top.idx + 1;
|
||||
}
|
||||
while (!m_Top.compare_exchange_strong(top, newtop, memory_order_relaxed, memory_order_relaxed));
|
||||
}
|
||||
m_ConsumerLock.store(false, memory_order_release);
|
||||
return node;
|
||||
}
|
||||
|
||||
private:
|
||||
typedef struct
|
||||
{
|
||||
T* ptr;
|
||||
intptr_t idx;
|
||||
} SequencedTopPtr;
|
||||
|
||||
// Space out atomic members to individual cache lines. Required for native LLSC operations on some architectures, others to avoid false sharing
|
||||
char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE];
|
||||
atomic<SequencedTopPtr> m_Top;
|
||||
char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(SequencedTopPtr)];
|
||||
atomic<bool> m_ConsumerLock;
|
||||
char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(bool)];
|
||||
|
||||
// Verify mpsc_node is base of T
|
||||
static_assert(std::is_base_of<baselib::mpsc_node, T>::value, "Node class/struct used with baselib::mpsc_node_stack must derive from baselib::mpsc_node.");
|
||||
};
|
||||
}
|
||||
}
|
||||
174
Libraries/external/baselib/Include/Cpp/page_allocator.h
vendored
Normal file
174
Libraries/external/baselib/Include/Cpp/page_allocator.h
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
#pragma once
|
||||
|
||||
#include "Internal/page_allocator.inl.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// Page allocator implementation providing platform dependent system page allocation.
|
||||
//
|
||||
// Allocations are guaranteed to be aligned to at least the value of `default_alignment`.
|
||||
// All methods with no page state parameter input will default to `default_page_state` where applicable.
|
||||
//
|
||||
// Notes on allocation size:
|
||||
// All sizes are by allocator standards in bytes. The page allocator internally rounds up sizes to the nearest page size value. Consider this when
|
||||
// allocating. Use `optimal_size` to retrieve number of bytes allocated given a specific size (1 to retrieve the page size value).
|
||||
// Large alignments may lead to a significantly higher use of virtual address space than the amount of memory requested.
|
||||
// This may result in an aligned page allocation to fail where a less/non-aligned allocation would succeed.
|
||||
// Note that this is especially common in 32bit applications but a platform may impose additional restrictions on the size of its virtual address space.
|
||||
// Whether a page allocation is pure virtual address space or already committed memory depends on the platform and passed page state flag.
|
||||
//
|
||||
|
||||
// Page state options
|
||||
typedef enum Memory_PageState
|
||||
{
|
||||
// The page are in a reserved state and any access will cause a seg-fault/access violation.
|
||||
// On some platforms that support this state this may be just a hint to the OS and there is no guarantee pages in this state behave
|
||||
// differently the `NoAccess` state.
|
||||
// The `page_allocator` implementation does a best effort and tries to ensure as best as possible that pages in this state are not commited.
|
||||
Memory_PageState_Reserved = detail::Memory_PageState_Reserved,
|
||||
// This is a no access page and will cause a seg-fault/access violation when accessed.
|
||||
Memory_PageState_NoAccess = detail::Memory_PageState_NoAccess,
|
||||
// The memory can only be read.
|
||||
Memory_PageState_ReadOnly = detail::Memory_PageState_ReadOnly,
|
||||
// The memory can be read and written.
|
||||
Memory_PageState_ReadWrite = detail::Memory_PageState_ReadWrite,
|
||||
// The memory can be used to execute code and can be read.
|
||||
Memory_PageState_ReadOnly_Executable = detail::Memory_PageState_ReadOnly_Executable,
|
||||
// The memory can be used to execute code and can be both read and written.
|
||||
Memory_PageState_ReadWrite_Executable = detail::Memory_PageState_ReadWrite_Executable,
|
||||
} Memory_PageState;
|
||||
|
||||
// Allocator
|
||||
template<uint32_t default_alignment = 4096, Memory_PageState default_page_state = Memory_PageState_ReadWrite>
|
||||
class page_allocator
|
||||
{
|
||||
static_assert((default_alignment != 0), "'default_alignment' must not be zero");
|
||||
static_assert(::baselib::Algorithm::IsPowerOfTwo(default_alignment), "'default_alignment' must be a power of two value");
|
||||
|
||||
using impl = detail::page_allocator<default_alignment>;
|
||||
const impl m_Impl;
|
||||
|
||||
public:
|
||||
// Allocated memory is guaranteed to always be aligned to at least the value of `alignment`.
|
||||
static constexpr uint32_t alignment = default_alignment;
|
||||
|
||||
// Typedefs
|
||||
typedef Baselib_ErrorState error_state;
|
||||
|
||||
// Create a new instance with system default page size.
|
||||
page_allocator() : m_Impl() {}
|
||||
|
||||
// Create a new instance with `page_size` sized pages. Page size is required to be supported by the target system.
|
||||
page_allocator(size_t page_size) : m_Impl(page_size)
|
||||
{
|
||||
BaselibAssert((page_size != 0), "'page_size' must not be a zero value");
|
||||
BaselibAssert(::baselib::Algorithm::IsPowerOfTwo(page_size), "'page_size' must be a power of two value");
|
||||
}
|
||||
|
||||
// Allocates number of pages required to hold `size` number of bytes, with initial page state set to `state`
|
||||
//
|
||||
// \returns Address to memory block of allocated memory or `nullptr` if allocation failed.
|
||||
void* allocate(size_t size, Memory_PageState state = default_page_state) const
|
||||
{
|
||||
error_state result = Baselib_ErrorState_Create();
|
||||
return allocate(size, state, &result);
|
||||
}
|
||||
|
||||
// Allocates number of pages required to hold `size` number of bytes, with initial page state set to `state`
|
||||
//
|
||||
// If operation failed `error_state_ptr` contains one of the following error codes:
|
||||
// - Baselib_ErrorCode_InvalidPageSize: Page size doesn't match any of the available page sizes.
|
||||
// - Baselib_ErrorCode_InvalidPageCount: Requested number of pages is zero.
|
||||
// - Baselib_ErrorCode_UnsupportedAlignment: Requested alignment is invalid.
|
||||
// - Baselib_ErrorCode_UnsupportedPageState: The underlying system doesn't support the requested page state.
|
||||
// - Baselib_ErrorCode_OutOfMemory: If there is not enough continuous address space available, or physical memory space when acquiring committed memory.
|
||||
//
|
||||
// \returns Address to memory block of allocated memory or `nullptr` if allocation failed.
|
||||
void* allocate(size_t size, Memory_PageState state, error_state *error_state_ptr) const
|
||||
{
|
||||
return m_Impl.allocate(size, state, error_state_ptr);
|
||||
}
|
||||
|
||||
// Reallocate is not supported by the page allocator. The operation is a no-op.
|
||||
//
|
||||
// If `error_state_ptr` is passed it contains the following error code:
|
||||
// - Baselib_ErrorCode_NotSupported: The operation is not supported by the underlying system.
|
||||
//
|
||||
// \returns Always returns `nullptr`.
|
||||
void* reallocate(void* ptr, size_t old_size, size_t new_size, error_state *error_state_ptr = nullptr) const
|
||||
{
|
||||
if (error_state_ptr)
|
||||
*error_state_ptr |= RaiseError(Baselib_ErrorCode_NotSupported);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Deallocates memory block in previously allocated or reallocated with `size` pointed to by `ptr`.
|
||||
// A single call of deallocate must encompass the size that were originally allocated with a single call of `allocate`.
|
||||
//
|
||||
// \returns True if the operation was successful.
|
||||
bool deallocate(void* ptr, size_t size) const
|
||||
{
|
||||
error_state result = Baselib_ErrorState_Create();
|
||||
return deallocate(ptr, size, &result);
|
||||
}
|
||||
|
||||
// Deallocates memory block previously allocated or reallocated with `size` pointed to by `ptr`.
|
||||
// A single call of deallocate must encompass the size that were originally allocated with a single call of `allocate`.
|
||||
//
|
||||
// If operation failed `error_state_ptr` contains one of the following error codes:
|
||||
// - Baselib_ErrorCode_InvalidAddressRange: Address range was detected to not match a valid allocation.
|
||||
// CAUTION: Not all platforms are able to detect this and may either raise an error or cause undefined behavior.
|
||||
// Note to implementors: Raising the error is strongly preferred as it helps identifying issues in user code.
|
||||
// - Baselib_ErrorCode_InvalidPageSize: If page size doesn't match size with previous call to `allocate` with address in `ptr`.
|
||||
//
|
||||
// \returns True if the operation was successful.
|
||||
bool deallocate(void* ptr, size_t size, error_state *error_state_ptr) const
|
||||
{
|
||||
return m_Impl.deallocate(ptr, size, error_state_ptr);
|
||||
}
|
||||
|
||||
// Calculate optimal allocation size given `size`.
|
||||
// The result size is the number of bytes allocated given a specific size.
|
||||
//
|
||||
// \returns Optimal size when allocating memory given `size`.
|
||||
constexpr size_t optimal_size(size_t size) const
|
||||
{
|
||||
return m_Impl.optimal_size(size);
|
||||
}
|
||||
|
||||
// Modifies the page state property of an already allocated virtual address in `ptr` of `size` to `state`.
|
||||
// It is possible to modify only some of the memory allocated by `allocate`.
|
||||
// Address is the address of the first page to modify and so must be aligned to size of page size.
|
||||
// Size is rounded up to the next multiple of page size used.
|
||||
// Passing `nullptr` or a zero page count result in a no-op.
|
||||
//
|
||||
// \returns True if the operation was successful.
|
||||
bool set_page_state(void* ptr, size_t size, Memory_PageState state) const
|
||||
{
|
||||
error_state result = Baselib_ErrorState_Create();
|
||||
return set_page_state(ptr, size, state, &result);
|
||||
}
|
||||
|
||||
// Modifies the page state property of an already allocated virtual address in `ptr` of `size` to `state`.
|
||||
// It is possible to modify only some of the memory allocated by `allocate`.
|
||||
// Address is the address of the first page to modify and so must be aligned to size of page size.
|
||||
// Size is rounded up to the next multiple of page size used.
|
||||
// Passing `nullptr` or a zero page count result in a no-op.
|
||||
//
|
||||
// If operation failed `error_state_ptr` contains one of the following error codes:
|
||||
// - Baselib_ErrorCode_InvalidAddressRange: Address range is not covered by a valid allocation.
|
||||
// Platforms that emulate page allocations (e.g. Emscripten) are not able to present this error and
|
||||
// will pass the function call silently.
|
||||
// - Baselib_ErrorCode_InvalidPageSize: If page size doesn't match the previous allocation in `ptr`.
|
||||
// - Baselib_ErrorCode_UnsupportedPageState: The underlying system doesn't support the requested page state.
|
||||
//
|
||||
// \returns True if the operation was successful.
|
||||
bool set_page_state(void* ptr, size_t size, Memory_PageState state, error_state *error_state_ptr) const
|
||||
{
|
||||
return m_Impl.set_page_state(ptr, size, state, error_state_ptr);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
219
Libraries/external/baselib/Include/Cpp/tlsf_allocator.h
vendored
Normal file
219
Libraries/external/baselib/Include/Cpp/tlsf_allocator.h
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
#pragma once
|
||||
|
||||
#include "Internal/tlsf_allocator.inl.h"
|
||||
#include "heap_allocator.h"
|
||||
|
||||
namespace baselib
|
||||
{
|
||||
BASELIB_CPP_INTERFACE
|
||||
{
|
||||
// tlsf_allocator (Two-Level Segregated Fit)
|
||||
// Lockless, dynamic-sized allocator capable of handling multiple concurrent allocations and deallocations (unless otherwise stated).
|
||||
// The cost (in processor instructions) allocating from the pool is O(1).
|
||||
// Allocating from the pool is lockless, except when capacity is required to increase, in which case the capacity is doubled for the size range
|
||||
// of the particular allocation size request (see details below).
|
||||
//
|
||||
// Strict segregated fit allocation mechanism is applied, a requested size is rounded up to the next allocator provided size.
|
||||
// The granularity of provided sizes (size ranges) are defined by the `min_size`, `max_size` and `linear_subdivisions` parameters provided.
|
||||
// `optimal_size` can be called to obtain the actual/best fit size of an allocation for a certain requested size.
|
||||
//
|
||||
// A two-Level segregated fit allocator can be said to have two dimensions, or levels.
|
||||
// The first level provides size ranges of pow2 segments.
|
||||
// The second level provides size ranges of the first level pow2 segments size range divided by the `linear_subdivisions` parameter value.
|
||||
// The size range of a given size is calculated as follows:
|
||||
//
|
||||
// int invSizeMask = ((1 << (int)log2(size)) / linear_subdivisions) - 1; // Inverse subdivision mask based on Pow2 of `size`, which is effectively range
|
||||
// int lowerBound = (size - 1 & ~invSizeMask) + 1;
|
||||
// int upperBound = lowerBound + invSizeMask;
|
||||
//
|
||||
// As an example, the (internal) size allocated for a requested size of 1500 with linear_subdivisions of 16 is 1536 range(1473-1536).
|
||||
//
|
||||
// Notes on performance/memory requirements:
|
||||
//
|
||||
// - This implementation is a segregated storage algorithm and does not, unlike a segregated fit algorithm (aka buddy allocator) split and coalesce
|
||||
// memory blocks. A segregated fit is well suited for a single threaded/lock-based implementation but would require multiple atomic operations to split
|
||||
// or coalesce blocks.
|
||||
//
|
||||
// - All allocators share a common base instance of the backing allocator `Allocator`, which is used for allocation when the capacity is required to
|
||||
// increase. Memory is only freed up when the tlsf allocator `deallocate_all` or destructor is invoked.
|
||||
// Furthermore, `deallocate_all` is optional to declare in the backing allocator `Allocator` and is if so invoked (once) instead of multiple `deallocate`
|
||||
// calls when `deallocate_all` (or destructor) is invoked on the tlsf allocator.
|
||||
//
|
||||
// - The allocator is constructed with only as many block allocators required for the selected min-max range with linear_subdivisions.
|
||||
// I.e. one allocator with (min_size, max_size, linear_subdivisions) 32,1024,8 has the same memory footprint as two 32,512,8 and 513,1024,8.
|
||||
// If either level allocator only requires a single allocator providing a range, code for calculating allocator indices is optimized away by template
|
||||
// construction. Additionally, if size is known at compile-time (const or sizeof) lookup can be optimized away by the compiler.
|
||||
//
|
||||
// - No overhead per allocation (no header information).
|
||||
//
|
||||
// - Internally, all memory block sizes must be rounded up to a multiple of alignment. I.e if alignment is 64, buckets containing 96 byte size allocations
|
||||
// will internally use 128 byte blocks. Additionally, smallest size allocated will always be greater than or equal to `linear_subdivisions`.
|
||||
//
|
||||
// - The allocator relies on that the free memory pool must be persisted and read/write accessible as link information of free memory blocks are
|
||||
// read/written to by the allocator operations.
|
||||
//
|
||||
// Examples:
|
||||
// Range is within a single pow2 range, no subdivisions. No lookup code needed.
|
||||
// using BlockAllocator = tlsf_allocator<17, 32, 1>;
|
||||
//
|
||||
// Range is within a single pow2 range with 8 subdivisions, so in this case with linear increments (128/8=16) of bucket sizes. Second level lookup only.
|
||||
// using SegregatedFitAllocatorLinear = tlsf_allocator<129, 256, 8>;
|
||||
//
|
||||
// Range is several pow2 ranges, no subdivisions so pow2 size increments of bucket sizes.
|
||||
// using SegregatedFitAllocatorPow2 = tlsf_allocator<129, 2048, 1>;
|
||||
//
|
||||
// Range is several pow2 ranges, with 32 subdivisions each, so pow2 size increments where each pow2 contains an array of buckets with linear size
|
||||
// increments (pow2sz/32) of bucket sizes.
|
||||
// using TLSFAllocator = tlsf_allocator<129, 2048, 32>;
|
||||
//
|
||||
//
|
||||
// tlsf_allocator<size_t min_size, size_t max_size, size_t linear_subdivisions = 1, class Allocator = baselib::heap_allocator<>>
|
||||
//
|
||||
// min_size - valid minimum size of allocations.
|
||||
// max_size - valid maximum size of allocations. Must be less or equal to the size addressable by integral type `size_t` divided by two plus 1.
|
||||
// linear_subdivisions - number of linear subdivisions of second level allocators (defaults to 1). Must be a power of two and less or equal to `min_size`
|
||||
// Allocator - Backing memory allocator. Defaults to baselib heap_allocator.
|
||||
//
|
||||
template<size_t min_size, size_t max_size, size_t linear_subdivisions = 1, class Allocator = baselib::heap_allocator<> >
|
||||
class tlsf_allocator : protected detail::tlsf_allocator<min_size, max_size, linear_subdivisions, Allocator>
|
||||
{
|
||||
using Impl = detail::tlsf_allocator<min_size, max_size, linear_subdivisions, Allocator>;
|
||||
|
||||
static_assert(min_size <= max_size, "min_size > max_size");
|
||||
static_assert(min_size >= linear_subdivisions, "min_size < linear_subdivisions");
|
||||
static_assert(max_size <= std::numeric_limits<size_t>::max() / 2 + 1, "max_size > std::numeric_limits<size_t>::max() / 2 + 1");
|
||||
static_assert(baselib::Algorithm::IsPowerOfTwo(linear_subdivisions), "linear_subdivisions != pow2");
|
||||
|
||||
public:
|
||||
// non-copyable
|
||||
tlsf_allocator(const tlsf_allocator& other) = delete;
|
||||
tlsf_allocator& operator=(const tlsf_allocator& other) = delete;
|
||||
|
||||
// non-movable (strictly speaking not needed but listed to signal intent)
|
||||
tlsf_allocator(tlsf_allocator&& other) = delete;
|
||||
tlsf_allocator& operator=(tlsf_allocator&& other) = delete;
|
||||
|
||||
// Allocated memory is guaranteed to always be aligned to at least the value of `alignment`.
|
||||
static constexpr uint32_t alignment = Impl::alignment;
|
||||
|
||||
// Creates a new instance
|
||||
tlsf_allocator()
|
||||
{
|
||||
atomic_thread_fence(memory_order_seq_cst);
|
||||
}
|
||||
|
||||
// Destroy allocator, deallocates any memory allocated.
|
||||
//
|
||||
// If there are other threads currently accessing the allocator behavior is undefined.
|
||||
~tlsf_allocator() {}
|
||||
|
||||
// Allocates a memory block large enough to hold `size` number of bytes if allocation does not require increasing capacity.
|
||||
//
|
||||
// \returns Address to memory block of allocated memory or nullptr if failed or outside of size range.
|
||||
void* try_allocate(size_t size)
|
||||
{
|
||||
return owns(nullptr, size) ? Impl::try_allocate(size) : nullptr;
|
||||
}
|
||||
|
||||
// Allocates a memory block large enough to hold `size` number of bytes.
|
||||
//
|
||||
// \returns Address to memory block of allocated memory or nullptr if failed or outside of size range
|
||||
void* allocate(size_t size)
|
||||
{
|
||||
return owns(nullptr, size) ? Impl::allocate(size) : nullptr;
|
||||
}
|
||||
|
||||
// Reallocates previously allocated or reallocated memory pointed to by `ptr` from `old_size` to `new_size` number of bytes if reallocation does not
|
||||
// require increasing capacity. Passing `nullptr` in `ptr` yield the same result as calling `try_allocate`.
|
||||
//
|
||||
// \returns Address to memory block of reallocated memory or nullptr if failed or if `new_size` is outside of size range.
|
||||
void* try_reallocate(void* ptr, size_t old_size, size_t new_size)
|
||||
{
|
||||
return owns(nullptr, new_size) ? Impl::try_reallocate(ptr, old_size, new_size) : nullptr;
|
||||
}
|
||||
|
||||
// Reallocates previously allocated or reallocated memory pointed to by `ptr` from `old_size` to `new_size` number of bytes.
|
||||
// Passing `nullptr` in `ptr` yield the same result as calling `allocate`.
|
||||
//
|
||||
// \returns Address to memory block of reallocated memory or nullptr if failed or if `new_size` is outside of size range
|
||||
void* reallocate(void* ptr, size_t old_size, size_t new_size)
|
||||
{
|
||||
return owns(nullptr, new_size) ? Impl::reallocate(ptr, old_size, new_size) : nullptr;
|
||||
}
|
||||
|
||||
// Deallocates memory block previously allocated or reallocated with `size` pointed to by `ptr`.
|
||||
// Passing `nullptr` in `ptr` result in a no-op.
|
||||
//
|
||||
// \returns Always returns `true`
|
||||
bool deallocate(void* ptr, size_t size)
|
||||
{
|
||||
return Impl::deallocate(ptr, size);
|
||||
}
|
||||
|
||||
// Free a linked list of allocations created using `batch_deallocate_link` with `size`.
|
||||
// `first` to `last` is first and last allocation of a `batch_deallocate_link` series of calls.
|
||||
//
|
||||
// \returns Always returns `true`
|
||||
bool batch_deallocate(void* ptr_first, void* ptr_last, size_t size)
|
||||
{
|
||||
return Impl::batch_deallocate(ptr_first, ptr_last, size);
|
||||
}
|
||||
|
||||
// Link previously allocated memory of `size` to another.
|
||||
//
|
||||
// Use to create a linked list of allocations for use with `batch_deallocate(first, last, size)`
|
||||
// Size of linked allocations are required to be equal to `size`.
|
||||
// `nullptr` is a valid argument for `ptr_next`, but is not needed to terminate a linked list.
|
||||
// This is implicit transfer of ownership of the memory back to the allocator.
|
||||
// Memory of the allocation must not be accessed/modified once linked.
|
||||
void batch_deallocate_link(void* ptr, void* ptr_next, size_t size)
|
||||
{
|
||||
Impl::batch_deallocate_link(ptr, ptr_next);
|
||||
}
|
||||
|
||||
// Release all resources and set capacity to zero
|
||||
//
|
||||
// Calling this function invalidates any currently allocated memory
|
||||
// If there are other threads currently accessing the allocator behavior is undefined.
|
||||
void deallocate_all()
|
||||
{
|
||||
Impl::deallocate_all();
|
||||
}
|
||||
|
||||
// Requests that the allocator capacity be at least enough to contain `capacity` for allocations of `size`.
|
||||
//
|
||||
// If `capacity` is less or equal to current capacity for allocations of `size`, the capacity is not affected.
|
||||
// Note that internally, `capacity` is rounded up to the nearest optimal allocation size based on `Allocator` attributes.
|
||||
//
|
||||
// \returns true if successful.
|
||||
bool reserve(size_t size, size_t capacity)
|
||||
{
|
||||
return owns(nullptr, size) ? Impl::reserve(size, capacity) : false;
|
||||
}
|
||||
|
||||
// Get the current capacity of allocations with `size`.
|
||||
size_t capacity(size_t size)
|
||||
{
|
||||
return owns(nullptr, size) ? Impl::capacity(size) : 0;
|
||||
}
|
||||
|
||||
// Calculate optimal allocation size given `size`.
|
||||
//
|
||||
// \returns Optimal size when allocating memory given `size` or zero if outside size range.
|
||||
static constexpr size_t optimal_size(const size_t size)
|
||||
{
|
||||
return owns(nullptr, size) ? Impl::optimal_size(size) : 0;
|
||||
}
|
||||
|
||||
// Checks for the ownership allocation given `ptr` and `size`
|
||||
// It is implementation defined if either or both of `ptr` and `size` are considered to determine ownership.
|
||||
// This allocator does not consider `ptr`.
|
||||
//
|
||||
// \returns True if the allocator owns the allocation.
|
||||
static constexpr bool owns(const void *, size_t size)
|
||||
{
|
||||
return size - min_size <= max_size - min_size;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
60
Libraries/external/baselib/Include/Internal/ArchitectureDetection.h
vendored
Normal file
60
Libraries/external/baselib/Include/Internal/ArchitectureDetection.h
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
#pragma once
|
||||
|
||||
// Detect 64/32bit if not user defined.
|
||||
#if !defined(PLATFORM_ARCH_64) && !defined(PLATFORM_ARCH_32)
|
||||
#if defined(_AMD64_) || defined(__LP64__) || defined(_WIN64) || defined(_M_ARM64)
|
||||
#define PLATFORM_ARCH_64 1
|
||||
#define PLATFORM_ARCH_32 0
|
||||
#else
|
||||
#define PLATFORM_ARCH_64 0
|
||||
#define PLATFORM_ARCH_32 1
|
||||
#endif
|
||||
#elif !defined(PLATFORM_ARCH_64)
|
||||
#define PLATFORM_ARCH_64 (PLATFORM_ARCH_32 ? 0 : 1)
|
||||
#elif !defined(PLATFORM_ARCH_32)
|
||||
#define PLATFORM_ARCH_32 (PLATFORM_ARCH_64 ? 0 : 1)
|
||||
#endif
|
||||
|
||||
// Cache line size in bytes
|
||||
#ifndef PLATFORM_CACHE_LINE_SIZE
|
||||
#define PLATFORM_CACHE_LINE_SIZE 64
|
||||
#endif
|
||||
|
||||
// Detect endianess if not user defined.
|
||||
#if !defined(PLATFORM_ARCH_BIG_ENDIAN) && !defined(PLATFORM_ARCH_LITTLE_ENDIAN)
|
||||
#if defined(__BIG_ENDIAN__)
|
||||
#define PLATFORM_ARCH_BIG_ENDIAN 1
|
||||
#define PLATFORM_ARCH_LITTLE_ENDIAN 0
|
||||
#else
|
||||
#define PLATFORM_ARCH_BIG_ENDIAN 0
|
||||
#define PLATFORM_ARCH_LITTLE_ENDIAN 1
|
||||
#endif
|
||||
#elif !defined(PLATFORM_ARCH_BIG_ENDIAN)
|
||||
#define PLATFORM_ARCH_BIG_ENDIAN (PLATFORM_ARCH_LITTLE_ENDIAN ? 0 : 1)
|
||||
#elif !defined(PLATFORM_ARCH_LITTLE_ENDIAN)
|
||||
#define PLATFORM_ARCH_LITTLE_ENDIAN (PLATFORM_ARCH_BIG_ENDIAN ? 0 : 1)
|
||||
#endif
|
||||
|
||||
|
||||
// Detect SIMD features.
|
||||
|
||||
// SSE2
|
||||
// Naming is inherited from Unity and indicates full SSE2 support.
|
||||
#ifndef PLATFORM_SUPPORTS_SSE
|
||||
#if (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(_M_AMD64) || defined(_M_X64) || defined(__SSE2__)
|
||||
#define PLATFORM_SUPPORTS_SSE 1
|
||||
#else
|
||||
#define PLATFORM_SUPPORTS_SSE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// NEON
|
||||
// Indicates general availability. Note that there can be some differences in the exact instructions available.
|
||||
#ifndef PLATFORM_SUPPORTS_NEON
|
||||
#if defined(__ARM_NEON) || defined(__ARM_NEON__) || defined(__ARM_NEON_FP) || \
|
||||
(defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)))
|
||||
#define PLATFORM_SUPPORTS_NEON 1
|
||||
#else
|
||||
#define PLATFORM_SUPPORTS_NEON 0
|
||||
#endif
|
||||
#endif
|
||||
47
Libraries/external/baselib/Include/Internal/Assert.h
vendored
Normal file
47
Libraries/external/baselib/Include/Internal/Assert.h
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
#pragma once
|
||||
|
||||
#ifndef BASELIB_ENABLE_ASSERTIONS
|
||||
#ifdef NDEBUG
|
||||
#define BASELIB_ENABLE_ASSERTIONS 0
|
||||
#else
|
||||
#define BASELIB_ENABLE_ASSERTIONS 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include "../C/Baselib_Debug.h"
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
BASELIB_C_INTERFACE
|
||||
{
|
||||
#endif
|
||||
|
||||
#if COMPILER_CLANG || COMPILER_GCC
|
||||
__attribute__((format(printf, 1, 2)))
|
||||
#endif
|
||||
BASELIB_API void detail_AssertLog(const char* format, ...);
|
||||
|
||||
#define DETAIL__ASSERT_LOG(ASSERT_EXPRESSION_, message, ...) \
|
||||
PP_EVAL(PP_IF_ELSE(PP_VARG_IS_NONEMPTY(__VA_ARGS__)) \
|
||||
(detail_AssertLog("%s(%d): Assertion failed (%s) - " message "\n", __FILE__, __LINE__, #ASSERT_EXPRESSION_, __VA_ARGS__)) \
|
||||
(detail_AssertLog("%s(%d): Assertion failed (%s) - %s\n", __FILE__, __LINE__, #ASSERT_EXPRESSION_, message)) \
|
||||
)
|
||||
|
||||
#define BaselibAssert(ASSERT_EXPRESSION_, ...) \
|
||||
do { \
|
||||
if (BASELIB_ENABLE_ASSERTIONS) \
|
||||
{ \
|
||||
if(!(ASSERT_EXPRESSION_)) \
|
||||
{ \
|
||||
PP_EVAL(PP_IF_ELSE(PP_VARG_IS_NONEMPTY(__VA_ARGS__)) \
|
||||
(DETAIL__ASSERT_LOG(ASSERT_EXPRESSION_, __VA_ARGS__)) \
|
||||
(detail_AssertLog("%s(%d): Assertion failed (%s)\n", __FILE__, __LINE__, #ASSERT_EXPRESSION_)) \
|
||||
); \
|
||||
Baselib_Debug_Break(); \
|
||||
} \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // BASELIB_C_INTERFACE
|
||||
#endif
|
||||
16
Libraries/external/baselib/Include/Internal/BasicTypes.h
vendored
Normal file
16
Libraries/external/baselib/Include/Internal/BasicTypes.h
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
#pragma once
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// Default for PLATFORM_MEMORY_MALLOC_MIN_ALIGNMENT if not specified by platform.
|
||||
#ifndef PLATFORM_MEMORY_MALLOC_MIN_ALIGNMENT
|
||||
#define PLATFORM_MEMORY_MALLOC_MIN_ALIGNMENT COMPILER_ALIGN_OF(max_align_t)
|
||||
#endif
|
||||
|
||||
// Custom type suitable for representing a UTF-16 codepoint crossplatform.
|
||||
// Because char16_t is not available on all platforms,
|
||||
// uint16_t is chosen as a type that inflicts the same behavior across platforms,
|
||||
// as is requiring a cast from platform specific UTF-16 representation.
|
||||
typedef uint16_t baselib_char16_t;
|
||||
74
Libraries/external/baselib/Include/Internal/Compiler/CompilerEnvironmentClang.h
vendored
Normal file
74
Libraries/external/baselib/Include/Internal/Compiler/CompilerEnvironmentClang.h
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
#pragma once
|
||||
|
||||
// This defines the compiler environment for clang based compilers. Please make sure to define all required features
|
||||
// (see VerifyCompilerEnvironment.h for reference)
|
||||
|
||||
#if defined(__cplusplus) && __cplusplus < 201103L
|
||||
#error "Baselib requires C++11 support"
|
||||
#endif
|
||||
|
||||
#define COMPILER_CLANG 1
|
||||
|
||||
#define HAS_CLANG_FEATURE(x) (__has_feature(x))
|
||||
|
||||
#define COMPILER_SUPPORTS_EXCEPTIONS HAS_CLANG_FEATURE(cxx_exceptions)
|
||||
#define COMPILER_SUPPORTS_RTTI HAS_CLANG_FEATURE(cxx_rtti)
|
||||
#define COMPILER_SUPPORTS_GENERIC_LAMBDA_EXPRESSIONS HAS_CLANG_FEATURE(cxx_generic_lambdas) // Clang >=3.4
|
||||
|
||||
#define COMPILER_BUILTIN_EXPECT(X_, Y_) __builtin_expect((X_), (Y_))
|
||||
|
||||
// Tells the compiler to assume that this statement is never reached.
|
||||
// (reaching it anyways is undefined behavior!)
|
||||
#define COMPILER_BUILTIN_UNREACHABLE() __builtin_unreachable()
|
||||
// Tells the compiler to assume that the given expression is true until the expression is modified.
|
||||
// (it is undefined behavior if the expression is not true after all)
|
||||
#define COMPILER_BUILTIN_ASSUME(EXPR_) __builtin_assume(EXPR_)
|
||||
|
||||
|
||||
#define COMPILER_NOINLINE __attribute__((unused, noinline)) // unused is needed to avoid warning when a function is not used
|
||||
#define COMPILER_INLINE __attribute__((unused)) inline
|
||||
#define COMPILER_FORCEINLINE __attribute__((unused, always_inline, nodebug)) inline
|
||||
#define COMPILER_EMPTYINLINE __attribute__((const, always_inline, nodebug)) inline
|
||||
#define COMPILER_NORETURN __attribute__((noreturn))
|
||||
|
||||
#if __has_extension(attribute_deprecated_with_message)
|
||||
#define COMPILER_DEPRECATED(msg) __attribute__((deprecated(msg)))
|
||||
#if __has_extension(enumerator_attributes)
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg) __attribute__((deprecated(msg)))
|
||||
#else
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg)
|
||||
#endif
|
||||
#else
|
||||
#define COMPILER_DEPRECATED(msg) __attribute__((deprecated))
|
||||
#if __has_extension(enumerator_attributes)
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg) __attribute__((deprecated))
|
||||
#else
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define COMPILER_ALIGN_OF(TYPE_) __alignof__(TYPE_)
|
||||
#define COMPILER_ALIGN_AS(ALIGN_) __attribute__((aligned(ALIGN_)))
|
||||
|
||||
#define COMPILER_C_STATIC_ASSERT(EXPR_, MSG_) _Static_assert(EXPR_, MSG_)
|
||||
|
||||
#define COMPILER_ATTRIBUTE_UNUSED __attribute__((unused))
|
||||
|
||||
// Note that this is how the compiler defines a debug break which is not necessarily the standard way on any given platform.
|
||||
// For a platform friendly implementation, use `BASELIB_DEBUG_TRAP`
|
||||
#define COMPILER_DEBUG_TRAP() __builtin_debugtrap()
|
||||
|
||||
#define COMPILER_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
|
||||
|
||||
// Warning management
|
||||
// pragma message on clang does always generate a warning that cannot be disabled, therefore the clang version
|
||||
// of COMPILER_PRINT_MESSAGE() does nothing
|
||||
#define COMPILER_PRINT_MESSAGE(MESSAGE_)
|
||||
#define COMPILER_PRINT_WARNING(MESSAGE_) _Pragma(PP_STRINGIZE(message(__FILE__ "warning: " MESSAGE_)))
|
||||
|
||||
#define COMPILER_WARNING_UNUSED_VARIABLE PP_STRINGIZE(-Wunused-variable)
|
||||
#define COMPILER_WARNING_DEPRECATED PP_STRINGIZE(-Wdeprecated)
|
||||
|
||||
#define COMPILER_WARNINGS_PUSH _Pragma(PP_STRINGIZE(clang diagnostic push))
|
||||
#define COMPILER_WARNINGS_POP _Pragma(PP_STRINGIZE(clang diagnostic pop))
|
||||
#define COMPILER_WARNINGS_DISABLE(Warn) _Pragma(PP_STRINGIZE(clang diagnostic ignored Warn))
|
||||
99
Libraries/external/baselib/Include/Internal/Compiler/CompilerEnvironmentGcc.h
vendored
Normal file
99
Libraries/external/baselib/Include/Internal/Compiler/CompilerEnvironmentGcc.h
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
#pragma once
|
||||
|
||||
// Verify that the GCC is correctly defining __cplusplus. This is not the case for
|
||||
// GCC versions < 4.7, where it is just defined to 1. We only error here in case of linux build
|
||||
// as we, as we use the commandline --std=xxx to select the featureset there. On armcc and cxppc
|
||||
// we do not use any >C99 features, so the detection works correctly with __cplusplus==1
|
||||
#if (__cplusplus == 1) && defined(LINUX)
|
||||
#error "This version of GCC is not supported. Please update to a more recent one."
|
||||
#endif
|
||||
#if defined(__cplusplus) && __cplusplus < 201103L
|
||||
#error "Baselib requires C++11 support"
|
||||
#endif
|
||||
|
||||
#define COMPILER_GCC 1
|
||||
|
||||
// __cpp_exceptions is the correct way to check whether exceptions are enabled or not, but is unfortunately not supported
|
||||
// by GCC versions before 5.0. For Pre 5.0 GCC, we also need to check the __EXCEPTIONS macro
|
||||
#if defined(__cpp_exceptions) || __EXCEPTIONS == 1
|
||||
#define COMPILER_SUPPORTS_EXCEPTIONS 1
|
||||
#else
|
||||
#define COMPILER_SUPPORTS_EXCEPTIONS 0
|
||||
#endif
|
||||
|
||||
// __cpp_rtti is the correct way to check whether RTTI is enabled or not, but is unfortunately not supported
|
||||
// by GCC versions before 5.0. For Pre 5.0 GCC, we also need to check the __GXX_RTTI macro
|
||||
#if defined(__cpp_rtti) || __GXX_RTTI == 1
|
||||
#define COMPILER_SUPPORTS_RTTI 1
|
||||
#else
|
||||
#define COMPILER_SUPPORTS_RTTI 0
|
||||
#endif
|
||||
|
||||
// GCC >=4.9
|
||||
#if defined(__cpp_generic_lambdas) && (__cpp_generic_lambdas >= 201304)
|
||||
#define COMPILER_SUPPORTS_GENERIC_LAMBDA_EXPRESSIONS 1
|
||||
#else
|
||||
#define COMPILER_SUPPORTS_GENERIC_LAMBDA_EXPRESSIONS 0
|
||||
#endif
|
||||
|
||||
#define COMPILER_BUILTIN_EXPECT(X_, Y_) __builtin_expect((X_), (Y_))
|
||||
|
||||
// Tells the compiler to assume that this statement is never reached.
|
||||
// (reaching it anyways is undefined behavior!)
|
||||
#define COMPILER_BUILTIN_UNREACHABLE() __builtin_unreachable()
|
||||
// Tells the compiler to assume that the given expression is true until the expression is modified.
|
||||
// (it is undefined behavior if the expression is not true after all)
|
||||
#define COMPILER_BUILTIN_ASSUME(EXPR_) do { if (!(EXPR_)) COMPILER_BUILTIN_UNREACHABLE(); } while(false)
|
||||
|
||||
#define COMPILER_NOINLINE __attribute__((unused, noinline)) // unused is needed to avoid warning when a function is not used
|
||||
#define COMPILER_INLINE __attribute__((unused)) inline
|
||||
#define COMPILER_FORCEINLINE __attribute__((unused, always_inline)) inline
|
||||
#define COMPILER_EMPTYINLINE __attribute__((const, always_inline)) inline
|
||||
#define COMPILER_NORETURN __attribute__((noreturn))
|
||||
|
||||
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) || __GNUC__ > 4
|
||||
#define COMPILER_DEPRECATED(msg) __attribute__((deprecated(msg)))
|
||||
#else
|
||||
#define COMPILER_DEPRECATED(msg) __attribute__((deprecated))
|
||||
#endif
|
||||
|
||||
// Support for attributes on enumerators is GCC 6
|
||||
#if __GNUC__ >= 6
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg) __attribute__((deprecated(msg)))
|
||||
#else
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg)
|
||||
#endif
|
||||
|
||||
#define COMPILER_ALIGN_OF(TYPE_) __alignof__(TYPE_)
|
||||
#define COMPILER_ALIGN_AS(ALIGN_) __attribute__((aligned(ALIGN_)))
|
||||
|
||||
#define COMPILER_C_STATIC_ASSERT(EXPR_, MSG_) _Static_assert(EXPR_, MSG_)
|
||||
|
||||
#define COMPILER_ATTRIBUTE_UNUSED __attribute__((unused))
|
||||
|
||||
// Some versions of GCC do provide __builtin_debugtrap, but it seems to be unreliable.
|
||||
// See https://github.com/scottt/debugbreak/issues/13
|
||||
#if defined(__i386__) || defined(__x86_64__)
|
||||
#define COMPILER_DEBUG_TRAP() __asm__ volatile("int $0x03")
|
||||
#elif defined(__thumb__)
|
||||
#define COMPILER_DEBUG_TRAP() __asm__ volatile(".inst 0xde01")
|
||||
#elif defined(__arm__) && !defined(__thumb__)
|
||||
#define COMPILER_DEBUG_TRAP() __asm__ volatile(".inst 0xe7f001f0")
|
||||
#elif defined(__aarch64__)
|
||||
#define COMPILER_DEBUG_TRAP() __asm__ volatile(".inst 0xd4200000")
|
||||
#endif
|
||||
|
||||
#define COMPILER_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
|
||||
|
||||
#define HAS_CLANG_FEATURE(x) 0
|
||||
|
||||
// Warning management
|
||||
#define COMPILER_PRINT_MESSAGE(MESSAGE_) _Pragma(PP_STRINGIZE(message(__FILE__ "info: " MESSAGE_)))
|
||||
#define COMPILER_PRINT_WARNING(MESSAGE_) _Pragma(PP_STRINGIZE(message(__FILE__ "warning: " MESSAGE_)))
|
||||
|
||||
#define COMPILER_WARNING_UNUSED_VARIABLE PP_STRINGIZE(-Wunused-variable)
|
||||
#define COMPILER_WARNING_DEPRECATED PP_STRINGIZE(-Wdeprecated)
|
||||
|
||||
#define COMPILER_WARNINGS_PUSH _Pragma("GCC diagnostic push")
|
||||
#define COMPILER_WARNINGS_POP _Pragma("GCC diagnostic pop")
|
||||
#define COMPILER_WARNINGS_DISABLE(Warn) _Pragma(PP_STRINGIZE(GCC diagnostic ignored Warn))
|
||||
68
Libraries/external/baselib/Include/Internal/Compiler/CompilerEnvironmentMsvc.h
vendored
Normal file
68
Libraries/external/baselib/Include/Internal/Compiler/CompilerEnvironmentMsvc.h
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
#pragma once
|
||||
|
||||
#if _MSC_VER < 1900
|
||||
#error "Baselib requires C++11 support, i.e. MSVC 2015 or newer"
|
||||
#endif
|
||||
|
||||
#define COMPILER_MSVC 1
|
||||
|
||||
#ifdef _CPPUNWIND
|
||||
#define COMPILER_SUPPORTS_EXCEPTIONS _CPPUNWIND
|
||||
#else
|
||||
#define COMPILER_SUPPORTS_EXCEPTIONS 0
|
||||
#endif
|
||||
|
||||
#ifdef _CPPRTTI
|
||||
#define COMPILER_SUPPORTS_RTTI _CPPRTTI
|
||||
#else
|
||||
#define COMPILER_SUPPORTS_RTTI 0
|
||||
#endif
|
||||
|
||||
#define COMPILER_SUPPORTS_GENERIC_LAMBDA_EXPRESSIONS 1 // _MSC_VER >= 1900
|
||||
|
||||
#define COMPILER_BUILTIN_EXPECT(X_, Y_) (X_)
|
||||
|
||||
// Tells the compiler to assume that this statement is never reached.
|
||||
// (reaching it anyways is undefined behavior!)
|
||||
#define COMPILER_BUILTIN_UNREACHABLE() __assume(false)
|
||||
// Tells the compiler to assume that the given expression is true until the expression is modified.
|
||||
// (it is undefined behavior if the expression is not true after all)
|
||||
#define COMPILER_BUILTIN_ASSUME(EXPR_) __assume(EXPR_)
|
||||
|
||||
#define HAS_CLANG_FEATURE(x) 0
|
||||
|
||||
// Warning management
|
||||
#define COMPILER_PRINT_MESSAGE(MESSAGE_) __pragma(message(__FILE__ "(" PP_STRINGIZE(__LINE__) ") : info: " MESSAGE_))
|
||||
#define COMPILER_PRINT_WARNING(MESSAGE_) __pragma(message(__FILE__ "(" PP_STRINGIZE(__LINE__) ") : warning: " MESSAGE_))
|
||||
|
||||
#define COMPILER_WARNING_UNUSED_VARIABLE 4101
|
||||
#define COMPILER_WARNING_DEPRECATED 4995 4996
|
||||
|
||||
#define COMPILER_WARNINGS_PUSH __pragma(warning(push))
|
||||
#define COMPILER_WARNINGS_POP __pragma(warning(pop))
|
||||
#define COMPILER_WARNINGS_DISABLE(Warn) __pragma(warning(disable : Warn))
|
||||
|
||||
#define COMPILER_NOINLINE __declspec(noinline)
|
||||
#define COMPILER_INLINE inline
|
||||
#define COMPILER_FORCEINLINE __forceinline
|
||||
#define COMPILER_EMPTYINLINE __forceinline
|
||||
#define COMPILER_NORETURN __declspec(noreturn)
|
||||
|
||||
#define COMPILER_DEPRECATED(msg) __declspec(deprecated(msg))
|
||||
#define COMPILER_DEPRECATED_ENUM_VALUE(msg) /* no equivalent for this in MSVC */
|
||||
|
||||
#define COMPILER_ALIGN_OF(TYPE_) __alignof(TYPE_)
|
||||
#define COMPILER_ALIGN_AS(ALIGN_) __declspec(align(ALIGN_))
|
||||
|
||||
#define COMPILER_C_STATIC_ASSERT(EXPR_, MSG_) typedef char __static_assert_t[(EXPR_) != 0]
|
||||
|
||||
#define COMPILER_ATTRIBUTE_UNUSED __pragma(warning(suppress:4100))
|
||||
|
||||
#define COMPILER_DEBUG_TRAP() __debugbreak()
|
||||
|
||||
// Note that this is best effort, as "/analyze" compiler flag required to make warning appear
|
||||
#define COMPILER_WARN_UNUSED_RESULT _Check_return_
|
||||
|
||||
#if !defined(alloca)
|
||||
#define alloca _alloca
|
||||
#endif
|
||||
291
Libraries/external/baselib/Include/Internal/CoreMacros.h
vendored
Normal file
291
Libraries/external/baselib/Include/Internal/CoreMacros.h
vendored
Normal file
@@ -0,0 +1,291 @@
|
||||
// DO NOT PUT #pragma once or include guard check here
|
||||
// This header is designed to be able to be included multiple times
|
||||
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
// this macros are undefined in UndefineCoreMacros.h
|
||||
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
// !!! IF YOU ADD A NEW MACRO TO THIS SECTION !!!
|
||||
// !!! please add it to UndefineCoreMacros.h !!!
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
// this is where we collect context-free macros of general utility. it's a holding area until the new sub-core layer
|
||||
// project is started.
|
||||
//
|
||||
// IMPORTANT: only macros! and no non-system #includes!
|
||||
|
||||
// FORCE_INLINE forwarded to compiler defined macro
|
||||
#define FORCE_INLINE COMPILER_FORCEINLINE
|
||||
|
||||
// You may use OPTIMIZER_LIKELY / OPTIMIZER_UNLIKELY to provide the compiler with branch prediction information.
|
||||
//
|
||||
// The return value is the value of 'EXPR_', which should be an integral expression.
|
||||
//
|
||||
// OPTIMIZER_LIKELY makes it so that the branch predictor chooses to take the branch.
|
||||
// OPTIMIZER_UNLIKELY makes it so that the branch predictor chooses not to take the branch.
|
||||
//
|
||||
#define OPTIMIZER_LIKELY(EXPR_) COMPILER_BUILTIN_EXPECT(!!(EXPR_), 1)
|
||||
#define OPTIMIZER_UNLIKELY(EXPR_) COMPILER_BUILTIN_EXPECT(!!(EXPR_), 0)
|
||||
|
||||
// UNUSED will tell the compiler not to warn about a given variable being unused. "yeah, we know - this is unused."
|
||||
//
|
||||
// the internet says that (void)sizeof(expr) is the right way to do this, but not for us, not with our
|
||||
// compilers. the below is the result of much experimentation by @lucas, who says that we have at least one compiler
|
||||
// that does not consider sizeof(expr) to be a 'usage' of the variable(s) inside of expr.
|
||||
//
|
||||
// also note that we do not have the 'if+const expr' warning enabled because combining #if and if expression/constants
|
||||
// (which we often need to do - for example 'caps->gles.requireClearAlpha = PLATFORM_WEBGL || PLATFORM_STV') is super noisy.
|
||||
//
|
||||
#define UNUSED(EXPR_) \
|
||||
do { if (false) (void)(EXPR_); } while(0)
|
||||
|
||||
// COMPILER_WARNING will generate a compiler warning. this will work for all our compilers, though note the usage
|
||||
// requires a pragma. (based on http://goodliffe.blogspot.dk/2009/07/c-how-to-say-warning-to-visual-studio-c.html)
|
||||
//
|
||||
// usage:
|
||||
//
|
||||
// #pragma COMPILER_WARNING("this file is obsolete! use foo/bar.h instead.")
|
||||
//
|
||||
#define COMPILER_WARNING(MESSAGE_) message(__FILE__ "(" UNITY_STRINGIFY(__LINE__) ") : warning: " MESSAGE_)
|
||||
|
||||
#define UNSIGNED_FLAGS_1(FLAG1_) static_cast<unsigned int>(FLAG1_)
|
||||
#define UNSIGNED_FLAGS_2(FLAG1_, FLAG2_) UNSIGNED_FLAGS_1(FLAG1_) | UNSIGNED_FLAGS_1(FLAG2_)
|
||||
#define UNSIGNED_FLAGS_3(FLAG1_, FLAG2_, FLAG3_) UNSIGNED_FLAGS_1(FLAG1_) | UNSIGNED_FLAGS_2(FLAG2_, FLAG3_)
|
||||
#define UNSIGNED_FLAGS_4(FLAG1_, FLAG2_, FLAG3_, FLAG4_) UNSIGNED_FLAGS_1(FLAG1_) | UNSIGNED_FLAGS_3(FLAG2_, FLAG3_, FLAG4_)
|
||||
#define UNSIGNED_FLAGS_5(FLAG1_, FLAG2_, FLAG3_, FLAG4_, FLAG5_) UNSIGNED_FLAGS_1(FLAG1_) | UNSIGNED_FLAGS_4(FLAG2_, FLAG3_, FLAG4_, FLAG5_)
|
||||
#define UNSIGNED_FLAGS_6(FLAG1_, FLAG2_, FLAG3_, FLAG4_, FLAG5_, FLAG6_) UNSIGNED_FLAGS_1(FLAG1_) | UNSIGNED_FLAGS_5(FLAG2_, FLAG3_, FLAG4_, FLAG5_, FLAG6_)
|
||||
#define UNSIGNED_FLAGS(...) PP_VARG_SELECT_OVERLOAD(UNSIGNED_FLAGS_, (__VA_ARGS__))
|
||||
|
||||
// -------------------------------------------------------------------------------------------------
|
||||
// this macros are not undefined in UndefineCoreMacros.h, hence we put a guard to not define them twice
|
||||
#ifndef DETAIL__PP_AND_DETAILS_CORE_MACROS_DEFINED
|
||||
#define DETAIL__PP_AND_DETAILS_CORE_MACROS_DEFINED
|
||||
|
||||
// when putting control-flow, multiple statements, or unknown code (e.g. passed via an outer macro) inside of a macro,
|
||||
// wrap it in PP_WRAP_CODE to be safe. https://q.unity3d.com/answers/1382/view.html
|
||||
//
|
||||
// (also see http://stackoverflow.com/questions/154136/do-while-and-if-else-statements-in-c-c-macros)
|
||||
//
|
||||
// things not to use PP_WRAP_CODE for:
|
||||
//
|
||||
// * 'break' or 'continue' statements that are expected to operate on the scope containing the macro
|
||||
// * introduction of variables that are expected not to go out of scope at macro end
|
||||
//
|
||||
#define PP_WRAP_CODE(CODE_) \
|
||||
do { CODE_; } while (0)
|
||||
|
||||
// PP_EMPTY_STATEMENT is used to insert an empty statement in a macro to require a semicolon terminator where used.
|
||||
// most useful when creating "function style" macros where there is no natural place inside the macro to leave off a
|
||||
// semicolon so as to require it in usage (for example when the internals end with a closing brace).
|
||||
//
|
||||
#define PP_EMPTY_STATEMENT \
|
||||
do { } while (0)
|
||||
|
||||
// PP_VARG_COUNT expands to the the number of arguments passed to the macro. It supports 1 to 20 arguments (0 is not
|
||||
// supported)
|
||||
//
|
||||
#define PP_VARG_COUNT(...) \
|
||||
DETAIL__PP_EXPAND_2(DETAIL__PP_VARG_COUNT, (__VA_ARGS__, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1))
|
||||
|
||||
// PP_VARG_SELECT_OVERLOAD calls the correct overloaded version of the macro "name" name1, name2 etc.
|
||||
//
|
||||
// WARNING: **Varargs break Intellisense**. Intellisense gives us the argument list, which most of the time should be
|
||||
// sufficient documentation for using a macro. Macro overloading hides the args and possibly makes it less safe as
|
||||
// well. So be certain this tradeoff is worth it!
|
||||
//
|
||||
// Use like this:
|
||||
//
|
||||
// #define FORWARD_DECLARE_CLASS_1(CLASSNAME_) class CLASSNAME_;
|
||||
// #define FORWARD_DECLARE_CLASS_2(NAMESPACE_, CLASSNAME_) namespace NAMESPACE_ { class CLASSNAME_; }
|
||||
// #define FORWARD_DECLARE_CLASS_3(NAMESPACE_1_, NAMESPACE_2_, CLASSNAME_) namespace NAMESPACE_1_ { namespace NAMESPACE_1_ { class CLASSNAME_; } }
|
||||
// // Up to 10 overloads can be added
|
||||
// #define FORWARD_DECLARE_CLASS(...) PP_VARG_SELECT_OVERLOAD(FORWARD_DECLARE_CLASS_, (__VA_ARGS__))
|
||||
//
|
||||
// ...which can then be used with optional number of arguments like this:
|
||||
//
|
||||
// FORWARD_DECLARE_CLASS(GlobalClass)
|
||||
// FORWARD_DECLARE_CLASS(FooNamespace, FooClass)
|
||||
// FORWARD_DECLARE_CLASS(FooNamespace, BarNamespace, FooBarClass)
|
||||
//
|
||||
#define PP_VARG_SELECT_OVERLOAD(NAME_, ARGS_) \
|
||||
DETAIL__PP_EXPAND_2(DETAIL__PP_VARG_CONCAT(NAME_, PP_VARG_COUNT ARGS_), ARGS_)
|
||||
|
||||
// PP_CONCAT concatenates all passed preprocessor tokens after macro-expanding them
|
||||
#define PP_CONCAT(...) PP_VARG_SELECT_OVERLOAD(DETAIL__PP_CONCAT_, (__VA_ARGS__))
|
||||
|
||||
// PP_NOOP does nothing, but is useful for forcing the preprocessor to re-evaluate expressions after expansion.
|
||||
#define PP_NOOP()
|
||||
|
||||
// PP_DEFER defers evaluation of EXPR_ to the next expansion pass.
|
||||
#define PP_DEFER(EXPR_) EXPR_ PP_NOOP ()
|
||||
|
||||
// PP_DEFER2 defers evaluation of EXPR_ to the expansion pass *after* the next one
|
||||
#define PP_DEFER2(EXPR_) EXPR_ PP_NOOP PP_NOOP ()()
|
||||
|
||||
// PP_DEFER3 defers evaluation of EXP_ to the expansion pass *after* the expansion pass after the next one
|
||||
#define PP_DEFER3(EXPR_) EXPR_ PP_NOOP PP_NOOP PP_NOOP ()()()
|
||||
|
||||
// PP_RECURSE allows recursive expansion of macros; PP_RECURSE(A) will expand to A_RECURSE which you should define to A.
|
||||
#define PP_RECURSE(MACRO_) PP_DEFER(MACRO_##_RECURSE)()
|
||||
|
||||
// Use PP_EVAL to force up to 1024 evaluation passes on an expression, ensuring that everything is fully expanded.
|
||||
#define PP_EVAL(...) DETAIL__PP_EVAL1024(__VA_ARGS__)
|
||||
|
||||
// Use PP_STRINGIZE to wrap the precise characters in the given argument in double quotes (auto-escaping where necessary).
|
||||
// This is most often used to convert an expression to a string, but be aware that the contents aren't limited to what a
|
||||
// C expression permits! For example PP_STRINGIZE(pork & beans ("awesome")!) results in the literal "pork & beans (\"awesome\")!"
|
||||
#define PP_STRINGIZE(ARG_) DETAIL__PP_STRINGIZE_EXPAND(ARG_)
|
||||
|
||||
// PP_FIRST expands to the first argument in a list of arguments
|
||||
#define PP_FIRST(A_, ...) A_
|
||||
|
||||
// PP_SECOND expands to the second argument in a list of (at least two) arguments
|
||||
#define PP_SECOND(A_, B_, ...) B_
|
||||
|
||||
// PP_BOOLIFY expands to 0 if the argument is 0, and 1 otherwise
|
||||
// It should be used inside a PP_EVAL expression.
|
||||
#define PP_BOOLIFY(EXPR_) DETAIL__PP_BOOLIFY_NOT(DETAIL__PP_BOOLIFY_NOT(EXPR_))
|
||||
|
||||
// PP_VARG_IS_NONEMPTY evaluates to 0 if no arguments are provided, and 1 otherwise
|
||||
// It should be used inside a PP_EVAL expression.
|
||||
#if COMPILER_MSVC
|
||||
#define PP_VARG_IS_NONEMPTY(...) PP_BOOLIFY(PP_FIRST(__VA_ARGS__ DETAIL__PP_VARG_END_MARKER)())
|
||||
#else
|
||||
#define PP_VARG_IS_NONEMPTY(...) PP_BOOLIFY(PP_DEFER(PP_FIRST)(DETAIL__PP_VARG_END_MARKER DETAIL__PP_VARG_UNPAREN_FIRST(__VA_ARGS__))())
|
||||
#endif
|
||||
|
||||
// PP_IF_ELSE(EXPR_)(A_)(B_) evaluates to A_ if EXPR_ is nonzero, or to B_ if EXPR_ is 0.
|
||||
// It should be used inside a PP_EVAL expression.
|
||||
#define PP_IF_ELSE(EXPR_) DETAIL__PP_IF_ELSE(PP_BOOLIFY(EXPR_))
|
||||
|
||||
// PP_MAP applies MACRO_ to each of the following arguments.
|
||||
// It should be used inside a PP_EVAL expression.
|
||||
#define PP_MAP(MACRO_, ...) PP_EVAL(PP_IF_ELSE(PP_VARG_IS_NONEMPTY(__VA_ARGS__))(PP_DEFER3(DETAIL__PP_MAP_NONOPTIONAL)(MACRO_, __VA_ARGS__))())
|
||||
|
||||
// PP_UNPAREN removes one set of optional parenthesis around the argument.
|
||||
// Useful for implementing macros that take types as argument since the commas in templated types
|
||||
// normally are seen as macro argument separators.
|
||||
// #define ARRAY(NAME_, TYPE_, COUNT_) PP_UNPAREN(TYPE_) NAME_[COUNT_]
|
||||
// The type passed to the ARRAY macro ARRAY macro can now be used like:
|
||||
// ARRAY(array_of_maps, (map<int,int>), 8);
|
||||
// while still accepting:
|
||||
// ARRAY(array_of_ints, int, 8);
|
||||
#define PP_UNPAREN(EXPR_) DETAIL__PP_UNPAREN_EVAL_AND_CONCAT_FIRST_2_ARGS(DETAIL__PP_UNPAREN_EMPTY_, DETAIL__PP_UNPAREN_HELPER EXPR_)
|
||||
|
||||
#if __cplusplus
|
||||
|
||||
// PP_IS_STRING evaluates to true if the argument is of const char* type, false otherwise
|
||||
#define PP_IS_STRING(EXPR_) (sizeof(core::detail::ReturnCharIfString(EXPR_)) == sizeof(char))
|
||||
|
||||
// PP_CONST_VALUE takes a value that _may_ be an int, or may be a string, and produces a constant expression suitable for use as an enum
|
||||
// initializer. If you pass it a string, the result will still be a constant expression, but it will have undefined value.
|
||||
#define PP_CONST_VALUE(EXPR_) ((int)DETAIL__PP_CONST_VALUE_HIGHBITS(EXPR_) + (int)DETAIL__PP_CONST_VALUE_LOWBITS(EXPR_))
|
||||
|
||||
#endif
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// implementation details for above macros follow. we have this in a separate section to cut down on clutter above.
|
||||
|
||||
// visual c++ requires two levels of indirection to ensure proper macro expansion of PP_CONCAT with certain arguments involving __VA_ARGS__
|
||||
// and other scenarios like the one below.
|
||||
// #define PP_CAT(a,b) a##b
|
||||
// #define PP_CAT2(a,b) PP_CAT(a,b)
|
||||
// #define PP_CAT3(a,b) PP_CAT2(a,b)
|
||||
//
|
||||
// #define E(a) QQ a
|
||||
// #define QQ() Q
|
||||
//
|
||||
// #define T2() PP_CAT2(_,E(()))
|
||||
// #define T3() PP_CAT3(_,E(()))
|
||||
//
|
||||
// T2() and T3() will expand differently with VC but not with other preprocessors.
|
||||
#define DETAIL__PP_CONCAT_Y(A_, B_) A_##B_
|
||||
#define DETAIL__PP_CONCAT_X(A_, B_) DETAIL__PP_CONCAT_Y(A_, B_)
|
||||
|
||||
#define DETAIL__PP_EXPAND_2(A_, B_) A_ B_
|
||||
|
||||
|
||||
#define DETAIL__PP_VARG_CONCAT_Y(A_, B_) A_##B_
|
||||
#define DETAIL__PP_VARG_CONCAT_X(A_, B_) DETAIL__PP_VARG_CONCAT_Y(A_, B_)
|
||||
#define DETAIL__PP_VARG_CONCAT(A_, B_) DETAIL__PP_VARG_CONCAT_X(A_, B_)
|
||||
#define DETAIL__PP_VARG_COUNT(ARG0_, ARG1_, ARG2_, ARG3_, ARG4_, ARG5_, ARG6_, ARG7_, ARG8_, ARG9_, ARG10_, ARG11_, ARG12_, ARG13_, ARG14_, ARG15_, ARG16_, ARG17_, ARG18_, ARG19_, RESULT_, ...) RESULT_
|
||||
|
||||
#define DETAIL__PP_CONCAT_1(A_) DETAIL__PP_CONCAT_X(A_,)
|
||||
#define DETAIL__PP_CONCAT_2(A_, B_) DETAIL__PP_CONCAT_X(A_, B_)
|
||||
#define DETAIL__PP_CONCAT_3(A_, B_, C_) DETAIL__PP_CONCAT_2(DETAIL__PP_CONCAT_2(A_, B_), C_)
|
||||
#define DETAIL__PP_CONCAT_4(A_, B_, C_, D_) DETAIL__PP_CONCAT_2(DETAIL__PP_CONCAT_2(A_, B_), DETAIL__PP_CONCAT_2(C_, D_))
|
||||
#define DETAIL__PP_CONCAT_5(A_, B_, C_, D_, E_) DETAIL__PP_CONCAT_2(DETAIL__PP_CONCAT_2(A_, B_), DETAIL__PP_CONCAT_3(C_, D_, E_))
|
||||
#define DETAIL__PP_CONCAT_6(A_, B_, C_, D_, E_, F_) DETAIL__PP_CONCAT_2(DETAIL__PP_CONCAT_2(A_, B_), DETAIL__PP_CONCAT_4(C_, D_, E_, F_))
|
||||
#define DETAIL__PP_CONCAT_7(A_, B_, C_, D_, E_, F_, G_) DETAIL__PP_CONCAT_2(DETAIL__PP_CONCAT_3(A_, B_, C_), DETAIL__PP_CONCAT_4(D_, E_, F_, G_))
|
||||
#define DETAIL__PP_CONCAT_8(A_, B_, C_, D_, E_, F_, G_, H_) DETAIL__PP_CONCAT_2(DETAIL__PP_CONCAT_4(A_, B_, C_, D_), DETAIL__PP_CONCAT_4(E_, F_, G_, H_))
|
||||
|
||||
#define DETAIL__PP_EVAL1024(...) DETAIL__PP_EVAL512(DETAIL__PP_EVAL512(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL512(...) DETAIL__PP_EVAL256(DETAIL__PP_EVAL256(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL256(...) DETAIL__PP_EVAL128(DETAIL__PP_EVAL128(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL128(...) DETAIL__PP_EVAL64(DETAIL__PP_EVAL64(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL64(...) DETAIL__PP_EVAL32(DETAIL__PP_EVAL32(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL32(...) DETAIL__PP_EVAL16(DETAIL__PP_EVAL16(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL16(...) DETAIL__PP_EVAL8(DETAIL__PP_EVAL8(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL8(...) DETAIL__PP_EVAL4(DETAIL__PP_EVAL4(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL4(...) DETAIL__PP_EVAL2(DETAIL__PP_EVAL2(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL2(...) DETAIL__PP_EVAL1(DETAIL__PP_EVAL1(__VA_ARGS__))
|
||||
#define DETAIL__PP_EVAL1(...) __VA_ARGS__
|
||||
|
||||
#define DETAIL__PP_CONST_VALUE_ARR(EXPR_) core::detail::ConstValueHelper<sizeof(core::detail::ReturnCharIfString(EXPR_))>::arr
|
||||
|
||||
// Extract the high bits of x. We cannot just do (x & 0xffff0000) because 0x7fffffff is the maximum permitted array size on 32bit, so we have to shift
|
||||
// and the array is not allowed to be size 0, so we add 0x10000 to ensure nonzero
|
||||
#define DETAIL__PP_CONST_VALUE_HIGHBITS(EXPR_) ((sizeof(DETAIL__PP_CONST_VALUE_ARR(EXPR_)[ ((ptrdiff_t)(EXPR_) >> 16) + 0x10000]) - 0x10000) << 16)
|
||||
|
||||
// Extract the low bits of x - as with the high bits, the array cannot be zero-length, so we add 1 and then subtract it again after the sizeof
|
||||
#define DETAIL__PP_CONST_VALUE_LOWBITS(EXPR_) (sizeof(DETAIL__PP_CONST_VALUE_ARR(EXPR_)[ ((ptrdiff_t)(EXPR_) & 0xFFFF) + 1]) - 1)
|
||||
|
||||
#define DETAIL__PP_STRINGIZE_EXPAND(EXPR_) #EXPR_
|
||||
|
||||
// Expand to 1 if the first argument is DETAIL__PP_PROBE(), 0 otherwise
|
||||
#define DETAIL__PP_IS_PROBE(...) PP_DEFER(PP_SECOND)(__VA_ARGS__, 0)
|
||||
#define DETAIL__PP_PROBE() _, 1
|
||||
|
||||
#define DETAIL__PP_BOOLIFY_NOT(EXPR_) PP_DEFER(DETAIL__PP_IS_PROBE)(PP_CONCAT(DETAIL__PP_BOOLIFY_NOT_PROBE_, EXPR_))
|
||||
#define DETAIL__PP_BOOLIFY_NOT_PROBE_0 DETAIL__PP_PROBE()
|
||||
#define DETAIL__PP_BOOLIFY_NOT_PROBE_1 0
|
||||
|
||||
#define DETAIL__PP_VARG_END_MARKER() 0
|
||||
#define DETAIL__PP_VARG_UNPAREN_FIRST(...) DETAIL__PP_UNPAREN_EVAL_AND_CONCAT_FIRST_2_ARGS(DETAIL__PP_UNPAREN_EMPTY_, DETAIL__PP_UNPAREN_HELPER __VA_ARGS__)
|
||||
|
||||
#define DETAIL__PP_IF_ELSE(EXPR_) PP_CONCAT(DETAIL__PP_IF_, EXPR_)
|
||||
#define DETAIL__PP_IF_1(...) __VA_ARGS__ DETAIL__PP_IF_1_ELSE
|
||||
#define DETAIL__PP_IF_0(...) DETAIL__PP_IF_0_ELSE
|
||||
#define DETAIL__PP_IF_1_ELSE(...)
|
||||
#define DETAIL__PP_IF_0_ELSE(...) __VA_ARGS__
|
||||
|
||||
#define DETAIL__PP_MAP_NONOPTIONAL(MACRO_, FIRST_, ...) MACRO_(FIRST_) PP_IF_ELSE(PP_VARG_IS_NONEMPTY(__VA_ARGS__))( PP_DEFER2(DETAIL__PP_MAP_RECURSE)()(MACRO_, __VA_ARGS__) )()
|
||||
#define DETAIL__PP_MAP_RECURSE() DETAIL__PP_MAP_NONOPTIONAL
|
||||
|
||||
#define DETAIL__PP_UNPAREN_CONCAT_FIRST_2_ARGS(x, ...) x##__VA_ARGS__
|
||||
#define DETAIL__PP_UNPAREN_EVAL_AND_CONCAT_FIRST_2_ARGS(x, ...) DETAIL__PP_UNPAREN_CONCAT_FIRST_2_ARGS(x, __VA_ARGS__)
|
||||
#define DETAIL__PP_UNPAREN_EMPTY_DETAIL__PP_UNPAREN_HELPER
|
||||
#define DETAIL__PP_UNPAREN_HELPER(...) DETAIL__PP_UNPAREN_HELPER __VA_ARGS__
|
||||
|
||||
#if __cplusplus
|
||||
|
||||
namespace core
|
||||
{
|
||||
namespace detail
|
||||
{
|
||||
char ReturnCharIfString(const char*);
|
||||
long ReturnCharIfString(unsigned int);
|
||||
long ReturnCharIfString(int);
|
||||
long ReturnCharIfString(float);
|
||||
|
||||
template<int dummy> struct ConstValueHelper { typedef char arr; };
|
||||
template<> struct ConstValueHelper<sizeof(char)> { static char arr[1]; };
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* DETAIL__PP_AND_DETAILS_CORE_MACROS_DEFINED */
|
||||
53
Libraries/external/baselib/Include/Internal/PlatformDetection.h
vendored
Normal file
53
Libraries/external/baselib/Include/Internal/PlatformDetection.h
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
#pragma once

// Detects the target platform and defines exactly one BASELIB_PLATFORM_* macro.
//
// Platforms here are distinguished very loosely, by the set of system APIs
// they expose, and correspond most closely to the platform toolchains defined
// in Bee. Note that a PLATFORM_* define elsewhere in the Unity code base may
// cover one or more of the BASELIB_PLATFORM_* values defined here.

#if defined(_XBOX_ONE)
    #define BASELIB_PLATFORM_XBOXONE 1
#elif defined(__NX__)
    #define BASELIB_PLATFORM_SWITCH 1
#elif defined(__ORBIS__)
    #define BASELIB_PLATFORM_PS4 1
#elif defined(__PROSPERO__)
    #define BASELIB_PLATFORM_PS5 1
#elif defined(__EMSCRIPTEN__)
    #define BASELIB_PLATFORM_EMSCRIPTEN 1
#elif defined(__wasi__)
    #define BASELIB_PLATFORM_WASI 1
#elif defined(__APPLE__)
    // All Apple targets share __APPLE__; TargetConditionals.h tells them apart.
    #include <TargetConditionals.h>
    #if TARGET_OS_IOS
        #define BASELIB_PLATFORM_IOS 1
    #elif TARGET_OS_TV
        #define BASELIB_PLATFORM_TVOS 1
    #elif TARGET_OS_OSX || TARGET_OS_MAC
        #define BASELIB_PLATFORM_MACOS 1
    #endif
#elif defined(__NetBSD__)
    #define BASELIB_PLATFORM_NETBSD 1
#elif defined(linux) || defined(__linux__)
    // Several platforms are built on a Linux kernel; check the more specific
    // ones before falling back to desktop Linux.
    #if defined(LUMIN)
        #define BASELIB_PLATFORM_LUMIN 1
    #elif defined(GGP)
        #define BASELIB_PLATFORM_STADIA 1
    #elif defined(ANDROID) || defined(__ANDROID__)
        #define BASELIB_PLATFORM_ANDROID 1
    #elif defined(EMBEDDED_LINUX)
        #define BASELIB_PLATFORM_EMBEDDED_LINUX 1
    #else
        #define BASELIB_PLATFORM_LINUX 1
    #endif
#elif defined(_WIN32) || defined(__WIN32__)
    // Windows flavors are separated by the WinAPI partition being targeted.
    #include <winapifamily.h>
    #if (defined(WINAPI_FAMILY_GAMES) && (WINAPI_FAMILY == WINAPI_FAMILY_GAMES))
        #define BASELIB_PLATFORM_WINDOWSGAMES 1
    #elif WINAPI_FAMILY == WINAPI_FAMILY_APP
        #define BASELIB_PLATFORM_WINRT 1
    #else
        #define BASELIB_PLATFORM_WINDOWS 1
    #endif
#endif
|
||||
24
Libraries/external/baselib/Include/Internal/PlatformEnvironment.h
vendored
Normal file
24
Libraries/external/baselib/Include/Internal/PlatformEnvironment.h
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
#pragma once

// Selects the correct compiler and platform environment headers for the
// current build.

// Compiler environment: MSVC is tested first, then clang (which also defines
// __GNUC__), then gcc.
#if _MSC_VER
    #include "Compiler/CompilerEnvironmentMsvc.h"
#elif __clang__
    #include "Compiler/CompilerEnvironmentClang.h"
#elif __GNUC__ || __GCC__
    #include "Compiler/CompilerEnvironmentGcc.h"
#else
    #error "Unknown Compiler"
#endif

// There is one platform specific environment header for every platform; the
// build must set up the include path so that the right one is picked up here.
#include "BaselibPlatformSpecificEnvironment.h"
#include "VerifyPlatformEnvironment.h"

// A platform environment header may provide its own debug trap; otherwise
// fall back to the compiler-provided one.
#ifndef BASELIB_DEBUG_TRAP
    #define BASELIB_DEBUG_TRAP COMPILER_DEBUG_TRAP
#endif
|
||||
41
Libraries/external/baselib/Include/Internal/RedefineCompilerMacros.h
vendored
Normal file
41
Libraries/external/baselib/Include/Internal/RedefineCompilerMacros.h
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// DO NOT PUT #pragma once or include guard check here
|
||||
// This header is designed to be able to be included multiple times
|
||||
|
||||
// This header is used to redefine compiler macros after they were temporary undefined by UndefineCompilerMacros.h
|
||||
// Please make sure to always use this paired with the UndefineCompilerMacros.h header.
|
||||
//
|
||||
// ex.
|
||||
//
|
||||
// #include "UndefineCompilerMacros.h"
|
||||
// #include "Some3rdParty.h"
|
||||
// #include "RedefineCompilerMacros.h"
|
||||
|
||||
#ifndef DETAIL__COMPILERMACROS_HAD_BEEN_UNDEFINED_BY_UNDEFINECOMPILER_H
|
||||
#error "RedefineCompilerMacros.h can only be used after UndefinePlatforms.h got included before."
|
||||
#endif
|
||||
|
||||
#undef DETAIL__COMPILERMACROS_HAD_BEEN_UNDEFINED_BY_UNDEFINECOMPILER_H
|
||||
|
||||
#undef COMPILER_GCC
|
||||
#if defined(DETAIL__TEMP_COMPILER_GCC_WAS_1)
|
||||
#undef DETAIL__TEMP_COMPILER_GCC_WAS_1
|
||||
#define COMPILER_GCC 1
|
||||
#else
|
||||
#define COMPILER_GCC 0
|
||||
#endif
|
||||
|
||||
#undef COMPILER_CLANG
|
||||
#if defined(DETAIL__TEMP_COMPILER_CLANG_WAS_1)
|
||||
#undef DETAIL__TEMP_COMPILER_CLANG_WAS_1
|
||||
#define COMPILER_CLANG 1
|
||||
#else
|
||||
#define COMPILER_CLANG 0
|
||||
#endif
|
||||
|
||||
#undef COMPILER_MSVC
|
||||
#if defined(DETAIL__TEMP_COMPILER_MSVC_WAS_1)
|
||||
#undef DETAIL__TEMP_COMPILER_MSVC_WAS_1
|
||||
#define COMPILER_MSVC 1
|
||||
#else
|
||||
#define COMPILER_MSVC 0
|
||||
#endif
|
||||
32
Libraries/external/baselib/Include/Internal/UndefineCompilerMacros.h
vendored
Normal file
32
Libraries/external/baselib/Include/Internal/UndefineCompilerMacros.h
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// DO NOT PUT #pragma once or include guard check here
// This header is designed to be able to be included multiple times.

// Temporarily undefines all COMPILER_* macros in case 3rd-party code has a
// naming conflict with them; RedefineCompilerMacros.h restores them.
// Please make sure to always use this paired with the RedefineCompilerMacros.h header.
//
// ex.
//
// #include "UndefineCompilerMacros.h"
// #include "Some3rdParty.h"
// #include "RedefineCompilerMacros.h"

#ifdef DETAIL__COMPILERMACROS_HAD_BEEN_UNDEFINED_BY_UNDEFINECOMPILER_H
    #error "UndefineCompilerMacros.h has been included more than once or RedefineCompilerMacros.h is missing."
#endif

// Remember which macros were set to 1 (so RedefineCompilerMacros.h can
// restore them), then undefine each one.
#if COMPILER_MSVC
    #define DETAIL__TEMP_COMPILER_MSVC_WAS_1
#endif
#undef COMPILER_MSVC

#if COMPILER_CLANG
    #define DETAIL__TEMP_COMPILER_CLANG_WAS_1
#endif
#undef COMPILER_CLANG

#if COMPILER_GCC
    #define DETAIL__TEMP_COMPILER_GCC_WAS_1
#endif
#undef COMPILER_GCC

// Marker checked by RedefineCompilerMacros.h.
#define DETAIL__COMPILERMACROS_HAD_BEEN_UNDEFINED_BY_UNDEFINECOMPILER_H
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user