[add] first

This commit is contained in:
2023-10-08 10:24:48 +08:00
commit b1ae0510a9
1048 changed files with 3254361 additions and 0 deletions

View File

@@ -0,0 +1 @@
// Function-pointer type for il2cpp codegen registration, plus a global slot —
// presumably populated by Unity-generated code and invoked by the il2cpp
// runtime at startup (TODO confirm against the generated Register* sources).
typedef void (*CodegenRegistrationFunction) (); CodegenRegistrationFunction g_CodegenRegistration;

View File

@@ -0,0 +1,2 @@
// Build-time flag: true when this il2cpp build was produced without exception
// support. Defaults to false (exceptions enabled).
bool il2cpp_no_exceptions = false;

// C-linkage accessor so the Unity runtime can query the flag across the
// C/C++ boundary.
extern "C" bool Unity_il2cppNoExceptions()
{
    return il2cpp_no_exceptions;
}

View File

@@ -0,0 +1,12 @@
#include "RegisterFeatures.h"
// Feature toggles exported by the Unity engine runtime; only the declarations
// are needed here — the definitions live inside the engine libraries.
extern "C" void UnityEnableGyroscope(bool value);
extern "C" void UnityEnableStylusTouch(bool value);
// Unity-generated feature registration, called once during startup to
// enable/disable optional input features for this particular build
// (gyroscope off, stylus touch on).
void RegisterFeatures()
{
UnityEnableGyroscope(false);
UnityEnableStylusTouch(true);
}

View File

@@ -0,0 +1,3 @@
#pragma once
// Declared here, defined in the generated RegisterFeatures source file;
// called once during Unity startup to toggle optional engine features.
void RegisterFeatures();

View File

@@ -0,0 +1,14 @@
#import "UIView+Toast.h"
#import "UnityAppController.h"

extern "C" {

/// Shows a toast message on the Unity root view controller's view.
/// Called from C# via [DllImport("__Internal")].
/// @param message UTF-8 encoded message; ignored when NULL.
/// @param isLong  Non-zero for a long (3.5 s) toast, zero for a short (2.0 s) one.
void _showToast(char *message, bool isLong) {
    // BUG FIX: the parameter was previously declared `Boolean *isLong` and the
    // condition tested the *pointer* (`if (isLong)`), not the boolean value.
    // Unity's C# side marshals a bool by value, so accept it by value here.
    if (message == NULL) return;
    NSString *messageFromUnity = [NSString stringWithUTF8String:message];
    // NOTE(review): keyWindow is deprecated on iOS 13+; consider resolving the
    // key window from the connected UIWindowScene — confirm deployment target.
    UIView *displayView = [UIApplication sharedApplication].keyWindow.rootViewController.view;
    if (isLong) {
        [displayView makeToast:messageFromUnity duration:3.5 position:CSToastPositionBottom];
    } else {
        [displayView makeToast:messageFromUnity duration:2.0 position:CSToastPositionBottom];
    }
}

}

View File

@@ -0,0 +1,446 @@
//
// UIView+Toast.h
// Toast
//
// Copyright (c) 2011-2017 Charles Scalesse.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#import <UIKit/UIKit.h>
// Predefined toast positions (see cs_centerPointForPosition:withToast:).
// NOTE(review): these were declared `extern const NSString *`, but the
// definitions in UIView+Toast.m are plain `NSString *` — the qualifiers must
// match the defining translation unit. (`NSString * const` on both sides would
// be the stricter fix, but requires changing the definitions as well.)
extern NSString * CSToastPositionTop;
extern NSString * CSToastPositionCenter;
extern NSString * CSToastPositionBottom;
@class CSToastStyle;
/**
Toast is an Objective-C category that adds toast notifications to the UIView
object class. It is intended to be simple, lightweight, and easy to use. Most
toast notifications can be triggered with a single line of code.
The `makeToast:` methods create a new view and then display it as toast.
The `showToast:` methods display any view as toast.
*/
@interface UIView (Toast)
/**
Creates and presents a new toast view with a message and displays it with the
default duration and position. Styled using the shared style.
@param message The message to be displayed
*/
- (void)makeToast:(NSString *)message;
/**
Creates and presents a new toast view with a message. Duration and position
can be set explicitly. Styled using the shared style.
@param message The message to be displayed
@param duration The toast duration
@param position The toast's center point. Can be one of the predefined CSToastPosition
constants or a `CGPoint` wrapped in an `NSValue` object.
*/
- (void)makeToast:(NSString *)message
duration:(NSTimeInterval)duration
position:(id)position;
/**
Creates and presents a new toast view with a message. Duration, position, and
style can be set explicitly.
@param message The message to be displayed
@param duration The toast duration
@param position The toast's center point. Can be one of the predefined CSToastPosition
constants or a `CGPoint` wrapped in an `NSValue` object.
@param style The style. The shared style will be used when nil
*/
- (void)makeToast:(NSString *)message
duration:(NSTimeInterval)duration
position:(id)position
style:(CSToastStyle *)style;
/**
Creates and presents a new toast view with a message, title, and image. Duration,
position, and style can be set explicitly. The completion block executes when the
toast view completes. `didTap` will be `YES` if the toast view was dismissed from
a tap.
@param message The message to be displayed
@param duration The toast duration
@param position The toast's center point. Can be one of the predefined CSToastPosition
constants or a `CGPoint` wrapped in an `NSValue` object.
@param title The title
@param image The image
@param style The style. The shared style will be used when nil
@param completion The completion block, executed after the toast view disappears.
didTap will be `YES` if the toast view was dismissed from a tap.
*/
- (void)makeToast:(NSString *)message
duration:(NSTimeInterval)duration
position:(id)position
title:(NSString *)title
image:(UIImage *)image
style:(CSToastStyle *)style
completion:(void(^)(BOOL didTap))completion;
/**
Creates a new toast view with any combination of message, title, and image.
The look and feel is configured via the style. Unlike the `makeToast:` methods,
this method does not present the toast view automatically. One of the showToast:
methods must be used to present the resulting view.
@warning if message, title, and image are all nil, this method will return nil.
@param message The message to be displayed
@param title The title
@param image The image
@param style The style. The shared style will be used when nil
@return The newly created toast view
*/
- (UIView *)toastViewForMessage:(NSString *)message
title:(NSString *)title
image:(UIImage *)image
style:(CSToastStyle *)style;
/**
Hides the active toast. If there are multiple toasts active in a view, this method
hides the oldest toast (the first of the toasts to have been presented).
@see `hideAllToasts` to remove all active toasts from a view.
@warning This method has no effect on activity toasts. Use `hideToastActivity` to
hide activity toasts.
*/
- (void)hideToast;
/**
Hides an active toast.
@param toast The active toast view to dismiss. Any toast that is currently being displayed
on the screen is considered active.
@warning this does not clear a toast view that is currently waiting in the queue.
*/
- (void)hideToast:(UIView *)toast;
/**
Hides all active toast views and clears the queue.
*/
- (void)hideAllToasts;
/**
Hides all active toast views, with options to hide activity and clear the queue.
@param includeActivity If `true`, toast activity will also be hidden. Default is `false`.
@param clearQueue If `true`, removes all toast views from the queue. Default is `true`.
*/
- (void)hideAllToasts:(BOOL)includeActivity clearQueue:(BOOL)clearQueue;
/**
Removes all toast views from the queue. This has no effect on toast views that are
active. Use `hideAllToasts` to hide the active toasts views and clear the queue.
*/
- (void)clearToastQueue;
/**
Creates and displays a new toast activity indicator view at a specified position.
@warning Only one toast activity indicator view can be presented per superview. Subsequent
calls to `makeToastActivity:` will be ignored until hideToastActivity is called.
@warning `makeToastActivity:` works independently of the showToast: methods. Toast activity
views can be presented and dismissed while toast views are being displayed. `makeToastActivity:`
has no effect on the queueing behavior of the showToast: methods.
@param position The toast's center point. Can be one of the predefined CSToastPosition
constants or a `CGPoint` wrapped in an `NSValue` object.
*/
- (void)makeToastActivity:(id)position;
/**
Dismisses the active toast activity indicator view.
*/
- (void)hideToastActivity;
/**
Displays any view as toast using the default duration and position.
@param toast The view to be displayed as toast
*/
- (void)showToast:(UIView *)toast;
/**
Displays any view as toast at a provided position and duration. The completion block
executes when the toast view completes. `didTap` will be `YES` if the toast view was
dismissed from a tap.
@param toast The view to be displayed as toast
@param duration The notification duration
@param position The toast's center point. Can be one of the predefined CSToastPosition
constants or a `CGPoint` wrapped in an `NSValue` object.
@param completion The completion block, executed after the toast view disappears.
didTap will be `YES` if the toast view was dismissed from a tap.
*/
- (void)showToast:(UIView *)toast
duration:(NSTimeInterval)duration
position:(id)position
completion:(void(^)(BOOL didTap))completion;
@end
/**
`CSToastStyle` instances define the look and feel for toast views created via the
`makeToast:` methods as well for toast views created directly with
`toastViewForMessage:title:image:style:`.
@warning `CSToastStyle` offers relatively simple styling options for the default
toast view. If you require a toast view with more complex UI, it probably makes more
sense to create your own custom UIView subclass and present it with the `showToast:`
methods.
*/
@interface CSToastStyle : NSObject
/**
The background color. Default is `[UIColor blackColor]` at 80% opacity.
*/
@property (strong, nonatomic) UIColor *backgroundColor;
/**
The title color. Default is `[UIColor whiteColor]`.
*/
@property (strong, nonatomic) UIColor *titleColor;
/**
The message color. Default is `[UIColor whiteColor]`.
*/
@property (strong, nonatomic) UIColor *messageColor;
/**
A percentage value from 0.0 to 1.0, representing the maximum width of the toast
view relative to its superview. Default is 0.8 (80% of the superview's width).
*/
@property (assign, nonatomic) CGFloat maxWidthPercentage;
/**
A percentage value from 0.0 to 1.0, representing the maximum height of the toast
view relative to its superview. Default is 0.8 (80% of the superview's height).
*/
@property (assign, nonatomic) CGFloat maxHeightPercentage;
/**
The spacing from the horizontal edge of the toast view to the content. When an image
is present, this is also used as the padding between the image and the text.
Default is 10.0.
*/
@property (assign, nonatomic) CGFloat horizontalPadding;
/**
The spacing from the vertical edge of the toast view to the content. When a title
is present, this is also used as the padding between the title and the message.
Default is 10.0.
*/
@property (assign, nonatomic) CGFloat verticalPadding;
/**
The corner radius. Default is 10.0.
*/
@property (assign, nonatomic) CGFloat cornerRadius;
/**
The title font. Default is `[UIFont boldSystemFontOfSize:16.0]`.
*/
@property (strong, nonatomic) UIFont *titleFont;
/**
The message font. Default is `[UIFont systemFontOfSize:16.0]`.
*/
@property (strong, nonatomic) UIFont *messageFont;
/**
The title text alignment. Default is `NSTextAlignmentLeft`.
*/
@property (assign, nonatomic) NSTextAlignment titleAlignment;
/**
The message text alignment. Default is `NSTextAlignmentLeft`.
*/
@property (assign, nonatomic) NSTextAlignment messageAlignment;
/**
The maximum number of lines for the title. The default is 0 (no limit).
*/
@property (assign, nonatomic) NSInteger titleNumberOfLines;
/**
The maximum number of lines for the message. The default is 0 (no limit).
*/
@property (assign, nonatomic) NSInteger messageNumberOfLines;
/**
Enable or disable a shadow on the toast view. Default is `NO`.
*/
@property (assign, nonatomic) BOOL displayShadow;
/**
The shadow color. Default is `[UIColor blackColor]`.
*/
@property (strong, nonatomic) UIColor *shadowColor;
/**
A value from 0.0 to 1.0, representing the opacity of the shadow.
Default is 0.8 (80% opacity).
*/
@property (assign, nonatomic) CGFloat shadowOpacity;
/**
The shadow radius. Default is 6.0.
*/
@property (assign, nonatomic) CGFloat shadowRadius;
/**
The shadow offset. The default is `CGSizeMake(4.0, 4.0)`.
*/
@property (assign, nonatomic) CGSize shadowOffset;
/**
The image size. The default is `CGSizeMake(80.0, 80.0)`.
*/
@property (assign, nonatomic) CGSize imageSize;
/**
The size of the toast activity view when `makeToastActivity:` is called.
Default is `CGSizeMake(100.0, 100.0)`.
*/
@property (assign, nonatomic) CGSize activitySize;
/**
The fade in/out animation duration. Default is 0.2.
*/
@property (assign, nonatomic) NSTimeInterval fadeDuration;
/**
Creates a new instance of `CSToastStyle` with all the default values set.
*/
- (instancetype)initWithDefaultStyle NS_DESIGNATED_INITIALIZER;
/**
@warning Only the designated initializer should be used to create
an instance of `CSToastStyle`.
*/
- (instancetype)init NS_UNAVAILABLE;
@end
/**
`CSToastManager` provides general configuration options for all toast
notifications. Backed by a singleton instance.
*/
@interface CSToastManager : NSObject
/**
Sets the shared style on the singleton. The shared style is used whenever
a `makeToast:` method (or `toastViewForMessage:title:image:style:`) is called
with a nil style. By default, this is set to `CSToastStyle`'s default
style.
@param sharedStyle the shared style
*/
+ (void)setSharedStyle:(CSToastStyle *)sharedStyle;
/**
Gets the shared style from the singleton. By default, this is
`CSToastStyle`'s default style.
@return the shared style
*/
+ (CSToastStyle *)sharedStyle;
/**
Enables or disables tap to dismiss on toast views. Default is `YES`.
@param tapToDismissEnabled YES or NO
*/
+ (void)setTapToDismissEnabled:(BOOL)tapToDismissEnabled;
/**
Returns `YES` if tap to dismiss is enabled, otherwise `NO`.
Default is `YES`.
@return BOOL YES or NO
*/
+ (BOOL)isTapToDismissEnabled;
/**
Enables or disables queueing behavior for toast views. When `YES`,
toast views will appear one after the other. When `NO`, multiple Toast
views will appear at the same time (potentially overlapping depending
on their positions). This has no effect on the toast activity view,
which operates independently of normal toast views. Default is `NO`.
@param queueEnabled YES or NO
*/
+ (void)setQueueEnabled:(BOOL)queueEnabled;
/**
Returns `YES` if the queue is enabled, otherwise `NO`.
Default is `NO`.
@return BOOL
*/
+ (BOOL)isQueueEnabled;
/**
Sets the default duration. Used for the `makeToast:` and
`showToast:` methods that don't require an explicit duration.
Default is 3.0.
@param duration The toast duration
*/
+ (void)setDefaultDuration:(NSTimeInterval)duration;
/**
Returns the default duration. Default is 3.0.
@return duration The toast duration
*/
+ (NSTimeInterval)defaultDuration;
/**
Sets the default position. Used for the `makeToast:` and
`showToast:` methods that don't require an explicit position.
Default is `CSToastPositionBottom`.
@param position The default center point. Can be one of the predefined
CSToastPosition constants or a `CGPoint` wrapped in an `NSValue` object.
*/
+ (void)setDefaultPosition:(id)position;
/**
Returns the default toast position. Default is `CSToastPositionBottom`.
@return position The default center point. Will be one of the predefined
CSToastPosition constants or a `CGPoint` wrapped in an `NSValue` object.
*/
+ (id)defaultPosition;
@end

View File

@@ -0,0 +1,586 @@
//
// UIView+Toast.m
// Toast
//
// Copyright (c) 2011-2017 Charles Scalesse.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#import "UIView+Toast.h"
#import <QuartzCore/QuartzCore.h>
#import <objc/runtime.h>
// Positions
NSString * CSToastPositionTop = @"CSToastPositionTop";
NSString * CSToastPositionCenter = @"CSToastPositionCenter";
NSString * CSToastPositionBottom = @"CSToastPositionBottom";
// Keys for values associated with toast views.
// Only the *addresses* of these statics are used as objc_setAssociatedObject
// keys (always passed as `&CSToast...Key`); the string values exist purely to
// aid debugging.
static const NSString * CSToastTimerKey = @"CSToastTimerKey";
static const NSString * CSToastDurationKey = @"CSToastDurationKey";
static const NSString * CSToastPositionKey = @"CSToastPositionKey";
static const NSString * CSToastCompletionKey = @"CSToastCompletionKey";
// Keys for values associated with self (the view the category is used on).
static const NSString * CSToastActiveKey = @"CSToastActiveKey";
static const NSString * CSToastActivityViewKey = @"CSToastActivityViewKey";
static const NSString * CSToastQueueKey = @"CSToastQueueKey";
@interface UIView (ToastPrivate)
/**
These private methods are being prefixed with "cs_" to reduce the likelihood of non-obvious
naming conflicts with other UIView methods.
@discussion Should the public API also use the cs_ prefix? Technically it should, but it
results in code that is less legible. The current public method names seem unlikely to cause
conflicts so I think we should favor the cleaner API for now.
*/
- (void)cs_showToast:(UIView *)toast duration:(NSTimeInterval)duration position:(id)position;
- (void)cs_hideToast:(UIView *)toast;
- (void)cs_hideToast:(UIView *)toast fromTap:(BOOL)fromTap;
- (void)cs_toastTimerDidFinish:(NSTimer *)timer;
- (void)cs_handleToastTapped:(UITapGestureRecognizer *)recognizer;
- (CGPoint)cs_centerPointForPosition:(id)position withToast:(UIView *)toast;
- (NSMutableArray *)cs_toastQueue;
@end
@implementation UIView (Toast)
#pragma mark - Make Toast Methods
// Message-only toast: manager-wide default duration/position, shared style.
- (void)makeToast:(NSString *)message {
[self makeToast:message duration:[CSToastManager defaultDuration] position:[CSToastManager defaultPosition] style:nil];
}
// Message toast with explicit duration and position; shared style.
- (void)makeToast:(NSString *)message duration:(NSTimeInterval)duration position:(id)position {
[self makeToast:message duration:duration position:position style:nil];
}
// Builds a toast view for `message` and presents it. A nil style falls back
// to [CSToastManager sharedStyle] inside toastViewForMessage:.
- (void)makeToast:(NSString *)message duration:(NSTimeInterval)duration position:(id)position style:(CSToastStyle *)style {
UIView *toast = [self toastViewForMessage:message title:nil image:nil style:style];
[self showToast:toast duration:duration position:position completion:nil];
}
// Full-control variant: any combination of message/title/image (if all three
// are nil, toastViewForMessage: returns nil and showToast: is a no-op).
// `completion` runs after the toast disappears; didTap is YES when the toast
// was dismissed by a tap.
- (void)makeToast:(NSString *)message duration:(NSTimeInterval)duration position:(id)position title:(NSString *)title image:(UIImage *)image style:(CSToastStyle *)style completion:(void(^)(BOOL didTap))completion {
UIView *toast = [self toastViewForMessage:message title:title image:image style:style];
[self showToast:toast duration:duration position:position completion:completion];
}
#pragma mark - Show Toast Methods

// Presents `toast` with the manager-wide default duration and position.
- (void)showToast:(UIView *)toast {
    [self showToast:toast duration:[CSToastManager defaultDuration] position:[CSToastManager defaultPosition] completion:nil];
}

// Presents `toast` at `position` for `duration` seconds. If queueing is
// enabled and another toast is already active, the toast (plus its duration
// and position) is enqueued and later presented by cs_hideToast:fromTap:.
// `completion` (nullable) runs after the toast disappears; didTap is YES when
// dismissal came from a tap.
- (void)showToast:(UIView *)toast duration:(NSTimeInterval)duration position:(id)position completion:(void(^)(BOOL didTap))completion {
    // sanity
    if (toast == nil) return;
    // Store the completion block on the toast view. BUG FIX: use COPY rather
    // than RETAIN — `completion` may still be a stack block here, and retaining
    // a stack block does not move it to the heap, so invoking it after the
    // caller's frame has unwound would be undefined behavior.
    objc_setAssociatedObject(toast, &CSToastCompletionKey, completion, OBJC_ASSOCIATION_COPY_NONATOMIC);
    if ([CSToastManager isQueueEnabled] && [self.cs_activeToasts count] > 0) {
        // we're about to queue this toast view so we need to store the duration and position as well
        objc_setAssociatedObject(toast, &CSToastDurationKey, @(duration), OBJC_ASSOCIATION_RETAIN_NONATOMIC);
        objc_setAssociatedObject(toast, &CSToastPositionKey, position, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
        // enqueue
        [self.cs_toastQueue addObject:toast];
    } else {
        // present immediately
        [self cs_showToast:toast duration:duration position:position];
    }
}
#pragma mark - Hide Toast Methods
// Hides the oldest active toast (firstObject of cs_activeToasts).
- (void)hideToast {
[self hideToast:[[self cs_activeToasts] firstObject]];
}
// Hides a specific active toast; ignores nil and views not currently active
// (queued-but-not-shown toasts are untouched — see clearToastQueue).
- (void)hideToast:(UIView *)toast {
// sanity
if (!toast || ![[self cs_activeToasts] containsObject:toast]) return;
[self cs_hideToast:toast];
}
// Hides all active toasts and clears the queue; activity views are untouched.
- (void)hideAllToasts {
[self hideAllToasts:NO clearQueue:YES];
}
// Hides all active toasts. Enumerating cs_activeToasts here is safe because
// cs_hideToast: removes entries only in its (asynchronous) fade-out animation
// completion, not during this loop.
- (void)hideAllToasts:(BOOL)includeActivity clearQueue:(BOOL)clearQueue {
if (clearQueue) {
[self clearToastQueue];
}
for (UIView *toast in [self cs_activeToasts]) {
[self hideToast:toast];
}
if (includeActivity) {
[self hideToastActivity];
}
}
// Drops all queued (not-yet-presented) toasts; active toasts are unaffected.
- (void)clearToastQueue {
[[self cs_toastQueue] removeAllObjects];
}
#pragma mark - Private Show/Hide Methods
// Core presentation: positions the toast, optionally wires tap-to-dismiss,
// registers it as active, and fades it in. The auto-dismiss timer is armed
// only after the fade-in completes, so `duration` counts from full visibility.
- (void)cs_showToast:(UIView *)toast duration:(NSTimeInterval)duration position:(id)position {
toast.center = [self cs_centerPointForPosition:position withToast:toast];
toast.alpha = 0.0;
if ([CSToastManager isTapToDismissEnabled]) {
UITapGestureRecognizer *recognizer = [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(cs_handleToastTapped:)];
[toast addGestureRecognizer:recognizer];
toast.userInteractionEnabled = YES;
toast.exclusiveTouch = YES;
}
[[self cs_activeToasts] addObject:toast];
[self addSubview:toast];
[UIView animateWithDuration:[[CSToastManager sharedStyle] fadeDuration]
delay:0.0
options:(UIViewAnimationOptionCurveEaseOut | UIViewAnimationOptionAllowUserInteraction)
animations:^{
toast.alpha = 1.0;
} completion:^(BOOL finished) {
// The toast itself is the timer's userInfo so cs_toastTimerDidFinish: can
// find it. NOTE: a non-repeating NSTimer retains its target (self) only
// until it fires or is invalidated. NSRunLoopCommonModes keeps the timer
// firing during scroll tracking.
NSTimer *timer = [NSTimer timerWithTimeInterval:duration target:self selector:@selector(cs_toastTimerDidFinish:) userInfo:toast repeats:NO];
[[NSRunLoop mainRunLoop] addTimer:timer forMode:NSRunLoopCommonModes];
objc_setAssociatedObject(toast, &CSToastTimerKey, timer, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
}];
}
// Programmatic/timer dismissal (didTap == NO).
- (void)cs_hideToast:(UIView *)toast {
[self cs_hideToast:toast fromTap:NO];
}
// Core dismissal: cancels the auto-dismiss timer, fades the toast out, then
// (in the animation completion, in this order) removes it from the hierarchy
// and the active list, fires the stored completion block, and presents the
// next queued toast if any. `fromTap` is forwarded to the completion block.
- (void)cs_hideToast:(UIView *)toast fromTap:(BOOL)fromTap {
NSTimer *timer = (NSTimer *)objc_getAssociatedObject(toast, &CSToastTimerKey);
[timer invalidate];
[UIView animateWithDuration:[[CSToastManager sharedStyle] fadeDuration]
delay:0.0
options:(UIViewAnimationOptionCurveEaseIn | UIViewAnimationOptionBeginFromCurrentState)
animations:^{
toast.alpha = 0.0;
} completion:^(BOOL finished) {
[toast removeFromSuperview];
// remove
[[self cs_activeToasts] removeObject:toast];
// execute the completion block, if necessary
void (^completion)(BOOL didTap) = objc_getAssociatedObject(toast, &CSToastCompletionKey);
if (completion) {
completion(fromTap);
}
if ([self.cs_toastQueue count] > 0) {
// dequeue
UIView *nextToast = [[self cs_toastQueue] firstObject];
[[self cs_toastQueue] removeObjectAtIndex:0];
// present the next toast, restoring the duration/position stashed by
// showToast:duration:position:completion: when it was enqueued
NSTimeInterval duration = [objc_getAssociatedObject(nextToast, &CSToastDurationKey) doubleValue];
id position = objc_getAssociatedObject(nextToast, &CSToastPositionKey);
[self cs_showToast:nextToast duration:duration position:position];
}
}];
}
#pragma mark - View Construction
// Builds (but does not present) a toast view: an optional image on the left,
// with title above message to its right, all inside a styled wrapper view.
// Returns nil when message, title, and image are all nil. A nil style falls
// back to the shared style. Layout is computed manually (no Auto Layout);
// the statement order below matters — each rect depends on the previous ones.
- (UIView *)toastViewForMessage:(NSString *)message title:(NSString *)title image:(UIImage *)image style:(CSToastStyle *)style {
// sanity
if (message == nil && title == nil && image == nil) return nil;
// default to the shared style
if (style == nil) {
style = [CSToastManager sharedStyle];
}
// dynamically build a toast view with any combination of message, title, & image
UILabel *messageLabel = nil;
UILabel *titleLabel = nil;
UIImageView *imageView = nil;
UIView *wrapperView = [[UIView alloc] init];
wrapperView.autoresizingMask = (UIViewAutoresizingFlexibleLeftMargin | UIViewAutoresizingFlexibleRightMargin | UIViewAutoresizingFlexibleTopMargin | UIViewAutoresizingFlexibleBottomMargin);
wrapperView.layer.cornerRadius = style.cornerRadius;
if (style.displayShadow) {
wrapperView.layer.shadowColor = style.shadowColor.CGColor;
wrapperView.layer.shadowOpacity = style.shadowOpacity;
wrapperView.layer.shadowRadius = style.shadowRadius;
wrapperView.layer.shadowOffset = style.shadowOffset;
}
wrapperView.backgroundColor = style.backgroundColor;
if(image != nil) {
imageView = [[UIImageView alloc] initWithImage:image];
imageView.contentMode = UIViewContentModeScaleAspectFit;
imageView.frame = CGRectMake(style.horizontalPadding, style.verticalPadding, style.imageSize.width, style.imageSize.height);
}
// imageRect: the image's slot at the top-left, padded; zero when no image so
// the labels start at the left padding.
CGRect imageRect = CGRectZero;
if(imageView != nil) {
imageRect.origin.x = style.horizontalPadding;
imageRect.origin.y = style.verticalPadding;
imageRect.size.width = imageView.bounds.size.width;
imageRect.size.height = imageView.bounds.size.height;
}
if (title != nil) {
titleLabel = [[UILabel alloc] init];
titleLabel.numberOfLines = style.titleNumberOfLines;
titleLabel.font = style.titleFont;
titleLabel.textAlignment = style.titleAlignment;
titleLabel.lineBreakMode = NSLineBreakByTruncatingTail;
titleLabel.textColor = style.titleColor;
titleLabel.backgroundColor = [UIColor clearColor];
titleLabel.alpha = 1.0;
titleLabel.text = title;
// size the title label according to the length of the text, capped by the
// style's max width/height percentages of self, minus the image's width
CGSize maxSizeTitle = CGSizeMake((self.bounds.size.width * style.maxWidthPercentage) - imageRect.size.width, self.bounds.size.height * style.maxHeightPercentage);
CGSize expectedSizeTitle = [titleLabel sizeThatFits:maxSizeTitle];
// UILabel can return a size larger than the max size when the number of lines is 1
expectedSizeTitle = CGSizeMake(MIN(maxSizeTitle.width, expectedSizeTitle.width), MIN(maxSizeTitle.height, expectedSizeTitle.height));
titleLabel.frame = CGRectMake(0.0, 0.0, expectedSizeTitle.width, expectedSizeTitle.height);
}
if (message != nil) {
messageLabel = [[UILabel alloc] init];
messageLabel.numberOfLines = style.messageNumberOfLines;
messageLabel.font = style.messageFont;
messageLabel.textAlignment = style.messageAlignment;
messageLabel.lineBreakMode = NSLineBreakByTruncatingTail;
messageLabel.textColor = style.messageColor;
messageLabel.backgroundColor = [UIColor clearColor];
messageLabel.alpha = 1.0;
messageLabel.text = message;
CGSize maxSizeMessage = CGSizeMake((self.bounds.size.width * style.maxWidthPercentage) - imageRect.size.width, self.bounds.size.height * style.maxHeightPercentage);
CGSize expectedSizeMessage = [messageLabel sizeThatFits:maxSizeMessage];
// UILabel can return a size larger than the max size when the number of lines is 1
expectedSizeMessage = CGSizeMake(MIN(maxSizeMessage.width, expectedSizeMessage.width), MIN(maxSizeMessage.height, expectedSizeMessage.height));
messageLabel.frame = CGRectMake(0.0, 0.0, expectedSizeMessage.width, expectedSizeMessage.height);
}
// titleRect: to the right of the image, at the top.
CGRect titleRect = CGRectZero;
if(titleLabel != nil) {
titleRect.origin.x = imageRect.origin.x + imageRect.size.width + style.horizontalPadding;
titleRect.origin.y = style.verticalPadding;
titleRect.size.width = titleLabel.bounds.size.width;
titleRect.size.height = titleLabel.bounds.size.height;
}
// messageRect: same x as the title, stacked below it.
CGRect messageRect = CGRectZero;
if(messageLabel != nil) {
messageRect.origin.x = imageRect.origin.x + imageRect.size.width + style.horizontalPadding;
messageRect.origin.y = titleRect.origin.y + titleRect.size.height + style.verticalPadding;
messageRect.size.width = messageLabel.bounds.size.width;
messageRect.size.height = messageLabel.bounds.size.height;
}
CGFloat longerWidth = MAX(titleRect.size.width, messageRect.size.width);
CGFloat longerX = MAX(titleRect.origin.x, messageRect.origin.x);
// Wrapper width uses the longerWidth or the image width, whatever is larger. Same logic applies to the wrapper height.
CGFloat wrapperWidth = MAX((imageRect.size.width + (style.horizontalPadding * 2.0)), (longerX + longerWidth + style.horizontalPadding));
CGFloat wrapperHeight = MAX((messageRect.origin.y + messageRect.size.height + style.verticalPadding), (imageRect.size.height + (style.verticalPadding * 2.0)));
wrapperView.frame = CGRectMake(0.0, 0.0, wrapperWidth, wrapperHeight);
if(titleLabel != nil) {
titleLabel.frame = titleRect;
[wrapperView addSubview:titleLabel];
}
if(messageLabel != nil) {
messageLabel.frame = messageRect;
[wrapperView addSubview:messageLabel];
}
if(imageView != nil) {
[wrapperView addSubview:imageView];
}
return wrapperView;
}
#pragma mark - Storage

// Lazily-created array of toasts currently on screen, attached to this view
// via an associated object (categories cannot add stored ivars).
- (NSMutableArray *)cs_activeToasts {
    NSMutableArray *activeToasts = objc_getAssociatedObject(self, &CSToastActiveKey);
    if (activeToasts != nil) {
        return activeToasts;
    }
    activeToasts = [NSMutableArray array];
    objc_setAssociatedObject(self, &CSToastActiveKey, activeToasts, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
    return activeToasts;
}

// Lazily-created FIFO of toasts waiting to be presented while queueing is
// enabled, stored the same way as cs_activeToasts.
- (NSMutableArray *)cs_toastQueue {
    NSMutableArray *queue = objc_getAssociatedObject(self, &CSToastQueueKey);
    if (queue != nil) {
        return queue;
    }
    queue = [NSMutableArray array];
    objc_setAssociatedObject(self, &CSToastQueueKey, queue, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
    return queue;
}
#pragma mark - Events
// Auto-dismiss: the toast to hide travels in the timer's userInfo
// (set in cs_showToast:duration:position:).
- (void)cs_toastTimerDidFinish:(NSTimer *)timer {
[self cs_hideToast:(UIView *)timer.userInfo];
}
// Tap-to-dismiss: cancel the pending auto-dismiss timer, then hide with
// fromTap:YES so the completion block receives didTap == YES.
- (void)cs_handleToastTapped:(UITapGestureRecognizer *)recognizer {
UIView *toast = recognizer.view;
NSTimer *timer = (NSTimer *)objc_getAssociatedObject(toast, &CSToastTimerKey);
[timer invalidate];
[self cs_hideToast:toast fromTap:YES];
}
#pragma mark - Activity Methods
// Shows a styled activity-indicator toast at `position`. At most one per view:
// subsequent calls are ignored until hideToastActivity runs. Operates
// independently of the regular toast queue.
- (void)makeToastActivity:(id)position {
// sanity
UIView *existingActivityView = (UIView *)objc_getAssociatedObject(self, &CSToastActivityViewKey);
if (existingActivityView != nil) return;
CSToastStyle *style = [CSToastManager sharedStyle];
UIView *activityView = [[UIView alloc] initWithFrame:CGRectMake(0.0, 0.0, style.activitySize.width, style.activitySize.height)];
activityView.center = [self cs_centerPointForPosition:position withToast:activityView];
activityView.backgroundColor = style.backgroundColor;
activityView.alpha = 0.0;
activityView.autoresizingMask = (UIViewAutoresizingFlexibleLeftMargin | UIViewAutoresizingFlexibleRightMargin | UIViewAutoresizingFlexibleTopMargin | UIViewAutoresizingFlexibleBottomMargin);
activityView.layer.cornerRadius = style.cornerRadius;
if (style.displayShadow) {
activityView.layer.shadowColor = style.shadowColor.CGColor;
activityView.layer.shadowOpacity = style.shadowOpacity;
activityView.layer.shadowRadius = style.shadowRadius;
activityView.layer.shadowOffset = style.shadowOffset;
}
// NOTE(review): UIActivityIndicatorViewStyleWhiteLarge is deprecated on
// iOS 13+ in favor of UIActivityIndicatorViewStyleLarge — confirm the
// project's minimum deployment target before changing.
UIActivityIndicatorView *activityIndicatorView = [[UIActivityIndicatorView alloc] initWithActivityIndicatorStyle:UIActivityIndicatorViewStyleWhiteLarge];
activityIndicatorView.center = CGPointMake(activityView.bounds.size.width / 2, activityView.bounds.size.height / 2);
[activityView addSubview:activityIndicatorView];
[activityIndicatorView startAnimating];
// associate the activity view with self so hideToastActivity can find it
objc_setAssociatedObject (self, &CSToastActivityViewKey, activityView, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
[self addSubview:activityView];
[UIView animateWithDuration:style.fadeDuration
delay:0.0
options:UIViewAnimationOptionCurveEaseOut
animations:^{
activityView.alpha = 1.0;
} completion:nil];
}
// Fades out and removes the activity view created by makeToastActivity:,
// then clears the associated-object reference so a new one can be shown.
// No-op when no activity view is present.
- (void)hideToastActivity {
UIView *existingActivityView = (UIView *)objc_getAssociatedObject(self, &CSToastActivityViewKey);
if (existingActivityView != nil) {
[UIView animateWithDuration:[[CSToastManager sharedStyle] fadeDuration]
delay:0.0
options:(UIViewAnimationOptionCurveEaseIn | UIViewAnimationOptionBeginFromCurrentState)
animations:^{
existingActivityView.alpha = 0.0;
} completion:^(BOOL finished) {
[existingActivityView removeFromSuperview];
objc_setAssociatedObject (self, &CSToastActivityViewKey, nil, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
}];
}
}
#pragma mark - Helpers
// Resolves `point` to a concrete center for `toast` inside self's bounds.
// Accepts one of the CSToastPosition* string constants (compared
// case-insensitively) or a CGPoint wrapped in an NSValue; anything else falls
// through to the bottom position. Top/bottom positions are inset by the
// style's vertical padding plus the safe-area inset (iOS 11+).
- (CGPoint)cs_centerPointForPosition:(id)point withToast:(UIView *)toast {
CSToastStyle *style = [CSToastManager sharedStyle];
UIEdgeInsets safeInsets = UIEdgeInsetsZero;
if (@available(iOS 11.0, *)) {
safeInsets = self.safeAreaInsets;
}
CGFloat topPadding = style.verticalPadding + safeInsets.top;
CGFloat bottomPadding = style.verticalPadding + safeInsets.bottom;
if([point isKindOfClass:[NSString class]]) {
if([point caseInsensitiveCompare:CSToastPositionTop] == NSOrderedSame) {
return CGPointMake(self.bounds.size.width / 2.0, (toast.frame.size.height / 2.0) + topPadding);
} else if([point caseInsensitiveCompare:CSToastPositionCenter] == NSOrderedSame) {
return CGPointMake(self.bounds.size.width / 2.0, self.bounds.size.height / 2.0);
}
} else if ([point isKindOfClass:[NSValue class]]) {
return [point CGPointValue];
}
// default to bottom
return CGPointMake(self.bounds.size.width / 2.0, (self.bounds.size.height - (toast.frame.size.height / 2.0)) - bottomPadding);
}
@end
@implementation CSToastStyle
#pragma mark - Constructors
// Designated initializer; populates every appearance property with the
// library's default look. Plain -init is unavailable by design.
- (instancetype)initWithDefaultStyle {
self = [super init];
if (self) {
self.backgroundColor = [[UIColor blackColor] colorWithAlphaComponent:0.8];
self.titleColor = [UIColor whiteColor];
self.messageColor = [UIColor whiteColor];
self.maxWidthPercentage = 0.8;
self.maxHeightPercentage = 0.8;
self.horizontalPadding = 10.0;
self.verticalPadding = 10.0;
self.cornerRadius = 10.0;
self.titleFont = [UIFont boldSystemFontOfSize:16.0];
self.messageFont = [UIFont systemFontOfSize:16.0];
self.titleAlignment = NSTextAlignmentLeft;
self.messageAlignment = NSTextAlignmentLeft;
// 0 means "no line limit" for UILabel.
self.titleNumberOfLines = 0;
self.messageNumberOfLines = 0;
self.displayShadow = NO;
self.shadowOpacity = 0.8;
self.shadowRadius = 6.0;
self.shadowOffset = CGSizeMake(4.0, 4.0);
self.imageSize = CGSizeMake(80.0, 80.0);
self.activitySize = CGSizeMake(100.0, 100.0);
self.fadeDuration = 0.2;
}
return self;
}
// Clamps to [0.0, 1.0]; the percentage is relative to the superview's width.
- (void)setMaxWidthPercentage:(CGFloat)maxWidthPercentage {
_maxWidthPercentage = MAX(MIN(maxWidthPercentage, 1.0), 0.0);
}
// Clamps to [0.0, 1.0]; the percentage is relative to the superview's height.
- (void)setMaxHeightPercentage:(CGFloat)maxHeightPercentage {
_maxHeightPercentage = MAX(MIN(maxHeightPercentage, 1.0), 0.0);
}
- (instancetype)init NS_UNAVAILABLE {
return nil;
}
@end
// Private read-write storage backing CSToastManager's class-level accessors.
@interface CSToastManager ()
@property (strong, nonatomic) CSToastStyle *sharedStyle;
@property (assign, nonatomic, getter=isTapToDismissEnabled) BOOL tapToDismissEnabled;
@property (assign, nonatomic, getter=isQueueEnabled) BOOL queueEnabled;
@property (assign, nonatomic) NSTimeInterval defaultDuration;
// Either an NSString (CSToastPosition* constant) or an NSValue-wrapped CGPoint.
@property (strong, nonatomic) id defaultPosition;
@end
@implementation CSToastManager

#pragma mark - Constructors

// Process-wide singleton backing every class-level accessor below.
+ (instancetype)sharedManager {
    static CSToastManager *instance;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        instance = [[self alloc] init];
    });
    return instance;
}

// Seeds the manager with the library defaults.
- (instancetype)init {
    if ((self = [super init])) {
        self.sharedStyle = [[CSToastStyle alloc] initWithDefaultStyle];
        self.tapToDismissEnabled = YES;
        self.queueEnabled = NO;
        self.defaultDuration = 3.0;
        self.defaultPosition = CSToastPositionBottom;
    }
    return self;
}

#pragma mark - Singleton Methods

// The class-level API simply forwards to the shared instance.

+ (void)setSharedStyle:(CSToastStyle *)sharedStyle {
    [self sharedManager].sharedStyle = sharedStyle;
}

+ (CSToastStyle *)sharedStyle {
    return [self sharedManager].sharedStyle;
}

+ (void)setTapToDismissEnabled:(BOOL)tapToDismissEnabled {
    [self sharedManager].tapToDismissEnabled = tapToDismissEnabled;
}

+ (BOOL)isTapToDismissEnabled {
    return [self sharedManager].isTapToDismissEnabled;
}

+ (void)setQueueEnabled:(BOOL)queueEnabled {
    [self sharedManager].queueEnabled = queueEnabled;
}

+ (BOOL)isQueueEnabled {
    return [self sharedManager].isQueueEnabled;
}

+ (void)setDefaultDuration:(NSTimeInterval)duration {
    [self sharedManager].defaultDuration = duration;
}

+ (NSTimeInterval)defaultDuration {
    return [self sharedManager].defaultDuration;
}

// Only NSString position constants and NSValue-wrapped CGPoints are accepted;
// any other value is silently ignored.
+ (void)setDefaultPosition:(id)position {
    if ([position isKindOfClass:[NSString class]] || [position isKindOfClass:[NSValue class]]) {
        [self sharedManager].defaultPosition = position;
    }
}

+ (id)defaultPosition {
    return [self sharedManager].defaultPosition;
}

@end

BIN
Libraries/baselib.a Normal file

Binary file not shown.

View File

@@ -0,0 +1,10 @@
#import "UnityAds/UnityAds.h"
// C callbacks invoked from the initialization delegate; `initListener` is the
// opaque listener pointer handed out by UnityAdsInitializationListenerCreate.
typedef void (*InitSuccessCallback)(void *initListener);
typedef void (*InitFailCallback)(void *initListener, int error, const char *message);
// Bridges UnityAdsInitializationDelegate callbacks to C function pointers.
// `assign` is correct here: these are plain function pointers, not objects.
@interface UnityAdsInitializationListener : NSObject <UnityAdsInitializationDelegate>
@property (assign) InitSuccessCallback initSuccessCallback;
@property (assign) InitFailCallback initFailCallback;
- (id)initWithSuccessCallback:(InitSuccessCallback)initSuccessCallback failCallback:(InitFailCallback)initFailCallback;
@end

View File

@@ -0,0 +1,50 @@
#import "UnityAdsInitializationListener.h"
@implementation UnityAdsInitializationListener

// Stores the C function pointers that are invoked when the Unity Ads SDK
// finishes (or fails) initialization.
- (id)initWithSuccessCallback:(InitSuccessCallback)initSuccessCallback failCallback:(InitFailCallback)initFailCallback {
    if ((self = [super init])) {
        _initSuccessCallback = initSuccessCallback;
        _initFailCallback = initFailCallback;
    }
    return self;
}

#pragma mark - UnityAdsInitializationDelegate

// Forwards the success notification; `self` is passed back as the opaque
// listener handle.
- (void)initializationComplete {
    InitSuccessCallback callback = self.initSuccessCallback;
    if (callback != NULL) {
        callback((__bridge void *)self);
    }
}

// Forwards the failure code and message; the UTF-8 pointer is only valid for
// the duration of the call.
- (void)initializationFailed:(UnityAdsInitializationError)error withMessage:(NSString *)message {
    InitFailCallback callback = self.initFailCallback;
    if (callback != NULL) {
        callback((__bridge void *)self, (int)error, [message UTF8String]);
    }
}

@end
#ifdef __cplusplus
extern "C" {
#endif
// Creates a retained initialization listener; ownership passes to the managed
// caller as an opaque pointer (release with UnityAdsInitializationListenerDestroy).
void * UnityAdsInitializationListenerCreate(InitSuccessCallback initSuccessCallback, InitFailCallback initFailCallback) {
UnityAdsInitializationListener *listener = [[UnityAdsInitializationListener alloc] initWithSuccessCallback:initSuccessCallback failCallback:initFailCallback];
return (__bridge_retained void *)listener;
}
// Clears the callbacks, then __bridge_transfer moves the retained reference
// back under ARC, which releases the listener when `listener` goes out of scope.
void UnityAdsInitializationListenerDestroy(void *ptr) {
if (!ptr) return;
UnityAdsInitializationListener *listener = (__bridge_transfer UnityAdsInitializationListener *)ptr;
listener.initSuccessCallback = nil;
listener.initFailCallback = nil;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,10 @@
#import "UnityAds/UnityAds.h"
// C callbacks invoked when a placement load succeeds or fails; `loadListener`
// is the opaque pointer returned by UnityAdsLoadListenerCreate.
typedef void (*LoadSuccessCallback)(void *loadListener, const char *placementId);
typedef void (*LoadFailureCallback)(void *loadListener, const char *placementId, int error, const char *message);
// Bridges UnityAdsLoadDelegate callbacks to C function pointers
// (plain function pointers, hence `assign`).
@interface UnityAdsLoadListener : NSObject <UnityAdsLoadDelegate>
@property (assign) LoadSuccessCallback loadSuccessCallback;
@property (assign) LoadFailureCallback loadFailureCallback;
- (id)initWithSuccessCallback:(LoadSuccessCallback)loadSuccessCallback failCallback:(LoadFailureCallback)loadFailureCallback;
@end

View File

@@ -0,0 +1,50 @@
#import "UnityAdsLoadListener.h"
@implementation UnityAdsLoadListener
// Stores the C callbacks invoked when a placement load succeeds or fails.
- (id)initWithSuccessCallback:(LoadSuccessCallback)loadSuccessCallback failCallback:(LoadFailureCallback)loadFailureCallback {
self = [super init];
if (self) {
self.loadSuccessCallback = loadSuccessCallback;
self.loadFailureCallback = loadFailureCallback;
}
return self;
}
// UnityAdsLoadDelegate: forwards the failure to managed code. The UTF-8
// pointers are only valid for the duration of the call.
- (void)unityAdsAdFailedToLoad:(NSString *)placementId withError:(UnityAdsLoadError)error withMessage:(NSString *)message {
if (self.loadFailureCallback) {
self.loadFailureCallback((__bridge void *)self, [placementId UTF8String], (int)error, [message UTF8String]);
}
}
// UnityAdsLoadDelegate: forwards the successful load to managed code.
- (void)unityAdsAdLoaded:(NSString *)placementId {
if (self.loadSuccessCallback) {
self.loadSuccessCallback((__bridge void *)self, [placementId UTF8String]);
}
}
@end
#ifdef __cplusplus
extern "C" {
#endif
// Creates a retained load listener; ownership passes to the managed caller as
// an opaque pointer (release with UnityAdsLoadListenerDestroy).
void * UnityAdsLoadListenerCreate(LoadSuccessCallback loadSuccessCallback, LoadFailureCallback loadFailureCallback) {
UnityAdsLoadListener *listener = [[UnityAdsLoadListener alloc] initWithSuccessCallback:loadSuccessCallback failCallback:loadFailureCallback];
return (__bridge_retained void *)listener;
}
// Clears the callbacks, then lets ARC release the transferred reference.
void UnityAdsLoadListenerDestroy(void *ptr) {
if (!ptr) return;
UnityAdsLoadListener *listener = (__bridge_transfer UnityAdsLoadListener *)ptr;
listener.loadSuccessCallback = nil;
listener.loadFailureCallback = nil;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,15 @@
#import <Foundation/Foundation.h>
#ifdef __cplusplus
extern "C" {
#endif
// Releases an object previously handed to managed code via __bridge_retained:
// __bridge_transfer moves ownership back under ARC, which releases the
// temporary at the end of the statement. No-op for NULL.
void UnityAdsBridgeTransfer(void *x) {
if (!x) return;
(__bridge_transfer id)x;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,6 @@
// C callback types bridged to managed code for the Unity Ads purchasing flow.
typedef void (*UnityAdsPurchasingDidInitiatePurchasingCommandCallback)(const char * eventString);
typedef void (*UnityAdsPurchasingGetProductCatalogCallback)();
typedef void (*UnityAdsPurchasingGetPurchasingVersionCallback)();
typedef void (*UnityAdsPurchasingInitializeCallback)();
// Installs the UADSPurchasing delegate; safe to call repeatedly (idempotent).
void InitializeUnityAdsPurchasingWrapper();

View File

@@ -0,0 +1,71 @@
#import "UnityAds/UADSPurchasing.h"
#import "UnityAdsPurchasingWrapper.h"
#import "UnityAdsUtilities.h"
static UnityAdsPurchasingDidInitiatePurchasingCommandCallback iapCommandCallback = NULL;
static UnityAdsPurchasingGetProductCatalogCallback iapCatalogCallback = NULL;
static UnityAdsPurchasingGetPurchasingVersionCallback iapVersionCallback = NULL;
static UnityAdsPurchasingInitializeCallback iapInitializeCallback = NULL;
// Forwards UADSPurchasing delegate events to the C callbacks registered via
// the UnityAdsSet*Callback functions in this file.
@interface UnityAdsPurchasingWrapperDelegate : NSObject <UADSPurchasingDelegate>
@end
@implementation UnityAdsPurchasingWrapperDelegate
- (void)unityAdsPurchasingGetProductCatalog {
if(iapCatalogCallback != NULL) {
iapCatalogCallback();
}
}
- (void)unityAdsPurchasingGetPurchasingVersion {
if(iapVersionCallback != NULL) {
iapVersionCallback();
}
}
- (void)unityAdsPurchasingInitialize {
if(iapInitializeCallback != NULL) {
iapInitializeCallback();
}
}
- (void)unityAdsPurchasingDidInitiatePurchasingCommand:(NSString *)eventString {
if(iapCommandCallback != NULL) {
// Copy so the callback receives a stable buffer; freed after the call.
const char * rawEventString = UnityAdsCopyString([eventString UTF8String]);
iapCommandCallback(rawEventString);
free((void *)rawEventString);
}
}
@end
// Installs a process-lifetime purchasing delegate exactly once; subsequent
// calls are no-ops because `delegate` is a function-local static.
void InitializeUnityAdsPurchasingWrapper() {
static id<UADSPurchasingDelegate> delegate = nil;
if (delegate == nil) {
delegate = [[UnityAdsPurchasingWrapperDelegate alloc] init];
[UADSPurchasing initialize:delegate];
}
}
// Registration points for the managed-side callback function pointers.
void UnityAdsSetDidInitiatePurchasingCommandCallback(UnityAdsPurchasingDidInitiatePurchasingCommandCallback callback) {
iapCommandCallback = callback;
}
void UnityAdsSetGetProductCatalogCallback(UnityAdsPurchasingGetProductCatalogCallback callback) {
iapCatalogCallback = callback;
}
void UnityAdsSetGetVersionCallback(UnityAdsPurchasingGetPurchasingVersionCallback callback) {
iapVersionCallback = callback;
}
void UnityAdsSetInitializePurchasingCallback(UnityAdsPurchasingInitializeCallback callback) {
iapInitializeCallback = callback;
}
// Sends a reply event back to the SDK; a NULL payload is treated as empty.
void UnityAdsPurchasingDispatchReturnEvent(UnityAdsPurchasingEvent event, const char * payload) {
if (payload == NULL) {
payload = "";
}
[UADSPurchasing dispatchReturnEvent:event withPayload:[NSString stringWithUTF8String:payload]];
}

View File

@@ -0,0 +1,15 @@
#import "UnityAds/UnityAds.h"
// C callbacks for show lifecycle events; the void* argument is the opaque
// pointer returned by UnityAdsShowListenerCreate.
typedef void (*ShowFailureCallback)(void *showlistener, const char *placementId, int error, const char *message);
typedef void (*ShowStartCallback)(void *showListener, const char *placementId);
typedef void (*ShowClickCallback)(void *showListener, const char *placementId);
typedef void (*ShowCompleteCallback)(void *showListener, const char *placementId, int completionState);
// Bridges UnityAdsShowDelegate callbacks to C function pointers
// (plain function pointers, hence `assign`).
@interface UnityAdsShowListener : NSObject <UnityAdsShowDelegate>
@property (assign) ShowFailureCallback showFailureCallback;
@property (assign) ShowStartCallback showStartCallback;
@property (assign) ShowClickCallback showClickCallback;
@property (assign) ShowCompleteCallback showCompleteCallback;
- (id)initWithFailureCallback:(ShowFailureCallback)showFailureCallback startCallback:(ShowStartCallback)showStartCallback clickCallback:(ShowClickCallback)showClickCallback completeCallback:(ShowCompleteCallback)showCompleteCallback;
@end

View File

@@ -0,0 +1,65 @@
#import "UnityAdsShowListener.h"
@implementation UnityAdsShowListener
// Stores the C callbacks fired for show lifecycle events.
- (id)initWithFailureCallback:(ShowFailureCallback)showFailureCallback startCallback:(ShowStartCallback)showStartCallback clickCallback:(ShowClickCallback)showClickCallback completeCallback:(ShowCompleteCallback)showCompleteCallback {
self = [super init];
if (self) {
self.showFailureCallback = showFailureCallback;
self.showStartCallback = showStartCallback;
self.showClickCallback = showClickCallback;
self.showCompleteCallback = showCompleteCallback;
}
return self;
}
// UnityAdsShowDelegate callbacks below. UTF-8 pointers passed to managed
// code are only valid for the duration of each call.
- (void)unityAdsShowFailed:(NSString *)placementId withError:(UnityAdsShowError)error withMessage:(NSString *)message {
if (self.showFailureCallback) {
self.showFailureCallback((__bridge void *)self, [placementId UTF8String], (int)error, [message UTF8String]);
}
}
- (void)unityAdsShowStart:(NSString *)placementId {
if (self.showStartCallback) {
self.showStartCallback((__bridge void *)self, [placementId UTF8String]);
}
}
- (void)unityAdsShowClick:(NSString *)placementId {
if (self.showClickCallback) {
self.showClickCallback((__bridge void *)self, [placementId UTF8String]);
}
}
- (void)unityAdsShowComplete:(NSString *)placementId withFinishState:(UnityAdsShowCompletionState)state {
if (self.showCompleteCallback) {
self.showCompleteCallback((__bridge void *)self, [placementId UTF8String], (int)state);
}
}
@end
#ifdef __cplusplus
extern "C" {
#endif
// Creates a retained show listener; release with UnityAdsShowListenerDestroy.
void * UnityAdsShowListenerCreate(ShowFailureCallback showFailureCallback, ShowStartCallback showStartCallback, ShowClickCallback showClickCallback, ShowCompleteCallback showCompleteCallback) {
UnityAdsShowListener *listener = [[UnityAdsShowListener alloc] initWithFailureCallback:showFailureCallback startCallback:showStartCallback clickCallback:showClickCallback completeCallback:showCompleteCallback];
return (__bridge_retained void *)listener;
}
// Clears the callbacks, then lets ARC release the transferred reference.
void UnityAdsShowListenerDestroy(void *ptr) {
if (!ptr) return;
UnityAdsShowListener *listener = (__bridge_transfer UnityAdsShowListener *)ptr;
listener.showFailureCallback = nil;
listener.showStartCallback = nil;
listener.showClickCallback = nil;
listener.showCompleteCallback = nil;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,160 @@
#import "UnityAppController.h"
#import "Unity/UnityInterface.h"
#import "UnityAds/UnityAds.h"
#import <UnityAds/UADSBanner.h>
#import "UnityAds/UADSMetaData.h"
#import "UnityAdsUtilities.h"
#import "UnityAdsPurchasingWrapper.h"
#import "UnityAdsInitializationListener.h"
#import "UnityAdsLoadListener.h"
#import "UnityAdsShowListener.h"
#import <UnityAds/UnityAdsFinishState.h>
typedef void (*UnityAdsReadyCallback)(const char * placementId);
typedef void (*UnityAdsDidErrorCallback)(long rawError, const char * message);
typedef void (*UnityAdsDidStartCallback)(const char * placementId);
typedef void (*UnityAdsDidFinishCallback)(const char * placementId, long rawFinishState);
static UnityAdsReadyCallback readyCallback = NULL;
static UnityAdsDidErrorCallback errorCallback = NULL;
static UnityAdsDidStartCallback startCallback = NULL;
static UnityAdsDidFinishCallback finishCallback = NULL;
// Legacy (pre-load-API) delegate bridging UnityAdsDelegate events to the C
// callbacks registered via the UnityAdsSet*Callback functions below.
@interface UnityAdsUnityWrapperDelegate : NSObject <UnityAdsDelegate>
@end
@implementation UnityAdsUnityWrapperDelegate
- (void)unityAdsReady:(NSString *)placementId {
if(readyCallback != NULL) {
// Copy so the callback receives a stable buffer; freed after the call.
const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
readyCallback(rawPlacementId);
free((void *)rawPlacementId);
}
}
- (void)unityAdsDidError:(UnityAdsError)error withMessage:(NSString *)message {
if(errorCallback != NULL) {
const char * rawMessage = UnityAdsCopyString([message UTF8String]);
errorCallback(error, rawMessage);
free((void *)rawMessage);
}
}
// Pauses the Unity player while the ad is on screen.
- (void)unityAdsDidStart:(NSString *)placementId {
UnityPause(1);
if(startCallback != NULL) {
const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
startCallback(rawPlacementId);
free((void *)rawPlacementId);
}
}
// Resumes the Unity player once the ad finishes, for any finish state.
- (void)unityAdsDidFinish:(NSString *)placementId withFinishState:(UnityAdsFinishState)state {
UnityPause(0);
if(finishCallback != NULL) {
const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
finishCallback(rawPlacementId, state);
free((void *)rawPlacementId);
}
}
@end
// Initializes the Unity Ads SDK. `listenerPtr` is an opaque
// UnityAdsInitializationListener created on the managed side (may be NULL).
// Also installs the purchasing wrapper delegate.
void UnityAdsInitialize(const char * gameId, bool testMode, bool enablePerPlacementLoad, void *listenerPtr) {
UnityAdsInitializationListener *listener = listenerPtr ? (__bridge UnityAdsInitializationListener *)listenerPtr : nil;
[UnityAds initialize:[NSString stringWithUTF8String:gameId] testMode:testMode enablePerPlacementLoad:enablePerPlacementLoad initializationDelegate:listener];
InitializeUnityAdsPurchasingWrapper();
}
// Loads a placement; results arrive on the supplied (opaque) load listener.
void UnityAdsLoad(const char * placementId, void *listenerPtr) {
UnityAdsLoadListener *listener = listenerPtr ? (__bridge UnityAdsLoadListener *)listenerPtr : nil;
[UnityAds load:[NSString stringWithUTF8String:placementId] loadDelegate:listener];
}
// Shows a placement over Unity's GL view controller; NULL placementId maps
// to the empty string via NSSTRING_OR_EMPTY.
void UnityAdsShow(const char * placementId, void *listenerPtr) {
UnityAdsShowListener *listener = listenerPtr ? (__bridge UnityAdsShowListener *)listenerPtr : nil;
[UnityAds show:UnityGetGLViewController() placementId:NSSTRING_OR_EMPTY(placementId) showDelegate:listener];
}
// Returns the SDK's default placement id as a malloc'd string (caller frees).
// UADSPlacement.getDefaultPlacement is resolved reflectively — presumably to
// tolerate SDK builds where the class is absent; TODO confirm. Falls back to
// a copy of the empty string.
const char *UnityAdsGetDefaultPlacementID() {
NSString *returnedPlacementID = @"";
id placement = NSClassFromString(@"UADSPlacement");
if (placement) {
SEL getPlacementSelector = NSSelectorFromString(@"getDefaultPlacement");
if ([placement respondsToSelector:getPlacementSelector]) {
// Call through the IMP to avoid performSelector's ARC warnings.
IMP getPlacementIMP = [placement methodForSelector:getPlacementSelector];
id (*getPlacementFunc)(void) = (void *) getPlacementIMP;
NSString *placementString = getPlacementFunc();
if (placementString != NULL) {
returnedPlacementID = placementString;
}
}
}
return CStringFromNSString(returnedPlacementID);
}
// Thin C wrappers over UnityAds class-level accessors.
bool UnityAdsGetDebugMode() {
return [UnityAds getDebugMode];
}
void UnityAdsSetDebugMode(bool debugMode) {
[UnityAds setDebugMode:debugMode];
}
bool UnityAdsIsSupported() {
return [UnityAds isSupported];
}
// NULL placementId queries overall readiness instead of a single placement.
bool UnityAdsIsReady(const char * placementId) {
if(placementId == NULL) {
return [UnityAds isReady];
} else {
return [UnityAds isReady:[NSString stringWithUTF8String:placementId]];
}
}
// NULL placementId queries the default placement's state.
long UnityAdsGetPlacementState(const char * placementId) {
if(placementId == NULL) {
return [UnityAds getPlacementState];
} else {
return [UnityAds getPlacementState:[NSString stringWithUTF8String:placementId]];
}
}
// Returns a malloc'd version string; caller must free.
const char * UnityAdsGetVersion() {
return UnityAdsCopyString([[UnityAds getVersion] UTF8String]);
}
bool UnityAdsIsInitialized() {
return [UnityAds isInitialized];
}
// Commits a category of metadata to the Unity Ads SDK. `data` must be a JSON
// object string; each top-level key/value pair is forwarded to UADSMetaData.
// Invalid JSON — or JSON whose root is not an object (e.g. an array) — is
// ignored instead of crashing on objectForKey:/fast enumeration.
void UnityAdsSetMetaData(const char * category, const char * data) {
    if (category == NULL || data == NULL) {
        return;
    }
    NSData *jsonData = [[NSString stringWithUTF8String:data] dataUsingEncoding:NSUTF8StringEncoding];
    NSError *error = nil;
    id json = [NSJSONSerialization JSONObjectWithData:jsonData options:0 error:&error];
    // Guard against parse failures and non-dictionary roots.
    if (error != nil || ![json isKindOfClass:[NSDictionary class]]) {
        return;
    }
    UADSMetaData *metaData = [[UADSMetaData alloc] initWithCategory:[NSString stringWithUTF8String:category]];
    for (id key in json) {
        [metaData set:key value:[json objectForKey:key]];
    }
    [metaData commit];
}
// Registration points for the legacy delegate's C callbacks.
void UnityAdsSetReadyCallback(UnityAdsReadyCallback callback) {
readyCallback = callback;
}
void UnityAdsSetDidErrorCallback(UnityAdsDidErrorCallback callback) {
errorCallback = callback;
}
void UnityAdsSetDidStartCallback(UnityAdsDidStartCallback callback) {
startCallback = callback;
}
void UnityAdsSetDidFinishCallback(UnityAdsDidFinishCallback callback) {
finishCallback = callback;
}

View File

@@ -0,0 +1,23 @@
// Returns a malloc'd copy of `string`; the caller is responsible for free().
const char * UnityAdsCopyString(const char * string);
/**
 * Returns the length (in UTF-16 code units) of a NUL-terminated Il2CppString.
 */
size_t Il2CppStringLen(const ushort* str);
/**
 * Converts a NUL-terminated UTF-16 (Il2Cpp) string to an NSString.
 */
NSString* NSStringFromIl2CppString(const ushort* str);
/**
 * Converts a C string to an NSString. Returns nil for NULL input.
 */
NSString* NSStringFromCString(const char* string);
/**
 * Converts an NSString to a malloc'd C string (caller frees). NULL for nil input.
 */
const char * CStringFromNSString(const NSString * string);
// Yields an NSString for a possibly-NULL C string, falling back to @"".
#define NSSTRING_OR_EMPTY(string) NSStringFromCString(string) ?: @""

View File

@@ -0,0 +1,38 @@
// Returns a malloc'd copy of `string`; the caller is responsible for free().
// FIX: returns NULL for NULL input (previously strlen crashed) and tolerates
// malloc failure instead of writing through a NULL pointer.
const char * UnityAdsCopyString(const char * string) {
    if (string == NULL) {
        return NULL;
    }
    size_t size = strlen(string) + 1;
    char * copy = (char *)malloc(size);
    if (copy != NULL) {
        memcpy(copy, string, size);
    }
    return copy;
}
/**
 * Returns the number of UTF-16 code units in a NUL-terminated Il2CppString
 * (the terminator is not counted).
 */
size_t Il2CppStringLen(const ushort* str) {
    size_t length = 0;
    while (str[length] != 0) {
        length++;
    }
    return length;
}
/**
 * Converts a NUL-terminated UTF-16LE (Il2Cpp) string to an NSString.
 */
NSString* NSStringFromIl2CppString(const ushort* str) {
size_t len = Il2CppStringLen(str);
return [[NSString alloc] initWithBytes:(const void*)str
length:sizeof(ushort) * len
encoding:NSUTF16LittleEndianStringEncoding];
}
/**
 * Converts an NSString to a malloc'd C string; the caller must free().
 * Returns NULL for nil input.
 */
const char * CStringFromNSString(const NSString * string) {
return string != NULL ? UnityAdsCopyString([string UTF8String]) : NULL;
}
/**
 * Converts a C string to an NSString. Returns nil for NULL input.
 */
NSString* NSStringFromCString(const char* string) {
return string != NULL ? [NSString stringWithUTF8String: string] : NULL;
}

View File

@@ -0,0 +1,31 @@
#import <UnityAds/UANAApiAnalytics.h>
#import "UnityAdsUtilities.h"
typedef void (*UANAEngineTriggerAddExtras)(const char *payload);
static UANAEngineTriggerAddExtras triggerAddExtras = NULL;
// Registers the managed callback that receives analytics "extras" payloads.
void UANAEngineDelegateSetTriggerAddExtras(UANAEngineTriggerAddExtras trigger) {
triggerAddExtras = trigger;
}
// Forwards UANAEngineDelegate addExtras events to the registered C callback.
@interface UANAEngineWrapper : NSObject <UANAEngineDelegate>
@end
@implementation UANAEngineWrapper
- (void)addExtras:(NSString *)extras {
if (triggerAddExtras) {
// Copy so the callback receives a stable buffer; freed after the call.
const char * rawExtrasString = UnityAdsCopyString([extras UTF8String]);
triggerAddExtras(rawExtrasString);
free((void *)rawExtrasString);
}
}
@end
// Installs a process-lifetime analytics delegate exactly once.
void InitializeUANAEngineWrapper() {
static id<UANAEngineDelegate> delegate = nil;
if (delegate == nil) {
delegate = [[UANAEngineWrapper alloc] init];
[UANAApiAnalytics setAnalyticsDelegate:delegate];
}
}

View File

@@ -0,0 +1,140 @@
#import <UnityAds/UADSBanner.h>
#import "UnityAdsUtilities.h"
typedef void (*UnityAdsBannerShowCallback)(const char* placementId);
typedef void (*UnityAdsBannerHideCallback)(const char* placementId);
typedef void (*UnityAdsBannerClickCallback)(const char* placementId);
typedef void (*UnityAdsBannerUnloadCallback)(const char* placementId);
typedef void (*UnityAdsBannerLoadCallback)(const char* placementId);
typedef void (*UnityAdsBannerErrorCallback)(const char* message);
static UnityAdsBannerShowCallback bannerShowCallback = NULL;
static UnityAdsBannerHideCallback bannerHideCallback = NULL;
static UnityAdsBannerClickCallback bannerClickCallback = NULL;
static UnityAdsBannerErrorCallback bannerErrorCallback = NULL;
static UnityAdsBannerLoadCallback bannerLoadCallback = NULL;
static UnityAdsBannerUnloadCallback bannerUnloadCallback = NULL;
static UIView* s_banner;
static bool s_showAfterLoad;
// Bridges UnityAdsBannerDelegate events to the C callbacks registered via the
// UnityAdsSetBanner*Callback functions; caches the loaded banner view in
// s_banner for use by UnityAdsBannerShow / UnityAdsBannerHide.
@interface UnityBannersUnityWrapper : NSObject<UnityAdsBannerDelegate>
@end
@implementation UnityBannersUnityWrapper
- (void)unityAdsBannerDidError:(NSString *)message {
    if (bannerErrorCallback != NULL) {
        const char * rawMessage = UnityAdsCopyString([message UTF8String]);
        bannerErrorCallback(rawMessage);
        free((void *)rawMessage);
    }
}
- (void)unityAdsBannerDidHide:(NSString *)placementId {
    if (bannerHideCallback != NULL) {
        const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
        bannerHideCallback(rawPlacementId);
        free((void *)rawPlacementId);
    }
}
- (void)unityAdsBannerDidClick:(NSString *)placementId {
    if (bannerClickCallback != NULL) {
        const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
        bannerClickCallback(rawPlacementId);
        free((void *)rawPlacementId);
    }
}
- (void)unityAdsBannerDidShow:(NSString *)placementId {
    if (bannerShowCallback != NULL) {
        const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
        bannerShowCallback(rawPlacementId);
        free((void *)rawPlacementId);
    }
}
// Caches the freshly loaded banner view; if a show was requested before the
// load finished, attaches it to the Unity view hierarchy right away.
- (void)unityAdsBannerDidLoad:(NSString *)placementId view:(UIView*)view {
    s_banner = view;
    const char * rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
    if (bannerLoadCallback != NULL) {
        bannerLoadCallback(rawPlacementId);
    }
    if (s_showAfterLoad) {
        s_showAfterLoad = false;
        UIView *container = UnityGetGLViewController().view;
        [container addSubview:s_banner];
        // FIX: guard against an unregistered show callback (previously an
        // unconditional call through a possibly-NULL function pointer).
        if (bannerShowCallback != NULL) {
            bannerShowCallback(rawPlacementId);
        }
    }
    // FIX: free exactly once, after all uses. The string was previously freed
    // inside the load-callback branch and then passed to the show callback
    // (use-after-free), and leaked when no load callback was registered.
    free((void *)rawPlacementId);
}
- (void)unityAdsBannerDidUnload:(NSString *)placementId {
    // Intentionally empty.
}
@end
// Shows the banner for `placementId`. When no banner has been loaded yet the
// load is started and, if `showAfterLoad` is set, the banner is attached from
// the load callback; otherwise a cached, detached banner is re-attached.
void UnityAdsBannerShow(const char * placementId, bool showAfterLoad) {
    if (s_banner == nil) {
        s_showAfterLoad = showAfterLoad;
        if (placementId == NULL) {
            [UnityAdsBanner loadBanner];
        } else {
            [UnityAdsBanner loadBanner:[NSString stringWithUTF8String:placementId]];
        }
    } else if (s_banner.superview == nil) {
        UIView *container = UnityGetGLViewController().view;
        [container addSubview:s_banner];
        // FIX: the show callback may not have been registered; previously this
        // was an unconditional call through a possibly-NULL function pointer.
        if (bannerShowCallback != NULL) {
            bannerShowCallback(placementId);
        }
    }
}
// Hides the banner. With `shouldDestroy` the native banner is torn down and
// the cached view released; otherwise the view is only detached for reuse.
void UnityAdsBannerHide(bool shouldDestroy) {
if (shouldDestroy) {
[UnityAdsBanner destroy];
s_banner = nil;
} else {
if (s_banner != nil && s_banner.superview != nil) {
[s_banner removeFromSuperview];
}
}
}
// True once a banner view has been delivered to unityAdsBannerDidLoad:view:.
bool UnityAdsBannerIsLoaded() {
return s_banner != nil;
}
// `position` maps directly onto the UnityAdsBannerPosition enum.
void UnityAdsBannerSetPosition(int position) {
[UnityAdsBanner setBannerPosition:(UnityAdsBannerPosition)position];
}
// Registration points for the banner lifecycle C callbacks.
void UnityAdsSetBannerShowCallback(UnityAdsBannerShowCallback callback) {
bannerShowCallback = callback;
}
void UnityAdsSetBannerHideCallback(UnityAdsBannerHideCallback callback) {
bannerHideCallback = callback;
}
void UnityAdsSetBannerClickCallback(UnityAdsBannerClickCallback callback) {
bannerClickCallback = callback;
}
void UnityAdsSetBannerErrorCallback(UnityAdsBannerErrorCallback callback) {
bannerErrorCallback = callback;
}
void UnityAdsSetBannerUnloadCallback(UnityAdsBannerUnloadCallback callback) {
bannerUnloadCallback = callback;
}
void UnityAdsSetBannerLoadCallback(UnityAdsBannerLoadCallback callback) {
bannerLoadCallback = callback;
}
// Creates the shared banner delegate once, but (re)installs it on every call.
void UnityBannerInitialize() {
static UnityBannersUnityWrapper* delegate = nil;
if (delegate == nil) {
delegate = [[UnityBannersUnityWrapper alloc] init];
}
[UnityAdsBanner setDelegate:delegate];
}

View File

@@ -0,0 +1,19 @@
#import <UnityAds/UPURTransactionDetails.h>
#import <UnityAds/UPURTransactionErrorDetails.h>
#import <UnityAds/UMONCustomEvent.h>
NS_ASSUME_NONNULL_BEGIN
// JSON deserialization helpers for the Unity purchasing bridge. Each builder
// expects a JSON object string and returns nil on failure, reporting the
// reason through the `error` out-parameter.
@interface UPURTransactionDetails (UnityJsonAdditions)
+(nullable instancetype)buildWithJson:(NSString *)json error:(NSError **)error;
@end
@interface UPURTransactionErrorDetails (UnityJsonAdditions)
+(nullable instancetype)buildWithJson:(NSString *)json error:(NSError **)error;
@end
@interface UMONCustomEvent (UnityJsonAdditions)
+(nullable instancetype)buildWithJson:(NSString *)json error: (NSError **)error;
@end
NS_ASSUME_NONNULL_END

View File

@@ -0,0 +1,139 @@
#import "UnityJsonAdditions.h"
NSString *const NSUnityPurchasingTransactionDetailErrorDomain = @"NSUPURTransactionDetailErrorDomain";
NSString *const NSUnityPurchasingTransactionErrorDetailErrorDomain = @"NSUPURTransactionErrorDetailErrorDomain";
// Maps a managed-side transaction error name onto its UPURTransactionError
// value; nil or unrecognized names map to kUPURTransactionErrorUnknownError.
// Matching is exact and case-sensitive, as on the managed side.
UPURTransactionError UPURTransactionErrorFromNSString(NSString *error) {
    if (error == nil) {
        return kUPURTransactionErrorUnknownError;
    }
    NSDictionary<NSString *, NSNumber *> *mapping = @{
        @"NotSupported": @(kUPURTransactionErrorNotSupported),
        @"Item_Unavailable": @(kUPURTransactionErrorItemUnavailable),
        @"UserCancelled": @(kUPURTransactionErrorUserCancelled),
        @"NetworkError": @(kUPURTransactionErrorNetworkError),
        @"ServerError": @(kUPURTransactionErrorServerError),
        @"UnknownError": @(kUPURTransactionErrorUnknownError),
    };
    NSNumber *match = mapping[error];
    return match != nil ? (UPURTransactionError)match.integerValue : kUPURTransactionErrorUnknownError;
}
// Maps a managed-side store name onto its UPURStore value; nil or
// unrecognized names fall back to kUPURStoreNotSpecified. Matching is exact
// and case-sensitive.
UPURStore UPURStoreFromNSString(NSString *store) {
if (store) {
if ([store isEqualToString:@"GooglePlay"]) {
return kUPURStoreGooglePlay;
} else if ([store isEqualToString:@"AmazonAppStore"]) {
return kUPURStoreAmazonAppStore;
} else if ([store isEqualToString:@"CloudMoolah"]) {
return kUPURStoreCloudMoolah;
} else if ([store isEqualToString:@"SamsungApps"]) {
return kUPURStoreSamsungApps;
} else if ([store isEqualToString:@"XiaomiMiPay"]) {
return kUPURStoreXiaomiMiPay;
} else if ([store isEqualToString:@"MacAppStore"]) {
return kUPURStoreMacAppStore;
} else if ([store isEqualToString:@"AppleAppStore"]) {
return kUPURStoreAppleAppStore;
} else if ([store isEqualToString:@"WinRT"]) {
return kUPURStoreWinRT;
} else if ([store isEqualToString:@"TizenStore"]) {
return kUPURStoreTizenStore;
} else if ([store isEqualToString:@"FacebookStore"]) {
return kUPURStoreFacebookStore;
} else if ([store isEqualToString:@"NotSpecified"]) {
return kUPURStoreNotSpecified;
} else {
return kUPURStoreNotSpecified;
}
} else {
return kUPURStoreNotSpecified;
}
}
@implementation UPURTransactionDetails (UnityJsonAdditions)
// Builds a UPURTransactionDetails from a JSON object string.
// Returns nil on failure; *error is written only when `error` is non-NULL.
// FIX: the previous implementation dereferenced `error` unconditionally,
// crashing callers that pass NULL for the out-parameter.
+(instancetype)buildWithJson:(NSString *)json error:(NSError **)error {
    NSError *localError = nil;
    id object = [NSJSONSerialization JSONObjectWithData:[json dataUsingEncoding:NSUTF8StringEncoding] options:NSJSONReadingAllowFragments error:&localError];
    if (localError != nil) {
        NSLog(@"UPURTransactionDetails Unable to serialize from json: %@", [localError description]);
        if (error) {
            *error = localError;
        }
        return nil;
    }
    if (![object isKindOfClass:[NSDictionary class]]) {
        NSMutableDictionary *info = [NSMutableDictionary dictionary];
        [info setValue:@"UPURTransactionDetails Expected json object to be a NSDictionary but it was not" forKey:@"Reason"];
        if (error) {
            *error = [NSError errorWithDomain:NSUnityPurchasingTransactionDetailErrorDomain code:1 userInfo:info];
        }
        return nil;
    }
    return [UPURTransactionDetails build:^(UPURTransactionDetailsBuilder *builder) {
        NSDictionary *dictionary = (NSDictionary *) object;
        builder.productId = [dictionary valueForKey:@"productId"];
        builder.transactionId = [dictionary valueForKey:@"transactionId"];
        builder.receipt = [dictionary valueForKey:@"receipt"];
        builder.price = [dictionary valueForKey:@"price"];
        builder.currency = [dictionary valueForKey:@"currency"];
        // `extras` may be JSON null; only forward real values.
        id extras = [dictionary valueForKey:@"extras"];
        if (![extras isKindOfClass:[NSNull class]]) {
            builder.extras = extras;
        }
    }];
}
@end
@implementation UPURTransactionErrorDetails (UnityJsonAdditions)
// Builds a UPURTransactionErrorDetails from a JSON object string.
// Returns nil on failure; *error is written only when `error` is non-NULL.
// FIX: the previous implementation dereferenced `error` unconditionally,
// crashing callers that pass NULL for the out-parameter.
+(instancetype)buildWithJson:(NSString *)json error:(NSError **)error {
    NSError *localError = nil;
    id object = [NSJSONSerialization JSONObjectWithData:[json dataUsingEncoding:NSUTF8StringEncoding] options:NSJSONReadingAllowFragments error:&localError];
    if (localError != nil) {
        NSLog(@"UPURTransactionErrorDetails Unable to serialize from json: %@", [localError description]);
        if (error) {
            *error = localError;
        }
        return nil;
    }
    if (![object isKindOfClass:[NSDictionary class]]) {
        NSMutableDictionary *info = [NSMutableDictionary dictionary];
        [info setValue:@"UPURTransactionErrorDetails Expected json object to be a NSDictionary but it was not" forKey:@"Reason"];
        if (error) {
            *error = [NSError errorWithDomain:NSUnityPurchasingTransactionErrorDetailErrorDomain code:1 userInfo:info];
        }
        return nil;
    }
    return [UPURTransactionErrorDetails build:^(UPURTransactionErrorDetailsBuilder *builder) {
        NSDictionary *dictionary = (NSDictionary *) object;
        builder.transactionError = UPURTransactionErrorFromNSString([dictionary valueForKey:@"transactionError"]);
        builder.exceptionMessage = [dictionary valueForKey:@"exceptionMessage"];
        builder.store = UPURStoreFromNSString([dictionary valueForKey:@"store"]);
        builder.storeSpecificErrorCode = [dictionary valueForKey:@"storeSpecificErrorCode"];
        // `extras` may be JSON null; only forward real values.
        id extras = [dictionary valueForKey:@"extras"];
        if (![extras isKindOfClass:[NSNull class]]) {
            builder.extras = extras;
        }
    }];
}
@end
@implementation UMONCustomEvent (UnityJsonAdditions)
// Builds a UMONCustomEvent from a JSON object string.
// Returns nil on failure; *error is written only when `error` is non-NULL.
// FIX: the previous implementation dereferenced `error` unconditionally,
// crashing callers that pass NULL for the out-parameter.
+(instancetype)buildWithJson:(NSString *)json error: (NSError **)error {
    NSError *localError = nil;
    id object = [NSJSONSerialization JSONObjectWithData:[json dataUsingEncoding:NSUTF8StringEncoding] options:NSJSONReadingAllowFragments error:&localError];
    if (localError != nil) {
        NSLog(@"UMONCustomEvent Unable to serialize from json: %@", [localError description]);
        if (error) {
            *error = localError;
        }
        return nil;
    }
    if (![object isKindOfClass:[NSDictionary class]]) {
        NSMutableDictionary *info = [NSMutableDictionary dictionary];
        [info setValue:@"UMONCustomEvent Expected json object to be a NSDictionary but it was not" forKey:@"Reason"];
        // FIXME(review): reuses the TransactionDetail error domain (looks like
        // a copy/paste); kept as-is so existing domain checks keep working.
        if (error) {
            *error = [NSError errorWithDomain:NSUnityPurchasingTransactionDetailErrorDomain code:1 userInfo:info];
        }
        return nil;
    }
    return [UMONCustomEvent build:^(UMONCustomEventBuilder *builder) {
        NSDictionary *dictionary = (NSDictionary *) object;
        builder.category = [dictionary valueForKey:@"category"];
        builder.type = [dictionary valueForKey:@"type"];
        builder.userInfo = [dictionary valueForKey:@"userInfo"];
    }];
}
@end

View File

@@ -0,0 +1,177 @@
#import <UnityAds/UnityMonetization.h>
#import <Unity/UnityInterface.h>
#import "UnityAdsUtilities.h"
#import "UnityJsonAdditions.h"
// C# callback invoked when an ad starts showing.
typedef void (*UnityMonetizationShowAdStartCallback)();
// C# callback invoked when an ad finishes; finishState carries the
// UnityAdsFinishState value as an int.
typedef void (*UnityMonetizationShowAdFinishCallback)(int finishState);
// Bridges UMONShowAdDelegate events from UnityAds back to the C function
// pointers supplied by managed code.
@interface UnityMonetizationUnityShowAdDelegate : NSObject<UMONShowAdDelegate>
@property (nonatomic) UnityMonetizationShowAdStartCallback startCallback;
@property (nonatomic) UnityMonetizationShowAdFinishCallback finishCallback;
-(instancetype)initWithCallbacks:(UnityMonetizationShowAdStartCallback)startCallback finishCallback:(UnityMonetizationShowAdFinishCallback)finishCallback;
@end
@implementation UnityMonetizationUnityShowAdDelegate

/// Stores the start/finish function pointers that forward UnityAds show-ad
/// delegate events into managed code.
- (instancetype)initWithCallbacks:(UnityMonetizationShowAdStartCallback)startCallback finishCallback:(UnityMonetizationShowAdFinishCallback)finishCallback {
    self = [super init];
    if (self) {
        _startCallback = startCallback;
        _finishCallback = finishCallback;
    }
    return self;
}

/// Unpauses Unity and reports the finish state to C#, when a callback is set.
-(void)unityAdsDidFinish:(NSString *)placementId withFinishState:(UnityAdsFinishState)finishState {
    UnityPause(0);
    UnityMonetizationShowAdFinishCallback callback = self.finishCallback;
    if (callback != NULL) {
        callback(finishState);
    }
}

/// Reports ad start to C#, when a callback is set.
-(void)unityAdsDidStart:(NSString *)placementId {
    UnityMonetizationShowAdStartCallback callback = self.startCallback;
    if (callback != NULL) {
        callback();
    }
}
@end
// Copies an NSString into a freshly allocated NUL-terminated UTF-16LE buffer
// suitable for handing to il2cpp as a System.String payload. Caller frees.
// BUG FIX: the original allocated `len * sizeof(ushort) + 1` bytes but then
// zeroed `(len + 1) * sizeof(ushort)` bytes — a 1-byte heap overflow. The
// buffer is now sized for len + 1 UTF-16 code units (calloc also zeroes it,
// providing the terminator).
const ushort* Il2CppStringFromNSString(NSString* str) {
    size_t len = str.length; // UTF-16 code units, matching the encoding below
    ushort* buffer = (ushort*)calloc(len + 1, sizeof(ushort));
    NSData* utf16 = [str dataUsingEncoding:NSUTF16LittleEndianStringEncoding];
    [utf16 getBytes:buffer length:len * sizeof(ushort)];
    return buffer;
}
// Serializes a dictionary to JSON and returns it as a NUL-terminated UTF-16
// buffer for il2cpp, or NULL on serialization failure.
// BUG FIX: the original constructed the NSString from `data` BEFORE checking
// that serialization succeeded, passing nil data to -initWithData:encoding:,
// and left its NSError uninitialized.
const ushort* serializeJsonToIl2CppString(NSDictionary* dict) {
    NSError* error = nil;
    NSData* data = [NSJSONSerialization dataWithJSONObject:dict options:0 error:&error];
    if (data == nil) {
        return NULL;
    }
    NSString* str = [[NSString alloc] initWithData:data encoding:NSUTF8StringEncoding];
    return str != nil ? Il2CppStringFromNSString(str) : NULL;
}
// Converts a UMONItem into a JSON-safe dictionary; nil fields become NSNull so
// the dictionary literal never throws.
NSDictionary* getJsonDictionaryFromItem(UMONItem* item) {
    id type = item.type ? item.type : [NSNull null];
    id productId = item.productId ? item.productId : [NSNull null];
    return @{
        @"itemType": type,
        @"productId": productId,
        @"quantity": @(item.quantity)
    };
}

// Maps an array of UMONItems to an immutable array of JSON dictionaries.
NSArray* getJsonArrayFromItemArray(NSArray<UMONItem*>* items) {
    NSMutableArray* result = [NSMutableArray arrayWithCapacity:items.count];
    for (UMONItem* current in items) {
        [result addObject:getJsonDictionaryFromItem(current)];
    }
    return [result copy];
}
// A price is usable only when it is non-nil and not NaN.
BOOL isValidPrice(NSDecimalNumber* number) {
    if (number == nil) {
        return NO;
    }
    return ![number isEqualToNumber:[NSDecimalNumber notANumber]];
}
// Substitutes NSNull for nil so values can be placed in dictionary literals.
static inline id UnityAdsJsonValueOrNull(id value) {
    return value ? value : [NSNull null];
}

// Converts a UPURProduct into a JSON-safe dictionary.
NSDictionary* getJsonDictionaryFromProduct(UPURProduct* product) {
    return @{
        @"productId": UnityAdsJsonValueOrNull(product.productId),
        @"localizedTitle": UnityAdsJsonValueOrNull(product.localizedTitle),
        @"localizedDescription": UnityAdsJsonValueOrNull(product.localizedDescription),
        @"localizedPriceString": UnityAdsJsonValueOrNull(product.localizedPriceString),
        @"isoCurrencyCode": UnityAdsJsonValueOrNull(product.isoCurrencyCode),
        @"localizedPrice": isValidPrice(product.localizedPrice) ? product.localizedPrice : [NSNull null],
        @"productType": UnityAdsJsonValueOrNull(product.productType)
    };
}

// Converts promo metadata (impression date in ms since epoch, costs/payouts,
// premium product) into a JSON-safe dictionary.
NSDictionary* getPromoMetadataDictionary(UMONPromoMetaData* metadata) {
    NSDate* impressionDate = metadata.impressionDate;
    return @{
        @"impressionDate": impressionDate ? @([impressionDate timeIntervalSince1970] * 1000) : [NSNull null],
        @"offerDuration": @(metadata.offerDuration),
        @"costs": getJsonArrayFromItemArray(metadata.costs),
        @"payouts": getJsonArrayFromItemArray(metadata.payouts),
        @"premiumProduct": getJsonDictionaryFromProduct(metadata.premiumProduct)
    };
}

// Serializes promo metadata straight to an il2cpp UTF-16 string.
const ushort* serializePromoMetadataToJson(UMONPromoMetaData* metadata) {
    return serializeJsonToIl2CppString([getPromoMetadataDictionary(metadata) copy]);
}
// Returns whether the placement content is ready to be shown.
bool UnityMonetizationPlacementContentIsReady(const void* pPlacementContent) {
    return ((__bridge UMONPlacementContent*)pPlacementContent).ready;
}

// Builds a UMONCustomEvent from JSON and forwards it to the placement content.
// Returns false when the JSON cannot be parsed or no event was produced.
bool UnityMonetizationPlacementContentSendCustomEvent(const void* pPlacementContent, const ushort* customEventJson) {
    NSError *error = nil;
    UMONCustomEvent *event = [UMONCustomEvent buildWithJson:NSStringFromIl2CppString(customEventJson) error:&error];
    if (error) {
        NSLog(@"UnityMonetizationPlacementContentSendCustomEvent error occurred : %@", [error description]);
        return false;
    }
    if (event) {
        [(__bridge UMONPlacementContent*)pPlacementContent sendCustomEvent:event];
        return true;
    }
    NSLog(@"UnityMonetizationPlacementContentSendCustomEvent was not able to send event");
    return false;
}

// Serializes the placement content's userInfo dictionary for C#, or NULL when
// the content or its userInfo is absent.
const ushort* UnityMonetizationGetPlacementContentExtras(const void* pPlacementContent) {
    if (pPlacementContent == NULL) {
        return NULL;
    }
    NSDictionary* userInfo = ((__bridge UMONPlacementContent*)pPlacementContent).userInfo;
    return userInfo != nil ? serializeJsonToIl2CppString(userInfo) : NULL;
}

// Returns whether the (rewardable) placement content grants a reward.
bool UnityMonetizationPlacementContentIsRewarded(const void* pPlacementContent) {
    return ((__bridge UMONRewardablePlacementContent*)pPlacementContent).rewarded;
}

// Returns the reward identifier as an il2cpp UTF-16 string.
const ushort* UnityMonetizationPlacementContentGetRewardId(const void* pPlacementContent) {
    UMONRewardablePlacementContent* content = (__bridge UMONRewardablePlacementContent*)pPlacementContent;
    return Il2CppStringFromNSString(content.rewardId);
}

// Pauses Unity and presents the ad; the show-ad delegate unpauses on finish.
void UnityMonetizationPlacementContentShowAd(const void* pPlacementContent, UnityMonetizationShowAdStartCallback startCallback, UnityMonetizationShowAdFinishCallback finishCallback) {
    UnityMonetizationUnityShowAdDelegate* delegate = [[UnityMonetizationUnityShowAdDelegate alloc] initWithCallbacks:startCallback finishCallback:finishCallback];
    UnityPause(1);
    [(__bridge UMONShowAdPlacementContent*)pPlacementContent show:UnityGetGLViewController() withDelegate:delegate];
}

// Serializes promo-ad metadata for C#, or NULL for a NULL content pointer.
const ushort* UnityMonetizationGetPromoAdMetadata(const void* pPlacementContent) {
    if (pPlacementContent == NULL) {
        return NULL;
    }
    return serializePromoMetadataToJson(((__bridge UMONPromoAdPlacementContent*)pPlacementContent).metadata);
}

// Returns the content type string.
const char* UnityMonetizationGetPlacementContentType(const void* pPlacementContent) {
    // NOTE: il2cpp will free this pointer after invocation!
    return UnityAdsCopyString([((__bridge UMONPlacementContent*)pPlacementContent).type UTF8String]);
}

// Returns the raw UnityMonetizationPlacementContentState value.
int UnityMonetizationGetPlacementContentState(const void* pPlacementContent) {
    return ((__bridge UMONPlacementContent*)pPlacementContent).state;
}

// Balances the CFBridgingRetain taken when the content was handed to C#.
void UnityMonetizationPlacementContentReleaseReference(const void* pPlacementContent) {
    CFBridgingRelease(pPlacementContent);
}

View File

@@ -0,0 +1,36 @@
#import <UnityAds/UnityMonetization.h>
#import "UnityJsonAdditions.h"
// Wraps a promo placement content in a native promo adapter, retained for C#.
// Returns NULL for a NULL input.
const void* UnityMonetizationCreateNativePromoAdapter(const void* pPlacementContent) {
    if (pPlacementContent == NULL) {
        return NULL;
    }
    UMONPromoAdPlacementContent* content = (__bridge UMONPromoAdPlacementContent*)pPlacementContent;
    return CFBridgingRetain([[UMONNativePromoAdapter alloc] initWithPromo:content]);
}

// Balances the retain taken in UnityMonetizationCreateNativePromoAdapter.
void UnityMonetizationReleaseNativePromoAdapter(const void* pPlacementContent) {
    CFBridgingRelease(pPlacementContent);
}

// Notifies the adapter that the promo was shown (showType maps to
// UMONNativePromoShowType).
void UnityMonetizationNativePromoAdapterOnShown(const void* pNativePromoAdapter, int showType) {
    UMONNativePromoAdapter* adapter = (__bridge UMONNativePromoAdapter*)pNativePromoAdapter;
    if (adapter != nil) {
        [adapter promoDidShow:(UMONNativePromoShowType)showType];
    }
}

// Notifies the adapter that the promo was clicked.
void UnityMonetizationNativePromoAdapterOnClicked(const void* pNativePromoAdapter) {
    UMONNativePromoAdapter* adapter = (__bridge UMONNativePromoAdapter*)pNativePromoAdapter;
    if (adapter != nil) {
        [adapter promoDidClick];
    }
}

// Notifies the adapter that the promo was closed.
void UnityMonetizationNativePromoAdapterOnClosed(const void* pNativePromoAdapter) {
    UMONNativePromoAdapter* adapter = (__bridge UMONNativePromoAdapter*)pNativePromoAdapter;
    if (adapter != nil) {
        [adapter promoDidClose];
    }
}

View File

@@ -0,0 +1,146 @@
#include <stdlib.h>

#import <UnityAds/USRVUnityPurchasing.h>

#import "UnityAdsUtilities.h"
#import "UnityJsonAdditions.h"
// Mirror of the managed-side product struct. Field order and types are part of
// the C#/native ABI — do not reorder. String fields are NUL-terminated UTF-16
// (il2cpp) buffers.
struct UnityPurchasingProduct {
const ushort* productId;
const ushort* localizedTitle;
const ushort* localizedDescription;
const ushort* localizedPriceString;
const ushort* isoCurrencyCode;
const ushort* productType;
double localizedPrice;
};
// Pair of CFBridgingRetain'd block pointers handed to C# for a purchase and
// handed back (then released/freed) via the Invoke* callbacks below.
struct UnityPurchasingPurchaseCallbacks {
const void* completionHandler;
const void* errorHandler;
};
// Callback called to C# that handles retrieiving the products.
// It is assumed that pDelegate will be the same delegate passed into
// UnityPurchasingInvokeRetrieveProductsCallback.
typedef void (*UnityPurchasingOnRetrieveProductsCallback)(const void *pDelegate);
UnityPurchasingOnRetrieveProductsCallback unityPurchasingOnRetrieveProductsCallback;
// Callback called to C# that handles the purchasing flow.
// It is assumed that pDelegate will be the same delegate passed into
// UnityPurchasingInvokeTransactionCompleteCallback and
// UnityPurchasingInvokeTransactionErrorCallback
typedef void (*UnityPurchasingOnPurchaseCallback)(const char *productId, struct UnityPurchasingPurchaseCallbacks* callbacks);
UnityPurchasingOnPurchaseCallback unityPurchasingOnPurchaseCallback;
// Bundle of the two callbacks above, filled in by C# in
// UnityPurchasingSetPurchasingAdapterCallbacks.
struct UnityPurchasingAdapterCallbacks {
UnityPurchasingOnRetrieveProductsCallback unityPurchasingOnRetrieveProductsCallback;
UnityPurchasingOnPurchaseCallback unityPurchasingOnPurchaseCallback;
};
// Bridges USRVUnityPurchasingDelegate callbacks to the C function pointers
// registered from managed code.
@interface UnityPurchasingAdapterDelegate : NSObject <USRVUnityPurchasingDelegate>
@end
@implementation UnityPurchasingAdapterDelegate

// Hands the load-products completion block to C#; the retain taken here is
// balanced in UnityPurchasingInvokeRetrieveProductsCallback.
-(void)loadProducts:(void (^)(NSArray<UPURProduct*> *products))completionHandler {
    unityPurchasingOnRetrieveProductsCallback(CFBridgingRetain(completionHandler));
}

// Hands a purchase request to C#.
// BUG FIX: `callbacks` was an uninitialized pointer that was immediately
// written through — undefined behavior / crash. It is now heap-allocated;
// ownership passes to C#, which returns it to
// UnityPurchasingInvokeTransactionComplete/ErrorCallback where the handlers
// are released and the struct is freed.
-(void)purchaseProduct:(NSString *)productId completionHandler:(UnityPurchasingTransactionCompletionHandler)completionHandler errorHandler:(UnityPurchasingTransactionErrorHandler)errorHandler userInfo:(nullable NSDictionary *)extras {
    struct UnityPurchasingPurchaseCallbacks* callbacks =
        (struct UnityPurchasingPurchaseCallbacks*)malloc(sizeof(struct UnityPurchasingPurchaseCallbacks));
    callbacks->completionHandler = CFBridgingRetain(completionHandler);
    callbacks->errorHandler = CFBridgingRetain(errorHandler);
    unityPurchasingOnPurchaseCallback([productId UTF8String], callbacks);
}
@end
// Shared delegate instance installed on the first call below.
static id <USRVUnityPurchasingDelegate> unityPurchasingAdapterDelegate;

/**
 * Sets the callbacks for invoking purchasing adapter functionality into C#,
 * and lazily installs the shared purchasing delegate on first use.
 */
void UnityPurchasingSetPurchasingAdapterCallbacks(struct UnityPurchasingAdapterCallbacks *callbacks) {
    unityPurchasingOnPurchaseCallback = callbacks->unityPurchasingOnPurchaseCallback;
    unityPurchasingOnRetrieveProductsCallback = callbacks->unityPurchasingOnRetrieveProductsCallback;
    if (unityPurchasingAdapterDelegate == nil) {
        unityPurchasingAdapterDelegate = [[UnityPurchasingAdapterDelegate alloc] init];
        [USRVUnityPurchasing setDelegate:unityPurchasingAdapterDelegate];
    }
}
/**
 * Allocates a mutable products array, retained so C# can fill it via
 * UnityPurchasingAddItemToProductsArray and release it later.
 */
const void *UnityPurchasingAdapterAllocateProductsArray(int num) {
    return CFBridgingRetain([NSMutableArray arrayWithCapacity:num]);
}
/**
 * Appends the given product to the end of the product array, converting the
 * C struct fields into a UPURProduct via the builder.
 */
void UnityPurchasingAddItemToProductsArray(const void *pArray, struct UnityPurchasingProduct *pProduct) {
    UPURProduct *product = [UPURProduct build:^(UPURProductBuilder* builder) {
        builder.productId = NSStringFromIl2CppString(pProduct->productId);
        builder.localizedTitle = NSStringFromIl2CppString(pProduct->localizedTitle);
        builder.localizedDescription = NSStringFromIl2CppString(pProduct->localizedDescription);
        builder.localizedPriceString = NSStringFromIl2CppString(pProduct->localizedPriceString);
        builder.isoCurrencyCode = NSStringFromIl2CppString(pProduct->isoCurrencyCode);
        builder.productType = NSStringFromIl2CppString(pProduct->productType);
        builder.localizedPrice = [[NSDecimalNumber alloc] initWithDouble:pProduct->localizedPrice];
    }];
    [(__bridge NSMutableArray *)pArray addObject:product];
}
/**
 * Invokes the given retrieve products delegate with the given products, then
 * balances the retains taken when both pointers were handed to C#.
 */
void UnityPurchasingInvokeRetrieveProductsCallback(const void *pDelegate, const void *pProducts) {
    if (pDelegate == NULL || pProducts == NULL) {
        return;
    }
    UnityPurchasingLoadProductsCompletionHandler completionHandler = (__bridge UnityPurchasingLoadProductsCompletionHandler)pDelegate;
    completionHandler((__bridge NSArray *)pProducts);
    CFBridgingRelease(pDelegate);
    CFBridgingRelease(pProducts);
}
/**
 * Invokes the purchase completion handler with the parsed transaction details.
 * Parse failures are intentionally swallowed; either way the handlers are
 * released and the callbacks struct is freed.
 */
void UnityPurchasingInvokeTransactionCompleteCallback(struct UnityPurchasingPurchaseCallbacks* callbacks, ushort* transactionDetailsJson) {
    if (callbacks == NULL || transactionDetailsJson == NULL) {
        // NOTE(review): a non-NULL callbacks struct with NULL json is leaked
        // here, matching the original behavior.
        return;
    }
    NSError *parseError = nil;
    UPURTransactionDetails *details = [UPURTransactionDetails buildWithJson:NSStringFromIl2CppString(transactionDetailsJson) error:&parseError];
    if (parseError == nil && details != nil) {
        UnityPurchasingTransactionCompletionHandler handler = (__bridge UnityPurchasingTransactionCompletionHandler)(callbacks->completionHandler);
        handler(details);
    }
    CFBridgingRelease(callbacks->completionHandler);
    CFBridgingRelease(callbacks->errorHandler);
    free(callbacks);
}

/**
 * Invokes the purchase error handler with the parsed error details. Parse
 * failures are intentionally swallowed; either way the handlers are released
 * and the callbacks struct is freed.
 */
void UnityPurchasingInvokeTransactionErrorCallback(struct UnityPurchasingPurchaseCallbacks* callbacks, ushort* transactionErrorDetailsJson) {
    if (callbacks == NULL || transactionErrorDetailsJson == NULL) {
        return;
    }
    NSError *parseError = nil;
    UPURTransactionErrorDetails *details = [UPURTransactionErrorDetails buildWithJson:NSStringFromIl2CppString(transactionErrorDetailsJson) error:&parseError];
    if (parseError == nil && details != nil) {
        UnityPurchasingTransactionErrorHandler handler = (__bridge UnityPurchasingTransactionErrorHandler)(callbacks->errorHandler);
        handler(details);
    }
    CFBridgingRelease(callbacks->completionHandler);
    CFBridgingRelease(callbacks->errorHandler);
    free(callbacks);
}

View File

@@ -0,0 +1,82 @@
#import <UnityAds/UnityMonetization.h>
#import "UnityAdsUtilities.h"
#import <UnityAds/UnityServices.h>
#import "UnityAdsPurchasingWrapper.h"
/**
 * Callback invoked into C# when a decision state has changed.
 */
typedef void (*UnityMonetizationPlacementContentStateChangedCallback)(const char *placementId, const void *pDecision, int newState, int oldState);
static UnityMonetizationPlacementContentStateChangedCallback unityMonetizationPlacementContentStateChangedCallback = NULL;
/**
 * Callback invoked into C# when a decision is ready.
 */
typedef void (*UnityMonetizationPlacementContentReadyCallback)(const char *placementId, const void *pDecision);
static UnityMonetizationPlacementContentReadyCallback unityMonetizationPlacementContentReadyCallback = NULL;
/**
 * Callback invoked into C# when an error occurred
 */
typedef void (*UnityMonetizationErrorCallback)(long err, const char* message);
static UnityMonetizationErrorCallback unityMonetizationErrorCallback = NULL;
// Bundle of the callbacks above, filled in by C# via
// UnityMonetizationSetMonetizationCallbacks.
struct UnityMonetizationMonetizationCallbacks {
UnityMonetizationPlacementContentReadyCallback unityMonetizationDecisionReadyCallback;
UnityMonetizationPlacementContentStateChangedCallback unityMonetizationDecisionStateChangedCallback;
UnityMonetizationErrorCallback unityMonetizationErrorCallback;
};
// Shared delegate; created lazily in UnityMonetizationSetMonetizationCallbacks
// and passed to [UnityMonetization initialize:...].
static id<UnityMonetizationDelegate> monetizationDelegate = nil;
// Bridges UnityMonetizationDelegate events to the registered C callbacks.
@interface UnityMonetizationUnityDecisionDelegate : NSObject<UnityMonetizationDelegate>
@end
@implementation UnityMonetizationUnityDecisionDelegate
// Forwards "placement content ready" to C# together with a reference to the
// native placement content object.
-(void)placementContentReady:(NSString *)placementId placementContent:(UMONPlacementContent *)decision {
if (unityMonetizationPlacementContentReadyCallback != NULL) {
const char* rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
// Note, we are bridging instead of retaining as we don't need to track the lifecycle of this
// object in C#. When the decision is replaced, ready will be called and C# will be notified.
// NOTE(review): the comment above says "bridging instead of retaining", but
// CFBridgingRetain below DOES take a +1 retain — C# must balance it via
// UnityMonetizationPlacementContentReleaseReference or this leaks. Confirm
// which side (comment or code) is correct.
unityMonetizationPlacementContentReadyCallback(rawPlacementId, CFBridgingRetain(decision));
free((void*)rawPlacementId);
}
}
// Forwards a placement content state transition to C#.
-(void)placementContentStateDidChange:(NSString *)placementId placementContent:(UMONPlacementContent *)placementContent previousState:(UnityMonetizationPlacementContentState)previousState newState:(UnityMonetizationPlacementContentState)newState {
if (unityMonetizationPlacementContentStateChangedCallback != NULL) {
const char* rawPlacementId = UnityAdsCopyString([placementId UTF8String]);
// NOTE(review): the callback typedef names its int parameters
// (newState, oldState), but previousState is passed in the newState slot
// below. Either the typedef's parameter names or this argument order is
// wrong — verify against the C# marshalling before changing anything.
unityMonetizationPlacementContentStateChangedCallback(rawPlacementId, (__bridge void*)placementContent, previousState, newState);
free((void*)rawPlacementId);
}
}
// Forwards a UnityServices error code and message to C#.
-(void)unityServicesDidError:(UnityServicesError)error withMessage:(NSString *)message {
if (unityMonetizationErrorCallback != NULL) {
const char* rawError = UnityAdsCopyString([message UTF8String]);
unityMonetizationErrorCallback(error, rawError);
free((void*)rawError);
}
}
@end
// Stores the managed callbacks and lazily creates the shared monetization
// delegate. Must run before UnityMonetizationInitialize, which passes the
// delegate to the SDK.
void UnityMonetizationSetMonetizationCallbacks(struct UnityMonetizationMonetizationCallbacks *callbacks) {
    unityMonetizationErrorCallback = callbacks->unityMonetizationErrorCallback;
    unityMonetizationPlacementContentReadyCallback = callbacks->unityMonetizationDecisionReadyCallback;
    unityMonetizationPlacementContentStateChangedCallback = callbacks->unityMonetizationDecisionStateChangedCallback;
    if (monetizationDelegate == nil) {
        monetizationDelegate = [[UnityMonetizationUnityDecisionDelegate alloc] init];
    }
}
// Initializes Unity Monetization with the given game id (also wiring up the
// purchasing bridge first).
void UnityMonetizationInitialize(const char *gameId, bool isTestMode) {
    InitializeUnityAdsPurchasingWrapper();
    NSString* gameIdString = [NSString stringWithUTF8String:gameId];
    [UnityMonetization initialize:gameIdString delegate:monetizationDelegate testMode:isTestMode];
}

// Returns whether the named placement currently has content ready.
bool UnityMonetizationIsReady(const char* placementId) {
    NSString* placementIdString = [NSString stringWithUTF8String:placementId];
    return [UnityMonetization isReady:placementIdString];
}

// Returns whether Unity Services is supported on this device/OS.
bool UnityMonetizationIsSupported() {
    return [UnityServices isSupported];
}

View File

@@ -0,0 +1,25 @@
#pragma once
#import <StoreKit/StoreKit.h>
#import "LifeCycleListener.h"
// Receives notice of promotional (App Store initiated) purchase attempts that
// were intercepted before Unity IAP was ready to process them.
@protocol UnityEarlyTransactionObserverDelegate<NSObject>
- (void)promotionalPurchaseAttempted:(SKPayment *)payment;
@end
// Observes the StoreKit payment queue from app launch so promotional payments
// that arrive before Unity purchasing initializes are queued instead of lost.
@interface UnityEarlyTransactionObserver : NSObject<SKPaymentTransactionObserver, LifeCycleListener>
{
// Payments deferred until Unity is ready; created lazily.
NSMutableSet *m_QueuedPayments;
}
// Set to YES once Unity purchasing can process transaction updates.
@property BOOL readyToReceiveTransactionUpdates;
// The delegate exists so that the observer can notify it of attempted promotional purchases.
@property(nonatomic, weak) id<UnityEarlyTransactionObserverDelegate> delegate;
// Shared observer created in +load.
+ (UnityEarlyTransactionObserver*)defaultObserver;
// Re-adds all queued payments to the payment queue for normal processing.
- (void)initiateQueuedPayments;
@end

View File

@@ -0,0 +1,105 @@
#import "UnityEarlyTransactionObserver.h"
#import "UnityPurchasing.h"
// Prefixed logging helper for this observer.
// NOTE(review): this is a non-static global symbol named `Log` — it risks a
// duplicate-symbol/link collision with any other global `Log`. Making it
// static would change linkage, so confirm no other translation unit calls it
// before doing so.
void Log(NSString *message)
{
NSLog(@"UnityIAP UnityEarlyTransactionObserver: %@\n", message);
}
@implementation UnityEarlyTransactionObserver

static UnityEarlyTransactionObserver *s_Observer = nil;

// Creates and registers the shared observer at class-load time so it is in
// place before any StoreKit activity reaches the app.
+ (void)load
{
    if (!s_Observer)
    {
        s_Observer = [[UnityEarlyTransactionObserver alloc] init];
        Log(@"Created");
        [s_Observer registerLifeCycleListener];
    }
}

+ (UnityEarlyTransactionObserver*)defaultObserver
{
    return s_Observer;
}

- (void)registerLifeCycleListener
{
    UnityRegisterLifeCycleListener(self);
    Log(@"Registered for lifecycle events");
}

// Lifecycle hook: begin observing the payment queue once the app has launched.
- (void)didFinishLaunching:(NSNotification*)notification
{
    Log(@"Added to the payment queue");
    [[SKPaymentQueue defaultQueue] addTransactionObserver: self];
}

// Setting a delegate immediately replays any promotional payments queued
// before the delegate existed.
- (void)setDelegate:(id<UnityEarlyTransactionObserverDelegate>)delegate
{
    _delegate = delegate;
    [self sendQueuedPaymentsToInterceptor];
}

// Decides whether a promotional (App Store initiated) payment may proceed now.
// Returns YES only when Unity is ready AND no interceptor delegate is set;
// otherwise the payment is queued (and reported to the delegate, if any).
// BUG FIX: removed an unreachable trailing `return YES;` — both branches of
// the if/else below already return.
- (BOOL)paymentQueue:(SKPaymentQueue *)queue shouldAddStorePayment:(SKPayment *)payment forProduct:(SKProduct *)product
{
    Log(@"Payment queue shouldAddStorePayment");
    if (self.readyToReceiveTransactionUpdates && !self.delegate)
    {
        return YES;
    }
    else
    {
        if (m_QueuedPayments == nil)
        {
            m_QueuedPayments = [[NSMutableSet alloc] init];
        }
        // If there is a delegate and we have not seen this payment yet, it means we should intercept promotional purchases
        // and just return the payment to the delegate.
        // Do not try to process it now.
        if (self.delegate && [m_QueuedPayments member: payment] == nil)
        {
            [self.delegate promotionalPurchaseAttempted: payment];
        }
        [m_QueuedPayments addObject: payment];
        return NO;
    }
}

// Required SKPaymentTransactionObserver method; transaction updates are handled
// by the main purchasing code, so this observer deliberately ignores them.
- (void)paymentQueue:(SKPaymentQueue *)queue updatedTransactions:(NSArray<SKPaymentTransaction *> *)transactions {}

// Re-adds every queued payment to the payment queue for normal processing and
// clears the queue.
- (void)initiateQueuedPayments
{
    Log(@"Request to initiate queued payments");
    if (m_QueuedPayments != nil)
    {
        Log(@"Initiating queued payments");
        for (SKPayment *payment in m_QueuedPayments)
        {
            [[SKPaymentQueue defaultQueue] addPayment: payment];
        }
        [m_QueuedPayments removeAllObjects];
    }
}

// Replays queued promotional payments to the interceptor delegate. The queue
// is intentionally NOT cleared here — payments remain queued until
// initiateQueuedPayments submits them.
- (void)sendQueuedPaymentsToInterceptor
{
    Log(@"Request to send queued payments to interceptor");
    if (m_QueuedPayments != nil)
    {
        Log(@"Sending queued payments to interceptor");
        for (SKPayment *payment in m_QueuedPayments)
        {
            if (self.delegate)
            {
                [self.delegate promotionalPurchaseAttempted: payment];
            }
        }
    }
}
@end

View File

@@ -0,0 +1,49 @@
#import <StoreKit/StoreKit.h>
// Callback to Unity identifying the subject, JSON message body and optional app receipt.
// Note that App Receipts are sent separately to the JSON body for performance reasons.
typedef void (*UnityPurchasingCallback)(const char* subject, const char* payload, const char* receipt, const char* transactionId);
// Mirrors the managed ProductDefinition (id/storeSpecificId/type).
// NOTE(review): NSString properties declared `strong` — Cocoa convention is
// `copy` for NSString; confirm the .m does not rely on identity before changing.
@interface ProductDefinition : NSObject
@property (nonatomic, strong) NSString *id;
@property (nonatomic, strong) NSString *storeSpecificId;
@property (nonatomic, strong) NSString *type;
@end
// Wraps an SKReceiptRefreshRequest, reporting success/failure via `callback`.
// NOTE(review): block property declared `strong`; `copy` is the conventional
// attribute for blocks (equivalent under ARC).
@interface ReceiptRefresher : NSObject<SKRequestDelegate>
@property (nonatomic, strong) void (^callback)(BOOL);
@end
// Native side of Unity IAP for the Apple App Store: requests products,
// observes the payment queue, and reports results back through
// `messageCallback`.
@interface UnityPurchasing : NSObject<SKProductsRequestDelegate, SKPaymentTransactionObserver>
{
UnityPurchasingCallback messageCallback;
NSMutableDictionary* validProducts;
NSSet* productIds;
SKProductsRequest *request;
NSMutableDictionary *pendingTransactions;
NSMutableSet *finishedTransactions;
// Dictionary that maps product IDs to the most recent transaction receipt (base 64 encoded).
NSMutableDictionary<NSString *, NSString *> *transactionReceipts;
}
+ (NSArray*)deserializeProductDefs:(NSString*)json;
+ (ProductDefinition*)deserializeProductDef:(NSString*)json;
+ (NSString*)serializeProductMetadata:(NSArray*)products;
- (void)restorePurchases;
- (NSString*)getAppReceipt;
- (NSString*)getTransactionReceiptForProductId:(NSString *)productId;
- (void)addTransactionObserver;
@property (nonatomic, strong) ReceiptRefresher* receiptRefresher;
@property (nonatomic, strong) SKReceiptRefreshRequest* refreshRequest;
@property BOOL simulateAskToBuyEnabled;
@property (nonatomic, copy, readwrite) NSString* applicationUsername;
@property (nonatomic) BOOL interceptPromotionalPurchases;
@end

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,8 @@
#import <Foundation/Foundation.h>
#import <AVFoundation/AVAudioSession.h>
extern "C" {
    /// Returns the system audio output volume in [0, 1] as reported by the
    /// shared AVAudioSession.
    float unity_services_analytics_get_device_volume() {
        AVAudioSession *session = [AVAudioSession sharedInstance];
        return session.outputVolume;
    }
}

View File

@@ -0,0 +1,20 @@
#include <stdlib.h>
#include <string.h>
extern "C" {
    // Returns the current locale's language code as a heap-allocated,
    // NUL-terminated C string; the caller owns the buffer and must free() it.
    // Returns "" when no language code is available (matching the original).
    const char *unity_services_current_language_code() {
        NSString *languageCode = [NSLocale currentLocale].languageCode;
        // strdup replaces the hand-rolled copy loop, which truncated each
        // UTF-16 unit to char (only safe because BCP-47 codes are ASCII) and
        // did manual malloc bookkeeping. strdup also allocates with malloc,
        // so caller-side free() semantics are unchanged.
        return strdup(languageCode ? [languageCode UTF8String] : "");
    }
}

View File

@@ -0,0 +1,36 @@
#include <string>
#ifdef __cplusplus
extern "C" {
#endif
// Reads a string from standard NSUserDefaults. Returns a heap-allocated copy
// the caller must free, or NULL for a NULL key / missing value.
char* UOCPUserDefaultsGetString(const char *key) {
    if (key == NULL) {
        return NULL;
    }
    NSString* stringKey = [NSString stringWithUTF8String:key];
    NSString* stringValue = [[NSUserDefaults standardUserDefaults] stringForKey:stringKey];
    return stringValue != nil ? strdup([stringValue UTF8String]) : NULL;
}
// Writes a string into standard NSUserDefaults. A NULL key is a no-op; a NULL
// value removes the entry. Synchronizes after every write, like the original.
void UOCPUserDefaultsSetString(const char *key, const char *value) {
    if (key == NULL) {
        return;
    }
    NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
    NSString* stringKey = [NSString stringWithUTF8String:key];
    if (value == NULL) {
        [defaults removeObjectForKey:stringKey];
    } else {
        [defaults setValue:[NSString stringWithUTF8String:value] forKey:stringKey];
    }
    [defaults synchronize];
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,41 @@
#pragma once
#include "Internal/PlatformDetection.h"
#include "Internal/ArchitectureDetection.h"
#include "Internal/PlatformEnvironment.h"
// When BASELIB_INLINE_NAMESPACE is set, baselib's public symbols live in an
// inline C++ namespace (not expressible in C, hence the error below).
#ifdef BASELIB_INLINE_NAMESPACE
#ifndef __cplusplus
#error "BASELIB_INLINE_NAMESPACE is not available when compiling C code"
#endif
#define BASELIB_CPP_INTERFACE inline namespace BASELIB_INLINE_NAMESPACE
#define BASELIB_C_INTERFACE BASELIB_CPP_INTERFACE
#else
#define BASELIB_CPP_INTERFACE extern "C++"
#define BASELIB_C_INTERFACE extern "C"
#endif
// Import/export decoration depending on how baselib is linked.
#if defined(BASELIB_USE_DYNAMICLIBRARY)
#define BASELIB_API IMPORTED_SYMBOL
#elif defined(BASELIB_DYNAMICLIBRARY)
#define BASELIB_API EXPORTED_SYMBOL
#else
#define BASELIB_API
#endif
// BASELIB_BINDING_GENERATION is set by the bindings generator and by BindingsExposedInlineImplementations.cpp
// in order to selectively provide symbols bindings can link to for some of our inline implementations.
#ifdef BASELIB_BINDING_GENERATION
#define BASELIB_INLINE_API BASELIB_API
#define BASELIB_FORCEINLINE_API BASELIB_API
#else
#define BASELIB_INLINE_API static inline
#define BASELIB_FORCEINLINE_API static COMPILER_FORCEINLINE
#endif
#include "Internal/BasicTypes.h"
#include "Internal/CoreMacros.h"
#include "Internal/Assert.h"

View File

@@ -0,0 +1,21 @@
#pragma once
// Portable alignment-query and alignment-specification macros.
#ifndef BASELIB_ALIGN_OF
#if defined(__cplusplus) // We assume C++11 support (also, note that Mscv has correct version numbers on this attribute as opt-in)
#define BASELIB_ALIGN_OF(TYPE_) alignof(TYPE_)
// As of gcc8+clang 8, alignof and _Alignof return the ABI alignment of a type, as opposed to the preferred alignment.
// __alignof still returns the preferred alignment.
// Also see:
// https://gcc.gnu.org/gcc-8/porting_to.html#alignof
// https://releases.llvm.org/8.0.0/tools/clang/docs/ReleaseNotes.html#modified-compiler-flags
// BUG FIX: was `STDC_VERSION >= 201112L` — STDC_VERSION is not a predefined
// macro, so it evaluated to 0 and the C11 _Alignof branch was unreachable
// (C builds always fell through to the compiler intrinsic). The correct
// predefined macro is __STDC_VERSION__.
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
#define BASELIB_ALIGN_OF(TYPE_) _Alignof(TYPE_)
#else
#define BASELIB_ALIGN_OF(TYPE_) COMPILER_ALIGN_OF(TYPE_)
#endif
#endif
#ifndef BASELIB_ALIGN_AS
#define BASELIB_ALIGN_AS(ALIGNMENT_) COMPILER_ALIGN_AS(ALIGNMENT_)
#endif

View File

@@ -0,0 +1,411 @@
#pragma once
// This API is not type safe. For a type safe version use Baselib_Atomic_TypeSafe.h (C) or Atomic.h (C++)
//
// Atomics closely mimic C11/C++11 implementation, with the following differences:
//
// *) C API: as Visual Studio C compiler doesn't support _Generic we can't have a single named function operating on different types, or
// selecting different implementations depending on memory order.
// This leads to having to explicitly specify type size and ordering in the function name, for example
// 'Baselib_atomic_load_32_acquire' instead of 'Baselib_atomic_load' as one would have available in in C11 or equivalent in C++11.
//
// not type specific
// ----------------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE void Baselib_atomic_thread_fence_relaxed(void);
static FORCE_INLINE void Baselib_atomic_thread_fence_acquire(void);
static FORCE_INLINE void Baselib_atomic_thread_fence_release(void);
static FORCE_INLINE void Baselib_atomic_thread_fence_acq_rel(void);
static FORCE_INLINE void Baselib_atomic_thread_fence_seq_cst(void);
static FORCE_INLINE void Baselib_atomic_load_8_relaxed_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_8_acquire_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_8_seq_cst_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_8_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_8_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_8_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_fetch_add_8_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_8_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_8_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_8_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_8_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_8_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_8_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_8_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_8_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_8_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_8_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_8_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_8_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_8_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_8_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_8_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_8_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_8_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_8_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_8_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_8_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
// 16-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE void Baselib_atomic_load_16_relaxed_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_16_acquire_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_16_seq_cst_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_16_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_16_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_16_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_fetch_add_16_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_16_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_16_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_16_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_16_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_16_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_16_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_16_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_16_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_16_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_16_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_16_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_16_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_16_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_16_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_16_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_16_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_16_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_16_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_16_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_16_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
// 32-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE void Baselib_atomic_load_32_relaxed_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_32_acquire_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_32_seq_cst_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_32_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_32_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_32_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_fetch_add_32_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_32_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_32_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_32_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_32_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_32_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_32_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_32_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_32_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_32_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_32_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_32_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_32_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_32_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_32_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_32_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_32_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_32_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_32_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_32_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_32_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
// 64-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE void Baselib_atomic_load_64_relaxed_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_64_acquire_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_64_seq_cst_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_64_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_64_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_64_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_fetch_add_64_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_64_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_64_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_64_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_64_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_64_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_64_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_64_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_64_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_64_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_64_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_64_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_64_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_64_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_64_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_64_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_64_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_64_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_64_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_64_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_64_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
// 128-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
#if PLATFORM_ARCH_64
// 'const' intentionally commented out:
// 128-bit loads are guaranteed not to change *obj, but the implementation may need a store to confirm atomicity.
static FORCE_INLINE void Baselib_atomic_load_128_relaxed_v(/* const */ void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_128_acquire_v(/* const */ void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_128_seq_cst_v(/* const */ void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_128_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_128_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_128_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_exchange_128_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_128_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_128_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_128_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_128_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_128_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_128_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
#endif
// ptr declarations
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE void Baselib_atomic_load_ptr_relaxed_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_ptr_acquire_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_ptr_seq_cst_v(const void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_ptr_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_ptr_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_ptr_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_add_ptr_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_and_ptr_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_or_ptr_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_fetch_xor_ptr_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
// ptr2x declarations
// ------------------------------------------------------------------------------------------------------------------------------
// 'const' intentionally commented out:
// 128-bit loads are guaranteed not to change *obj, but the implementation may need a store to confirm atomicity.
static FORCE_INLINE void Baselib_atomic_load_ptr2x_relaxed_v(/* const */ void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_ptr2x_acquire_v(/* const */ void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_load_ptr2x_seq_cst_v(/* const */ void* obj, void* result);
static FORCE_INLINE void Baselib_atomic_store_ptr2x_relaxed_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_ptr2x_release_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_store_ptr2x_seq_cst_v(void* obj, const void* value);
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_relaxed_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_acquire_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_release_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_acq_rel_v(void* obj, const void* value, void* result);
static FORCE_INLINE void Baselib_atomic_exchange_ptr2x_seq_cst_v(void* obj, const void* value, void* result);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr2x_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_relaxed_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acquire_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acquire_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_release_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acq_rel_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_acq_rel_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_seq_cst_relaxed_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_seq_cst_acquire_v(void* obj, void* expected, const void* value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr2x_seq_cst_seq_cst_v(void* obj, void* expected, const void* value);
// Compiler Specific Implementation
// ----------------------------------------------------------------------------------------------------------------------------------------
#if PLATFORM_CUSTOM_ATOMICS
// Platform supplies its own atomic implementation. The macro helper header is
// included first because the platform header depends on it but cannot locate it
// relative to its own path.
// Platform header does not know where macro header lives and likely needs it.
#include "../../Include/C/Baselib_Atomic_Macros.h"
#include "C/Baselib_Atomic_Platform.inl.h"
#elif COMPILER_CLANG || COMPILER_GCC
// Clang and GCC share one implementation header.
#include "Internal/Compiler/Baselib_Atomic_Gcc.h"
#elif COMPILER_MSVC
#include "Internal/Compiler/Baselib_Atomic_Msvc.h"
// NOTE(review): no #else fallback — an unrecognized compiler gets no implementation
// for the declarations above; presumably the build fails later. Confirm intended.
#endif

View File

@@ -0,0 +1,151 @@
#pragma once
// In computer science, load-link and store-conditional (LL/SC) are a pair of instructions used in multithreading to achieve synchronization.
// Load-link returns the current value of a memory location, while a subsequent store-conditional to the same memory location will store a new
// value only if no updates have occurred to that location since the load-link. Together, this implements a lock-free atomic read-modify-write operation.
//
// Comparison of LL/SC and compare-and-swap
// If any updates have occurred, the store-conditional is guaranteed to fail, even if the value read by the load-link has since been restored.
// As such, an LL/SC pair is stronger than a read followed by a compare-and-swap (CAS), which will not detect updates if the old value has been restored
// (see ABA problem).
//
// "Load-link/store-conditional", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Load-link/store-conditional&oldid=916413430
//
//
// Baselib_atomic_llsc_break
//
// This has no functional effect, but can improve performance on some Arm architectures
//
// Example:
// Baselib_atomic_llsc_32_relaxed_relaxed_v(&obj, &expected, &value, { if (expected == 0) { Baselib_atomic_llsc_break(); break; } });
//
#define Baselib_atomic_llsc_break() detail_Baselib_atomic_llsc_break()
//
// Baselib_atomic_llsc_<int_type>_<load order>_<store_order>_v(obj, expected, value, code)
//
// int_type - 8, 16, 32, 64, ptr, ptr2x, 128 (128 available only on 64-bit architectures)
// load_order - relaxed, acquire, seq_cst
// store_order - relaxed, release, seq_cst
//
// obj - address to memory to store 'value' into.
// Must be cache-line size aligned and sized. Any update of this memory between the LL/SC pair results in unpredictable behaviour.
// expected - address to memory to load 'obj' into.
// Loaded by LL. Any updates of this memory between the LL/SC pair results in unpredictable behaviour.
// value - address to memory containing value to store into 'obj':
// Stored by SC to 'obj' memory on success, otherwise 'code' is repeated.
// code - code executed between the LL/SC pair.
//
// Notes on Arm optimized clang implementation:
// Armv7A and Armv8A architectures are enabled by default. Newer architectures will be enabled once tested and verified compliant.
// Specifically, the configuration of the exclusive access global/local monitors such as ERG (Exclusives Reservation Granule) size may vary on other platforms.
// See Arm Synchronization Primitives: http://infocenter.arm.com/help/topic/com.arm.doc.dht0008a/DHT0008A_arm_synchronization_primitives.pdf
// chapter 1.2 "Exclusive accesses" for more detailed information.
//
// Notes on default implementation (platforms/architectures not listed in the Arm clang notes)
// Atomic load and compare_exchange intrinsics emulates LL/SC capability.
// The values of 'expected' and 'obj' are compared to determine if SC should succeed and store 'value'.
//
// Example:
// struct Data { BASELIB_ALIGN_AS(PLATFORM_CACHE_LINE_SIZE) int32_t obj = 0; } data;
// int32_t expected = 1, value = 2;
// Baselib_atomic_llsc_32_relaxed_relaxed_v(&data.obj, &expected, &value, { if (expected == 0) value = 3; } );
// <-- obj is now 3
//
// Per-width LL/SC front-end macros; each forwards to the detail_ implementation selected below.
// 8-bit
#define Baselib_atomic_llsc_8_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, relaxed, relaxed)
#define Baselib_atomic_llsc_8_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, acquire, relaxed)
#define Baselib_atomic_llsc_8_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, relaxed, release)
#define Baselib_atomic_llsc_8_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, acquire, release)
#define Baselib_atomic_llsc_8_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 8, seq_cst, seq_cst)
// 16-bit
#define Baselib_atomic_llsc_16_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, relaxed, relaxed)
#define Baselib_atomic_llsc_16_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, acquire, relaxed)
#define Baselib_atomic_llsc_16_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, relaxed, release)
#define Baselib_atomic_llsc_16_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, acquire, release)
#define Baselib_atomic_llsc_16_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 16, seq_cst, seq_cst)
// 32-bit
#define Baselib_atomic_llsc_32_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, relaxed, relaxed)
#define Baselib_atomic_llsc_32_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, acquire, relaxed)
#define Baselib_atomic_llsc_32_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, relaxed, release)
#define Baselib_atomic_llsc_32_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, acquire, release)
#define Baselib_atomic_llsc_32_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 32, seq_cst, seq_cst)
// 64-bit
#define Baselib_atomic_llsc_64_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, relaxed, relaxed)
#define Baselib_atomic_llsc_64_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, acquire, relaxed)
#define Baselib_atomic_llsc_64_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, relaxed, release)
#define Baselib_atomic_llsc_64_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, acquire, release)
#define Baselib_atomic_llsc_64_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, 64, seq_cst, seq_cst)
// pointer-sized
#define Baselib_atomic_llsc_ptr_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, relaxed, relaxed)
#define Baselib_atomic_llsc_ptr_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, acquire, relaxed)
#define Baselib_atomic_llsc_ptr_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, relaxed, release)
#define Baselib_atomic_llsc_ptr_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, acquire, release)
#define Baselib_atomic_llsc_ptr_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_v(obj, expected, value, code, ptr, seq_cst, seq_cst)
#if PLATFORM_ARCH_64
// On 64-bit architectures a pointer pair is 128 bits wide, so ptr2x and 128 share the 128-bit implementation.
#define Baselib_atomic_llsc_ptr2x_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, relaxed)
#define Baselib_atomic_llsc_ptr2x_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, relaxed)
#define Baselib_atomic_llsc_ptr2x_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, release)
#define Baselib_atomic_llsc_ptr2x_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, release)
#define Baselib_atomic_llsc_ptr2x_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, seq_cst, seq_cst)
#define Baselib_atomic_llsc_128_relaxed_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, relaxed)
#define Baselib_atomic_llsc_128_acquire_relaxed_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, relaxed)
#define Baselib_atomic_llsc_128_relaxed_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, relaxed, release)
#define Baselib_atomic_llsc_128_acquire_release_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, acquire, release)
#define Baselib_atomic_llsc_128_seq_cst_seq_cst_v(obj, expected, value, code) detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, seq_cst, seq_cst)
#else // PLATFORM_ARCH_64
// On 32-bit architectures a pointer pair fits in 64 bits, so ptr2x maps to the 64-bit LL/SC.
#define Baselib_atomic_llsc_ptr2x_relaxed_relaxed_v(obj, expected, value, code) Baselib_atomic_llsc_64_relaxed_relaxed_v(obj, expected, value, code)
#define Baselib_atomic_llsc_ptr2x_acquire_relaxed_v(obj, expected, value, code) Baselib_atomic_llsc_64_acquire_relaxed_v(obj, expected, value, code)
#define Baselib_atomic_llsc_ptr2x_relaxed_release_v(obj, expected, value, code) Baselib_atomic_llsc_64_relaxed_release_v(obj, expected, value, code)
#define Baselib_atomic_llsc_ptr2x_acquire_release_v(obj, expected, value, code) Baselib_atomic_llsc_64_acquire_release_v(obj, expected, value, code)
#define Baselib_atomic_llsc_ptr2x_seq_cst_seq_cst_v(obj, expected, value, code) Baselib_atomic_llsc_64_seq_cst_seq_cst_v(obj, expected, value, code)
#endif
// Enable LLSC native support for supported compilers and architectures/profiles
#ifndef PLATFORM_LLSC_NATIVE_SUPPORT
#if (COMPILER_CLANG) && ((__ARM_ARCH >= 7) && (__ARM_ARCH < 9) && (__ARM_ARCH_PROFILE == 'A'))
#define PLATFORM_LLSC_NATIVE_SUPPORT 1
#else
#define PLATFORM_LLSC_NATIVE_SUPPORT 0
#endif
#endif
#if PLATFORM_LLSC_NATIVE_SUPPORT
// Arm specific implementation of LLSC macros
#include "Internal/Compiler/Baselib_Atomic_LLSC_Gcc.inl.h"
#else
// Generic implementation of LLSC macros
#include "Baselib_Atomic.h"
// LLSC exclusive state access break implementation (nop)
#define detail_Baselib_atomic_llsc_break()
// LLSC implementation using load/compare_exchange:
// load 'obj' into 'expected', run 'code', then retry 'code' until the
// compare-exchange of 'value' into 'obj' succeeds.
#define detail_Baselib_atomic_llsc_cmpxchg_v(obj, expected, value, code, size, loadbarrier, storebarrier) \
do { \
Baselib_atomic_load_##size##_##loadbarrier##_v(obj, expected); \
do { \
code; \
} while (!Baselib_atomic_compare_exchange_weak_##size##_##storebarrier##_##loadbarrier##_v(obj, expected, value)); \
} while (false)
// Order mapping: the compare-exchange success order combines the requested load and store
// orders (e.g. acquire+release -> acq_rel), and its failure order matches the load order.
#define detail_Baselib_atomic_llsc_relaxed_relaxed_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, relaxed, relaxed)
#define detail_Baselib_atomic_llsc_acquire_relaxed_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, acquire, acquire)
#define detail_Baselib_atomic_llsc_relaxed_release_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, relaxed, release)
#define detail_Baselib_atomic_llsc_acquire_release_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, acquire, acq_rel)
#define detail_Baselib_atomic_llsc_seq_cst_seq_cst_v(obj, expected, value, code, size) detail_Baselib_atomic_llsc_cmpxchg_v( obj, expected, value, code, size, seq_cst, seq_cst)
#define detail_Baselib_atomic_llsc_v(obj, expected, value, code, size, loadbarrier, storebarrier) \
detail_Baselib_atomic_llsc_##loadbarrier##_##storebarrier##_v(obj, expected, value, code, size)
#define detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, loadbarrier, storebarrier) \
detail_Baselib_atomic_llsc_v(obj, expected, value, code, 128, loadbarrier, storebarrier)
#endif // PLATFORM_LLSC_NATIVE_SUPPORT

View File

@@ -0,0 +1,167 @@
#pragma once
#include "Baselib_Alignment.h"
// X-macro helpers: each FOR_EACH_* macro below expands a caller-supplied macro once per
// supported combination of atomic operation and memory order.
//
// order - relaxed, acquire, release, acq_rel, seq_cst
//
// MACRO_(order, ...)
//
#define Baselib_Atomic_FOR_EACH_MEMORY_ORDER(MACRO_, ...) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(acquire, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(release, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(acq_rel, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(seq_cst, __VA_ARGS__))
//
// operation - load, store, fetch_add, fetch_and, fetch_or, fetch_xor, exchange, compare_exchange_weak, compare_exchange_strong
// order - relaxed, acquire, release, acq_rel, seq_cst
// order_success - relaxed, acquire, release, acq_rel, seq_cst
// order_failure - relaxed, acquire, seq_cst
//
// LOAD_MACRO_(operation, order, ...)
// STORE_MACRO_(operation, order, ...)
// ADD_MACRO_(operation, order, ...)
// AND_MACRO_(operation, order, ...)
// OR_MACRO_(operation, order, ...)
// XOR_MACRO_(operation, order, ...)
// XCHG_MACRO_(operation, order, ...)
// CMP_XCHG_WEAK_MACRO_(operation, order_success, order_failure, ...)
// CMP_XCHG_STRONG_MACRO_(operation, order_success, order_failure, ...)
//
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ...) \
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_MEMORY_ORDER(LOAD_MACRO_, load, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_STORE_MEMORY_ORDER(STORE_MACRO_, store, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(ADD_MACRO_, fetch_add, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(AND_MACRO_, fetch_and, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(OR_MACRO_, fetch_or, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(XOR_MACRO_, fetch_xor, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(XCHG_MACRO_, exchange, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_CMP_XCHG_MEMORY_ORDER(CMP_XCHG_WEAK_MACRO_, compare_exchange_weak, __VA_ARGS__) \
DETAIL__Baselib_Atomic_FOR_EACH_CMP_XCHG_MEMORY_ORDER(CMP_XCHG_STRONG_MACRO_, compare_exchange_strong, __VA_ARGS__)
//
// Convenience overload: one macro for all read-modify-write ops and one for both compare-exchange flavors.
// LOAD_MACRO_(operation, order, ...)
// STORE_MACRO_(operation, order, ...)
// LOAD_STORE_MACRO_(operation, order, ...)
// CMP_XCHG_MACRO_(operation, order_success, order_failure, ...)
//
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER2(LOAD_MACRO_, STORE_MACRO_, LOAD_STORE_MACRO_, CMP_XCHG_MACRO_, ...) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER( \
LOAD_MACRO_, \
STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
CMP_XCHG_MACRO_, \
CMP_XCHG_MACRO_, \
__VA_ARGS__)
//
// operation - load, store, fetch_add, fetch_and, fetch_or, fetch_xor, exchange, compare_exchange_weak, compare_exchange_strong
// order - relaxed, acquire, release, acq_rel, seq_cst
// order_success - relaxed, acquire, release, acq_rel, seq_cst
// order_failure - relaxed, acquire, seq_cst
// id - 8, 16, 32, 64
// bits - 8, 16, 32, 64
// int_type - int8_t, int16_t, int32_t, int64_t
//
// LOAD_MACRO_(operation, order, id, bits, int_type, ...)
// STORE_MACRO_(operation, order, id, bits, int_type, ...)
// ADD_MACRO_(operation, order, id, bits, int_type, ...)
// AND_MACRO_(operation, order, id, bits, int_type, ...)
// OR_MACRO_(operation, order, id, bits, int_type, ...)
// XOR_MACRO_(operation, order, id, bits, int_type, ...)
// XCHG_MACRO_(operation, order, id, bits, int_type, ...)
// CMP_XCHG_WEAK_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
// CMP_XCHG_STRONG_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
//
// Expands the given per-operation macros for every memory order and each fixed-width integer
// type, passing (id, bits, int_type) = (8, 8, int8_t) ... (64, 64, int64_t) plus __VA_ARGS__.
// NOTE: the 8-bit line previously lacked the comma between int8_t and __VA_ARGS__, pasting
// them into a single macro argument and shifting the trailing parameters; it now matches the
// 16/32/64-bit lines.
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ...) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 8, 8, int8_t, __VA_ARGS__) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 16, 16, int16_t, __VA_ARGS__) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 32, 32, int32_t, __VA_ARGS__) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, 64, 64, int64_t, __VA_ARGS__)
//
// operation - load, store, fetch_add, fetch_and, fetch_or, fetch_xor, exchange, compare_exchange_weak, compare_exchange_strong
// order - relaxed, acquire, release, acq_rel, seq_cst
// order_success - relaxed, acquire, release, acq_rel, seq_cst
// order_failure - relaxed, acquire, seq_cst
// id - 8, 16, 32, 64, ptr
// bits - 8, 16, 32, 64
// int_type - int8_t, int16_t, int32_t, int64_t, intptr_t
//
// LOAD_MACRO_(operation, order, id, bits, int_type, ...)
// STORE_MACRO_(operation, order, id, bits, int_type, ...)
// ADD_MACRO_(operation, order, id, bits, int_type, ...)
// AND_MACRO_(operation, order, id, bits, int_type, ...)
// OR_MACRO_(operation, order, id, bits, int_type, ...)
// XOR_MACRO_(operation, order, id, bits, int_type, ...)
// XCHG_MACRO_(operation, order, id, bits, int_type, ...)
// CMP_XCHG_WEAK_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
// CMP_XCHG_STRONG_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
//
// Same as the INT_TYPE variant, with an additional pointer-sized ("ptr"/intptr_t) expansion.
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ...) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, __VA_ARGS__) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(LOAD_MACRO_, STORE_MACRO_, ADD_MACRO_, AND_MACRO_, OR_MACRO_, XOR_MACRO_, XCHG_MACRO_, CMP_XCHG_WEAK_MACRO_, CMP_XCHG_STRONG_MACRO_, ptr, DETAIL__Baselib_Atomic_PTR_SIZE, intptr_t, __VA_ARGS__)
//
// LOAD_MACRO_(operation, order, id, bits, int_type, ...)
// STORE_MACRO_(operation, order, id, bits, int_type, ...)
// LOAD_STORE_MACRO_(operation, order, id, bits, int_type, ...)
// CMP_XCHG_MACRO_(operation, order_success, order_failure, id , bits, int_type, ...)
//
#define Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE2(LOAD_MACRO_, STORE_MACRO_, LOAD_STORE_MACRO_, CMP_XCHG_MACRO_, ...) \
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE( \
LOAD_MACRO_, \
STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
LOAD_STORE_MACRO_, \
CMP_XCHG_MACRO_, \
CMP_XCHG_MACRO_, \
__VA_ARGS__)
//
// Implementation details
// ----------------------------------------------------------------------------------
// Pointer width in bits, used for the "ptr" typed expansion above.
#if PLATFORM_ARCH_64
#define DETAIL__Baselib_Atomic_PTR_SIZE 64
#else
#define DETAIL__Baselib_Atomic_PTR_SIZE 32
#endif
#define DETAIL__Baselib_Atomic_EVAL(...) __VA_ARGS__
// Loads: relaxed/acquire/seq_cst only.
#define DETAIL__Baselib_Atomic_FOR_EACH_LOAD_MEMORY_ORDER(MACRO_, OP_, ...) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acquire, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, seq_cst, __VA_ARGS__))
// Stores: relaxed/release/seq_cst only.
#define DETAIL__Baselib_Atomic_FOR_EACH_STORE_MEMORY_ORDER(MACRO_, OP_, ...) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, release, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, seq_cst, __VA_ARGS__))
// Read-modify-write: all five orders.
#define DETAIL__Baselib_Atomic_FOR_EACH_LOAD_STORE_MEMORY_ORDER(MACRO_, OP_, ...) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acquire, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, release, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acq_rel, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, seq_cst, __VA_ARGS__))
// Compare-exchange: the supported (success, failure) order pairs - the failure order is
// never release/acq_rel and never stronger than the success order.
#define DETAIL__Baselib_Atomic_FOR_EACH_CMP_XCHG_MEMORY_ORDER(MACRO_, OP_, ...) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, relaxed, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acquire, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acquire, acquire, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, release, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acq_rel, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, acq_rel, acquire, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, seq_cst, relaxed, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, seq_cst, acquire, __VA_ARGS__)) \
DETAIL__Baselib_Atomic_EVAL(MACRO_(OP_, seq_cst, seq_cst, __VA_ARGS__))

View File

@@ -0,0 +1,343 @@
#pragma once
#include "Baselib_Atomic.h"
// TypeSafe version of baselib atomics "C" API
// Each operation is declared once per integer width and per supported memory order
// (loads: relaxed/acquire/seq_cst; stores: relaxed/release/seq_cst; read-modify-write:
// relaxed/acquire/release/acq_rel/seq_cst; compare-exchange: <success>_<failure> order pairs).
// 8-bit declarations
// ----------------------------------------------------------------------------------------------------------------------------------------
// Loads (relaxed/acquire/seq_cst) and stores (relaxed/release/seq_cst).
static FORCE_INLINE int8_t Baselib_atomic_load_8_relaxed(const int8_t* obj);
static FORCE_INLINE int8_t Baselib_atomic_load_8_acquire(const int8_t* obj);
static FORCE_INLINE int8_t Baselib_atomic_load_8_seq_cst(const int8_t* obj);
static FORCE_INLINE void Baselib_atomic_store_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE void Baselib_atomic_store_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE void Baselib_atomic_store_8_seq_cst(int8_t* obj, int8_t value);
// Read-modify-write (fetch_add/and/or/xor, exchange), all five orders; these return a value,
// presumably the previous value per the usual fetch_* convention - confirm in Baselib_Atomic.h.
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_add_8_seq_cst(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_and_8_seq_cst(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_or_8_seq_cst(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_fetch_xor_8_seq_cst(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_relaxed(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_acquire(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_release(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_acq_rel(int8_t* obj, int8_t value);
static FORCE_INLINE int8_t Baselib_atomic_exchange_8_seq_cst(int8_t* obj, int8_t value);
// Compare-exchange, weak then strong, one declaration per (success, failure) order pair.
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_relaxed_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acquire_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_release_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_acq_rel_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_8_seq_cst_seq_cst(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_relaxed_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acquire_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_release_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_acq_rel_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_relaxed(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_acquire(int8_t* obj, int8_t* expected, int8_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_8_seq_cst_seq_cst(int8_t* obj, int8_t* expected, int8_t value);
// 16-bit declarations (same pattern as the 8-bit section above)
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE int16_t Baselib_atomic_load_16_relaxed(const int16_t* obj);
static FORCE_INLINE int16_t Baselib_atomic_load_16_acquire(const int16_t* obj);
static FORCE_INLINE int16_t Baselib_atomic_load_16_seq_cst(const int16_t* obj);
static FORCE_INLINE void Baselib_atomic_store_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE void Baselib_atomic_store_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE void Baselib_atomic_store_16_seq_cst(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_add_16_seq_cst(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_and_16_seq_cst(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_or_16_seq_cst(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_fetch_xor_16_seq_cst(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_relaxed(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_acquire(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_release(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_acq_rel(int16_t* obj, int16_t value);
static FORCE_INLINE int16_t Baselib_atomic_exchange_16_seq_cst(int16_t* obj, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_relaxed_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acquire_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_release_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_acq_rel_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_16_seq_cst_seq_cst(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_relaxed_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acquire_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_release_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_acq_rel_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_relaxed(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_acquire(int16_t* obj, int16_t* expected, int16_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_16_seq_cst_seq_cst(int16_t* obj, int16_t* expected, int16_t value);
// 32-bit declarations (same pattern as the 8-bit section above)
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE int32_t Baselib_atomic_load_32_relaxed(const int32_t* obj);
static FORCE_INLINE int32_t Baselib_atomic_load_32_acquire(const int32_t* obj);
static FORCE_INLINE int32_t Baselib_atomic_load_32_seq_cst(const int32_t* obj);
static FORCE_INLINE void Baselib_atomic_store_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE void Baselib_atomic_store_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE void Baselib_atomic_store_32_seq_cst(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_add_32_seq_cst(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_and_32_seq_cst(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_or_32_seq_cst(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_fetch_xor_32_seq_cst(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_relaxed(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_acquire(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_release(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_acq_rel(int32_t* obj, int32_t value);
static FORCE_INLINE int32_t Baselib_atomic_exchange_32_seq_cst(int32_t* obj, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acquire_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_release_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_acq_rel_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_32_seq_cst_seq_cst(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_relaxed_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acquire_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_release_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_acq_rel_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_relaxed(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_acquire(int32_t* obj, int32_t* expected, int32_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_32_seq_cst_seq_cst(int32_t* obj, int32_t* expected, int32_t value);
// 64-bit declarations
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE int64_t Baselib_atomic_load_64_relaxed(const int64_t* obj);
static FORCE_INLINE int64_t Baselib_atomic_load_64_acquire(const int64_t* obj);
static FORCE_INLINE int64_t Baselib_atomic_load_64_seq_cst(const int64_t* obj);
static FORCE_INLINE void Baselib_atomic_store_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE void Baselib_atomic_store_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE void Baselib_atomic_store_64_seq_cst(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_add_64_seq_cst(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_and_64_seq_cst(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_or_64_seq_cst(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_fetch_xor_64_seq_cst(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_relaxed(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_acquire(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_release(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_acq_rel(int64_t* obj, int64_t value);
static FORCE_INLINE int64_t Baselib_atomic_exchange_64_seq_cst(int64_t* obj, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acquire_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_release_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_acq_rel_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_64_seq_cst_seq_cst(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_relaxed_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acquire_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_release_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_acq_rel_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_relaxed(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_acquire(int64_t* obj, int64_t* expected, int64_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_64_seq_cst_seq_cst(int64_t* obj, int64_t* expected, int64_t value);
// ptr declarations
// ------------------------------------------------------------------------------------------------------------------------------
static FORCE_INLINE intptr_t Baselib_atomic_load_ptr_relaxed(const intptr_t* obj);
static FORCE_INLINE intptr_t Baselib_atomic_load_ptr_acquire(const intptr_t* obj);
static FORCE_INLINE intptr_t Baselib_atomic_load_ptr_seq_cst(const intptr_t* obj);
static FORCE_INLINE void Baselib_atomic_store_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE void Baselib_atomic_store_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE void Baselib_atomic_store_ptr_seq_cst(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_add_ptr_seq_cst(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_and_ptr_seq_cst(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_or_ptr_seq_cst(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_fetch_xor_ptr_seq_cst(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_relaxed(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_acquire(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_release(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_acq_rel(intptr_t* obj, intptr_t value);
static FORCE_INLINE intptr_t Baselib_atomic_exchange_ptr_seq_cst(intptr_t* obj, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_relaxed_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acquire_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_release_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_acq_rel_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_weak_ptr_seq_cst_seq_cst(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_relaxed_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acquire_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_release_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_acq_rel_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_relaxed(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_acquire(intptr_t* obj, intptr_t* expected, intptr_t value);
static FORCE_INLINE bool Baselib_atomic_compare_exchange_strong_ptr_seq_cst_seq_cst(intptr_t* obj, intptr_t* expected, intptr_t value);
// Macro Implementation
// ------------------------------------------------------------------------------------------------------------------------------
// Expands to a typed atomic load wrapper Baselib_atomic_<op>_<id>_<order>,
// forwarding to the pointer-based ..._<bits>_..._v primitive and returning the
// loaded value by value.
#define detail_LOAD(op, order, id , bits, int_type, ...) \
static FORCE_INLINE int_type Baselib_atomic_##op##_##id##_##order(const int_type* obj) \
{ \
int_type result; \
Baselib_atomic_##op##_##bits##_##order##_v(obj, &result); \
return result; \
}
// Expands to a typed atomic store wrapper; the new value is taken by value and
// handed to the _v primitive by address.
#define detail_STORE(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order(int_type* obj, int_type value) \
{ \
Baselib_atomic_##op##_##bits##_##order##_v(obj, &value); \
}
// Expands to a typed read-modify-write wrapper (fetch_add/and/or/xor, exchange)
// returning the value the _v primitive writes into `result`.
#define detail_LOAD_STORE(op, order, id , bits, int_type, ...) \
static FORCE_INLINE int_type Baselib_atomic_##op##_##id##_##order(int_type* obj, int_type value) \
{ \
int_type result; \
Baselib_atomic_##op##_##bits##_##order##_v(obj, &value, &result); \
return result; \
}
// Expands to a typed compare-exchange wrapper carrying separate success/failure
// memory orders; obj, expected and &value are forwarded to the _v primitive.
#define detail_CMP_XCHG(op, order1, order2, id , bits, int_type, ...) \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2(int_type* obj, int_type* expected, int_type value) \
{ \
return Baselib_atomic_##op##_##bits##_##order1##_##order2##_v(obj, expected, &value); \
}
// Instantiate all of the wrappers declared above, for every combination of
// operation, memory order and integer type.
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE2(detail_LOAD, detail_STORE, detail_LOAD_STORE, detail_CMP_XCHG);
#undef detail_LOAD
#undef detail_STORE
#undef detail_LOAD_STORE
#undef detail_CMP_XCHG

View File

@@ -0,0 +1,81 @@
#pragma once
// Baselib_CappedSemaphore
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
// decremented, or toggled) depending on programmer-defined conditions.
//
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
// resource becomes available.
//
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/Baselib_CappedSemaphore_FutexBased.inl.h"
#else
#include "Internal/Baselib_CappedSemaphore_SemaphoreBased.inl.h"
#endif
// Creates a capped counting semaphore synchronization primitive.
//
// Cap is the number of tokens that can be held by the semaphore when there is no contention.
// If there are not enough system resources to create a semaphore, process abort is triggered.
//
// For optimal performance, the returned Baselib_CappedSemaphore should be stored at a cache aligned memory location.
//
// \returns A struct representing a semaphore instance. Use Baselib_CappedSemaphore_Free to free the semaphore.
BASELIB_INLINE_API Baselib_CappedSemaphore Baselib_CappedSemaphore_Create(uint16_t cap);
// Try to consume a token and return immediately.
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// \returns true if token was consumed. false if not.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryAcquire(Baselib_CappedSemaphore* semaphore);
// Wait for semaphore token to become available
//
// This function is guaranteed to emit an acquire barrier.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Acquire(Baselib_CappedSemaphore* semaphore);
// Wait for semaphore token to become available
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while Acquire may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \param timeoutInMilliseconds Time to wait for token to become available in milliseconds.
//
// \returns true if token was consumed. false if timeout was reached.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryTimedAcquire(Baselib_CappedSemaphore* semaphore, const uint32_t timeoutInMilliseconds);
// Submit tokens to the semaphore.
//
// If threads are waiting an equal amount of tokens are consumed before this function return.
//
// When successful this function is guaranteed to emit a release barrier.
//
// \returns number of submitted tokens.
BASELIB_INLINE_API uint16_t Baselib_CappedSemaphore_Release(Baselib_CappedSemaphore* semaphore, const uint16_t count);
// Sets the semaphore token count to zero and release all waiting threads.
//
// When successful this function is guaranteed to emit a release barrier.
//
// \returns number of released threads.
BASELIB_INLINE_API uint32_t Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(Baselib_CappedSemaphore* semaphore);
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
// Calling this function with a nullptr results in a no-op.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Free(Baselib_CappedSemaphore* semaphore);

View File

@@ -0,0 +1,61 @@
#pragma once
#include "Baselib_Timer.h"
#include <math.h>
typedef struct Baselib_CountdownTimer
{
Baselib_Timer_Ticks startTimeInTicks;
Baselib_Timer_Ticks timeoutInTicks;
} Baselib_CountdownTimer;
// Convert a duration in milliseconds to high-precision timer ticks.
// The computation is done in double precision; the final cast truncates any
// sub-tick remainder toward zero.
BASELIB_INLINE_API Baselib_Timer_Ticks Detail_MillisecondsToTicks(double milliseconds)
{
return (Baselib_Timer_Ticks)(milliseconds * Baselib_NanosecondsPerMillisecond / Baselib_Timer_TickToNanosecondsConversionFactor);
}
// Convert high-precision timer ticks to milliseconds, preserving fractional
// milliseconds in the double result.
BASELIB_INLINE_API double Detail_TicksToMilliseconds(Baselib_Timer_Ticks ticks)
{
return ticks * Baselib_Timer_TickToNanosecondsConversionFactor / Baselib_NanosecondsPerMillisecond;
}
// Create and start a countdown timer from a timeout given in milliseconds.
BASELIB_INLINE_API Baselib_CountdownTimer Baselib_CountdownTimer_StartMs(uint32_t timeoutInMilliseconds)
{
    Baselib_CountdownTimer countdown;
    countdown.startTimeInTicks = Baselib_Timer_GetHighPrecisionTimerTicks();
    countdown.timeoutInTicks = Detail_MillisecondsToTicks(timeoutInMilliseconds);
    return countdown;
}
// Create and start a countdown timer whose timeout is given directly in ticks.
BASELIB_INLINE_API Baselib_CountdownTimer Baselib_CountdownTimer_StartTicks(Baselib_Timer_Ticks timeoutInTicks)
{
    Baselib_CountdownTimer countdown;
    countdown.startTimeInTicks = Baselib_Timer_GetHighPrecisionTimerTicks();
    countdown.timeoutInTicks = timeoutInTicks;
    return countdown;
}
// Get the number of ticks left before countdown expires.
//
// This function is guaranteed to return zero once timeout expired.
// It is also guaranteed that this function will not return zero until timeout expires.
BASELIB_INLINE_API Baselib_Timer_Ticks Baselib_CountdownTimer_GetTimeLeftInTicks(Baselib_CountdownTimer timer)
{
const Baselib_Timer_Ticks then = timer.startTimeInTicks;
const Baselib_Timer_Ticks now = Baselib_Timer_GetHighPrecisionTimerTicks();
// timeLeft = timeout - elapsed. If elapsed exceeds the timeout, the
// subtraction wraps around and yields a value strictly greater than
// timeoutInTicks, which the comparison below clamps to zero.
// NOTE(review): this relies on Baselib_Timer_Ticks being an unsigned
// integer type (wraparound is well-defined) — confirm in Baselib_Timer.h.
const Baselib_Timer_Ticks timeLeft = timer.timeoutInTicks - (now - then);
return timeLeft <= timer.timeoutInTicks ? timeLeft : 0;
}
// Get the number of milliseconds left before countdown expires.
//
// This function is guaranteed to return zero once timeout expired.
// It is also guaranteed that this function will not return zero until timeout expires.
BASELIB_INLINE_API uint32_t Baselib_CountdownTimer_GetTimeLeftInMilliseconds(Baselib_CountdownTimer timer)
{
    const double millisecondsLeft = Detail_TicksToMilliseconds(Baselib_CountdownTimer_GetTimeLeftInTicks(timer));
    // Round up so that a partially elapsed millisecond still reports as remaining time.
    return (uint32_t)ceil(millisecondsLeft);
}
// Check whether the timeout has been reached.
BASELIB_INLINE_API bool Baselib_CountdownTimer_TimeoutExpired(Baselib_CountdownTimer timer)
{
    const Baselib_Timer_Ticks remainingTicks = Baselib_CountdownTimer_GetTimeLeftInTicks(timer);
    return remainingTicks == 0;
}

View File

@@ -0,0 +1,21 @@
#pragma once
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Generates breakpoint exception (interrupt) the same way as normal breakpoint would.
//
// If debugger is attached, this will break into the debugger.
// If debugger is not attached, application will crash, unless breakpoint exception is handled.
// Breakpoint exception can be handled on some platforms by using signal(SIGTRAP, ...) or AddVectoredExceptionHandler.
// Platforms can override default compiler implementation by providing BASELIB_DEBUG_TRAP.
#define Baselib_Debug_Break() BASELIB_DEBUG_TRAP()
// \returns true if debugger is attached
BASELIB_API bool Baselib_Debug_IsDebuggerAttached(void);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,128 @@
#pragma once
// Baselib Dynamic Library.
// In computing, a dynamic linker is the part of an operating system that loads and links
// the shared libraries needed by an executable when it is executed (at "run time"),
// by copying the content of libraries from persistent storage to RAM, filling jump tables and
// relocating pointers. The specific operating system and executable format determine how
// the dynamic linker functions and how it is implemented.
//
// "Dynamic linker", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Dynamic_linker&oldid=935827444
//
// Platform specific gotchas:
// - On Posix/Darwin based platforms, if executable/library has import entries,
// as for importing functions from .so's/.dylib's at executable/library open time,
// Baselib_DynamicLibrary_GetFunction is able to return them as well.
// This is because of ELF/Mach-O format limitations.
// - On Posix/Darwin based platforms, to be able to query symbols in an executable
// they must be made visible via --external-dynamic and -external_dynamic flags respectively.
// Some linkers have an option to make specific symbols visible.
// - Emscripten limitations are detailed in
// https://github.com/emscripten-core/emscripten/wiki/Linking
// - On some platforms dynamic linker doesn't load downstream dependencies.
// For example if library A imports a symbol from library B,
// and this is passed to the compiler/linker at compilation step,
// on most platforms it will generate load entries inside library A to load library B,
// so if you load library A then library B will be loaded for you by the dynamic linker.
// But on some platforms, you have to load library B first, and then library A.
#include "Baselib_ErrorState.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Handle to an opened dynamic library, wrapping the platform's native handle value.
typedef struct Baselib_DynamicLibrary_Handle { intptr_t handle; } Baselib_DynamicLibrary_Handle;
// values in range from 0 inclusive to -5 are valid handles on some platforms,
// so the invalid-handle sentinel is chosen well outside that range.
static const Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_Handle_Invalid = { -100 };
#include <C/Baselib_DynamicLibrary.inl.h>
// Open a dynamic library.
//
// Dynamic libraries are reference counted, so if the same library is loaded again
// with Baselib_DynamicLibrary_OpenUtf8/Baselib_DynamicLibrary_OpenUtf16, the same file handle is returned.
// It is also possible to load two different libraries containing two different functions that have the same name.
//
// Please note that additional error information should be retrieved via error state explain and be presented to the end user.
// This is needed to improve ergonomics of debugging library loading issues.
//
// \param pathnameUtf8 Library file to be opened.
// If relative pathname is provided, platform library search rules are applied (if any).
// If nullptr is passed, Baselib_ErrorCode_InvalidArgument will be raised.
//
// Possible error codes:
// - Baselib_ErrorCode_FailedToOpenDynamicLibrary: Unable to open requested dynamic library.
// - Baselib_ErrorCode_NotSupported: This feature is not supported on the current platform.
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_OpenUtf8(
const char* pathnameUtf8,
Baselib_ErrorState* errorState
);
// Open a dynamic library.
// Functionally identical to Baselib_DynamicLibrary_OpenUtf8, but accepts UTF-16 path instead.
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_OpenUtf16(
const baselib_char16_t* pathnameUtf16,
Baselib_ErrorState* errorState
);
// Return a handle that can be used to query functions in the program's scope.
// Must be closed via Baselib_DynamicLibrary_Close.
//
// Possible error codes:
// - Baselib_ErrorCode_NotSupported: This feature is not supported on the current platform.
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_OpenProgramHandle(
Baselib_ErrorState* errorState
);
// Convert native handle into baselib handle without changing the dynamic library ref counter.
//
// Provided handle should be closed either via Baselib_DynamicLibrary_Close or other means.
// The caller is responsible for closing the handle once done with it.
// Other corresponding resources should be closed by other means.
//
// \param handle Platform defined native handle.
// \param type Platform defined native handle type from Baselib_DynamicLibrary_NativeHandleType enum.
// If unsupported type is passed, will return Baselib_DynamicLibrary_Handle_Invalid.
//
// \returns Baselib_DynamicLibrary_Handle handle.
BASELIB_API Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_FromNativeHandle(
uint64_t handle,
uint32_t type,
Baselib_ErrorState* errorState
);
// Lookup a function in a dynamic library.
//
// \param handle Library handle.
// If Baselib_DynamicLibrary_Handle_Invalid is passed, Baselib_ErrorCode_InvalidArgument will be raised.
// \param functionName Function name to look for.
// If nullptr is passed, Baselib_ErrorCode_InvalidArgument will be raised.
//
// \returns pointer to the function (can be NULL for symbols mapped to NULL).
//
// Possible error codes:
// - Baselib_ErrorCode_FunctionNotFound: Requested function was not found.
BASELIB_API void* Baselib_DynamicLibrary_GetFunction(
Baselib_DynamicLibrary_Handle handle,
const char* functionName,
Baselib_ErrorState* errorState
);
// Close a dynamic library.
//
// Decreases reference counter, if it becomes zero, closes the library.
// If a system API call returns an error during this operation, the process will be aborted.
//
// \param handle Library handle.
// If Baselib_DynamicLibrary_Handle_Invalid is passed, function is no-op.
BASELIB_API void Baselib_DynamicLibrary_Close(
Baselib_DynamicLibrary_Handle handle
);
#if __cplusplus
}
#endif

View File

@@ -0,0 +1,60 @@
#pragma once
#include "Internal/Baselib_EnumSizeCheck.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// All possible baselib error codes.
// Each subsystem owns a distinct 0xNN000000 range; codes within a range are
// assigned sequentially from that base.
typedef enum Baselib_ErrorCode
{
Baselib_ErrorCode_Success = 0x00000000,
// Common (0x01xxxxxx)
Baselib_ErrorCode_OutOfMemory = 0x01000000,
Baselib_ErrorCode_OutOfSystemResources,
Baselib_ErrorCode_InvalidAddressRange,
// nativeErrorCode contains name of invalid argument
Baselib_ErrorCode_InvalidArgument,
Baselib_ErrorCode_InvalidBufferSize,
Baselib_ErrorCode_InvalidState,
Baselib_ErrorCode_NotSupported,
Baselib_ErrorCode_Timeout,
// Memory (0x02xxxxxx)
Baselib_ErrorCode_UnsupportedAlignment = 0x02000000,
Baselib_ErrorCode_InvalidPageSize,
Baselib_ErrorCode_InvalidPageCount,
Baselib_ErrorCode_UnsupportedPageState,
// Thread (0x03xxxxxx)
Baselib_ErrorCode_ThreadCannotJoinSelf = 0x03000000,
// Socket (0x04xxxxxx)
Baselib_ErrorCode_NetworkInitializationError = 0x04000000,
Baselib_ErrorCode_AddressInUse,
// Raised when the destination cannot be reached, or when the address requested for bind was not local.
Baselib_ErrorCode_AddressUnreachable,
Baselib_ErrorCode_AddressFamilyNotSupported,
Baselib_ErrorCode_Disconnected,
// FileIO (0x05xxxxxx)
Baselib_ErrorCode_InvalidPathname = 0x05000000,
Baselib_ErrorCode_RequestedAccessIsNotAllowed,
Baselib_ErrorCode_IOError,
// DynamicLibrary (0x06xxxxxx)
Baselib_ErrorCode_FailedToOpenDynamicLibrary = 0x06000000,
Baselib_ErrorCode_FunctionNotFound,
// An error that was not anticipated by the baselib authors.
// Occurrence of this error is preceded by a debug assertion.
Baselib_ErrorCode_UnexpectedError = 0xFFFFFFFF,
} Baselib_ErrorCode;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_ErrorCode);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,134 @@
#pragma once
#include "Baselib_ErrorCode.h"
#include "Baselib_SourceLocation.h"
#include <assert.h>
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Native error code type.
//
// Discriminates how Baselib_ErrorState::nativeErrorCode should be interpreted.
typedef enum Baselib_ErrorState_NativeErrorCodeType_t
{
// Native error code is not present.
Baselib_ErrorState_NativeErrorCodeType_None = 0,
// All platform-defined error code types must be greater than or equal to this value.
Baselib_ErrorState_NativeErrorCodeType_PlatformDefined,
} Baselib_ErrorState_NativeErrorCodeType_t;
// Storage type actually used in Baselib_ErrorState (kept to one byte for ABI reasons).
typedef uint8_t Baselib_ErrorState_NativeErrorCodeType;
// Extra information type.
//
// Discriminates how Baselib_ErrorState::extraInformation should be interpreted.
typedef enum Baselib_ErrorState_ExtraInformationType_t
{
// Extra information is not present.
Baselib_ErrorState_ExtraInformationType_None = 0,
// Extra information is a pointer of const char* type.
// Pointer guaranteed to be valid for lifetime of the program (static strings, buffers, etc).
Baselib_ErrorState_ExtraInformationType_StaticString,
// Extra information is a generation counter to ErrorState internal static buffer.
Baselib_ErrorState_ExtraInformationType_GenerationCounter,
} Baselib_ErrorState_ExtraInformationType_t;
// Storage type actually used in Baselib_ErrorState (kept to one byte for ABI reasons).
typedef uint8_t Baselib_ErrorState_ExtraInformationType;
// Baselib error information.
//
// All functions that expect a pointer to an error state object will *not* accept a nullptr for it.
// If an error state with code other than Success is passed, the function is guaranteed to early out.
// Note that even if an error state is expected, there might be no full argument validation. For details check documentation of individual functions.
typedef struct Baselib_ErrorState
{
// Source location recorded when the error was raised.
Baselib_SourceLocation sourceLocation;
// Platform-specific error value; interpret according to nativeErrorCodeType.
uint64_t nativeErrorCode;
// Additional payload; interpret according to extraInformationType.
uint64_t extraInformation;
// Baselib error code; Baselib_ErrorCode_Success means no error raised.
Baselib_ErrorCode code;
// Discriminator for nativeErrorCode (see Baselib_ErrorState_NativeErrorCodeType_t).
Baselib_ErrorState_NativeErrorCodeType nativeErrorCodeType;
// Discriminator for extraInformation (see Baselib_ErrorState_ExtraInformationType_t).
Baselib_ErrorState_ExtraInformationType extraInformationType;
} Baselib_ErrorState;
// Creates a new error state object that is initialized to Baselib_ErrorCode_Success.
static inline Baselib_ErrorState Baselib_ErrorState_Create(void)
{
Baselib_ErrorState errorState = {
{ NULL, NULL, 0 },
0,
0,
Baselib_ErrorCode_Success,
Baselib_ErrorState_NativeErrorCodeType_None,
Baselib_ErrorState_ExtraInformationType_None
};
return errorState;
}
// Resets an existing error state to success and passes it on. Passes nullptr directly on.
//
// \param errorState Error state to reset; may be null.
// \returns The same pointer that was passed in (possibly null).
static inline Baselib_ErrorState* Baselib_ErrorState_Reset(Baselib_ErrorState* errorState)
{
    if (errorState != NULL)
    {
        errorState->code = Baselib_ErrorCode_Success;
    }
    return errorState;
}
static inline bool Baselib_ErrorState_ErrorRaised(const Baselib_ErrorState* errorState)
{
BaselibAssert(errorState);
return errorState->code != Baselib_ErrorCode_Success;
}
// Records an error on the given error state.
//
// First error wins: when the target state is null, or already carries a code
// other than Success, the call is a no-op so the original failure is preserved.
//
// \param errorState           State to record into; may be null (no-op).
// \param errorCode            Baselib error code to record.
// \param nativeErrorCodeType  Discriminator describing nativeErrorCode.
// \param nativeErrorCode      Platform-specific error value.
// \param extraInformationType Discriminator describing extraInformation.
// \param extraInformation     Additional payload for the error.
// \param sourceLocation       Location where the error was raised.
static inline void Baselib_ErrorState_RaiseError(
    Baselib_ErrorState* errorState,
    Baselib_ErrorCode errorCode,
    Baselib_ErrorState_NativeErrorCodeType nativeErrorCodeType,
    uint64_t nativeErrorCode,
    Baselib_ErrorState_ExtraInformationType extraInformationType,
    uint64_t extraInformation,
    Baselib_SourceLocation sourceLocation
)
{
    // Nowhere to store the error, or an earlier error is already recorded.
    if (errorState == NULL || errorState->code != Baselib_ErrorCode_Success)
        return;

    errorState->sourceLocation = sourceLocation;
    errorState->nativeErrorCode = nativeErrorCode;
    errorState->extraInformation = extraInformation;
    errorState->code = errorCode;
    errorState->nativeErrorCodeType = nativeErrorCodeType;
    errorState->extraInformationType = extraInformationType;
}
// Verbosity levels for Baselib_ErrorState_Explain.
typedef enum Baselib_ErrorState_ExplainVerbosity
{
// Include error type with platform specific value (if specified).
Baselib_ErrorState_ExplainVerbosity_ErrorType = 0,
// Include error type with platform specific value (if specified),
// source location (subject to BASELIB_ENABLE_SOURCELOCATION define) and an error explanation if available.
Baselib_ErrorState_ExplainVerbosity_ErrorType_SourceLocation_Explanation = 1,
} Baselib_ErrorState_ExplainVerbosity;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_ErrorState_ExplainVerbosity);
// Writes a null terminated string containing native error code value and explanation if possible.
//
// \param errorState Error state to explain. If null an empty string will be written into buffer.
// \param buffer Buffer to write explanation into.
// If nullptr is passed, nothing will be written but function will still return correct amount of bytes.
// \param bufferLen Length of buffer in bytes.
// If 0 is passed, behaviour is the same as passing nullptr as buffer.
// \param verbosity Verbosity level of the explanation string.
//
// \returns the number of characters that would have been written if buffer had been sufficiently large, including the terminating null character.
BASELIB_API uint32_t Baselib_ErrorState_Explain(
const Baselib_ErrorState* errorState,
char buffer[],
uint32_t bufferLen,
Baselib_ErrorState_ExplainVerbosity verbosity
);
#include <C/Baselib_ErrorState.inl.h>
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,88 @@
#pragma once
// Baselib_EventSemaphore
// In computer science, an event (also called event semaphore) is a type of synchronization mechanism that is used to indicate to waiting processes when a
// particular condition has become true.
// An event is an abstract data type with a boolean state and the following operations:
// * wait - when executed, causes the suspension of the executing process until the state of the event is set to true. If the state is already set to true has no effect.
// * set - sets the event's state to true, release all waiting processes.
// * clear - sets the event's state to false.
//
// "Event (synchronization primitive)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Event_(synchronization_primitive)&oldid=781517732
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/Baselib_EventSemaphore_FutexBased.inl.h"
#else
#include "Internal/Baselib_EventSemaphore_SemaphoreBased.inl.h"
#endif
// Creates an event semaphore synchronization primitive. Initial state of event is unset.
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
//
// For optimal performance, the returned Baselib_EventSemaphore should be stored at a cache aligned memory location.
//
// \returns A struct representing a semaphore instance. Use Baselib_EventSemaphore_Free to free the semaphore.
BASELIB_INLINE_API Baselib_EventSemaphore Baselib_EventSemaphore_Create(void);
// Try to acquire semaphore.
//
// When semaphore is acquired this function is guaranteed to emit an acquire barrier.
//
// \returns true if the event is set, false otherwise.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryAcquire(Baselib_EventSemaphore* semaphore);
// Acquire semaphore.
//
// This function is guaranteed to emit an acquire barrier.
BASELIB_INLINE_API void Baselib_EventSemaphore_Acquire(Baselib_EventSemaphore* semaphore);
// Try to acquire semaphore.
//
// If event is set this function return true, otherwise the thread will wait for event to be set or for release to be called.
//
// When semaphore is acquired this function is guaranteed to emit an acquire barrier.
//
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while Acquire may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \returns true if semaphore was acquired.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryTimedAcquire(Baselib_EventSemaphore* semaphore, const uint32_t timeoutInMilliseconds);
// Sets the event
//
// Setting the event will cause all waiting threads to wakeup. And will let all future acquiring threads through until Baselib_EventSemaphore_Reset is called.
// It is guaranteed that any thread waiting previously on the EventSemaphore will be woken up, even if the semaphore is immediately reset. (no lock stealing)
//
// Guaranteed to emit a release barrier.
BASELIB_INLINE_API void Baselib_EventSemaphore_Set(Baselib_EventSemaphore* semaphore);
// Reset event
//
// Resetting the event will cause all future acquiring threads to enter a wait state.
// Has no effect if the EventSemaphore is already in a reset state.
//
// Guaranteed to emit a release barrier.
BASELIB_INLINE_API void Baselib_EventSemaphore_Reset(Baselib_EventSemaphore* semaphore);
// Reset event and release all waiting threads
//
// Resetting the event will cause all future acquiring threads to enter a wait state.
// If there were any threads waiting (i.e. the EventSemaphore was already in a release state) they will be released.
//
// Guaranteed to emit a release barrier.
BASELIB_INLINE_API void Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(Baselib_EventSemaphore* semaphore);
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, calling free may trigger an assert and may cause process abort.
// Calling this function with a nullptr results in a no-op
BASELIB_INLINE_API void Baselib_EventSemaphore_Free(Baselib_EventSemaphore* semaphore);

View File

@@ -0,0 +1,400 @@
#pragma once
// Baselib FileIO
//
// This is a file reading abstraction api heavily influenced by next-gen async API's like io_uring, windows register I/O, etc.
// This api allows for platform independent async file reading.
#include "Baselib_ErrorState.h"
#include "Baselib_Memory.h"
#include "Internal/Baselib_EnumSizeCheck.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Event queue handle.
typedef struct Baselib_FileIO_EventQueue {void* handle;} Baselib_FileIO_EventQueue;
// Async file handle.
typedef struct Baselib_FileIO_AsyncFile {void* handle;} Baselib_FileIO_AsyncFile;
// Sync file handle.
typedef struct Baselib_FileIO_SyncFile {void* handle;} Baselib_FileIO_SyncFile;
// Event queue handle invalid constant.
static const Baselib_FileIO_EventQueue Baselib_FileIO_EventQueue_Invalid = { NULL };
// Async file handle invalid constant.
static const Baselib_FileIO_AsyncFile Baselib_FileIO_AsyncFile_Invalid = { NULL };
// Sync file handle invalid constant.
// NOTE(review): the invalid value is (void*)-1, not NULL — presumably to match a
// native invalid-handle convention; compare against this constant, never against NULL.
static const Baselib_FileIO_SyncFile Baselib_FileIO_SyncFile_Invalid = { (void*)-1 };
// Flags controlling how a file is opened (combine with bitwise OR).
typedef enum Baselib_FileIO_OpenFlags_t
{
// Allows read access to the file.
Baselib_FileIO_OpenFlags_Read = 0x01,
// Allows write access to the file.
Baselib_FileIO_OpenFlags_Write = 0x02,
// Opens existing file without changes or creates 0 size file if file doesn't exist.
// On some platforms open will implicitly add write flag if required by native API's.
Baselib_FileIO_OpenFlags_OpenAlways = 0x04,
// Always creates 0 size file.
// On some platforms open will implicitly add write flag if required by native API's.
Baselib_FileIO_OpenFlags_CreateAlways = 0x08,
} Baselib_FileIO_OpenFlags_t;
// Storage type for a bitwise combination of Baselib_FileIO_OpenFlags_t values.
typedef uint32_t Baselib_FileIO_OpenFlags;
// File IO read request.
typedef struct Baselib_FileIO_ReadRequest
{
// Offset in a file to read from.
// If offset+size points past EOF, will read up to EOF bytes.
// If offset points past EOF, will read 0 bytes.
uint64_t offset;
// Buffer to read to, must be available for duration of operation.
void* buffer;
// Size of requested read.
// If 0 is passed will read 0 bytes and raise no error.
uint64_t size;
} Baselib_FileIO_ReadRequest;
// File IO priorities.
// First we process all requests with high priority, then with normal priority.
// There's no round-robin, and high priority can starve normal priority.
typedef enum Baselib_FileIO_Priority
{
Baselib_FileIO_Priority_Normal = 0,
Baselib_FileIO_Priority_High = 1
} Baselib_FileIO_Priority;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_FileIO_Priority);
// Discriminator for the union inside Baselib_FileIO_EventQueue_Result.
typedef enum Baselib_FileIO_EventQueue_ResultType
{
// Upon receiving this event, please call the provided callback with provided data argument.
Baselib_FileIO_EventQueue_Callback = 1,
// Result of open file operation.
Baselib_FileIO_EventQueue_OpenFile = 2,
// Result of read file operation.
Baselib_FileIO_EventQueue_ReadFile = 3,
// Result of close file operation.
Baselib_FileIO_EventQueue_CloseFile = 4
} Baselib_FileIO_EventQueue_ResultType;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_FileIO_EventQueue_ResultType);
// Signature of user callbacks delivered via Baselib_FileIO_EventQueue_Callback events.
typedef void (*EventQueueCallback)(uint64_t userdata);
// Payload for Baselib_FileIO_EventQueue_Callback events.
typedef struct Baselib_FileIO_EventQueue_Result_Callback
{
// Please invoke this callback with userdata from the event.
EventQueueCallback callback;
} Baselib_FileIO_EventQueue_Result_Callback;
// Payload for Baselib_FileIO_EventQueue_OpenFile events.
typedef struct Baselib_FileIO_EventQueue_Result_OpenFile
{
// Size of the file as seen during open.
uint64_t fileSize;
} Baselib_FileIO_EventQueue_Result_OpenFile;
// Payload for Baselib_FileIO_EventQueue_ReadFile events.
typedef struct Baselib_FileIO_EventQueue_Result_ReadFile
{
// Bytes transferred during read.
uint64_t bytesTransferred;
} Baselib_FileIO_EventQueue_Result_ReadFile;
// Event queue result.
typedef struct Baselib_FileIO_EventQueue_Result
{
// Event type; selects which union member below is valid.
Baselib_FileIO_EventQueue_ResultType type;
// Userdata as provided to the request.
uint64_t userdata;
// Error state of the operation.
Baselib_ErrorState errorState;
union
{
Baselib_FileIO_EventQueue_Result_Callback callback;
Baselib_FileIO_EventQueue_Result_OpenFile openFile;
Baselib_FileIO_EventQueue_Result_ReadFile readFile;
};
} Baselib_FileIO_EventQueue_Result;
// Creates event queue.
//
// \returns Event queue.
BASELIB_API Baselib_FileIO_EventQueue Baselib_FileIO_EventQueue_Create(void);
// Frees event queue.
//
// \param eq event queue to free.
BASELIB_API void Baselib_FileIO_EventQueue_Free(
Baselib_FileIO_EventQueue eq
);
// Dequeue events from event queue.
//
// \param eq Event queue to dequeue from.
// \param results Results array to dequeue elements into.
// If null will return 0.
// \param count Amount of elements in results array.
// If equals 0 will return 0.
// \param timeoutInMilliseconds If no elements are present in the queue,
// waits for any elements to be appear for specified amount of time.
// If 0 is passed, wait is omitted.
// If elements are present, dequeues up-to-count elements, and wait is omitted.
//
// File operations errors are reported via Baselib_FileIO_EventQueue_Result::errorState
// Possible error codes:
// - InvalidPathname: Requested pathname is invalid (not found, a directory, etc).
// - RequestedAccessIsNotAllowed: Access to requested pathname is not allowed.
// - IOError: IO error occurred.
//
// \returns Amount of results filled.
BASELIB_API uint64_t Baselib_FileIO_EventQueue_Dequeue(
Baselib_FileIO_EventQueue eq,
Baselib_FileIO_EventQueue_Result results[],
uint64_t count,
uint32_t timeoutInMilliseconds // 0 will return immediately
);
// Request dequeue to shutdown
//
// \param eq Event queue to shutdown.
// \param threadCount Number of threads to signal termination
//
// An empty queue will hang in Baselib_FileIO_EventQueue_Dequeue for as long as the timeout lasts.
// This function can be used to exit such a condition
BASELIB_API void Baselib_FileIO_EventQueue_Shutdown(
Baselib_FileIO_EventQueue eq,
uint32_t threadCount
);
// Asynchronously opens a file.
//
// \param eq Event queue to associate file with.
// File can only be associated with one event queue,
// but one event queue can be associated with multiple files.
// If invalid event queue is passed, will return invalid file handle.
// \param pathname Platform defined pathname of a file.
// Can be freed after this function returns.
// If null is passed will return invalid file handle.
// \param userdata Userdata to be set in the completion event.
// \param priority Priority for file opening operation.
//
// Please note errors are reported via Baselib_FileIO_EventQueue_Result::errorState
// Possible error codes:
// - InvalidPathname: Requested pathname is invalid (not found, a directory, etc).
// - RequestedAccessIsNotAllowed: Access to requested pathname is not allowed.
// - IOError: IO error occurred.
//
// \returns Async file handle, which can be used immediately for scheduling other operations.
// In case if file opening fails, all scheduled operations will fail as well.
// In case if invalid arguments are passed, might return invalid file handle (see args descriptions).
BASELIB_API Baselib_FileIO_AsyncFile Baselib_FileIO_AsyncOpen(
Baselib_FileIO_EventQueue eq,
const char* pathname,
uint64_t userdata,
Baselib_FileIO_Priority priority
);
// Asynchronously reads data from a file.
//
// Note scheduling reads on closed file is undefined.
//
// \param file Async file to read from.
// If invalid file handle is passed, will no-op.
// If file handle was already closed, behavior is undefined.
// \param requests Requests to schedule.
// If more than 1 provided,
// will provide completion event per individual request in the array.
// If null is passed, will no-op.
// \param count Amount of requests in requests array.
// If 0 is passed, will no-op.
// \param userdata Userdata to be set in the completion event(s).
// \param priority Priority for file reading operation(s).
//
// Please note errors are reported via Baselib_FileIO_EventQueue_Result::errorState
// If file is invalid handle, error can not be reported because event queue is not known.
// Possible error codes:
// - IOError: IO error occurred.
BASELIB_API void Baselib_FileIO_AsyncRead(
Baselib_FileIO_AsyncFile file,
Baselib_FileIO_ReadRequest requests[],
uint64_t count,
uint64_t userdata,
Baselib_FileIO_Priority priority
);
// Asynchronously closes a file.
//
// Will wait for all pending operations to complete,
// after that will close a file and put a completion event.
//
// \param file Async file to close.
// If invalid file handle is passed, will no-op.
//
// Please note errors are reported via Baselib_FileIO_EventQueue_Result::errorState
// If file is invalid handle, error can not be reported because event queue is not known.
// Possible error codes:
// - IOError: IO error occurred.
BASELIB_API void Baselib_FileIO_AsyncClose(
Baselib_FileIO_AsyncFile file
);
// Synchronously opens a file.
//
// Will try use the most open access permissions options that are available for each platform.
// Meaning it might be possible for other process to write to file opened via this API.
// On most platforms file can be simultaneously opened with different open flags.
// If you require more strict options, or platform specific access configuration, please use Baselib_FileIO_SyncFileFromNativeHandle.
//
// \param pathname Platform defined pathname to open.
// \param openFlags Open flags.
// If file is created because one of Create flags is passed, it will have size of 0 bytes.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - RequestedAccessIsNotAllowed: Request access is not allowed.
// - IOError: Generic IO error occurred.
//
// \returns SyncFile handle.
BASELIB_API Baselib_FileIO_SyncFile Baselib_FileIO_SyncOpen(
const char* pathname,
Baselib_FileIO_OpenFlags openFlags,
Baselib_ErrorState* errorState
);
// Transfer ownership of native handle to Baselib_FileIO_SyncFile handle.
//
// This function transfers ownership, meaning you don't need to close native handle yourself,
// instead returned SyncFile must closed via Baselib_FileIO_SyncClose.
// Implementations might cache information about the file state,
// so native handle shouldn't be used after transferring ownership.
//
// \param handle Platform defined native handle.
// If invalid native handle is passed, will return Baselib_FileIO_SyncFile_Invalid.
// \param type Platform defined native handle type from Baselib_FileIO_NativeHandleType enum.
// If unsupported type is passed, will return Baselib_FileIO_SyncFile_Invalid.
//
// \returns SyncFile handle.
BASELIB_API Baselib_FileIO_SyncFile Baselib_FileIO_SyncFileFromNativeHandle(
uint64_t handle,
uint32_t type
);
// Synchronously reads data from a file.
//
// \param file File to read from.
// If invalid file handle is passed, will raise InvalidArgument error and return 0.
// \param offset Offset in the file to read data at.
// If offset+size goes past end-of-file (EOF), function will read until EOF.
// If offset points past EOF, will return 0.
// \param buffer Pointer to data to read into.
// \param size Size of data to read.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - IOError: Generic IO error occurred.
//
// \returns Amount of bytes read.
BASELIB_API uint64_t Baselib_FileIO_SyncRead(
Baselib_FileIO_SyncFile file,
uint64_t offset,
void* buffer,
uint64_t size,
Baselib_ErrorState* errorState
);
// Synchronously writes data to a file.
//
// \param file File to write to.
// If invalid file handle is passed, will raise InvalidArgument error and return 0.
// \param offset Offset in the file to write data at.
// If offset+size goes past end-of-file (EOF), then file will be resized.
// \param buffer Pointer to data to write.
// \param size Size of data to write.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - IOError: Generic IO error occurred.
//
// \returns Amount of bytes written.
BASELIB_API uint64_t Baselib_FileIO_SyncWrite(
Baselib_FileIO_SyncFile file,
uint64_t offset,
const void* buffer,
uint64_t size,
Baselib_ErrorState* errorState
);
// Synchronously flushes file buffers.
//
// Operating system might buffer some write operations.
// Flushing buffers is required to guarantee (best effort) writing data to disk.
//
// \param file File to flush.
// If invalid file handle is passed, will no-op.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - IOError: Generic IO error occurred.
BASELIB_API void Baselib_FileIO_SyncFlush(
Baselib_FileIO_SyncFile file,
Baselib_ErrorState* errorState
);
// Synchronously changes file size.
//
// \param file File to change the size of.
// If invalid file handle is passed, will raise InvalidArgument error.
// \param size New file size.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - IOError: Generic IO error occurred.
BASELIB_API void Baselib_FileIO_SyncSetFileSize(
Baselib_FileIO_SyncFile file,
uint64_t size,
Baselib_ErrorState* errorState
);
// Synchronously retrieves file size.
//
// \param file File to get size of.
// If invalid file handle is passed, will return 0.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - IOError: Generic IO error occurred.
//
// \returns File size.
BASELIB_API uint64_t Baselib_FileIO_SyncGetFileSize(
Baselib_FileIO_SyncFile file,
Baselib_ErrorState* errorState
);
// Synchronously closes a file.
//
// Close does not guarantee that the data was written to disk,
// Please use Baselib_FileIO_SyncFlush to guarantee (best effort) that data was written to disk.
//
// \param file File to close.
// If invalid file handle is passed, will no-op.
//
// Possible error codes:
// - InvalidArgument: Invalid argument was passed.
// - IOError: Generic IO error occurred.
BASELIB_API void Baselib_FileIO_SyncClose(
Baselib_FileIO_SyncFile file,
Baselib_ErrorState* errorState
);
#include <C/Baselib_FileIO.inl.h>
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,74 @@
#pragma once
// Baselib_HighCapacitySemaphore
// This semaphore is similar to Baselib_Semaphore but allows for a far greater token count for a price of a bit slower performance.
// This semaphore is usable for counting resources.
// This is the max number of tokens guaranteed to be held by the semaphore at
// any given point in time. Tokens submitted that exceed this value may silently be discarded.
static const int64_t Baselib_HighCapacitySemaphore_MaxGuaranteedCount = UINT64_C(1) << 61;
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/Baselib_HighCapacitySemaphore_FutexBased.inl.h"
#else
#include "Internal/Baselib_HighCapacitySemaphore_SemaphoreBased.inl.h"
#endif
// Creates a counting semaphore synchronization primitive.
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
//
// For optimal performance, the returned Baselib_HighCapacitySemaphore should be stored at a cache aligned memory location.
//
// \returns A struct representing a semaphore instance. Use Baselib_HighCapacitySemaphore_Free to free the semaphore.
BASELIB_INLINE_API Baselib_HighCapacitySemaphore Baselib_HighCapacitySemaphore_Create(void);
// Wait for semaphore token to become available
//
// This function is guaranteed to emit an acquire barrier.
// Returns if token was consumed or was woken up by Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Acquire(Baselib_HighCapacitySemaphore* semaphore);
// Try to consume a token and return immediately.
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// \returns true if token was consumed. false if not.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryAcquire(Baselib_HighCapacitySemaphore* semaphore);
// Wait for semaphore token to become available
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while Acquire may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \param timeout Time to wait for token to become available.
//
// \returns true if token was consumed or was woken up by Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads. false if timeout was reached.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryTimedAcquire(Baselib_HighCapacitySemaphore* semaphore, const uint32_t timeoutInMilliseconds);
// Submit tokens to the semaphore.
//
// When successful this function is guaranteed to emit a release barrier.
//
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
// If count reach `Baselib_HighCapacitySemaphore_MaxGuaranteedCount` this function may silently discard any overflow.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Release(Baselib_HighCapacitySemaphore* semaphore, const uint32_t count);
// If threads are waiting on Baselib_HighCapacitySemaphore_Acquire / Baselib_HighCapacitySemaphore_TryTimedAcquire,
// releases enough tokens to wake them up. Otherwise consumes all available tokens.
//
// When successful this function is guaranteed to emit a release barrier.
//
// \returns number of released threads.
BASELIB_INLINE_API uint64_t Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(Baselib_HighCapacitySemaphore* semaphore);
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
// Calling this function with a nullptr results in a no-op
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Free(Baselib_HighCapacitySemaphore* semaphore);

View File

@@ -0,0 +1,69 @@
#pragma once
// In computer science, a lock or mutex (from mutual exclusion) is a synchronization mechanism for enforcing limits on access to a resource in an environment
// where there are many threads of execution. A lock is designed to enforce a mutual exclusion concurrency control policy.
//
// "Lock (computer science)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Lock_(computer_science)&oldid=875674239
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/Baselib_Lock_FutexBased.inl.h"
#else
#include "Internal/Baselib_Lock_SemaphoreBased.inl.h"
#endif
// Creates a lock synchronization primitive.
//
// If there are not enough system resources to create a lock, process abort is triggered.
//
// For optimal performance, the returned Baselib_Lock should be stored at a cache aligned memory location.
//
// \returns A struct representing a lock instance. Use Baselib_Lock_Free to free the lock.
BASELIB_INLINE_API Baselib_Lock Baselib_Lock_Create(void);
// Try to acquire lock and return immediately.
//
// If lock is held, either by this or another thread, then lock is not acquired and function return false.
//
// If successful this function is guaranteed to emit an acquire barrier.
//
// \returns true if lock was acquired.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryAcquire(Baselib_Lock* lock);
// Acquire lock.
//
// If lock is held, either by this or another thread, then the function wait for lock to be released.
//
// This function is guaranteed to emit an acquire barrier.
BASELIB_INLINE_API void Baselib_Lock_Acquire(Baselib_Lock* lock);
// Try to acquire lock.
//
// If lock is held, either by this or another thread, then the function wait for timeoutInMilliseconds for lock to be released.
//
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while Acquire may enter the kernel and cause a context switch.
//
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \returns true if lock was acquired.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryTimedAcquire(Baselib_Lock* lock, uint32_t timeoutInMilliseconds);
// Release lock and make it available to other threads.
//
// This function can be called from any thread, not only the thread that acquired the lock.
// If no lock was previously held calling this function result in a no-op.
//
// When the lock is released this function is guaranteed to emit a release barrier.
BASELIB_INLINE_API void Baselib_Lock_Release(Baselib_Lock* lock);
// Reclaim resources and memory held by lock.
//
// If threads are waiting on the lock, calling free may trigger an assert and may cause process abort.
// Calling this function with a nullptr results in a no-op
BASELIB_INLINE_API void Baselib_Lock_Free(Baselib_Lock* lock);

View File

@@ -0,0 +1,196 @@
#pragma once
#include "Baselib_ErrorState.h"
#include "Internal/Baselib_EnumSizeCheck.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Max alignment that can be passed to Baselib_Memory_AlignedAlloc and Baselib_Memory_AlignedReallocate functions
static const size_t Baselib_Memory_MaxAlignment = 64 * 1024;
// We can't handle platform varying constants in the C# bindings right now.
#if !defined(BASELIB_BINDING_GENERATION)
// Minimum guaranteed alignment for Baselib_Memory_Allocate/Baselib_Memory_AlignedAlloc in bytes.
//
// Guaranteed to be at least 8.
// Note that on some platforms it is possible to overwrite the internally used allocator in which case this guarantee may no longer be upheld.
static const size_t Baselib_Memory_MinGuaranteedAlignment = PLATFORM_MEMORY_MALLOC_MIN_ALIGNMENT;
#else
// Minimum guaranteed alignment for Baselib_Memory_Allocate/Baselib_Memory_AlignedAlloc in bytes.
//
// Guaranteed to be at least 8.
// Note that on some platforms it is possible to overwrite the internally used allocator in which case this guarantee may no longer be upheld.
static const size_t Baselib_Memory_MinGuaranteedAlignment = 8;
#endif // !defined(BASELIB_BINDING_GENERATION)
// Information about available pages sizes.
//
// Page sizes do not reflect necessarily hardware ("physical") page sizes, but rather "virtual" page sizes that the OS is dealing with.
// I.e. a virtual page may refer to several hardware pages, but the OS exposes only a single state for this group of pages.
typedef struct Baselib_Memory_PageSizeInfo
{
// Commonly used page size on this platform.
uint64_t defaultPageSize;
// pageSizesLen valid page sizes, ordered from small to large.
uint64_t pageSizes[6];
uint64_t pageSizesLen;
} Baselib_Memory_PageSizeInfo;
// Describes a range of memory pages, as returned by Baselib_Memory_AllocatePages.
typedef struct Baselib_Memory_PageAllocation
{
    // Address of the first page, or nullptr for an invalid/empty allocation
    // (see Baselib_Memory_PageAllocation_Invalid).
    void* ptr;
    // Size in bytes of each page in the allocation.
    uint64_t pageSize;
    // Number of pages in the allocation.
    uint64_t pageCount;
} Baselib_Memory_PageAllocation;
static const Baselib_Memory_PageAllocation Baselib_Memory_PageAllocation_Invalid = {0, 0, 0};
// Fills out a Baselib_Memory_PageSizeInfo struct.
//
// \param outPagesSizeInfo: Pointer to page size info struct. Passing 'nullptr' will return immediately.
BASELIB_API void Baselib_Memory_GetPageSizeInfo(Baselib_Memory_PageSizeInfo* outPagesSizeInfo);
// Allocates memory using a system allocator like malloc.
//
// Allocation failures or invalid alignments will trigger process abort.
//
// \param size Size of the allocation. Zero is valid.
// \returns Unique pointer to allocation. At least aligned to by Baselib_Memory_MinGuaranteedAlignment bytes.
// This is true for zero sized allocations as well.
BASELIB_API void* Baselib_Memory_Allocate(size_t size);
// Reallocates memory previously allocated by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
//
// Allocation failures or invalid alignments will trigger process abort.
//
// \param ptr Pointer previously returned by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
// Reallocating an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_Allocate or
// Baselib_Memory_Reallocate leads to undefined behavior.
// Passing `nullptr` yields the same result as calling Baselib_Memory_Allocate.
// \param size Size of the allocation. No special restrictions apply, zero is valid.
// \returns Unique pointer to allocation. At least aligned to by Baselib_Memory_MinGuaranteedAlignment bytes.
// This is true for zero sized allocations as well.
BASELIB_API void* Baselib_Memory_Reallocate(void* ptr, size_t newSize);
// Frees memory allocated by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
//
// \param ptr Pointer previously returned by Baselib_Memory_Allocate or Baselib_Memory_Reallocate.
// Freeing an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_Allocate or Baselib_Memory_Reallocate leads to undefined behavior.
// Passing `nullptr` results in a no-op.
BASELIB_API void Baselib_Memory_Free(void* ptr);
// Allocates memory using a system allocator like malloc and guarantees that the returned pointer is aligned to the specified alignment.
//
// Allocation failures or invalid alignments will trigger process abort.
//
// \param size Size of the allocation. No special restrictions (like multiples of alignment) apply, zero is valid.
// \param alignment Needs to be a power of two which is also a multiple of pointer size (i.e. sizeof(void*)) but less or equal to Baselib_Memory_MaxAlignment.
// Any alignment smaller than Baselib_Memory_MinGuaranteedAlignment, will be clamped to Baselib_Memory_MinGuaranteedAlignment.
// \returns Unique pointer to aligned allocation. This is true for zero sized allocations as well.
BASELIB_API void* Baselib_Memory_AlignedAllocate(size_t size, size_t alignment);
// Reallocates memory previously allocated by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
//
// Allocation failures or invalid alignments will trigger process abort.
//
// \param ptr Pointer previously returned by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
// Reallocating an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_AlignedAllocate or
// Baselib_Memory_AlignedReallocate leads to undefined behavior.
// Passing `nullptr` yields the same result as calling Baselib_Memory_AlignedAllocate.
// \param size Size of the allocation. No special restrictions apply, zero is valid.
// \param alignment Needs to be a power of two which is also a multiple of pointer size (i.e. sizeof(void*)) but less or equal to Baselib_Memory_MaxAlignment.
// Any alignment smaller than Baselib_Memory_MinGuaranteedAlignment, will be clamped to Baselib_Memory_MinGuaranteedAlignment.
// \returns Unique pointer to aligned allocation. This is true for zero sized allocations as well.
BASELIB_API void* Baselib_Memory_AlignedReallocate(void* ptr, size_t newSize, size_t alignment);
// Frees memory allocated by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
//
// \param ptr Pointer previously returned by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate.
// Freeing an already freed pointer or a pointer that was not previously allocated by Baselib_Memory_AlignedAllocate or Baselib_Memory_AlignedReallocate leads to undefined behavior.
// Passing `nullptr` results in a no-op.
BASELIB_API void Baselib_Memory_AlignedFree(void* ptr);
// Page state options
typedef enum Baselib_Memory_PageState
{
    // The pages are in a reserved state and any access will cause a seg-fault/access violation.
    // On some platforms that support this state this may be just a hint to the OS and there is no guarantee pages in this state behave differently from Baselib_Memory_PageState_NoAccess.
    // The Baselib implementation does a best effort and tries to ensure as best as possible that pages in this state are not committed.
    Baselib_Memory_PageState_Reserved = 0x00,
    // This is a no access page and will cause a seg-fault/access violation when accessed.
    Baselib_Memory_PageState_NoAccess = 0x01,
    // The memory can only be read.
    Baselib_Memory_PageState_ReadOnly = 0x02,
    // The memory can be read and written.
    Baselib_Memory_PageState_ReadWrite = 0x04,
    // The memory can be used to execute code and can be read.
    Baselib_Memory_PageState_ReadOnly_Executable = 0x10 | Baselib_Memory_PageState_ReadOnly,
    // The memory can be used to execute code and can be both read and written.
    Baselib_Memory_PageState_ReadWrite_Executable = 0x10 | Baselib_Memory_PageState_ReadWrite,
} Baselib_Memory_PageState;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_Memory_PageState);
// Allocates a given number of memory pages and guarantees that the returned pointer is aligned to specified multiple of the page size.
//
// Large alignments may lead to a significantly higher use of virtual address space than the amount of memory requested.
// This may result in an aligned page allocation to fail where a less/non-aligned allocation would succeed.
// Note that this is especially common in 32bit applications but a platform may impose additional restrictions on the size of its virtual address space.
// Whether a page allocation is pure virtual address space or already committed memory depends on the platform and passed page state flag.
//
// \param pageCount Number of pages requested (each will have pageSize size)
// \param alignmentInMultipleOfPageSize Specified alignment in multiple of page sizes (a value of 1 implies alignment to page size).
// Value needs to be larger than zero and a power of two, otherwise UnsupportedAlignment will be raised.
// \param pageState: In which state the pages should be. Certain values may raise UnsupportedPageState on certain platforms.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidPageSize: Page size doesn't match any of the available page sizes (see Baselib_Memory_GetPageSizeInfo).
// - Baselib_ErrorCode_InvalidPageCount: Requested number of pages is zero.
// - Baselib_ErrorCode_UnsupportedAlignment: Requested alignment is invalid.
// - Baselib_ErrorCode_UnsupportedPageState: The underlying system doesn't support the requested page state (see Baselib_Memory_PageState).
// - Baselib_ErrorCode_OutOfMemory: If there is not enough continuous address space available, or physical memory space when acquiring committed memory.
//
// \returns Page allocation info or Baselib_Memory_PageAllocation_Invalid in case of an error.
BASELIB_API Baselib_Memory_PageAllocation Baselib_Memory_AllocatePages(uint64_t pageSize, uint64_t pageCount, uint64_t alignmentInMultipleOfPageSize, Baselib_Memory_PageState pageState, Baselib_ErrorState* errorState);
// Releases the previously allocated pages (using either Baselib_Memory_AllocatePages)
//
// A single call of ReleasePages must encompass all pages that were originally allocated with a single call of AllocatePages.
// Passing Baselib_Memory_PageAllocation with a nullptr or a zero page count results in a no-op.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidAddressRange: Address range was detected to not match a valid allocation.
// CAUTION: Not all platforms are able to detect this and may either raise an error or cause undefined behavior.
// Note to implementors: Raising the error is strongly preferred as it helps identifying issues in user code.
// - Baselib_ErrorCode_InvalidPageSize: If page size doesn't match a previous allocation at `pageAllocation.ptr`.
//
// Implementation note:
// We could be able to allow granular ReleasePages call, but even then only in the _allocation granularity_ which might be different from the page size.
// (e.g. windows page size 4k allocation granularity 64k)
BASELIB_API void Baselib_Memory_ReleasePages(Baselib_Memory_PageAllocation pageAllocation, Baselib_ErrorState* errorState);
// Modifies the page state property of an already allocated virtual address range.
//
// It is possible to modify only some of the pages allocated by Baselib_Memory_AllocatePages.
// Passing `nullptr` or a zero page count results in a no-op.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidAddressRange: Address range is not covered by a valid allocation.
// Platforms that emulate page allocations (e.g. Emscripten) are not able to present this error and will pass the function call silently.
// - Baselib_ErrorCode_InvalidPageSize: If page size doesn't match the previous allocation at `addressOfFirstPage`.
// - Baselib_ErrorCode_UnsupportedPageState: The underlying system doesn't support the requested page state (see Baselib_Memory_PageState).
BASELIB_API void Baselib_Memory_SetPageState(void* addressOfFirstPage, uint64_t pageSize, uint64_t pageCount, Baselib_Memory_PageState pageState, Baselib_ErrorState* errorState);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,100 @@
#pragma once
// Baselib Network Address
#include "Baselib_ErrorState.h"
#include "Baselib_Alignment.h"
#include "Internal/Baselib_EnumSizeCheck.h"
#include <string.h>
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Address family.
typedef enum Baselib_NetworkAddress_Family
{
Baselib_NetworkAddress_Family_Invalid = 0,
Baselib_NetworkAddress_Family_IPv4 = 1,
Baselib_NetworkAddress_Family_IPv6 = 2
} Baselib_NetworkAddress_Family;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_NetworkAddress_Family);
// Fixed size address structure, large enough to hold IPv4 and IPv6 addresses.
typedef struct Baselib_NetworkAddress
{
union
{
uint8_t data[16];
uint8_t ipv6[16]; // in network byte order
uint8_t ipv4[4]; // in network byte order
};
BASELIB_ALIGN_AS(2) uint8_t port[2]; // in network byte order
uint8_t family;
uint8_t _padding; // Explicit padding to allow for deterministic bitwise compare.
// Scope zone index for IPv6 (ignored for IPv4)
// Defaults to zero if not specified.
// Note that unlike the other fields in this struct, this is *not* in network byte order!
uint32_t ipv6_scope_id;
} Baselib_NetworkAddress;
// Max length of any string representing an IP address
static const uint32_t Baselib_NetworkAddress_IpMaxStringLength = 46;
// Binary encode string representation of an address.
//
// Neither port nor IPv6 scope id is parsed from the ip string.
// dstAddress->ipv6_scope_id is set to zero and needs to be manually set if required.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument - One or more of the input parameters are invalid
BASELIB_API void Baselib_NetworkAddress_Encode(
Baselib_NetworkAddress* dstAddress,
Baselib_NetworkAddress_Family family,
const char ip[],
uint16_t port,
Baselib_ErrorState* errorState
);
// Decode binary representation of an address.
//
// family, ipAddressBuffer, and port are all optional arguments.
// passing zero as ipAddressBufferLen is the same as passing an ipAddressBuffer nullptr.
// Port and IPv6 scope id are not encoded to ipAddressBuffer.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument - srcAddress is null or otherwise invalid.
// - Baselib_ErrorCode_InvalidBufferSize - ipAddressBuffer is too small to hold decoded ip address.
BASELIB_API void Baselib_NetworkAddress_Decode(
const Baselib_NetworkAddress* srcAddress,
Baselib_NetworkAddress_Family* family,
char ipAddressBuffer[],
uint32_t ipAddressBufferLen,
uint16_t* port,
Baselib_ErrorState* errorState
);
// Returns a zero-initialized network address struct.
//
// memset is used (rather than aggregate initialization) so that padding bytes
// are zeroed as well, keeping the struct safe for bitwise comparison.
static inline Baselib_NetworkAddress Baselib_NetworkAddress_Empty(void)
{
    Baselib_NetworkAddress zeroedAddress;
    memset(&zeroedAddress, 0, sizeof zeroedAddress);
    return zeroedAddress;
}
typedef enum Baselib_NetworkAddress_AddressReuse
{
Baselib_NetworkAddress_AddressReuse_DoNotAllow = 0,
// Allow multiple sockets to be bound to the same address/port.
// All sockets bound to the same address/port need to have this flag set.
Baselib_NetworkAddress_AddressReuse_Allow = 1,
} Baselib_NetworkAddress_AddressReuse;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_NetworkAddress_AddressReuse);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,14 @@
#pragma once
#include "Baselib_ErrorCode.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
BASELIB_API COMPILER_NORETURN void Baselib_Process_Abort(Baselib_ErrorCode error);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,76 @@
#pragma once
// Baselib_ReentrantLock
// In computer science, the reentrant mutex (recursive mutex, recursive lock) is particular type of mutual exclusion (mutex) device that may be locked multiple
// times by the same process/thread, without causing a deadlock.
// While any attempt to perform the "lock" operation on an ordinary mutex (lock) would either fail or block when the mutex is already locked, on a recursive
// mutex this operation will succeed if and only if the locking thread is the one that already holds the lock. Typically, a recursive mutex tracks the number
// of times it has been locked, and requires equally many unlock operations to be performed before other threads may lock it.
//
// "Reentrant mutex", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Reentrant_mutex&oldid=818566928
#include "Internal/Baselib_ReentrantLock.inl.h"
// Creates a reentrant lock synchronization primitive.
//
// If there are not enough system resources to create a lock, process abort is triggered.
//
// For optimal performance, the returned Baselib_ReentrantLock should be stored at a cache aligned memory location.
//
// \returns A struct representing a lock instance. Use Baselib_ReentrantLock_Free to free the lock.
BASELIB_INLINE_API Baselib_ReentrantLock Baselib_ReentrantLock_Create(void);
// Try to acquire lock and return immediately.
// If lock is already acquired by the current thread this function increases the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
// to be made before the lock is released.
//
// When lock is acquired this function is guaranteed to emit an acquire barrier.
//
// \returns true if lock was acquired.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryAcquire(Baselib_ReentrantLock* lock);
// Acquire lock.
//
// If lock is already acquired by the current thread this function increases the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
// to be made before the lock is released.
// If lock is held by another thread, this function waits for the lock to be released.
//
// This function is guaranteed to emit an acquire barrier.
BASELIB_INLINE_API void Baselib_ReentrantLock_Acquire(Baselib_ReentrantLock* lock);
// Acquire lock.
// If lock is already acquired by the current thread this function increases the lock count so that an equal number of calls to Baselib_ReentrantLock_Release needs
// to be made before the lock is released.
// If lock is held by another thread, this function waits up to timeoutInMilliseconds for the lock to be released.
//
// When a lock is acquired this function is guaranteed to emit an acquire barrier.
//
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while Acquire may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \returns true if lock was acquired.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryTimedAcquire(Baselib_ReentrantLock* lock, uint32_t timeoutInMilliseconds);
// Release lock.
// If lock count is still higher than zero after the release operation then lock remain in a locked state.
// If the lock count reaches zero the lock is unlocked and made available to other threads.
//
// When the lock is released this function is guaranteed to emit a release barrier.
//
// Calling this function from a thread that doesn't own the lock triggers an assert in debug builds and causes undefined behavior in release builds.
BASELIB_INLINE_API void Baselib_ReentrantLock_Release(Baselib_ReentrantLock* lock);
// Reclaim resources and memory held by lock.
//
// If threads are waiting on the lock, calling free may trigger an assert and may cause process abort.
// Calling this function with a nullptr results in a no-op.
BASELIB_INLINE_API void Baselib_ReentrantLock_Free(Baselib_ReentrantLock* lock);

View File

@@ -0,0 +1,391 @@
#pragma once
#include "Baselib_ErrorState.h"
#include "Baselib_Memory.h"
#include "Baselib_NetworkAddress.h"
#include "Internal/Baselib_EnumSizeCheck.h"
#include <stdint.h>
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// ------------------------------------------------------------------------------------------------
// Network buffers
// Implementation defined internal buffer id.
typedef void* Baselib_RegisteredNetwork_Buffer_Id;
static const Baselib_RegisteredNetwork_Buffer_Id Baselib_RegisteredNetwork_Buffer_Id_Invalid = 0;
// Network buffer structure.
// One buffer can contain multiple packets and endpoints.
typedef struct Baselib_RegisteredNetwork_Buffer
{
    // Implementation defined internal buffer id.
    // Set to Baselib_RegisteredNetwork_Buffer_Id_Invalid if registration failed.
    Baselib_RegisteredNetwork_Buffer_Id id;
    // The previously allocated memory pages backing this buffer.
    Baselib_Memory_PageAllocation allocation;
} Baselib_RegisteredNetwork_Buffer;
// Create a network buffer from a set of previously allocated memory pages.
//
// Possible error codes:
// - InvalidAddressRange: if pageAllocation is invalid
//
// \returns A network buffer. If registration fails, then buffer id is set to Baselib_RegisteredNetwork_Buffer_Id_Invalid.
BASELIB_API Baselib_RegisteredNetwork_Buffer Baselib_RegisteredNetwork_Buffer_Register(
Baselib_Memory_PageAllocation pageAllocation,
Baselib_ErrorState* errorState
);
// Deregister network buffer. Disassociate memory pages and buffer representation.
//
// Allocated pages will stay allocated and can now be used for something else.
// Passing an invalid buffer results in a no-op.
BASELIB_API void Baselib_RegisteredNetwork_Buffer_Deregister(
Baselib_RegisteredNetwork_Buffer buffer
);
// ------------------------------------------------------------------------------------------------
// Network buffers slices
// Slice of a network buffer.
typedef struct Baselib_RegisteredNetwork_BufferSlice
{
Baselib_RegisteredNetwork_Buffer_Id id;
void* data; // data of the slice
uint32_t size; // size of the slice in bytes
uint32_t offset; // offset in main buffer
} Baselib_RegisteredNetwork_BufferSlice;
// Creates slice from network buffer
//
// \param buffer Buffer to create slice from.
// \param offset Offset in buffer in bytes.
// \param size Size of the slice in bytes.
BASELIB_API Baselib_RegisteredNetwork_BufferSlice Baselib_RegisteredNetwork_BufferSlice_Create(
Baselib_RegisteredNetwork_Buffer buffer,
uint32_t offset,
uint32_t size
);
// Create empty slice that doesn't point to anything
//
// Guaranteed to reference Baselib_RegisteredNetwork_Buffer_Id_Invalid and have all other values zeroed out.
BASELIB_API Baselib_RegisteredNetwork_BufferSlice Baselib_RegisteredNetwork_BufferSlice_Empty(void);
// ------------------------------------------------------------------------------------------------
// Network endpoints are platform defined representation (sockaddr_in-like) of network address (family, ip, port).
typedef struct Baselib_RegisteredNetwork_Endpoint { Baselib_RegisteredNetwork_BufferSlice slice; } Baselib_RegisteredNetwork_Endpoint;
static const uint32_t Baselib_RegisteredNetwork_Endpoint_MaxSize = 28; // in bytes
// Place network address into the network buffer.
//
// Destination must be able to accommodate Baselib_RegisteredNetwork_Endpoint_MaxSize bytes.
//
// \param srcAddress Network address to use, pass nullptr to create an empty endpoint.
// \param dstSlice Where to write encoded data.
//
// Possible error codes:
// - InvalidArgument: if dstSlice is invalid
// - InvalidBufferSize: if dstSlice is smaller than Baselib_RegisteredNetwork_Endpoint_MaxSize
//
// \returns Endpoint or Endpoint_Empty in case of failure.
BASELIB_API Baselib_RegisteredNetwork_Endpoint Baselib_RegisteredNetwork_Endpoint_Create(
const Baselib_NetworkAddress* srcAddress,
Baselib_RegisteredNetwork_BufferSlice dstSlice,
Baselib_ErrorState* errorState
);
// Return empty endpoint that doesn't point to anything
//
// Guaranteed to contain Baselib_RegisteredNetwork_BufferSlice_Empty
BASELIB_API Baselib_RegisteredNetwork_Endpoint Baselib_RegisteredNetwork_Endpoint_Empty(void);
// Decode endpoint.
//
// \param endpoint Endpoint to be converted.
// \param dstAddress Pointer to address to write data to.
//
// Possible error codes:
// - InvalidArgument: if endpoint is invalid or dstAddress is null
BASELIB_API void Baselib_RegisteredNetwork_Endpoint_GetNetworkAddress(
Baselib_RegisteredNetwork_Endpoint endpoint,
Baselib_NetworkAddress* dstAddress,
Baselib_ErrorState* errorState
);
// ------------------------------------------------------------------------------------------------
// Request & Completion
// Send/receive request.
typedef struct Baselib_RegisteredNetwork_Request
{
    // Slice of a registered buffer that holds (for send) or receives (for recv) the packet payload.
    Baselib_RegisteredNetwork_BufferSlice payload;
    // for sending: remote address to which the payload is sent (required for UDP)
    // for receiving: address from which the data was sent (optional)
    Baselib_RegisteredNetwork_Endpoint remoteEndpoint;
    // TODO: Not supported yet. (We would also need to support this in Baselib_Socket first)
    // for sending: unused
    // for receiving: local address on which the data was received (optional)
    //Baselib_RegisteredNetwork_Endpoint localEndpoint;
    // Opaque user pointer associated with this request; Baselib_RegisteredNetwork_CompletionResult
    // carries a field of the same name, presumably echoing this value back — confirm with implementation.
    void* requestUserdata;
} Baselib_RegisteredNetwork_Request;
// Success or failure of a Baselib_RegisteredNetwork_CompletionResult.
typedef enum Baselib_RegisteredNetwork_CompletionStatus
{
// Networking request failed.
Baselib_RegisteredNetwork_CompletionStatus_Failed = 0,
// Networking request successfully finished.
Baselib_RegisteredNetwork_CompletionStatus_Success = 1,
} Baselib_RegisteredNetwork_CompletionStatus;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_RegisteredNetwork_CompletionStatus);
// Result of a previously scheduled send/receive
//
// When a networking request is completed, this is placed into an internal completion queue.
typedef struct Baselib_RegisteredNetwork_CompletionResult
{
    // Success or failure of the networking request.
    Baselib_RegisteredNetwork_CompletionStatus status;
    // Number of bytes transferred by the request.
    uint32_t bytesTransferred;
    // Opaque user pointer; matches the requestUserdata field of Baselib_RegisteredNetwork_Request.
    void* requestUserdata;
} Baselib_RegisteredNetwork_CompletionResult;
// ------------------------------------------------------------------------------------------------
// UDP connectionless socket.
typedef struct Baselib_RegisteredNetwork_Socket_UDP { struct Baselib_RegisteredNetwork_Socket_UDP_Impl* handle; } Baselib_RegisteredNetwork_Socket_UDP;
static const Baselib_RegisteredNetwork_Socket_UDP Baselib_RegisteredNetwork_Socket_UDP_Invalid = { NULL };
// Creates an UDP socket with internal request and completion queues.
//
// \param bindAddress Address to bind socket to, in connectionless UDP every socket has to be bound.
// \param endpointReuse Allows multiple sockets to be bound to the same address/port if set to AddressReuse_Allow,
// All sockets bound to the same address/port need to have this flag set.
// \param sendQueueSize Send queue size in amount of entries.
// \param recvQueueSize Receive queue size in amount of entries.
//
// Known issues (behavior may change in the future):
// - Some platforms do not support sending zero sized UDP packets.
//
// Possible error codes:
// - InvalidArgument: if bindAddress pointer is null or incompatible or both sendQueueSize and recvQueueSize are zero
// - EndpointInUse: endpoint is already in use
// - AddressFamilyNotSupported: if the requested address family is not available.
// - OutOfSystemResources: if network session limit was exceeded
//
// \returns A UDP socket. If socket creation fails, the returned value is Baselib_RegisteredNetwork_Socket_UDP_Invalid.
BASELIB_API Baselib_RegisteredNetwork_Socket_UDP Baselib_RegisteredNetwork_Socket_UDP_Create(
const Baselib_NetworkAddress* bindAddress,
Baselib_NetworkAddress_AddressReuse endpointReuse,
uint32_t sendQueueSize,
uint32_t recvQueueSize,
Baselib_ErrorState* errorState
);
// Schedules receive requests.
//
// \param socket Socket to be used.
// \param requests Array of pointers to requests. No-op if null.
// Request objects can be freed after the function call.
// \param requestsCount Amount of requests in the array. No-op if zero.
//
// If requests is null or requestsCount is zero, this operation is a no-op.
// Note that actual receiving may be deferred until you call Baselib_RegisteredNetwork_Socket_UDP_ProcessRecv.
// UDP message data that doesn't fit a message buffer is silently discarded.
//
// Known issues (behavior may change in the future):
// - Some platforms do not support receiving zero sized UDP packets.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns The number of scheduled items. If scheduling fails this function return zero.
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_ScheduleRecv(
Baselib_RegisteredNetwork_Socket_UDP socket,
const Baselib_RegisteredNetwork_Request* requests,
uint32_t requestsCount,
Baselib_ErrorState* errorState
);
// Schedules send requests.
//
// \param socket Socket to be used.
// \param requests Array of pointers to requests. No-op if null.
// Request objects can be freed after the function call.
// \param requestsCount Amount of requests in the array. No-op if zero.
//
// If requests is null or requestsCount is zero, this operation is a no-op.
// Note that actual receiving may be deferred until you call Baselib_RegisteredNetwork_Socket_UDP_ProcessSend.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns The number of scheduled items. If scheduling fails this function return zero.
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_ScheduleSend(
Baselib_RegisteredNetwork_Socket_UDP socket,
const Baselib_RegisteredNetwork_Request* requests,
uint32_t requestsCount,
Baselib_ErrorState* errorState
);
// Status of processing send/recv.
typedef enum Baselib_RegisteredNetwork_ProcessStatus
{
// No further items to process.
//
// Note that this does not imply that all requests have been fully processed at any moment in time.
Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately = 0,
// deprecated, same as Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately
Baselib_RegisteredNetwork_ProcessStatus_Done
COMPILER_DEPRECATED_ENUM_VALUE("Use Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately instead (equivalent)") = 0,
// Should call again, there is more workload to process.
Baselib_RegisteredNetwork_ProcessStatus_Pending = 1,
} Baselib_RegisteredNetwork_ProcessStatus;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_RegisteredNetwork_ProcessStatus);
// Processes the receive queue on a socket.
//
// Needs to be called periodically to ensure requests are processed.
// You should call this in a loop until either your time budget is exceeded or the function returns Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately.
//
// Platforms emulating RIO behavior with sockets, perform one receive per call until there are no more receive requests in the queue.
// Requests failed due to empty socket receive buffer are requeued and processed at the next call to Baselib_RegisteredNetwork_Socket_UDP_ProcessRecv.
// In that case Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately is returned since an immediate retry will not have any effect.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns Baselib_RegisteredNetwork_ProcessStatus_Pending if there is more workload to process immediately, Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately if otherwise
BASELIB_API Baselib_RegisteredNetwork_ProcessStatus Baselib_RegisteredNetwork_Socket_UDP_ProcessRecv(
Baselib_RegisteredNetwork_Socket_UDP socket,
Baselib_ErrorState* errorState
);
// Processes the send queue on a socket.
//
// Needs to be called periodically to ensure requests are processed.
// You should call this in a loop until either your time budget is exceeded or the function returns Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately.
//
// Platforms emulating RIO behavior with sockets, perform one send per call until there are no more send requests in the queue.
// Requests failed due to full socket send buffer are requeued and processed at the next call to Baselib_RegisteredNetwork_Socket_UDP_ProcessSend.
// In that case Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately is returned since an immediate retry will not have any effect.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns Baselib_RegisteredNetwork_ProcessStatus_Pending if there is more workload to process immediately, Baselib_RegisteredNetwork_ProcessStatus_NonePendingImmediately if otherwise
BASELIB_API Baselib_RegisteredNetwork_ProcessStatus Baselib_RegisteredNetwork_Socket_UDP_ProcessSend(
Baselib_RegisteredNetwork_Socket_UDP socket,
Baselib_ErrorState* errorState
);
// Status of a recv/send completion queue.
typedef enum Baselib_RegisteredNetwork_CompletionQueueStatus
{
// No results are ready for dequeuing.
Baselib_RegisteredNetwork_CompletionQueueStatus_NoResultsAvailable = 0,
// Results are available for dequeuing.
Baselib_RegisteredNetwork_CompletionQueueStatus_ResultsAvailable = 1,
} Baselib_RegisteredNetwork_CompletionQueueStatus;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_RegisteredNetwork_CompletionQueueStatus);
// Wait until results appears for a previously scheduled receive.
//
// \param timeoutInMilliseconds Wait timeout.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns Baselib_RegisteredNetwork_CompletionQueueStatus_ResultsAvailable if results are available for dequeue, Baselib_RegisteredNetwork_CompletionQueueStatus_NoResultsAvailable otherwise
BASELIB_API Baselib_RegisteredNetwork_CompletionQueueStatus Baselib_RegisteredNetwork_Socket_UDP_WaitForCompletedRecv(
Baselib_RegisteredNetwork_Socket_UDP socket,
uint32_t timeoutInMilliseconds,
Baselib_ErrorState* errorState
);
// Wait until results appears for a previously scheduled send.
//
// \param timeoutInMilliseconds Wait timeout.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns Baselib_RegisteredNetwork_CompletionQueueStatus_ResultsAvailable if results are available for dequeue, Baselib_RegisteredNetwork_CompletionQueueStatus_NoResultsAvailable otherwise
BASELIB_API Baselib_RegisteredNetwork_CompletionQueueStatus Baselib_RegisteredNetwork_Socket_UDP_WaitForCompletedSend(
Baselib_RegisteredNetwork_Socket_UDP socket,
uint32_t timeoutInMilliseconds,
Baselib_ErrorState* errorState
);
// Dequeue receive result.
//
// \param results Results array. No-op if null.
// \param resultsCount Amount of elements in results array. No-op if zero.
//
// If you're calling this method on multiple threads for the same completion queue in parallel, it may spuriously return 0.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns number of dequeued entries
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_DequeueRecv(
Baselib_RegisteredNetwork_Socket_UDP socket,
Baselib_RegisteredNetwork_CompletionResult results[],
uint32_t resultsCount,
Baselib_ErrorState* errorState
);
// Dequeue send result.
//
// \param results Results array. No-op if null.
// \param resultsCount Amount of elements in results array. No-op if zero.
//
// If you're calling this method on multiple threads for the same completion queue in parallel, it may spuriously return 0.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid
//
// \returns number of dequeued entries
BASELIB_API uint32_t Baselib_RegisteredNetwork_Socket_UDP_DequeueSend(
Baselib_RegisteredNetwork_Socket_UDP socket,
Baselib_RegisteredNetwork_CompletionResult results[],
uint32_t resultsCount,
Baselib_ErrorState* errorState
);
// Get bind address of udp socket.
//
// \param socket Socket to be used.
// \param dstAddress Pointer to address to write data to.
//
// Possible error codes:
// - InvalidArgument: if socket is invalid or if dstAddress is null
BASELIB_API void Baselib_RegisteredNetwork_Socket_UDP_GetNetworkAddress(
Baselib_RegisteredNetwork_Socket_UDP socket,
Baselib_NetworkAddress* dstAddress,
Baselib_ErrorState* errorState
);
// Closes UDP socket.
//
// Passing an invalid socket handle results in a no-op.
//
// \param socket Socket to be closed.
BASELIB_API void Baselib_RegisteredNetwork_Socket_UDP_Close(
Baselib_RegisteredNetwork_Socket_UDP socket
);
// ------------------------------------------------------------------------------------------------
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,84 @@
#pragma once
// Baselib_Semaphore
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
// decremented, or toggled) depending on programmer-defined conditions.
//
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
// resource becomes available.
//
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
// This is the max number of tokens guaranteed to be held by the semaphore at
// any given point in time. Tokens submitted that exceed this value may silently be discarded.
static const int32_t Baselib_Semaphore_MaxGuaranteedCount = UINT16_MAX;
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/Baselib_Semaphore_FutexBased.inl.h"
#else
#include "Internal/Baselib_Semaphore_SemaphoreBased.inl.h"
#endif
// Creates a counting semaphore synchronization primitive.
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
//
// For optimal performance, the returned Baselib_Semaphore should be stored at a cache aligned memory location.
//
// \returns A struct representing a semaphore instance. Use Baselib_Semaphore_Free to free the semaphore.
BASELIB_INLINE_API Baselib_Semaphore Baselib_Semaphore_Create(void);
// Wait for semaphore token to become available
//
// This function is guaranteed to emit an acquire barrier.
// Returns if token was consumed or was woken up by Baselib_Semaphore_ResetAndReleaseWaitingThreads.
BASELIB_INLINE_API void Baselib_Semaphore_Acquire(Baselib_Semaphore* semaphore);
// Try to consume a token and return immediately.
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// \returns true if token was consumed. false if not.
BASELIB_INLINE_API bool Baselib_Semaphore_TryAcquire(Baselib_Semaphore* semaphore);
// Wait for semaphore token to become available
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// Acquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while Acquire may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \param timeout Time to wait for token to become available.
//
// \returns true if token was consumed or was woken up by Baselib_Semaphore_ResetAndReleaseWaitingThreads. false if timeout was reached.
BASELIB_INLINE_API bool Baselib_Semaphore_TryTimedAcquire(Baselib_Semaphore* semaphore, const uint32_t timeoutInMilliseconds);
// Submit tokens to the semaphore.
//
// When successful this function is guaranteed to emit a release barrier.
//
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
// If count reaches `Baselib_Semaphore_MaxGuaranteedCount` this function may silently discard any overflow.
BASELIB_INLINE_API void Baselib_Semaphore_Release(Baselib_Semaphore* semaphore, const uint16_t count);
// If threads are waiting on Baselib_Semaphore_Acquire / Baselib_Semaphore_TryTimedAcquire,
// releases enough tokens to wake them up. Otherwise consumes all available tokens.
//
// When successful this function is guaranteed to emit a release barrier.
//
// \returns number of released threads.
BASELIB_INLINE_API uint32_t Baselib_Semaphore_ResetAndReleaseWaitingThreads(Baselib_Semaphore* semaphore);
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
// Calling this function with a nullptr results in a no-op.
BASELIB_INLINE_API void Baselib_Semaphore_Free(Baselib_Semaphore* semaphore);

View File

@@ -0,0 +1,274 @@
#pragma once
// Baselib Socket
//
// This is a socket platform abstraction api heavily influenced by non-blocking Berkeley Sockets.
// Berkeley Sockets look like they behave in similar fashion on all platforms, but there are a lot of small differences.
// Compared to Berkeley Sockets this API is somewhat more high level and doesn't provide as fine grained control.
#include "Baselib_ErrorState.h"
#include "Baselib_NetworkAddress.h"
#include "Internal/Baselib_EnumSizeCheck.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Socket Handle, a handle to a specific socket.
typedef struct Baselib_Socket_Handle { intptr_t handle; } Baselib_Socket_Handle;
// Sentinel value representing an invalid socket handle (e.g. returned by Baselib_Socket_TCP_Accept when no connection is pending).
static const Baselib_Socket_Handle Baselib_Socket_Handle_Invalid = { -1 };
// Socket protocol.
typedef enum Baselib_Socket_Protocol
{
    // UDP - message/datagram based protocol.
    Baselib_Socket_Protocol_UDP = 1,
    // TCP - connection oriented, stream based protocol.
    Baselib_Socket_Protocol_TCP = 2,
} Baselib_Socket_Protocol;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_Socket_Protocol);
// Socket message. Used to send or receive data in message based protocols such as UDP.
typedef struct Baselib_Socket_Message
{
    // Remote address: the destination when sending, the source when receiving.
    Baselib_NetworkAddress* address;
    // Pointer to the payload buffer.
    void* data;
    // Size of the payload buffer in bytes.
    uint32_t dataLen;
} Baselib_Socket_Message;
// Create a socket.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: if context, family or protocol is invalid or unknown.
// - Baselib_ErrorCode_AddressFamilyNotSupported: if the requested address family is not available.
BASELIB_API Baselib_Socket_Handle Baselib_Socket_Create(
Baselib_NetworkAddress_Family family,
Baselib_Socket_Protocol protocol,
Baselib_ErrorState* errorState
);
// Bind socket to a local address and port.
//
// Bind can only be called once per socket.
// Address can be a specific interface IP address.
// If the encoded IP is nullptr / "0.0.0.0" / "::" (same as INADDR_ANY), the socket will bind to all interfaces.
//
// \param addressReuse A set of sockets can be bound to the same address port combination if all
// sockets are bound with this flag set to AddressReuse_Allow, similar to
// SO_REUSEADDR+SO_REUSEPORT.
// Please note that setting this flag to false doesn't mean anyone is forbidden
// to binding to the same ip/port combo, or in other words it does NOT use
// SO_EXCLUSIVEADDRUSE where it's available.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid open socket. Address pointer is null or incompatible.
// - Baselib_ErrorCode_AddressInUse: Address or port is already bound by another socket, or the system is out of ephemeral ports.
// - Baselib_ErrorCode_AddressUnreachable: Address doesn't map to any known interface.
BASELIB_API void Baselib_Socket_Bind(
Baselib_Socket_Handle socket,
const Baselib_NetworkAddress* address,
Baselib_NetworkAddress_AddressReuse addressReuse,
Baselib_ErrorState* errorState
);
// Connect a socket to a remote address.
//
// Note that this function initiates an asynchronous connection. You must call
// Baselib_Socket_Poll with Baselib_Socket_PollEvents.requestedEvents =
// Baselib_Socket_PollEvents_Connected to wait for the connection to finish.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket. Address pointer is null or incompatible.
// - Baselib_ErrorCode_AddressUnreachable: Unable to establish a connection with peer.
BASELIB_API void Baselib_Socket_TCP_Connect(
Baselib_Socket_Handle socket,
const Baselib_NetworkAddress* address,
Baselib_NetworkAddress_AddressReuse addressReuse,
Baselib_ErrorState* errorState
);
// Bitmask of events to be used in Baselib_Socket_Poll
typedef enum Baselib_Socket_PollEvents
{
    // Socket is readable.
    Baselib_Socket_PollEvents_Readable = 1,
    // Socket is writable.
    Baselib_Socket_PollEvents_Writable = 2,
    // An asynchronous connection attempt (Baselib_Socket_TCP_Connect) has completed.
    // Note: Connected cannot be set at the same time as Readable and Writable.
    Baselib_Socket_PollEvents_Connected = 4,
} Baselib_Socket_PollEvents;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_Socket_PollEvents);
// Socket entry to be passed into Baselib_Socket_Poll.
//
// Note that the name `Fd` does not refer to the fact that these are file
// descriptors (they are sockets), but rather the fact that nearly every socket
// API calls this struct "pollfd".
typedef struct Baselib_Socket_PollFd
{
    // Socket to poll.
    Baselib_Socket_Handle handle;
    // Events the caller wants to wait for.
    Baselib_Socket_PollEvents requestedEvents;
    // Events that actually occurred; written by Baselib_Socket_Poll.
    Baselib_Socket_PollEvents resultEvents;
    // Per-socket error reporting; may alias the errorState parameter passed to Baselib_Socket_Poll.
    Baselib_ErrorState* errorState;
} Baselib_Socket_PollFd;
// Convenience constructor for a Baselib_Socket_PollFd entry. Using it is optional;
// the struct may just as well be filled out by hand.
static inline Baselib_Socket_PollFd Baselib_Socket_PollFd_New(Baselib_Socket_Handle handle, Baselib_Socket_PollEvents events, Baselib_ErrorState* errorState)
{
    Baselib_Socket_PollFd pollFd;
    pollFd.handle = handle;
    pollFd.errorState = errorState;
    pollFd.requestedEvents = events;
    // No events have occurred yet; Baselib_Socket_Poll fills this in.
    pollFd.resultEvents = (Baselib_Socket_PollEvents)0;
    return pollFd;
}
// Wait for a socket being readable, writable, or an error occurs. Specific
// events that occurred will be set in sockets[i].resultEvents. Errors
// associated with particular sockets will be reported in sockets[i].errorState.
//
// It is valid to have sockets[i].errorState to point to the same ErrorState as
// the outer parameter errorState - or, more generally, you may alias whatever
// error states within sockets[i].errorState and the parameter errorState.
//
// If timeoutInMilliseconds==0, Poll() will not block. There is no option to
// wait indefinitely.
//
// Possible error codes on the outer parameter errorState:
// - Baselib_ErrorCode_InvalidArgument: Sockets list is null. An individual socket handle is invalid.
//
// Possible error codes on sockets[i].errorState:
// - Baselib_ErrorCode_AddressUnreachable: Asynchronous Connect() failed.
// - Baselib_ErrorCode_Disconnected: Socket has been disconnected, or asynchronous Connect() failed (apple devices).
BASELIB_API void Baselib_Socket_Poll(
Baselib_Socket_PollFd* sockets,
uint32_t socketsCount,
uint32_t timeoutInMilliseconds,
Baselib_ErrorState* errorState
);
// Get address of locally bound socket.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid bound socket. Address pointer is null.
BASELIB_API void Baselib_Socket_GetAddress(
Baselib_Socket_Handle socket,
Baselib_NetworkAddress* address,
Baselib_ErrorState* errorState
);
// Configure a TCP server socket to begin listening for incoming connections.
// The maximum queue size is used for each platform.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket.
// - Baselib_ErrorCode_AddressInUse: Another socket is already listening on the same port, or the system is out of ephemeral ports.
BASELIB_API void Baselib_Socket_TCP_Listen(
Baselib_Socket_Handle socket,
Baselib_ErrorState* errorState
);
// Accept an incoming TCP connection to this server socket. When there are no
// incoming connections, this returns Baselib_Socket_Handle_Invalid and does not
// raise an error.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket.
BASELIB_API Baselib_Socket_Handle Baselib_Socket_TCP_Accept(
Baselib_Socket_Handle socket,
Baselib_ErrorState* errorState
);
// Send messages to unconnected destinations.
//
// Socket does not need to be bound before calling SendMessages.
// When sending multiple messages an error may be raised after some of the messages were submitted.
//
// If the socket is not already bound to a port SendMessages will implicitly bind the socket before issuing the send operation.
//
// Warning: This function may not fail when called with a TCP socket, as it may
// simply ignore the address parameter, and send to whatever the socket is
// connected to. However, as there is no way to retrieve the actual number of
// bytes sent with this API, its use in this manner is strongly discouraged.
//
// Known issues (behavior may change in the future):
// Some platforms do not support sending zero sized UDP packets.
//
// Possible error codes:
// - Baselib_ErrorCode_AddressUnreachable: Message destination is known to not be reachable from this machine.
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket. Messages is `NULL` or a message has an invalid or incompatible destination.
// - Baselib_ErrorCode_InvalidBufferSize: Message payload exceeds max message size.
//
// \returns The number of messages successfully sent. This number may be lower than messageCount if send buffer is full or an error was raised. Reported error will be about last message tried to send.
BASELIB_API uint32_t Baselib_Socket_UDP_Send(
Baselib_Socket_Handle socket,
Baselib_Socket_Message messages[],
uint32_t messagesCount,
Baselib_ErrorState* errorState
);
// Send a message to the connected peer.
//
// \returns The possibly-zero length of the message actually sent, which may be less than `dataLen`.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket, or socket is not a TCP socket. Socket validity is not checked if dataLen==0.
// - Baselib_ErrorCode_Disconnected: Socket has been disconnected.
BASELIB_API uint32_t Baselib_Socket_TCP_Send(
Baselib_Socket_Handle socket,
void* data,
uint32_t dataLen,
Baselib_ErrorState* errorState
);
// Receive messages from unconnected sources.
//
// UDP message data that doesn't fit a message buffer is silently discarded.
//
// Warning: This function may not fail when called with a TCP socket, as it may
// simply ignore the address parameter, and receive from whatever the socket is
// connected to. However, as there is no way to retrieve the actual number of
// bytes received with this API, its use in this manner is strongly discouraged.
//
// Known issues (behavior may change in the future):
// If the socket is not bound to a port RecvMessages will return zero without raising an error.
// Some platforms do not support receiving zero-sized UDP packets.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket. Or messages is `NULL`.
//
// \returns The number of messages successfully received. This number may be lower than messageCount if recv buffer is empty or an error was raised. Reported error will be about last message tried to receive.
BASELIB_API uint32_t Baselib_Socket_UDP_Recv(
Baselib_Socket_Handle socket,
Baselib_Socket_Message messages[],
uint32_t messagesCount,
Baselib_ErrorState* errorState
);
// Receive a message from a connected source. Note that this method differs from
// traditional socket APIs in that it is valid to return 0, this means that no
// data were received. Disconnection is detected by errorState being
// Baselib_ErrorCode_Disconnected.
//
// This function may or may not work when passed a UDP socket. Graceful error
// handling of this case is omitted due to performance reasons.
//
// \returns The length of the message actually received, which may be less than `dataLen` or even zero.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: Socket does not represent a valid socket.
// - Baselib_ErrorCode_Disconnected: Socket has been disconnected.
BASELIB_API uint32_t Baselib_Socket_TCP_Recv(
Baselib_Socket_Handle socket,
void* data,
uint32_t dataLen,
Baselib_ErrorState* errorState
);
// Close socket.
//
// Closing an already closed socket results in a no-op.
BASELIB_API void Baselib_Socket_Close(
Baselib_Socket_Handle socket
);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,33 @@
#pragma once
#ifndef BASELIB_ENABLE_SOURCELOCATION
#ifdef NDEBUG
#define BASELIB_ENABLE_SOURCELOCATION 0
#else
#define BASELIB_ENABLE_SOURCELOCATION 1
#endif
#endif
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Human readable information about the original location of a piece of source code.
typedef struct Baselib_SourceLocation
{
    // Source file path (as produced by __FILE__), or NULL when source locations are disabled.
    const char* file;
    // Enclosing function name (as produced by __func__), or NULL when source locations are disabled.
    const char* function;
    // Line number (as produced by __LINE__), or 0 when source locations are disabled.
    uint32_t lineNumber;
} Baselib_SourceLocation;
// Macro to create source location in-place for the current line of code.
#if BASELIB_ENABLE_SOURCELOCATION
#define BASELIB_SOURCELOCATION Baselib_SourceLocation { __FILE__, __func__, __LINE__ }
#else
#define BASELIB_SOURCELOCATION Baselib_SourceLocation { NULL, NULL, 0 }
#endif
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,9 @@
#pragma once
// C99 compatible static_assert
// Use static_assert in all C++ code directly.
#ifdef __cplusplus
#define BASELIB_STATIC_ASSERT(EXPR_, MSG_) static_assert(EXPR_, MSG_)
#else
#define BASELIB_STATIC_ASSERT(EXPR_, MSG_) COMPILER_C_STATIC_ASSERT(EXPR_, MSG_)
#endif

View File

@@ -0,0 +1,63 @@
#pragma once
// Baselib_SystemFutex
// In computing, a futex (short for "fast userspace mutex") is a kernel system call that programmers can use to implement basic locking, or as a building block
// for higher-level locking abstractions such as semaphores and POSIX mutexes or condition variables.
//
// A futex consists of a kernelspace wait queue that is attached to an atomic integer in userspace. Multiple processes or threads operate on the integer
// entirely in userspace (using atomic operations to avoid interfering with one another), and only resort to relatively expensive system calls to request
// operations on the wait queue (for example to wake up waiting processes, or to put the current process on the wait queue). A properly programmed futex-based
// lock will not use system calls except when the lock is contended; since most operations do not require arbitration between processes, this will not happen
// in most cases.
//
// "Futex", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Futex&oldid=850172014
#include "Baselib_WakeupFallbackStrategy.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Reports whether this platform provides a kernel-level futex api.
//
// When native support is not present the futex falls back to an emulated futex setup.
//
// Notes on the emulation:
// * A single synchronization primitive is used to multiplex all potential addresses. This means there will be
//   additional contention as well as spurious wakeups compared to a native implementation.
// * While the fallback implementation is not something that should be used in production, it can still provide
//   value when bringing up new platforms or to test features built on top of the futex api.
BASELIB_INLINE_API bool Baselib_SystemFutex_NativeSupport(void)
{
    return PLATFORM_FUTEX_NATIVE_SUPPORT == 1;
}
// Wait for notification.
//
// Address will be checked atomically against expected before entering wait. This can be used to guarantee there are no lost wakeups.
// Note: When notified the thread always wakes up regardless of whether the expectation matches the value at address or not.
//
// | Problem this solves
// | Thread 1: checks condition and determine we should enter wait
// | Thread 2: change condition and notify waiting threads
// | Thread 1: enters waiting state
// |
// | With a futex the two Thread 1 operations become a single op.
//
// Spurious Wakeup - This function is subject to spurious wakeups.
//
// \param address Any address that can be read from both user and kernel space.
// \param expected What address points to will be checked against this value. If the values don't match thread will not enter a waiting state.
// \param timeoutInMilliseconds A timeout indicating to the kernel when to wake the thread. Regardless of being notified or not.
BASELIB_API void Baselib_SystemFutex_Wait(int32_t* address, int32_t expected, uint32_t timeoutInMilliseconds);
// Notify threads waiting on a specific address.
//
// \param address Any address that can be read from both user and kernel space
// \param count Number of waiting threads to wakeup.
// \param wakeupFallbackStrategy Platforms that don't support waking up a specific number of threads will use this strategy.
BASELIB_API void Baselib_SystemFutex_Notify(int32_t* address, uint32_t count, Baselib_WakeupFallbackStrategy wakeupFallbackStrategy);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,81 @@
#pragma once
// Baselib_SystemSemaphore
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
// decremented, or toggled) depending on programmer-defined conditions.
//
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
// resource becomes available.
//
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Opaque handle to a system semaphore instance.
typedef struct Baselib_SystemSemaphore_Handle { void* handle; } Baselib_SystemSemaphore_Handle;
// This is the maximum number of tokens that can be made available on a semaphore
enum { Baselib_SystemSemaphore_MaxCount = INT32_MAX };
// Creates a counting semaphore synchronization primitive.
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
//
// \returns A handle to a semaphore instance. Use Baselib_SystemSemaphore_Free to free the semaphore.
BASELIB_API Baselib_SystemSemaphore_Handle Baselib_SystemSemaphore_Create(void);
// Creates a counting semaphore synchronization primitive given a memory buffer.
//
// Semaphore is created in-place in semaphoreData and must be at least Baselib_SystemSemaphore_PlatformSize in size.
// (Some platforms don't support in-place creation and this function then works the same as Baselib_SystemSemaphore_Create() )
//
// \returns A handle to a semaphore instance. Use Baselib_Semaphore_FreeInplace to free the semaphore.
BASELIB_API Baselib_SystemSemaphore_Handle Baselib_SystemSemaphore_CreateInplace(void* semaphoreData);
// Wait for semaphore token to become available
//
BASELIB_API void Baselib_SystemSemaphore_Acquire(Baselib_SystemSemaphore_Handle semaphore);
// Try to consume a token and return immediately.
//
// \returns true if token was consumed. false if not.
BASELIB_API bool Baselib_SystemSemaphore_TryAcquire(Baselib_SystemSemaphore_Handle semaphore);
// Wait for semaphore token to become available
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \param timeout Time to wait for token to become available.
//
// \returns true if token was consumed. false if timeout was reached.
BASELIB_API bool Baselib_SystemSemaphore_TryTimedAcquire(Baselib_SystemSemaphore_Handle semaphore, uint32_t timeoutInMilliseconds);
// Submit tokens to the semaphore.
//
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
// If count reaches `Baselib_SystemSemaphore_MaxCount` this function silently discards any overflow.
// Note that hitting max count may inflict a heavy performance penalty.
BASELIB_API void Baselib_SystemSemaphore_Release(Baselib_SystemSemaphore_Handle semaphore, uint32_t count);
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, calling free may cause process abort.
BASELIB_API void Baselib_SystemSemaphore_Free(Baselib_SystemSemaphore_Handle semaphore);
// Reclaim resources held by the semaphore created using Baselib_SystemSemaphore_CreateInplace
//
// If threads are waiting on the semaphore, calling free will trigger an assert and may cause process abort.
// Must not be used to free a semaphore created with Baselib_Semaphore_Create
BASELIB_API void Baselib_SystemSemaphore_FreeInplace(Baselib_SystemSemaphore_Handle semaphore);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,107 @@
#pragma once
#include "Baselib_Timer.h"
#include "Baselib_ErrorState.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Unique thread id that can be used to compare different threads or stored for bookkeeping etc..
typedef intptr_t Baselib_Thread_Id;
// Baselib_Thread_Id that is guaranteed not to represent a thread
static const Baselib_Thread_Id Baselib_Thread_InvalidId = 0;
// Max number of characters for threadnames internal to baselib. Used for name in Baselib_Thread_Config
// In practice thread implementation on some platforms support even fewer characters for names
static const size_t Baselib_Thread_MaxThreadNameLength = 64;
// Yields the execution context of the current thread to other threads, potentially causing a context switch.
//
// The operating system may decide to not switch to any other thread.
BASELIB_API void Baselib_Thread_YieldExecution(void);
// Return the thread id of the current thread, i.e. the thread that is calling this function
BASELIB_API Baselib_Thread_Id Baselib_Thread_GetCurrentThreadId(void);
// We currently do not allow creating threads from C# bindings,
// since there is right now no accessible way to inform the garbage collector about new baselib threads.
// I.e. any managed allocation on a baselib thread created from C# would never be garbage collected!
#ifndef BASELIB_BINDING_GENERATION
// The minimum guaranteed number of max concurrent threads that works on all platforms.
//
// This only applies if all the threads are created with Baselib.
// In practice, it might not be possible to create this many threads either. If memory is exhausted,
// by for example creating threads with very large stacks, that might translate to a lower limit in practice.
// Note that on many platforms the actual limit is way higher.
static const int Baselib_Thread_MinGuaranteedMaxConcurrentThreads = 64;
// Opaque thread object, created by Baselib_Thread_Create.
typedef struct Baselib_Thread Baselib_Thread;
// Signature of a thread entry point function; arg receives Baselib_Thread_Config::entryPointArgument.
typedef void (*Baselib_Thread_EntryPointFunction)(void* arg);
typedef struct Baselib_Thread_Config
{
    // Null-terminated name of the created thread (optional)
    // Useful exclusively for debugging - which tooling it is shown by and how it can be queried is platform dependent.
    // Truncated to Baselib_Thread_MaxThreadNameLength number of characters and copied to an internal buffer
    const char* name;
    // The minimum size in bytes to allocate for the thread stack. (optional)
    // If not set, a platform/system specific default stack size will be used.
    // If the value set does not conform to platform specific minimum values or alignment requirements,
    // the actual stack size used will be bigger than what was requested.
    uint64_t stackSize;
    // Required, this is set by calling Baselib_Thread_ConfigCreate with a valid entry point function.
    Baselib_Thread_EntryPointFunction entryPoint;
    // Argument to the entry point function, only needs to be set if entryPoint takes an argument.
    void* entryPointArgument;
} Baselib_Thread_Config;
// Creates and starts a new thread.
//
// On some platforms the thread name is not set until the thread has begun executing, which is not guaranteed
// to have happened when the creation function returns. There is typically a platform specific limit on the length of
// the thread name. If config.name is longer than this limit, the name will be automatically truncated.
//
// \param config A pointer to a config object. entryPoint needs to be a valid function pointer, all other properties can be zero/null.
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: config.entryPoint is null
// - Baselib_ErrorCode_OutOfSystemResources: there is not enough memory to create a thread with that stack size or the system limit of number of concurrent threads has been reached
BASELIB_API Baselib_Thread* Baselib_Thread_Create(Baselib_Thread_Config config, Baselib_ErrorState* errorState);
// Waits until a thread has finished its execution.
//
// Also frees its resources.
// If called and completed successfully, no Baselib_Thread function can be called again on the same Baselib_Thread.
//
// \param thread A pointer to a thread object.
// \param timeoutInMilliseconds Time to wait for the thread to finish
//
// Possible error codes:
// - Baselib_ErrorCode_InvalidArgument: thread is null
// - Baselib_ErrorCode_ThreadCannotJoinSelf: the thread parameter points to the current thread, i.e. the thread that is calling this function
// - Baselib_ErrorCode_Timeout: timeout is reached before the thread has finished
BASELIB_API void Baselib_Thread_Join(Baselib_Thread* thread, uint32_t timeoutInMilliseconds, Baselib_ErrorState* errorState);
// Return the thread id of the thread given as argument
//
// \param thread A pointer to a thread object.
BASELIB_API Baselib_Thread_Id Baselib_Thread_GetId(Baselib_Thread* thread);
// Returns true if there is support in baselib for threads on this platform, otherwise false.
BASELIB_API bool Baselib_Thread_SupportsThreads(void);
#endif // !BASELIB_BINDING_GENERATION
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,58 @@
#pragma once
// Baselib_ThreadLocalStorage
// Thread-local storage (TLS) is a computer programming method that uses static or global memory local to a thread.
//
// TLS is used in some places where ordinary, single-threaded programs would use global variables, but where this would be inappropriate
// in multithreaded cases. An example of such situations is where functions use a global variable to set an error condition
// (for example the global variable errno used by many functions of the C library). If errno were a global variable,
// a call of a system function on one thread may overwrite the value previously set by a call of a system function on a different thread,
// possibly before following code on that different thread could check for the error condition. The solution is to have errno be a variable
// that looks like it is global, but in fact exists once per thread—i.e., it lives in thread-local storage. A second use case would be
// multiple threads accumulating information into a global variable. To avoid a race condition, every access to this global variable would
// have to be protected by a mutex. Alternatively, each thread might accumulate into a thread-local variable (that, by definition,
// cannot be read from or written to from other threads, implying that there can be no race conditions). Threads then only have to synchronise
// a final accumulation from their own thread-local variable into a single, truly global variable.
//
// Many systems impose restrictions on the size of the thread-local memory block, in fact often rather tight limits.
// On the other hand, if a system can provide at least a memory address (pointer) sized variable thread-local, then this allows the use of
// arbitrarily sized memory blocks in a thread-local manner, by allocating such a memory block dynamically and storing the memory address of
// that block in the thread-local variable.
//
// "Thread-local storage", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Thread-local_storage&oldid=860347814
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// It's guaranteed that we can allocate at least Baselib_TLS_MinimumGuaranteedSlots values on all platforms.
static const uint32_t Baselib_TLS_MinimumGuaranteedSlots = 100;
// Thread Local Storage slot handle.
typedef uintptr_t Baselib_TLS_Handle;
// Allocates a new Thread Local Storage slot. In case of an error, abort with Baselib_ErrorCode_OutOfSystemResources will be triggered.
// On some platforms this might be fiber local storage.
//
// The value of a newly created Thread Local Storage slot is guaranteed to be zero on all threads.
BASELIB_API Baselib_TLS_Handle Baselib_TLS_Alloc(void);
// Frees the provided Thread Local Storage slot.
BASELIB_API void Baselib_TLS_Free(Baselib_TLS_Handle handle);
// Sets a value on the Thread Local Storage slot (visible only to the calling thread).
BASELIB_FORCEINLINE_API void Baselib_TLS_Set(Baselib_TLS_Handle handle, uintptr_t value);
// Gets the calling thread's value from the Thread Local Storage slot.
//
// If called on a just initialized variable, guaranteed to return 0.
BASELIB_FORCEINLINE_API uintptr_t Baselib_TLS_Get(Baselib_TLS_Handle handle);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif
#include <C/Baselib_ThreadLocalStorage.inl.h>

View File

@@ -0,0 +1,85 @@
#pragma once
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Time conversion factors.
//
// (not an enum since Int32 can't represent Baselib_NanosecondsPerMinute)
static const uint64_t Baselib_SecondsPerMinute = 60ULL;
static const uint64_t Baselib_MillisecondsPerSecond = 1000ULL;
static const uint64_t Baselib_MillisecondsPerMinute = 60ULL * 1000ULL;
static const uint64_t Baselib_MicrosecondsPerMillisecond = 1000ULL;
static const uint64_t Baselib_MicrosecondsPerSecond = 1000ULL * 1000ULL;
static const uint64_t Baselib_MicrosecondsPerMinute = 60ULL * 1000ULL * 1000ULL;
static const uint64_t Baselib_NanosecondsPerMicrosecond = 1000ULL;
static const uint64_t Baselib_NanosecondsPerMillisecond = 1000ULL * 1000ULL;
static const uint64_t Baselib_NanosecondsPerSecond = 1000ULL * 1000ULL * 1000ULL;
static const uint64_t Baselib_NanosecondsPerMinute = 60ULL * 1000ULL * 1000ULL * 1000ULL;
// Timer specific representation of time progression.
typedef uint64_t Baselib_Timer_Ticks;
// Baselib_Timer_Ticks are guaranteed to be more granular than this constant (i.e. one tick is at most this many nanoseconds).
static const uint64_t Baselib_Timer_MaxNumberOfNanosecondsPerTick = 1000ULL;
// Baselib_Timer_Ticks are guaranteed to be less granular than this constant (i.e. one tick is at least this many nanoseconds).
static const double Baselib_Timer_MinNumberOfNanosecondsPerTick = 0.01;
// Defines the conversion ratio from Baselib_Timer_Ticks to nanoseconds as a fraction.
typedef struct Baselib_Timer_TickToNanosecondConversionRatio
{
    uint64_t ticksToNanosecondsNumerator;
    uint64_t ticksToNanosecondsDenominator;
} Baselib_Timer_TickToNanosecondConversionRatio;
// Returns the conversion ratio between ticks and nanoseconds.
//
// The conversion factor is guaranteed to be constant for the entire application for its entire lifetime.
// However, it may be different on every start of the application.
//
// \returns The conversion factor from ticks to nanoseconds as an integer fraction.
BASELIB_API Baselib_Timer_TickToNanosecondConversionRatio Baselib_Timer_GetTicksToNanosecondsConversionRatio(void);
// The fraction of Baselib_Timer_GetTicksToNanosecondsConversionRatio as a precomputed double value. It is subject to precision loss.
//
// Attention:
// This value is determined during static initialization of baselib. As such it should not be used if it is not guaranteed that baselib is fully loaded.
// Prefer Baselib_Timer_GetTicksToNanosecondsConversionRatio when in doubt.
extern BASELIB_API const double Baselib_Timer_TickToNanosecondsConversionFactor;
// Get the current tick count of the high precision timer.
//
// Accuracy:
// It is assumed that the accuracy corresponds to the granularity of Baselib_Timer_Ticks (which is determined by Baselib_Timer_GetTicksToNanosecondsConversionRatio).
// However, there are no strict guarantees on the accuracy of the timer.
//
// Monotony:
// ATTENTION: On some platforms this clock is suspended during application/device sleep states.
// The timer is not susceptible to wall clock time changes by the user.
// Different threads are guaranteed to be on the same timeline.
//
// Known issues:
// * Some web browsers impose Spectre mitigation which can introduce jitter in this timer.
// * Some web browsers may have different timelines per thread/webworker if they are not spawned on startup (this is a bug according to newest W3C specification)
//
// \returns Current tick value of the high precision timer.
BASELIB_API Baselib_Timer_Ticks Baselib_Timer_GetHighPrecisionTimerTicks(void);
// This function will wait for at least the requested amount of time before returning.
//
// Unlike some implementations of 'sleep', passing 0 does NOT guarantee a thread yield and may return immediately! Use the corresponding functionality in Baselib_Thread instead.
//
// \param timeInMilliseconds Time to wait in milliseconds
BASELIB_API void Baselib_Timer_WaitForAtLeast(uint32_t timeInMilliseconds);
// Time since application startup in seconds.
//
// Disregarding potential rounding errors, all threads are naturally on the same timeline (i.e. time since process start).
BASELIB_API double Baselib_Timer_GetTimeSinceStartupInSeconds(void);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -0,0 +1,33 @@
#pragma once
#include "Internal/Baselib_EnumSizeCheck.h"
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Can be used to control the wakeup behavior on platforms that don't support waking up a specific number of threads.
// Syscalls don't come for free so you need to weigh the cost of doing multiple syscalls against the cost of having lots of context switches.
//
// There are however two easy cases.
// * When you only want to notify one thread use Baselib_WakeupFallbackStrategy_OneByOne.
// * When you want to wakeup all threads use Baselib_WakeupFallbackStrategy_All
//
// For the not so easy cases.
// * Use Baselib_WakeupFallbackStrategy_OneByOne when wake count is low, or significantly lower than the number of waiting threads.
// * Use Baselib_WakeupFallbackStrategy_All if wake count is high.
typedef enum Baselib_WakeupFallbackStrategy
{
    // Do one syscall for each waiting thread or notification.
    Baselib_WakeupFallbackStrategy_OneByOne,
    // Do a single syscall to wake all waiting threads.
    Baselib_WakeupFallbackStrategy_All,
} Baselib_WakeupFallbackStrategy;
BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(Baselib_WakeupFallbackStrategy);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif

View File

@@ -0,0 +1,152 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemFutex.h"
#include "../Baselib_Thread.h"
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
#error "Only use this implementation on top of a proper futex, in all other situations use Baselib_CappedSemaphore_SemaphoreBased.inl.h"
#endif
// Space out to different cache lines.
// The idea here is that threads waking up from sleep should not have to
// access the cache line where count is stored, and only touch wakeups.
// The only exception to that rule is if we hit a timeout.
typedef struct Baselib_CappedSemaphore
{
    // Number of pending wakeup tokens handed out by Release to blocked acquirers.
    int32_t wakeups;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
    // Available tokens; negative values encode the number of waiting threads.
    int32_t count;
    const int32_t cap;
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t) * 2]; // Having cap on the same cacheline is fine since it is a constant.
} Baselib_CappedSemaphore;
BASELIB_STATIC_ASSERT(sizeof(Baselib_CappedSemaphore) == PLATFORM_CACHE_LINE_SIZE * 2, "Baselib_CappedSemaphore (Futex) size should match 2*cacheline size (128bytes)");
BASELIB_STATIC_ASSERT(offsetof(Baselib_CappedSemaphore, wakeups) ==
    (offsetof(Baselib_CappedSemaphore, count) - PLATFORM_CACHE_LINE_SIZE), "Baselib_CappedSemaphore (futex) wakeups and count shouldnt share cacheline");
// Creates a capped semaphore with zero initial tokens and the given maximum token count.
BASELIB_INLINE_API Baselib_CappedSemaphore Baselib_CappedSemaphore_Create(const uint16_t cap)
{
    // Fields in declaration order: wakeups, spacer0, count, cap, spacer1.
    Baselib_CappedSemaphore createdSemaphore = { 0, {0}, 0, cap, {0} };
    return createdSemaphore;
}
// Attempts to consume one pending wakeup token.
// Returns true if a token was consumed, false if none were available.
BASELIB_INLINE_API bool Detail_Baselib_CappedSemaphore_ConsumeWakeup(Baselib_CappedSemaphore* semaphore)
{
    // On CAS failure 'observedWakeups' is refreshed with the current value,
    // so the loop condition re-checks availability each round.
    for (int32_t observedWakeups = Baselib_atomic_load_32_relaxed(&semaphore->wakeups); observedWakeups > 0;)
    {
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->wakeups, &observedWakeups, observedWakeups - 1))
            return true;
    }
    return false;
}
// Non-blocking acquire: takes a token only if one is currently available.
// Returns true on success, false if the semaphore had no tokens.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryAcquire(Baselib_CappedSemaphore* semaphore)
{
    // CAS-decrement loop; a failed CAS reloads 'observedCount' and the
    // for-condition bails out once no tokens remain.
    for (int32_t observedCount = Baselib_atomic_load_32_relaxed(&semaphore->count); observedCount > 0;)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &observedCount, observedCount - 1))
            return true;
    }
    return false;
}
// Acquires a token, blocking on the futex until one becomes available.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Acquire(Baselib_CappedSemaphore* semaphore)
{
    // Optimistically decrement; a positive pre-decrement value means we owned a token.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;
    // count went to/below zero: we are registered as a waiter. Sleep until a
    // Release hands out a wakeup token we manage to consume. Waiting on
    // 'wakeups == 0' tolerates spurious futex wakeups.
    while (!Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore))
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, UINT32_MAX);
    }
}
// Acquires a token, blocking for at most timeoutInMilliseconds.
// Returns true if a token was acquired, false on timeout.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryTimedAcquire(Baselib_CappedSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: optimistic decrement succeeded, we owned a token.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;
    // A Release may already have posted a wakeup for us.
    if (Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore))
        return true;
    // Wait on the futex, re-checking for a wakeup token until the deadline passes.
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, timeLeft);
        if (Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);
    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Detail_Baselib_CappedSemaphore_ConsumeWakeup(semaphore));
    return true;
}
// Releases up to _count tokens, clamped so the total never exceeds the cap.
// Wakes as many waiting threads as tokens actually released.
// Returns the number of tokens actually released (0 if the semaphore was already at cap).
BASELIB_INLINE_API uint16_t Baselib_CappedSemaphore_Release(Baselib_CappedSemaphore* semaphore, const uint16_t _count)
{
    int32_t count = _count;
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    do
    {
        // Already at cap: nothing to release.
        if (previousCount == semaphore->cap)
            return 0;
        // Clamp so count never exceeds cap.
        if (previousCount + count > semaphore->cap)
            count = semaphore->cap - previousCount;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->count, &previousCount, previousCount + count));
    // A negative previous count encodes the number of blocked acquirers;
    // hand out wakeup tokens and notify that many of them via the futex.
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
        Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
    }
    return count;
}
// Sets the token count to zero and wakes all currently waiting threads.
// Returns the number of threads that were woken up.
BASELIB_INLINE_API uint32_t Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(Baselib_CappedSemaphore* semaphore)
{
    const int32_t count = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    // Non-negative count means no threads were waiting.
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // -count waiters were blocked: post that many wakeup tokens and notify them all at once.
    const int32_t threadsToWakeup = -count;
    Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
    Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_All);
    return threadsToWakeup;
}
// Destroys the semaphore. No-op on null; the futex-based variant owns no
// external resources, so this only sanity-checks that no threads are waiting.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Free(Baselib_CappedSemaphore* semaphore)
{
    if (!semaphore)
        return;
    // A negative count means threads are still blocked in Acquire.
    const int32_t remainingTokens = Baselib_atomic_load_32_seq_cst(&semaphore->count);
    BaselibAssert(remainingTokens >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
}

View File

@@ -0,0 +1,122 @@
#pragma once
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemSemaphore.h"
#include "../Baselib_Thread.h"
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#error "It's highly recommended to use Baselib_CappedSemaphore_FutexBased.inl.h on platforms which have native futex support"
#endif
typedef struct Baselib_CappedSemaphore
{
    Baselib_SystemSemaphore_Handle handle;
    // Available tokens; negative values encode the number of waiting threads.
    int32_t count;
    const int32_t cap;
    // Make the capped semaphore take a full cache line so that if the user cacheline aligned semaphore,
    // llsc operations on count will not spuriously fail.
    char _cachelineSpacer[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t) * 2 - sizeof(Baselib_SystemSemaphore_Handle)];
    char _systemSemaphoreData[Baselib_SystemSemaphore_PlatformSize];
} Baselib_CappedSemaphore;
BASELIB_STATIC_ASSERT((offsetof(Baselib_CappedSemaphore, count) + PLATFORM_CACHE_LINE_SIZE - sizeof(Baselib_SystemSemaphore_Handle)) ==
    offsetof(Baselib_CappedSemaphore, _systemSemaphoreData), "count and internalData must not share cacheline");
// Creates a capped semaphore with zero initial tokens and the given maximum token count.
// The backing system semaphore is constructed in-place inside the struct's trailing storage.
BASELIB_INLINE_API Baselib_CappedSemaphore Baselib_CappedSemaphore_Create(uint16_t cap)
{
    Baselib_CappedSemaphore semaphore = {{0}, 0, cap, {0}, {0}};
    semaphore.handle = Baselib_SystemSemaphore_CreateInplace(&semaphore._systemSemaphoreData);
    return semaphore;
}
// Acquires a token, blocking on the system semaphore until one becomes available.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Acquire(Baselib_CappedSemaphore* semaphore)
{
    // Take a token optimistically; a positive pre-decrement value means we owned one.
    const int32_t tokensBeforeDecrement = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(tokensBeforeDecrement > 0))
        return;
    // We went to/below zero and are now registered as a waiter: block until Release posts.
    Baselib_SystemSemaphore_Acquire(semaphore->handle);
}
// Non-blocking acquire: takes a token only if one is currently available.
// Returns true on success, false if the semaphore had no tokens.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryAcquire(Baselib_CappedSemaphore* semaphore)
{
    // CAS-decrement loop; a failed CAS reloads 'observedCount' and the
    // for-condition bails out once no tokens remain.
    for (int32_t observedCount = Baselib_atomic_load_32_relaxed(&semaphore->count); observedCount > 0;)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &observedCount, observedCount - 1))
            return true;
    }
    return false;
}
// Acquires a token, blocking for at most timeoutInMilliseconds.
// Returns true if a token was acquired, false on timeout.
BASELIB_INLINE_API bool Baselib_CappedSemaphore_TryTimedAcquire(Baselib_CappedSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Fast path: optimistic decrement succeeded, we owned a token.
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;
    // Otherwise block on the system semaphore for the requested time.
    if (OPTIMIZER_LIKELY(Baselib_SystemSemaphore_TryTimedAcquire(semaphore->handle, timeoutInMilliseconds)))
        return true;
    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Baselib_SystemSemaphore_TryAcquire(semaphore->handle));
    return true;
}
// Releases up to _count tokens, clamped so the total never exceeds the cap.
// Wakes as many waiting threads as tokens actually released.
// Returns the number of tokens actually released (0 if the semaphore was already at cap).
BASELIB_INLINE_API uint16_t Baselib_CappedSemaphore_Release(Baselib_CappedSemaphore* semaphore, const uint16_t _count)
{
    int32_t count = _count;
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    do
    {
        // Already at cap: nothing to release.
        if (previousCount == semaphore->cap)
            return 0;
        // Clamp so count never exceeds cap.
        if (previousCount + count > semaphore->cap)
            count = semaphore->cap - previousCount;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->count, &previousCount, previousCount + count));
    // A negative previous count encodes the number of blocked acquirers;
    // release that many slots on the system semaphore to wake them.
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
    }
    return count;
}
// Sets the token count to zero and wakes all currently waiting threads.
// Returns the number of threads that were woken up.
BASELIB_INLINE_API uint32_t Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(Baselib_CappedSemaphore* semaphore)
{
    const int32_t count = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    // Non-negative count means no threads were waiting.
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // -count waiters were blocked: release that many slots on the system semaphore.
    const int32_t threadsToWakeup = -count;
    Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
    return threadsToWakeup;
}
// Destroys the semaphore and its in-place system semaphore. No-op on null.
// Asserts (in debug) that no threads are still waiting.
BASELIB_INLINE_API void Baselib_CappedSemaphore_Free(Baselib_CappedSemaphore* semaphore)
{
    if (!semaphore)
        return;
    // A negative count means threads are still blocked in Acquire.
    const int32_t remainingTokens = Baselib_atomic_load_32_seq_cst(&semaphore->count);
    BaselibAssert(remainingTokens >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
    Baselib_SystemSemaphore_FreeInplace(semaphore->handle);
}

View File

@@ -0,0 +1,7 @@
#pragma once
#include "../Baselib_StaticAssert.h"
// Compile-time check that an enum type occupies exactly 4 bytes, since baselib's
// C API/bindings rely on enums having a fixed 32-bit representation across compilers.
#define BASELIB_ENUM_ENSURE_ABI_COMPATIBILITY(_enumType) \
    BASELIB_STATIC_ASSERT(sizeof(_enumType) == 4, \
    "Baselib assumes that sizeof any enum type is exactly 4 bytes, there might be ABI compatibility problems if violated");

View File

@@ -0,0 +1,198 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemFutex.h"
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
#error "Only use this implementation on top of a proper futex, in all other situations use Baselib_EventSemaphore_SemaphoreBased.inl.h"
#endif
typedef struct Baselib_EventSemaphore
{
    // Packs a generation counter (low bits) with the Set/Reset state flags (top bits).
    int32_t state;
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
} Baselib_EventSemaphore;
BASELIB_STATIC_ASSERT(sizeof(Baselib_EventSemaphore) == PLATFORM_CACHE_LINE_SIZE, "Baselib_EventSemaphore size should match cacheline size (64bytes)");
// The futex based event semaphore is in one of *three* states:
// * ResetNoWaitingThreads: EventSemaphore blocks threads, but there aren't any blocked yet
// * Reset: EventSemaphore blocks threads and there are some already
// * Set: EventSemaphore is not blocking any acquiring threads
//
// The ResetNoWaitingThreads state is an optimization that allows us to avoid the (comparatively) costly futex notification syscalls.
//
// In addition, there is a generation counter baked into the state variable in order to prevent lock stealing.
// -> Any change in the state during acquire (other than going from ResetNoWaitingThreads to Reset) means that the thread can continue
// (since in this case either it was set on the current generation or the generation was changed which implies an earlier release operation)
//
// Allowed state transitions:
// ResetNoWaitingThreads-Gen(X) -> Reset-Gen(X)                  == Acquire/TryTimedAcquire if no thread was waiting already
// ResetNoWaitingThreads-Gen(X) -> Set-Gen(X)                    == Set but no thread was waiting
// Reset-Gen(X)                 -> Set-Get(X+1)                  == Set if threads were waiting
// Set-Get(X)                   -> ResetNoWaitingThreads-Gen(X)  == Reset/ResetAndReleaseWaitingThreads
// Reset-Gen(X)                 -> ResetNoWaitingThreads-Gen(X+1) == ResetAndReleaseWaitingThreads if threads were waiting
//
// Note how any state transition from Reset requires increasing the generation counter.
enum
{
    //Detail_Baselib_EventSemaphore_ResetNoWaitingThreads = 0,
    Detail_Baselib_EventSemaphore_Set = (uint32_t)1 << 30,
    Detail_Baselib_EventSemaphore_Reset = (uint32_t)2 << 30,
    Detail_Baselib_EventSemaphore_GenMask = ~((uint32_t)(1 | 2) << 30)
};
// Extracts the generation counter from a packed state word by stripping the
// Set/Reset flag bits.
static FORCE_INLINE uint32_t Detail_Baselib_EventSemaphore_Generation(int32_t state)
{
    const uint32_t generation = state & Detail_Baselib_EventSemaphore_GenMask;
    return generation;
}
// If the semaphore is in the ResetNoWaitingThreads state, sets the Detail_Baselib_EventSemaphore_Reset flag
// (announcing that at least one thread is about to wait).
// Returns the last known state of the semaphore.
// Does nothing if state changed while this function runs (that includes generation changes while attempting to set the ResetState!)
static FORCE_INLINE uint32_t Detail_Baselib_EventSemaphore_TransitionFrom_ResetNoWaitingThreadsState_To_ResetState(Baselib_EventSemaphore* semaphore)
{
    int32_t state = Baselib_atomic_load_32_acquire(&semaphore->state);
    // Candidate target (Reset flag + current generation) and the only source state we may transition from.
    const int32_t resetState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Reset;
    const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
    // CAS loop: on failure 'state' is refreshed; any state other than
    // ResetNoWaitingThreads of the same generation aborts the transition.
    while (state == resetNoWaitingThreadsState)
    {
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->state, &state, resetState))
            return resetState;
    }
    return state;
}
// Creates an event semaphore in the ResetNoWaitingThreads state, generation zero.
BASELIB_INLINE_API Baselib_EventSemaphore Baselib_EventSemaphore_Create(void)
{
    const Baselib_EventSemaphore initialSemaphore = { 0, {0} };
    return initialSemaphore;
}
// Non-blocking acquire: succeeds exactly when the semaphore is currently in the Set state.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryAcquire(Baselib_EventSemaphore* semaphore)
{
    const int32_t observedState = Baselib_atomic_load_32_acquire(&semaphore->state);
    return (observedState & Detail_Baselib_EventSemaphore_Set) != 0;
}
// Blocks until the semaphore is set (or was set/cycled since we registered as a waiter).
BASELIB_INLINE_API void Baselib_EventSemaphore_Acquire(Baselib_EventSemaphore* semaphore)
{
    // Announce ourselves as a waiter (ResetNoWaitingThreads -> Reset); returns the observed state.
    const int32_t state = Detail_Baselib_EventSemaphore_TransitionFrom_ResetNoWaitingThreadsState_To_ResetState(semaphore);
    if (state & Detail_Baselib_EventSemaphore_Set)
        return;
    do
    {
        // State is now in Detail_Baselib_EventSemaphore_Reset-Gen(X).
        Baselib_SystemFutex_Wait(&semaphore->state, state, UINT32_MAX);
        // If the state has changed in any way, it is now in either of
        // Set-Gen(X), Set-Gen(X+n), ResetNoWaitingThreads-Gen(X+n) or Reset(X+n). (with n>0)
        // All of those mean a release operation happened, so we may proceed.
        if (state != Baselib_atomic_load_32_relaxed(&semaphore->state))
            return;
    }
    while (true);
}
// Blocks until the semaphore is set, for at most timeoutInMilliseconds.
// Returns true if acquired, false on timeout.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryTimedAcquire(Baselib_EventSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    // Announce ourselves as a waiter (ResetNoWaitingThreads -> Reset); returns the observed state.
    const int32_t state = Detail_Baselib_EventSemaphore_TransitionFrom_ResetNoWaitingThreadsState_To_ResetState(semaphore);
    if (state & Detail_Baselib_EventSemaphore_Set)
        return true;
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        // State is now in Detail_Baselib_EventSemaphore_Reset-Gen(X).
        Baselib_SystemFutex_Wait(&semaphore->state, state, timeLeft);
        // If the state has changed in any way, it is now in either of
        // Set-Gen(X), Set-Gen(X+n), ResetNoWaitingThreads-Gen(X+n) or Reset(X+n). (with n>0)
        if (state != Baselib_atomic_load_32_relaxed(&semaphore->state))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);
    // The EventSemaphore looks now like there are still threads waiting even if there *might* be none!
    // This is not an issue however, since it merely means that Set/ResetAndReleaseWaitingThreads will do a potentially redundant futex notification.
    return false;
}
// Moves the semaphore from Set back to ResetNoWaitingThreads (keeping the generation).
// No effect if the semaphore is not currently Set.
BASELIB_INLINE_API void Baselib_EventSemaphore_Reset(Baselib_EventSemaphore* semaphore)
{
    int32_t state = Baselib_atomic_load_32_relaxed(&semaphore->state);
    const int32_t setState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Set;
    // Only the Set -> ResetNoWaitingThreads transition is allowed here; any
    // concurrent state change aborts the loop.
    while (state == setState)
    {
        const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, resetNoWaitingThreadsState))
            return;
    }
    // Not in Set state: still publish prior writes with release semantics.
    Baselib_atomic_thread_fence_release();
}
// Sets the semaphore, releasing all current and future acquirers until the next reset.
BASELIB_INLINE_API void Baselib_EventSemaphore_Set(Baselib_EventSemaphore* semaphore)
{
    int32_t state = Baselib_atomic_load_32_relaxed(&semaphore->state);
    const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
    const int32_t resetState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Reset;
    // If there is no thread waiting on the semaphore, there is no need to wake & increase the generation count.
    // Just set it to Set if it isn't already.
    while (state == resetNoWaitingThreadsState)
    {
        const int32_t setState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Set;
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, setState))
            return;
    }
    // If this is not the case however, we do exactly that, increase the generation & wake all threads.
    while (state == resetState)
    {
        const int32_t nextGenSetState = Detail_Baselib_EventSemaphore_Generation(state + 1) | Detail_Baselib_EventSemaphore_Set;
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, nextGenSetState))
        {
            Baselib_SystemFutex_Notify(&semaphore->state, UINT32_MAX, Baselib_WakeupFallbackStrategy_All);
            return;
        }
    }
    // EventSemaphore was already in set state.
    Baselib_atomic_thread_fence_release();
}
// Releases all currently waiting threads and leaves the semaphore in a reset state.
BASELIB_INLINE_API void Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(Baselib_EventSemaphore* semaphore)
{
    // Note that doing a Baselib_EventSemaphore_Set & Baselib_EventSemaphore_Reset has the same observable effects, just slightly slower.
    int32_t state = Baselib_atomic_load_32_relaxed(&semaphore->state);
    const int32_t setState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Set;
    const int32_t resetState = Detail_Baselib_EventSemaphore_Generation(state) | Detail_Baselib_EventSemaphore_Reset;
    // If there is no thread waiting on the semaphore, there is no need to wake & increase the generation count.
    // Just set it to ResetNoWaitingThreads if it isn't already.
    while (state == setState)
    {
        const int32_t resetNoWaitingThreadsState = Detail_Baselib_EventSemaphore_Generation(state);
        if (Baselib_atomic_compare_exchange_weak_32_release_relaxed(&semaphore->state, &state, resetNoWaitingThreadsState))
            return;
    }
    // If this is not the case however, we do exactly that, increase the generation & wake all threads.
    while (state == resetState)
    {
        const int32_t nextGenPendingResetState = Detail_Baselib_EventSemaphore_Generation(state + 1);
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->state, &state, nextGenPendingResetState))
        {
            Baselib_SystemFutex_Notify(&semaphore->state, UINT32_MAX, Baselib_WakeupFallbackStrategy_All);
            return;
        }
    }
    // EventSemaphore was already in ResetNoWaiting threads state.
    Baselib_atomic_thread_fence_release();
}
// Nothing to free: the futex-based EventSemaphore owns no external resources.
BASELIB_INLINE_API void Baselib_EventSemaphore_Free(Baselib_EventSemaphore* semaphore)
{
}

View File

@@ -0,0 +1,211 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemSemaphore.h"
#include "../Baselib_StaticAssert.h"
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#error "It's highly recommended to use Baselib_EventSemaphore_FutexBased.inl.h on platforms which have native futex support"
#endif
typedef union BASELIB_ALIGN_AS (8) Detail_Baselib_EventSemaphore_State
{
    struct
    {
        // Can be changed without checking for changes in numWaitingForSetInProgress (use 32bit cmpex)
        int32_t numWaitingForSetAndStateFlags;
        // Typically not changed without checking numWaitingForSetAndStateFlags (use 64bit cmpex)
        int32_t numWaitingForSetInProgress;
    } parts;
    int64_t stateInt64;
} Detail_Baselib_EventSemaphore_State;
enum
{
    // If this flag is set, threads are still waking up from a previous Set or ResetAndReleaseWaitingThreads call.
    // While this is set, any thread entering an Acquire method (that doesn't see Detail_Baselib_EventSemaphore_SetFlag),
    // will wait until it is cleared before proceeding with normal operations.
    Detail_Baselib_EventSemaphore_SetInProgressFlag = (uint32_t)1 << 30,
    // If this flag is set, threads acquiring the semaphore succeed immediately.
    Detail_Baselib_EventSemaphore_SetFlag = (uint32_t)2 << 30,
    Detail_Baselib_EventSemaphore_NumWaitingForSetMask = ~((uint32_t)(1 | 2) << 30)
};
typedef struct Baselib_EventSemaphore
{
    Detail_Baselib_EventSemaphore_State state;
    Baselib_SystemSemaphore_Handle setSemaphore;
    Baselib_SystemSemaphore_Handle setInProgressSemaphore;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - 2 * sizeof(Baselib_SystemSemaphore_Handle) - sizeof(Detail_Baselib_EventSemaphore_State)];
    char _systemSemaphoreDataSemaphore[Baselib_SystemSemaphore_PlatformSize];
    char _cachelineSpacer1[PLATFORM_CACHE_LINE_SIZE - Baselib_SystemSemaphore_PlatformSize];
    char _systemSemaphoreDataInProgressSemaphore[Baselib_SystemSemaphore_PlatformSize];
} Baselib_EventSemaphore;
BASELIB_STATIC_ASSERT((offsetof(Baselib_EventSemaphore, state) + PLATFORM_CACHE_LINE_SIZE) ==
    offsetof(Baselib_EventSemaphore, _systemSemaphoreDataSemaphore), "state and _systemSemaphoreDataSemaphore must not share cacheline");
BASELIB_STATIC_ASSERT((offsetof(Baselib_EventSemaphore, _systemSemaphoreDataSemaphore) + PLATFORM_CACHE_LINE_SIZE) ==
    offsetof(Baselib_EventSemaphore, _systemSemaphoreDataInProgressSemaphore), "_systemSemaphoreDataSemaphore and _systemSemaphoreDataInProgressSemaphore must not share cacheline");
// How (Timed)Acquire works for the SemaphoreBased EventSemaphore:
//
// If there is a set pending (Detail_Baselib_EventSemaphore_SetInProgressFlag is set),
// it means that not all threads from the previous wakeup call (either via Set or ResetAndReleaseWaitingThreads) have been woken up.
// If we would just continue, we might steal the wakeup tokens of those threads! So instead we wait until they are done.
//
// This is different from the FutexBased version, however there is no way for a user to distinguish that from
// a "regular (but lengthy)" preemption at the start of the function.
// Meaning that we don't care how often the semaphore got set and reset in the meantime!
//
//
// Invariants:
//
// Allowed flag state transitions:
// 0 -> Set | SetInProgress
// Set | SetInProgress <-> Set
// Set | SetInProgress <-> SetInProgress
// Set -> 0
// SetInProgress -> 0
//
// Additionally:
// * numWaitingForSetInProgress can only grow if SetInProgress is set.
// * numWaitingForSet can only grow if Set is set
#ifdef __cplusplus
BASELIB_C_INTERFACE
{
#endif
// Slow path of Acquire: blocks until the semaphore is set (out-of-line, implemented in baselib).
BASELIB_API void Detail_Baselib_EventSemaphore_SemaphoreBased_AcquireNonSet(int32_t initialNumWaitingForSetAndStateFlags, Baselib_EventSemaphore* semaphore);
// Slow path of TryTimedAcquire: blocks until set or timeout; returns true if acquired.
COMPILER_WARN_UNUSED_RESULT
BASELIB_API bool Detail_Baselib_EventSemaphore_SemaphoreBased_TryTimedAcquireNonSet(int32_t initialNumWaitingForSetAndStateFlags, Baselib_EventSemaphore* semaphore, uint32_t timeoutInMilliseconds);
#ifdef __cplusplus
} // BASELIB_C_INTERFACE
#endif
// True when the Set flag is raised in the packed state word.
static FORCE_INLINE bool Detail_Baselib_EventSemaphore_IsSet(int32_t numWaitingForSetAndStateFlags)
{
    return (numWaitingForSetAndStateFlags & Detail_Baselib_EventSemaphore_SetFlag) != 0;
}
// True when a wakeup round from a previous Set/ResetAndReleaseWaitingThreads
// is still draining (SetInProgress flag raised in the packed state word).
static FORCE_INLINE bool Detail_Baselib_EventSemaphore_IsSetInProgress(int32_t numWaitingForSetAndStateFlags)
{
    return (numWaitingForSetAndStateFlags & Detail_Baselib_EventSemaphore_SetInProgressFlag) != 0;
}
// Extracts the number of threads currently waiting for Set from the packed word.
static FORCE_INLINE int32_t Detail_Baselib_EventSemaphore_GetWaitingForSetCount(int32_t numWaitingForSetAndStateFlags)
{
    const int32_t waiterCount = numWaitingForSetAndStateFlags & Detail_Baselib_EventSemaphore_NumWaitingForSetMask;
    return waiterCount;
}
// Replaces the WaitingForSet count in the packed word while leaving the
// state flag bits untouched.
static FORCE_INLINE int32_t Detail_Baselib_EventSemaphore_SetWaitingForSetCount(int32_t currentNumWaitingForSetAndStateFlags, int32_t newNumWaitingForSet)
{
    const int32_t stateFlagBits = currentNumWaitingForSetAndStateFlags & (~Detail_Baselib_EventSemaphore_NumWaitingForSetMask);
    return stateFlagBits | newNumWaitingForSet;
}
// Creates an event semaphore in the reset (non-signaled) state.
// Both system semaphores are constructed in-place inside the padded storage
// members so that state and each semaphore live on separate cache lines
// (layout enforced by the static asserts above).
BASELIB_INLINE_API Baselib_EventSemaphore Baselib_EventSemaphore_Create(void)
{
    Baselib_EventSemaphore semaphore = {{{0, 0}}, {0}, {0}, {0}, {0}, {0}, {0}};
    semaphore.setSemaphore = Baselib_SystemSemaphore_CreateInplace(semaphore._systemSemaphoreDataSemaphore);
    semaphore.setInProgressSemaphore = Baselib_SystemSemaphore_CreateInplace(semaphore._systemSemaphoreDataInProgressSemaphore);
    return semaphore;
}
// Non-blocking acquire: succeeds exactly when the semaphore is currently set.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryAcquire(Baselib_EventSemaphore* semaphore)
{
    return Detail_Baselib_EventSemaphore_IsSet(Baselib_atomic_load_32_acquire(&semaphore->state.parts.numWaitingForSetAndStateFlags));
}
// Blocks the calling thread until the semaphore is set.
// Fast path is a single acquire-load when the semaphore is already set;
// otherwise control transfers to the out-of-line slow path declared above,
// which is handed the state word we observed.
BASELIB_INLINE_API void Baselib_EventSemaphore_Acquire(Baselib_EventSemaphore* semaphore)
{
    const int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_acquire(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    if (!Detail_Baselib_EventSemaphore_IsSet(numWaitingForSetAndStateFlags))
        Detail_Baselib_EventSemaphore_SemaphoreBased_AcquireNonSet(numWaitingForSetAndStateFlags, semaphore);
}
// Like Baselib_EventSemaphore_Acquire, but gives up after timeoutInMilliseconds.
// Returns true if the semaphore was set (or became set) in time, false on timeout.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_EventSemaphore_TryTimedAcquire(Baselib_EventSemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    const int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_acquire(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    if (!Detail_Baselib_EventSemaphore_IsSet(numWaitingForSetAndStateFlags))
        return Detail_Baselib_EventSemaphore_SemaphoreBased_TryTimedAcquireNonSet(numWaitingForSetAndStateFlags, semaphore, timeoutInMilliseconds);
    return true;
}
// Clears the Set flag so subsequent acquires block again.
// CAS loop clears only the Set bit; the waiter count and the SetInProgress
// flag are carried over unchanged (see the allowed state transitions above).
// Release ordering publishes writes made before the Reset to later acquirers.
BASELIB_INLINE_API void Baselib_EventSemaphore_Reset(Baselib_EventSemaphore* semaphore)
{
    int32_t resetNumWaitingForSetAndStateFlags;
    int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_relaxed(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    do
    {
        resetNumWaitingForSetAndStateFlags = numWaitingForSetAndStateFlags & (~Detail_Baselib_EventSemaphore_SetFlag);
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(
        &semaphore->state.parts.numWaitingForSetAndStateFlags,
        &numWaitingForSetAndStateFlags,
        resetNumWaitingForSetAndStateFlags));
}
// Sets the semaphore: all current and future acquires succeed until Reset.
// If threads are registered as waiters, the SetInProgress flag is raised as
// part of the same CAS so the wakeup round is tracked.
BASELIB_INLINE_API void Baselib_EventSemaphore_Set(Baselib_EventSemaphore* semaphore)
{
    int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_relaxed(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    int32_t numWaitingForSetAndStateFlagsSet, numWaitingForSet;
    do
    {
        numWaitingForSetAndStateFlagsSet = numWaitingForSetAndStateFlags | Detail_Baselib_EventSemaphore_SetFlag;
        numWaitingForSet = Detail_Baselib_EventSemaphore_GetWaitingForSetCount(numWaitingForSetAndStateFlags);
        BaselibAssert(numWaitingForSet >= 0, "There needs to be always a non-negative amount of threads waiting for Set");
        if (numWaitingForSet)
            numWaitingForSetAndStateFlagsSet |= Detail_Baselib_EventSemaphore_SetInProgressFlag;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(
        &semaphore->state.parts.numWaitingForSetAndStateFlags,
        &numWaitingForSetAndStateFlags,
        numWaitingForSetAndStateFlagsSet));
    // Only release wakeup tokens if no earlier wakeup round is still draining
    // (numWaitingForSetAndStateFlags holds the pre-CAS value here); otherwise
    // the tokens still belong to the threads of the previous round.
    if (!Detail_Baselib_EventSemaphore_IsSetInProgress(numWaitingForSetAndStateFlags) && numWaitingForSet)
        Baselib_SystemSemaphore_Release(semaphore->setSemaphore, numWaitingForSet);
}
// Atomically clears the Set flag and wakes every thread currently registered
// as waiting; threads that start acquiring after this call will block.
BASELIB_INLINE_API void Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(Baselib_EventSemaphore* semaphore)
{
    // Note that doing a Baselib_EventSemaphore_Set & Baselib_EventSemaphore_Reset has the same observable effects, just slightly slower.
    int32_t numWaitingForSetAndStateFlags = Baselib_atomic_load_32_relaxed(&semaphore->state.parts.numWaitingForSetAndStateFlags);
    int32_t resetNumWaitingForSetAndStateFlags, numWaitingForSet;
    do
    {
        resetNumWaitingForSetAndStateFlags = numWaitingForSetAndStateFlags & (~Detail_Baselib_EventSemaphore_SetFlag);
        numWaitingForSet = Detail_Baselib_EventSemaphore_GetWaitingForSetCount(numWaitingForSetAndStateFlags);
        BaselibAssert(numWaitingForSet >= 0, "There needs to be always a non-negative amount of threads waiting for Set");
        if (numWaitingForSet)
            resetNumWaitingForSetAndStateFlags |= Detail_Baselib_EventSemaphore_SetInProgressFlag;
    }
    while (!Baselib_atomic_compare_exchange_weak_32_release_relaxed(
        &semaphore->state.parts.numWaitingForSetAndStateFlags,
        &numWaitingForSetAndStateFlags,
        resetNumWaitingForSetAndStateFlags));
    // Same rule as in Set: only wake waiters if no earlier wakeup round is
    // still in progress (pre-CAS value inspected).
    if (!Detail_Baselib_EventSemaphore_IsSetInProgress(numWaitingForSetAndStateFlags) && numWaitingForSet)
        Baselib_SystemSemaphore_Release(semaphore->setSemaphore, numWaitingForSet);
}
// Destroys the two embedded system semaphores. Passing NULL is a no-op.
BASELIB_INLINE_API void Baselib_EventSemaphore_Free(Baselib_EventSemaphore* semaphore)
{
    if (semaphore == NULL)
        return;
    Baselib_SystemSemaphore_FreeInplace(semaphore->setSemaphore);
    Baselib_SystemSemaphore_FreeInplace(semaphore->setInProgressSemaphore);
}

View File

@@ -0,0 +1,150 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemFutex.h"
#include "../Baselib_Thread.h"
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
#error "Only use this implementation on top of a proper futex, in all other situations use Baselib_HighCapacitySemaphore_SemaphoreBased.inl.h"
#endif
// Space out to different cache lines.
// the idea here is that threads waking up from sleep should not have to
// access the cache line where count is stored, and only touch wakeups.
// the only exception to that rule is if we hit a timeout.
typedef struct Baselib_HighCapacitySemaphore
{
    int32_t wakeups;
    // NOTE(review): this spacer is sized with sizeof(int64_t) although 'wakeups'
    // is an int32_t, leaving 'count' at offset 60; 'count' only ends up on the
    // next cache line because the compiler pads it to 8-byte alignment —
    // confirm this is intentional (the futex-based Baselib_Semaphore below
    // sizes its spacer by the preceding member instead).
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int64_t)];
    int64_t count;
    char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(int64_t)];
} Baselib_HighCapacitySemaphore;
// Creates a semaphore with an initial token count of zero (all acquires block).
BASELIB_INLINE_API Baselib_HighCapacitySemaphore Baselib_HighCapacitySemaphore_Create(void)
{
    Baselib_HighCapacitySemaphore semaphore = {0, {0}, 0, {0}};
    return semaphore;
}
// Attempts to consume one wakeup token from 'wakeups'.
// Returns true on success, false when no token is currently available.
BASELIB_INLINE_API bool Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(Baselib_HighCapacitySemaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->wakeups);
    while (previousCount > 0)
    {
        // Weak CAS may fail spuriously; on failure previousCount is refreshed
        // with the current value and the loop re-evaluates.
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->wakeups, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
// Non-blocking acquire: decrements 'count' only while it is positive, so it
// never pushes the count negative (i.e. never registers as a waiter).
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryAcquire(Baselib_HighCapacitySemaphore* semaphore)
{
    int64_t previousCount = Baselib_atomic_load_64_relaxed(&semaphore->count);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_64_acquire_relaxed(&semaphore->count, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
// Acquires one token, blocking while none is available.
// fetch_add(-1): a positive previous count means we grabbed a token on the
// fast path; otherwise the count went (further) negative, registering this
// thread as a waiter, and we sleep until a releaser publishes a wakeup token.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Acquire(Baselib_HighCapacitySemaphore* semaphore)
{
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;
    while (!Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(semaphore))
    {
        // Futex waits can wake spuriously; re-check for a token on every wake.
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, UINT32_MAX);
    }
}
// Acquires one token, blocking for at most timeoutInMilliseconds.
// Returns true on success, false on timeout. On timeout the thread must
// deregister itself as a waiter (see comments below) before returning.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryTimedAcquire(Baselib_HighCapacitySemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, timeLeft);
        if (Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(semaphore))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);
    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int64_t count = Baselib_atomic_load_64_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Detail_Baselib_HighCapacitySemaphore_ConsumeWakeup(semaphore));
    return true;
}
// Releases (adds) _count tokens and wakes up to that many blocked acquirers.
// A negative previous count is the number of threads currently waiting; only
// min(_count, waiters) wakeup tokens are published.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Release(Baselib_HighCapacitySemaphore* semaphore, const uint32_t _count)
{
    const int64_t count = _count;
    int64_t previousCount = Baselib_atomic_fetch_add_64_release(&semaphore->count, count);
    // This should only be possible if millions of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", (int32_t)previousCount, (int32_t)count);
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int64_t waitingThreads = -previousCount;
        const int64_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        BaselibAssert(threadsToWakeup <= INT32_MAX);
        // Publish wakeup tokens before notifying so woken threads find them.
        Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, (int32_t)threadsToWakeup);
        Baselib_SystemFutex_Notify(&semaphore->wakeups, (int32_t)threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
        return;
    }
    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_HighCapacitySemaphore_MaxGuaranteedCount * 2))
    {
        const int64_t maxCount = Baselib_HighCapacitySemaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
// Sets the token count to zero and wakes every thread currently waiting.
// Returns the number of threads woken (0 if none were waiting).
BASELIB_INLINE_API uint64_t Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(Baselib_HighCapacitySemaphore* semaphore)
{
    const int64_t count = Baselib_atomic_exchange_64_release(&semaphore->count, 0);
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // A negative count is the number of registered waiters.
    const int64_t threadsToWakeup = -count;
    BaselibAssert(threadsToWakeup <= INT32_MAX);
    Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, (int32_t)threadsToWakeup);
    Baselib_SystemFutex_Notify(&semaphore->wakeups, (int32_t)threadsToWakeup, Baselib_WakeupFallbackStrategy_All);
    return threadsToWakeup;
}
// No dynamic resources to release in the futex-based variant; only validates
// (in assert builds) that no threads are still waiting. NULL is a no-op.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Free(Baselib_HighCapacitySemaphore* semaphore)
{
    if (!semaphore)
        return;
    const int64_t count = Baselib_atomic_load_64_seq_cst(&semaphore->count);
    BaselibAssert(count >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
}

View File

@@ -0,0 +1,126 @@
#pragma once
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemSemaphore.h"
#include "../Baselib_Thread.h"
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#error "It's highly recommended to use Baselib_HighCapacitySemaphore_FutexBased.inl.h on platforms that have native futex support"
#endif
// Semaphore built on the platform's system semaphore (no native futex).
// 'count' is kept on a different cache line from the in-place system
// semaphore storage; the static assert below enforces the layout.
typedef struct Baselib_HighCapacitySemaphore
{
    int64_t count;
    Baselib_SystemSemaphore_Handle handle;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int64_t) - sizeof(Baselib_SystemSemaphore_Handle)];
    char _systemSemaphoreData[Baselib_SystemSemaphore_PlatformSize];
} Baselib_HighCapacitySemaphore;
BASELIB_STATIC_ASSERT((offsetof(Baselib_HighCapacitySemaphore, count) + PLATFORM_CACHE_LINE_SIZE) ==
    offsetof(Baselib_HighCapacitySemaphore, _systemSemaphoreData), "count and internalData must not share cacheline");
// Creates a semaphore with an initial token count of zero; the system
// semaphore is constructed in-place inside the struct's own storage.
BASELIB_INLINE_API Baselib_HighCapacitySemaphore Baselib_HighCapacitySemaphore_Create(void)
{
    Baselib_HighCapacitySemaphore semaphore = {0, {0}, {0}, {0}};
    semaphore.handle = Baselib_SystemSemaphore_CreateInplace(&semaphore._systemSemaphoreData);
    return semaphore;
}
// Non-blocking acquire: decrements 'count' only while it is positive, so it
// never pushes the count negative (i.e. never registers as a waiter).
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryAcquire(Baselib_HighCapacitySemaphore* semaphore)
{
    int64_t previousCount = Baselib_atomic_load_64_relaxed(&semaphore->count);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_64_acquire_relaxed(&semaphore->count, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
// Acquires one token, blocking while none is available.
// A non-positive previous count means we registered as a waiter and must
// sleep on the system semaphore until a releaser posts it.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Acquire(Baselib_HighCapacitySemaphore* semaphore)
{
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;
    Baselib_SystemSemaphore_Acquire(semaphore->handle);
}
// Acquires one token, blocking for at most timeoutInMilliseconds.
// Returns true on success, false on timeout. On timeout the thread must
// deregister itself as a waiter (see comments below) before returning.
BASELIB_INLINE_API bool Baselib_HighCapacitySemaphore_TryTimedAcquire(Baselib_HighCapacitySemaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    const int64_t previousCount = Baselib_atomic_fetch_add_64_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;
    if (OPTIMIZER_LIKELY(Baselib_SystemSemaphore_TryTimedAcquire(semaphore->handle, timeoutInMilliseconds)))
        return true;
    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int64_t count = Baselib_atomic_load_64_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Baselib_SystemSemaphore_TryAcquire(semaphore->handle));
    return true;
}
// Releases (adds) _count tokens and wakes up to that many blocked acquirers.
// A negative previous count is the number of threads currently waiting; only
// min(_count, waiters) tokens are posted to the system semaphore.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Release(Baselib_HighCapacitySemaphore* semaphore, const uint32_t _count)
{
    const int64_t count = _count;
    int64_t previousCount = Baselib_atomic_fetch_add_64_release(&semaphore->count, count);
    // This should only be possible if millions of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", (int32_t)previousCount, (int32_t)count);
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int64_t waitingThreads = -previousCount;
        const int64_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        BaselibAssert(threadsToWakeup <= (int64_t)UINT32_MAX);
        Baselib_SystemSemaphore_Release(semaphore->handle, (uint32_t)threadsToWakeup);
        return;
    }
    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_HighCapacitySemaphore_MaxGuaranteedCount * 2))
    {
        const int64_t maxCount = Baselib_HighCapacitySemaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_64_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
// Sets the token count to zero and wakes every thread currently waiting.
// Returns the number of threads woken (0 if none were waiting).
BASELIB_INLINE_API uint64_t Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(Baselib_HighCapacitySemaphore* semaphore)
{
    const int64_t count = Baselib_atomic_exchange_64_release(&semaphore->count, 0);
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // A negative count is the number of registered waiters.
    const int64_t threadsToWakeup = -count;
    BaselibAssert(threadsToWakeup <= (int64_t)UINT32_MAX);
    Baselib_SystemSemaphore_Release(semaphore->handle, (uint32_t)threadsToWakeup);
    return threadsToWakeup;
}
// Destroys the in-place system semaphore after asserting (in assert builds)
// that no threads are still waiting. NULL is a no-op.
BASELIB_INLINE_API void Baselib_HighCapacitySemaphore_Free(Baselib_HighCapacitySemaphore* semaphore)
{
    if (!semaphore)
        return;
    const int64_t count = Baselib_atomic_load_64_seq_cst(&semaphore->count);
    BaselibAssert(count >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
    Baselib_SystemSemaphore_FreeInplace(semaphore->handle);
}

View File

@@ -0,0 +1,92 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemFutex.h"
// Futex-based lock state machine:
//   UNLOCKED  - free to take
//   LOCKED    - held, no threads sleeping on the futex
//   CONTENDED - held, and at least one thread may be sleeping on the futex
enum Detail_Baselib_Lock_State
{
    Detail_Baselib_Lock_UNLOCKED = 0,
    Detail_Baselib_Lock_LOCKED = 1,
    Detail_Baselib_Lock_CONTENDED = 2,
};
typedef struct Baselib_Lock
{
    int32_t state;
    // Pad to a full cache line to avoid false sharing with adjacent data.
    char _cachelineSpacer[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
} Baselib_Lock;
// Creates a lock in the unlocked state.
BASELIB_INLINE_API Baselib_Lock Baselib_Lock_Create(void)
{
    const Baselib_Lock newLock = {Detail_Baselib_Lock_UNLOCKED, {0}};
    return newLock;
}
// Non-blocking acquire. Returns true if the UNLOCKED -> LOCKED transition
// succeeded; the loop only retries on spurious weak-CAS failures (i.e. while
// the observed state is still UNLOCKED).
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryAcquire(Baselib_Lock* lock)
{
    int32_t previousState = Detail_Baselib_Lock_UNLOCKED;
    do
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&lock->state, &previousState, Detail_Baselib_Lock_LOCKED))
            return true;
    }
    while (previousState == Detail_Baselib_Lock_UNLOCKED);
    return false;
}
// Blocking acquire. Fast path: CAS UNLOCKED -> LOCKED (previousState + 1).
// If the lock was held, the CAS instead bumps LOCKED -> CONTENDED to record
// a sleeper, then the thread waits on the futex while the state is CONTENDED.
// After each wake it exchanges the state to CONTENDED; seeing UNLOCKED there
// means it took the lock (and conservatively marks it contended).
BASELIB_INLINE_API void Baselib_Lock_Acquire(Baselib_Lock* lock)
{
    int32_t previousState = Detail_Baselib_Lock_UNLOCKED;
    do
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&lock->state, &previousState, previousState + 1))
            break;
    }
    while (previousState != Detail_Baselib_Lock_CONTENDED);
    while (OPTIMIZER_LIKELY(previousState != Detail_Baselib_Lock_UNLOCKED))
    {
        Baselib_SystemFutex_Wait(&lock->state, Detail_Baselib_Lock_CONTENDED, UINT32_MAX);
        previousState = Baselib_atomic_exchange_32_relaxed(&lock->state, Detail_Baselib_Lock_CONTENDED);
    }
}
// Blocking acquire with a timeout. Returns true if the lock was taken within
// timeoutInMilliseconds, false otherwise. Same protocol as Baselib_Lock_Acquire:
// the entry CAS either takes the lock (UNLOCKED -> LOCKED) or marks it
// contended (LOCKED -> CONTENDED) before sleeping on the futex.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryTimedAcquire(Baselib_Lock* lock, const uint32_t timeoutInMilliseconds)
{
    int32_t previousState = Detail_Baselib_Lock_UNLOCKED;
    do
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&lock->state, &previousState, previousState + 1))
            break;
    }
    while (previousState != Detail_Baselib_Lock_CONTENDED);
    if (OPTIMIZER_LIKELY(previousState == Detail_Baselib_Lock_UNLOCKED))
        return true;
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        // Fix: wait only for the remaining time (timeLeft), not the full original
        // timeout on every iteration. Otherwise each spurious or losing wakeup
        // restarts the full wait and the call can block far longer than requested.
        Baselib_SystemFutex_Wait(&lock->state, Detail_Baselib_Lock_CONTENDED, timeLeft);
        // Renamed from 'previousState' to avoid shadowing the outer variable.
        const int32_t stateBeforeExchange = Baselib_atomic_exchange_32_relaxed(&lock->state, Detail_Baselib_Lock_CONTENDED);
        if (stateBeforeExchange == Detail_Baselib_Lock_UNLOCKED)
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);
    return false;
}
// Releases the lock (release ordering publishes the critical section).
// Only notifies the futex when the previous state was CONTENDED, i.e. when
// some thread may be sleeping; one thread is woken at a time.
BASELIB_INLINE_API void Baselib_Lock_Release(Baselib_Lock* lock)
{
    const int32_t previousState = Baselib_atomic_exchange_32_release(&lock->state, Detail_Baselib_Lock_UNLOCKED);
    if (previousState == Detail_Baselib_Lock_CONTENDED)
        Baselib_SystemFutex_Notify(&lock->state, 1, Baselib_WakeupFallbackStrategy_OneByOne);
}
// Nothing to free: the futex-based lock owns no external resources.
BASELIB_INLINE_API void Baselib_Lock_Free(Baselib_Lock* lock)
{
    (void)lock; // intentionally unused
}

View File

@@ -0,0 +1,46 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_CappedSemaphore.h"
// Lock built on a capped semaphore holding at most one token:
// holding the token == holding the lock.
typedef struct Baselib_Lock
{
    Baselib_CappedSemaphore semaphore;
} Baselib_Lock;
// Creates an unlocked lock: a semaphore capped at one token, primed with its
// single token so the first Acquire succeeds immediately.
BASELIB_INLINE_API Baselib_Lock Baselib_Lock_Create(void)
{
    Baselib_Lock lock = { Baselib_CappedSemaphore_Create(1) };
    uint16_t submittedTokens = Baselib_CappedSemaphore_Release(&lock.semaphore, 1);
    BaselibAssert(submittedTokens == 1, "CappedSemaphore was unable to accept our token");
    return lock;
}
// Blocks until the lock's single token can be taken.
BASELIB_INLINE_API void Baselib_Lock_Acquire(Baselib_Lock* lock)
{
    Baselib_CappedSemaphore* underlying = &lock->semaphore;
    Baselib_CappedSemaphore_Acquire(underlying);
}
// Non-blocking acquire; true if the token was available and taken.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryAcquire(Baselib_Lock* lock)
{
    Baselib_CappedSemaphore* underlying = &lock->semaphore;
    return Baselib_CappedSemaphore_TryAcquire(underlying);
}
// Acquire with a timeout; true if the token was taken within
// timeoutInMilliseconds, false otherwise.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_Lock_TryTimedAcquire(Baselib_Lock* lock, const uint32_t timeoutInMilliseconds)
{
    Baselib_CappedSemaphore* underlying = &lock->semaphore;
    return Baselib_CappedSemaphore_TryTimedAcquire(underlying, timeoutInMilliseconds);
}
// Returns the single token to the underlying capped semaphore.
BASELIB_INLINE_API void Baselib_Lock_Release(Baselib_Lock* lock)
{
    Baselib_CappedSemaphore* underlying = &lock->semaphore;
    Baselib_CappedSemaphore_Release(underlying, 1);
}
// Frees the underlying capped semaphore. Passing NULL is a no-op.
BASELIB_INLINE_API void Baselib_Lock_Free(Baselib_Lock* lock)
{
    if (lock == NULL)
        return;
    Baselib_CappedSemaphore_Free(&lock->semaphore);
}

View File

@@ -0,0 +1,93 @@
#pragma once
#include "../Baselib_Lock.h"
#include "../Baselib_StaticAssert.h"
#include "../Baselib_Alignment.h"
#include "../Baselib_Thread.h"
// Recursive lock: 'owner' is the thread currently holding 'lock' and 'count'
// the recursion depth. 'owner' is read with atomic loads by non-owners; the
// static asserts below check both fields are suitably aligned for atomic use.
typedef struct Baselib_ReentrantLock
{
    Baselib_Lock lock;
    Baselib_Thread_Id owner;
    int32_t count;
} Baselib_ReentrantLock;
BASELIB_STATIC_ASSERT((BASELIB_ALIGN_OF(Baselib_ReentrantLock) + offsetof(Baselib_ReentrantLock, owner)) % sizeof(Baselib_Thread_Id) == 0, "Baselib_ReentrantLock::owner is not aligned for atomic use");
BASELIB_STATIC_ASSERT((BASELIB_ALIGN_OF(Baselib_ReentrantLock) + offsetof(Baselib_ReentrantLock, count)) % sizeof(int32_t) == 0, "Baselib_ReentrantLock::count is not aligned for atomic use");
// Creates an unlocked reentrant lock (no owner, recursion depth zero).
BASELIB_INLINE_API Baselib_ReentrantLock Baselib_ReentrantLock_Create(void)
{
    Baselib_ReentrantLock lock = {Baselib_Lock_Create(), Baselib_Thread_InvalidId, 0};
    return lock;
}
// Non-blocking acquire. If the calling thread already owns the lock the
// recursion depth is bumped (no atomics needed: only the owner touches
// owner/count while the underlying lock is held); otherwise it attempts the
// underlying lock and, on success, records ownership.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryAcquire(Baselib_ReentrantLock* lock)
{
    const Baselib_Thread_Id currentThreadId = Baselib_Thread_GetCurrentThreadId();
    const Baselib_Thread_Id lockOwner = Baselib_atomic_load_ptr_relaxed(&lock->owner);
    if (OPTIMIZER_LIKELY(currentThreadId != lockOwner))
    {
        if (!Baselib_Lock_TryAcquire(&lock->lock))
            return false;
        lock->owner = currentThreadId;
        lock->count = 1;
        return true;
    }
    lock->count++;
    return true;
}
// Blocking acquire. Re-entry by the owning thread just increments the
// recursion depth; any other thread blocks on the underlying lock and then
// records itself as owner.
BASELIB_INLINE_API void Baselib_ReentrantLock_Acquire(Baselib_ReentrantLock* lock)
{
    const Baselib_Thread_Id currentThreadId = Baselib_Thread_GetCurrentThreadId();
    const Baselib_Thread_Id lockOwner = Baselib_atomic_load_ptr_relaxed(&lock->owner);
    if (OPTIMIZER_LIKELY(currentThreadId != lockOwner))
    {
        Baselib_Lock_Acquire(&lock->lock);
        lock->owner = currentThreadId;
        lock->count = 1;
        return;
    }
    lock->count++;
}
// Acquire with a timeout. Re-entry by the owning thread always succeeds
// immediately; any other thread waits on the underlying lock for at most
// timeoutInMilliseconds. Returns false only on timeout.
COMPILER_WARN_UNUSED_RESULT
BASELIB_INLINE_API bool Baselib_ReentrantLock_TryTimedAcquire(Baselib_ReentrantLock* lock, const uint32_t timeoutInMilliseconds)
{
    const Baselib_Thread_Id currentThreadId = Baselib_Thread_GetCurrentThreadId();
    const Baselib_Thread_Id lockOwner = Baselib_atomic_load_ptr_relaxed(&lock->owner);
    if (OPTIMIZER_LIKELY(currentThreadId != lockOwner))
    {
        if (!Baselib_Lock_TryTimedAcquire(&lock->lock, timeoutInMilliseconds))
            return false;
        lock->owner = currentThreadId;
        lock->count = 1;
        return true;
    }
    lock->count++;
    return true;
}
// Releases one level of ownership; the underlying lock is released only when
// the outermost Release is reached (count drops from 1 to 0).
// Calling Release on an unlocked lock (count == 0) is a silent no-op.
BASELIB_INLINE_API void Baselib_ReentrantLock_Release(Baselib_ReentrantLock* lock)
{
    if (lock->count > 0)
    {
        BaselibAssert(Baselib_atomic_load_ptr_relaxed(&lock->owner) == Baselib_Thread_GetCurrentThreadId(), "A recursive lock can only be unlocked by the locking thread");
        if (OPTIMIZER_LIKELY(lock->count == 1))
        {
            // Clear ownership before releasing the underlying lock so the next
            // owner never observes a stale owner id.
            lock->owner = Baselib_Thread_InvalidId;
            lock->count = 0;
            Baselib_Lock_Release(&lock->lock);
            return;
        }
        lock->count--;
    }
}
// Frees the underlying lock. Passing NULL is a no-op.
BASELIB_INLINE_API void Baselib_ReentrantLock_Free(Baselib_ReentrantLock* lock)
{
    if (lock == NULL)
        return;
    Baselib_Lock_Free(&lock->lock);
}

View File

@@ -0,0 +1,152 @@
#pragma once
#include "../Baselib_CountdownTimer.h"
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemFutex.h"
#include "../Baselib_Thread.h"
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
#error "Only use this implementation on top of a proper futex, in all other situations use Baselib_Semaphore_SemaphoreBased.inl.h"
#endif
// Space out to different cache lines.
// the idea here is that threads waking up from sleep should not have to
// access the cache line where count is stored, and only touch wakeups.
// the only exception to that rule is if we hit a timeout.
typedef struct Baselib_Semaphore
{
    int32_t wakeups;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
    int32_t count;
    char _cachelineSpacer2[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t)];
} Baselib_Semaphore;
BASELIB_STATIC_ASSERT(sizeof(Baselib_Semaphore) == PLATFORM_CACHE_LINE_SIZE * 2, "Baselib_Semaphore (Futex) size should match 2*cacheline size (128bytes)");
BASELIB_STATIC_ASSERT(offsetof(Baselib_Semaphore, wakeups) ==
    (offsetof(Baselib_Semaphore, count) - PLATFORM_CACHE_LINE_SIZE), "Baselib_Semaphore (Futex) wakeups and count shouldnt share cacheline");
// Creates a semaphore with an initial token count of zero (all acquires block).
BASELIB_INLINE_API Baselib_Semaphore Baselib_Semaphore_Create(void)
{
    Baselib_Semaphore semaphore = {0, {0}, 0, {0}};
    return semaphore;
}
// Attempts to consume one wakeup token from 'wakeups'.
// Returns true on success, false when no token is currently available.
BASELIB_INLINE_API bool Detail_Baselib_Semaphore_ConsumeWakeup(Baselib_Semaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->wakeups);
    while (previousCount > 0)
    {
        // Weak CAS may fail spuriously; on failure previousCount is refreshed
        // with the current value and the loop re-evaluates.
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->wakeups, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
// Non-blocking acquire: decrements 'count' only while it is positive, so it
// never pushes the count negative (i.e. never registers as a waiter).
BASELIB_INLINE_API bool Baselib_Semaphore_TryAcquire(Baselib_Semaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
// Acquires one token, blocking while none is available.
// fetch_add(-1): a positive previous count means we grabbed a token on the
// fast path; otherwise the count went (further) negative, registering this
// thread as a waiter, and we sleep until a releaser publishes a wakeup token.
BASELIB_INLINE_API void Baselib_Semaphore_Acquire(Baselib_Semaphore* semaphore)
{
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;
    while (!Detail_Baselib_Semaphore_ConsumeWakeup(semaphore))
    {
        // Futex waits can wake spuriously; re-check for a token on every wake.
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, UINT32_MAX);
    }
}
// Acquires one token, blocking for at most timeoutInMilliseconds.
// Returns true on success, false on timeout. On timeout the thread must
// deregister itself as a waiter (see comments below) before returning.
BASELIB_INLINE_API bool Baselib_Semaphore_TryTimedAcquire(Baselib_Semaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;
    uint32_t timeLeft = timeoutInMilliseconds;
    const Baselib_CountdownTimer timer = Baselib_CountdownTimer_StartMs(timeoutInMilliseconds);
    do
    {
        Baselib_SystemFutex_Wait(&semaphore->wakeups, 0, timeLeft);
        if (Detail_Baselib_Semaphore_ConsumeWakeup(semaphore))
            return true;
        timeLeft = Baselib_CountdownTimer_GetTimeLeftInMilliseconds(timer);
    }
    while (timeLeft);
    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Detail_Baselib_Semaphore_ConsumeWakeup(semaphore));
    return true;
}
// Releases (adds) _count tokens and wakes up to that many blocked acquirers.
// A negative previous count is the number of threads currently waiting; only
// min(_count, waiters) wakeup tokens are published.
BASELIB_INLINE_API void Baselib_Semaphore_Release(Baselib_Semaphore* semaphore, const uint16_t _count)
{
    const int32_t count = _count;
    int32_t previousCount = Baselib_atomic_fetch_add_32_release(&semaphore->count, count);
    // This should only be possible if thousands of threads enter this function simultaneously posting with a high count.
    // See overflow protection below.
    BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", previousCount, count);
    if (OPTIMIZER_UNLIKELY(previousCount < 0))
    {
        const int32_t waitingThreads = -previousCount;
        const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        // Publish wakeup tokens before notifying so woken threads find them.
        Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
        Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
        return;
    }
    // overflow protection
    // we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
    // this way we won't have to do clamping on every iteration
    while (OPTIMIZER_UNLIKELY(previousCount > Baselib_Semaphore_MaxGuaranteedCount * 2))
    {
        const int32_t maxCount = Baselib_Semaphore_MaxGuaranteedCount;
        if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
            return;
    }
}
// Sets the token count to zero and wakes every thread currently waiting.
// Returns the number of threads woken (0 if none were waiting).
BASELIB_INLINE_API uint32_t Baselib_Semaphore_ResetAndReleaseWaitingThreads(Baselib_Semaphore* semaphore)
{
    const int32_t count = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
    if (OPTIMIZER_LIKELY(count >= 0))
        return 0;
    // A negative count is the number of registered waiters.
    const int32_t threadsToWakeup = -count;
    Baselib_atomic_fetch_add_32_relaxed(&semaphore->wakeups, threadsToWakeup);
    Baselib_SystemFutex_Notify(&semaphore->wakeups, threadsToWakeup, Baselib_WakeupFallbackStrategy_All);
    return threadsToWakeup;
}
// No dynamic resources to release in the futex-based variant; only validates
// (in assert builds) that no threads are still waiting. NULL is a no-op.
BASELIB_INLINE_API void Baselib_Semaphore_Free(Baselib_Semaphore* semaphore)
{
    if (!semaphore)
        return;
    const int32_t count = Baselib_atomic_load_32_seq_cst(&semaphore->count);
    BaselibAssert(count >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
}

View File

@@ -0,0 +1,126 @@
#pragma once
#include "../Baselib_Atomic_TypeSafe.h"
#include "../Baselib_SystemSemaphore.h"
#include "../Baselib_Thread.h"
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#error "It's highly recommended to use Baselib_Semaphore_FutexBased.inl.h on platforms that have native futex support"
#endif
// Semaphore built on the platform's system semaphore (no native futex).
// 'count' is kept on a different cache line from the in-place system
// semaphore storage; the static assert below enforces the layout.
typedef struct Baselib_Semaphore
{
    Baselib_SystemSemaphore_Handle handle;
    int32_t count;
    char _cachelineSpacer0[PLATFORM_CACHE_LINE_SIZE - sizeof(int32_t) - sizeof(Baselib_SystemSemaphore_Handle)];
    char _systemSemaphoreData[Baselib_SystemSemaphore_PlatformSize];
} Baselib_Semaphore;
BASELIB_STATIC_ASSERT((offsetof(Baselib_Semaphore, count) + PLATFORM_CACHE_LINE_SIZE - sizeof(Baselib_SystemSemaphore_Handle)) ==
    offsetof(Baselib_Semaphore, _systemSemaphoreData), "count and internalData must not share cacheline");
// Creates a semaphore with an initial token count of zero; the system
// semaphore is constructed in-place inside the struct's own storage.
BASELIB_INLINE_API Baselib_Semaphore Baselib_Semaphore_Create(void)
{
    Baselib_Semaphore semaphore = {{0}, 0, {0}, {0}};
    semaphore.handle = Baselib_SystemSemaphore_CreateInplace(&semaphore._systemSemaphoreData);
    return semaphore;
}
// Non-blocking acquire: decrements 'count' only while it is positive, so it
// never pushes the count negative (i.e. never registers as a waiter).
BASELIB_INLINE_API bool Baselib_Semaphore_TryAcquire(Baselib_Semaphore* semaphore)
{
    int32_t previousCount = Baselib_atomic_load_32_relaxed(&semaphore->count);
    while (previousCount > 0)
    {
        if (Baselib_atomic_compare_exchange_weak_32_acquire_relaxed(&semaphore->count, &previousCount, previousCount - 1))
            return true;
    }
    return false;
}
// Acquires one token, blocking while none is available.
// A non-positive previous count means we registered as a waiter and must
// sleep on the system semaphore until a releaser posts it.
BASELIB_INLINE_API void Baselib_Semaphore_Acquire(Baselib_Semaphore* semaphore)
{
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return;
    Baselib_SystemSemaphore_Acquire(semaphore->handle);
}
// Acquires one token, blocking for at most timeoutInMilliseconds.
// Returns true on success, false on timeout. On timeout the thread must
// deregister itself as a waiter (see comments below) before returning.
BASELIB_INLINE_API bool Baselib_Semaphore_TryTimedAcquire(Baselib_Semaphore* semaphore, const uint32_t timeoutInMilliseconds)
{
    const int32_t previousCount = Baselib_atomic_fetch_add_32_acquire(&semaphore->count, -1);
    if (OPTIMIZER_LIKELY(previousCount > 0))
        return true;
    if (OPTIMIZER_LIKELY(Baselib_SystemSemaphore_TryTimedAcquire(semaphore->handle, timeoutInMilliseconds)))
        return true;
    // When timeout occurs we need to make sure we do one of the following:
    // Increase count by one from a negative value (give our acquired token back) or consume a wakeup.
    //
    // If count is not negative it's likely we are racing with a release operation in which case we
    // may end up having a successful acquire operation.
    do
    {
        int32_t count = Baselib_atomic_load_32_relaxed(&semaphore->count);
        while (count < 0)
        {
            if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &count, count + 1))
                return false;
        }
        // Likely a race, yield to give the release operation room to complete.
        // This includes a full memory barrier which ensures that there is no reordering between changing/reading count and wakeup consumption.
        Baselib_Thread_YieldExecution();
    }
    while (!Baselib_SystemSemaphore_TryAcquire(semaphore->handle));
    return true;
}
// Releases _count tokens.
// If the previous count was negative there were waiting threads; wake
// min(_count, waiters) of them via the OS semaphore. Otherwise, clamp the
// counter so repeated releases cannot overflow int32 (see comments below).
BASELIB_INLINE_API void Baselib_Semaphore_Release(Baselib_Semaphore* semaphore, const uint16_t _count)
{
const int32_t count = _count;
int32_t previousCount = Baselib_atomic_fetch_add_32_release(&semaphore->count, count);
// This should only be possible if thousands of threads enter this function simultaneously posting with a high count.
// See overflow protection below.
BaselibAssert(previousCount <= (previousCount + count), "Semaphore count overflow (current: %d, added: %d).", previousCount, count);
if (OPTIMIZER_UNLIKELY(previousCount < 0))
{
// -previousCount threads are parked; wake at most 'count' of them.
const int32_t waitingThreads = -previousCount;
const int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
return;
}
// overflow protection
// we clamp count to MaxGuaranteedCount when count exceed MaxGuaranteedCount * 2
// this way we won't have to do clamping on every iteration
while (OPTIMIZER_UNLIKELY(previousCount > Baselib_Semaphore_MaxGuaranteedCount * 2))
{
const int32_t maxCount = Baselib_Semaphore_MaxGuaranteedCount;
if (Baselib_atomic_compare_exchange_weak_32_relaxed_relaxed(&semaphore->count, &previousCount, maxCount))
return;
}
}
// Resets the token count to zero and wakes every thread currently waiting.
// Returns the number of threads that were woken (0 if none were waiting).
BASELIB_INLINE_API uint32_t Baselib_Semaphore_ResetAndReleaseWaitingThreads(Baselib_Semaphore* semaphore)
{
const int32_t count = Baselib_atomic_exchange_32_release(&semaphore->count, 0);
if (OPTIMIZER_LIKELY(count >= 0))
return 0;
// A negative count encodes the number of parked waiters; wake them all.
const int32_t threadsToWakeup = -count;
Baselib_SystemSemaphore_Release(semaphore->handle, threadsToWakeup);
return threadsToWakeup;
}
// Destroys the semaphore's in-place OS semaphore. NULL is a no-op.
// Freeing while threads are still waiting (count < 0) is a usage error,
// caught by the assert in debug builds.
BASELIB_INLINE_API void Baselib_Semaphore_Free(Baselib_Semaphore* semaphore)
{
if (!semaphore)
return;
const int32_t count = Baselib_atomic_load_32_seq_cst(&semaphore->count);
BaselibAssert(count >= 0, "Destruction is not allowed when there are still threads waiting on the semaphore.");
Baselib_SystemSemaphore_FreeInplace(semaphore->handle);
}

View File

@@ -0,0 +1,194 @@
#pragma once
#include "../../../C/Baselib_Atomic.h"
#include "../../../C/Baselib_Atomic_Macros.h"
#include "Baselib_Atomic_Gcc_Apple_LLVM_Patch.h"
// The __atomic_* builtins used below require GCC 4.7 or newer.
#if COMPILER_GCC && ((__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 7))
#pragma message "GNUC: " PP_STRINGIZE(__GNUC__) " GNUC_MINOR: " PP_STRINGIZE(__GNUC_MINOR__)
#error "GCC is too old and/or missing compatible atomic built-in functions" PP_STRINGIZE(__GNUC__)
#endif
// Map Baselib memory-order names to the compiler's __ATOMIC_* constants.
#define detail_intrinsic_relaxed __ATOMIC_RELAXED
#define detail_intrinsic_acquire __ATOMIC_ACQUIRE
#define detail_intrinsic_release __ATOMIC_RELEASE
#define detail_intrinsic_acq_rel __ATOMIC_ACQ_REL
#define detail_intrinsic_seq_cst __ATOMIC_SEQ_CST
// Patch gcc and clang intrinsics to achieve a sequentially consistent barrier.
// As of writing Clang 9, GCC 9 none of them produce a seq cst barrier for load-store operations.
// To fix this we switch load store to be acquire release with a full final barrier.
#define detail_ldst_intrinsic_relaxed detail_intrinsic_relaxed
#define detail_ldst_intrinsic_acquire detail_intrinsic_acquire
#define detail_ldst_intrinsic_release detail_intrinsic_release
#define detail_ldst_intrinsic_acq_rel detail_intrinsic_acq_rel
#define detail_ldst_intrinsic_seq_cst detail_intrinsic_seq_cst
// On AArch64: demote seq_cst load-store ops to acq_rel and append an explicit
// full fence (the *_seq_cst barrier macro below). On other targets the barrier
// macros expand to nothing.
#if defined(__aarch64__)
#undef detail_ldst_intrinsic_seq_cst
#define detail_ldst_intrinsic_seq_cst __ATOMIC_ACQ_REL
#define detail_AARCH64_SEQCST_PATCH_BARRIER_relaxed
#define detail_AARCH64_SEQCST_PATCH_BARRIER_acquire
#define detail_AARCH64_SEQCST_PATCH_BARRIER_release
#define detail_AARCH64_SEQCST_PATCH_BARRIER_acq_rel
#define detail_AARCH64_SEQCST_PATCH_BARRIER_seq_cst __extension__({__atomic_thread_fence (__ATOMIC_SEQ_CST); });
#else
#define detail_AARCH64_SEQCST_PATCH_BARRIER_relaxed
#define detail_AARCH64_SEQCST_PATCH_BARRIER_acquire
#define detail_AARCH64_SEQCST_PATCH_BARRIER_release
#define detail_AARCH64_SEQCST_PATCH_BARRIER_acq_rel
#define detail_AARCH64_SEQCST_PATCH_BARRIER_seq_cst
#endif
// Generator: defines Baselib_atomic_thread_fence_<order>() issuing a fence of
// the given memory order via the GCC/Clang __atomic_thread_fence builtin.
//
// FIX: the original last line ended with a trailing '\' continuation. When the
// following line is not blank (blank lines are stripped in this file), the
// preprocessor splices the next '#define' into this macro's definition,
// corrupting both macros. The stray backslash is removed; this is harmless
// even if a blank line does follow.
#define detail_THREAD_FENCE(order, ...) \
static FORCE_INLINE void Baselib_atomic_thread_fence_##order(void) \
{ \
__extension__({__atomic_thread_fence (detail_intrinsic_##order); }); \
}
// Generator: atomic load taking a const source pointer.
#define detail_LOAD(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(const void* obj, void* result) \
{ \
__extension__({ __atomic_load((int_type*)obj, (int_type*)result, detail_intrinsic_##order); }); \
}
// Generator: same as detail_LOAD but with a non-const source pointer
// (used for the 128-bit/ptr2x variants where cmpxchg may write back).
#define detail_LOAD_NOT_CONST(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
{ \
__extension__({ __atomic_load((int_type*)obj, (int_type*)result, detail_intrinsic_##order); }); \
}
// Generator: atomic store.
#define detail_STORE(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
{ \
__extension__({ __atomic_store((int_type*)obj, (int_type*)value, detail_intrinsic_##order); }); \
}
// Generator: read-modify-write ops (add/and/or/xor). Uses the patched
// detail_ldst_* order plus the AArch64 seq_cst trailing barrier (see above).
#define detail_ALU(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
{ \
*(int_type*)result = __extension__({ __atomic_##op((int_type*)obj, *(int_type*)value, detail_ldst_intrinsic_##order); });\
detail_AARCH64_SEQCST_PATCH_BARRIER_##order; \
}
// Generator: atomic exchange, with the same seq_cst patching as detail_ALU.
#define detail_XCHG(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
{ \
__extension__({ __atomic_exchange((int_type*)obj, (int_type*)value, (int_type*)result, detail_ldst_intrinsic_##order); });\
detail_AARCH64_SEQCST_PATCH_BARRIER_##order; \
}
// Generator: weak compare-exchange. The APPLE_LLVM patch macro may replace the
// whole body with inline asm for 128-bit types on affected compilers; the
// success/failure barrier matches the respective memory order.
#define detail_CMP_XCHG_WEAK(op, order1, order2, id , bits, int_type, ...) \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
{ \
detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value); \
bool result = __extension__({ __atomic_compare_exchange( \
(int_type*)obj, \
(int_type*)expected, \
(int_type*)value, \
1, \
detail_ldst_intrinsic_##order1, \
detail_ldst_intrinsic_##order2); \
}); \
if (result) { detail_AARCH64_SEQCST_PATCH_BARRIER_##order1; } \
else { detail_AARCH64_SEQCST_PATCH_BARRIER_##order2;} \
return result; \
}
// Generator: strong compare-exchange (retries internally; weak flag = 0).
#define detail_CMP_XCHG_STRONG(op, order1, order2, id , bits, int_type, ...) \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
{ \
detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value); \
bool result = __extension__ ({ __atomic_compare_exchange( \
(int_type*)obj, \
(int_type*)expected, \
(int_type*)value, \
0, \
detail_ldst_intrinsic_##order1, \
detail_ldst_intrinsic_##order2); \
}); \
if (result) { detail_AARCH64_SEQCST_PATCH_BARRIER_##order1; } \
else { detail_AARCH64_SEQCST_PATCH_BARRIER_##order2;} \
return result; \
}
// Placeholder generator for operations a type does not support.
#define detail_NOT_SUPPORTED(...)
// Instantiate the fence functions for every memory order.
Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
detail_THREAD_FENCE
)
// Instantiate all atomic ops for every (op, order, integer type) combination.
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE(
detail_LOAD, // load
detail_STORE, // store
detail_ALU, // add
detail_ALU, // and
detail_ALU, // or
detail_ALU, // xor
detail_XCHG, // exchange
detail_CMP_XCHG_WEAK, // compare_exchange_weak
detail_CMP_XCHG_STRONG, // compare_exchange_strong
)
// 128-bit / double-pointer-width variants. Only load/store/exchange/cmpxchg are
// provided; ALU ops are not supported at this width.
#if PLATFORM_ARCH_64
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD_NOT_CONST, // load
detail_STORE, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_XCHG, // exchange
detail_CMP_XCHG_WEAK, // compare_exchange_weak
detail_CMP_XCHG_STRONG, // compare_exchange_strong
128, 128, __int128 // type information
)
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD_NOT_CONST, // load
detail_STORE, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_XCHG, // exchange
detail_CMP_XCHG_WEAK, // compare_exchange_weak
detail_CMP_XCHG_STRONG, // compare_exchange_strong
ptr2x, 128, __int128 // type information
)
#else
// 32-bit targets: ptr2x is 64 bits wide.
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD_NOT_CONST, // load
detail_STORE, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_XCHG, // exchange
detail_CMP_XCHG_WEAK, // compare_exchange_weak
detail_CMP_XCHG_STRONG, // compare_exchange_strong
ptr2x, 64, int64_t // type information
)
#endif
// Clean up all helper macros so they do not leak out of this header.
#undef detail_intrinsic_relaxed
#undef detail_intrinsic_acquire
#undef detail_intrinsic_release
#undef detail_intrinsic_acq_rel
#undef detail_intrinsic_seq_cst
#undef detail_THREAD_FENCE
#undef detail_LOAD
#undef detail_LOAD_NOT_CONST
#undef detail_STORE
#undef detail_ALU
#undef detail_XCHG
#undef detail_CMP_XCHG_WEAK
#undef detail_CMP_XCHG_STRONG
#undef detail_NOT_SUPPORTED
#include "Baselib_Atomic_Gcc_Apple_LLVM_Patch_PostInclude.h"

View File

@@ -0,0 +1,142 @@
#pragma once
#if PLATFORM_USE_APPLE_LLVM_ATOMIC_CMPXCHG_128_PATCH
//
// Patch for Apple LLVM version 8.x.x (clang-800.0.38 - clang-900.0.37) intrinsic 128-bit __atomic_compare_exchange implementation (debug, using opt level -O0).
// Note that this patch is only in effect on tvOS/iOS AArch64 debug builds for Apple LLVM version 8.x.x. Arm32 verified working without patch.
//
// Problem:
// For the above builds, the __atomic_compare_exchange asm expasion used SUBS/SBCS to compare the pair of "obj" and "expected" values.
// SUBS/SBCS does not provide sufficient NZCV flags for comparing two 64-bit values.
// The result is erraneous comparison of "obj" and "expected". Some examples:
//
// -- fails (lo != lo && hi == hi)
// obj.lo = 5;
// obj.hi = 10;
// expected.lo = 3;
// expected.hi = 10;
//
// -- works (expected.lo < 0)
// obj.lo = 5;
// obj.hi = 20;
// expected.lo = -3;
// expected.hi = 20;
//
// -- fails (obj.lo < 0 && hi == hi)
// obj.lo = -5;
// obj.hi = 30;
// expected.lo = 3;
// expected.hi = 30;
//
// -- fails (expected.lo < 0 && obj.hi+1 == expected.hi)
// obj.lo = 5;
// obj.hi = 3;
// expected.lo = -3;
// expected.hi = 2;
//
// Solution: Inline assembly replacement of __atomic_compare_exchange using the same approach as in release mode
//
// Note: This patch should be removed in it's entirety once we require Apple LLVM version 9 (clang-900.0.37) or higher for building.
//
// Weak 128-bit cmpxchg via LL/SC (ldxp/ldaxp + stxp/stlxp): single attempt,
// may fail spuriously (failed store-exclusive is reported as failure).
// On failure the freshly loaded value is stored back into 'expected'.
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ld_instr, st_instr, barrier_instr) \
{ \
register bool result asm ("w0"); \
asm volatile \
( \
" ldp x12, x13, [%x4] ; load expected \n" \
" ldp x10, x11, [%x5] ; load value \n" \
" " #ld_instr " x9, x8, [%x3] ; load obj \n" \
" eor x13, x8, x13 ; compare to expected \n" \
" eor x12, x9, x12 \n" \
" orr x12, x12, x13 \n" \
" cbnz x12, 0f ; not equal = no store \n" \
" " #st_instr " w12, x10, x11, [%x0] ; try store \n" \
" cbnz w12, 1f \n" \
" orr w0, wzr, #0x1 ; success, result in w0 \n" \
" b 2f \n" \
"0: ; no store \n" \
" clrex \n" \
"1: ; failed store \n" \
" movz w0, #0 \n" \
"2: ; store expected, fail \n" \
" tbnz w0, #0, 3f \n" \
" stp x9, x8, [%x1] \n" \
"3: \n" \
" " #barrier_instr " \n" \
\
: "+r" (obj), "+r" (expected), "=r" (result) \
: "r" (obj), "r" (expected), "r" (value) \
: "x8", "x9", "x10", "x11", "x12", "x13", "cc", "memory"); \
\
return result != 0; \
}
// Per-memory-order instantiations: acquire orders use ldaxp, release orders use
// stlxp, and seq_cst appends a trailing 'dmb ish' full barrier.
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_relaxed_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldxp, stxp, )
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stxp, )
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stxp, )
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_release_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldxp, stlxp, )
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, )
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, )
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, dmb ish)
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, dmb ish)
#define detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_seq_cst(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_WEAK_128(obj, expected, value, ldaxp, stlxp, dmb ish)
// Strong 128-bit cmpxchg: same LL/SC scheme as the weak variant, but retries
// the load/store-exclusive loop (label 0) on spurious store failure, so it only
// fails when the values genuinely differ.
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ld_instr, st_instr, barrier_instr) \
{ \
register bool result asm ("w0"); \
asm volatile \
( \
" ldp x10, x11, [%x4] ; load expected \n" \
" ldp x12, x13, [%x5] ; load value \n" \
"0: \n" \
" " #ld_instr " x9, x8, [%x3] ; load obj (ldxp/ldaxp) \n" \
" eor x14, x8, x11 ; compare to expected \n" \
" eor x15, x9, x10 \n" \
" orr x14, x15, x14 \n" \
" cbnz x14, 1f ; not equal = no store \n" \
" " #st_instr " w14, x12, x13, [%x0] ; try store (stxp/stlxp) \n" \
" cbnz w14, 0b ; retry or store result in w0 \n" \
" orr w0, wzr, #0x1 \n" \
" b 2f \n" \
"1: ; no store \n" \
" movz w0, #0 \n" \
" clrex \n" \
"2: ; store expected on fail \n" \
" tbnz w0, #0, 3f \n" \
" stp x9, x8, [%x1] \n" \
"3: \n" \
" " #barrier_instr " \n" \
\
: "+r" (obj), "+r" (expected), "=r" (result) \
: "r" (obj), "r" (expected), "r" (value) \
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "cc", "memory"); \
\
return result != 0; \
}
// Per-memory-order instantiations, mirroring the weak-variant table above.
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_relaxed_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldxp, stxp, )
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stxp, )
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stxp, )
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_release_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldxp, stlxp, )
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, )
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, )
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_relaxed(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, dmb ish)
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_acquire(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, dmb ish)
#define detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_seq_cst(obj, expected, value) detail_APPLE_LLVM_CMP_XCHG_STRONG_128(obj, expected, value, ldaxp, stlxp, dmb ish)
// Hook macros expanded inside the generic cmpxchg generators: for 16-byte types
// they substitute the patched asm body (the 'if' returns before the intrinsic
// is reached); for all other sizes they expand to a no-op condition.
#define detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value) \
if(sizeof(int_type) == 16) \
detail_APPLE_LLVM_CMP_XCHG_WEAK_128_##order1##_##order2(obj, expected, value);
#define detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH(order1, order2, int_type, obj, expected, value) \
if(sizeof(int_type) == 16) \
detail_APPLE_LLVM_CMP_XCHG_STRONG_128_##order1##_##order2(obj, expected, value);
#else // PLATFORM_USE_APPLE_LLVM_ATOMIC_CMPXCHG_128_PATCH
// Patch disabled: the hook macros expand to nothing.
#define detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH(...)
#define detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH(...)
#endif

View File

@@ -0,0 +1,30 @@
#pragma once
// Post-include cleanup: undefines every macro introduced by
// Baselib_Atomic_Gcc_Apple_LLVM_Patch.h so they do not leak to other headers.
#if PLATFORM_USE_APPLE_LLVM_ATOMIC_CMPXCHG_128_PATCH
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_relaxed_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acquire_acquire
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_release_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_acq_rel_acquire
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_acquire
#undef detail_APPLE_LLVM_CMP_XCHG_WEAK_128_seq_cst_seq_cst
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_relaxed_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acquire_acquire
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_release_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_acq_rel_acquire
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_relaxed
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_acquire
#undef detail_APPLE_LLVM_CMP_XCHG_STRONG_128_seq_cst_seq_cst
#undef detail_APPLE_LLVM_CMP_XCHG_128_WEAK_APPLE_LLVM_PATCH
#undef detail_APPLE_LLVM_CMP_XCHG_128_STRONG_APPLE_LLVM_PATCH
#endif

View File

@@ -0,0 +1,40 @@
#pragma once
// Arm exlusive state access break implementation
#define detail_Baselib_atomic_llsc_break() __builtin_arm_clrex()
// Arm exlusive LLSC implementation using intrinsics.
// Load-linked/store-conditional loop: load-exclusive into *expected, run the
// user 'code', then store-conditional *value; repeat until the store succeeds.
// Memory-order barriers are supplied by the load_barrier/store_barrier slots.
#define detail_Baselib_atomic_llsc_arm_ts(obj, expected, value, code, ll_instr, sc_instr, load_barrier, store_barrier) \
do { \
do { \
*expected = __builtin_arm_##ll_instr(obj); \
load_barrier; \
code; \
} while (__builtin_arm_##sc_instr(*value, obj)); \
store_barrier; \
} while (false)
// void*-typed front end: casts the three pointers to the requested integer type.
#define detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ll_instr, sc_instr, loadbarrier, storebarrier) \
detail_Baselib_atomic_llsc_arm_ts((int_type*)((void*)obj), \
(int_type*)((void*)expected), \
(int_type*)((void*)value), \
code, ll_instr, sc_instr, loadbarrier, storebarrier)
#define detail_Baselib_atomic_llsc_relaxed_relaxed_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, ,)
// AArch64 has acquire/release exclusives (ldaex/stlex); 32-bit Arm emulates the
// ordering with explicit dmb(11) barriers around plain ldrex/strex.
#if PLATFORM_ARCH_64
#define detail_Baselib_atomic_llsc_acquire_relaxed_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldaex, strex, ,)
#define detail_Baselib_atomic_llsc_relaxed_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, stlex, ,)
#define detail_Baselib_atomic_llsc_acquire_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldaex, stlex, ,)
#define detail_Baselib_atomic_llsc_seq_cst_seq_cst_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldaex, stlex, , __builtin_arm_dmb(11) )
#else
#define detail_Baselib_atomic_llsc_acquire_relaxed_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, __builtin_arm_dmb(11), )
#define detail_Baselib_atomic_llsc_relaxed_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, ,__builtin_arm_dmb(11) )
#define detail_Baselib_atomic_llsc_acquire_release_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, __builtin_arm_dmb(11) , __builtin_arm_dmb(11) )
#define detail_Baselib_atomic_llsc_seq_cst_seq_cst_v(obj, expected, value, code, int_type) detail_Baselib_atomic_llsc_arm_v(obj, expected, value, code, int_type, ldrex, strex, __builtin_arm_dmb(11) , __builtin_arm_dmb(11) )
#endif
// Size-keyed entry points: dispatch on bit width / 128-bit type.
#define detail_Baselib_atomic_llsc_v(obj, expected, value, code, size, loadbarrier, storebarrier) \
detail_Baselib_atomic_llsc_##loadbarrier##_##storebarrier##_v(obj, expected, value, code, int##size##_t)
#define detail_Baselib_atomic_llsc_128_v(obj, expected, value, code, loadbarrier, storebarrier) \
detail_Baselib_atomic_llsc_##loadbarrier##_##storebarrier##_v(obj, expected, value, code, __int128)

View File

@@ -0,0 +1,358 @@
#pragma once
#include "../../../C/Baselib_Atomic.h"
#include "../../../C/Baselib_Atomic_Macros.h"
#include "Baselib_Atomic_MsvcIntrinsics.h"
// Pairwise memory-order selectors: detail_<a>_<b>(...) expands its arguments
// only when a == b, so detail_<a>(order, code) emits 'code' solely for the
// matching memory order. Used below to generate order-specific barriers.
#define detail_relaxed_relaxed(...) __VA_ARGS__
#define detail_relaxed_acquire(...)
#define detail_relaxed_release(...)
#define detail_relaxed_acq_rel(...)
#define detail_relaxed_seq_cst(...)
#define detail_acquire_relaxed(...)
#define detail_acquire_acquire(...) __VA_ARGS__
#define detail_acquire_release(...)
#define detail_acquire_acq_rel(...)
#define detail_acquire_seq_cst(...)
#define detail_release_relaxed(...)
#define detail_release_acquire(...)
#define detail_release_release(...) __VA_ARGS__
#define detail_release_acq_rel(...)
#define detail_release_seq_cst(...)
#define detail_acq_rel_relaxed(...)
#define detail_acq_rel_acquire(...)
#define detail_acq_rel_release(...)
#define detail_acq_rel_acq_rel(...) __VA_ARGS__
#define detail_acq_rel_seq_cst(...)
#define detail_seq_cst_relaxed(...)
#define detail_seq_cst_acquire(...)
#define detail_seq_cst_release(...)
#define detail_seq_cst_acq_rel(...)
#define detail_seq_cst_seq_cst(...) __VA_ARGS__
#define detail_relaxed(memory_order, ...) detail_relaxed_##memory_order(__VA_ARGS__)
#define detail_acquire(memory_order, ...) detail_acquire_##memory_order(__VA_ARGS__)
#define detail_release(memory_order, ...) detail_release_##memory_order(__VA_ARGS__)
#define detail_acq_rel(memory_order, ...) detail_acq_rel_##memory_order(__VA_ARGS__)
#define detail_seq_cst(memory_order, ...) detail_seq_cst_##memory_order(__VA_ARGS__)
// Intel
// ------------------------------------------------------------------------------------------------------------------------------------------------------
#if defined(_M_IX86) || defined(_M_X64)
// x86/x64 interlocked intrinsics have no order-suffixed variants; the suffix is empty.
#define detail_intrinsic_relaxed
#define detail_intrinsic_acquire
#define detail_intrinsic_release
#define detail_intrinsic_acq_rel
#define detail_intrinsic_seq_cst
#if defined(_M_X64)
// x64: compiler-only barriers for acquire/release; __faststorefence for seq_cst.
#define detail_THREAD_FENCE(order, ...) \
static COMPILER_FORCEINLINE void Baselib_atomic_thread_fence_##order() \
{ \
detail_acquire(order, _ReadWriteBarrier()); \
detail_release(order, _ReadWriteBarrier()); \
detail_acq_rel(order, _ReadWriteBarrier()); \
detail_seq_cst(order, __faststorefence()); \
}
#else // #defined(_M_IX86)
// x86: seq_cst fence emulated with a dummy interlocked exchange.
#define detail_THREAD_FENCE(order, ...) \
static COMPILER_FORCEINLINE void Baselib_atomic_thread_fence_##order() \
{ \
detail_acquire(order, _ReadWriteBarrier()); \
detail_release(order, _ReadWriteBarrier()); \
detail_acq_rel(order, _ReadWriteBarrier()); \
detail_seq_cst(order, _ReadWriteBarrier(); __int32 temp = 0; _InterlockedExchange32(&temp, 0); _ReadWriteBarrier()); \
}
#endif
// Plain volatile loads per width.
#define detail_LOAD_BITS_8(obj, result) *(__int8*)result = *(const volatile __int8*)obj
#define detail_LOAD_BITS_16(obj, result) *(__int16*)result = *(const volatile __int16*)obj
#define detail_LOAD_BITS_32(obj, result) *(__int32*)result = *(const volatile __int32*)obj
#if PLATFORM_ARCH_64
#define detail_LOAD_BITS_64(obj, result) *(__int64*)result = *(const volatile __int64*)obj
#else
// x86 32-bit load/store 64-bit integer.
// - SSE2 enabled yields (identical to __mm_store/load):
// movsd xmm0, QWORD PTR unsigned __int64 obj
// movsd QWORD PTR unsigned __int64 result, xmm0
// - No SSE2 enabled yields:
// fld QWORD PTR unsigned __int64 obj
// fstp QWORD PTR unsigned __int64 result
// Link comparing various implementations: https://godbolt.org/z/T3zW5M
#define detail_LOAD_BITS_64(obj, result) *(double*)result = *(const volatile double*)obj
#endif
// Atomic load generator: plain load + compiler barrier for acquire/seq_cst.
#define detail_LOAD(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(const void* obj, void* result) \
{ \
detail_LOAD_BITS_##bits(obj, result); \
detail_acquire(order, _ReadWriteBarrier()); \
detail_seq_cst(order, _ReadWriteBarrier()); \
}
// Same as detail_LOAD with a non-const source pointer.
#define detail_LOAD_NOT_CONST(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
{ \
detail_LOAD_BITS_##bits(obj, result); \
detail_acquire(order, _ReadWriteBarrier()); \
detail_seq_cst(order, _ReadWriteBarrier()); \
}
// Plain volatile stores per width.
#define detail_STORE_BITS_8(obj, value) *(volatile __int8*)obj = *(const __int8*)value
#define detail_STORE_BITS_16(obj, value) *(volatile __int16*)obj = *(const __int16*)value
#define detail_STORE_BITS_32(obj, value) *(volatile __int32*)obj = *(const __int32*)value
#if PLATFORM_ARCH_64
#define detail_STORE_BITS_64(obj, value) *(volatile __int64*)obj = *(const __int64*)value
#else
#define detail_STORE_BITS_64(obj, value) *(volatile double*)obj = *(double*)value
#endif
// Atomic store generator: seq_cst stores use an interlocked exchange.
#define detail_STORE(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
{ \
detail_relaxed(order, detail_STORE_BITS_##bits(obj, value)); \
detail_release(order, detail_STORE_BITS_##bits(obj, value); _ReadWriteBarrier()); \
detail_seq_cst(order, _InterlockedExchange##bits((__int##bits*)obj, *(const __int##bits*)value)); \
}
// ARM
// ------------------------------------------------------------------------------------------------------------------------------------------------------
#elif defined(_M_ARM) || defined(_M_ARM64)
// MSVC ARM interlocked intrinsics carry order suffixes (_nf/_acq/_rel);
// acq_rel and seq_cst use the unsuffixed (fully fenced) variants.
#define detail_intrinsic_relaxed _nf
#define detail_intrinsic_acquire _acq
#define detail_intrinsic_release _rel
#define detail_intrinsic_acq_rel
#define detail_intrinsic_seq_cst
// Thread fence: inner-shareable data memory barrier for all non-relaxed orders.
#define detail_THREAD_FENCE(order, ...) \
static COMPILER_FORCEINLINE void Baselib_atomic_thread_fence_##order() \
{ \
detail_acquire(order, __dmb(_ARM_BARRIER_ISH)); \
detail_release(order, __dmb(_ARM_BARRIER_ISH)); \
detail_acq_rel(order, __dmb(_ARM_BARRIER_ISH)); \
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
}
// Atomic load generator: __iso_volatile load + trailing dmb for acquire/seq_cst.
#define detail_LOAD(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(const void* obj, void* result) \
{ \
*(__int##bits*)result = __iso_volatile_load##bits((const __int##bits*)obj); \
detail_acquire(order, __dmb(_ARM_BARRIER_ISH)); \
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
}
// Same as detail_LOAD with a non-const source pointer.
#define detail_LOAD_NOT_CONST(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
{ \
*(__int##bits*)result = __iso_volatile_load##bits((const __int##bits*)obj); \
detail_acquire(order, __dmb(_ARM_BARRIER_ISH)); \
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
}
// Atomic store generator: dmb before (release/seq_cst) and after (seq_cst) the store.
#define detail_STORE(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
{ \
detail_release(order, __dmb(_ARM_BARRIER_ISH)); \
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
__iso_volatile_store##bits((__int##bits*) obj, *(const __int##bits*)value); \
detail_seq_cst(order, __dmb(_ARM_BARRIER_ISH)); \
}
#endif
// Common
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// Map Baselib op names to the MSVC interlocked intrinsic families; the final
// name is assembled as <intrinsic><bits><order-suffix> via PP_CONCAT below.
#define detail_intrinsic_exchange _InterlockedExchange
#define detail_intrinsic_fetch_add _InterlockedExchangeAdd
#define detail_intrinsic_fetch_and _InterlockedAnd
#define detail_intrinsic_fetch_or _InterlockedOr
#define detail_intrinsic_fetch_xor _InterlockedXor
// Generator: read-modify-write ops (add/and/or/xor/exchange) via interlocked intrinsics.
#define detail_LOAD_STORE(op, order, id , bits, int_type, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
{ \
*(__int##bits##*)result = PP_CONCAT(detail_intrinsic_##op, bits, detail_intrinsic_##order)((__int##bits##*)obj, *(const __int##bits##*)value); \
}
// Generator: compare-exchange. On failure the observed value is written back to
// 'expected', matching the C11/__atomic cmpxchg contract.
#define detail_CMP_XCHG(op, order1, order2, id , bits, int_type, ...) \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
{ \
__int##bits cmp = *(__int##bits##*)expected; \
__int##bits result = PP_CONCAT(_InterlockedCompareExchange, bits, detail_intrinsic_##order1)((__int##bits##*)obj, *(__int##bits##*)value, cmp); \
return result == cmp ? true : (*(__int##bits##*)expected = result, false); \
}
// Placeholder generator for unsupported (op, type) combinations.
#define detail_NOT_SUPPORTED(...)
// Setup implementation
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// Instantiate fences and all (op, order, type) combinations from the generators above.
Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
detail_THREAD_FENCE
)
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_TYPE(
detail_LOAD, // load
detail_STORE, // store
detail_LOAD_STORE, // add
detail_LOAD_STORE, // and
detail_LOAD_STORE, // or
detail_LOAD_STORE, // xor
detail_LOAD_STORE, // exchange
detail_CMP_XCHG, // compare_exchange_weak
detail_CMP_XCHG // compare_exchange_strong
)
#if PLATFORM_ARCH_64
// 128-bit implementation
// There are more efficient ways of doing load, store and exchange on Arm64. Unfortunately MSVC doesn't provide intrinsics for those. The specific
// instructions needed to perform atomic load, store and exchange are also not available on MSVC.
// Hence we fallback to cmpxchg for all atomic ops.
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// Load via a self-cmpxchg (stores nothing when the comparand matches).
#define detail_LOAD128(op, order, id, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, void* result) \
{ \
Baselib_atomic_compare_exchange_weak_128_##order##_##order##_v((void*)obj, result, result); \
}
// Store via a cmpxchg retry loop seeded with the current value.
#define detail_STORE128(op, order, id, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value) \
{ \
uint64_t comparand[2] = { ((volatile uint64_t*)obj)[0], ((volatile uint64_t*)obj)[1] }; \
while(!Baselib_atomic_compare_exchange_weak_128_##order##_relaxed_v(obj, comparand, value)) \
; \
}
// Exchange via a cmpxchg retry loop; 'result' doubles as the comparand and
// ends up holding the previous value on success.
#define detail_XCHG128(op, order, id, ...) \
static FORCE_INLINE void Baselib_atomic_##op##_##id##_##order##_v(void* obj, const void* value, void* result) \
{ \
((uint64_t*)result)[0] = ((volatile uint64_t*)obj)[0]; \
((uint64_t*)result)[1] = ((volatile uint64_t*)obj)[1]; \
while(!Baselib_atomic_compare_exchange_weak_128_##order##_relaxed_v(obj, result, value)) \
; \
}
// 128-bit cmpxchg via _InterlockedCompareExchange128 (writes the observed
// value into 'expected' on failure; returns 1 on success).
#define detail_CMP_XCHG128(op, order1, order2, id, ...) \
static FORCE_INLINE bool Baselib_atomic_##op##_##id##_##order1##_##order2##_v(void* obj, void* expected, const void* value) \
{ \
return PP_CONCAT(_InterlockedCompareExchange128, detail_intrinsic_##order1)( \
(__int64*)obj, \
((const __int64*)value)[1], \
((const __int64*)value)[0], \
(__int64*)expected \
) == 1; \
}
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD128, // load
detail_STORE128, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_XCHG128, // exchange
detail_CMP_XCHG128, // compare_exchange_weak
detail_CMP_XCHG128, // compare_exchange_strong
128
)
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD128, // load
detail_STORE128, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_XCHG128, // exchange
detail_CMP_XCHG128, // compare_exchange_weak
detail_CMP_XCHG128, // compare_exchange_strong
ptr2x
)
#undef detail_LOAD128
#undef detail_STORE128
#undef detail_XCHG128
#undef detail_CMP_XCHG128
#else
// 32-bit targets: ptr2x maps onto the 64-bit scalar implementation.
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD_NOT_CONST, // load
detail_STORE, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_LOAD_STORE, // exchange
detail_CMP_XCHG, // compare_exchange_weak
detail_CMP_XCHG, // compare_exchange_strong
ptr2x, 64, int64_t
)
#endif
#undef detail_THREAD_FENCE
#undef detail_LOAD
#undef detail_LOAD_NOT_CONST
#undef detail_STORE
#undef detail_LOAD_STORE
#undef detail_CMP_XCHG
#undef detail_NOT_SUPPORTED
#undef detail_LOAD_BITS_8
#undef detail_LOAD_BITS_16
#undef detail_LOAD_BITS_32
#undef detail_LOAD_BITS_64
#undef detail_STORE_BITS_8
#undef detail_STORE_BITS_16
#undef detail_STORE_BITS_32
#undef detail_STORE_BITS_64
#undef detail_intrinsic_exchange
#undef detail_intrinsic_fetch_add
#undef detail_intrinsic_fetch_and
#undef detail_intrinsic_fetch_or
#undef detail_intrinsic_fetch_xor
#undef detail_relaxed_relaxed
#undef detail_relaxed_acquire
#undef detail_relaxed_release
#undef detail_relaxed_acq_rel
#undef detail_relaxed_seq_cst
#undef detail_acquire_relaxed
#undef detail_acquire_acquire
#undef detail_acquire_release
#undef detail_acquire_acq_rel
#undef detail_acquire_seq_cst
#undef detail_release_relaxed
#undef detail_release_acquire
#undef detail_release_release
#undef detail_release_acq_rel
#undef detail_release_seq_cst
#undef detail_acq_rel_relaxed
#undef detail_acq_rel_acquire
#undef detail_acq_rel_release
#undef detail_acq_rel_acq_rel
#undef detail_acq_rel_seq_cst
#undef detail_seq_cst_relaxed
#undef detail_seq_cst_acquire
#undef detail_seq_cst_release
#undef detail_seq_cst_acq_rel
#undef detail_seq_cst_seq_cst
#undef detail_relaxed
#undef detail_acquire
#undef detail_release
#undef detail_acq_rel
#undef detail_seq_cst

View File

@@ -0,0 +1,58 @@
#pragma once
#include <intrin.h>
// Inner-shareable barrier encoding used with ARM DMB; defined here when the
// toolchain headers do not provide it.
#ifndef _ARM_BARRIER_ISH
#define _ARM_BARRIER_ISH 0xB
#endif
// Width-suffixed aliases for MSVC's 32-bit interlocked intrinsics, which are
// declared in terms of `long`. The suffix variants (`_nf` = no fence, `_acq`,
// `_rel`) forward to the correspondingly-ordered intrinsics where available.
#define _InterlockedCompareExchange32(obj, value, exp) _InterlockedCompareExchange((long*)obj, value, exp)
#define _InterlockedCompareExchange32_nf(obj, value, exp) _InterlockedCompareExchange_nf((long*)obj, value, exp)
#define _InterlockedCompareExchange32_acq(obj, value, exp) _InterlockedCompareExchange_acq((long*)obj, value, exp)
#define _InterlockedCompareExchange32_rel(obj, value, exp) _InterlockedCompareExchange_rel((long*)obj, value, exp)
#define _InterlockedExchange32(obj, value) _InterlockedExchange((long*)obj, value)
#define _InterlockedExchange32_nf(obj, value) _InterlockedExchange_nf((long*)obj, value)
#define _InterlockedExchange32_acq(obj, value) _InterlockedExchange_acq((long*)obj, value)
#define _InterlockedExchange32_rel(obj, value) _InterlockedExchange_rel((long*)obj, value)
#define _InterlockedExchangeAdd32(obj, value) _InterlockedExchangeAdd((long*)obj, value)
#define _InterlockedExchangeAdd32_nf(obj, value) _InterlockedExchangeAdd_nf((long*)obj, value)
#define _InterlockedExchangeAdd32_acq(obj, value) _InterlockedExchangeAdd_acq((long*)obj, value)
#define _InterlockedExchangeAdd32_rel(obj, value) _InterlockedExchangeAdd_rel((long*)obj, value)
#define _InterlockedAnd32(obj, value) _InterlockedAnd((long*)obj, value)
#define _InterlockedAnd32_nf(obj, value) _InterlockedAnd_nf((long*)obj, value)
#define _InterlockedAnd32_acq(obj, value) _InterlockedAnd_acq((long*)obj, value)
#define _InterlockedAnd32_rel(obj, value) _InterlockedAnd_rel((long*)obj, value)
#define _InterlockedOr32(obj, value) _InterlockedOr((long*)obj, value)
#define _InterlockedOr32_nf(obj, value) _InterlockedOr_nf((long*)obj, value)
#define _InterlockedOr32_acq(obj, value) _InterlockedOr_acq((long*)obj, value)
#define _InterlockedOr32_rel(obj, value) _InterlockedOr_rel((long*)obj, value)
#define _InterlockedXor32(obj, value) _InterlockedXor((long*)obj, value)
#define _InterlockedXor32_nf(obj, value) _InterlockedXor_nf((long*)obj, value)
#define _InterlockedXor32_acq(obj, value) _InterlockedXor_acq((long*)obj, value)
#define _InterlockedXor32_rel(obj, value) _InterlockedXor_rel((long*)obj, value)
// Use cmp_xchg on x86 to emulate 64 bit exchange and alu ops
#if defined(_M_IX86)
// 32-bit x86 lacks native 64-bit interlocked exchange/add/and/or/xor
// intrinsics; only the 64-bit compare-exchange exists, so the missing
// operations are synthesized below with a CAS loop.
#undef _InterlockedExchange64
#undef _InterlockedExchangeAdd64
#undef _InterlockedOr64
#undef _InterlockedAnd64
#undef _InterlockedXor64
// Generates a function `_name(obj, value)` that repeatedly attempts
// CAS(obj, <new value expression>, previous) until it lands, and returns the
// previous value (matching the interlocked-intrinsic contract). Inside the
// expression, `p1` names the previously observed value.
#define detail_CAS_OP(_name, ...) \
static __forceinline __int64 _name(__int64* obj, __int64 value) \
{ \
__int64 p1, p2 = *obj; \
do { p1 = p2; p2 = _InterlockedCompareExchange64(obj, (__VA_ARGS__), p1); } while (p1 != p2); \
return p1; \
}
detail_CAS_OP(_InterlockedExchange64, value);
detail_CAS_OP(_InterlockedExchangeAdd64, p1 + value);
detail_CAS_OP(_InterlockedOr64, p1 | value);
detail_CAS_OP(_InterlockedAnd64, p1 & value);
detail_CAS_OP(_InterlockedXor64, p1 ^ value);
#undef detail_CAS_OP
#endif

View File

@@ -0,0 +1,281 @@
#pragma once
#include <type_traits>
#include <limits>
#include "Internal/TypeTraits.h"
#include "Internal/Algorithm.inl.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
namespace Algorithm
{
// Bit-scan and popcount entry points. The implementations are
// compiler-specific and are pulled in at the bottom of this header; the
// size_t overloads exist for platforms where size_t is a distinct type from
// the fixed-width integers, and dispatch by width via enable_if.
// Index of the most significant bit in a 32bit mask. Returns -1 if no bits are set.
inline int HighestBit(uint32_t value);
// Index of the most significant bit in a 32bit mask of size_t value. Returns -1 if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
inline int HighestBit(T value) { return HighestBit(static_cast<uint32_t>(value)); }
// Index of the most significant bit in a 64bit mask. Returns -1 if no bits are set.
inline int HighestBit(uint64_t value);
// Index of the most significant bit in a 64bit mask of size_t value. Returns -1 if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
inline int HighestBit(T value) { return HighestBit(static_cast<uint64_t>(value)); }
// Index of the most significant bit in a 32bit mask. Unspecified result if no bits are set.
inline int HighestBitNonZero(uint32_t value);
// Index of the most significant bit in a 32bit mask of size_t value. Unspecified result if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
inline int HighestBitNonZero(T value) { return HighestBitNonZero(static_cast<uint32_t>(value)); }
// Index of the most significant bit in a 64bit mask. Unspecified result if no bits are set.
inline int HighestBitNonZero(uint64_t value);
// Index of the most significant bit in a 64bit mask of size_t value. Unspecified result if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
inline int HighestBitNonZero(T value) { return HighestBitNonZero(static_cast<uint64_t>(value)); }
// Index of the least significant bit in a 32bit mask. Returns -1 if no bits are set.
inline int LowestBit(uint32_t value);
// Index of the least significant bit in a 32bit mask of size_t value. Returns -1 if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
inline int LowestBit(T value) { return LowestBit(static_cast<uint32_t>(value)); }
// Index of the least significant bit in a 64bit mask. Returns -1 if no bits are set.
inline int LowestBit(uint64_t value);
// Index of the least significant bit in a 64bit mask of size_t value. Returns -1 if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
inline int LowestBit(T value) { return LowestBit(static_cast<uint64_t>(value)); }
// Index of the least significant bit in a 32bit mask. Unspecified result if no bits are set.
inline int LowestBitNonZero(uint32_t value);
// Index of the least significant bit in a 32bit mask of size_t value. Unspecified result if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
inline int LowestBitNonZero(T value) { return LowestBitNonZero(static_cast<uint32_t>(value)); }
// Index of the least significant bit in a 64bit mask. Unspecified result if no bits are set.
inline int LowestBitNonZero(uint64_t value);
// Index of the least significant bit in a 64bit mask of size_t value. Unspecified result if no bits are set.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
inline int LowestBitNonZero(T value) { return LowestBitNonZero(static_cast<uint64_t>(value)); }
// Returns number of set bits in a 64 bit mask.
inline int BitsInMask(uint64_t mask);
// Returns number of set bits in a 32 bit mask.
inline int BitsInMask(uint32_t mask);
// Returns number of set bits in a 16 bit mask.
inline int BitsInMask(uint16_t mask);
// Returns number of set bits in a 8 bit mask.
inline int BitsInMask(uint8_t mask);
// Number of set bits (population count) in an array of known size.
// Using Robert Harley and David Seal's algorithm from Hacker's Delight,
// variant that does 4 words in a loop iteration.
// http://www.hackersdelight.org/revisions.pdf
// http://www.hackersdelight.org/HDcode/newCode/pop_arrayHS.cc
template<typename WordT, int WordCount>
inline int BitsInArray(const WordT* data)
{
// Carry-save adder step: compresses the three input words a, b, c into a
// (high, low) pair of bit-count words.
#define HarleySealCSAStep(h, l, a, b, c) {\
WordT u = a ^ b; \
h = (a & b) | (u & c); l = u ^ c; \
}
WordT ones, twos, twosA, twosB, fours;
int i = 0;
int tot = 0;
twos = ones = 0;
// Main loop: fold four words per iteration into the ones/twos accumulators;
// only the "fours" overflow word needs a popcount each round.
for (; i <= WordCount - 4; i = i + 4)
{
HarleySealCSAStep(twosA, ones, ones, data[i], data[i + 1])
HarleySealCSAStep(twosB, ones, ones, data[i + 2], data[i + 3])
HarleySealCSAStep(fours, twos, twos, twosA, twosB)
tot = tot + BitsInMask(fours);
}
// Weight the accumulators by the number of original bits each represents.
tot = 4 * tot + 2 * BitsInMask(twos) + BitsInMask(ones);
for (; i < WordCount; i++) // Simply add in the last
tot = tot + BitsInMask(data[i]); // 0 to 3 elements.
return tot;
#undef HarleySealCSAStep
}
// True when either integer evenly divides the other.
// Zero operands are never considered multiples; rejecting them up front also
// keeps the modulo operations below free of division by zero.
template<typename T>
constexpr inline bool AreIntegersMultiple(T a, T b)
{
    static_assert(std::is_integral<T>::value, "AreIntegersMultiple requires integral types.");
    return (a == 0 || b == 0)
        ? false
        : ((a % b) == 0 || (b % a) == 0);
}
// Checks if value is a power-of-two.
// A power of two has exactly one bit set, so clearing the lowest set bit
// (x & (x - 1)) must yield zero. Note: an input of zero also passes this test.
template<typename T>
constexpr inline bool IsPowerOfTwo(T value)
{
    static_assert(std::is_integral<T>::value, "IsPowerOfTwo works only with an integral type.");
    // Work in the unsigned representation so the subtraction is well defined
    // for negative inputs of signed types.
    using UnsignedValue = typename std::make_unsigned<T>::type;
    return (static_cast<UnsignedValue>(value) & (static_cast<UnsignedValue>(value) - 1)) == 0;
}
// Returns the next power-of-two of a 32bit number or the current value if it is a power two.
// Implementation: subtract one, then repeatedly fold the value onto itself
// shifted right (detail::LogicalOrRShiftOp — presumably v | (v >> n); see
// Internal/Algorithm.inl.h) so the highest set bit smears into every lower
// position, then add one. Written as a single expression to remain a valid
// C++11 constexpr function. NOTE: an input of 0 wraps and yields 0.
constexpr inline uint32_t CeilPowerOfTwo(uint32_t value)
{
return detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(value - 1, 16),
8),
4),
2),
1) + 1;
}
// Returns the next power-of-two of a 32bit number of size_t value, or the current value if it is a power two.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 4, bool>::type = 0>
constexpr inline uint32_t CeilPowerOfTwo(T value) { return CeilPowerOfTwo(static_cast<uint32_t>(value)); }
// Returns the next power-of-two of a 64bit number or the current value if it is a power two.
// Same bit-smearing scheme as the 32-bit overload, with one extra fold for
// the upper 32 bits.
constexpr inline uint64_t CeilPowerOfTwo(uint64_t value)
{
return detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(
detail::LogicalOrRShiftOp(value - 1, 32),
16),
8),
4),
2),
1) + 1;
}
// Returns the next power-of-two of a 64bit number of size_t value, or the current value if it is a power two.
template<typename T, typename std::enable_if<std::is_same<size_t, T>::value && sizeof(T) == 8, bool>::type = 0>
constexpr inline uint64_t CeilPowerOfTwo(T value) { return CeilPowerOfTwo(static_cast<uint64_t>(value)); }
// Returns the closest power-of-two of an unsigned integral number.
// Compares the distance to CeilPowerOfTwo(value) and to the power of two
// below it (CeilPowerOfTwo(value) >> 1); ties round up. The repeated calls
// instead of a local variable keep the function constexpr under C++11.
template<typename T>
constexpr inline T RoundPowerOfTwo(T value)
{
static_assert(std::is_unsigned<T>::value, "RoundPowerOfTwo works only with an unsigned integral type.");
return (value - (CeilPowerOfTwo(value) >> 1) < CeilPowerOfTwo(value) - value) ? CeilPowerOfTwo(value) >> 1 : CeilPowerOfTwo(value);
}
// Returns the next value aligned to `alignment`, or the current value if it is already aligned.
// `alignment` is required to be a power of two value or the result is undefined. Zero `alignment` returns zero.
// Classic round-up-then-mask: add (alignment - 1) and clear the low bits.
// The arithmetic is done in the unsigned representation of T so negative
// inputs of signed types are well defined, then cast back to T.
template<typename T>
constexpr inline T CeilAligned(T value, uint64_t alignment)
{
    static_assert(std::is_integral<T>::value, "CeilAligned works only with an integral type.");
    using UnsignedT = typename std::make_unsigned<T>::type;
    return static_cast<T>((static_cast<UnsignedT>(value) + (alignment - 1)) & ~(alignment - 1));
}
// Returns true if addition of two given operands leads to an integer overflow.
// Unsigned addition a + b wraps exactly when b exceeds the headroom left
// above a, i.e. b > max - a.
template<typename T>
constexpr inline bool DoesAdditionOverflow(T a, T b)
{
    static_assert(std::is_unsigned<T>::value, "Overflow checks apply only work on unsigned integral types.");
    return b > std::numeric_limits<T>::max() - a;
}
// Returns true if multiplication of two given operands leads to an integer overflow.
// Unsigned multiplication a * b wraps exactly when a exceeds max / b; a zero
// b can never overflow (and must not be used as a divisor).
template<typename T>
constexpr inline bool DoesMultiplicationOverflow(T a, T b)
{
    static_assert(std::is_unsigned<T>::value, "Overflow checks apply only work on unsigned integral types.");
    return b != 0 && a > std::numeric_limits<T>::max() / b;
}
// Clamp
//
// This function can be used with different types - `value` vs. `lo`, `hi`.
// If `lo` is larger than `hi` this function has undefined behavior.
//
// Return: clamped `value` of the same type as `lo`, `hi`.
//
COMPILER_WARNINGS_PUSH
#if COMPILER_MSVC
// C4756 ("overflow in constant arithmetic") can fire on the comparisons below
// when this template is instantiated with constant extreme values.
COMPILER_WARNINGS_DISABLE(4756)
#endif
// Primary overload: enabled when the comparison between `value` and the
// bounds is sign-safe (same signedness, or at least one side non-integral).
template<typename RT, typename VT, typename std::enable_if<
baselib::is_of_same_signedness<RT, VT>::value
|| !std::is_integral<RT>::value
|| !std::is_integral<VT>::value
, bool>::type = 0>
inline RT Clamp(VT value, RT lo, RT hi)
{
if (value < lo) return lo;
if (value > hi) return hi;
return static_cast<RT>(value);
}
COMPILER_WARNINGS_POP
// Mixed signedness: signed `value` clamped to an unsigned range. Negative
// values clamp to `lo`; otherwise re-dispatch using the unsigned
// representation of `value` so the comparisons are well defined.
template<typename RT, typename VT, typename std::enable_if<
std::is_integral<RT>::value && std::is_unsigned<RT>::value &&
std::is_integral<VT>::value && std::is_signed<VT>::value
, bool>::type = 0>
inline RT Clamp(VT value, RT lo, RT hi)
{
if (value < 0)
return lo;
using UnsignedVT = typename std::make_unsigned<VT>::type;
return Clamp(static_cast<UnsignedVT>(value), lo, hi);
}
// Mixed signedness: unsigned `value` clamped to a signed range. A negative
// `hi` means the whole range is below any unsigned value; a negative `lo`
// is raised to 0 so both bounds can be compared in unsigned space.
template<typename RT, typename VT, typename std::enable_if<
std::is_integral<RT>::value && std::is_signed<RT>::value &&
std::is_integral<VT>::value && std::is_unsigned<VT>::value
, bool>::type = 0>
inline RT Clamp(VT value, RT lo, RT hi)
{
if (hi < 0)
return hi;
if (lo < 0)
lo = 0;
using UnsignedRT = typename std::make_unsigned<RT>::type;
return static_cast<RT>(Clamp(value, static_cast<UnsignedRT>(lo), static_cast<UnsignedRT>(hi)));
}
// Clamp `value` by lowest and highest value of RT.
//
// Return: clamped `value` of the type RT.
//
// Enabled when RT and VT do not both have an infinity representation.
template<typename RT, typename VT, typename std::enable_if<
!(std::numeric_limits<RT>::has_infinity && std::numeric_limits<VT>::has_infinity)
, bool>::type = 0>
inline RT ClampToType(VT value)
{
return Clamp(value, std::numeric_limits<RT>::lowest(), std::numeric_limits<RT>::max());
}
// Clamp `value` by lowest and highest value of RT.
//
// This function is guaranteed to only return infinity values if the source value was already an infinity number.
//
// Return: clamped `value` of the type RT.
//
// Infinities are passed through explicitly before clamping to the finite
// lowest/max bounds of RT.
template<typename RT, typename VT, typename std::enable_if<
(std::numeric_limits<RT>::has_infinity && std::numeric_limits<VT>::has_infinity)
, bool>::type = 0>
inline RT ClampToType(VT value)
{
if (value == std::numeric_limits<VT>::infinity() || value == -std::numeric_limits<VT>::infinity())
return static_cast<RT>(value);
return Clamp(value, std::numeric_limits<RT>::lowest(), std::numeric_limits<RT>::max());
}
}
}
}
// Pull in the per-compiler implementations of the bit-scan/popcount functions
// declared above.
#if COMPILER_MSVC
#include "Internal/Compiler/Msvc/AlgorithmMsvc.inl.h"
#elif COMPILER_GCC || COMPILER_CLANG
#include "Internal/Compiler/ClangOrGcc/AlgorithmClangOrGcc.inl.h"
#else
#error "Unknown Compiler"
#endif

View File

@@ -0,0 +1,449 @@
#pragma once
#include "../C/Baselib_Atomic.h"
#include "Internal/TypeTraits.h"
// Note that aligning by type is not possible with the C compatible COMPILER_ALIGN_AS as MSVC's own alignment attribute does not allow evaluation of sizeof
// ALIGN_ATOMIC(T) expands to an alignas matching the type's size;
// ALIGNED_ATOMIC(T) additionally emits the type itself for use in
// declarations (e.g. `ALIGNED_ATOMIC(int64_t) counter;`).
#define ALIGN_ATOMIC(TYPE_) alignas(sizeof(TYPE_))
#define ALIGNED_ATOMIC(TYPE_) ALIGN_ATOMIC(TYPE_) TYPE_
// Atomic interface that sticks closely to std::atomic
// Major differences:
// * free functions that operate on types other than baselib::atomic
// * baselib::atomic allows access to its internal value
// * no zero initialization on baselib::atomic
// * no single parameter versions of compare_exchange
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// Memory order selectors. Each ordering is a distinct enum type so the
// desired ordering can be picked by overload resolution at compile time;
// the numeric values mirror std::memory_order.
enum memory_order_relaxed_t { memory_order_relaxed = 0 }; // Equal to std::memory_order_relaxed
enum memory_order_acquire_t { memory_order_acquire = 2 }; // Equal to std::memory_order_acquire
enum memory_order_release_t { memory_order_release = 3 }; // Equal to std::memory_order_release
enum memory_order_acq_rel_t { memory_order_acq_rel = 4 }; // Equal to std::memory_order_acq_rel
enum memory_order_seq_cst_t { memory_order_seq_cst = 5 }; // Equal to std::memory_order_seq_cst
// Implementation details: compile-time validation helpers. The fallback
// overloads further below land here when no generated overload matches and
// emit readable diagnostics for unsupported type / memory-order combinations.
namespace detail
{
// is_any<T, Ts...>: true when T is the same type as any of Ts.
template<typename T, typename ... Rest>
struct is_any : std::false_type {};
template<typename T, typename First>
struct is_any<T, First> : std::is_same<T, First> {};
template<typename T, typename First, typename ... Rest>
struct is_any<T, First, Rest...>
: std::integral_constant<bool, std::is_same<T, First>::value || is_any<T, Rest...>::value>
{};
// Operands must be trivially copyable (they are shuffled around with plain
// loads/stores) and no larger than two pointers (the widest supported
// atomic access).
#define TEST_ATOMICS_PREREQUISITES(_TYPE) \
static_assert(baselib::is_trivially_copyable<_TYPE>::value, "atomic operation operands needs to be trivially copyable"); \
static_assert(sizeof(_TYPE) <= sizeof(void*) * 2, "atomic operation operands need to be smaller or equal than two pointers");
// Intentionally declared but never defined: referencing it keeps invalid
// overloads well-formed while guaranteeing a diagnostic via the asserts.
template<typename T> static inline T fail();
// Rejects the instantiation unless MemoryOrder is one of AllowedMemoryOrders.
template<typename T, typename MemoryOrder, typename ... AllowedMemoryOrders> static inline T fail_prerequisites()
{
TEST_ATOMICS_PREREQUISITES(T);
static_assert(is_any<MemoryOrder, AllowedMemoryOrders...>::value, "the specified memory ordering is invalid for this atomic operation");
return fail<T>();
}
// Validates the (success, failure) memory-order pair of a compare-exchange.
template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure> static inline T fail_prerequisites_cmpxchg()
{
TEST_ATOMICS_PREREQUISITES(T);
static_assert(
// fail: relaxed, success: relaxed/acquire/release/seq_cst
(std::is_same<MemoryOrderFailure, baselib::memory_order_relaxed_t>::value &&
is_any<MemoryOrderSuccess, baselib::memory_order_relaxed_t, baselib::memory_order_acquire_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>::value) ||
// fail: acquire, success acquire/release/seq_cst
// (fixed: this clause previously tested memory_order_relaxed_t again —
// a copy-paste of the clause above — so it never matched an acquire
// failure ordering as the comment intends)
(std::is_same<MemoryOrderFailure, baselib::memory_order_acquire_t>::value &&
is_any<MemoryOrderSuccess, baselib::memory_order_acquire_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>::value) ||
// fail: seq_cst, success: seq_cst
(std::is_same<MemoryOrderSuccess, baselib::memory_order_seq_cst_t>::value && std::is_same<MemoryOrderFailure, baselib::memory_order_seq_cst_t>::value),
"the specified combination of memory ordering is invalid for compare exchange operations");
return fail<T>();
}
// ALU operations additionally require an integral operand; every memory
// ordering is accepted.
template<typename T, typename MemoryOrder> static inline T fail_prerequisites_alu()
{
static_assert(std::is_integral<T>::value, "operands of arithmetic atomic operations need to be integral");
return fail_prerequisites<T, MemoryOrder,
baselib::memory_order_relaxed_t,
baselib::memory_order_acquire_t,
baselib::memory_order_release_t,
baselib::memory_order_acq_rel_t,
baselib::memory_order_seq_cst_t>();
}
}
// MACRO generated impl
// re-directs to Baselib_atomic_ API
// ----------------------------------------------------------------------------------------------------------------------------------
// Fence for a single memory order.
#define detail_THREAD_FENCE(order, ...) \
static FORCE_INLINE void atomic_thread_fence(memory_order_##order##_t order) \
{ \
return Baselib_atomic_thread_fence_##order(); \
}
// Load for trivial types of exactly bits/8 bytes.
#define detail_LOAD(op, order, id, bits, ...) \
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
static FORCE_INLINE T atomic_load_explicit(const T& obj, memory_order_##order##_t order) \
{ \
T ret; \
Baselib_atomic_load_##id##_##order##_v(&obj, &ret); \
return ret; \
}
// 128-bit load: needs a const_cast because the underlying implementation is
// compare-exchange based and therefore takes a mutable pointer.
#define detail_LOAD128(op, order, id, bits, ...) \
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
static FORCE_INLINE T atomic_load_explicit(const T& obj, memory_order_##order##_t order) \
{ \
T ret; \
Baselib_atomic_load_##id##_##order##_v(const_cast<T*>(&obj), &ret); \
return ret; \
}
// Store for trivial types of exactly bits/8 bytes.
#define detail_STORE(op, order, id, bits, ...) \
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
static FORCE_INLINE void atomic_store_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order)\
{ \
return Baselib_atomic_store_##id##_##order##_v(&obj, &value); \
}
// Exchange-style ops (store new value, return previous) for trivial types.
#define detail_LOAD_STORE(op, order, id, bits, ...) \
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
static FORCE_INLINE T atomic_##op##_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order) \
{ \
T ret; \
Baselib_atomic_##op##_##id##_##order##_v(&obj, &value, &ret); \
return ret; \
}
// Arithmetic/logic fetch-ops, restricted to integral types of the exact width.
#define detail_ALU(op, order, id, bits, ...) \
template<typename T, typename std::enable_if<baselib::is_integral_of_size<T, bits/8>::value, int>::type = 0> \
static FORCE_INLINE T atomic_##op##_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order) \
{ \
T ret; \
Baselib_atomic_##op##_##id##_##order##_v(&obj, &value, &ret); \
return ret; \
}
// Compare-exchange with distinct success/failure orderings; `expected` is
// updated by the underlying C API on failure.
#define detail_CMP_XCHG(op, order1, order2, id, bits, ...) \
template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
static FORCE_INLINE bool atomic_##op##_explicit(T& obj, \
typename std::common_type<T>::type& expected, \
typename std::common_type<T>::type desired, \
memory_order_##order1##_t order_success, \
memory_order_##order2##_t order_failure) \
{ \
return Baselib_atomic_##op##_##id##_##order1##_##order2##_v(&obj, &expected, &desired); \
}
// Expands to nothing; used for op/width combinations with no implementation.
#define detail_NOT_SUPPORTED(...)
Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
detail_THREAD_FENCE
)
// Generate the full overload set for every integer width and memory order.
Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(
detail_LOAD, // load
detail_STORE, // store
detail_ALU, // add
detail_ALU, // and
detail_ALU, // or
detail_ALU, // xor
detail_LOAD_STORE, // exchange
detail_CMP_XCHG, // compare_exchange_weak
detail_CMP_XCHG // compare_exchange_strong
)
#if PLATFORM_ARCH_64
// 128bit atomics
Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
detail_LOAD128, // load
detail_STORE, // store
detail_NOT_SUPPORTED, // add
detail_NOT_SUPPORTED, // and
detail_NOT_SUPPORTED, // or
detail_NOT_SUPPORTED, // xor
detail_LOAD_STORE, // exchange
detail_CMP_XCHG, // compare_exchange_weak
detail_CMP_XCHG, // compare_exchange_strong
128, 128)
#endif
#undef detail_THREAD_FENCE
#undef detail_LOAD128
#undef detail_LOAD
#undef detail_STORE
#undef detail_LOAD_STORE
#undef detail_ALU
#undef detail_CMP_XCHG
#undef detail_NOT_SUPPORTED
// Subtraction is implemented on top of atomic_fetch_add: subtracting `value`
// equals adding its wrap-around negation. `0 - value` is used rather than
// unary minus so the expression stays well defined for unsigned operands.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_fetch_sub_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
    using value_type = typename std::common_type<T>::type;
    const value_type negated = value_type(0) - value;
    return atomic_fetch_add_explicit(obj, negated, order);
}
// API documentation and default fallback for non-matching types
// ----------------------------------------------------------------------------------------------------------------------
// These templates are only selected when no macro-generated overload above
// matches (unsupported type or memory order); they never execute and exist
// solely to fire the static_asserts in detail::fail_prerequisites*.
// Atomically loads `obj`. Allowed orders: relaxed, acquire, seq_cst.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_load_explicit(const T& obj, MemoryOrder order)
{
return detail::fail_prerequisites<T, MemoryOrder, baselib::memory_order_relaxed_t, baselib::memory_order_acquire_t, baselib::memory_order_seq_cst_t>();
}
// Atomically stores `value` into `obj`. Allowed orders: relaxed, release, seq_cst.
template<typename T, typename MemoryOrder>
static FORCE_INLINE void atomic_store_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
detail::fail_prerequisites<T, MemoryOrder, baselib::memory_order_relaxed_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>();
}
// Atomically adds `value` to `obj` and returns the previous value.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_fetch_add_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
return detail::fail_prerequisites_alu<T, MemoryOrder>();
}
// Atomically ANDs `value` into `obj` and returns the previous value.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_fetch_and_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
return detail::fail_prerequisites_alu<T, MemoryOrder>();
}
// Atomically ORs `value` into `obj` and returns the previous value.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_fetch_or_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
return detail::fail_prerequisites_alu<T, MemoryOrder>();
}
// Atomically XORs `value` into `obj` and returns the previous value.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_fetch_xor_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
return detail::fail_prerequisites_alu<T, MemoryOrder>();
}
// Atomically replaces `obj` with `value` and returns the previous value.
template<typename T, typename MemoryOrder>
static FORCE_INLINE T atomic_exchange_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
{
return detail::fail_prerequisites<T, MemoryOrder>();
}
// Atomically compares `obj` with `expected` and, on match, stores `desired`.
// On mismatch `expected` receives the observed value. The weak form may fail
// spuriously.
template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure>
static FORCE_INLINE bool atomic_compare_exchange_weak_explicit(T& obj,
typename std::common_type<T>::type& expected,
typename std::common_type<T>::type desired,
MemoryOrderSuccess order_success,
MemoryOrderFailure order_failure)
{
detail::fail_prerequisites_cmpxchg<T, MemoryOrderSuccess, MemoryOrderFailure>();
return false;
}
// Strong form of the compare-exchange above: does not fail spuriously.
template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure>
static FORCE_INLINE bool atomic_compare_exchange_strong_explicit(T& obj,
typename std::common_type<T>::type& expected,
typename std::common_type<T>::type desired,
MemoryOrderSuccess order_success,
MemoryOrderFailure order_failure)
{
detail::fail_prerequisites_cmpxchg<T, MemoryOrderSuccess, MemoryOrderFailure>();
return false;
}
// default memory order (memory_order_seq_cst)
// ----------------------------------------------------------------------------------------------------------------------
// Convenience wrappers mirroring the std::atomic free functions: each simply
// forwards to its _explicit counterpart with sequentially-consistent ordering.
template<typename T>
static FORCE_INLINE T atomic_load(const T& obj)
{
return atomic_load_explicit(obj, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE void atomic_store(T& obj, typename std::common_type<T>::type value)
{
return atomic_store_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE T atomic_fetch_add(T& obj, typename std::common_type<T>::type value)
{
return atomic_fetch_add_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE T atomic_fetch_sub(T& obj, typename std::common_type<T>::type value)
{
return atomic_fetch_sub_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE T atomic_fetch_and(T& obj, typename std::common_type<T>::type value)
{
return atomic_fetch_and_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE T atomic_fetch_or(T& obj, typename std::common_type<T>::type value)
{
return atomic_fetch_or_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE T atomic_fetch_xor(T& obj, typename std::common_type<T>::type value)
{
return atomic_fetch_xor_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE T atomic_exchange(T& obj, typename std::common_type<T>::type value)
{
return atomic_exchange_explicit(obj, value, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE bool atomic_compare_exchange_weak(T& obj,
typename std::common_type<T>::type& expected,
typename std::common_type<T>::type desired)
{
return atomic_compare_exchange_weak_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
}
template<typename T>
static FORCE_INLINE bool atomic_compare_exchange_strong(T& obj,
typename std::common_type<T>::type& expected,
typename std::common_type<T>::type desired)
{
return atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
}
// Operations shared by all baselib::atomic<T> instantiations. Unlike
// std::atomic, the underlying storage `obj` is public, and default
// construction performs no zero initialization (see header notes above).
template<typename T>
struct atomic_common
{
using value_type = T;
TEST_ATOMICS_PREREQUISITES(T);
// Storage, aligned to sizeof(T) via ALIGNED_ATOMIC; intentionally public.
ALIGNED_ATOMIC(T) obj;
FORCE_INLINE atomic_common() = default;
// Initializes atomic with a given value. Initialization is not atomic!
FORCE_INLINE atomic_common(T value)
{
obj = value;
}
// Implicit conversion and assignment use seq_cst, matching std::atomic.
FORCE_INLINE operator T() const { return atomic_load_explicit(obj, memory_order_seq_cst); }
FORCE_INLINE T operator=(T value) { atomic_store_explicit(obj, value, memory_order_seq_cst); return value; }
// Load with an optional memory order tag (defaults to seq_cst).
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T load(TMemoryOrder order = memory_order_seq_cst) const
{
return atomic_load_explicit(obj, order);
}
// Store with an optional memory order tag (defaults to seq_cst).
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE void store(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_store_explicit(obj, value, order);
}
// Replaces the stored value and returns the previous one.
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T exchange(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_exchange_explicit(obj, value, order);
}
// Compare-exchange; note there is deliberately no single-order overload
// (see header notes) — pass both success and failure orders, or none.
template<typename TMemoryOrderSuccess, typename TMemoryOrderFailure>
FORCE_INLINE bool compare_exchange_weak(T& expected, T desired, TMemoryOrderSuccess order_success, TMemoryOrderFailure order_failure)
{
return atomic_compare_exchange_weak_explicit(obj, expected, desired, order_success, order_failure);
}
FORCE_INLINE bool compare_exchange_weak(T& expected, T desired)
{
return atomic_compare_exchange_weak_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
}
template<typename TMemoryOrderSuccess, typename TMemoryOrderFailure>
FORCE_INLINE bool compare_exchange_strong(T& expected, T desired, TMemoryOrderSuccess order_success, TMemoryOrderFailure order_failure)
{
return atomic_compare_exchange_strong_explicit(obj, expected, desired, order_success, order_failure);
}
FORCE_INLINE bool compare_exchange_strong(T& expected, T desired)
{
return atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
}
};
template<typename T, bool IsIntegral>
struct atomic_base {};
// Atomic type for integral types.
// Extends atomic_common<T> (load/store/exchange/compare-exchange) with the
// read-modify-write arithmetic and bitwise operations.
template<typename T>
struct atomic_base<T, true> : atomic_common<T>
{
using atomic_common<T>::atomic_common;
// Each fetch_* operation atomically applies the operation and returns the
// value held *before* the operation; ordering defaults to seq_cst.
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T fetch_add(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_fetch_add_explicit(atomic_common<T>::obj, value, order);
}
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T fetch_sub(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_fetch_sub_explicit(atomic_common<T>::obj, value, order);
}
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T fetch_and(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_fetch_and_explicit(atomic_common<T>::obj, value, order);
}
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T fetch_or(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_fetch_or_explicit(atomic_common<T>::obj, value, order);
}
template<typename TMemoryOrder = memory_order_seq_cst_t>
FORCE_INLINE T fetch_xor(T value, TMemoryOrder order = memory_order_seq_cst)
{
return atomic_fetch_xor_explicit(atomic_common<T>::obj, value, order);
}
// Post-increment/decrement return the previous value; the pre- forms and
// the compound assignments return the updated value. All use seq_cst.
FORCE_INLINE T operator++(int) { return atomic_fetch_add_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst); }
FORCE_INLINE T operator--(int) { return atomic_fetch_sub_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst); }
FORCE_INLINE T operator++() { return atomic_fetch_add_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst) + T(1); }
FORCE_INLINE T operator--() { return atomic_fetch_sub_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst) - T(1); }
FORCE_INLINE T operator+=(T value) { return atomic_fetch_add_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) + value; }
FORCE_INLINE T operator-=(T value) { return atomic_fetch_sub_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) - value; }
FORCE_INLINE T operator&=(T value) { return atomic_fetch_and_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) & value; }
FORCE_INLINE T operator|=(T value) { return atomic_fetch_or_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) | value; }
FORCE_INLINE T operator^=(T value) { return atomic_fetch_xor_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) ^ value; }
};
// Atomic type for non-integral types.
// Exposes only the atomic_common<T> operations (load/store/exchange/
// compare-exchange); arithmetic fetch_* operations are intentionally absent.
template<typename T>
struct atomic_base<T, false> : atomic_common<T>
{
using atomic_common<T>::atomic_common;
};
// Public atomic<T>: selects the integral or non-integral atomic_base
// specialization at compile time and inherits its constructors.
template<typename T>
struct atomic : atomic_base<T, std::is_integral<T>::value>
{
using atomic_base<T, std::is_integral<T>::value>::atomic_base;
};
#undef TEST_ATOMICS_PREREQUISITES
}
}

View File

@@ -0,0 +1,98 @@
#pragma once
#include "Atomic.h"
#include "Semaphore.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// In parallel computing, a barrier is a type of synchronization
// method. A barrier for a group of threads or processes in the source
// code means any thread/process must stop at this point and cannot
// proceed until all other threads/processes reach this barrier.
//
// "Barrier (computer science)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/wiki/Barrier_(computer_science)
//
// For optimal performance, baselib::Barrier should be stored at a
// cache aligned memory location.
// Synchronizes a fixed-size group of threads: each thread blocks in
// Acquire() until all `threads_num` threads have arrived.
class Barrier
{
public:
// non-copyable
Barrier(const Barrier& other) = delete;
Barrier& operator=(const Barrier& other) = delete;
// non-movable (strictly speaking not needed but listed to signal intent)
Barrier(Barrier&& other) = delete;
Barrier& operator=(Barrier&& other) = delete;
// Creates a barrier with a set number of threads to synchronize.
// Once a set of threads enter a Barrier, the *same* set of threads
// must continue to use the Barrier - i.e. no additional threads may
// enter any of the Acquires. For example, it is *not* allowed to
// create a Barrier with threads_num=10, then send 30 threads into
// barrier.Acquire() with the expectation 3 batches of 10 will be
// released. However, once it is guaranteed that all threads have
// exited all of the Acquire invocations, it is okay to reuse the
// same barrier object with a different set of threads - for
// example, after Join() has been called on all participating
// threads and a new batch of threads is launched.
//
// \param threads_num Wait for this number of threads before letting all proceed.
explicit Barrier(uint16_t threads_num)
: m_threshold(threads_num), m_count(0)
{
}
// Block the current thread until the specified number of threads
// also reach this `Acquire()`.
void Acquire()
{
// If there are two Barrier::Acquire calls in a row, when the
// first Acquire releases, one thread may jump out of the gate
// so fast that it reaches the next Acquire and steals *another*
// semaphore slot, continuing past the *second* Acquire, before
// all threads have even left the first Acquire. So, we instead
// construct two semaphores and alternate between them to
// prevent this.
uint16_t previous_value = m_count.fetch_add(1, memory_order_relaxed);
BaselibAssert(previous_value < m_threshold * 2);
// If count is in range [0, m_threshold), use semaphore A.
// If count is in range [m_threshold, m_threshold * 2), use semaphore B.
bool useSemaphoreB = previous_value >= m_threshold;
Semaphore& semaphore = useSemaphoreB ? m_semaphoreB : m_semaphoreA;
// If (count % m_threshold) == (m_threshold - 1), then we're the last thread in the group, release the semaphore.
bool do_release = previous_value % m_threshold == m_threshold - 1;
if (do_release)
{
if (previous_value == m_threshold * 2 - 1)
{
// Note this needs to happen before the Release to avoid
// a race condition (if this thread yields right before
// the Release, but after the add, the invariant of
// previous_value < m_threshold * 2 may break for
// another thread)
m_count.fetch_sub(m_threshold * 2, memory_order_relaxed);
}
// Wake the other m_threshold - 1 threads parked on this semaphore.
semaphore.Release(m_threshold - 1);
}
else
{
semaphore.Acquire();
}
}
private:
Semaphore m_semaphoreA;
Semaphore m_semaphoreB;
// Number of participating threads; m_count cycles within [0, 2 * m_threshold).
uint16_t m_threshold;
atomic<uint16_t> m_count;
};
}
}

View File

@@ -0,0 +1,31 @@
#pragma once
#include "../C/Baselib_DynamicLibrary.h"
// alias for Baselib_DynamicLibrary_OpenUtf8
// C++ overload convenience: opens a dynamic library from a UTF-8 encoded path.
// Failures are reported through `errorState`.
static inline Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_Open(
const char* pathnameUtf8,
Baselib_ErrorState* errorState
)
{
return Baselib_DynamicLibrary_OpenUtf8(pathnameUtf8, errorState);
}
// alias for Baselib_DynamicLibrary_OpenUtf16
// C++ overload convenience: opens a dynamic library from a UTF-16 encoded path.
// Failures are reported through `errorState`.
static inline Baselib_DynamicLibrary_Handle Baselib_DynamicLibrary_Open(
const baselib_char16_t* pathnameUtf16,
Baselib_ErrorState* errorState
)
{
return Baselib_DynamicLibrary_OpenUtf16(pathnameUtf16, errorState);
}
// Two dynamic-library handles compare equal when they wrap the same
// underlying handle value.
static inline bool operator==(const Baselib_DynamicLibrary_Handle& a, const Baselib_DynamicLibrary_Handle& b)
{
    const bool sameUnderlyingHandle = (a.handle == b.handle);
    return sameUnderlyingHandle;
}
// Two dynamic-library handles compare unequal when they wrap different
// underlying handle values; exact complement of operator==.
static inline bool operator!=(const Baselib_DynamicLibrary_Handle& a, const Baselib_DynamicLibrary_Handle& b)
{
    const bool differentUnderlyingHandle = (a.handle != b.handle);
    return differentUnderlyingHandle;
}

View File

@@ -0,0 +1,50 @@
#pragma once
#include "CappedSemaphore.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
// decremented, or toggled) depending on programmer-defined conditions.
//
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
// resource becomes available.
//
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
//
// For optimal performance, baselib::BinarySemaphore should be stored at a cache aligned memory location.
// Thin facade over CappedSemaphore(1): at most one token can ever be pending.
class BinarySemaphore : private CappedSemaphore
{
public:
// Creates a binary semaphore synchronization primitive.
// Binary means the semaphore can at any given time have at most one token available for consumption.
//
// This is just an API facade for CappedSemaphore(1)
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
BinarySemaphore() : CappedSemaphore(1) {}
// Re-exported CappedSemaphore operations; semantics are unchanged.
using CappedSemaphore::Acquire;
using CappedSemaphore::TryAcquire;
using CappedSemaphore::TryTimedAcquire;
// Submit token to the semaphore.
// If threads are waiting the token is consumed before this function returns.
//
// When successful this function is guaranteed to emit a release barrier.
//
// \returns true if a token was submitted, false otherwise (meaning the BinarySemaphore already has a token)
inline bool Release()
{
return CappedSemaphore::Release(1) == 1;
}
};
}
}

View File

@@ -0,0 +1,112 @@
#pragma once
#include "../C/Baselib_CappedSemaphore.h"
#include "Time.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// In computer science, a semaphore is a variable or abstract data type used to control access to a common resource by multiple processes in a concurrent
// system such as a multitasking operating system. A semaphore is simply a variable. This variable is used to solve critical section problems and to achieve
// process synchronization in the multi processing environment. A trivial semaphore is a plain variable that is changed (for example, incremented or
// decremented, or toggled) depending on programmer-defined conditions.
//
// A useful way to think of a semaphore as used in the real-world system is as a record of how many units of a particular resource are available, coupled with
// operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the
// resource becomes available.
//
// "Semaphore (programming)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Semaphore_(programming)&oldid=872408126
//
// For optimal performance, baselib::CappedSemaphore should be stored at a cache aligned memory location.
// RAII C++ wrapper around the Baselib_CappedSemaphore C API.
class CappedSemaphore
{
public:
// non-copyable
CappedSemaphore(const CappedSemaphore& other) = delete;
CappedSemaphore& operator=(const CappedSemaphore& other) = delete;
// non-movable (strictly speaking not needed but listed to signal intent)
CappedSemaphore(CappedSemaphore&& other) = delete;
CappedSemaphore& operator=(CappedSemaphore&& other) = delete;
// Creates a capped counting semaphore synchronization primitive.
// Cap is the number of tokens that can be held by the semaphore when there is no contention.
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
CappedSemaphore(const uint16_t cap) : m_CappedSemaphoreData(Baselib_CappedSemaphore_Create(cap))
{
}
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, destructor will trigger an assert and may cause process abort.
~CappedSemaphore()
{
Baselib_CappedSemaphore_Free(&m_CappedSemaphoreData);
}
// Wait for semaphore token to become available
//
// This function is guaranteed to emit an acquire barrier.
inline void Acquire()
{
return Baselib_CappedSemaphore_Acquire(&m_CappedSemaphoreData);
}
// Try to consume a token and return immediately.
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// Return: true if token was consumed. false if not.
inline bool TryAcquire()
{
return Baselib_CappedSemaphore_TryAcquire(&m_CappedSemaphoreData);
}
// Wait for semaphore token to become available
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// TryTimedAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
// while TryTimedAcquire with a zero timeout may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// Arguments:
// - timeout: Time to wait for token to become available.
//
// Return: true if token was consumed. false if timeout was reached.
inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
{
return Baselib_CappedSemaphore_TryTimedAcquire(&m_CappedSemaphoreData, timeoutInMilliseconds.count());
}
// Submit tokens to the semaphore.
// If threads are waiting an equal amount of tokens are consumed before this function returns.
//
// When successful this function is guaranteed to emit a release barrier.
//
// \returns number of submitted tokens.
inline uint16_t Release(const uint16_t count)
{
return Baselib_CappedSemaphore_Release(&m_CappedSemaphoreData, count);
}
// Sets the semaphore token count to zero and release all waiting threads.
//
// When successful this function is guaranteed to emit a release barrier.
//
// Return: number of released threads.
inline uint32_t ResetAndReleaseWaitingThreads()
{
return Baselib_CappedSemaphore_ResetAndReleaseWaitingThreads(&m_CappedSemaphoreData);
}
private:
Baselib_CappedSemaphore m_CappedSemaphoreData;
};
}
}

View File

@@ -0,0 +1,96 @@
#pragma once
#include "Time.h"
#include "Lock.h"
#include <cstdint>
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/ConditionVariableData_FutexBased.inl.h"
#else
#include "Internal/ConditionVariableData_SemaphoreBased.inl.h"
#endif
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// Conceptually a condition variable is a queue of threads, associated with a monitor, on which a thread may wait for some condition to become true.
//
// Thus each condition variable c is associated with an assertion Pc. While a thread is waiting on a condition variable, that thread is not considered
// to occupy the monitor, and so other threads may enter the monitor to change the monitor's state. In most types of monitors, these other threads may
// signal the condition variable c to indicate that assertion Pc is true in the current state.
//
// "Monitor (synchronization)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Monitor_(synchronization)&oldid=914426020#Condition_variables_2
//
// For optimal performance, baselib::ConditionVariable should be stored at a cache aligned memory location.
// Condition variable bound to an external baselib::Lock.
// Implementation (Wait/TimedWait/Notify) is provided by the futex- or
// semaphore-based .inl.h included at the bottom of this header.
class ConditionVariable
{
public:
// non-copyable
ConditionVariable(const ConditionVariable& other) = delete;
ConditionVariable& operator=(const ConditionVariable& other) = delete;
// non-movable (strictly speaking not needed but listed to signal intent)
ConditionVariable(ConditionVariable&& other) = delete;
ConditionVariable& operator=(ConditionVariable&& other) = delete;
// Creates a condition variable synchronization primitive.
// The condition variable is permanently associated with `lock`, which must
// outlive this object.
ConditionVariable(Lock& lock) : m_Lock(lock)
{}
// Reclaim resources and memory held by the condition variable.
//
// If threads are waiting on the condition variable, destructor will trigger an assert and may cause process abort.
~ConditionVariable()
{
BaselibAssert(!m_Data.HasWaiters(), "Destruction is not allowed when there are still threads waiting on the condition variable.");
NotifyAll();
}
// Wait for the condition variable to become available.
//
// The lock must have been previously acquired.
// For the duration of the wait the lock is released and then re-acquired upon exit.
// This function is guaranteed to emit an acquire barrier.
inline void Wait();
// Wait for the condition variable to become available.
//
// The lock must have been previously acquired.
// For the duration of the wait the lock is released and then re-acquired upon exit.
// This function is guaranteed to emit an acquire barrier.
//
// TimedWait with a zero timeout is guaranteed to be a user space operation.
//
// \param timeoutInMilliseconds Time to wait for condition variable to become available.
// \returns true if the condition variable is available, false if timeout was reached.
inline bool TimedWait(const timeout_ms timeoutInMilliseconds);
// Wake up threads waiting for the condition variable.
//
// This function is guaranteed to emit a release barrier.
//
// \param count At most, `count` waiting threads will be notified, but never more than there are currently waiting.
inline void Notify(uint16_t count);
// Wake up all threads waiting for the condition variable.
//
// This function is guaranteed to emit a release barrier.
inline void NotifyAll()
{
Notify(std::numeric_limits<uint16_t>::max());
}
private:
Lock& m_Lock;
detail::ConditionVariableData m_Data;
};
}
}
#if PLATFORM_FUTEX_NATIVE_SUPPORT
#include "Internal/ConditionVariable_FutexBased.inl.h"
#else
#include "Internal/ConditionVariable_SemaphoreBased.inl.h"
#endif

View File

@@ -0,0 +1,70 @@
#pragma once
#include "../C/Baselib_CountdownTimer.h"
#include "Time.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// Small value-type wrapper around Baselib_CountdownTimer for tracking how
// much of a timeout remains.
class CountdownTimer
{
public:
//
// Create a countdown timer that already expired.
//
// Guaranteed to not sample the system timer.
//
static CountdownTimer InitializeExpired()
{
return CountdownTimer();
}
//
// Create and start a countdown timer.
//
static CountdownTimer StartNew(const high_precision_clock::duration timeout)
{
return CountdownTimer(timeout);
}
//
// Get time left before timeout expires.
//
// This function is guaranteed to return zero once timeout expired.
// It is also guaranteed that this function will not return zero until timeout expires.
// Return the time left as a high precision duration.
//
high_precision_clock::duration GetTimeLeft() const
{
return high_precision_clock::duration_from_ticks(Baselib_CountdownTimer_GetTimeLeftInTicks(m_CountdownTimer));
}
//
// Get time left before timeout expires.
//
// This function is guaranteed to return zero once timeout expired.
// It is also guaranteed that this function will not return zero until timeout expires.
// Return the time left as a millisecond integer duration.
//
timeout_ms GetTimeLeftInMilliseconds() const
{
return timeout_ms(Baselib_CountdownTimer_GetTimeLeftInMilliseconds(m_CountdownTimer));
}
//
// Check if timeout has been reached.
//
bool TimeoutExpired() const
{
return Baselib_CountdownTimer_TimeoutExpired(m_CountdownTimer);
}
private:
// Zero-initialized timer state represents an already-expired timer.
CountdownTimer() : m_CountdownTimer{0, 0} {}
// Rounds the duration up to whole ticks (see ticks_from_duration_roundup).
CountdownTimer(const high_precision_clock::duration timeout) : m_CountdownTimer(Baselib_CountdownTimer_StartTicks(high_precision_clock::ticks_from_duration_roundup(timeout))) {}
Baselib_CountdownTimer m_CountdownTimer;
};
}
}

View File

@@ -0,0 +1,121 @@
#pragma once
#include "../C/Baselib_EventSemaphore.h"
#include "Time.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// In computer science, an event (also called event semaphore) is a type of synchronization mechanism that is used to indicate to waiting processes when a
// particular condition has become true.
// An event is an abstract data type with a boolean state and the following operations:
// * wait - when executed, causes the suspension of the executing process until the state of the event is set to true. If the state is already set to true has no effect.
// * set - sets the event's state to true, release all waiting processes.
// * clear - sets the event's state to false.
//
// "Event (synchronization primitive)", Wikipedia: The Free Encyclopedia
// https://en.wikipedia.org/w/index.php?title=Event_(synchronization_primitive)&oldid=781517732
//
// For optimal performance, baselib::EventSemaphore should be stored at a cache aligned memory location.
// RAII C++ wrapper around the Baselib_EventSemaphore C API.
class EventSemaphore
{
public:
// non-copyable
EventSemaphore(const EventSemaphore& other) = delete;
EventSemaphore& operator=(const EventSemaphore& other) = delete;
// non-movable (strictly speaking not needed but listed to signal intent)
EventSemaphore(EventSemaphore&& other) = delete;
EventSemaphore& operator=(EventSemaphore&& other) = delete;
// Creates an event semaphore synchronization primitive. Initial state of event is unset.
//
// If there are not enough system resources to create a semaphore, process abort is triggered.
EventSemaphore() : m_EventSemaphoreData(Baselib_EventSemaphore_Create())
{
}
// Reclaim resources and memory held by the semaphore.
// If threads are waiting on the semaphore, calling free may trigger an assert and may cause process abort.
~EventSemaphore()
{
Baselib_EventSemaphore_Free(&m_EventSemaphoreData);
}
// Try to acquire semaphore.
//
// When semaphore is acquired this function is guaranteed to emit an acquire barrier.
//
// \returns true if event is set, false otherwise.
COMPILER_WARN_UNUSED_RESULT
inline bool TryAcquire()
{
return Baselib_EventSemaphore_TryAcquire(&m_EventSemaphoreData);
}
// Acquire semaphore.
//
// This function is guaranteed to emit an acquire barrier.
inline void Acquire()
{
return Baselib_EventSemaphore_Acquire(&m_EventSemaphoreData);
}
// Try to acquire semaphore.
//
// If event is set this function returns true, otherwise the thread will wait for event to be set or for release to be called.
//
// When semaphore is acquired this function is guaranteed to emit an acquire barrier.
//
// TryTimedAcquire with a zero timeout differs from TryAcquire in that TryAcquire is guaranteed to be a user space operation
// while TryTimedAcquire with a zero timeout may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// \returns true if semaphore was acquired.
COMPILER_WARN_UNUSED_RESULT
inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
{
return Baselib_EventSemaphore_TryTimedAcquire(&m_EventSemaphoreData, timeoutInMilliseconds.count());
}
// Sets the event
//
// Setting the event will cause all waiting threads to wakeup. And will let all future acquiring threads through until Baselib_EventSemaphore_Reset is called.
// It is guaranteed that any thread waiting previously on the EventSemaphore will be woken up, even if the semaphore is immediately reset. (no lock stealing)
//
// Guaranteed to emit a release barrier.
inline void Set()
{
return Baselib_EventSemaphore_Set(&m_EventSemaphoreData);
}
// Reset event
//
// Resetting the event will cause all future acquiring threads to enter a wait state.
// Has no effect if the EventSemaphore is already in a reset state.
//
// Guaranteed to emit a release barrier.
inline void Reset()
{
return Baselib_EventSemaphore_Reset(&m_EventSemaphoreData);
}
// Reset event and release all waiting threads
//
// Resetting the event will cause all future acquiring threads to enter a wait state.
// If there were any threads waiting (i.e. the EventSemaphore was already in a release state) they will be released.
//
// Guaranteed to emit a release barrier.
inline void ResetAndRelease()
{
return Baselib_EventSemaphore_ResetAndReleaseWaitingThreads(&m_EventSemaphoreData);
}
private:
Baselib_EventSemaphore m_EventSemaphoreData;
};
}
}

View File

@@ -0,0 +1,104 @@
#pragma once
#include "../C/Baselib_HighCapacitySemaphore.h"
#include "Time.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// baselib::HighCapacitySemaphore is similar to baselib::Semaphore but allows for far greater token count.
// It is suitable to be used as resource counting semaphore.
// RAII C++ wrapper around the Baselib_HighCapacitySemaphore C API.
class HighCapacitySemaphore
{
public:
// non-copyable
HighCapacitySemaphore(const HighCapacitySemaphore& other) = delete;
HighCapacitySemaphore& operator=(const HighCapacitySemaphore& other) = delete;
// non-movable (strictly speaking not needed but listed to signal intent)
HighCapacitySemaphore(HighCapacitySemaphore&& other) = delete;
HighCapacitySemaphore& operator=(HighCapacitySemaphore&& other) = delete;
// This is the max number of tokens guaranteed to be held by the semaphore at
// any given point in time. Tokens submitted that exceed this value may silently
// be discarded.
enum : int64_t { MaxGuaranteedCount = Baselib_HighCapacitySemaphore_MaxGuaranteedCount };
// Creates a counting semaphore synchronization primitive.
// If there are not enough system resources to create a semaphore, process abort is triggered.
HighCapacitySemaphore() : m_SemaphoreData(Baselib_HighCapacitySemaphore_Create())
{
}
// Reclaim resources and memory held by the semaphore.
//
// If threads are waiting on the semaphore, destructor will trigger an assert and may cause process abort.
~HighCapacitySemaphore()
{
Baselib_HighCapacitySemaphore_Free(&m_SemaphoreData);
}
// Wait for semaphore token to become available
//
// This function is guaranteed to emit an acquire barrier.
inline void Acquire()
{
return Baselib_HighCapacitySemaphore_Acquire(&m_SemaphoreData);
}
// Try to consume a token and return immediately.
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// Return: true if token was consumed. false if not.
inline bool TryAcquire()
{
return Baselib_HighCapacitySemaphore_TryAcquire(&m_SemaphoreData);
}
// Wait for semaphore token to become available
//
// When successful this function is guaranteed to emit an acquire barrier.
//
// TryTimedAcquire with a zero timeout differs from TryAcquire() in that TryAcquire() is guaranteed to be a user space operation
// while TryTimedAcquire with a zero timeout may enter the kernel and cause a context switch.
//
// Timeout passed to this function may be subject to system clock resolution.
// If the system clock has a resolution of e.g. 16ms that means this function may exit with a timeout error 16ms earlier than originally scheduled.
//
// Arguments:
// - timeout: Time to wait for token to become available.
//
// Return: true if token was consumed. false if timeout was reached.
inline bool TryTimedAcquire(const timeout_ms timeoutInMilliseconds)
{
return Baselib_HighCapacitySemaphore_TryTimedAcquire(&m_SemaphoreData, timeoutInMilliseconds.count());
}
// Submit tokens to the semaphore.
//
// When successful this function is guaranteed to emit a release barrier.
//
// Increase the number of available tokens on the semaphore by `count`. Any waiting threads will be notified there are new tokens available.
// If count reach `Baselib_HighCapacitySemaphore_MaxGuaranteedCount` this function may silently discard any overflow.
inline void Release(uint32_t count)
{
return Baselib_HighCapacitySemaphore_Release(&m_SemaphoreData, count);
}
// Sets the semaphore token count to zero and release all waiting threads.
//
// When successful this function is guaranteed to emit a release barrier.
//
// Return: number of released threads.
inline uint64_t ResetAndReleaseWaitingThreads()
{
return Baselib_HighCapacitySemaphore_ResetAndReleaseWaitingThreads(&m_SemaphoreData);
}
private:
Baselib_HighCapacitySemaphore m_SemaphoreData;
};
}
}

View File

@@ -0,0 +1,16 @@
#pragma once
namespace baselib
{
BASELIB_CPP_INTERFACE
{
namespace Algorithm
{
namespace detail
{
// Smears set bits downward: returns value | (value >> shift).
// Chained with shifts 1, 2, 4, ... this is the classic building block for
// filling all bits below the highest set bit (e.g. power-of-two rounding).
template<typename T>
static FORCE_INLINE constexpr T LogicalOrRShiftOp(T value, int shift) { return value | (value >> shift); }
}
}
}
}

View File

@@ -0,0 +1,63 @@
#pragma once
namespace baselib
{
BASELIB_CPP_INTERFACE
{
namespace Algorithm
{
// Bit-scan and popcount helpers built on the GCC/Clang builtins.
// The *NonZero variants require value != 0 (the builtins are undefined for
// zero); the plain variants return -1 when value == 0.
inline int HighestBitNonZero(uint32_t value)
{
    // __builtin_clz counts leading zeros of a 32-bit value.
    const int leadingZeros = __builtin_clz(value);
    return 31 - leadingZeros;
}
inline int HighestBitNonZero(uint64_t value)
{
#if PLATFORM_ARCH_64
    const int leadingZeros = __builtin_clzll(value);
    return 63 - leadingZeros;
#else
    // 32-bit targets: scan the high half first, fall back to the low half.
    const uint32_t highHalf = (uint32_t)(value >> 32);
    if (highHalf != 0)
        return 63 - __builtin_clz(highHalf);
    return 31 - __builtin_clz((uint32_t)value);
#endif
}
// Index of the highest set bit, or -1 for zero.
inline int HighestBit(uint32_t value)
{
    if (value == 0)
        return -1;
    return HighestBitNonZero(value);
}
inline int HighestBit(uint64_t value)
{
    if (value == 0)
        return -1;
    return HighestBitNonZero(value);
}
inline int LowestBitNonZero(uint32_t value)
{
    // Trailing-zero count is exactly the index of the lowest set bit.
    return __builtin_ctz(value);
}
inline int LowestBitNonZero(uint64_t value)
{
#if PLATFORM_ARCH_64
    return __builtin_ctzll(value);
#else
    // 32-bit targets: scan the low half first, fall back to the high half.
    const uint32_t lowHalf = (uint32_t)value;
    if (lowHalf != 0)
        return __builtin_ctz(lowHalf);
    return 32 + __builtin_ctz((uint32_t)(value >> 32));
#endif
}
// Index of the lowest set bit, or -1 for zero.
inline int LowestBit(uint32_t value)
{
    if (value == 0)
        return -1;
    return LowestBitNonZero(value);
}
inline int LowestBit(uint64_t value)
{
    if (value == 0)
        return -1;
    return LowestBitNonZero(value);
}
// Number of set bits (population count).
inline int BitsInMask(uint64_t mask)
{
    return __builtin_popcountll(mask);
}
inline int BitsInMask(uint32_t mask)
{
    return __builtin_popcount(mask);
}
inline int BitsInMask(uint16_t mask)
{
    return BitsInMask((uint32_t)mask);
}
inline int BitsInMask(uint8_t mask)
{
    return BitsInMask((uint32_t)mask);
}
}
}
}

View File

@@ -0,0 +1,131 @@
#pragma once
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#if PLATFORM_ARCH_64
#pragma intrinsic(_BitScanReverse64)
#endif
namespace baselib
{
BASELIB_CPP_INTERFACE
{
namespace Algorithm
{
// Index of the highest set bit, or -1 when value is zero (MSVC intrinsics).
inline int HighestBit(uint32_t value)
{
unsigned long res;
return _BitScanReverse(&res, value) ? (int)res : -1;
}
inline int HighestBit(uint64_t value)
{
#if PLATFORM_ARCH_64
unsigned long res;
return _BitScanReverse64(&res, value) ? (int)res : -1;
#else
// 32-bit targets: prefer the upper half; otherwise fall back to the lower
// half's result, which is -1 when both halves are empty.
unsigned long lower, upper;
int lower_int = _BitScanReverse(&lower, (uint32_t)value) ? (int)lower : -1;
return _BitScanReverse(&upper, (uint32_t)(value >> 32)) ? (int)(32 + upper) : lower_int;
#endif
}
// Index of the highest set bit; value must be non-zero (the intrinsic's
// output is unspecified for zero, so `res` would stay at its initial 0).
inline int HighestBitNonZero(uint32_t value)
{
unsigned long res = 0;
_BitScanReverse(&res, value);
return (int)res;
}
inline int HighestBitNonZero(uint64_t value)
{
#if PLATFORM_ARCH_64
unsigned long res = 0;
_BitScanReverse64(&res, value);
return (int)res;
#else
// 32-bit targets: use the upper half when it has any set bit, else the lower half.
unsigned long lower, upper;
_BitScanReverse(&lower, (uint32_t)value);
return _BitScanReverse(&upper, (uint32_t)(value >> 32)) ? (32 + upper) : lower;
#endif
}
// Index of the lowest set bit, or -1 when value is zero (MSVC intrinsics).
inline int LowestBit(uint32_t value)
{
unsigned long res;
return _BitScanForward(&res, value) ? (int)res : -1;
}
inline int LowestBit(uint64_t value)
{
#if PLATFORM_ARCH_64
unsigned long res;
return _BitScanForward64(&res, value) ? (int)res : -1;
#else
// 32-bit targets: prefer the lower half; otherwise 32 + upper-half result,
// where upper_int = -33 makes the overall result -1 when both halves are empty.
unsigned long lower, upper;
int upper_int = _BitScanForward(&upper, (uint32_t)(value >> 32)) ? (int)upper : -33;
return _BitScanForward(&lower, (uint32_t)(value)) ? (int)lower : (32 + upper_int);
#endif
}
// Index of the lowest set bit; value must be non-zero (the intrinsic's
// output is unspecified for zero, so `res` would stay at its initial 0).
inline int LowestBitNonZero(uint32_t value)
{
unsigned long res = 0;
_BitScanForward(&res, value);
return (int)res;
}
inline int LowestBitNonZero(uint64_t value)
{
#if PLATFORM_ARCH_64
unsigned long res = 0;
_BitScanForward64(&res, value);
return (int)res;
#else
// 32-bit targets: use the lower half when it has any set bit, else 32 + upper-half index.
unsigned long lower, upper;
_BitScanForward(&upper, (uint32_t)(value >> 32));
return _BitScanForward(&lower, (uint32_t)(value)) ? (int)lower : (int)(32 + upper);
#endif
}
// Number of set bits (population count).
// __popcnt/__popcnt16/__popcnt64 were introduced as part of SSE4a
// See https://en.wikipedia.org/wiki/SSE4#POPCNT_and_LZCNT
// To check this accurately, we would need to check cpuid which itself is not for free.
// However, compiling for some hardware, MSVC defines __AVX__ which is a superset of SSE4 so we can use that.
// (as of writing there's no equivalent __SSE4__)
#if defined(__AVX__)
#ifdef _AMD64_
inline int BitsInMask(uint64_t value) { return (int)__popcnt64(value); }
#else
// 32-bit builds lack __popcnt64; combine the popcounts of both halves.
inline int BitsInMask(uint64_t value) { return BitsInMask((uint32_t)value) + BitsInMask((uint32_t)(value >> 32)); }
#endif
inline int BitsInMask(uint32_t value) { return (int)__popcnt(value); }
inline int BitsInMask(uint16_t value) { return (int)__popcnt16(value); }
inline int BitsInMask(uint8_t value) { return BitsInMask((uint16_t)value); }
// Todo: Consider using VCNT instruction on arm (NEON)
#else
// Portable SWAR fallback when the popcount intrinsics are unavailable.
inline int BitsInMask(uint64_t value)
{
// From http://www-graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
value = value - ((value >> 1) & (uint64_t) ~(uint64_t)0 / 3);
value = (value & (uint64_t) ~(uint64_t)0 / 15 * 3) + ((value >> 2) & (uint64_t) ~(uint64_t)0 / 15 * 3);
value = (value + (value >> 4)) & (uint64_t) ~(uint64_t)0 / 255 * 15;
return (uint64_t)(value * ((uint64_t) ~(uint64_t)0 / 255)) >> (sizeof(uint64_t) - 1) * 8;
}
inline int BitsInMask(uint32_t value)
{
// From http://www-graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
value = value - ((value >> 1) & 0x55555555);
value = (value & 0x33333333) + ((value >> 2) & 0x33333333);
return (((value + (value >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
}
inline int BitsInMask(uint16_t value) { return BitsInMask((uint32_t)value); }
inline int BitsInMask(uint8_t value) { return BitsInMask((uint32_t)value); }
#endif
}
}
}

View File

@@ -0,0 +1,38 @@
#pragma once
#include "../Atomic.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
namespace detail
{
// Shared state for the futex-based ConditionVariable implementation.
struct ConditionVariableData
{
// Number of threads currently blocked in Wait/TimedWait.
atomic<int32_t> waiters;
// Number of pending, not-yet-consumed notifications; this is also the
// word the futex waits on (see ConditionVariable_FutexBased.inl.h).
atomic<int32_t> wakeups;
ConditionVariableData() : waiters(0), wakeups(0) {}
inline bool HasWaiters() const
{
return waiters.load(memory_order_acquire) > 0;
}
// Attempts to claim one pending wakeup with a CAS decrement.
// Emits an acquire barrier on success; returns false when no wakeup is pending.
inline bool TryConsumeWakeup()
{
int32_t previousCount = wakeups.load(memory_order_relaxed);
while (previousCount > 0)
{
// On CAS failure previousCount is reloaded with the current value,
// so the loop naturally exits once the count reaches zero.
if (wakeups.compare_exchange_weak(previousCount, previousCount - 1, memory_order_acquire, memory_order_relaxed))
{
return true;
}
}
return false;
}
};
}
}
}

View File

@@ -0,0 +1,26 @@
#pragma once
#include "../Atomic.h"
#include "../Semaphore.h"
namespace baselib
{
BASELIB_CPP_INTERFACE
{
namespace detail
{
// Shared state for the semaphore-based ConditionVariable backend.
struct ConditionVariableData
{
    Semaphore semaphore;      // waiters block on this semaphore
    atomic<uint32_t> waiters; // threads currently registered as waiting

    ConditionVariableData() : semaphore(), waiters(0) {}

    // True when at least one thread is registered as waiting.
    inline bool HasWaiters() const
    {
        return waiters.load(memory_order_acquire) > 0;
    }
};
}
}
}

View File

@@ -0,0 +1,86 @@
#pragma once
#include "../CountdownTimer.h"
#include "../../C/Baselib_SystemFutex.h"
#include "../../C/Baselib_Thread.h"
#if !PLATFORM_FUTEX_NATIVE_SUPPORT
#error "Only use this implementation on top of a proper futex, in all other situations use ConditionVariable_SemaphoreBased.inl.h"
#endif
namespace baselib
{
BASELIB_CPP_INTERFACE
{
// Blocks the calling thread until it is notified. Must be entered with
// m_Lock held; the lock is dropped while sleeping and reacquired before
// returning.
inline void ConditionVariable::Wait()
{
    // Register as a waiter before dropping the lock so a concurrent Notify
    // can account for this thread.
    m_Data.waiters.fetch_add(1, memory_order_relaxed);
    m_Lock.Release();
    for (;;)
    {
        if (m_Data.TryConsumeWakeup())
            break;
        // Sleep while no wakeup token is available ('wakeups' still 0).
        // Spurious futex returns are harmless: the loop re-checks the tokens.
        Baselib_SystemFutex_Wait(&m_Data.wakeups.obj, 0, std::numeric_limits<uint32_t>::max());
    }
    m_Lock.Acquire();
}
// Blocks the calling thread until it is notified or the timeout elapses.
// Must be entered with m_Lock held; the lock is released while waiting and
// reacquired before returning.
// Returns true if woken by a notification, false if the wait timed out.
inline bool ConditionVariable::TimedWait(const timeout_ms timeoutInMilliseconds)
{
    // Register as a waiter before dropping the lock so a concurrent Notify
    // can account for this thread ('waiters' is read by Notify).
    m_Data.waiters.fetch_add(1, memory_order_relaxed);
    m_Lock.Release();
    uint32_t timeLeft = timeoutInMilliseconds.count();
    auto timer = CountdownTimer::StartNew(timeoutInMilliseconds);
    do
    {
        // Sleep only while 'wakeups' is still 0; spurious futex returns are
        // handled by re-checking TryConsumeWakeup and the remaining time.
        Baselib_SystemFutex_Wait(&m_Data.wakeups.obj, 0, timeLeft);
        if (m_Data.TryConsumeWakeup())
        {
            m_Lock.Acquire();
            return true;
        }
        timeLeft = timer.GetTimeLeftInMilliseconds().count();
    }
    while (timeLeft);
    // Timed out: try to deregister ourselves by decrementing 'waiters'. If
    // 'waiters' is already 0, a concurrent Notify has claimed this thread and
    // a matching wakeup token is (or will shortly be) available, so the
    // notification must be consumed instead of reporting a timeout.
    do
    {
        int32_t waiters = m_Data.waiters.load(memory_order_relaxed);
        while (waiters > 0)
        {
            if (m_Data.waiters.compare_exchange_weak(waiters, waiters - 1, memory_order_relaxed, memory_order_relaxed))
            {
                // Successfully deregistered before any Notify claimed us.
                m_Lock.Acquire();
                return false;
            }
        }
        // NOTE(review): spins (with yield) for the window where Notify has
        // decremented 'waiters' but not yet posted the matching wakeup token.
        Baselib_Thread_YieldExecution();
    }
    while (!m_Data.TryConsumeWakeup());
    m_Lock.Acquire();
    return true;
}
// Wakes up to 'count' waiting threads: claims that many entries from
// 'waiters', posts the matching number of wakeup tokens, and futex-notifies.
inline void ConditionVariable::Notify(uint16_t count)
{
    int32_t waitingThreads = m_Data.waiters.load(memory_order_acquire);
    do
    {
        // Never wake more threads than are currently registered as waiting.
        int32_t threadsToWakeup = count < waitingThreads ? count : waitingThreads;
        if (threadsToWakeup == 0)
        {
            // No waiters to wake: publish prior writes so that threads which
            // register after this point observe the notifier's state.
            atomic_thread_fence(memory_order_release);
            return;
        }
        // Claim the waiters first; the matching wakeup tokens are posted
        // below with release ordering so waiters that consume them (acquire)
        // see the notifier's preceding writes.
        if (m_Data.waiters.compare_exchange_weak(waitingThreads, waitingThreads - threadsToWakeup, memory_order_relaxed, memory_order_relaxed))
        {
            m_Data.wakeups.fetch_add(threadsToWakeup, memory_order_release);
            Baselib_SystemFutex_Notify(&m_Data.wakeups.obj, threadsToWakeup, Baselib_WakeupFallbackStrategy_OneByOne);
            return;
        }
        // CAS failed: 'waitingThreads' was refreshed with the current value;
        // retry as long as there are still waiters to wake.
    }
    while (waitingThreads > 0);
}
}
}

Some files were not shown because too many files have changed in this diff Show More