Abstract TextureView/BufferDelegate locking into LockableSharedPtr

Previously, `BufferDelegate`/`TextureView`'s `lock`/`LockWithTag`/`try_lock` functions each performed an atomic transactional loop on the backing `std::shared_ptr`: the pointer was read with `std::atomic_load`, the pointee was locked, and the pointer was read again, repeating until its value was the same before and after locking.

This commit abstracts the locking functionality of `TextureView`/`BufferDelegate` into `LockableSharedPtr` to avoid code duplication, and removes the use of `std::atomic_load` in both cases since it is unnecessary given the implicit memory barrier provided by locking a mutex.
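For clarity, the transaction that both paths previously open-coded looks roughly like this (a minimal sketch with generic names, not the exact code removed below): lock whatever backing the pointer currently refers to, then verify the pointer was not re-pointed to a new backing while this thread was blocked on the old backing's mutex, and retry otherwise.

#include <memory>

// Minimal sketch of the transactional locking pattern described above (generic names, not the commit's code)
template<typename T>
void TransactionalLock(std::shared_ptr<T> &ptr) {
    while (true) {
        auto object{ptr.get()}; // Observe the current backing object
        object->lock();         // May block while another thread swaps in a new backing
        if (ptr.get() == object)
            return;             // The backing is still current, so the lock is held on the right object
        object->unlock();       // A stale backing was locked, retry against the new one
    }
}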
PixelyIon 2022-06-27 21:17:49 +05:30
parent 2d08886e4e
commit 217d484cba
6 changed files with 97 additions and 77 deletions

View File

@@ -0,0 +1,78 @@
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2022 Skyline Team and Contributors (https://github.com/skyline-emu/)

#pragma once

#include <memory>
#include <atomic>

namespace skyline {
    /**
     * @brief A wrapper around a shared_ptr<T> which can be utilized to perform transactional atomic operations to lock the underlying resource and attain stability in the pointer value
     * @note Any operations directly accessing the value are **NOT** atomic and should be done after a locking transaction
     */
    template<typename Type>
    class LockableSharedPtr : public std::shared_ptr<Type> {
      public:
        using std::shared_ptr<Type>::shared_ptr;
        using std::shared_ptr<Type>::operator=;

        LockableSharedPtr(std::shared_ptr<Type> &&ptr) : std::shared_ptr<Type>{std::move(ptr)} {}

      private:
        /**
         * @brief A lock function for the underlying object that conforms to the BasicLockable named requirement
         */
        static void DefaultLockFunction(Type *object) {
            object->lock();
        }

        /**
         * @brief An unlock function for the underlying object that conforms to the BasicLockable named requirement
         */
        static void DefaultUnlockFunction(Type *object) {
            object->unlock();
        }

        /**
         * @brief A try_lock function for the underlying object that conforms to the Lockable named requirement
         */
        static bool DefaultTryLockFunction(Type *object) {
            return object->try_lock();
        }

      public:
        /**
         * @brief Locks the underlying object with the supplied lock/unlock functions
         */
        template<typename LockFunction = typeof(DefaultLockFunction), typename UnlockFunction = typeof(DefaultUnlockFunction)>
        void Lock(LockFunction lock = DefaultLockFunction, UnlockFunction unlock = DefaultUnlockFunction) const {
            while (true) {
                auto object{this->get()};
                lock(object);

                if (this->get() == object)
                    return;

                unlock(object);
            }
        }

        /**
         * @brief Attempts to lock the underlying object with the supplied try_lock/unlock functions
         */
        template<typename TryLockFunction = typeof(DefaultTryLockFunction), typename UnlockFunction = typeof(DefaultUnlockFunction)>
        bool TryLock(TryLockFunction tryLock = DefaultTryLockFunction, UnlockFunction unlock = DefaultUnlockFunction) const {
            while (true) {
                auto object{this->get()};
                bool wasLocked{tryLock(object)};

                if (this->get() == object)
                    return wasLocked;

                if (wasLocked)
                    unlock(object);
            }
        }
    };
}
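A hedged usage sketch of the class above (the `Resource` type and `Example` function are illustrative, not part of this commit): any type satisfying the Lockable named requirement can be wrapped, while direct access through the pointer stays non-atomic and should only happen after a transaction has locked the current backing.

#include <memory>
#include <mutex>
#include <common/lockable_shared_ptr.h>

// A hypothetical type satisfying the Lockable named requirement
struct Resource {
    std::recursive_mutex mutex;

    void lock() { mutex.lock(); }
    void unlock() { mutex.unlock(); }
    bool try_lock() { return mutex.try_lock(); }
};

void Example() {
    skyline::LockableSharedPtr<Resource> resource{std::make_shared<Resource>()};

    resource.Lock();    // Locks the latest backing, retrying if it's swapped out mid-lock
    resource->unlock(); // Direct access via the shared_ptr interface, safe only while the backing is locked and stable

    if (resource.TryLock()) // Lockable counterpart that reports whether the lock was acquired
        resource->unlock();
}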

View File

@@ -275,32 +275,15 @@ namespace skyline::gpu {
     }

     void Buffer::BufferDelegate::lock() {
-        auto lBuffer{std::atomic_load(&buffer)};
-        while (true) {
-            lBuffer->lock();
-            auto latestBacking{std::atomic_load(&buffer)};
-            if (lBuffer == latestBacking)
-                return;
-            lBuffer->unlock();
-            lBuffer = latestBacking;
-        }
+        buffer.Lock();
     }

     bool Buffer::BufferDelegate::LockWithTag(ContextTag pTag) {
-        auto lBuffer{std::atomic_load(&buffer)};
-        while (true) {
-            bool didLock{lBuffer->LockWithTag(pTag)};
-            auto latestBacking{std::atomic_load(&buffer)};
-            if (lBuffer == latestBacking)
-                return didLock;
-            if (didLock)
-                lBuffer->unlock();
-            lBuffer = latestBacking;
-        }
+        bool result{};
+        buffer.Lock([pTag, &result](Buffer* pBuffer) {
+            result = pBuffer->LockWithTag(pTag);
+        });
+        return result;
     }

     void Buffer::BufferDelegate::unlock() {
@@ -308,20 +291,7 @@ namespace skyline::gpu {
     }

     bool Buffer::BufferDelegate::try_lock() {
-        auto lBuffer{std::atomic_load(&buffer)};
-        while (true) {
-            bool success{lBuffer->try_lock()};
-            auto latestBuffer{std::atomic_load(&buffer)};
-            if (lBuffer == latestBuffer)
-                // We want to ensure that the try_lock() was on the latest backing and not on an outdated one
-                return success;
-            if (success)
-                // We only unlock() if the try_lock() was successful and we acquired the mutex
-                lBuffer->unlock();
-            lBuffer = latestBuffer;
-        }
+        return buffer.TryLock();
     }

     BufferView::BufferView(std::shared_ptr<Buffer> buffer, const Buffer::BufferViewStorage *view) : bufferDelegate(std::make_shared<Buffer::BufferDelegate>(std::move(buffer), view)) {}
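Two notes on the rewrite above: the lambda passed to `buffer.Lock` in `LockWithTag` exists because `Lock` expects a void-returning lock function, so the boolean result is captured by reference instead; and since the delegate keeps satisfying the BasicLockable named requirement, standard lock guards continue to work over it. A hedged caller-side sketch (the `Example` function is illustrative and assumes the `Buffer`/`BufferDelegate` declarations are in scope):

#include <mutex>

// Hypothetical usage: std::scoped_lock drives delegate.lock()/unlock(), which now run the
// LockableSharedPtr transaction and always end up holding the latest backing Buffer's mutex
void Example(Buffer::BufferDelegate &delegate) {
    std::scoped_lock lock{delegate};
    // ... the backing buffer is locked and pointer-stable within this scope ...
}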

View File

@@ -5,6 +5,7 @@
 #include <unordered_set>
 #include <boost/functional/hash.hpp>
+#include <common/lockable_shared_ptr.h>
 #include <nce.h>
 #include <gpu/tag_allocator.h>
 #include "memory_manager.h"
@@ -94,7 +95,7 @@ namespace skyline::gpu {
         * @note This class conforms to the Lockable and BasicLockable C++ named requirements
         */
        struct BufferDelegate {
-            std::shared_ptr<Buffer> buffer;
+            LockableSharedPtr<Buffer> buffer;
             const Buffer::BufferViewStorage *view;
             std::function<void(const BufferViewStorage &, const std::shared_ptr<Buffer> &)> usageCallback;
             std::list<BufferDelegate *>::iterator iterator;

View File

@@ -86,7 +86,7 @@ namespace skyline::gpu {
             // Transfer all delegates references from the overlapping buffer to the new buffer
             for (auto &delegate : overlap->delegates) {
-                atomic_exchange(&delegate->buffer, newBuffer);
+                delegate->buffer = newBuffer;
                 if (delegate->usageCallback)
                     delegate->usageCallback(*delegate->view, newBuffer);
             }

View File

@@ -93,32 +93,15 @@ namespace skyline::gpu {
     }

     void TextureView::lock() {
-        auto backing{std::atomic_load(&texture)};
-        while (true) {
-            backing->lock();
-            auto latestBacking{std::atomic_load(&texture)};
-            if (backing == latestBacking)
-                return;
-            backing->unlock();
-            backing = latestBacking;
-        }
+        texture.Lock();
     }

     bool TextureView::LockWithTag(ContextTag tag) {
-        auto backing{std::atomic_load(&texture)};
-        while (true) {
-            bool didLock{backing->LockWithTag(tag)};
-            auto latestBacking{std::atomic_load(&texture)};
-            if (backing == latestBacking)
-                return didLock;
-            if (didLock)
-                backing->unlock();
-            backing = latestBacking;
-        }
+        bool result{};
+        texture.Lock([tag, &result](Texture* pTexture) {
+            result = pTexture->LockWithTag(tag);
+        });
+        return result;
     }

     void TextureView::unlock() {
@@ -126,20 +109,7 @@ namespace skyline::gpu {
     }

     bool TextureView::try_lock() {
-        auto backing{std::atomic_load(&texture)};
-        while (true) {
-            bool success{backing->try_lock()};
-            auto latestBacking{std::atomic_load(&texture)};
-            if (backing == latestBacking)
-                // We want to ensure that the try_lock() was on the latest backing and not on an outdated one
-                return success;
-            if (success)
-                // We only unlock() if the try_lock() was successful and we acquired the mutex
-                backing->unlock();
-            backing = latestBacking;
-        }
+        return texture.TryLock();
     }

     void Texture::SetupGuestMappings() {
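`TextureView` likewise still satisfies the Lockable named requirement, so deadlock-avoiding helpers that need `lock`/`try_lock`/`unlock` keep working across multiple views; a hedged sketch (the `ExampleCopy` function is illustrative and assumes the `TextureView` declaration is in scope):

#include <mutex>

// Hypothetical usage: std::scoped_lock over several views uses the deadlock-avoidance algorithm of
// std::lock, with each lock()/try_lock() resolving to a LockableSharedPtr transaction on that view's backing Texture
void ExampleCopy(TextureView &source, TextureView &destination) {
    std::scoped_lock lock{source, destination};
    // ... both backing textures are locked and pointer-stable here ...
}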

View File

@@ -3,6 +3,7 @@
 #pragma once

+#include <common/lockable_shared_ptr.h>
 #include <nce.h>
 #include <gpu/tag_allocator.h>
 #include <gpu/memory_manager.h>
@@ -298,7 +299,7 @@ namespace skyline::gpu {
         vk::ImageView vkView{};

       public:
-        std::shared_ptr<Texture> texture;
+        LockableSharedPtr<Texture> texture;
         vk::ImageViewType type;
         texture::Format format;
         vk::ComponentMapping mapping;