diff --git a/app/src/main/cpp/skyline/gpu/interconnect/command_executor.cpp b/app/src/main/cpp/skyline/gpu/interconnect/command_executor.cpp
index e8214fa6..e6c1e47d 100644
--- a/app/src/main/cpp/skyline/gpu/interconnect/command_executor.cpp
+++ b/app/src/main/cpp/skyline/gpu/interconnect/command_executor.cpp
@@ -29,10 +29,27 @@ namespace skyline::gpu::interconnect {
           fence{gpu.vkDevice, vk::FenceCreateInfo{ .flags = vk::FenceCreateFlagBits::eSignaled }},
           cycle{std::make_shared<FenceCycle>(gpu.vkDevice, *fence, true)} {}
 
+    CommandRecordThread::Slot::ScopedReset::ScopedReset(CommandRecordThread::Slot &slot) : slot{slot} {}
+
+    CommandRecordThread::Slot::ScopedReset::~ScopedReset() {
+        std::scoped_lock resetLock{slot.resetMutex};
+        if (slot.needsReset)
+            slot.commandBuffer.reset();
+
+        slot.needsReset = false;
+    }
+
     std::shared_ptr<FenceCycle> CommandRecordThread::Slot::Reset(GPU &gpu) {
         cycle->Wait();
         cycle = std::make_shared<FenceCycle>(gpu.vkDevice, *fence);
-        commandBuffer.reset();
+
+        std::scoped_lock resetLock{resetMutex};
+        if (needsReset)
+            commandBuffer.reset();
+
+        needsReset = false;
+        cycle->AttachObjects(std::make_shared<ScopedReset>(*this));
+
         return cycle;
     }
 
@@ -72,6 +89,9 @@ namespace skyline::gpu::interconnect {
 
         slot->nodes.clear();
         slot->allocator.Reset();
+
+        std::scoped_lock resetLock{slot->resetMutex};
+        slot->needsReset = true;
     }
 
     void CommandRecordThread::Run() {
diff --git a/app/src/main/cpp/skyline/gpu/interconnect/command_executor.h b/app/src/main/cpp/skyline/gpu/interconnect/command_executor.h
index e8bf0ee8..b2a6b148 100644
--- a/app/src/main/cpp/skyline/gpu/interconnect/command_executor.h
+++ b/app/src/main/cpp/skyline/gpu/interconnect/command_executor.h
@@ -19,12 +19,25 @@ namespace skyline::gpu::interconnect {
          * @brief Single execution slot, buffered back and forth between the GPFIFO thread and the record thread
          */
         struct Slot {
+            /**
+             * @brief Helper to reset a slot's command buffer asynchronously
+             */
+            struct ScopedReset {
+                Slot &slot;
+
+                ScopedReset(Slot &slot);
+
+                ~ScopedReset();
+            };
+
             vk::raii::CommandPool commandPool; //!< Use one command pool per slot since command buffers from different slots may be recorded into on multiple threads at the same time
             vk::raii::CommandBuffer commandBuffer;
             vk::raii::Fence fence;
             std::shared_ptr<FenceCycle> cycle;
             boost::container::stable_vector<node::NodeType> nodes;
             LinearAllocatorState<> allocator;
+            std::mutex resetMutex;
+            bool needsReset{}; //!< If the slot's command buffer needs to be reset before it can be used again
 
             Slot(GPU &gpu);
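
Not part of the diff itself: the sketch below is a minimal, self-contained illustration of the deferred-reset pattern the change introduces, where a RAII helper attached to the fence cycle performs the pending command-buffer reset once the cycle drops its dependencies, and the next Slot::Reset() acts as a fallback. All names here (Cycle, AttachObject, Release, MarkDirty, ResetCommandBuffer) are illustrative stand-ins, not skyline's actual API.

#include <memory>
#include <mutex>
#include <vector>
#include <iostream>

struct Slot; // forward declaration, ScopedReset only holds a reference

// Destructor performs the reset if it is still pending (mirrors Slot::ScopedReset in the diff)
struct ScopedReset {
    Slot &slot;
    explicit ScopedReset(Slot &slot) : slot{slot} {}
    ~ScopedReset();
};

// Stand-in for FenceCycle: keeps attached objects alive until the GPU work is done
struct Cycle {
    std::vector<std::shared_ptr<void>> dependencies;

    void AttachObject(std::shared_ptr<void> object) {
        dependencies.push_back(std::move(object));
    }

    void Release() {
        dependencies.clear(); // destroys the attached ScopedReset, triggering the deferred reset
    }
};

struct Slot {
    std::mutex resetMutex;
    bool needsReset{};

    void ResetCommandBuffer() {
        std::cout << "command buffer reset\n"; // stands in for vk::raii::CommandBuffer::reset()
    }

    // Mirrors the record thread's behaviour: mark the buffer dirty after recording into it
    void MarkDirty() {
        std::scoped_lock lock{resetMutex};
        needsReset = true;
    }

    // Mirrors Slot::Reset(): reset eagerly if still pending, then arm a new deferred reset
    std::shared_ptr<Cycle> Reset() {
        auto cycle{std::make_shared<Cycle>()};

        std::scoped_lock lock{resetMutex};
        if (needsReset)
            ResetCommandBuffer();
        needsReset = false;

        cycle->AttachObject(std::make_shared<ScopedReset>(*this));
        return cycle;
    }
};

ScopedReset::~ScopedReset() {
    std::scoped_lock lock{slot.resetMutex};
    if (slot.needsReset)
        slot.ResetCommandBuffer();
    slot.needsReset = false;
}

int main() {
    Slot slot;
    auto cycle{slot.Reset()}; // nothing pending yet, just arms the deferred reset
    slot.MarkDirty();         // recording finished, buffer now needs a reset
    cycle->Release();         // releasing the cycle performs the reset off the hot path
}

The point of pairing the flag with the mutex is that either path, the cycle releasing its attached ScopedReset or the next call to Slot::Reset(), can perform the reset, but only whichever observes needsReset first actually does it.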