Merge pull request #5311 from ReinUsesLisp/fence-wait

vk_fence_manager: Use timeline semaphores instead of spin waits
bunnei 2021-01-12 21:00:05 +07:00 committed by GitHub
commit de1a316369
3 changed files with 18 additions and 54 deletions
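
What the change does: each InnerFence used to own a VkEvent that the GPU set and the CPU polled in a yield loop; now a fence only remembers the scheduler tick it was queued on and waits for the scheduler to report that tick as finished, which the scheduler tracks with a timeline semaphore. As background only (not part of this diff), here is a minimal sketch of the two waiting strategies at the raw Vulkan level; device, event, timeline and target_tick are placeholder names, and the timeline path assumes Vulkan 1.2 / VK_KHR_timeline_semaphore support:

// Illustrative sketch, not yuzu code.
#include <thread>
#include <vulkan/vulkan.h>

// Old approach: each fence owned a VkEvent set by the GPU; the CPU polled it.
void SpinWait(VkDevice device, VkEvent event) {
    while (vkGetEventStatus(device, event) != VK_EVENT_SET) {
        std::this_thread::yield(); // burns CPU until the GPU gets there
    }
}

// New approach: one timeline semaphore carries a monotonically increasing
// counter; the host blocks in the driver until the counter reaches the tick
// associated with the fence.
void TimelineWait(VkDevice device, VkSemaphore timeline, uint64_t target_tick) {
    const VkSemaphoreWaitInfo wait_info{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
        .pNext = nullptr,
        .flags = 0,
        .semaphoreCount = 1,
        .pSemaphores = &timeline,
        .pValues = &target_tick,
    };
    vkWaitSemaphores(device, &wait_info, UINT64_MAX);
}

The timeline wait blocks inside the driver until the GPU signals the requested counter value, so the emulation thread sleeps instead of spinning.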

src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -3,7 +3,6 @@
 // Refer to the license.txt file included.
 
 #include <memory>
-#include <thread>
 
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_fence_manager.h"
@@ -14,13 +13,11 @@
 namespace Vulkan {
 
-InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_,
-                       bool is_stubbed_)
-    : FenceBase{payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_)
+    : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}
 
-InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
-                       u32 payload_, bool is_stubbed_)
-    : FenceBase{address_, payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
+    : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}
 
 InnerFence::~InnerFence() = default;
@@ -28,63 +25,38 @@ void InnerFence::Queue() {
     if (is_stubbed) {
         return;
     }
-    ASSERT(!event);
-
-    event = device.GetLogical().CreateEvent();
-    ticks = scheduler.CurrentTick();
-
-    scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
-        cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
-    });
+    // Get the current tick so we can wait for it
+    wait_tick = scheduler.CurrentTick();
+    scheduler.Flush();
 }
 
 bool InnerFence::IsSignaled() const {
     if (is_stubbed) {
         return true;
     }
-    ASSERT(event);
-    return IsEventSignalled();
+    return scheduler.IsFree(wait_tick);
 }
 
 void InnerFence::Wait() {
     if (is_stubbed) {
         return;
     }
-    ASSERT(event);
-
-    if (ticks >= scheduler.CurrentTick()) {
-        scheduler.Flush();
-    }
-    while (!IsEventSignalled()) {
-        std::this_thread::yield();
-    }
-}
-
-bool InnerFence::IsEventSignalled() const {
-    switch (const VkResult result = event.GetStatus()) {
-    case VK_EVENT_SET:
-        return true;
-    case VK_EVENT_RESET:
-        return false;
-    default:
-        throw vk::Exception(result);
-    }
+    scheduler.Wait(wait_tick);
 }
 
 VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
                                Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
                                VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
-                               const Device& device_, VKScheduler& scheduler_)
+                               VKScheduler& scheduler_)
     : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
-      device{device_}, scheduler{scheduler_} {}
+      scheduler{scheduler_} {}
 
 Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
-    return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
+    return std::make_shared<InnerFence>(scheduler, value, is_stubbed);
 }
 
 Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
-    return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed);
+    return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
 }
 
 void VKFenceManager::QueueFence(Fence& fence) {
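
The new fence code leans on three scheduler calls: CurrentTick() for the tick the next submission will signal, IsFree(tick) for a non-blocking check, and Wait(tick) for a blocking wait. Their implementation is outside this diff; the sketch below is a simplified, hypothetical stand-in (TickScheduler is not a yuzu class) assuming the scheduler owns one timeline semaphore whose counter is signaled to the tick value of every queue submission:

// Simplified standalone sketch, not yuzu's actual scheduler.
#include <atomic>
#include <cstdint>
#include <vulkan/vulkan.h>

class TickScheduler {
public:
    explicit TickScheduler(VkDevice device_, VkSemaphore timeline_)
        : device{device_}, timeline{timeline_} {}

    // Tick that the next submission will signal; a fence queued now waits on it.
    uint64_t CurrentTick() const {
        return current_tick.load(std::memory_order_acquire);
    }

    // Non-blocking check: has the GPU already reached this tick?
    bool IsFree(uint64_t tick) const {
        uint64_t counter = 0;
        vkGetSemaphoreCounterValue(device, timeline, &counter);
        return counter >= tick;
    }

    // Blocking wait: let the driver park the thread until the tick is signaled.
    void Wait(uint64_t tick) const {
        const VkSemaphoreWaitInfo wait_info{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
            .pNext = nullptr,
            .flags = 0,
            .semaphoreCount = 1,
            .pSemaphores = &timeline,
            .pValues = &tick,
        };
        vkWaitSemaphores(device, &wait_info, UINT64_MAX);
    }

private:
    // Submission code that advances current_tick and signals the timeline
    // semaphore to that value is omitted from this sketch.
    VkDevice device;
    VkSemaphore timeline;
    std::atomic<uint64_t> current_tick{1};
};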

src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -28,10 +28,8 @@ class VKScheduler;
 
 class InnerFence : public VideoCommon::FenceBase {
 public:
-    explicit InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_,
-                        bool is_stubbed_);
-    explicit InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
-                        u32 payload_, bool is_stubbed_);
+    explicit InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_);
+    explicit InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
     ~InnerFence();
 
     void Queue();
@@ -41,12 +39,8 @@ public:
     void Wait();
 
 private:
-    bool IsEventSignalled() const;
-
-    const Device& device;
     VKScheduler& scheduler;
-    vk::Event event;
-    u64 ticks = 0;
+    u64 wait_tick = 0;
 };
 
 using Fence = std::shared_ptr<InnerFence>;
@@ -58,7 +52,7 @@ public:
     explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
                             Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
                             VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
-                            const Device& device_, VKScheduler& scheduler_);
+                            VKScheduler& scheduler_);
 
 protected:
     Fence CreateFence(u32 value, bool is_stubbed) override;
@@ -68,7 +62,6 @@ protected:
     void WaitFence(Fence& fence) override;
 
 private:
-    const Device& device;
     VKScheduler& scheduler;
 };

src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -428,8 +428,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_manager, scheduler, stream_buffer,
                    staging_pool),
       query_cache{*this, maxwell3d, gpu_memory, device, scheduler},
-      fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device,
-                    scheduler),
+      fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, scheduler),
       wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) {
     scheduler.SetQueryCache(query_cache);
     if (device.UseAsynchronousShaders()) {