Merge pull request #7720 from FernandoS27/yfc-gc

First Nugget: Reworked Garbage Collection to be smarter [originally from Project YFC]
bunnei 2022-03-24 20:16:11 +07:00 committed by GitHub
commit ab6a5784fa
20 changed files with 259 additions and 43 deletions

@@ -76,8 +76,9 @@ class BufferCache {
     static constexpr BufferId NULL_BUFFER_ID{0};

-    static constexpr u64 EXPECTED_MEMORY = 512_MiB;
-    static constexpr u64 CRITICAL_MEMORY = 1_GiB;
+    static constexpr s64 DEFAULT_EXPECTED_MEMORY = 512_MiB;
+    static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB;
+    static constexpr s64 TARGET_THRESHOLD = 4_GiB;

     using Maxwell = Tegra::Engines::Maxwell3D::Regs;

@@ -436,6 +437,8 @@ private:
     Common::LeastRecentlyUsedCache<LRUItemParams> lru_cache;
     u64 frame_tick = 0;
     u64 total_used_memory = 0;
+    u64 minimum_memory = 0;
+    u64 critical_memory = 0;
     std::array<BufferId, ((1ULL << 39) >> PAGE_BITS)> page_table;
 };

@@ -451,11 +454,30 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
     // Ensure the first slot is used for the null buffer
     void(slot_buffers.insert(runtime, NullBufferParams{}));
     common_ranges.clear();
+    if (!runtime.CanReportMemoryUsage()) {
+        minimum_memory = DEFAULT_EXPECTED_MEMORY;
+        critical_memory = DEFAULT_CRITICAL_MEMORY;
+        return;
+    }
+    const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
+    const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
+    const s64 min_spacing_critical = device_memory - 1_GiB;
+    const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
+    const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
+    const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
+    minimum_memory = static_cast<u64>(
+        std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected),
+                 DEFAULT_EXPECTED_MEMORY));
+    critical_memory = static_cast<u64>(
+        std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical),
+                 DEFAULT_CRITICAL_MEMORY));
 }

 template <class P>
 void BufferCache<P>::RunGarbageCollector() {
-    const bool aggressive_gc = total_used_memory >= CRITICAL_MEMORY;
+    const bool aggressive_gc = total_used_memory >= critical_memory;
     const u64 ticks_to_destroy = aggressive_gc ? 60 : 120;
     int num_iterations = aggressive_gc ? 64 : 32;
     const auto clean_up = [this, &num_iterations](BufferId buffer_id) {

@@ -486,7 +508,11 @@ void BufferCache<P>::TickFrame() {
     const bool skip_preferred = hits * 256 < shots * 251;
     uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
-    if (total_used_memory >= EXPECTED_MEMORY) {
+    // If we can obtain the memory info, use it instead of the estimate.
+    if (runtime.CanReportMemoryUsage()) {
+        total_used_memory = runtime.GetDeviceMemoryUsage();
+    }
+    if (total_used_memory >= minimum_memory) {
         RunGarbageCollector();
     }
     ++frame_tick;
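Taken together, the hunks above change the buffer cache from fixed 512 MiB / 1 GiB limits to budget-derived thresholds, and TickFrame() now prefers the runtime's reported usage over the running estimate. A standalone illustration of the threshold arithmetic, assuming a hypothetical 8 GiB device budget (the program below is not part of the patch):

```cpp
// Standalone illustration (not part of the patch) of the threshold math above,
// for an assumed 8 GiB device budget and the buffer cache's 512 MiB / 1 GiB floors.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::int64_t MiB = 1024 * 1024;
    constexpr std::int64_t GiB = 1024 * MiB;

    const std::int64_t device_memory = 8 * GiB;    // hypothetical card
    const std::int64_t target_threshold = 4 * GiB; // mirrors TARGET_THRESHOLD

    const std::int64_t mem_threshold = std::min(device_memory, target_threshold);
    const std::int64_t min_vacancy_expected = (6 * mem_threshold) / 10; // keep ~2.4 GiB free
    const std::int64_t min_vacancy_critical = (3 * mem_threshold) / 10; // keep ~1.2 GiB free
    const std::int64_t min_spacing_expected = device_memory - GiB - 512 * MiB;
    const std::int64_t min_spacing_critical = device_memory - GiB;

    const std::int64_t minimum_memory =
        std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected), 512 * MiB);
    const std::int64_t critical_memory =
        std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical), GiB);

    // Prints roughly 5734 MiB and 6963 MiB: GC starts once ~5.6 GiB of buffers are
    // resident and turns aggressive past ~6.8 GiB, instead of the old 512 MiB / 1 GiB.
    std::printf("minimum=%lld MiB critical=%lld MiB\n",
                static_cast<long long>(minimum_memory / MiB),
                static_cast<long long>(critical_memory / MiB));
    return 0;
}
```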

@@ -135,6 +135,20 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
         buffer.Create();
         glNamedBufferData(buffer.handle, 0x10'000, nullptr, GL_STREAM_COPY);
     }
+    device_access_memory = [this]() -> u64 {
+        if (device.CanReportMemoryUsage()) {
+            return device.GetCurrentDedicatedVideoMemory() + 512_MiB;
+        }
+        return 2_GiB; // Return minimum requirements
+    }();
 }

+u64 BufferCacheRuntime::GetDeviceMemoryUsage() const {
+    if (device.CanReportMemoryUsage()) {
+        return device_access_memory - device.GetCurrentDedicatedVideoMemory();
+    }
+    return 2_GiB;
+}
+
 void BufferCacheRuntime::CopyBuffer(Buffer& dst_buffer, Buffer& src_buffer,

@@ -89,6 +89,8 @@ public:
     void BindImageBuffer(Buffer& buffer, u32 offset, u32 size,
                          VideoCore::Surface::PixelFormat format);

+    u64 GetDeviceMemoryUsage() const;
+
     void BindFastUniformBuffer(size_t stage, u32 binding_index, u32 size) {
         const GLuint handle = fast_uniforms[stage][binding_index].handle;
         const GLsizeiptr gl_size = static_cast<GLsizeiptr>(size);

@@ -151,6 +153,14 @@ public:
         use_storage_buffers = use_storage_buffers_;
     }

+    u64 GetDeviceLocalMemory() const {
+        return device_access_memory;
+    }
+
+    bool CanReportMemoryUsage() const {
+        return device.CanReportMemoryUsage();
+    }
+
 private:
     static constexpr std::array PABO_LUT{
         GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV, GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV,

@@ -184,6 +194,8 @@ private:
     std::array<OGLBuffer, VideoCommon::NUM_COMPUTE_UNIFORM_BUFFERS> copy_compute_uniforms;
     u32 index_buffer_offset = 0;
+
+    u64 device_access_memory;
 };

 struct BufferCacheParams {

@@ -13,12 +13,15 @@
 #include <glad/glad.h>

+#include "common/literals.h"
 #include "common/logging/log.h"
 #include "common/settings.h"
 #include "shader_recompiler/stage.h"
 #include "video_core/renderer_opengl/gl_device.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"

+using namespace Common::Literals;
+
 namespace OpenGL {
 namespace {
 constexpr std::array LIMIT_UBOS = {

@@ -165,6 +168,7 @@ Device::Device() {
     has_sparse_texture_2 = GLAD_GL_ARB_sparse_texture2;
     warp_size_potentially_larger_than_guest = !is_nvidia && !is_intel;
     need_fastmath_off = is_nvidia;
+    can_report_memory = GLAD_GL_NVX_gpu_memory_info;

     // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
     // uniform buffers as "push constants"

@@ -276,4 +280,10 @@ void main() {
 })");
 }

+u64 Device::GetCurrentDedicatedVideoMemory() const {
+    GLint cur_avail_mem_kb = 0;
+    glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &cur_avail_mem_kb);
+    return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+}
+
 } // namespace OpenGL
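GL_NVX_gpu_memory_info reports sizes in KiB, which is why the new query multiplies by 1_KiB. The sketch below (illustrative only, assuming a current GL context and that glad has loaded the extension) shows how the dedicated and currently-available values can be combined into a budget/usage pair, which is essentially what the runtimes above do with a 512 MiB headroom:

```cpp
// Illustrative sketch (not part of the patch); both NVX queries report KiB.
#include <cstdint>

#include <glad/glad.h>

struct GlMemoryStats {
    std::uint64_t budget_bytes; // total dedicated VRAM reported by the driver
    std::uint64_t used_bytes;   // dedicated VRAM that is no longer available
};

inline GlMemoryStats QueryNvxMemory() {
    GLint dedicated_kib = 0;
    GLint available_kib = 0;
    glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &dedicated_kib);
    glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &available_kib);

    GlMemoryStats stats{};
    stats.budget_bytes = static_cast<std::uint64_t>(dedicated_kib) * 1024;
    const GLint used_kib = dedicated_kib > available_kib ? dedicated_kib - available_kib : 0;
    stats.used_bytes = static_cast<std::uint64_t>(used_kib) * 1024;
    return stats;
}
```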

@@ -20,6 +20,8 @@ public:
     [[nodiscard]] std::string GetVendorName() const;

+    u64 GetCurrentDedicatedVideoMemory() const;
+
     u32 GetMaxUniformBuffers(Shader::Stage stage) const noexcept {
         return max_uniform_buffers[static_cast<size_t>(stage)];
     }

@@ -168,6 +170,10 @@ public:
         return vendor_name == "ATI Technologies Inc.";
     }

+    bool CanReportMemoryUsage() const {
+        return can_report_memory;
+    }
+
 private:
     static bool TestVariableAoffi();
     static bool TestPreciseBug();

@@ -210,6 +216,7 @@ private:
     bool need_fastmath_off{};
     bool has_cbuf_ftou_bug{};
     bool has_bool_ref_bug{};
+    bool can_report_memory{};
     std::string vendor_name;
 };

@@ -484,6 +484,13 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, ProgramManager&
             rescale_read_fbos[i].Create();
         }
     }
+    device_access_memory = [this]() -> u64 {
+        if (device.CanReportMemoryUsage()) {
+            return device.GetCurrentDedicatedVideoMemory() + 512_MiB;
+        }
+        return 2_GiB; // Return minimum requirements
+    }();
 }

 TextureCacheRuntime::~TextureCacheRuntime() = default;

@@ -500,13 +507,11 @@ ImageBufferMap TextureCacheRuntime::DownloadStagingBuffer(size_t size) {
     return download_buffers.RequestMap(size, false);
 }

-u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
-    if (GLAD_GL_NVX_gpu_memory_info) {
-        GLint cur_avail_mem_kb = 0;
-        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &cur_avail_mem_kb);
-        return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+u64 TextureCacheRuntime::GetDeviceMemoryUsage() const {
+    if (device.CanReportMemoryUsage()) {
+        return device_access_memory - device.GetCurrentDedicatedVideoMemory();
     }
-    return 2_GiB; // Return minimum requirements
+    return 2_GiB;
 }

 void TextureCacheRuntime::CopyImage(Image& dst_image, Image& src_image,

@@ -686,6 +691,7 @@ Image::Image(TextureCacheRuntime& runtime_, const VideoCommon::ImageInfo& info_,
     }
     if (IsConverted(runtime->device, info.format, info.type)) {
         flags |= ImageFlagBits::Converted;
+        flags |= ImageFlagBits::CostlyLoad;
         gl_internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
         gl_format = GL_RGBA;
         gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;

@@ -10,6 +10,7 @@
 #include <glad/glad.h>

 #include "shader_recompiler/shader_info.h"
+#include "video_core/renderer_opengl/gl_device.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 #include "video_core/renderer_opengl/util_shaders.h"
 #include "video_core/texture_cache/image_view_base.h"

@@ -21,7 +22,6 @@ struct ResolutionScalingInfo;
 namespace OpenGL {

-class Device;
 class ProgramManager;
 class StateTracker;

@@ -83,7 +83,15 @@ public:
     ImageBufferMap DownloadStagingBuffer(size_t size);

-    u64 GetDeviceLocalMemory() const;
+    u64 GetDeviceLocalMemory() const {
+        return device_access_memory;
+    }
+
+    u64 GetDeviceMemoryUsage() const;
+
+    bool CanReportMemoryUsage() const {
+        return device.CanReportMemoryUsage();
+    }

     bool ShouldReinterpret([[maybe_unused]] Image& dst, [[maybe_unused]] Image& src) {
         return true;

@@ -172,6 +180,7 @@ private:
     std::array<OGLFramebuffer, 4> rescale_draw_fbos;
     std::array<OGLFramebuffer, 4> rescale_read_fbos;
     const Settings::ResolutionScalingInfo& resolution;
+    u64 device_access_memory;
 };

 class Image : public VideoCommon::ImageBase {

@@ -141,6 +141,18 @@ StagingBufferRef BufferCacheRuntime::DownloadStagingBuffer(size_t size) {
     return staging_pool.Request(size, MemoryUsage::Download);
 }

+u64 BufferCacheRuntime::GetDeviceLocalMemory() const {
+    return device.GetDeviceLocalMemory();
+}
+
+u64 BufferCacheRuntime::GetDeviceMemoryUsage() const {
+    return device.GetDeviceMemoryUsage();
+}
+
+bool BufferCacheRuntime::CanReportMemoryUsage() const {
+    return device.CanReportMemoryUsage();
+}
+
 void BufferCacheRuntime::Finish() {
     scheduler.Finish();
 }

@@ -65,6 +65,12 @@ public:
     void Finish();

+    u64 GetDeviceLocalMemory() const;
+    u64 GetDeviceMemoryUsage() const;
+    bool CanReportMemoryUsage() const;
+
     [[nodiscard]] StagingBufferRef UploadStagingBuffer(size_t size);

     [[nodiscard]] StagingBufferRef DownloadStagingBuffer(size_t size);

@@ -118,7 +118,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         .image = nullptr,
         .buffer = *stream_buffer,
     };
-    const auto memory_properties = device.GetPhysical().GetMemoryProperties();
+    const auto memory_properties = device.GetPhysical().GetMemoryProperties().memoryProperties;
     VkMemoryAllocateInfo stream_memory_info{
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .pNext = make_dedicated ? &dedicated_info : nullptr,

@@ -1189,6 +1189,14 @@ u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
     return device.GetDeviceLocalMemory();
 }

+u64 TextureCacheRuntime::GetDeviceMemoryUsage() const {
+    return device.GetDeviceMemoryUsage();
+}
+
+bool TextureCacheRuntime::CanReportMemoryUsage() const {
+    return device.CanReportMemoryUsage();
+}
+
 void TextureCacheRuntime::TickFrame() {}

 Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu_addr_,

@@ -1203,6 +1211,7 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
         } else {
             flags |= VideoCommon::ImageFlagBits::Converted;
         }
+        flags |= VideoCommon::ImageFlagBits::CostlyLoad;
     }
     if (runtime->device.HasDebuggingToolAttached()) {
         original_image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());

@@ -55,6 +55,10 @@ public:
     u64 GetDeviceLocalMemory() const;

+    u64 GetDeviceMemoryUsage() const;
+
+    bool CanReportMemoryUsage() const;
+
     void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
                    const Region2D& dst_region, const Region2D& src_region,
                    Tegra::Engines::Fermi2D::Filter filter,

@@ -33,11 +33,12 @@ enum class ImageFlagBits : u32 {
                            ///< garbage collection priority
     Alias = 1 << 11,       ///< This image has aliases and has priority on garbage
                            ///< collection
+    CostlyLoad = 1 << 12,  ///< Protected from low-tier GC as it is costly to load back.

     // Rescaler
-    Rescaled = 1 << 12,
-    CheckingRescalable = 1 << 13,
-    IsRescalable = 1 << 14,
+    Rescaled = 1 << 13,
+    CheckingRescalable = 1 << 14,
+    IsRescalable = 1 << 15,
 };
 DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)

@@ -50,14 +50,20 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
     void(slot_samplers.insert(runtime, sampler_descriptor));

     if constexpr (HAS_DEVICE_MEMORY_INFO) {
-        const auto device_memory = runtime.GetDeviceLocalMemory();
-        const u64 possible_expected_memory = (device_memory * 4) / 10;
-        const u64 possible_critical_memory = (device_memory * 7) / 10;
-        expected_memory = std::max(possible_expected_memory, DEFAULT_EXPECTED_MEMORY - 256_MiB);
-        critical_memory = std::max(possible_critical_memory, DEFAULT_CRITICAL_MEMORY - 512_MiB);
-        minimum_memory = 0;
+        const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
+        const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
+        const s64 min_spacing_critical = device_memory - 1_GiB;
+        const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
+        const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
+        const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
+        expected_memory = static_cast<u64>(
+            std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected),
+                     DEFAULT_EXPECTED_MEMORY));
+        critical_memory = static_cast<u64>(
+            std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical),
+                     DEFAULT_CRITICAL_MEMORY));
+        minimum_memory = static_cast<u64>((device_memory - mem_threshold) / 2);
     } else {
+        // On OpenGL we can be more conservative as the driver takes care of memory.
         expected_memory = DEFAULT_EXPECTED_MEMORY + 512_MiB;
         critical_memory = DEFAULT_CRITICAL_MEMORY + 1_GiB;
         minimum_memory = 0;
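For an assumed 8 GiB device this works out to roughly minimum_memory = 2 GiB, expected_memory = 5.6 GiB and critical_memory = 6.8 GiB. The hypothetical helper below (not from the patch) spells out which garbage-collection tier each threshold selects, matching the LRU windows and iteration counts used by RunGarbageCollector() in the next hunk:

```cpp
// Hypothetical helper (not from the patch): which GC tier the texture cache
// ends up in for a given usage, using thresholds such as the ~2 GiB / ~5.6 GiB /
// ~6.8 GiB derived above for an 8 GiB device.
#include <cstdint>

enum class GcTier { None, Normal, HighPriority, Aggressive };

inline GcTier PickTier(std::uint64_t used, std::uint64_t minimum, std::uint64_t expected,
                       std::uint64_t critical) {
    if (used <= minimum) {
        return GcTier::None; // TickFrame() does not call RunGarbageCollector() at all
    }
    if (used >= critical) {
        return GcTier::Aggressive; // 10-tick LRU window, up to 40 deletions
    }
    if (used >= expected) {
        return GcTier::HighPriority; // 25-tick window, up to 20 deletions
    }
    return GcTier::Normal; // 50-tick window, up to 10 deletions
}
```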
@@ -66,18 +72,21 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&

 template <class P>
 void TextureCache<P>::RunGarbageCollector() {
-    const bool high_priority_mode = total_used_memory >= expected_memory;
-    const bool aggressive_mode = total_used_memory >= critical_memory;
-    const u64 ticks_to_destroy = aggressive_mode ? 10ULL : high_priority_mode ? 25ULL : 100ULL;
-    size_t num_iterations = aggressive_mode ? 300 : (high_priority_mode ? 50 : 10);
-    const auto clean_up = [this, &num_iterations, high_priority_mode](ImageId image_id) {
+    bool high_priority_mode = total_used_memory >= expected_memory;
+    bool aggressive_mode = total_used_memory >= critical_memory;
+    const u64 ticks_to_destroy = aggressive_mode ? 10ULL : high_priority_mode ? 25ULL : 50ULL;
+    size_t num_iterations = aggressive_mode ? 40 : (high_priority_mode ? 20 : 10);
+    const auto clean_up = [this, &num_iterations, &high_priority_mode,
+                           &aggressive_mode](ImageId image_id) {
         if (num_iterations == 0) {
             return true;
         }
         --num_iterations;
         auto& image = slot_images[image_id];
-        const bool must_download = image.IsSafeDownload();
-        if (!high_priority_mode && must_download) {
+        const bool must_download =
+            image.IsSafeDownload() && False(image.flags & ImageFlagBits::BadOverlap);
+        if (!high_priority_mode &&
+            (must_download || True(image.flags & ImageFlagBits::CostlyLoad))) {
             return false;
         }
         if (must_download) {
@@ -92,6 +101,18 @@ void TextureCache<P>::RunGarbageCollector() {
         }
         UnregisterImage(image_id);
         DeleteImage(image_id, image.scale_tick > frame_tick + 5);
+        if (total_used_memory < critical_memory) {
+            if (aggressive_mode) {
+                // Sink the aggressiveness.
+                num_iterations >>= 2;
+                aggressive_mode = false;
+                return false;
+            }
+            if (high_priority_mode && total_used_memory < expected_memory) {
+                num_iterations >>= 1;
+                high_priority_mode = false;
+            }
+        }
         return false;
     };
     lru_cache.ForEachItemBelow(frame_tick - ticks_to_destroy, clean_up);
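The pass can now also de-escalate while it runs: as soon as deletions bring usage back under the critical threshold the remaining iteration budget is quartered and aggressive mode is dropped, and once usage falls back under the expected threshold the budget is halved again. The standalone sketch below (hypothetical types, not the patch itself) restates that control flow:

```cpp
// Hypothetical, standalone restatement of the mid-pass de-escalation above.
#include <cstddef>

enum class GcTier { Normal, HighPriority, Aggressive };

struct GcPass {
    std::size_t num_iterations;
    GcTier tier;
};

// Called after each deletion with the cache's current standing against the
// two thresholds; mirrors the `total_used_memory < critical_memory` block.
inline void Deescalate(GcPass& pass, bool below_critical, bool below_expected) {
    if (!below_critical) {
        return;
    }
    if (pass.tier == GcTier::Aggressive) {
        pass.num_iterations >>= 2; // e.g. 40 budgeted deletions shrink to 10
        pass.tier = GcTier::HighPriority;
        return;
    }
    if (pass.tier == GcTier::HighPriority && below_expected) {
        pass.num_iterations >>= 1; // e.g. 20 budgeted deletions shrink to 10
        pass.tier = GcTier::Normal;
    }
}
```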
@@ -99,6 +120,10 @@ void TextureCache<P>::RunGarbageCollector() {

 template <class P>
 void TextureCache<P>::TickFrame() {
+    // If we can obtain the memory info, use it instead of the estimate.
+    if (runtime.CanReportMemoryUsage()) {
+        total_used_memory = runtime.GetDeviceMemoryUsage();
+    }
     if (total_used_memory > minimum_memory) {
         RunGarbageCollector();
     }

@@ -106,6 +131,7 @@ void TextureCache<P>::TickFrame() {
     sentenced_framebuffers.Tick();
     sentenced_image_view.Tick();
     runtime.TickFrame();
+    critical_gc = 0;
     ++frame_tick;
 }

@@ -1052,6 +1078,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
     for (const ImageId overlap_id : overlap_ids) {
         Image& overlap = slot_images[overlap_id];
+        if (True(overlap.flags & ImageFlagBits::GpuModified)) {
+            new_image.flags |= ImageFlagBits::GpuModified;
+            new_image.modification_tick =
+                std::max(overlap.modification_tick, new_image.modification_tick);
+        }
         if (overlap.info.num_samples != new_image.info.num_samples) {
             LOG_WARNING(HW_GPU, "Copying between images with different samples is not implemented");
         } else {

@@ -1414,6 +1445,10 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
         tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
     }
     total_used_memory += Common::AlignUp(tentative_size, 1024);
+    if (total_used_memory > critical_memory && critical_gc < GC_EMERGENCY_COUNTS) {
+        RunGarbageCollector();
+        critical_gc++;
+    }
     image.lru_index = lru_cache.Insert(image_id, frame_tick);

     ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,

@@ -1704,6 +1739,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
             most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
             aliased_images.push_back(&aliased);
             any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
+            if (True(aliased_image.flags & ImageFlagBits::GpuModified)) {
+                image.flags |= ImageFlagBits::GpuModified;
+            }
         }
     }
     if (aliased_images.empty()) {

@@ -59,8 +59,10 @@ class TextureCache {
     /// True when the API can provide info about the memory of the device.
     static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;

-    static constexpr u64 DEFAULT_EXPECTED_MEMORY = 1_GiB;
-    static constexpr u64 DEFAULT_CRITICAL_MEMORY = 2_GiB;
+    static constexpr s64 TARGET_THRESHOLD = 4_GiB;
+    static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
+    static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
+    static constexpr size_t GC_EMERGENCY_COUNTS = 2;

     using Runtime = typename P::Runtime;
     using Image = typename P::Image;

@@ -372,6 +374,7 @@ private:
     u64 minimum_memory;
     u64 expected_memory;
     u64 critical_memory;
+    size_t critical_gc;

     SlotVector<Image> slot_images;
     SlotVector<ImageMapView> slot_map_views;

@@ -12,12 +12,14 @@
 #include <vector>

 #include "common/assert.h"
+#include "common/literals.h"
 #include "common/settings.h"
 #include "video_core/vulkan_common/nsight_aftermath_tracker.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"

 namespace Vulkan {
+using namespace Common::Literals;
 namespace {
 namespace Alternatives {
 constexpr std::array STENCIL8_UINT{

@@ -596,6 +598,11 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
     }
     logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld);

+    is_integrated = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+    is_virtual = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
+    is_non_gpu = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_OTHER ||
+                 properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU;
+
     CollectPhysicalMemoryInfo();
     CollectTelemetryParameters();
     CollectToolingInfo();

@@ -985,6 +992,7 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
     test(has_khr_swapchain_mutable_format, VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME,
          false);
     test(has_ext_line_rasterization, VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME, false);
+    test(ext_memory_budget, VK_EXT_MEMORY_BUDGET_EXTENSION_NAME, true);
     if (Settings::values.enable_nsight_aftermath) {
         test(nv_device_diagnostics_config, VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME,
              true);

@@ -997,7 +1005,7 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
     VkPhysicalDeviceFeatures2KHR features{};
     features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;

-    VkPhysicalDeviceProperties2KHR physical_properties;
+    VkPhysicalDeviceProperties2KHR physical_properties{};
     physical_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;

     if (has_khr_shader_float16_int8) {

@@ -1267,15 +1275,50 @@ void Device::CollectTelemetryParameters() {
     vendor_name = driver.driverName;
 }

+u64 Device::GetDeviceMemoryUsage() const {
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget;
+    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;
+    budget.pNext = nullptr;
+    physical.GetMemoryProperties(&budget);
+    u64 result{};
+    for (const size_t heap : valid_heap_memory) {
+        result += budget.heapUsage[heap];
+    }
+    return result;
+}
+
 void Device::CollectPhysicalMemoryInfo() {
-    const auto mem_properties = physical.GetMemoryProperties();
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
+    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;
+    const auto mem_info = physical.GetMemoryProperties(ext_memory_budget ? &budget : nullptr);
+    const auto& mem_properties = mem_info.memoryProperties;
     const size_t num_properties = mem_properties.memoryHeapCount;
     device_access_memory = 0;
+    u64 device_initial_usage = 0;
+    u64 local_memory = 0;
     for (size_t element = 0; element < num_properties; ++element) {
-        if ((mem_properties.memoryHeaps[element].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) {
-            device_access_memory += mem_properties.memoryHeaps[element].size;
-        }
+        const bool is_heap_local =
+            (mem_properties.memoryHeaps[element].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
+        if (!is_integrated && !is_heap_local) {
+            continue;
+        }
+        valid_heap_memory.push_back(element);
+        if (is_heap_local) {
+            local_memory += mem_properties.memoryHeaps[element].size;
+        }
+        if (ext_memory_budget) {
+            device_initial_usage += budget.heapUsage[element];
+            device_access_memory += budget.heapBudget[element];
+            continue;
+        }
+        device_access_memory += mem_properties.memoryHeaps[element].size;
     }
+    if (!is_integrated) {
+        return;
+    }
+    const s64 available_memory = static_cast<s64>(device_access_memory - device_initial_usage);
+    device_access_memory = static_cast<u64>(std::max<s64>(
+        std::min<s64>(available_memory - 8_GiB, 4_GiB), static_cast<s64>(local_memory)));
 }

 void Device::CollectToolingInfo() {
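When VK_EXT_memory_budget is available, the per-heap budget and usage come from chaining VkPhysicalDeviceMemoryBudgetPropertiesEXT onto vkGetPhysicalDeviceMemoryProperties2, which is what the wrapper's new GetMemoryProperties(next_structures) call hides. A minimal raw-API sketch of the same query (illustrative only; error handling and extension checks omitted):

```cpp
// Minimal sketch: sum the budget and usage of device-local heaps via
// VK_EXT_memory_budget, assuming the extension is supported by the device.
#include <cstdint>

#include <vulkan/vulkan.h>

struct HeapTotals {
    std::uint64_t budget = 0;
    std::uint64_t usage = 0;
};

HeapTotals QueryDeviceLocalBudget(VkPhysicalDevice physical_device) {
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;

    VkPhysicalDeviceMemoryProperties2 props{};
    props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
    props.pNext = &budget; // the budget struct is filled through the pNext chain

    vkGetPhysicalDeviceMemoryProperties2(physical_device, &props);

    HeapTotals totals{};
    for (std::uint32_t i = 0; i < props.memoryProperties.memoryHeapCount; ++i) {
        const bool device_local =
            (props.memoryProperties.memoryHeaps[i].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
        if (!device_local) {
            continue;
        }
        totals.budget += budget.heapBudget[i];
        totals.usage += budget.heapUsage[i];
    }
    return totals;
}
```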

@@ -341,6 +341,12 @@ public:
         return device_access_memory;
     }

+    bool CanReportMemoryUsage() const {
+        return ext_memory_budget;
+    }
+
+    u64 GetDeviceMemoryUsage() const;
+
     u32 GetSetsPerPool() const {
         return sets_per_pool;
     }

@@ -421,6 +427,9 @@ private:
     bool is_topology_list_restart_supported{}; ///< Support for primitive restart with list
                                                ///< topologies.
     bool is_patch_list_restart_supported{};    ///< Support for primitive restart with list patch.
+    bool is_integrated{};                      ///< Is GPU an iGPU.
+    bool is_virtual{};                         ///< Is GPU a virtual GPU.
+    bool is_non_gpu{};                         ///< Is SoftwareRasterizer, FPGA, non-GPU device.
     bool nv_viewport_swizzle{};                ///< Support for VK_NV_viewport_swizzle.
     bool nv_viewport_array2{};                 ///< Support for VK_NV_viewport_array2.
     bool nv_geometry_shader_passthrough{};     ///< Support for VK_NV_geometry_shader_passthrough.

@@ -445,6 +454,7 @@ private:
     bool ext_shader_atomic_int64{};          ///< Support for VK_KHR_shader_atomic_int64.
     bool ext_conservative_rasterization{};   ///< Support for VK_EXT_conservative_rasterization.
     bool ext_provoking_vertex{};             ///< Support for VK_EXT_provoking_vertex.
+    bool ext_memory_budget{};                ///< Support for VK_EXT_memory_budget.
     bool nv_device_diagnostics_config{};     ///< Support for VK_NV_device_diagnostics_config.
     bool has_broken_cube_compatibility{};    ///< Has broken cube compatiblity bit
     bool has_renderdoc{};                    ///< Has RenderDoc attached

@@ -456,6 +466,7 @@ private:
     // Telemetry parameters
     std::string vendor_name;                       ///< Device's driver name.
     std::vector<std::string> supported_extensions; ///< Reported Vulkan extensions.
+    std::vector<size_t> valid_heap_memory;         ///< Heaps used.

     /// Format properties dictionary.
     std::unordered_map<VkFormat, VkFormatProperties> format_properties;

@@ -227,7 +227,7 @@ void MemoryCommit::Release() {
 }

 MemoryAllocator::MemoryAllocator(const Device& device_, bool export_allocations_)
-    : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()},
+    : device{device_}, properties{device_.GetPhysical().GetMemoryProperties().memoryProperties},
       export_allocations{export_allocations_},
       buffer_image_granularity{
           device_.GetPhysical().GetProperties().limits.bufferImageGranularity} {}

@@ -237,8 +237,8 @@ bool Load(VkInstance instance, InstanceDispatch& dld) noexcept {
     return X(vkCreateDevice) && X(vkDestroyDevice) && X(vkDestroyDevice) &&
            X(vkEnumerateDeviceExtensionProperties) && X(vkEnumeratePhysicalDevices) &&
            X(vkGetDeviceProcAddr) && X(vkGetPhysicalDeviceFormatProperties) &&
-           X(vkGetPhysicalDeviceMemoryProperties) && X(vkGetPhysicalDeviceProperties) &&
-           X(vkGetPhysicalDeviceQueueFamilyProperties);
+           X(vkGetPhysicalDeviceMemoryProperties) && X(vkGetPhysicalDeviceMemoryProperties2) &&
+           X(vkGetPhysicalDeviceProperties) && X(vkGetPhysicalDeviceQueueFamilyProperties);
 #undef X
 }

@@ -926,9 +926,12 @@ std::vector<VkPresentModeKHR> PhysicalDevice::GetSurfacePresentModesKHR(
     return modes;
 }

-VkPhysicalDeviceMemoryProperties PhysicalDevice::GetMemoryProperties() const noexcept {
-    VkPhysicalDeviceMemoryProperties properties;
-    dld->vkGetPhysicalDeviceMemoryProperties(physical_device, &properties);
+VkPhysicalDeviceMemoryProperties2 PhysicalDevice::GetMemoryProperties(
+    void* next_structures) const noexcept {
+    VkPhysicalDeviceMemoryProperties2 properties{};
+    properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
+    properties.pNext = next_structures;
+    dld->vkGetPhysicalDeviceMemoryProperties2(physical_device, &properties);
     return properties;
 }

@@ -172,6 +172,7 @@ struct InstanceDispatch {
     PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR{};
     PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties{};
     PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties{};
+    PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2{};
     PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties{};
     PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR{};
     PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties{};

@@ -950,7 +951,8 @@ public:
     std::vector<VkPresentModeKHR> GetSurfacePresentModesKHR(VkSurfaceKHR) const;

-    VkPhysicalDeviceMemoryProperties GetMemoryProperties() const noexcept;
+    VkPhysicalDeviceMemoryProperties2 GetMemoryProperties(
+        void* next_structures = nullptr) const noexcept;

 private:
     VkPhysicalDevice physical_device = nullptr;