@@ -164,11 +164,16 @@ public:
     /// Pop asynchronous downloads
     void PopAsyncFlushes();
 
-    [[nodiscard]] bool DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount);
+    bool DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount);
+
+    bool DMAClear(GPUVAddr src_address, u64 amount, u32 value);
 
     /// Return true when a CPU region is modified from the GPU
     [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
 
+    /// Return true when a region is registered on the cache
+    [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);
+
     /// Return true when a CPU region is modified from the CPU
     [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
 
@@ -324,6 +329,8 @@ private:
     [[nodiscard]] bool HasFastUniformBufferBound(size_t stage, u32 binding_index) const noexcept;
 
+    void ClearDownload(IntervalType subtract_interval);
+
     VideoCore::RasterizerInterface& rasterizer;
     Tegra::Engines::Maxwell3D& maxwell3d;
     Tegra::Engines::KeplerCompute& kepler_compute;
@@ -462,6 +469,14 @@ void BufferCache<P>::DownloadMemory(VAddr cpu_addr, u64 size) {
     });
 }
 
+template <class P>
+void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
+    uncommitted_ranges.subtract(subtract_interval);
+    for (auto& interval_set : committed_ranges) {
+        interval_set.subtract(subtract_interval);
+    }
+}
+
 template <class P>
 bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
     const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address);
@@ -469,17 +484,14 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     if (!cpu_src_address || !cpu_dest_address) {
         return false;
     }
-    const bool source_dirty = IsRegionGpuModified(*cpu_src_address, amount);
-    const bool dest_dirty = IsRegionGpuModified(*cpu_dest_address, amount);
+    const bool source_dirty = IsRegionRegistered(*cpu_src_address, amount);
+    const bool dest_dirty = IsRegionRegistered(*cpu_dest_address, amount);
     if (!source_dirty && !dest_dirty) {
         return false;
     }
 
     const IntervalType subtract_interval{*cpu_dest_address, *cpu_dest_address + amount};
-    uncommitted_ranges.subtract(subtract_interval);
-    for (auto& interval_set : committed_ranges) {
-        interval_set.subtract(subtract_interval);
-    }
+    ClearDownload(subtract_interval);
 
     BufferId buffer_a;
     BufferId buffer_b;
@@ -510,12 +522,13 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     ForEachWrittenRange(*cpu_src_address, amount, mirror);
     // This subtraction in this order is important for overlapping copies.
     common_ranges.subtract(subtract_interval);
+    bool atleast_1_download = tmp_intervals.size() != 0;
     for (const IntervalType add_interval : tmp_intervals) {
         common_ranges.add(add_interval);
     }
 
     runtime.CopyBuffer(dest_buffer, src_buffer, copies);
-    if (source_dirty) {
+    if (atleast_1_download) {
         dest_buffer.MarkRegionAsGpuModified(*cpu_dest_address, amount);
     }
     std::vector<u8> tmp_buffer(amount);
@@ -524,6 +537,33 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     return true;
 }
 
+template <class P>
+bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
+    const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address);
+    if (!cpu_dst_address) {
+        return false;
+    }
+    const bool dest_dirty = IsRegionRegistered(*cpu_dst_address, amount);
+    if (!dest_dirty) {
+        return false;
+    }
+
+    const size_t size = amount * sizeof(u32);
+    const IntervalType subtract_interval{*cpu_dst_address, *cpu_dst_address + size};
+    ClearDownload(subtract_interval);
+    common_ranges.subtract(subtract_interval);
+
+    BufferId buffer;
+    do {
+        has_deleted_buffers = false;
+        buffer = FindBuffer(*cpu_dst_address, static_cast<u32>(size));
+    } while (has_deleted_buffers);
+    auto& dest_buffer = slot_buffers[buffer];
+    const u32 offset = static_cast<u32>(*cpu_dst_address - dest_buffer.CpuAddr());
+    runtime.ClearBuffer(dest_buffer, offset, size, value);
+    return true;
+}
+
 template <class P>
 void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
                                                u32 size) {
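
Editor's note: the DMAClear path above relies on interval-set subtraction (ClearDownload plus the common_ranges bookkeeping) to drop pending downloads that the clear makes obsolete. Below is a minimal standalone sketch of that subtraction behaviour, assuming the cache's IntervalSet/IntervalType aliases are the boost::icl types used elsewhere in buffer_cache.h (an assumption; the aliases are not part of this diff, and the example addresses are made up).

    #include <cstdint>
    #include <iostream>
    #include <boost/icl/interval_set.hpp>

    int main() {
        // Hypothetical stand-ins for the cache's IntervalSet/IntervalType aliases.
        using IntervalSet = boost::icl::interval_set<std::uint64_t>;
        using IntervalType = IntervalSet::interval_type;

        IntervalSet pending_downloads;
        // A region previously marked as needing a download back to the CPU.
        pending_downloads.add(IntervalType::right_open(0x1000, 0x3000));

        // A DMA clear overwrites 0x1800..0x2800, so that sub-range no longer needs a download.
        pending_downloads -= IntervalType::right_open(0x1800, 0x2800);

        for (const auto& interval : pending_downloads) {
            std::cout << std::hex << interval.lower() << "-" << interval.upper() << '\n';
        }
        // Prints 1000-1800 and 2800-3000: the cleared span has been carved out of the set.
        return 0;
    }

The diff applies this same subtraction to uncommitted_ranges, every set in committed_ranges (via ClearDownload), and common_ranges.
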
@@ -781,6 +821,27 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
     return false;
 }
 
+template <class P>
+bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
+    const VAddr end_addr = addr + size;
+    const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE);
+    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+        const BufferId buffer_id = page_table[page];
+        if (!buffer_id) {
+            ++page;
+            continue;
+        }
+        Buffer& buffer = slot_buffers[buffer_id];
+        const VAddr buf_start_addr = buffer.CpuAddr();
+        const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes();
+        if (buf_start_addr < end_addr && addr < buf_end_addr) {
+            return true;
+        }
+        page = Common::DivCeil(end_addr, PAGE_SIZE);
+    }
+    return false;
+}
+
 template <class P>
 bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
     const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
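
Editor's note: the condition buf_start_addr < end_addr && addr < buf_end_addr in IsRegionRegistered is the usual half-open range intersection test. A tiny self-contained illustration follows; the helper name is hypothetical and not part of the cache.

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper showing the same predicate: two half-open ranges
    // [a_begin, a_end) and [b_begin, b_end) overlap iff each starts before the other ends.
    constexpr bool RangesOverlap(std::uint64_t a_begin, std::uint64_t a_end,
                                 std::uint64_t b_begin, std::uint64_t b_end) {
        return a_begin < b_end && b_begin < a_end;
    }

    int main() {
        assert(RangesOverlap(0x1000, 0x2000, 0x1FFF, 0x3000));  // one byte of overlap counts
        assert(!RangesOverlap(0x1000, 0x2000, 0x2000, 0x3000)); // merely touching does not
        return 0;
    }
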
@@ -1425,6 +1486,7 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si
         const VAddr end_address = start_address + range_size;
         ForEachWrittenRange(start_address, range_size, add_download);
         const IntervalType subtract_interval{start_address, end_address};
+        ClearDownload(subtract_interval);
         common_ranges.subtract(subtract_interval);
     });
     if (total_size_bytes == 0) {