video_core: Refactor GPU interface (#7272)
* video_core: Refactor GPU interface * citra_qt: Better debug widget lifetime
parent
602f4f60d8
commit
2bb7f89c30
@ -0,0 +1,110 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/bit_field.h"
|
||||
|
||||
namespace Service::GSP {
|
||||
|
||||
/// GSP command ID, stored in the low byte of the first word of a Command (see Command::id).
enum class CommandId : u32 {
    RequestDma = 0x00,      ///< DMA copy (parameters: DmaCommand)
    SubmitCmdList = 0x01,   ///< Submit a GPU command list (parameters: SubmitCmdListCommand)
    MemoryFill = 0x02,      ///< Constant fill of up to two ranges (parameters: MemoryFillCommand)
    DisplayTransfer = 0x03, ///< Converting framebuffer transfer (parameters: DisplayTransferCommand)
    TextureCopy = 0x04,     ///< Raw copy with input/output gaps (parameters: TextureCopyCommand)
    CacheFlush = 0x05,      ///< Flush up to three memory regions (parameters: CacheFlushCommand)
};
|
||||
|
||||
struct DmaCommand {
|
||||
u32 source_address;
|
||||
u32 dest_address;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
struct SubmitCmdListCommand {
|
||||
u32 address;
|
||||
u32 size;
|
||||
u32 flags;
|
||||
u32 unused[3];
|
||||
u32 do_flush;
|
||||
};
|
||||
|
||||
struct MemoryFillCommand {
|
||||
u32 start1;
|
||||
u32 value1;
|
||||
u32 end1;
|
||||
|
||||
u32 start2;
|
||||
u32 value2;
|
||||
u32 end2;
|
||||
|
||||
u16 control1;
|
||||
u16 control2;
|
||||
};
|
||||
|
||||
struct DisplayTransferCommand {
|
||||
u32 in_buffer_address;
|
||||
u32 out_buffer_address;
|
||||
u32 in_buffer_size;
|
||||
u32 out_buffer_size;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
struct TextureCopyCommand {
|
||||
u32 in_buffer_address;
|
||||
u32 out_buffer_address;
|
||||
u32 size;
|
||||
u32 in_width_gap;
|
||||
u32 out_width_gap;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
struct CacheFlushCommand {
|
||||
struct {
|
||||
u32 address;
|
||||
u32 size;
|
||||
} regions[3];
|
||||
};
|
||||
|
||||
/// GSP command: one 0x20-byte entry in the shared-memory GX command buffer.
struct Command {
    /// Command ID (low byte of the first word); selects the active union member below.
    BitField<0, 8, CommandId> id;
    /// 0x1C bytes of command-specific parameters, interpreted according to `id`.
    union {
        DmaCommand dma_request;
        SubmitCmdListCommand submit_gpu_cmdlist;
        MemoryFillCommand memory_fill;
        DisplayTransferCommand display_transfer;
        TextureCopyCommand texture_copy;
        CacheFlushCommand cache_flush;
        std::array<u8, 0x1C> raw_data; ///< Raw byte view of the parameter area
    };
};
static_assert(sizeof(Command) == 0x20, "Command struct has incorrect size");
|
||||
|
||||
/// GSP shared memory GX command buffer header
struct CommandBuffer {
    union {
        u32 hex;

        // Current command index. This index is updated by GSP module after loading the command
        // data, right before the command is processed. When this index is updated by GSP module,
        // the total commands field is decreased by one as well.
        BitField<0, 8, u32> index;

        // Total commands to process, must not be value 0 when GSP module handles commands. This
        // must be <=15 when writing a command to shared memory. This is incremented by the
        // application when writing a command to shared memory, after increasing this value
        // TriggerCmdReqQueue is only used if this field is value 1.
        BitField<8, 8, u32> number_commands;
    };

    // Unknown/unused header words; pad the header out to 0x20 bytes.
    u32 unk[7];

    // Queue of up to 15 commands (0x20 bytes each), giving a total buffer size of 0x200.
    Command commands[0xF];
};
static_assert(sizeof(CommandBuffer) == 0x200, "CommandBuffer struct has incorrect size");
|
||||
|
||||
} // namespace Service::GSP
|
@ -0,0 +1,42 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Service::GSP {
|
||||
|
||||
/// GSP interrupt ID
enum class InterruptId : u8 {
    PSC0 = 0x00, ///< Raised when the first memory fill unit completes
    PSC1 = 0x01, ///< Raised when the second memory fill unit completes
    PDC0 = 0x02, ///< VBlank interrupt (top screen assumed -- see hwtest TODO in the GPU code)
    PDC1 = 0x03, ///< VBlank interrupt (bottom screen assumed)
    PPF = 0x04,  ///< Raised when a display transfer / texture copy completes
    P3D = 0x05,  ///< Presumably raised on 3D command list completion -- not signalled here
    DMA = 0x06,  ///< Presumably raised on DMA completion -- not signalled here
};
|
||||
|
||||
/// GSP thread interrupt relay queue (one 0x40-byte structure per client thread in shared memory)
struct InterruptRelayQueue {
    // Index of last interrupt in the queue
    u8 index;
    // Number of interrupts remaining to be processed by the userland code
    u8 number_interrupts;
    // Error code - zero on success, otherwise an error has occurred
    u8 error_code;
    u8 padding1;

    // Counts of PDC0/PDC1 interrupts that could not be queued
    u32 missed_PDC0;
    u32 missed_PDC1;

    InterruptId slot[0x34]; ///< Interrupt ID slots (0x34 one-byte entries, filling out 0x40 bytes)
};
static_assert(sizeof(InterruptRelayQueue) == 0x40, "InterruptRelayQueue struct has incorrect size");
|
||||
|
||||
using InterruptHandler = std::function<void(InterruptId)>;
|
||||
|
||||
} // namespace Service::GSP
|
@ -1,572 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include <numeric>
|
||||
#include <type_traits>
|
||||
#include "common/alignment.h"
|
||||
#include "common/color.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/hle/service/gsp/gsp.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/hw/hw.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/tracer/recorder.h"
|
||||
#include "video_core/command_processor.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/utils.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace GPU {
|
||||
|
||||
Regs g_regs;
|
||||
Memory::MemorySystem* g_memory;
|
||||
|
||||
/// Event id for CoreTiming
|
||||
static Core::TimingEventType* vblank_event;
|
||||
|
||||
template <typename T>
|
||||
inline void Read(T& var, const u32 raw_addr) {
|
||||
u32 addr = raw_addr - HW::VADDR_GPU;
|
||||
u32 index = addr / 4;
|
||||
|
||||
// Reads other than u32 are untested, so I'd rather have them abort than silently fail
|
||||
if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
|
||||
LOG_ERROR(HW_GPU, "unknown Read{} @ {:#010X}", sizeof(var) * 8, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
var = g_regs[addr / 4];
|
||||
}
|
||||
|
||||
/**
 * Decodes one pixel from raw framebuffer memory into an RGBA color.
 * @param input_format Source pixel format.
 * @param src_pixel Pointer to the first byte of the pixel.
 * @return Decoded color; opaque black-transparent {0,0,0,0} on unknown formats.
 */
static Common::Vec4<u8> DecodePixel(Regs::PixelFormat input_format, const u8* src_pixel) {
    switch (input_format) {
    case Regs::PixelFormat::RGBA8:
        return Common::Color::DecodeRGBA8(src_pixel);

    case Regs::PixelFormat::RGB8:
        return Common::Color::DecodeRGB8(src_pixel);

    case Regs::PixelFormat::RGB565:
        return Common::Color::DecodeRGB565(src_pixel);

    case Regs::PixelFormat::RGB5A1:
        return Common::Color::DecodeRGB5A1(src_pixel);

    case Regs::PixelFormat::RGBA4:
        return Common::Color::DecodeRGBA4(src_pixel);

    default:
        // Cast the scoped enum for formatting, matching the destination-format log
        // in DisplayTransfer (fmt cannot format enum classes directly).
        LOG_ERROR(HW_GPU, "Unknown source framebuffer format {:x}",
                  static_cast<u32>(input_format));
        return {0, 0, 0, 0};
    }
}
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
|
||||
MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));
|
||||
|
||||
/// Performs a GX memory fill: writes a constant 16/24/32-bit pattern over [start, end).
/// Tries the hardware-accelerated rasterizer path first; otherwise fills on the CPU,
/// invalidating the rasterizer cache for the written range.
static void MemoryFill(const Regs::MemoryFillConfig& config) {
    const PAddr start_addr = config.GetStartAddress();
    const PAddr end_addr = config.GetEndAddress();

    // TODO: do hwtest with these cases
    if (!g_memory->IsValidPhysicalAddress(start_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid start address {:#010X}", start_addr);
        return;
    }

    if (!g_memory->IsValidPhysicalAddress(end_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid end address {:#010X}", end_addr);
        return;
    }

    if (end_addr <= start_addr) {
        LOG_CRITICAL(HW_GPU, "invalid memory range from {:#010X} to {:#010X}", start_addr,
                     end_addr);
        return;
    }

    u8* start = g_memory->GetPhysicalPointer(start_addr);
    u8* end = g_memory->GetPhysicalPointer(end_addr);

    if (VideoCore::g_renderer->Rasterizer()->AccelerateFill(config))
        return;

    Memory::RasterizerInvalidateRegion(config.GetStartAddress(),
                                       config.GetEndAddress() - config.GetStartAddress());

    if (config.fill_24bit) {
        // fill with 24-bit values
        for (u8* ptr = start; ptr < end; ptr += 3) {
            ptr[0] = config.value_24bit_r;
            ptr[1] = config.value_24bit_g;
            ptr[2] = config.value_24bit_b;
        }
    } else if (config.fill_32bit) {
        // fill with 32-bit values
        if (end > start) {
            u32 value = config.value_32bit;
            std::size_t len = (end - start) / sizeof(u32);
            // memcpy per element avoids alignment assumptions on the destination.
            for (std::size_t i = 0; i < len; ++i)
                std::memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
        }
    } else {
        // fill with 16-bit values
        u16 value_16bit = config.value_16bit.Value();
        for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
            std::memcpy(ptr, &value_16bit, sizeof(u16));
    }
}
|
||||
|
||||
/// Performs a software display transfer: reads pixels from the input buffer, optionally
/// downscales them with a box filter, converts between linear and swizzled (tiled) layouts,
/// converts the pixel format, and writes the result to the output buffer. Used as fallback
/// when the rasterizer cannot accelerate the transfer.
static void DisplayTransfer(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with these cases
    if (!g_memory->IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address {:#010X}", src_addr);
        return;
    }

    if (!g_memory->IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address {:#010X}", dst_addr);
        return;
    }

    if (config.input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width");
        return;
    }

    if (config.input_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero input height");
        return;
    }

    if (config.output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width");
        return;
    }

    if (config.output_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero output height");
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config))
        return;

    // Using flip_vertically alongside crop_input_lines produces skewed output on hardware.
    // We have to emulate this because some games rely on this behaviour to render correctly.
    if (config.flip_vertically && config.crop_input_lines &&
        config.input_width > config.output_width) {
        dst_addr += (config.input_width - config.output_width) * (config.output_height - 1) *
                    GPU::Regs::BytesPerPixel(config.output_format);
    }

    u8* src_pointer = g_memory->GetPhysicalPointer(src_addr);
    u8* dst_pointer = g_memory->GetPhysicalPointer(dst_addr);

    if (config.scaling > config.ScaleXY) {
        LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode {}",
                     config.scaling.Value());
        UNIMPLEMENTED();
        return;
    }

    if (config.input_linear && config.scaling != config.NoScale) {
        LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
        UNIMPLEMENTED();
        return;
    }

    // Scale factors expressed as shift amounts: output is input >> scale in each axis.
    int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
    int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;

    u32 output_width = config.output_width >> horizontal_scale;
    u32 output_height = config.output_height >> vertical_scale;

    u32 input_size =
        config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
    u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
    Memory::RasterizerInvalidateRegion(config.GetPhysicalOutputAddress(), output_size);

    for (u32 y = 0; y < output_height; ++y) {
        for (u32 x = 0; x < output_width; ++x) {
            Common::Vec4<u8> src_color;

            // Calculate the [x,y] position of the input image
            // based on the current output position and the scale
            u32 input_x = x << horizontal_scale;
            u32 input_y = y << vertical_scale;

            u32 output_y;
            if (config.flip_vertically) {
                // Flip the y value of the output data,
                // we do this after calculating the [x,y] position of the input image
                // to account for the scaling options.
                output_y = output_height - y - 1;
            } else {
                output_y = y;
            }

            u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
            u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
            u32 src_offset;
            u32 dst_offset;

            // Compute byte offsets for the four linear/tiled input-output combinations.
            // Tiled addressing uses 8x8 Morton order within each tile row ("coarse" y).
            if (config.input_linear) {
                if (!config.dont_swizzle) {
                    // Interpret the input as linear and the output as tiled
                    u32 coarse_y = output_y & ~7;
                    u32 stride = output_width * dst_bytes_per_pixel;

                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 coarse_y * stride;
                } else {
                    // Both input and output are linear
                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                }
            } else {
                if (!config.dont_swizzle) {
                    // Interpret the input as tiled and the output as linear
                    u32 coarse_y = input_y & ~7;
                    u32 stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 coarse_y * stride;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                } else {
                    // Both input and output are tiled
                    u32 out_coarse_y = output_y & ~7;
                    u32 out_stride = output_width * dst_bytes_per_pixel;

                    u32 in_coarse_y = input_y & ~7;
                    u32 in_stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 in_coarse_y * in_stride;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 out_coarse_y * out_stride;
                }
            }

            const u8* src_pixel = src_pointer + src_offset;
            src_color = DecodePixel(config.input_format, src_pixel);
            // Box-filter downscaling: average 2 (ScaleX) or 4 (ScaleXY) adjacent source pixels.
            if (config.scaling == config.ScaleX) {
                Common::Vec4<u8> pixel =
                    DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                src_color = ((src_color + pixel) / 2).Cast<u8>();
            } else if (config.scaling == config.ScaleXY) {
                Common::Vec4<u8> pixel1 =
                    DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                Common::Vec4<u8> pixel2 =
                    DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                Common::Vec4<u8> pixel3 =
                    DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
            }

            u8* dst_pixel = dst_pointer + dst_offset;
            switch (config.output_format) {
            case Regs::PixelFormat::RGBA8:
                Common::Color::EncodeRGBA8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB8:
                Common::Color::EncodeRGB8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB565:
                Common::Color::EncodeRGB565(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB5A1:
                Common::Color::EncodeRGB5A1(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGBA4:
                Common::Color::EncodeRGBA4(src_color, dst_pixel);
                break;

            default:
                LOG_ERROR(HW_GPU, "Unknown destination framebuffer format {:x}",
                          static_cast<u32>(config.output_format.Value()));
                break;
            }
        }
    }
}
|
||||
|
||||
/// Performs a software texture copy: a raw byte copy (no format conversion) where input and
/// output are each described as runs of `width` bytes separated by `gap` bytes. Falls back
/// to this path only when the rasterizer cannot accelerate the copy.
static void TextureCopy(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with invalid addresses
    if (!g_memory->IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address {:#010X}", src_addr);
        return;
    }

    if (!g_memory->IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address {:#010X}", dst_addr);
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateTextureCopy(config))
        return;

    u8* src_pointer = g_memory->GetPhysicalPointer(src_addr);
    u8* dst_pointer = g_memory->GetPhysicalPointer(dst_addr);

    // The total size is rounded down to a multiple of 16 bytes.
    u32 remaining_size = Common::AlignDown(config.texture_copy.size, 16);

    if (remaining_size == 0) {
        LOG_CRITICAL(HW_GPU, "zero size. Real hardware freezes on this.");
        return;
    }

    // Widths and gaps in the register are expressed in 16-byte units.
    u32 input_gap = config.texture_copy.input_gap * 16;
    u32 output_gap = config.texture_copy.output_gap * 16;

    // Zero gap means contiguous input/output even if width = 0. To avoid infinite loop below, width
    // is assigned with the total size if gap = 0.
    u32 input_width = input_gap == 0 ? remaining_size : config.texture_copy.input_width * 16;
    u32 output_width = output_gap == 0 ? remaining_size : config.texture_copy.output_width * 16;

    if (input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width. Real hardware freezes on this.");
        return;
    }

    if (output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width. Real hardware freezes on this.");
        return;
    }

    // Total input footprint including gaps, for cache flushing.
    std::size_t contiguous_input_size =
        config.texture_copy.size / input_width * (input_width + input_gap);
    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
                                  static_cast<u32>(contiguous_input_size));

    std::size_t contiguous_output_size =
        config.texture_copy.size / output_width * (output_width + output_gap);
    // Only need to flush output if it has a gap
    const auto FlushInvalidate_fn = (output_gap != 0) ? Memory::RasterizerFlushAndInvalidateRegion
                                                      : Memory::RasterizerInvalidateRegion;
    FlushInvalidate_fn(config.GetPhysicalOutputAddress(), static_cast<u32>(contiguous_output_size));

    // Copy in chunks bounded by the remaining bytes in the current input run, the current
    // output run, and the total; skip over the gap whenever a run is exhausted.
    u32 remaining_input = input_width;
    u32 remaining_output = output_width;
    while (remaining_size > 0) {
        u32 copy_size = std::min({remaining_input, remaining_output, remaining_size});

        std::memcpy(dst_pointer, src_pointer, copy_size);
        src_pointer += copy_size;
        dst_pointer += copy_size;

        remaining_input -= copy_size;
        remaining_output -= copy_size;
        remaining_size -= copy_size;

        if (remaining_input == 0) {
            remaining_input = input_width;
            src_pointer += input_gap;
        }
        if (remaining_output == 0) {
            remaining_output = output_width;
            dst_pointer += output_gap;
        }
    }
}
|
||||
|
||||
/**
 * Writes a value to a GPU hardware register and performs any side effect the write triggers
 * (memory fill, display transfer / texture copy, command list processing).
 * @param addr Virtual address of the register (in the 0x1EFxxxxx MMIO region).
 * @param data Value to write; only 32-bit writes of known registers are supported.
 */
template <typename T>
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write{} {:#010X} @ {:#010X}", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    // Store the raw value first; side effects below read the updated register state.
    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX(memory_fill_config[0].trigger):
    case GPU_REG_INDEX(memory_fill_config[1].trigger): {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            MemoryFill(config);
            LOG_TRACE(HW_GPU, "MemoryFill from {:#010X} to {:#010X}", config.GetStartAddress(),
                      config.GetEndAddress());

            // It seems that it won't signal interrupt if "address_start" is zero.
            // TODO: hwtest this
            if (config.GetStartAddress() != 0) {
                if (!is_second_filler) {
                    Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC0);
                } else {
                    Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC1);
                }
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger.Assign(0);
            config.finished.Assign(1);
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger): {
        MICROPROFILE_SCOPE(GPU_DisplayTransfer);

        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {

            if (Pica::g_debug_context)
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
                                               nullptr);

            // The same register block drives both operations; is_texture_copy selects which.
            if (config.is_texture_copy) {
                TextureCopy(config);
                LOG_TRACE(HW_GPU,
                          "TextureCopy: {:#X} bytes from {:#010X}({}+{})-> "
                          "{:#010X}({}+{}), flags {:#010X}",
                          config.texture_copy.size, config.GetPhysicalInputAddress(),
                          config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16,
                          config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16,
                          config.texture_copy.output_gap * 16, config.flags);
            } else {
                DisplayTransfer(config);
                LOG_TRACE(HW_GPU,
                          "DisplayTransfer: {:#010X}({}x{})-> "
                          "{:#010X}({}x{}), dst format {:x}, flags {:#010X}",
                          config.GetPhysicalInputAddress(), config.input_width.Value(),
                          config.input_height.Value(), config.GetPhysicalOutputAddress(),
                          config.output_width.Value(), config.output_height.Value(),
                          static_cast<u32>(config.output_format.Value()), config.flags);
            }

            g_regs.display_transfer_config.trigger = 0;
            Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PPF);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger): {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            MICROPROFILE_SCOPE(GPU_CmdlistProcessing);

            Pica::CommandProcessor::ProcessCommandList(config.GetPhysicalAddress(), config.size);

            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }

    // Notify tracer about the register write
    // This is happening *after* handling the write to make sure we properly catch all memory reads.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        // addr + GPU VBase - IO VBase + IO PBase
        Pica::g_debug_context->recorder->RegisterWritten<T>(
            addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
    }
}
|
||||
|
||||
// Explicitly instantiate template functions because we aren't defining this in the header:
// All access widths are instantiated; non-u32 widths only log an error at runtime (see the
// early-out checks in Read/Write above).

template void Read<u64>(u64& var, const u32 addr);
template void Read<u32>(u32& var, const u32 addr);
template void Read<u16>(u16& var, const u32 addr);
template void Read<u8>(u8& var, const u32 addr);

template void Write<u64>(u32 addr, const u64 data);
template void Write<u32>(u32 addr, const u32 data);
template void Write<u16>(u32 addr, const u16 data);
template void Write<u8>(u32 addr, const u8 data);
|
||||
|
||||
/// Update hardware
/// CoreTiming callback fired once per frame: presents the rendered frame and raises both
/// VBlank interrupts, then reschedules itself for the next frame (compensating lateness).
static void VBlankCallback(std::uintptr_t user_data, s64 cycles_late) {
    VideoCore::g_renderer->SwapBuffers();

    // Signal to GSP that GPU interrupt has occurred
    // TODO(yuriks): hwtest to determine if PDC0 is for the Top screen and PDC1 for the Sub
    // screen, or if both use the same interrupts and these two instead determine the
    // beginning and end of the VBlank period. If needed, split the interrupt firing into
    // two different intervals.
    Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC0);
    Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC1);

    // Reschedule recurrent event
    Core::System::GetInstance().CoreTiming().ScheduleEvent(frame_ticks - cycles_late, vblank_event);
}
|
||||
|
||||
/// Initialize hardware
/// Zeroes the register file, sets default framebuffer configurations for both screens
/// (240x400 top, 240x320 sub, RGB8), and schedules the recurring VBlank event.
/// @param memory Memory system used for all physical-address accesses by this module.
void Init(Memory::MemorySystem& memory) {
    g_memory = &memory;
    std::memset(&g_regs, 0, sizeof(g_regs));

    auto& framebuffer_top = g_regs.framebuffer_config[0];
    auto& framebuffer_sub = g_regs.framebuffer_config[1];

    // Setup default framebuffer addresses (located in VRAM)
    // .. or at least these are the ones used by system applets.
    // There's probably a smarter way to come up with addresses
    // like this which does not require hardcoding.
    framebuffer_top.address_left1 = 0x181E6000;
    framebuffer_top.address_left2 = 0x1822C800;
    framebuffer_top.address_right1 = 0x18273000;
    framebuffer_top.address_right2 = 0x182B9800;
    framebuffer_sub.address_left1 = 0x1848F000;
    framebuffer_sub.address_left2 = 0x184C7800;

    // Note: the 3DS panels are physically rotated, hence width=240 -- TODO confirm intent here.
    framebuffer_top.width.Assign(240);
    framebuffer_top.height.Assign(400);
    framebuffer_top.stride = 3 * 240; // RGB8 = 3 bytes per pixel
    framebuffer_top.color_format.Assign(Regs::PixelFormat::RGB8);
    framebuffer_top.active_fb = 0;

    framebuffer_sub.width.Assign(240);
    framebuffer_sub.height.Assign(320);
    framebuffer_sub.stride = 3 * 240;
    framebuffer_sub.color_format.Assign(Regs::PixelFormat::RGB8);
    framebuffer_sub.active_fb = 0;

    Core::Timing& timing = Core::System::GetInstance().CoreTiming();
    vblank_event = timing.RegisterEvent("GPU::VBlankCallback", VBlankCallback);
    timing.ScheduleEvent(frame_ticks, vblank_event);

    LOG_DEBUG(HW_GPU, "initialized OK");
}
|
||||
|
||||
/// Shutdown hardware
/// No teardown is currently required; the VBlank event is owned by CoreTiming.
void Shutdown() {
    LOG_DEBUG(HW_GPU, "shutdown OK");
}
|
||||
|
||||
} // namespace GPU
|
@ -1,344 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <type_traits>
|
||||
#include <boost/serialization/access.hpp>
|
||||
#include <boost/serialization/binary_object.hpp>
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/core_timing.h"
|
||||
|
||||
namespace Memory {
|
||||
class MemorySystem;
|
||||
}
|
||||
|
||||
namespace GPU {
|
||||
|
||||
// Measured on hardware to be 2240568 timer cycles or 4481136 ARM11 cycles
constexpr u64 frame_ticks = 4481136ull;

// Refresh rate defined by ratio of ARM11 frequency to ARM11 ticks per frame
// (268,111,856) / (4,481,136) = 59.83122493939037Hz
constexpr double SCREEN_REFRESH_RATE = BASE_CLOCK_RATE_ARM11 / static_cast<double>(frame_ticks);

// Returns index corresponding to the Regs member labeled by field_name
// (byte offset divided by the 4-byte register word size)
#define GPU_REG_INDEX(field_name) (offsetof(GPU::Regs, field_name) / sizeof(u32))

// Returns index corresponding to the Regs::FramebufferConfig labeled by field_name
// screen_id is a subscript for Regs::framebuffer_config
#define GPU_FRAMEBUFFER_REG_INDEX(screen_id, field_name)                                           \
    ((offsetof(GPU::Regs, framebuffer_config) +                                                    \
      sizeof(GPU::Regs::FramebufferConfig) * (screen_id) +                                         \
      offsetof(GPU::Regs::FramebufferConfig, field_name)) /                                        \
     sizeof(u32))
|
||||
|
||||
// MMIO region 0x1EFxxxxx
|
||||
struct Regs {
|
||||
|
||||
// helper macro to make sure the defined structures are of the expected size.
|
||||
#define ASSERT_MEMBER_SIZE(name, size_in_bytes) \
|
||||
static_assert(sizeof(name) == size_in_bytes, \
|
||||
"Structure size and register block length don't match")
|
||||
|
||||
// Components are laid out in reverse byte order, most significant bits first.
|
||||
enum class PixelFormat : u32 {
|
||||
RGBA8 = 0,
|
||||
RGB8 = 1,
|
||||
RGB565 = 2,
|
||||
RGB5A1 = 3,
|
||||
RGBA4 = 4,
|
||||
};
|
||||
|
||||
/**
 * Returns the number of bytes per pixel for the given framebuffer/transfer format.
 * @param format Pixel format to query.
 * @return Pixel size in bytes: 4 (RGBA8), 3 (RGB8) or 2 (16-bit formats).
 */
static int BytesPerPixel(PixelFormat format) {
    switch (format) {
    case PixelFormat::RGBA8:
        return 4;
    case PixelFormat::RGB8:
        return 3;
    case PixelFormat::RGB565:
    case PixelFormat::RGB5A1:
    case PixelFormat::RGBA4:
        // All remaining formats pack one pixel into 16 bits.
        return 2;
    default:
        UNREACHABLE();
    }

    return 0;
}
|
||||
|
||||
INSERT_PADDING_WORDS(0x4);
|
||||
|
||||
// Register block for one memory fill ("PSC") unit; two such units exist.
struct MemoryFillConfig {
    // Encoded start/end addresses; decode with GetStartAddress()/GetEndAddress().
    u32 address_start;
    u32 address_end;

    // Fill value, interpreted according to fill_24bit/fill_32bit below.
    union {
        u32 value_32bit;

        BitField<0, 16, u32> value_16bit;

        // TODO: Verify component order
        BitField<0, 8, u32> value_24bit_r;
        BitField<8, 8, u32> value_24bit_g;
        BitField<16, 8, u32> value_24bit_b;
    };

    union {
        u32 control;

        // Setting this field to 1 triggers the memory fill.
        // This field also acts as a status flag, and gets reset to 0 upon completion.
        BitField<0, 1, u32> trigger;

        // Set to 1 upon completion.
        BitField<1, 1, u32> finished;

        // If both of these bits are unset, then it will fill the memory with a 16 bit value
        // 1: fill with 24-bit wide values
        BitField<8, 1, u32> fill_24bit;
        // 1: fill with 32-bit wide values
        BitField<9, 1, u32> fill_32bit;
    };

    // Decoded physical start address of the fill range.
    inline u32 GetStartAddress() const {
        return DecodeAddressRegister(address_start);
    }

    // Decoded physical end address (exclusive) of the fill range.
    inline u32 GetEndAddress() const {
        return DecodeAddressRegister(address_end);
    }

    // Human-readable summary for logging/debugging.
    inline std::string DebugName() const {
        return fmt::format("from {:#X} to {:#X} with {}-bit value {:#X}", GetStartAddress(),
                           GetEndAddress(), fill_32bit ? "32" : (fill_24bit ? "24" : "16"),
                           value_32bit);
    }
} memory_fill_config[2];
ASSERT_MEMBER_SIZE(memory_fill_config[0], 0x10);
|
||||
|
||||
INSERT_PADDING_WORDS(0x10b);
|
||||
|
||||
// Display controller ("PDC") register block for one screen; index 0 = top, 1 = sub
// (see Init(), which configures them as 240x400 and 240x320 respectively).
struct FramebufferConfig {
    union {
        u32 size;

        BitField<0, 16, u32> width;
        BitField<16, 16, u32> height;
    };

    INSERT_PADDING_WORDS(0x2);

    // Double-buffered left-eye framebuffer addresses; selection is via active_fb below.
    u32 address_left1;
    u32 address_left2;

    union {
        u32 format;

        BitField<0, 3, PixelFormat> color_format;
    };

    INSERT_PADDING_WORDS(0x1);

    union {
        u32 active_fb;

        // 0: Use parameters ending with "1"
        // 1: Use parameters ending with "2"
        BitField<0, 1, u32> second_fb_active;
    };

    INSERT_PADDING_WORDS(0x5);

    // Distance between two pixel rows, in bytes
    u32 stride;

    // Right-eye framebuffer addresses (stereoscopic 3D); only the top screen uses these.
    u32 address_right1;
    u32 address_right2;

    INSERT_PADDING_WORDS(0x30);
} framebuffer_config[2];
ASSERT_MEMBER_SIZE(framebuffer_config[0], 0x100);
|
||||
|
||||
INSERT_PADDING_WORDS(0x169);
|
||||
|
||||
/// Display transfer engine configuration: copies pixel data between two
/// buffers, optionally converting format, (de)tiling, flipping or downscaling.
/// Field order and padding mirror the hardware register layout — do not reorder.
struct DisplayTransferConfig {
    u32 input_address;
    u32 output_address;

    /// Decoded physical address of the source buffer.
    inline u32 GetPhysicalInputAddress() const {
        return DecodeAddressRegister(input_address);
    }

    /// Decoded physical address of the destination buffer.
    inline u32 GetPhysicalOutputAddress() const {
        return DecodeAddressRegister(output_address);
    }

    /// Human-readable summary of the transfer for logging/debugging.
    inline std::string DebugName() const noexcept {
        return fmt::format("from {:#x} to {:#x} with {} scaling and stride {}, width {}",
                           GetPhysicalInputAddress(), GetPhysicalOutputAddress(),
                           scaling == NoScale ? "no" : (scaling == ScaleX ? "X" : "XY"),
                           input_width.Value(), output_width.Value());
    }

    union {
        u32 output_size;

        BitField<0, 16, u32> output_width;
        BitField<16, 16, u32> output_height;
    };

    union {
        u32 input_size;

        BitField<0, 16, u32> input_width;
        BitField<16, 16, u32> input_height;
    };

    enum ScalingMode : u32 {
        NoScale = 0, // Doesn't scale the image
        ScaleX = 1,  // Downscales the image in half in the X axis and applies a box filter
        ScaleXY =
            2, // Downscales the image in half in both the X and Y axes and applies a box filter
    };

    union {
        u32 flags;

        BitField<0, 1, u32> flip_vertically; // flips input data vertically
        BitField<1, 1, u32> input_linear;    // Converts from linear to tiled format
        BitField<2, 1, u32> crop_input_lines;
        BitField<3, 1, u32> is_texture_copy; // Copies the data without performing any
                                             // processing and respecting texture copy fields
        BitField<5, 1, u32> dont_swizzle;
        BitField<8, 3, PixelFormat> input_format;
        BitField<12, 3, PixelFormat> output_format;
        /// Uses some kind of 32x32 block swizzling mode, instead of the usual 8x8 one.
        BitField<16, 1, u32> block_32;        // TODO(yuriks): unimplemented
        BitField<24, 2, ScalingMode> scaling; // Determines the scaling mode of the transfer
    };

    INSERT_PADDING_WORDS(0x1);

    // it seems that writing to this field triggers the display transfer
    u32 trigger;

    INSERT_PADDING_WORDS(0x1);

    // Raw-copy parameters, consulted when is_texture_copy is set (see flag above).
    struct {
        u32 size; // The lower 4 bits are ignored

        union {
            u32 input_size;

            BitField<0, 16, u32> input_width;
            BitField<16, 16, u32> input_gap;
        };

        union {
            u32 output_size;

            BitField<0, 16, u32> output_width;
            BitField<16, 16, u32> output_gap;
        };
    } texture_copy;
} display_transfer_config;
|
||||
ASSERT_MEMBER_SIZE(display_transfer_config, 0x2c);
|
||||
|
||||
INSERT_PADDING_WORDS(0x32D);
|
||||
|
||||
/// Pica command-list processor configuration registers.
struct {
    // command list size (in bytes)
    u32 size;

    INSERT_PADDING_WORDS(0x1);

    // command list address
    u32 address;

    INSERT_PADDING_WORDS(0x1);

    // it seems that writing to this field triggers command list processing
    u32 trigger;

    /// Decoded physical address of the command list.
    inline u32 GetPhysicalAddress() const {
        return DecodeAddressRegister(address);
    }
} command_processor_config;
|
||||
ASSERT_MEMBER_SIZE(command_processor_config, 0x14);
|
||||
|
||||
INSERT_PADDING_WORDS(0x9c3);
|
||||
|
||||
static constexpr std::size_t NumIds() {
|
||||
return sizeof(Regs) / sizeof(u32);
|
||||
}
|
||||
|
||||
/// Read-only raw word access into the register block by index.
const u32& operator[](int index) const {
    const auto* words = reinterpret_cast<const u32*>(this);
    return *(words + index);
}
|
||||
|
||||
/// Mutable raw word access into the register block by index.
u32& operator[](int index) {
    auto* words = reinterpret_cast<u32*>(this);
    return *(words + index);
}
|
||||
|
||||
#undef ASSERT_MEMBER_SIZE
|
||||
|
||||
private:
|
||||
/*
|
||||
* Most physical addresses which GPU registers refer to are 8-byte aligned.
|
||||
* This function should be used to get the address from a raw register value.
|
||||
*/
|
||||
/// Registers hold physical addresses divided by 8 (8-byte aligned);
/// this scales a raw register value back up to a byte address.
static inline u32 DecodeAddressRegister(u32 register_value) {
    return register_value << 3;
}
|
||||
|
||||
/// boost::serialization hook: snapshots the entire register block as raw
/// bytes, relying on Regs being standard-layout (asserted below the class).
template <class Archive>
void serialize(Archive& ar, const unsigned int) {
    ar& boost::serialization::make_binary_object(this, sizeof(Regs));
}
|
||||
friend class boost::serialization::access;
|
||||
};
|
||||
static_assert(std::is_standard_layout<Regs>::value, "Structure does not use standard layout");
|
||||
|
||||
#define ASSERT_REG_POSITION(field_name, position) \
|
||||
static_assert(offsetof(Regs, field_name) == position * 4, \
|
||||
"Field " #field_name " has invalid position")
|
||||
|
||||
ASSERT_REG_POSITION(memory_fill_config[0], 0x00004);
|
||||
ASSERT_REG_POSITION(memory_fill_config[1], 0x00008);
|
||||
ASSERT_REG_POSITION(framebuffer_config[0], 0x00117);
|
||||
ASSERT_REG_POSITION(framebuffer_config[1], 0x00157);
|
||||
ASSERT_REG_POSITION(display_transfer_config, 0x00300);
|
||||
ASSERT_REG_POSITION(command_processor_config, 0x00638);
|
||||
|
||||
#undef ASSERT_REG_POSITION
|
||||
|
||||
// The total number of registers is chosen arbitrarily, but let's make sure it's not some odd value
|
||||
// anyway.
|
||||
static_assert(sizeof(Regs) == 0x1000 * sizeof(u32), "Invalid total size of register set");
|
||||
|
||||
extern Regs g_regs;
|
||||
|
||||
template <typename T>
|
||||
void Read(T& var, const u32 addr);
|
||||
|
||||
template <typename T>
|
||||
void Write(u32 addr, const T data);
|
||||
|
||||
/// Initialize hardware
|
||||
void Init(Memory::MemorySystem& memory);
|
||||
|
||||
/// Shutdown hardware
|
||||
void Shutdown();
|
||||
|
||||
} // namespace GPU
|
@ -1,102 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hw/aes/key.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/hw/hw.h"
|
||||
#include "core/hw/lcd.h"
|
||||
|
||||
namespace HW {
|
||||
|
||||
template <typename T>
|
||||
inline void Read(T& var, const u32 addr) {
|
||||
switch (addr & 0xFFFFF000) {
|
||||
case VADDR_GPU:
|
||||
case VADDR_GPU + 0x1000:
|
||||
case VADDR_GPU + 0x2000:
|
||||
case VADDR_GPU + 0x3000:
|
||||
case VADDR_GPU + 0x4000:
|
||||
case VADDR_GPU + 0x5000:
|
||||
case VADDR_GPU + 0x6000:
|
||||
case VADDR_GPU + 0x7000:
|
||||
case VADDR_GPU + 0x8000:
|
||||
case VADDR_GPU + 0x9000:
|
||||
case VADDR_GPU + 0xA000:
|
||||
case VADDR_GPU + 0xB000:
|
||||
case VADDR_GPU + 0xC000:
|
||||
case VADDR_GPU + 0xD000:
|
||||
case VADDR_GPU + 0xE000:
|
||||
case VADDR_GPU + 0xF000:
|
||||
GPU::Read(var, addr);
|
||||
break;
|
||||
case VADDR_LCD:
|
||||
LCD::Read(var, addr);
|
||||
break;
|
||||
default:
|
||||
LOG_ERROR(HW_Memory, "unknown Read{} @ {:#010X}", sizeof(var) * 8, addr);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void Write(u32 addr, const T data) {
|
||||
switch (addr & 0xFFFFF000) {
|
||||
case VADDR_GPU:
|
||||
case VADDR_GPU + 0x1000:
|
||||
case VADDR_GPU + 0x2000:
|
||||
case VADDR_GPU + 0x3000:
|
||||
case VADDR_GPU + 0x4000:
|
||||
case VADDR_GPU + 0x5000:
|
||||
case VADDR_GPU + 0x6000:
|
||||
case VADDR_GPU + 0x7000:
|
||||
case VADDR_GPU + 0x8000:
|
||||
case VADDR_GPU + 0x9000:
|
||||
case VADDR_GPU + 0xA000:
|
||||
case VADDR_GPU + 0xB000:
|
||||
case VADDR_GPU + 0xC000:
|
||||
case VADDR_GPU + 0xD000:
|
||||
case VADDR_GPU + 0xE000:
|
||||
case VADDR_GPU + 0xF000:
|
||||
GPU::Write(addr, data);
|
||||
break;
|
||||
case VADDR_LCD:
|
||||
LCD::Write(addr, data);
|
||||
break;
|
||||
default:
|
||||
LOG_ERROR(HW_Memory, "unknown Write{} {:#010X} @ {:#010X}", sizeof(data) * 8, (u32)data,
|
||||
addr);
|
||||
}
|
||||
}
|
||||
|
||||
// Explicitly instantiate template functions because we aren't defining this in the header:
|
||||
|
||||
template void Read<u64>(u64& var, const u32 addr);
|
||||
template void Read<u32>(u32& var, const u32 addr);
|
||||
template void Read<u16>(u16& var, const u32 addr);
|
||||
template void Read<u8>(u8& var, const u32 addr);
|
||||
|
||||
template void Write<u64>(u32 addr, const u64 data);
|
||||
template void Write<u32>(u32 addr, const u32 data);
|
||||
template void Write<u16>(u32 addr, const u16 data);
|
||||
template void Write<u8>(u32 addr, const u8 data);
|
||||
|
||||
/// Update hardware
/// Currently a no-op; no periodic work is performed here.
void Update() {}
|
||||
|
||||
/// Initialize hardware
/// Loads the AES key slots, then brings up the GPU and LCD register blocks.
void Init(Memory::MemorySystem& memory) {
    AES::InitKeys();
    GPU::Init(memory);
    LCD::Init();
    LOG_DEBUG(HW, "initialized OK");
}
|
||||
|
||||
/// Shutdown hardware
/// Tears down the GPU and LCD modules initialized by Init().
void Shutdown() {
    GPU::Shutdown();
    LCD::Shutdown();
    LOG_DEBUG(HW, "shutdown OK");
}
|
||||
} // namespace HW
|
@ -1,54 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Memory {
|
||||
class MemorySystem;
|
||||
}
|
||||
|
||||
namespace HW {
|
||||
|
||||
/// Beginnings of IO register regions, in the user VA space.
|
||||
enum : u32 {
|
||||
VADDR_HASH = 0x1EC01000,
|
||||
VADDR_CSND = 0x1EC03000,
|
||||
VADDR_DSP = 0x1EC40000,
|
||||
VADDR_PDN = 0x1EC41000,
|
||||
VADDR_CODEC = 0x1EC41000,
|
||||
VADDR_SPI = 0x1EC42000,
|
||||
VADDR_SPI_2 = 0x1EC43000, // Only used under TWL_FIRM?
|
||||
VADDR_I2C = 0x1EC44000,
|
||||
VADDR_CODEC_2 = 0x1EC45000,
|
||||
VADDR_HID = 0x1EC46000,
|
||||
VADDR_GPIO = 0x1EC47000,
|
||||
VADDR_I2C_2 = 0x1EC48000,
|
||||
VADDR_SPI_3 = 0x1EC60000,
|
||||
VADDR_I2C_3 = 0x1EC61000,
|
||||
VADDR_MIC = 0x1EC62000,
|
||||
VADDR_PXI = 0x1EC63000,
|
||||
VADDR_LCD = 0x1ED02000,
|
||||
VADDR_DSP_2 = 0x1ED03000,
|
||||
VADDR_HASH_2 = 0x1EE01000,
|
||||
VADDR_GPU = 0x1EF00000,
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
void Read(T& var, const u32 addr);
|
||||
|
||||
template <typename T>
|
||||
void Write(u32 addr, const T data);
|
||||
|
||||
/// Update hardware
|
||||
void Update();
|
||||
|
||||
/// Initialize hardware
|
||||
void Init(Memory::MemorySystem& memory);
|
||||
|
||||
/// Shutdown hardware
|
||||
void Shutdown();
|
||||
|
||||
} // namespace HW
|
@ -1,76 +0,0 @@
|
||||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hw/hw.h"
|
||||
#include "core/hw/lcd.h"
|
||||
#include "core/tracer/recorder.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
|
||||
namespace LCD {
|
||||
|
||||
Regs g_regs;
|
||||
|
||||
template <typename T>
|
||||
inline void Read(T& var, const u32 raw_addr) {
|
||||
u32 addr = raw_addr - HW::VADDR_LCD;
|
||||
u32 index = addr / 4;
|
||||
|
||||
// Reads other than u32 are untested, so I'd rather have them abort than silently fail
|
||||
if (index >= 0x400 || !std::is_same<T, u32>::value) {
|
||||
LOG_ERROR(HW_LCD, "unknown Read{} @ {:#010X}", sizeof(var) * 8, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
var = g_regs[index];
|
||||
}
|
||||
|
||||
/// Writes an LCD register. Only u32 writes inside the 0x400-word register
/// window are serviced; anything else is logged and dropped. Successful
/// writes are forwarded to the Pica tracer when one is recording.
template <typename T>
inline void Write(u32 addr, const T data) {
    // Translate from the user VA-space mapping to a register index.
    addr -= HW::VADDR_LCD;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= 0x400 || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_LCD, "unknown Write{} {:#010X} @ {:#010X}", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    // Notify tracer about the register write
    // This is happening *after* handling the write to make sure we properly catch all memory reads.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        // addr + GPU VBase - IO VBase + IO PBase
        Pica::g_debug_context->recorder->RegisterWritten<T>(
            addr + HW::VADDR_LCD - 0x1EC00000 + 0x10100000, data);
    }
}
|
||||
|
||||
// Explicitly instantiate template functions because we aren't defining this in the header:
|
||||
|
||||
template void Read<u64>(u64& var, const u32 addr);
|
||||
template void Read<u32>(u32& var, const u32 addr);
|
||||
template void Read<u16>(u16& var, const u32 addr);
|
||||
template void Read<u8>(u8& var, const u32 addr);
|
||||
|
||||
template void Write<u64>(u32 addr, const u64 data);
|
||||
template void Write<u32>(u32 addr, const u32 data);
|
||||
template void Write<u16>(u32 addr, const u16 data);
|
||||
template void Write<u8>(u32 addr, const u8 data);
|
||||
|
||||
/// Initialize hardware
/// Zero-fills the whole LCD register block.
void Init() {
    std::memset(&g_regs, 0, sizeof(g_regs));
    LOG_DEBUG(HW_LCD, "initialized OK");
}
|
||||
|
||||
/// Shutdown hardware
/// No LCD state needs explicit teardown; this only logs.
void Shutdown() {
    LOG_DEBUG(HW_LCD, "shutdown OK");
}
|
||||
|
||||
} // namespace LCD
|
@ -1,677 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/hle/service/gsp/gsp.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/tracer/recorder.h"
|
||||
#include "video_core/command_processor.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/primitive_assembly.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/regs.h"
|
||||
#include "video_core/regs_pipeline.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/vertex_loader.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace Pica::CommandProcessor {
|
||||
|
||||
// Expand a 4-bit mask to 4-byte mask, e.g. 0b0101 -> 0x00FF00FF
// Indexed by a command's per-byte write-enable mask; each set bit enables
// one byte lane of the 32-bit register write (see WritePicaReg).
constexpr std::array<u32, 16> expand_bits_to_bytes{
    0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff, 0x00ff0000, 0x00ff00ff, 0x00ffff00, 0x00ffffff,
    0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff, 0xffff0000, 0xffff00ff, 0xffffff00, 0xffffffff,
};
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_Drawing, "GPU", "Drawing", MP_RGB(50, 50, 240));
|
||||
|
||||
/// Names the shader stage a setup object belongs to (by identity against the
/// global Pica state), for use in log messages.
static const char* GetShaderSetupTypeName(Shader::ShaderSetup& setup) {
    return &setup == &g_state.vs   ? "vertex shader"
           : &setup == &g_state.gs ? "geometry shader"
                                   : "unknown shader";
}
|
||||
|
||||
/// Unpacks one bit of `value` into each boolean uniform of the given setup.
static void WriteUniformBoolReg(Shader::ShaderSetup& setup, u32 value) {
    for (unsigned bit = 0; bit < setup.uniforms.b.size(); ++bit) {
        setup.uniforms.b[bit] = ((value >> bit) & 1) != 0;
    }
}
|
||||
|
||||
/// Stores a 4-component integer uniform into the given shader setup.
/// Asserts that `index` is within the setup's integer-uniform array.
static void WriteUniformIntReg(Shader::ShaderSetup& setup, unsigned index,
                               const Common::Vec4<u8>& values) {
    ASSERT(index < setup.uniforms.i.size());
    setup.uniforms.i[index] = values;
    LOG_TRACE(HW_GPU, "Set {} integer uniform {} to {:02x} {:02x} {:02x} {:02x}",
              GetShaderSetupTypeName(setup), index, values.x, values.y, values.z, values.w);
}
|
||||
|
||||
/// Buffers one 32-bit word of float-uniform data and, once a full 4-component
/// vector has been received, decodes it and writes it into the shader setup.
///
/// A full vector is 4 words in float32 mode, or 3 packed words encoding four
/// float24 values otherwise. `float_regs_counter` and `uniform_write_buffer`
/// carry the partial state between calls; the destination uniform index comes
/// from (and advances) `config.uniform_setup.index`.
static void WriteUniformFloatReg(ShaderRegs& config, Shader::ShaderSetup& setup,
                                 int& float_regs_counter, std::array<u32, 4>& uniform_write_buffer,
                                 u32 value) {
    auto& uniform_setup = config.uniform_setup;

    // TODO: Does actual hardware indeed keep an intermediate buffer or does
    // it directly write the values?
    uniform_write_buffer[float_regs_counter++] = value;

    // Uniforms are written in a packed format such that four float24 values are encoded in
    // three 32-bit numbers. We write to internal memory once a full such vector is
    // written.
    if ((float_regs_counter >= 4 && uniform_setup.IsFloat32()) ||
        (float_regs_counter >= 3 && !uniform_setup.IsFloat32())) {
        float_regs_counter = 0;

        if (uniform_setup.index >= setup.uniforms.f.size()) {
            LOG_ERROR(HW_GPU, "Invalid {} float uniform index {}", GetShaderSetupTypeName(setup),
                      (int)uniform_setup.index);
        } else {
            auto& uniform = setup.uniforms.f[uniform_setup.index];

            // NOTE: The destination component order indeed is "backwards"
            if (uniform_setup.IsFloat32()) {
                // Four IEEE-754 float32 words arrive w-first; bit-copy each and
                // convert to float24.
                for (auto i : {0, 1, 2, 3}) {
                    float buffer_value;
                    std::memcpy(&buffer_value, &uniform_write_buffer[i], sizeof(float));
                    uniform[3 - i] = f24::FromFloat32(buffer_value);
                }
            } else {
                // TODO: Untested
                // Three words hold four raw 24-bit values back to back:
                //   w = word0[31:8]
                //   z = word0[7:0] : word1[31:16]
                //   y = word1[15:0] : word2[31:24]
                //   x = word2[23:0]
                uniform.w = f24::FromRaw(uniform_write_buffer[0] >> 8);
                uniform.z = f24::FromRaw(((uniform_write_buffer[0] & 0xFF) << 16) |
                                         ((uniform_write_buffer[1] >> 16) & 0xFFFF));
                uniform.y = f24::FromRaw(((uniform_write_buffer[1] & 0xFFFF) << 8) |
                                         ((uniform_write_buffer[2] >> 24) & 0xFF));
                uniform.x = f24::FromRaw(uniform_write_buffer[2] & 0xFFFFFF);
            }

            LOG_TRACE(HW_GPU, "Set {} float uniform {:x} to ({} {} {} {})",
                      GetShaderSetupTypeName(setup), (int)uniform_setup.index,
                      uniform.x.ToFloat32(), uniform.y.ToFloat32(), uniform.z.ToFloat32(),
                      uniform.w.ToFloat32());

            // TODO: Verify that this actually modifies the register!
            uniform_setup.index.Assign(uniform_setup.index + 1);
        }
    }
}
|
||||
|
||||
static void WritePicaReg(u32 id, u32 value, u32 mask) {
|
||||
auto& regs = g_state.regs;
|
||||
|
||||
if (id >= Regs::NUM_REGS) {
|
||||
LOG_ERROR(
|
||||
HW_GPU,
|
||||
"Commandlist tried to write to invalid register 0x{:03X} (value: {:08X}, mask: {:X})",
|
||||
id, value, mask);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO: Figure out how register masking acts on e.g. vs.uniform_setup.set_value
|
||||
u32 old_value = regs.reg_array[id];
|
||||
|
||||
const u32 write_mask = expand_bits_to_bytes[mask];
|
||||
|
||||
regs.reg_array[id] = (old_value & ~write_mask) | (value & write_mask);
|
||||
|
||||
// Double check for is_pica_tracing to avoid call overhead
|
||||
if (DebugUtils::IsPicaTracing()) {
|
||||
DebugUtils::OnPicaRegWrite({(u16)id, (u16)mask, regs.reg_array[id]});
|
||||
}
|
||||
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::PicaCommandLoaded,
|
||||
reinterpret_cast<void*>(&id));
|
||||
|
||||
switch (id) {
|
||||
// Trigger IRQ
|
||||
case PICA_REG_INDEX(trigger_irq):
|
||||
Service::GSP::SignalInterrupt(Service::GSP::InterruptId::P3D);
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.triangle_topology):
|
||||
g_state.primitive_assembler.Reconfigure(regs.pipeline.triangle_topology);
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.restart_primitive):
|
||||
g_state.primitive_assembler.Reset();
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.index):
|
||||
g_state.immediate.current_attribute = 0;
|
||||
g_state.immediate.reset_geometry_pipeline = true;
|
||||
g_state.default_attr_counter = 0;
|
||||
break;
|
||||
|
||||
// Load default vertex input attributes
|
||||
case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.set_value[0]):
|
||||
case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.set_value[1]):
|
||||
case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.set_value[2]): {
|
||||
// TODO: Does actual hardware indeed keep an intermediate buffer or does
|
||||
// it directly write the values?
|
||||
g_state.default_attr_write_buffer[g_state.default_attr_counter++] = value;
|
||||
|
||||
// Default attributes are written in a packed format such that four float24 values are
|
||||
// encoded in
|
||||
// three 32-bit numbers. We write to internal memory once a full such vector is
|
||||
// written.
|
||||
if (g_state.default_attr_counter >= 3) {
|
||||
g_state.default_attr_counter = 0;
|
||||
|
||||
auto& setup = regs.pipeline.vs_default_attributes_setup;
|
||||
|
||||
if (setup.index >= 16) {
|
||||
LOG_ERROR(HW_GPU, "Invalid VS default attribute index {}", (int)setup.index);
|
||||
break;
|
||||
}
|
||||
|
||||
Common::Vec4<f24> attribute;
|
||||
|
||||
// NOTE: The destination component order indeed is "backwards"
|
||||
attribute.w = f24::FromRaw(g_state.default_attr_write_buffer[0] >> 8);
|
||||
attribute.z = f24::FromRaw(((g_state.default_attr_write_buffer[0] & 0xFF) << 16) |
|
||||
((g_state.default_attr_write_buffer[1] >> 16) & 0xFFFF));
|
||||
attribute.y = f24::FromRaw(((g_state.default_attr_write_buffer[1] & 0xFFFF) << 8) |
|
||||
((g_state.default_attr_write_buffer[2] >> 24) & 0xFF));
|
||||
attribute.x = f24::FromRaw(g_state.default_attr_write_buffer[2] & 0xFFFFFF);
|
||||
|
||||
LOG_TRACE(HW_GPU, "Set default VS attribute {:x} to ({} {} {} {})", (int)setup.index,
|
||||
attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
|
||||
attribute.w.ToFloat32());
|
||||
|
||||
// TODO: Verify that this actually modifies the register!
|
||||
if (setup.index < 15) {
|
||||
g_state.input_default_attributes.attr[setup.index] = attribute;
|
||||
setup.index++;
|
||||
} else {
|
||||
// Put each attribute into an immediate input buffer. When all specified immediate
|
||||
// attributes are present, the Vertex Shader is invoked and everything is sent to
|
||||
// the primitive assembler.
|
||||
|
||||
auto& immediate_input = g_state.immediate.input_vertex;
|
||||
auto& immediate_attribute_id = g_state.immediate.current_attribute;
|
||||
|
||||
immediate_input.attr[immediate_attribute_id] = attribute;
|
||||
|
||||
if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
|
||||
immediate_attribute_id += 1;
|
||||
} else {
|
||||
MICROPROFILE_SCOPE(GPU_Drawing);
|
||||
immediate_attribute_id = 0;
|
||||
|
||||
Shader::OutputVertex::ValidateSemantics(regs.rasterizer);
|
||||
|
||||
auto* shader_engine = Shader::GetEngine();
|
||||
shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
|
||||
|
||||
// Send to vertex shader
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
|
||||
static_cast<void*>(&immediate_input));
|
||||
Shader::UnitState shader_unit;
|
||||
Shader::AttributeBuffer output{};
|
||||
|
||||
shader_unit.LoadInput(regs.vs, immediate_input);
|
||||
shader_engine->Run(g_state.vs, shader_unit);
|
||||
shader_unit.WriteOutput(regs.vs, output);
|
||||
|
||||
// Send to geometry pipeline
|
||||
if (g_state.immediate.reset_geometry_pipeline) {
|
||||
g_state.geometry_pipeline.Reconfigure();
|
||||
g_state.immediate.reset_geometry_pipeline = false;
|
||||
}
|
||||
ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
|
||||
g_state.geometry_pipeline.Setup(shader_engine);
|
||||
g_state.geometry_pipeline.SubmitVertex(output);
|
||||
|
||||
// TODO: If drawing after every immediate mode triangle kills performance,
|
||||
// change it to flush triangles whenever a drawing config register changes
|
||||
// See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
|
||||
VideoCore::g_renderer->Rasterizer()->DrawTriangles();
|
||||
if (g_debug_context) {
|
||||
g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch,
|
||||
nullptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(pipeline.gpu_mode):
|
||||
// This register likely just enables vertex processing and doesn't need any special handling
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.command_buffer.trigger[0]):
|
||||
case PICA_REG_INDEX(pipeline.command_buffer.trigger[1]): {
|
||||
unsigned index =
|
||||
static_cast<unsigned>(id - PICA_REG_INDEX(pipeline.command_buffer.trigger[0]));
|
||||
u32* head_ptr = (u32*)VideoCore::g_memory->GetPhysicalPointer(
|
||||
regs.pipeline.command_buffer.GetPhysicalAddress(index));
|
||||
g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = head_ptr;
|
||||
g_state.cmd_list.length = regs.pipeline.command_buffer.GetSize(index) / sizeof(u32);
|
||||
break;
|
||||
}
|
||||
|
||||
// It seems like these trigger vertex rendering
|
||||
case PICA_REG_INDEX(pipeline.trigger_draw):
|
||||
case PICA_REG_INDEX(pipeline.trigger_draw_indexed): {
|
||||
MICROPROFILE_SCOPE(GPU_Drawing);
|
||||
|
||||
#if PICA_LOG_TEV
|
||||
DebugUtils::DumpTevStageConfig(regs.GetTevStages());
|
||||
#endif
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
|
||||
|
||||
PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
|
||||
|
||||
bool accelerate_draw = VideoCore::g_hw_shader_enabled && primitive_assembler.IsEmpty();
|
||||
|
||||
if (regs.pipeline.use_gs == PipelineRegs::UseGS::No) {
|
||||
auto topology = primitive_assembler.GetTopology();
|
||||
if (topology == PipelineRegs::TriangleTopology::Shader ||
|
||||
topology == PipelineRegs::TriangleTopology::List) {
|
||||
accelerate_draw = accelerate_draw && (regs.pipeline.num_vertices % 3) == 0;
|
||||
}
|
||||
// TODO (wwylele): for Strip/Fan topology, if the primitive assember is not restarted
|
||||
// after this draw call, the buffered vertex from this draw should "leak" to the next
|
||||
// draw, in which case we should buffer the vertex into the software primitive assember,
|
||||
// or disable accelerate draw completely. However, there is not game found yet that does
|
||||
// this, so this is left unimplemented for now. Revisit this when an issue is found in
|
||||
// games.
|
||||
} else {
|
||||
accelerate_draw = false;
|
||||
}
|
||||
|
||||
bool is_indexed = (id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
|
||||
|
||||
if (accelerate_draw &&
|
||||
VideoCore::g_renderer->Rasterizer()->AccelerateDrawBatch(is_indexed)) {
|
||||
if (g_debug_context) {
|
||||
g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
// Processes information about internal vertex attributes to figure out how a vertex is
|
||||
// loaded.
|
||||
// Later, these can be compiled and cached.
|
||||
const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
|
||||
VertexLoader loader(regs.pipeline);
|
||||
Shader::OutputVertex::ValidateSemantics(regs.rasterizer);
|
||||
|
||||
// Load vertices
|
||||
const auto& index_info = regs.pipeline.index_array;
|
||||
const u8* index_address_8 =
|
||||
VideoCore::g_memory->GetPhysicalPointer(base_address + index_info.offset);
|
||||
const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
|
||||
bool index_u16 = index_info.format != 0;
|
||||
|
||||
if (g_debug_context && g_debug_context->recorder) {
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
const auto texture = regs.texturing.GetTextures()[i];
|
||||
if (!texture.enabled)
|
||||
continue;
|
||||
|
||||
u8* texture_data =
|
||||
VideoCore::g_memory->GetPhysicalPointer(texture.config.GetPhysicalAddress());
|
||||
g_debug_context->recorder->MemoryAccessed(
|
||||
texture_data,
|
||||
Pica::TexturingRegs::NibblesPerPixel(texture.format) * texture.config.width /
|
||||
2 * texture.config.height,
|
||||
texture.config.GetPhysicalAddress());
|
||||
}
|
||||
}
|
||||
|
||||
DebugUtils::MemoryAccessTracker memory_accesses;
|
||||
|
||||
// Simple circular-replacement vertex cache
|
||||
// The size has been tuned for optimal balance between hit-rate and the cost of lookup
|
||||
const std::size_t VERTEX_CACHE_SIZE = 32;
|
||||
std::array<bool, VERTEX_CACHE_SIZE> vertex_cache_valid{};
|
||||
std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
|
||||
std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
|
||||
Shader::AttributeBuffer vs_output;
|
||||
|
||||
unsigned int vertex_cache_pos = 0;
|
||||
|
||||
auto* shader_engine = Shader::GetEngine();
|
||||
Shader::UnitState shader_unit;
|
||||
|
||||
shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
|
||||
|
||||
g_state.geometry_pipeline.Reconfigure();
|
||||
g_state.geometry_pipeline.Setup(shader_engine);
|
||||
if (g_state.geometry_pipeline.NeedIndexInput())
|
||||
ASSERT(is_indexed);
|
||||
|
||||
for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
|
||||
// Indexed rendering doesn't use the start offset
|
||||
unsigned int vertex =
|
||||
is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
|
||||
: (index + regs.pipeline.vertex_offset);
|
||||
|
||||
bool vertex_cache_hit = false;
|
||||
|
||||
if (is_indexed) {
|
||||
if (g_state.geometry_pipeline.NeedIndexInput()) {
|
||||
g_state.geometry_pipeline.SubmitIndex(vertex);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (g_debug_context && Pica::g_debug_context->recorder) {
|
||||
int size = index_u16 ? 2 : 1;
|
||||
memory_accesses.AddAccess(base_address + index_info.offset + size * index,
|
||||
size);
|
||||
}
|
||||
|
||||
for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
|
||||
if (vertex_cache_valid[i] && vertex == vertex_cache_ids[i]) {
|
||||
vs_output = vertex_cache[i];
|
||||
vertex_cache_hit = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!vertex_cache_hit) {
|
||||
// Initialize data for the current vertex
|
||||
Shader::AttributeBuffer input;
|
||||
loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
|
||||
|
||||
// Send to vertex shader
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
|
||||
(void*)&input);
|
||||
shader_unit.LoadInput(regs.vs, input);
|
||||
shader_engine->Run(g_state.vs, shader_unit);
|
||||
shader_unit.WriteOutput(regs.vs, vs_output);
|
||||
|
||||
if (is_indexed) {
|
||||
vertex_cache[vertex_cache_pos] = vs_output;
|
||||
vertex_cache_valid[vertex_cache_pos] = true;
|
||||
vertex_cache_ids[vertex_cache_pos] = vertex;
|
||||
vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
// Send to geometry pipeline
|
||||
g_state.geometry_pipeline.SubmitVertex(vs_output);
|
||||
}
|
||||
|
||||
for (auto& range : memory_accesses.ranges) {
|
||||
g_debug_context->recorder->MemoryAccessed(
|
||||
VideoCore::g_memory->GetPhysicalPointer(range.first), range.second, range.first);
|
||||
}
|
||||
|
||||
VideoCore::g_renderer->Rasterizer()->DrawTriangles();
|
||||
if (g_debug_context) {
|
||||
g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(gs.bool_uniforms):
|
||||
WriteUniformBoolReg(g_state.gs, g_state.regs.gs.bool_uniforms.Value());
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(gs.int_uniforms[0]):
|
||||
case PICA_REG_INDEX(gs.int_uniforms[1]):
|
||||
case PICA_REG_INDEX(gs.int_uniforms[2]):
|
||||
case PICA_REG_INDEX(gs.int_uniforms[3]): {
|
||||
unsigned index = (id - PICA_REG_INDEX(gs.int_uniforms[0]));
|
||||
auto values = regs.gs.int_uniforms[index];
|
||||
WriteUniformIntReg(g_state.gs, index,
|
||||
Common::Vec4<u8>(values.x, values.y, values.z, values.w));
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[0]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[1]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[2]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[3]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[4]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[5]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[6]):
|
||||
case PICA_REG_INDEX(gs.uniform_setup.set_value[7]): {
|
||||
WriteUniformFloatReg(g_state.regs.gs, g_state.gs, g_state.gs_float_regs_counter,
|
||||
g_state.gs_uniform_write_buffer, value);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(gs.program.set_word[0]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[1]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[2]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[3]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[4]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[5]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[6]):
|
||||
case PICA_REG_INDEX(gs.program.set_word[7]): {
|
||||
u32& offset = g_state.regs.gs.program.offset;
|
||||
if (offset >= 4096) {
|
||||
LOG_ERROR(HW_GPU, "Invalid GS program offset {}", offset);
|
||||
} else {
|
||||
g_state.gs.program_code[offset] = value;
|
||||
g_state.gs.MarkProgramCodeDirty();
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[0]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[1]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[2]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[3]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[4]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[5]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[6]):
|
||||
case PICA_REG_INDEX(gs.swizzle_patterns.set_word[7]): {
|
||||
u32& offset = g_state.regs.gs.swizzle_patterns.offset;
|
||||
if (offset >= g_state.gs.swizzle_data.size()) {
|
||||
LOG_ERROR(HW_GPU, "Invalid GS swizzle pattern offset {}", offset);
|
||||
} else {
|
||||
g_state.gs.swizzle_data[offset] = value;
|
||||
g_state.gs.MarkSwizzleDataDirty();
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(vs.bool_uniforms):
|
||||
// TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
|
||||
WriteUniformBoolReg(g_state.vs, g_state.regs.vs.bool_uniforms.Value());
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(vs.int_uniforms[0]):
|
||||
case PICA_REG_INDEX(vs.int_uniforms[1]):
|
||||
case PICA_REG_INDEX(vs.int_uniforms[2]):
|
||||
case PICA_REG_INDEX(vs.int_uniforms[3]): {
|
||||
// TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
|
||||
unsigned index = (id - PICA_REG_INDEX(vs.int_uniforms[0]));
|
||||
auto values = regs.vs.int_uniforms[index];
|
||||
WriteUniformIntReg(g_state.vs, index,
|
||||
Common::Vec4<u8>(values.x, values.y, values.z, values.w));
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[0]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[1]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[2]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[3]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[4]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[5]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[6]):
|
||||
case PICA_REG_INDEX(vs.uniform_setup.set_value[7]): {
|
||||
// TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
|
||||
WriteUniformFloatReg(g_state.regs.vs, g_state.vs, g_state.vs_float_regs_counter,
|
||||
g_state.vs_uniform_write_buffer, value);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(vs.program.set_word[0]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[1]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[2]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[3]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[4]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[5]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[6]):
|
||||
case PICA_REG_INDEX(vs.program.set_word[7]): {
|
||||
u32& offset = g_state.regs.vs.program.offset;
|
||||
if (offset >= 512) {
|
||||
LOG_ERROR(HW_GPU, "Invalid VS program offset {}", offset);
|
||||
} else {
|
||||
g_state.vs.program_code[offset] = value;
|
||||
g_state.vs.MarkProgramCodeDirty();
|
||||
if (!g_state.regs.pipeline.gs_unit_exclusive_configuration) {
|
||||
g_state.gs.program_code[offset] = value;
|
||||
g_state.gs.MarkProgramCodeDirty();
|
||||
}
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[0]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[1]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[2]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[3]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[4]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[5]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[6]):
|
||||
case PICA_REG_INDEX(vs.swizzle_patterns.set_word[7]): {
|
||||
u32& offset = g_state.regs.vs.swizzle_patterns.offset;
|
||||
if (offset >= g_state.vs.swizzle_data.size()) {
|
||||
LOG_ERROR(HW_GPU, "Invalid VS swizzle pattern offset {}", offset);
|
||||
} else {
|
||||
g_state.vs.swizzle_data[offset] = value;
|
||||
g_state.vs.MarkSwizzleDataDirty();
|
||||
if (!g_state.regs.pipeline.gs_unit_exclusive_configuration) {
|
||||
g_state.gs.swizzle_data[offset] = value;
|
||||
g_state.gs.MarkSwizzleDataDirty();
|
||||
}
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(lighting.lut_data[0]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[1]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[2]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[3]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[4]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[5]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[6]):
|
||||
case PICA_REG_INDEX(lighting.lut_data[7]): {
|
||||
auto& lut_config = regs.lighting.lut_config;
|
||||
|
||||
ASSERT_MSG(lut_config.index < 256, "lut_config.index exceeded maximum value of 255!");
|
||||
|
||||
g_state.lighting.luts[lut_config.type][lut_config.index].raw = value;
|
||||
lut_config.index.Assign(lut_config.index + 1);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[0]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[1]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[2]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[3]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[4]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[5]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[6]):
|
||||
case PICA_REG_INDEX(texturing.fog_lut_data[7]): {
|
||||
g_state.fog.lut[regs.texturing.fog_lut_offset % 128].raw = value;
|
||||
regs.texturing.fog_lut_offset.Assign(regs.texturing.fog_lut_offset + 1);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[0]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[1]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[2]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[3]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[4]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[5]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[6]):
|
||||
case PICA_REG_INDEX(texturing.proctex_lut_data[7]): {
|
||||
auto& index = regs.texturing.proctex_lut_config.index;
|
||||
auto& pt = g_state.proctex;
|
||||
|
||||
switch (regs.texturing.proctex_lut_config.ref_table.Value()) {
|
||||
case TexturingRegs::ProcTexLutTable::Noise:
|
||||
pt.noise_table[index % pt.noise_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::ColorMap:
|
||||
pt.color_map_table[index % pt.color_map_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::AlphaMap:
|
||||
pt.alpha_map_table[index % pt.alpha_map_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::Color:
|
||||
pt.color_table[index % pt.color_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::ColorDiff:
|
||||
pt.color_diff_table[index % pt.color_diff_table.size()].raw = value;
|
||||
break;
|
||||
}
|
||||
index.Assign(index + 1);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
VideoCore::g_renderer->Rasterizer()->NotifyPicaRegisterChanged(id);
|
||||
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::PicaCommandProcessed,
|
||||
reinterpret_cast<void*>(&id));
|
||||
}
|
||||
|
||||
/// Executes a GPU command list located at physical address `list`, spanning `size` bytes.
/// Each command is a (parameter, header) word pair; the header may request extra parameter
/// words that are written either to the same register or to consecutive registers
/// (see CommandHeader for the encoding).
void ProcessCommandList(PAddr list, u32 size) {

    u32* buffer = (u32*)VideoCore::g_memory->GetPhysicalPointer(list);

    // Record the command list memory for the PICA debugger, if attached.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, size, list);
    }

    // Track the in-flight command list in global state (length is in 32-bit words).
    g_state.cmd_list.addr = list;
    g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = buffer;
    g_state.cmd_list.length = size / sizeof(u32);

    while (g_state.cmd_list.current_ptr < g_state.cmd_list.head_ptr + g_state.cmd_list.length) {

        // Align read pointer to 8 bytes
        // NOTE(review): head_ptr - current_ptr is zero or negative here; a negative value
        // % 2 is still nonzero for odd offsets, so this behaves like
        // (current_ptr - head_ptr) % 2 != 0 — confirm the operand order is intentional.
        if ((g_state.cmd_list.head_ptr - g_state.cmd_list.current_ptr) % 2 != 0)
            ++g_state.cmd_list.current_ptr;

        // First word is the register value, second word is the command header.
        u32 value = *g_state.cmd_list.current_ptr++;
        const CommandHeader header = {*g_state.cmd_list.current_ptr++};

        WritePicaReg(header.cmd_id, value, header.parameter_mask);

        // Extra data words: repeated writes to the same register, or writes to
        // consecutive registers when group_commands is set.
        for (unsigned i = 0; i < header.extra_data_length; ++i) {
            u32 cmd = header.cmd_id + (header.group_commands ? i + 1 : 0);
            WritePicaReg(cmd, *g_state.cmd_list.current_ptr++, header.parameter_mask);
        }
    }
}
|
||||
|
||||
} // namespace Pica::CommandProcessor
|
@ -1,37 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Pica::CommandProcessor {
|
||||
|
||||
/// Header word of a PICA command-list entry; packs the target register id,
/// a per-byte write mask, the number of extra parameter words, and whether those
/// extra words target consecutive registers.
union CommandHeader {
    u32 hex; // Raw 32-bit representation of the header

    // Destination PICA register id for the write
    BitField<0, 16, u32> cmd_id;

    // parameter_mask:
    // Mask applied to the input value to make it possible to update
    // parts of a register without overwriting its other fields.
    // first bit: 0x000000FF
    // second bit: 0x0000FF00
    // third bit: 0x00FF0000
    // fourth bit: 0xFF000000
    BitField<16, 4, u32> parameter_mask;

    // Number of additional parameter words following the header
    BitField<20, 11, u32> extra_data_length;

    // When set, each extra word targets the next consecutive register
    // instead of rewriting cmd_id
    BitField<31, 1, u32> group_commands;
};
static_assert(std::is_standard_layout<CommandHeader>::value == true,
              "CommandHeader does not use standard layout");
static_assert(sizeof(CommandHeader) == sizeof(u32), "CommandHeader has incorrect size!");
|
||||
|
||||
void ProcessCommandList(PAddr list, u32 size);
|
||||
|
||||
} // namespace Pica::CommandProcessor
|
@ -0,0 +1,419 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/archives.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/hle/service/gsp/gsp_gpu.h"
|
||||
#include "core/hle/service/plgldr/plgldr.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/gpu.h"
|
||||
#include "video_core/gpu_debugger.h"
|
||||
#include "video_core/pica/pica_core.h"
|
||||
#include "video_core/pica/regs_lcd.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/renderer_software/sw_blitter.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace VideoCore {
|
||||
|
||||
constexpr VAddr VADDR_LCD = 0x1ED02000;
|
||||
constexpr VAddr VADDR_GPU = 0x1EF00000;
|
||||
|
||||
/// Translates a process virtual address into the corresponding physical address.
/// A null address maps to null; addresses outside every known region are logged
/// and returned unchanged as a best-effort fallback.
static PAddr VirtualToPhysicalAddress(VAddr addr) {
    if (addr == 0) {
        return 0;
    }

    // True when addr lies within the inclusive range [base, end].
    const auto in_range = [addr](VAddr base, VAddr end) { return addr >= base && addr <= end; };

    if (in_range(Memory::VRAM_VADDR, Memory::VRAM_VADDR_END)) {
        return addr - Memory::VRAM_VADDR + Memory::VRAM_PADDR;
    }
    if (in_range(Memory::LINEAR_HEAP_VADDR, Memory::LINEAR_HEAP_VADDR_END)) {
        return addr - Memory::LINEAR_HEAP_VADDR + Memory::FCRAM_PADDR;
    }
    if (in_range(Memory::NEW_LINEAR_HEAP_VADDR, Memory::NEW_LINEAR_HEAP_VADDR_END)) {
        return addr - Memory::NEW_LINEAR_HEAP_VADDR + Memory::FCRAM_PADDR;
    }
    if (in_range(Memory::PLUGIN_3GX_FB_VADDR, Memory::PLUGIN_3GX_FB_VADDR_END)) {
        return addr - Memory::PLUGIN_3GX_FB_VADDR + Service::PLGLDR::PLG_LDR::GetPluginFBAddr();
    }

    // Unknown mapping: report it and fall back to the identity translation.
    LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x{:08X}", addr);
    return addr;
}
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
|
||||
MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));
|
||||
|
||||
/// Private implementation (pimpl) holding all GPU subsystem state.
struct GPU::Impl {
    Core::Timing& timing;                           // Scheduler used for the VBlank event
    Core::System& system;                           // Owning system instance
    Memory::MemorySystem& memory;                   // Guest memory, used for DMA copies
    Pica::DebugContext& debug_context;              // PICA debugger hooks
    Pica::PicaCore pica;                            // The PICA GPU core
    GraphicsDebugger gpu_debugger;                  // GSP command debugger
    std::unique_ptr<RendererBase> renderer;         // Active rendering backend
    RasterizerInterface* rasterizer;                // Non-owning; owned by renderer
    std::unique_ptr<SwRenderer::SwBlitter> sw_blitter; // Software fallback for fills/transfers
    Core::TimingEventType* vblank_event;            // Recurring per-frame event (set by GPU ctor)
    Service::GSP::InterruptHandler signal_interrupt; // Callback used to raise GSP interrupts

    // NOTE(review): dereferences the global Pica::g_debug_context; this assumes the
    // debug context has been created before the GPU — confirm initialization order.
    explicit Impl(Core::System& system, Frontend::EmuWindow& emu_window,
                  Frontend::EmuWindow* secondary_window)
        : timing{system.CoreTiming()}, system{system}, memory{system.Memory()},
          debug_context{*Pica::g_debug_context}, pica{memory, debug_context},
          renderer{VideoCore::CreateRenderer(emu_window, secondary_window, pica, system)},
          rasterizer{renderer->Rasterizer()}, sw_blitter{std::make_unique<SwRenderer::SwBlitter>(
                                                  memory, rasterizer)} {}
    ~Impl() = default;
};
|
||||
|
||||
/// Constructs the GPU, schedules the recurring VBlank event and binds the
/// renderer's rasterizer to the PICA core.
GPU::GPU(Core::System& system, Frontend::EmuWindow& emu_window,
         Frontend::EmuWindow* secondary_window)
    : impl{std::make_unique<Impl>(system, emu_window, secondary_window)} {
    // Register and schedule the per-frame VBlank callback; it reschedules itself.
    impl->vblank_event = impl->timing.RegisterEvent(
        "GPU::VBlankCallback",
        [this](uintptr_t user_data, s64 cycles_late) { VBlankCallback(user_data, cycles_late); });
    impl->timing.ScheduleEvent(FRAME_TICKS, impl->vblank_event);

    // Bind the rasterizer to the PICA GPU
    impl->pica.BindRasterizer(impl->rasterizer);
}
|
||||
|
||||
// Out-of-line destructor is required: Impl is an incomplete type in the header (pimpl).
GPU::~GPU() = default;
|
||||
|
||||
/// Installs the GSP interrupt callback, both for this class and the PICA core.
void GPU::SetInterruptHandler(Service::GSP::InterruptHandler handler) {
    impl->signal_interrupt = handler;
    impl->pica.SetInterruptHandler(handler);
}
|
||||
|
||||
/// Flushes any cached rasterizer data in [addr, addr + size) back to guest memory.
void GPU::FlushRegion(PAddr addr, u32 size) {
    impl->rasterizer->FlushRegion(addr, size);
}
|
||||
|
||||
/// Invalidates any cached rasterizer data covering [addr, addr + size).
void GPU::InvalidateRegion(PAddr addr, u32 size) {
    impl->rasterizer->InvalidateRegion(addr, size);
}
|
||||
|
||||
/// Flushes (optionally) and drops the entire rasterizer cache.
void GPU::ClearAll(bool flush) {
    impl->rasterizer->ClearAll(flush);
}
|
||||
|
||||
/// Executes a single GSP command by translating it into writes to the corresponding
/// GPU registers and triggering the matching engine (DMA, command list, memory fill,
/// display transfer, texture copy). Notifies the debugger when done.
void GPU::Execute(const Service::GSP::Command& command) {
    using Service::GSP::CommandId;
    auto& regs = impl->pica.regs;

    switch (command.id) {
    case CommandId::RequestDma: {
        // Keep the rasterizer caches coherent with the CPU copy below.
        Memory::RasterizerFlushVirtualRegion(command.dma_request.source_address,
                                             command.dma_request.size, Memory::FlushMode::Flush);
        Memory::RasterizerFlushVirtualRegion(command.dma_request.dest_address,
                                             command.dma_request.size,
                                             Memory::FlushMode::Invalidate);

        // TODO(Subv): These memory accesses should not go through the application's memory mapping.
        // They should go through the GSP module's memory mapping.
        const auto process = impl->system.Kernel().GetCurrentProcess();
        impl->memory.CopyBlock(*process, command.dma_request.dest_address,
                               command.dma_request.source_address, command.dma_request.size);
        impl->signal_interrupt(Service::GSP::InterruptId::DMA);
        break;
    }
    case CommandId::SubmitCmdList: {
        auto& params = command.submit_gpu_cmdlist;
        auto& cmdbuffer = regs.internal.pipeline.command_buffer;

        // Write to the command buffer GPU registers (addresses/sizes are in 8-byte units).
        cmdbuffer.addr[0].Assign(VirtualToPhysicalAddress(params.address) >> 3);
        cmdbuffer.size[0].Assign(params.size >> 3);
        cmdbuffer.trigger[0] = 1;

        // Trigger processing of the command list
        SubmitCmdList(0);
        break;
    }
    case CommandId::MemoryFill: {
        auto& params = command.memory_fill;
        auto& memfill = regs.memory_fill_config;

        // Write to the memory fill GPU registers.
        // A zero start address disables the corresponding fill unit.
        if (params.start1 != 0) {
            memfill[0].address_start = VirtualToPhysicalAddress(params.start1) >> 3;
            memfill[0].address_end = VirtualToPhysicalAddress(params.end1) >> 3;
            memfill[0].value_32bit = params.value1;
            memfill[0].control = params.control1;
            MemoryFill(0);
        }
        if (params.start2 != 0) {
            memfill[1].address_start = VirtualToPhysicalAddress(params.start2) >> 3;
            memfill[1].address_end = VirtualToPhysicalAddress(params.end2) >> 3;
            memfill[1].value_32bit = params.value2;
            memfill[1].control = params.control2;
            MemoryFill(1);
        }
        break;
    }
    case CommandId::DisplayTransfer: {
        auto& params = command.display_transfer;
        auto& display_transfer = regs.display_transfer_config;

        // Write to the transfer engine GPU registers.
        display_transfer.input_address = VirtualToPhysicalAddress(params.in_buffer_address) >> 3;
        display_transfer.output_address = VirtualToPhysicalAddress(params.out_buffer_address) >> 3;
        display_transfer.input_size = params.in_buffer_size;
        display_transfer.output_size = params.out_buffer_size;
        display_transfer.flags = params.flags;
        display_transfer.trigger.Assign(1);

        // Trigger the display transfer.
        MemoryTransfer();
        break;
    }
    case CommandId::TextureCopy: {
        auto& params = command.texture_copy;
        // Texture copies share the display transfer engine registers.
        auto& texture_copy = regs.display_transfer_config;

        // Write to the transfer engine GPU registers.
        texture_copy.input_address = VirtualToPhysicalAddress(params.in_buffer_address) >> 3;
        texture_copy.output_address = VirtualToPhysicalAddress(params.out_buffer_address) >> 3;
        texture_copy.texture_copy.size = params.size;
        texture_copy.texture_copy.input_size = params.in_width_gap;
        texture_copy.texture_copy.output_size = params.out_width_gap;
        texture_copy.flags = params.flags;
        texture_copy.trigger.Assign(1);

        // Trigger the texture copy.
        MemoryTransfer();
        break;
    }
    case CommandId::CacheFlush: {
        // Rasterizer flushing handled elsewhere in CPU read/write and other GPU handlers
        // Use command.cache_flush.regions to implement this handler
        break;
    }
    default:
        LOG_ERROR(HW_GPU, "Unknown command {:#08X}", command.id.Value());
    }

    // Notify debugger that a GSP command was processed.
    impl->debug_context.OnEvent(Pica::DebugContext::Event::GSPCommandProcessed, &command);
}
|
||||
|
||||
/// Updates the framebuffer configuration of the given screen (0 = top, 1 = bottom)
/// with the addresses/format provided by GSP, and notifies the debugger.
void GPU::SetBufferSwap(u32 screen_id, const Service::GSP::FrameBufferInfo& info) {
    const PAddr phys_address_left = VirtualToPhysicalAddress(info.address_left);
    const PAddr phys_address_right = VirtualToPhysicalAddress(info.address_right);

    // Update framebuffer properties.
    // active_fb selects which of the two framebuffer register sets to update.
    auto& framebuffer = impl->pica.regs.framebuffer_config[screen_id];
    if (info.active_fb == 0) {
        framebuffer.address_left1 = phys_address_left;
        framebuffer.address_right1 = phys_address_right;
    } else {
        framebuffer.address_left2 = phys_address_left;
        framebuffer.address_right2 = phys_address_right;
    }

    framebuffer.stride = info.stride;
    framebuffer.format = info.format;
    framebuffer.active_fb = info.shown_fb;

    // Notify debugger about the buffer swap.
    impl->debug_context.OnEvent(Pica::DebugContext::Event::BufferSwapped, nullptr);

    // A top-screen swap marks the end of a game frame for profiling purposes.
    if (screen_id == 0) {
        MicroProfileFlip();
        impl->system.perf_stats->EndGameFrame();
    }
}
|
||||
|
||||
/// Applies the same LCD color fill configuration to both the top and bottom screens.
void GPU::SetColorFill(const Pica::ColorFill& fill) {
    impl->pica.regs_lcd.color_fill_top = fill;
    impl->pica.regs_lcd.color_fill_bottom = fill;
}
|
||||
|
||||
/// Reads a 32-bit word from a memory-mapped GPU register.
/// The upper address bits select the register bank (LCD vs. GPU); the low bits
/// index into the corresponding register array.
u32 GPU::ReadReg(VAddr addr) {
    switch (addr & 0xFFFFF000) {
    case VADDR_LCD: {
        const u32 offset = addr - VADDR_LCD;
        const u32 index = offset / sizeof(u32);
        ASSERT(addr % sizeof(u32) == 0);
        ASSERT(index < Pica::RegsLcd::NumIds());
        return impl->pica.regs_lcd[index];
    }
    case VADDR_GPU:
    case VADDR_GPU + 0x1000: {
        const u32 offset = addr - VADDR_GPU;
        const u32 index = offset / sizeof(u32);
        ASSERT(addr % sizeof(u32) == 0);
        ASSERT(index < Pica::PicaCore::Regs::NUM_REGS);
        return impl->pica.regs.reg_array[index];
    }
    default:
        UNREACHABLE_MSG("Read from unknown GPU address {:#08X}", addr);
        // Defensive fallback: if UNREACHABLE_MSG does not terminate in release builds,
        // falling off the end of a value-returning function would be undefined behavior.
        return 0;
    }
}
|
||||
|
||||
/// Writes a 32-bit word to a memory-mapped GPU register and, for registers that act as
/// engine triggers (memory fill, display transfer, command list), kicks off the
/// corresponding operation.
void GPU::WriteReg(VAddr addr, u32 data) {
    switch (addr & 0xFFFFF000) {
    case VADDR_LCD: {
        const u32 offset = addr - VADDR_LCD;
        const u32 index = offset / sizeof(u32);
        ASSERT(addr % sizeof(u32) == 0);
        ASSERT(index < Pica::RegsLcd::NumIds());
        impl->pica.regs_lcd[index] = data;
        break;
    }
    case VADDR_GPU:
    case VADDR_GPU + 0x1000: {
        const u32 offset = addr - VADDR_GPU;
        const u32 index = offset / sizeof(u32);

        ASSERT(addr % sizeof(u32) == 0);
        ASSERT(index < Pica::PicaCore::Regs::NUM_REGS);
        // The register value is stored first so the handlers below observe it.
        impl->pica.regs.reg_array[index] = data;

        // Handle registers that trigger GPU actions
        switch (index) {
        case GPU_REG_INDEX(memory_fill_config[0].trigger):
            MemoryFill(0);
            break;
        case GPU_REG_INDEX(memory_fill_config[1].trigger):
            MemoryFill(1);
            break;
        case GPU_REG_INDEX(display_transfer_config.trigger):
            MemoryTransfer();
            break;
        case GPU_REG_INDEX(internal.pipeline.command_buffer.trigger[0]):
            SubmitCmdList(0);
            break;
        case GPU_REG_INDEX(internal.pipeline.command_buffer.trigger[1]):
            SubmitCmdList(1);
            break;
        default:
            break;
        }
        break;
    }
    default:
        UNREACHABLE_MSG("Write to unknown GPU address {:#08X}", addr);
    }
}
|
||||
|
||||
/// Synchronizes fixed-function renderer state with the PICA registers.
void GPU::Sync() {
    impl->renderer->Sync();
}
|
||||
|
||||
/// Returns a mutable reference to the active rendering backend.
VideoCore::RendererBase& GPU::Renderer() {
    return *impl->renderer;
}
|
||||
|
||||
/// Returns a mutable reference to the PICA GPU core.
Pica::PicaCore& GPU::PicaCore() {
    return impl->pica;
}
|
||||
|
||||
/// Returns an immutable reference to the PICA GPU core.
const Pica::PicaCore& GPU::PicaCore() const {
    return impl->pica;
}
|
||||
|
||||
/// Returns a mutable reference to the PICA debugging context.
Pica::DebugContext& GPU::DebugContext() {
    // Consistency: return the reference cached in Impl instead of re-reading the global.
    // They refer to the same object — Impl binds *Pica::g_debug_context at construction —
    // but this keeps all accessors going through impl like the rest of the class.
    return impl->debug_context;
}
|
||||
|
||||
/// Returns a mutable reference to the GSP command debugger.
GraphicsDebugger& GPU::Debugger() {
    return impl->gpu_debugger;
}
|
||||
|
||||
/// Processes the command list referenced by command buffer slot `index`,
/// if its trigger register is set. Clears the trigger when done.
void GPU::SubmitCmdList(u32 index) {
    // Check if a command list was triggered.
    auto& config = impl->pica.regs.internal.pipeline.command_buffer;
    if (!config.trigger[index]) {
        return;
    }

    MICROPROFILE_SCOPE(GPU_CmdlistProcessing);

    // Forward command list processing to the PICA core.
    const PAddr addr = config.GetPhysicalAddress(index);
    const u32 size = config.GetSize(index);
    impl->pica.ProcessCmdList(addr, size);
    config.trigger[index] = 0;
}
|
||||
|
||||
/// Performs the memory fill configured in fill unit `index` (0 or 1), preferring the
/// hardware-accelerated path and falling back to the software blitter. Raises the
/// matching PSC interrupt and updates the trigger/finished flags.
void GPU::MemoryFill(u32 index) {
    // Check if a memory fill was triggered.
    auto& config = impl->pica.regs.memory_fill_config[index];
    if (!config.trigger) {
        return;
    }

    // Perform memory fill.
    if (!impl->rasterizer->AccelerateFill(config)) {
        impl->sw_blitter->MemoryFill(config);
    }

    // It seems that it won't signal interrupt if "address_start" is zero.
    // TODO: hwtest this
    if (config.GetStartAddress() != 0) {
        // Fill unit 0 signals PSC0, unit 1 signals PSC1.
        if (!index) {
            impl->signal_interrupt(Service::GSP::InterruptId::PSC0);
        } else {
            impl->signal_interrupt(Service::GSP::InterruptId::PSC1);
        }
    }

    // Reset "trigger" flag and set the "finish" flag
    // This was confirmed to happen on hardware even if "address_start" is zero.
    config.trigger.Assign(0);
    config.finished.Assign(1);
}
|
||||
|
||||
/// Performs a pending display transfer or texture copy (both share the same register
/// block; is_texture_copy selects the mode). Prefers the hardware-accelerated path,
/// clears the trigger and raises the PPF interrupt on completion.
void GPU::MemoryTransfer() {
    // Check if a transfer was triggered.
    auto& config = impl->pica.regs.display_transfer_config;
    if (!config.trigger.Value()) {
        return;
    }

    MICROPROFILE_SCOPE(GPU_DisplayTransfer);

    // Notify debugger about the display transfer.
    impl->debug_context.OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer, nullptr);

    // Perform memory transfer
    if (config.is_texture_copy) {
        if (!impl->rasterizer->AccelerateTextureCopy(config)) {
            impl->sw_blitter->TextureCopy(config);
        }
    } else {
        if (!impl->rasterizer->AccelerateDisplayTransfer(config)) {
            impl->sw_blitter->DisplayTransfer(config);
        }
    }

    // Complete transfer.
    config.trigger.Assign(0);
    impl->signal_interrupt(Service::GSP::InterruptId::PPF);
}
|
||||
|
||||
/// Per-frame timing callback: presents the rendered frame, signals the per-screen
/// VBlank interrupts (PDC0/PDC1), and reschedules itself, compensating for lateness.
void GPU::VBlankCallback(std::uintptr_t user_data, s64 cycles_late) {
    // Present renderered frame.
    impl->renderer->SwapBuffers();

    // Signal to GSP that GPU interrupt has occurred
    impl->signal_interrupt(Service::GSP::InterruptId::PDC0);
    impl->signal_interrupt(Service::GSP::InterruptId::PDC1);

    // Reschedule recurrent event; subtracting cycles_late keeps the frame cadence stable.
    impl->timing.ScheduleEvent(FRAME_TICKS - cycles_late, impl->vblank_event);
}
|
||||
|
||||
/// Boost.Serialization hook: only the PICA core state is persisted in save states;
/// renderer/rasterizer state is reconstructed on load.
template <class Archive>
void GPU::serialize(Archive& ar, const u32 file_version) {
    ar & impl->pica;
}

SERIALIZE_IMPL(GPU)
|
||||
|
||||
} // namespace VideoCore
|
@ -0,0 +1,113 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <boost/serialization/access.hpp>
|
||||
|
||||
#include "core/hle/service/gsp/gsp_interrupt.h"
|
||||
|
||||
namespace Service::GSP {
|
||||
struct Command;
|
||||
struct FrameBufferInfo;
|
||||
} // namespace Service::GSP
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
}
|
||||
|
||||
namespace Pica {
|
||||
class DebugContext;
|
||||
class PicaCore;
|
||||
struct RegsLcd;
|
||||
union ColorFill;
|
||||
} // namespace Pica
|
||||
|
||||
namespace Frontend {
|
||||
class EmuWindow;
|
||||
}
|
||||
|
||||
namespace VideoCore {
|
||||
|
||||
/// Measured on hardware to be 2240568 timer cycles or 4481136 ARM11 cycles
|
||||
constexpr u64 FRAME_TICKS = 4481136ull;
|
||||
|
||||
class GraphicsDebugger;
|
||||
class RendererBase;
|
||||
|
||||
/**
|
||||
* The GPU class is the high level interface to the video_core for core services.
|
||||
*/
|
||||
class GPU {
public:
    explicit GPU(Core::System& system, Frontend::EmuWindow& emu_window,
                 Frontend::EmuWindow* secondary_window);
    ~GPU();

    /// Sets the function to call for signalling GSP interrupts.
    void SetInterruptHandler(Service::GSP::InterruptHandler handler);

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    void FlushRegion(PAddr addr, u32 size);

    /// Notify rasterizer that any caches of the specified region should be invalidated
    void InvalidateRegion(PAddr addr, u32 size);

    /// Flushes and invalidates all memory in the rasterizer cache and removes any leftover state.
    void ClearAll(bool flush);

    /// Executes the provided GSP command.
    void Execute(const Service::GSP::Command& command);

    /// Updates GPU display framebuffer configuration using the specified parameters.
    void SetBufferSwap(u32 screen_id, const Service::GSP::FrameBufferInfo& info);

    /// Sets the LCD color fill configuration for the top and bottom screens.
    void SetColorFill(const Pica::ColorFill& fill);

    /// Reads a word from the GPU virtual address.
    u32 ReadReg(VAddr addr);

    /// Writes the provided value to the GPU virtual address.
    void WriteReg(VAddr addr, u32 data);

    /// Synchronizes fixed function renderer state with PICA registers.
    void Sync();

    /// Returns a mutable reference to the renderer.
    [[nodiscard]] VideoCore::RendererBase& Renderer();

    /// Returns a mutable reference to the PICA GPU.
    [[nodiscard]] Pica::PicaCore& PicaCore();

    /// Returns an immutable reference to the PICA GPU.
    [[nodiscard]] const Pica::PicaCore& PicaCore() const;

    /// Returns a mutable reference to the pica debugging context.
    [[nodiscard]] Pica::DebugContext& DebugContext();

    /// Returns a mutable reference to the GSP command debugger.
    [[nodiscard]] GraphicsDebugger& Debugger();

private:
    /// Processes the command list in command-buffer slot `index` if its trigger is set.
    void SubmitCmdList(u32 index);

    /// Runs the memory fill configured in fill unit `index` if triggered.
    void MemoryFill(u32 index);

    /// Runs a pending display transfer or texture copy.
    void MemoryTransfer();

    /// Recurring per-frame callback: presents a frame and signals VBlank interrupts.
    void VBlankCallback(uintptr_t user_data, s64 cycles_late);

    // Save-state support (serializes the PICA core state only).
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    // pimpl: keeps heavy video_core types out of this header.
    struct Impl;
    std::unique_ptr<Impl> impl;
};
|
||||
|
||||
} // namespace VideoCore
|
@ -1,70 +0,0 @@
|
||||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include <type_traits>
|
||||
#include "core/global.h"
|
||||
#include "video_core/geometry_pipeline.h"
|
||||
#include "video_core/pica.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace Core {
// Specialization of the Core::Global accessor so callers can fetch the global
// PICA state without including pica_state.h everywhere.
template <>
Pica::State& Global() {
    return Pica::g_state;
}
} // namespace Core
|
||||
|
||||
namespace Pica {
|
||||
|
||||
State g_state;
|
||||
|
||||
/// Initializes the PICA module by resetting the global state to defaults.
void Init() {
    g_state.Reset();
}
|
||||
|
||||
/// Shuts down the PICA module; only the shader subsystem needs explicit teardown.
void Shutdown() {
    Shader::Shutdown();
}
|
||||
|
||||
/// Zero-fills the storage of `o`. Restricted to trivial types, for which a
/// byte-wise memset is a well-defined way to reset the object.
template <typename T>
void Zero(T& o) {
    // memset on a non-trivial type is UB; reject it at compile time.
    static_assert(std::is_trivial_v<T>, "It's undefined behavior to memset a non-trivial type");
    std::memset(static_cast<void*>(&o), 0, sizeof o);
}
|
||||
|
||||
/// Wires the geometry shader unit and geometry pipeline so that every produced
/// vertex is assembled into triangles and forwarded to the rasterizer.
State::State() : geometry_pipeline(*this) {
    // Converts a shader output buffer into an OutputVertex and feeds it to the
    // primitive assembler, which emits complete triangles to the rasterizer.
    auto SubmitVertex = [this](const Shader::AttributeBuffer& vertex) {
        using Pica::Shader::OutputVertex;
        auto AddTriangle = [](const OutputVertex& v0, const OutputVertex& v1,
                              const OutputVertex& v2) {
            VideoCore::g_renderer->Rasterizer()->AddTriangle(v0, v1, v2);
        };
        primitive_assembler.SubmitVertex(
            Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, vertex), AddTriangle);
    };

    auto SetWinding = [this]() { primitive_assembler.SetWinding(); };

    // NOTE(review): these go through the global g_state rather than *this; fine while
    // State is only ever instantiated as g_state, but a second instance would be
    // mis-wired — confirm this is intentional.
    g_state.gs_unit.SetVertexHandler(SubmitVertex, SetWinding);
    g_state.geometry_pipeline.SetVertexHandler(SubmitVertex);
}
|
||||
|
||||
/// Resets all Pica state (registers, shader setups, command list tracking and
/// write buffers) back to power-on defaults.
void State::Reset() {
    Zero(regs);
    vs = {};
    gs = {};
    Zero(cmd_list);
    immediate = {};
    // Default topology matches the hardware reset value.
    primitive_assembler.Reconfigure(PipelineRegs::TriangleTopology::List);
    // Clear the partially-written uniform/attribute staging buffers.
    vs_float_regs_counter = 0;
    vs_uniform_write_buffer.fill(0);
    gs_float_regs_counter = 0;
    gs_uniform_write_buffer.fill(0);
    default_attr_counter = 0;
    default_attr_write_buffer.fill(0);
}
|
||||
} // namespace Pica
|
@ -1,16 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "video_core/regs_texturing.h"
|
||||
namespace Pica {

/// Initialize Pica state (resets the global Pica::g_state)
void Init();

/// Shutdown Pica state
void Shutdown();

} // namespace Pica
|
@ -0,0 +1,50 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "video_core/pica/output_vertex.h"
|
||||
#include "video_core/pica/regs_rasterizer.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// Builds an output vertex from the raw shader output attribute buffer by
/// scattering each attribute component to the semantic slot selected in
/// GPUREG_SH_OUTMAP_Oi, then copying the slots over this struct's members
/// (the ASSERT_POS checks below pin the member layout to the semantic indices).
OutputVertex::OutputVertex(const RasterizerRegs& regs, const AttributeBuffer& output) {
    // Attributes can be used without being set in GPUREG_SH_OUTMAP_Oi
    // Hardware tests have shown that they are initialized to 1 in this case.
    // The array is oversized (32 slots) so out-of-range map indices land in the
    // overflow area instead of corrupting the vertex.
    std::array<f24, 32> vertex_slots_overflow;
    vertex_slots_overflow.fill(f24::One());

    const u32 num_attributes = regs.vs_output_total & 7;
    for (std::size_t attrib = 0; attrib < num_attributes; ++attrib) {
        const auto output_register_map = regs.vs_output_attributes[attrib];
        vertex_slots_overflow[output_register_map.map_x] = output[attrib][0];
        vertex_slots_overflow[output_register_map.map_y] = output[attrib][1];
        vertex_slots_overflow[output_register_map.map_z] = output[attrib][2];
        vertex_slots_overflow[output_register_map.map_w] = output[attrib][3];
    }

    // Copy to result
    std::memcpy(this, vertex_slots_overflow.data(), sizeof(OutputVertex));

    // The hardware takes the absolute and saturates vertex colors, *before* doing interpolation
    for (u32 i = 0; i < 4; ++i) {
        const f32 c = std::fabs(color[i].ToFloat32());
        color[i] = f24::FromFloat32(c < 1.0f ? c : 1.0f);
    }
}

// Verify that each semantic member sits exactly at the slot index the memcpy
// above assumes; a mismatch would silently scramble the vertex.
#define ASSERT_POS(var, pos)                                                                       \
    static_assert(offsetof(OutputVertex, var) == pos * sizeof(f24), "Semantic at wrong "           \
                                                                    "offset.")

ASSERT_POS(pos, RasterizerRegs::VSOutputAttributes::POSITION_X);
ASSERT_POS(quat, RasterizerRegs::VSOutputAttributes::QUATERNION_X);
ASSERT_POS(color, RasterizerRegs::VSOutputAttributes::COLOR_R);
ASSERT_POS(tc0, RasterizerRegs::VSOutputAttributes::TEXCOORD0_U);
ASSERT_POS(tc1, RasterizerRegs::VSOutputAttributes::TEXCOORD1_U);
ASSERT_POS(tc0_w, RasterizerRegs::VSOutputAttributes::TEXCOORD0_W);
ASSERT_POS(view, RasterizerRegs::VSOutputAttributes::VIEW_X);
ASSERT_POS(tc2, RasterizerRegs::VSOutputAttributes::TEXCOORD2_U);

#undef ASSERT_POS
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,48 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica_types.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
struct RasterizerRegs;
|
||||
|
||||
using AttributeBuffer = std::array<Common::Vec4<f24>, 16>;
|
||||
|
||||
/// A fully-processed vertex as emitted by the vertex/geometry shader stage.
/// Member order and padding mirror the hardware semantic slot layout
/// (verified by the offsetof asserts in the implementation file) — do not reorder.
struct OutputVertex {
    OutputVertex() = default;
    explicit OutputVertex(const RasterizerRegs& regs, const AttributeBuffer& output);

    Common::Vec4<f24> pos;   // clip-space position
    Common::Vec4<f24> quat;  // normal quaternion
    Common::Vec4<f24> color; // vertex color (saturated to [0, 1] on construction)
    Common::Vec2<f24> tc0;   // texture coordinate 0
    Common::Vec2<f24> tc1;   // texture coordinate 1
    f24 tc0_w;               // texture coordinate 0, w component
    INSERT_PADDING_WORDS(1); // keeps `view` at its hardware slot index
    Common::Vec3<f24> view;  // view vector
    INSERT_PADDING_WORDS(1); // keeps `tc2` at its hardware slot index
    Common::Vec2<f24> tc2;   // texture coordinate 2

private:
    // Boost serialization of all semantic members (padding is not serialized).
    template <class Archive>
    void serialize(Archive& ar, const u32) {
        ar& pos;
        ar& quat;
        ar& color;
        ar& tc0;
        ar& tc1;
        ar& tc0_w;
        ar& view;
        ar& tc2;
    }
    friend class boost::serialization::access;
};
static_assert(std::is_trivial_v<OutputVertex>, "Structure is not POD");
static_assert(sizeof(OutputVertex) == 24 * sizeof(f32), "OutputVertex has invalid size");
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,74 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/serialization/binary_object.hpp>
|
||||
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica_types.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/**
|
||||
* Uniforms and fixed attributes are written in a packed format such that four float24 values are
|
||||
* encoded in three 32-bit numbers. Uniforms can also encode four float32 values in four 32-bit
|
||||
* numbers. We write to internal memory once a full vector is written.
|
||||
*/
|
||||
struct PackedAttribute {
    std::array<u32, 4> buffer{}; // staging words, most-significant attribute first
    u32 index{};                 // number of words currently staged

    /// Places a word to the queue and returns true if the queue becomes full.
    /// NOTE(review): there is no bounds check — pushing more words without an
    /// intervening Get()/Reset() after this returns true would write past
    /// `buffer`; callers are expected to drain the queue immediately.
    constexpr bool Push(u32 word, bool is_float32 = false) {
        buffer[index++] = word;
        return (index >= 4 && is_float32) || (index >= 3 && !is_float32);
    }

    /// Resets the queue discarding previous entries.
    constexpr void Reset() {
        index = 0;
    }

    /// Returns the queue contents with either float24 or float32 interpretation.
    /// Also resets the queue, so each completed vector is consumed exactly once.
    constexpr Common::Vec4<f24> Get(bool is_float32 = false) {
        Reset();
        if (is_float32) {
            return AsFloat32();
        } else {
            return AsFloat24();
        }
    }

private:
    /// Decodes the queue contents with float24 transfer mode.
    /// Three 32-bit words carry four packed 24-bit floats; components are
    /// unpacked in reverse word order (w comes from the first word pushed).
    constexpr Common::Vec4<f24> AsFloat24() const {
        const u32 x = buffer[2] & 0xFFFFFF;
        const u32 y = ((buffer[1] & 0xFFFF) << 8) | ((buffer[2] >> 24) & 0xFF);
        const u32 z = ((buffer[0] & 0xFF) << 16) | ((buffer[1] >> 16) & 0xFFFF);
        const u32 w = buffer[0] >> 8;
        return Common::Vec4<f24>{f24::FromRaw(x), f24::FromRaw(y), f24::FromRaw(z),
                                 f24::FromRaw(w)};
    }

    /// Decodes the queue contents with float32 transfer mode.
    /// Four words are reinterpreted as IEEE floats and stored w-first
    /// (component 3 - i), matching the write order of the packed format.
    constexpr Common::Vec4<f24> AsFloat32() const {
        Common::Vec4<f24> uniform;
        for (u32 i = 0; i < 4; i++) {
            const f32 buffer_value = std::bit_cast<f32>(buffer[i]);
            uniform[3 - i] = f24::FromFloat32(buffer_value);
        }
        return uniform;
    }

private:
    // Boost serialization of the staging state.
    template <class Archive>
    void serialize(Archive& ar, const u32) {
        ar& buffer;
        ar& index;
    }
    friend class boost::serialization::access;
};
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,592 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/arch.h"
|
||||
#include "common/archives.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/core.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/pica/pica_core.h"
|
||||
#include "video_core/pica/vertex_loader.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_Drawing, "GPU", "Drawing", MP_RGB(50, 50, 240));
|
||||
|
||||
using namespace DebugUtils;
|
||||
|
||||
/// Layout of one GPU command header word as it appears in a command list.
union CommandHeader {
    u32 hex;
    BitField<0, 16, u32> cmd_id;            // internal PICA register index to write
    BitField<16, 4, u32> parameter_mask;    // 4-bit byte-enable mask applied to the write
    BitField<20, 8, u32> extra_data_length; // number of extra parameter words that follow
    BitField<31, 1, u32> group_commands;    // when set, extra words target consecutive registers
};
static_assert(sizeof(CommandHeader) == sizeof(u32), "CommandHeader has incorrect size!");
|
||||
|
||||
PicaCore::PicaCore(Memory::MemorySystem& memory_, DebugContext& debug_context_)
    : memory{memory_}, debug_context{debug_context_}, geometry_pipeline{regs.internal, gs_unit,
                                                                        gs_setup},
      shader_engine{CreateEngine(Settings::values.use_shader_jit.GetValue())} {
    SetFramebufferDefaults();

    // Route every vertex emitted by the shader stage through the primitive
    // assembler, which forwards completed triangles to the bound rasterizer.
    const auto on_vertex = [this](const AttributeBuffer& attributes) {
        const auto emit_triangle = [this](const OutputVertex& first, const OutputVertex& second,
                                          const OutputVertex& third) {
            rasterizer->AddTriangle(first, second, third);
        };
        primitive_assembler.SubmitVertex(OutputVertex(regs.internal.rasterizer, attributes),
                                         emit_triangle);
    };

    const auto on_winding = [this]() { primitive_assembler.SetWinding(); };
    gs_unit.SetVertexHandlers(on_vertex, on_winding);
    geometry_pipeline.SetVertexHandler(on_vertex);

    // Hardware reset topology.
    primitive_assembler.Reconfigure(PipelineRegs::TriangleTopology::List);
}

PicaCore::~PicaCore() = default;
|
||||
|
||||
void PicaCore::SetFramebufferDefaults() {
|
||||
auto& framebuffer_top = regs.framebuffer_config[0];
|
||||
auto& framebuffer_sub = regs.framebuffer_config[1];
|
||||
|
||||
// Set framebuffer defaults from nn::gx::Initialize
|
||||
framebuffer_top.address_left1 = 0x181E6000;
|
||||
framebuffer_top.address_left2 = 0x1822C800;
|
||||
framebuffer_top.address_right1 = 0x18273000;
|
||||
framebuffer_top.address_right2 = 0x182B9800;
|
||||
framebuffer_sub.address_left1 = 0x1848F000;
|
||||
framebuffer_sub.address_left2 = 0x184C7800;
|
||||
|
||||
framebuffer_top.width.Assign(240);
|
||||
framebuffer_top.height.Assign(400);
|
||||
framebuffer_top.stride = 3 * 240;
|
||||
framebuffer_top.color_format.Assign(PixelFormat::RGB8);
|
||||
framebuffer_top.active_fb = 0;
|
||||
|
||||
framebuffer_sub.width.Assign(240);
|
||||
framebuffer_sub.height.Assign(320);
|
||||
framebuffer_sub.stride = 3 * 240;
|
||||
framebuffer_sub.color_format.Assign(PixelFormat::RGB8);
|
||||
framebuffer_sub.active_fb = 0;
|
||||
}
|
||||
|
||||
void PicaCore::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
|
||||
this->rasterizer = rasterizer;
|
||||
}
|
||||
|
||||
void PicaCore::SetInterruptHandler(Service::GSP::InterruptHandler& signal_interrupt) {
|
||||
this->signal_interrupt = signal_interrupt;
|
||||
}
|
||||
|
||||
/// Executes the GPU command list at physical address `list`. Each entry is a
/// (value, header) word pair, optionally followed by extra parameter words;
/// every entry becomes one or more internal register writes.
void PicaCore::ProcessCmdList(PAddr list, u32 size) {
    // Initialize command list tracking.
    const u8* head = memory.GetPhysicalPointer(list);
    cmd_list.Reset(list, head, size);

    while (cmd_list.current_index < cmd_list.length) {
        // Align read pointer to 8 bytes (head is word-indexed, so an even index).
        if (cmd_list.current_index % 2 != 0) {
            cmd_list.current_index++;
        }

        // Read the header and the value to write.
        const u32 value = cmd_list.head[cmd_list.current_index++];
        const CommandHeader header{cmd_list.head[cmd_list.current_index++]};

        // Write to the requested PICA register.
        WriteInternalReg(header.cmd_id, value, header.parameter_mask);

        // Write any extra parameters as well. With group_commands set, each
        // extra word targets the next consecutive register; otherwise all
        // writes hit the same register.
        // NOTE(review): extra words are read without re-checking `length`, so a
        // malformed header could read slightly past the list end — confirm
        // whether hardware/guests guarantee well-formed lists here.
        for (u32 i = 0; i < header.extra_data_length; ++i) {
            const u32 cmd = header.cmd_id + (header.group_commands ? i + 1 : 0);
            const u32 extra_value = cmd_list.head[cmd_list.current_index++];
            WriteInternalReg(cmd, extra_value, header.parameter_mask);
        }
    }
}
|
||||
|
||||
/// Writes `value` to internal PICA register `id` under the 4-bit byte-enable
/// `mask`, then performs the side effect the register implies (IRQ, draw
/// trigger, uniform/program upload, LUT write, ...) and notifies the rasterizer.
void PicaCore::WriteInternalReg(u32 id, u32 value, u32 mask) {
    if (id >= RegsInternal::NUM_REGS) {
        LOG_ERROR(
            HW_GPU,
            "Commandlist tried to write to invalid register 0x{:03X} (value: {:08X}, mask: {:X})",
            id, value, mask);
        return;
    }

    // Expand a 4-bit mask to 4-byte mask, e.g. 0b0101 -> 0x00FF00FF
    constexpr std::array<u32, 16> ExpandBitsToBytes = {
        0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff, 0x00ff0000, 0x00ff00ff,
        0x00ffff00, 0x00ffffff, 0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff,
        0xffff0000, 0xffff00ff, 0xffffff00, 0xffffffff,
    };

    // TODO: Figure out how register masking acts on e.g. vs.uniform_setup.set_value
    const u32 old_value = regs.internal.reg_array[id];
    const u32 write_mask = ExpandBitsToBytes[mask];
    regs.internal.reg_array[id] = (old_value & ~write_mask) | (value & write_mask);

    // Track register write.
    DebugUtils::OnPicaRegWrite(id, mask, regs.internal.reg_array[id]);

    // Track events.
    debug_context.OnEvent(DebugContext::Event::PicaCommandLoaded, &id);
    SCOPE_EXIT({ debug_context.OnEvent(DebugContext::Event::PicaCommandProcessed, &id); });

    switch (id) {
    // Trigger IRQ
    case PICA_REG_INDEX(trigger_irq):
        signal_interrupt(Service::GSP::InterruptId::P3D);
        break;

    case PICA_REG_INDEX(pipeline.triangle_topology):
        primitive_assembler.Reconfigure(regs.internal.pipeline.triangle_topology);
        break;

    case PICA_REG_INDEX(pipeline.restart_primitive):
        primitive_assembler.Reset();
        break;

    case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.index):
        // Selecting a new attribute index discards any partially-written attribute.
        immediate.Reset();
        break;

    // Load default vertex input attributes
    case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.set_value[0]):
    case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.set_value[1]):
    case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.set_value[2]):
        SubmitImmediate(value);
        break;

    case PICA_REG_INDEX(pipeline.gpu_mode):
        // This register likely just enables vertex processing and doesn't need any special handling
        break;

    case PICA_REG_INDEX(pipeline.command_buffer.trigger[0]):
    case PICA_REG_INDEX(pipeline.command_buffer.trigger[1]): {
        // Redirect command list processing to the buffer configured for this trigger.
        const u32 index = static_cast<u32>(id - PICA_REG_INDEX(pipeline.command_buffer.trigger[0]));
        const PAddr addr = regs.internal.pipeline.command_buffer.GetPhysicalAddress(index);
        const u32 size = regs.internal.pipeline.command_buffer.GetSize(index);
        const u8* head = memory.GetPhysicalPointer(addr);
        cmd_list.Reset(addr, head, size);
        break;
    }

    // It seems like these trigger vertex rendering
    case PICA_REG_INDEX(pipeline.trigger_draw):
    case PICA_REG_INDEX(pipeline.trigger_draw_indexed): {
        const bool is_indexed = (id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
        DrawArrays(is_indexed);
        break;
    }

    case PICA_REG_INDEX(gs.bool_uniforms):
        gs_setup.WriteUniformBoolReg(regs.internal.gs.bool_uniforms.Value());
        break;

    case PICA_REG_INDEX(gs.int_uniforms[0]):
    case PICA_REG_INDEX(gs.int_uniforms[1]):
    case PICA_REG_INDEX(gs.int_uniforms[2]):
    case PICA_REG_INDEX(gs.int_uniforms[3]): {
        const u32 index = (id - PICA_REG_INDEX(gs.int_uniforms[0]));
        gs_setup.WriteUniformIntReg(index, regs.internal.gs.GetIntUniform(index));
        break;
    }

    case PICA_REG_INDEX(gs.uniform_setup.set_value[0]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[1]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[2]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[3]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[4]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[5]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[6]):
    case PICA_REG_INDEX(gs.uniform_setup.set_value[7]): {
        gs_setup.WriteUniformFloatReg(regs.internal.gs, value);
        break;
    }

    case PICA_REG_INDEX(gs.program.set_word[0]):
    case PICA_REG_INDEX(gs.program.set_word[1]):
    case PICA_REG_INDEX(gs.program.set_word[2]):
    case PICA_REG_INDEX(gs.program.set_word[3]):
    case PICA_REG_INDEX(gs.program.set_word[4]):
    case PICA_REG_INDEX(gs.program.set_word[5]):
    case PICA_REG_INDEX(gs.program.set_word[6]):
    case PICA_REG_INDEX(gs.program.set_word[7]): {
        // Streams one instruction word into GS program memory at the current offset.
        u32& offset = regs.internal.gs.program.offset;
        if (offset >= 4096) {
            LOG_ERROR(HW_GPU, "Invalid GS program offset {}", offset);
        } else {
            gs_setup.program_code[offset] = value;
            gs_setup.MarkProgramCodeDirty();
            offset++;
        }
        break;
    }

    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[0]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[1]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[2]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[3]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[4]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[5]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[6]):
    case PICA_REG_INDEX(gs.swizzle_patterns.set_word[7]): {
        u32& offset = regs.internal.gs.swizzle_patterns.offset;
        if (offset >= gs_setup.swizzle_data.size()) {
            LOG_ERROR(HW_GPU, "Invalid GS swizzle pattern offset {}", offset);
        } else {
            gs_setup.swizzle_data[offset] = value;
            gs_setup.MarkSwizzleDataDirty();
            offset++;
        }
        break;
    }

    case PICA_REG_INDEX(vs.bool_uniforms):
        vs_setup.WriteUniformBoolReg(regs.internal.vs.bool_uniforms.Value());
        break;

    case PICA_REG_INDEX(vs.int_uniforms[0]):
    case PICA_REG_INDEX(vs.int_uniforms[1]):
    case PICA_REG_INDEX(vs.int_uniforms[2]):
    case PICA_REG_INDEX(vs.int_uniforms[3]): {
        const u32 index = (id - PICA_REG_INDEX(vs.int_uniforms[0]));
        vs_setup.WriteUniformIntReg(index, regs.internal.vs.GetIntUniform(index));
        break;
    }

    case PICA_REG_INDEX(vs.uniform_setup.set_value[0]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[1]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[2]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[3]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[4]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[5]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[6]):
    case PICA_REG_INDEX(vs.uniform_setup.set_value[7]): {
        vs_setup.WriteUniformFloatReg(regs.internal.vs, value);
        break;
    }

    case PICA_REG_INDEX(vs.program.set_word[0]):
    case PICA_REG_INDEX(vs.program.set_word[1]):
    case PICA_REG_INDEX(vs.program.set_word[2]):
    case PICA_REG_INDEX(vs.program.set_word[3]):
    case PICA_REG_INDEX(vs.program.set_word[4]):
    case PICA_REG_INDEX(vs.program.set_word[5]):
    case PICA_REG_INDEX(vs.program.set_word[6]):
    case PICA_REG_INDEX(vs.program.set_word[7]): {
        u32& offset = regs.internal.vs.program.offset;
        if (offset >= 512) {
            LOG_ERROR(HW_GPU, "Invalid VS program offset {}", offset);
        } else {
            vs_setup.program_code[offset] = value;
            vs_setup.MarkProgramCodeDirty();
            // Unless the GS is configured exclusively, VS uploads mirror into GS memory.
            if (!regs.internal.pipeline.gs_unit_exclusive_configuration) {
                gs_setup.program_code[offset] = value;
                gs_setup.MarkProgramCodeDirty();
            }
            offset++;
        }
        break;
    }

    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[0]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[1]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[2]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[3]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[4]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[5]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[6]):
    case PICA_REG_INDEX(vs.swizzle_patterns.set_word[7]): {
        u32& offset = regs.internal.vs.swizzle_patterns.offset;
        if (offset >= vs_setup.swizzle_data.size()) {
            LOG_ERROR(HW_GPU, "Invalid VS swizzle pattern offset {}", offset);
        } else {
            vs_setup.swizzle_data[offset] = value;
            vs_setup.MarkSwizzleDataDirty();
            // Same VS->GS mirroring rule as for program words above.
            if (!regs.internal.pipeline.gs_unit_exclusive_configuration) {
                gs_setup.swizzle_data[offset] = value;
                gs_setup.MarkSwizzleDataDirty();
            }
            offset++;
        }
        break;
    }

    case PICA_REG_INDEX(lighting.lut_data[0]):
    case PICA_REG_INDEX(lighting.lut_data[1]):
    case PICA_REG_INDEX(lighting.lut_data[2]):
    case PICA_REG_INDEX(lighting.lut_data[3]):
    case PICA_REG_INDEX(lighting.lut_data[4]):
    case PICA_REG_INDEX(lighting.lut_data[5]):
    case PICA_REG_INDEX(lighting.lut_data[6]):
    case PICA_REG_INDEX(lighting.lut_data[7]): {
        auto& lut_config = regs.internal.lighting.lut_config;
        ASSERT_MSG(lut_config.index < 256, "lut_config.index exceeded maximum value of 255!");

        // LUT writes auto-increment the index within the selected table.
        lighting.luts[lut_config.type][lut_config.index].raw = value;
        lut_config.index.Assign(lut_config.index + 1);
        break;
    }

    case PICA_REG_INDEX(texturing.fog_lut_data[0]):
    case PICA_REG_INDEX(texturing.fog_lut_data[1]):
    case PICA_REG_INDEX(texturing.fog_lut_data[2]):
    case PICA_REG_INDEX(texturing.fog_lut_data[3]):
    case PICA_REG_INDEX(texturing.fog_lut_data[4]):
    case PICA_REG_INDEX(texturing.fog_lut_data[5]):
    case PICA_REG_INDEX(texturing.fog_lut_data[6]):
    case PICA_REG_INDEX(texturing.fog_lut_data[7]): {
        // Fog LUT writes wrap at the 128-entry table size and auto-increment.
        fog.lut[regs.internal.texturing.fog_lut_offset % 128].raw = value;
        regs.internal.texturing.fog_lut_offset.Assign(regs.internal.texturing.fog_lut_offset + 1);
        break;
    }

    case PICA_REG_INDEX(texturing.proctex_lut_data[0]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[1]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[2]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[3]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[4]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[5]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[6]):
    case PICA_REG_INDEX(texturing.proctex_lut_data[7]): {
        auto& index = regs.internal.texturing.proctex_lut_config.index;

        // The target table is selected by the lut_config; writes wrap and auto-increment.
        switch (regs.internal.texturing.proctex_lut_config.ref_table.Value()) {
        case TexturingRegs::ProcTexLutTable::Noise:
            proctex.noise_table[index % proctex.noise_table.size()].raw = value;
            break;
        case TexturingRegs::ProcTexLutTable::ColorMap:
            proctex.color_map_table[index % proctex.color_map_table.size()].raw = value;
            break;
        case TexturingRegs::ProcTexLutTable::AlphaMap:
            proctex.alpha_map_table[index % proctex.alpha_map_table.size()].raw = value;
            break;
        case TexturingRegs::ProcTexLutTable::Color:
            proctex.color_table[index % proctex.color_table.size()].raw = value;
            break;
        case TexturingRegs::ProcTexLutTable::ColorDiff:
            proctex.color_diff_table[index % proctex.color_diff_table.size()].raw = value;
            break;
        }
        index.Assign(index + 1);
        break;
    }
    default:
        break;
    }

    // Notify the rasterizer an internal register was updated.
    rasterizer->NotifyPicaRegisterChanged(id);
}
|
||||
|
||||
/// Accepts one packed word of a default/immediate-mode vertex attribute.
/// Complete attributes go to the default attribute buffer, or (at index 0xF)
/// accumulate into an immediate-mode vertex which is drawn once complete.
void PicaCore::SubmitImmediate(u32 value) {
    // Push the word to the queue. This returns true when a full attribute is formed.
    if (!immediate.queue.Push(value)) {
        return;
    }

    constexpr size_t IMMEDIATE_MODE_INDEX = 0xF;

    auto& setup = regs.internal.pipeline.vs_default_attributes_setup;
    if (setup.index > IMMEDIATE_MODE_INDEX) {
        LOG_ERROR(HW_GPU, "Invalid VS default attribute index {}", setup.index);
        return;
    }

    // Retrieve the attribute and place it in the default attribute buffer.
    const auto attribute = immediate.queue.Get();
    if (setup.index < IMMEDIATE_MODE_INDEX) {
        input_default_attributes[setup.index] = attribute;
        setup.index++;
        return;
    }

    // When index is 0xF the attribute is used for immediate mode drawing.
    immediate.input_vertex[immediate.current_attribute] = attribute;
    if (immediate.current_attribute < regs.internal.pipeline.max_input_attrib_index) {
        immediate.current_attribute++;
        return;
    }

    // We formed a vertex, flush.
    DrawImmediate();
}
|
||||
|
||||
/// Runs the vertex shader on the accumulated immediate-mode vertex, feeds the
/// result through the geometry pipeline and flushes the resulting triangle(s).
void PicaCore::DrawImmediate() {
    // Compile the vertex shader.
    shader_engine->SetupBatch(vs_setup, regs.internal.vs.main_offset);

    // Track vertex in the debug recorder.
    debug_context.OnEvent(DebugContext::Event::VertexShaderInvocation,
                          std::addressof(immediate.input_vertex));
    SCOPE_EXIT({ debug_context.OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr); });

    ShaderUnit shader_unit;
    AttributeBuffer output{};

    // Invoke the vertex shader for the vertex.
    shader_unit.LoadInput(regs.internal.vs, immediate.input_vertex);
    shader_engine->Run(vs_setup, shader_unit);
    shader_unit.WriteOutput(regs.internal.vs, output);

    // Reconfigure geometry pipeline if needed.
    if (immediate.reset_geometry_pipeline) {
        geometry_pipeline.Reconfigure();
        immediate.reset_geometry_pipeline = false;
    }

    // Send to geometry pipeline.
    ASSERT(!geometry_pipeline.NeedIndexInput());
    geometry_pipeline.Setup(shader_engine.get());
    geometry_pipeline.SubmitVertex(output);

    // Flush the immediate triangle.
    rasterizer->DrawTriangles();
    immediate.current_attribute = 0;
}
|
||||
|
||||
/// Executes a triggered draw call, preferring hardware-accelerated vertex
/// shading when configuration allows and falling back to the software path.
void PicaCore::DrawArrays(bool is_indexed) {
    MICROPROFILE_SCOPE(GPU_Drawing);

    // Track vertex in the debug recorder.
    debug_context.OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
    SCOPE_EXIT({ debug_context.OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr); });

    const bool accelerate_draw = [this] {
        // Geometry shaders cannot be accelerated due to register preservation.
        if (regs.internal.pipeline.use_gs == PipelineRegs::UseGS::Yes) {
            return false;
        }

        // TODO (wwylele): for Strip/Fan topology, if the primitive assembler is not restarted
        // after this draw call, the buffered vertex from this draw should "leak" to the next
        // draw, in which case we should buffer the vertex into the software primitive assembler,
        // or disable accelerate draw completely. However, no game has been found yet that does
        // this, so this is left unimplemented for now. Revisit this when an issue is found in
        // games.

        bool accelerate_draw = Settings::values.use_hw_shader && primitive_assembler.IsEmpty();
        const auto topology = primitive_assembler.GetTopology();
        if (topology == PipelineRegs::TriangleTopology::Shader ||
            topology == PipelineRegs::TriangleTopology::List) {
            accelerate_draw = accelerate_draw && (regs.internal.pipeline.num_vertices % 3) == 0;
        }
        return accelerate_draw;
    }();

    // Attempt to use hardware vertex shaders if possible.
    if (accelerate_draw && rasterizer->AccelerateDrawBatch(is_indexed)) {
        return;
    }

    // We cannot accelerate the draw, so load and execute the vertex shader for each vertex.
    LoadVertices(is_indexed);

    // Draw emitted triangles.
    rasterizer->DrawTriangles();
}
|
||||
|
||||
void PicaCore::LoadVertices(bool is_indexed) {
|
||||
// Read and validate vertex information from the loaders
|
||||
const auto& pipeline = regs.internal.pipeline;
|
||||
const PAddr base_address = pipeline.vertex_attributes.GetPhysicalBaseAddress();
|
||||
const auto loader = VertexLoader(memory, pipeline);
|
||||
regs.internal.rasterizer.ValidateSemantics();
|
||||
|
||||
// Locate index buffer.
|
||||
const auto& index_info = pipeline.index_array;
|
||||
const u8* index_address_8 = memory.GetPhysicalPointer(base_address + index_info.offset);
|
||||
const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
|
||||
const bool index_u16 = index_info.format != 0;
|
||||
|
||||
// Simple circular-replacement vertex cache
|
||||
const std::size_t VERTEX_CACHE_SIZE = 64;
|
||||
std::array<bool, VERTEX_CACHE_SIZE> vertex_cache_valid{};
|
||||
std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
|
||||
std::array<AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
|
||||
u32 vertex_cache_pos = 0;
|
||||
|
||||
// Compile the vertex shader for this batch.
|
||||
ShaderUnit shader_unit;
|
||||
AttributeBuffer vs_output;
|
||||
shader_engine->SetupBatch(vs_setup, regs.internal.vs.main_offset);
|
||||
|
||||
// Setup geometry pipeline in case we are using a geometry shader.
|
||||
geometry_pipeline.Reconfigure();
|
||||
geometry_pipeline.Setup(shader_engine.get());
|
||||
ASSERT(!geometry_pipeline.NeedIndexInput() || is_indexed);
|
||||
|
||||
for (u32 index = 0; index < pipeline.num_vertices; ++index) {
|
||||
// Indexed rendering doesn't use the start offset
|
||||
const u32 vertex = is_indexed
|
||||
? (index_u16 ? index_address_16[index] : index_address_8[index])
|
||||
: (index + pipeline.vertex_offset);
|
||||
|
||||
bool vertex_cache_hit = false;
|
||||
if (is_indexed) {
|
||||
if (geometry_pipeline.NeedIndexInput()) {
|
||||
geometry_pipeline.SubmitIndex(vertex);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (u32 i = 0; i < VERTEX_CACHE_SIZE; ++i) {
|
||||
if (vertex_cache_valid[i] && vertex == vertex_cache_ids[i]) {
|
||||
vs_output = vertex_cache[i];
|
||||
vertex_cache_hit = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!vertex_cache_hit) {
|
||||
// Initialize data for the current vertex
|
||||
AttributeBuffer input;
|
||||
loader.LoadVertex(base_address, index, vertex, input, input_default_attributes);
|
||||
|
||||
// Record vertex processing to the debugger.
|
||||
debug_context.OnEvent(DebugContext::Event::VertexShaderInvocation,
|
||||
std::addressof(input));
|
||||
|
||||
// Invoke the vertex shader for this vertex.
|
||||
shader_unit.LoadInput(regs.internal.vs, input);
|
||||
shader_engine->Run(vs_setup, shader_unit);
|
||||
shader_unit.WriteOutput(regs.internal.vs, vs_output);
|
||||
|
||||
// Cache the vertex when doing indexed rendering.
|
||||
if (is_indexed) {
|
||||
vertex_cache[vertex_cache_pos] = vs_output;
|
||||
vertex_cache_valid[vertex_cache_pos] = true;
|
||||
vertex_cache_ids[vertex_cache_pos] = vertex;
|
||||
vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
// Send to geometry pipeline
|
||||
geometry_pipeline.SubmitVertex(vs_output);
|
||||
}
|
||||
}
|
||||
|
||||
/// Boost serialization of the command list tracking state. The raw `head`
/// pointer is not serialized; on load it is re-derived from the saved physical
/// address via the global memory system.
template <class Archive>
void PicaCore::CommandList::serialize(Archive& ar, const u32 file_version) {
    ar& addr;
    ar& length;
    ar& current_index;
    if (Archive::is_loading::value) {
        const u8* ptr = Core::System::GetInstance().Memory().GetPhysicalPointer(addr);
        head = reinterpret_cast<const u32*>(ptr);
    }
}

SERIALIZE_IMPL(PicaCore::CommandList)
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,287 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/hle/service/gsp/gsp_interrupt.h"
|
||||
#include "video_core/pica/geometry_pipeline.h"
|
||||
#include "video_core/pica/packed_attribute.h"
|
||||
#include "video_core/pica/primitive_assembly.h"
|
||||
#include "video_core/pica/regs_external.h"
|
||||
#include "video_core/pica/regs_internal.h"
|
||||
#include "video_core/pica/regs_lcd.h"
|
||||
#include "video_core/pica/shader_setup.h"
|
||||
#include "video_core/pica/shader_unit.h"
|
||||
|
||||
namespace Memory {
|
||||
class MemorySystem;
|
||||
}
|
||||
|
||||
namespace VideoCore {
|
||||
class RasterizerInterface;
|
||||
}
|
||||
|
||||
namespace Pica {
|
||||
|
||||
class DebugContext;
|
||||
class ShaderEngine;
|
||||
|
||||
/**
 * Emulated PICA200 GPU core. Owns the external and internal register files, the
 * shader/geometry pipeline state and LUTs, and executes GSP-submitted command lists.
 */
class PicaCore {
public:
    explicit PicaCore(Memory::MemorySystem& memory, DebugContext& debug_context_);
    ~PicaCore();

    /// Attaches the rasterizer backend that consumes draw and register updates.
    void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);

    /// Installs the callback used to raise GSP interrupts.
    void SetInterruptHandler(Service::GSP::InterruptHandler& signal_interrupt);

    /// Executes the GPU command list at physical address `list`; `size` is in bytes.
    void ProcessCmdList(PAddr list, u32 size);

private:
    void SetFramebufferDefaults();

    // Writes `value` (masked by `mask`) to internal register `id`.
    void WriteInternalReg(u32 id, u32 value, u32 mask);

    // Accepts one word of immediate-mode vertex attribute data.
    void SubmitImmediate(u32 data);

    void DrawImmediate();

    void DrawArrays(bool is_indexed);

    void LoadVertices(bool is_indexed);

public:
    /// External GPU register file, accessible both by named field and as a flat word array.
    union Regs {
        static constexpr size_t NUM_REGS = 0x732;

        struct {
            u32 hardware_id;
            INSERT_PADDING_WORDS(0x3);
            MemoryFillConfig memory_fill_config[2];
            u32 vram_bank_control;
            u32 gpu_busy;
            INSERT_PADDING_WORDS(0x22);
            u32 backlight_control;
            INSERT_PADDING_WORDS(0xCF);
            FramebufferConfig framebuffer_config[2];
            INSERT_PADDING_WORDS(0x180);
            DisplayTransferConfig display_transfer_config;
            INSERT_PADDING_WORDS(0xF5);
            RegsInternal internal;
        };
        std::array<u32, NUM_REGS> reg_array;
    };
    static_assert(sizeof(Regs) == Regs::NUM_REGS * sizeof(u32));

    /// Bookkeeping for the command list currently being executed.
    struct CommandList {
        PAddr addr;
        const u32* head;
        u32 current_index;
        u32 length;

        /// Re-points the list at a new guest buffer; `size` is in bytes.
        void Reset(PAddr addr, const u8* head, u32 size) {
            this->addr = addr;
            this->head = reinterpret_cast<const u32*>(head);
            this->length = size / sizeof(u32);
            current_index = 0;
        }

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version);
    };

    /// State for immediate-mode vertex submission (attributes written one word at a time).
    struct ImmediateModeState {
        AttributeBuffer input_vertex{};
        u32 current_attribute{};
        bool reset_geometry_pipeline{true};
        PackedAttribute queue;

        void Reset() {
            current_attribute = 0;
            reset_geometry_pipeline = true;
            queue.Reset();
        }

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& input_vertex;
            ar& current_attribute;
            ar& reset_geometry_pipeline;
            ar& queue;
        }
    };

    /// Procedural texture lookup tables.
    struct ProcTex {
        union ValueEntry {
            u32 raw;

            // LUT value, encoded as 12-bit fixed point, with 12 fraction bits
            BitField<0, 12, u32> value; // 0.0.12 fixed point

            // Difference between two entry values. Used for efficient interpolation.
            // 0.0.12 fixed point with two's complement. The range is [-0.5, 0.5).
            // Note: the type of this is different from the one of lighting LUT
            BitField<12, 12, s32> difference;

            f32 ToFloat() const {
                return static_cast<f32>(value) / 4095.f;
            }

            f32 DiffToFloat() const {
                return static_cast<f32>(difference) / 4095.f;
            }
        };

        union ColorEntry {
            u32 raw;
            BitField<0, 8, u32> r;
            BitField<8, 8, u32> g;
            BitField<16, 8, u32> b;
            BitField<24, 8, u32> a;

            Common::Vec4<u8> ToVector() const {
                return {static_cast<u8>(r), static_cast<u8>(g), static_cast<u8>(b),
                        static_cast<u8>(a)};
            }
        };

        union ColorDifferenceEntry {
            u32 raw;
            BitField<0, 8, s32> r; // half of the difference between two ColorEntry
            BitField<8, 8, s32> g;
            BitField<16, 8, s32> b;
            BitField<24, 8, s32> a;

            Common::Vec4<s32> ToVector() const {
                return Common::Vec4<s32>{r, g, b, a} * 2;
            }
        };

        std::array<ValueEntry, 128> noise_table;
        std::array<ValueEntry, 128> color_map_table;
        std::array<ValueEntry, 128> alpha_map_table;
        std::array<ColorEntry, 256> color_table;
        std::array<ColorDifferenceEntry, 256> color_diff_table;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            // Plain-old-data tables; serialize as one raw binary blob.
            ar& boost::serialization::make_binary_object(this, sizeof(ProcTex));
        }
    };

    /// Lighting lookup tables.
    struct Lighting {
        union LutEntry {
            // Used for raw access
            u32 raw;

            // LUT value, encoded as 12-bit fixed point, with 12 fraction bits
            BitField<0, 12, u32> value; // 0.0.12 fixed point

            // Used for efficient interpolation.
            BitField<12, 11, u32> difference; // 0.0.11 fixed point
            BitField<23, 1, u32> neg_difference;

            f32 ToFloat() const {
                return static_cast<f32>(value) / 4095.f;
            }

            f32 DiffToFloat() const {
                // Sign is carried in a separate bit rather than two's complement.
                const f32 diff = static_cast<f32>(difference) / 2047.f;
                return neg_difference ? -diff : diff;
            }

            template <class Archive>
            void serialize(Archive& ar, const u32 file_version) {
                ar& raw;
            }
        };

        std::array<std::array<LutEntry, 256>, 24> luts;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& boost::serialization::make_binary_object(this, sizeof(Lighting));
        }
    };

    /// Fog lookup table.
    struct Fog {
        union LutEntry {
            // Used for raw access
            u32 raw;

            BitField<0, 13, s32> difference; // 1.1.11 fixed point
            BitField<13, 11, u32> value;     // 0.0.11 fixed point

            f32 ToFloat() const {
                return static_cast<f32>(value) / 2047.0f;
            }

            f32 DiffToFloat() const {
                return static_cast<f32>(difference) / 2047.0f;
            }
        };

        std::array<LutEntry, 128> lut;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& boost::serialization::make_binary_object(this, sizeof(Fog));
        }
    };

    RegsLcd regs_lcd{};
    Regs regs{};
    // TODO: Move these to a separate shader scheduler class
    GeometryShaderUnit gs_unit;
    ShaderSetup vs_setup;
    ShaderSetup gs_setup;
    ProcTex proctex{};
    Lighting lighting{};
    Fog fog{};
    AttributeBuffer input_default_attributes{};
    ImmediateModeState immediate{};

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& regs_lcd;
        ar& regs.reg_array;
        ar& gs_unit;
        ar& vs_setup;
        ar& gs_setup;
        ar& proctex;
        ar& lighting;
        ar& fog;
        ar& input_default_attributes;
        ar& immediate;
        ar& geometry_pipeline;
        ar& primitive_assembler;
        ar& cmd_list;
    }

private:
    Memory::MemorySystem& memory;
    VideoCore::RasterizerInterface* rasterizer;
    DebugContext& debug_context;
    Service::GSP::InterruptHandler signal_interrupt;
    GeometryPipeline geometry_pipeline;
    PrimitiveAssembler primitive_assembler;
    CommandList cmd_list;
    std::unique_ptr<ShaderEngine> shader_engine;
};
|
||||
|
||||
#define GPU_REG_INDEX(field_name) (offsetof(Pica::PicaCore::Regs, field_name) / sizeof(u32))
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,53 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/pica/primitive_assembly.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// Constructs an assembler producing triangles according to `topology`.
PrimitiveAssembler::PrimitiveAssembler(PipelineRegs::TriangleTopology topology)
    : topology(topology) {}
|
||||
|
||||
void PrimitiveAssembler::SubmitVertex(const OutputVertex& vtx,
|
||||
const TriangleHandler& triangle_handler) {
|
||||
switch (topology) {
|
||||
case PipelineRegs::TriangleTopology::List:
|
||||
case PipelineRegs::TriangleTopology::Shader:
|
||||
if (buffer_index < 2) {
|
||||
buffer[buffer_index++] = vtx;
|
||||
} else {
|
||||
buffer_index = 0;
|
||||
if (topology == PipelineRegs::TriangleTopology::Shader && winding) {
|
||||
triangle_handler(buffer[1], buffer[0], vtx);
|
||||
winding = false;
|
||||
} else {
|
||||
triangle_handler(buffer[0], buffer[1], vtx);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case PipelineRegs::TriangleTopology::Strip:
|
||||
case PipelineRegs::TriangleTopology::Fan:
|
||||
if (strip_ready) {
|
||||
triangle_handler(buffer[0], buffer[1], vtx);
|
||||
}
|
||||
|
||||
buffer[buffer_index] = vtx;
|
||||
strip_ready |= (buffer_index == 1);
|
||||
|
||||
if (topology == PipelineRegs::TriangleTopology::Strip) {
|
||||
buffer_index = !buffer_index;
|
||||
} else if (topology == PipelineRegs::TriangleTopology::Fan) {
|
||||
buffer_index = 1;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unknown triangle topology {:x}:", (int)topology);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,217 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/bit_field.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/**
|
||||
* Most physical addresses which GPU registers refer to are 8-byte aligned.
|
||||
* This function should be used to get the address from a raw register value.
|
||||
*/
|
||||
constexpr u32 DecodeAddressRegister(u32 register_value) {
|
||||
return register_value * 8;
|
||||
}
|
||||
|
||||
/// Framebuffer/transfer pixel formats.
/// Components are laid out in reverse byte order, most significant bits first.
enum class PixelFormat : u32 {
    RGBA8 = 0,  // 4 bytes per pixel
    RGB8 = 1,   // 3 bytes per pixel, no alpha
    RGB565 = 2, // 2 bytes per pixel
    RGB5A1 = 3, // 2 bytes per pixel, 1-bit alpha
    RGBA4 = 4,  // 2 bytes per pixel
};
|
||||
|
||||
/// Returns the storage size in bytes of one pixel of `format`.
constexpr u32 BytesPerPixel(Pica::PixelFormat format) {
    if (format == Pica::PixelFormat::RGBA8) {
        return 4;
    }
    if (format == Pica::PixelFormat::RGB8) {
        return 3;
    }
    if (format == Pica::PixelFormat::RGB565 || format == Pica::PixelFormat::RGB5A1 ||
        format == Pica::PixelFormat::RGBA4) {
        return 2;
    }
    // All five formats are handled above; anything else is a malformed register value.
    UNREACHABLE();

    return 0;
}
|
||||
|
||||
/// Register block for one PSC memory-fill unit; mirrors the hardware layout exactly.
struct MemoryFillConfig {
    // Stored divided by 8; decode via GetStartAddress()/GetEndAddress().
    u32 address_start;
    u32 address_end;

    // Fill value, interpreted according to the control bits below.
    union {
        u32 value_32bit;

        BitField<0, 16, u32> value_16bit;

        // TODO: Verify component order
        BitField<0, 8, u32> value_24bit_r;
        BitField<8, 8, u32> value_24bit_g;
        BitField<16, 8, u32> value_24bit_b;
    };

    union {
        u32 control;

        // Setting this field to 1 triggers the memory fill.
        // This field also acts as a status flag, and gets reset to 0 upon completion.
        BitField<0, 1, u32> trigger;
        // Set to 1 upon completion.
        BitField<1, 1, u32> finished;
        // If both of these bits are unset, then it will fill the memory with a 16 bit value
        // 1: fill with 24-bit wide values
        BitField<8, 1, u32> fill_24bit;
        // 1: fill with 32-bit wide values
        BitField<9, 1, u32> fill_32bit;
    };

    /// Physical start address of the fill region.
    inline u32 GetStartAddress() const {
        return DecodeAddressRegister(address_start);
    }

    /// Physical end address of the fill region.
    inline u32 GetEndAddress() const {
        return DecodeAddressRegister(address_end);
    }

    /// Human-readable description of this fill, for logging.
    inline std::string DebugName() const {
        return fmt::format("from {:#X} to {:#X} with {}-bit value {:#X}", GetStartAddress(),
                           GetEndAddress(), fill_32bit ? "32" : (fill_24bit ? "24" : "16"),
                           value_32bit);
    }
};
static_assert(sizeof(MemoryFillConfig) == 0x10);
|
||||
|
||||
/// Register block describing one LCD framebuffer; mirrors the hardware layout exactly.
struct FramebufferConfig {
    INSERT_PADDING_WORDS(0x17);

    union {
        u32 size;

        BitField<0, 16, u32> width;
        BitField<16, 16, u32> height;
    };

    INSERT_PADDING_WORDS(0x2);

    // Two scan-out buffers; `second_fb_active` below selects between them.
    u32 address_left1;
    u32 address_left2;

    union {
        u32 format;

        BitField<0, 3, PixelFormat> color_format;
    };

    INSERT_PADDING_WORDS(0x1);

    union {
        u32 active_fb;

        // 0: Use parameters ending with "1"
        // 1: Use parameters ending with "2"
        BitField<0, 1, u32> second_fb_active;
    };

    INSERT_PADDING_WORDS(0x5);

    // Distance between two pixel rows, in bytes
    u32 stride;

    // Right-eye buffers (stereoscopic 3D counterpart of address_left*).
    u32 address_right1;
    u32 address_right2;

    INSERT_PADDING_WORDS(0x19);
};
static_assert(sizeof(FramebufferConfig) == 0x100);
|
||||
|
||||
/// Register block for the display-transfer engine (format conversion / texture copy);
/// mirrors the hardware layout exactly.
struct DisplayTransferConfig {
    // Stored divided by 8; decode via the helpers below.
    u32 input_address;
    u32 output_address;

    inline u32 GetPhysicalInputAddress() const {
        return DecodeAddressRegister(input_address);
    }

    inline u32 GetPhysicalOutputAddress() const {
        return DecodeAddressRegister(output_address);
    }

    /// Human-readable description of this transfer, for logging.
    /// NOTE(review): the message labels input_width as "stride" — the value printed is
    /// input_width/output_width; confirm the intended wording before relying on the text.
    inline std::string DebugName() const noexcept {
        return fmt::format("from {:#x} to {:#x} with {} scaling and stride {}, width {}",
                           GetPhysicalInputAddress(), GetPhysicalOutputAddress(),
                           scaling == NoScale ? "no" : (scaling == ScaleX ? "X" : "XY"),
                           input_width.Value(), output_width.Value());
    }

    union {
        u32 output_size;

        BitField<0, 16, u32> output_width;
        BitField<16, 16, u32> output_height;
    };

    union {
        u32 input_size;

        BitField<0, 16, u32> input_width;
        BitField<16, 16, u32> input_height;
    };

    enum ScalingMode : u32 {
        NoScale = 0, // Doesn't scale the image
        ScaleX = 1,  // Downscales the image in half in the X axis and applies a box filter
        ScaleXY =
            2, // Downscales the image in half in both the X and Y axes and applies a box filter
    };

    union {
        u32 flags;

        BitField<0, 1, u32> flip_vertically; // flips input data vertically
        BitField<1, 1, u32> input_linear;    // Converts from linear to tiled format
        BitField<2, 1, u32> crop_input_lines;
        BitField<3, 1, u32> is_texture_copy; // Copies the data without performing any
                                             // processing and respecting texture copy fields
        BitField<5, 1, u32> dont_swizzle;
        BitField<8, 3, PixelFormat> input_format;
        BitField<12, 3, PixelFormat> output_format;
        /// Uses some kind of 32x32 block swizzling mode, instead of the usual 8x8 one.
        BitField<16, 1, u32> block_32; // TODO(yuriks): unimplemented
        BitField<24, 2, ScalingMode> scaling; // Determines the scaling mode of the transfer
    };

    INSERT_PADDING_WORDS(0x1);

    // it seems that writing to this field triggers the display transfer
    BitField<0, 1, u32> trigger;

    INSERT_PADDING_WORDS(0x1);

    // Extra parameters used only when is_texture_copy is set.
    struct {
        u32 size; // The lower 4 bits are ignored

        union {
            u32 input_size;

            BitField<0, 16, u32> input_width;
            BitField<16, 16, u32> input_gap;
        };

        union {
            u32 output_size;

            BitField<0, 16, u32> output_width;
            BitField<16, 16, u32> output_gap;
        };
    } texture_copy;
};
static_assert(sizeof(DisplayTransferConfig) == 0x2c);
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,61 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/bit_set.h"
|
||||
#include "common/hash.h"
|
||||
#include "video_core/pica/regs_shader.h"
|
||||
#include "video_core/pica/shader_setup.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
// Defaulted out of line; all members carry in-class initializers (see shader_setup.h).
ShaderSetup::ShaderSetup() = default;

ShaderSetup::~ShaderSetup() = default;
|
||||
|
||||
/// Unpacks `value` bit-by-bit into the boolean uniform array (bit N -> b[N]).
void ShaderSetup::WriteUniformBoolReg(u32 value) {
    const BitSet32 flags(value);
    for (u32 bit = 0; bit < uniforms.b.size(); ++bit) {
        uniforms.b[bit] = flags[bit];
    }
}
|
||||
|
||||
/// Writes the four 8-bit components of integer uniform `index`.
void ShaderSetup::WriteUniformIntReg(u32 index, const Common::Vec4<u8> values) {
    ASSERT(index < uniforms.i.size());
    uniforms.i[index] = values;
}
|
||||
|
||||
/// Feeds one word of float-uniform upload data. Words are accumulated in
/// `uniform_queue` until a full vec4 is available, then written to the index in
/// `config.uniform_setup`, which auto-increments like the hardware register does.
void ShaderSetup::WriteUniformFloatReg(ShaderRegs& config, u32 value) {
    auto& uniform_setup = config.uniform_setup;
    const bool is_float32 = uniform_setup.IsFloat32();
    // Push() only reports true once enough words have accumulated for a complete vec4.
    if (!uniform_queue.Push(value, is_float32)) {
        return;
    }

    const auto uniform = uniform_queue.Get(is_float32);
    if (uniform_setup.index >= uniforms.f.size()) {
        LOG_ERROR(HW_GPU, "Invalid float uniform index {}", uniform_setup.index.Value());
        return;
    }

    uniforms.f[uniform_setup.index] = uniform;
    // The destination index auto-increments after each completed vec4 write.
    uniform_setup.index.Assign(uniform_setup.index + 1);
}
|
||||
|
||||
/// Returns the (lazily re-computed) hash of the shader program code.
u64 ShaderSetup::GetProgramCodeHash() {
    if (!program_code_hash_dirty) {
        return program_code_hash;
    }
    // Recompute only when MarkProgramCodeDirty() has been called since the last hash.
    program_code_hash = Common::ComputeHash64(&program_code, sizeof(program_code));
    program_code_hash_dirty = false;
    return program_code_hash;
}
|
||||
|
||||
/// Returns the (lazily re-computed) hash of the shader swizzle data.
u64 ShaderSetup::GetSwizzleDataHash() {
    if (!swizzle_data_hash_dirty) {
        return swizzle_data_hash;
    }
    // Recompute only when MarkSwizzleDataDirty() has been called since the last hash.
    swizzle_data_hash = Common::ComputeHash64(&swizzle_data, sizeof(swizzle_data));
    swizzle_data_hash_dirty = false;
    return swizzle_data_hash;
}
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,103 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica/packed_attribute.h"
|
||||
#include "video_core/pica_types.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
constexpr u32 MAX_PROGRAM_CODE_LENGTH = 4096;
|
||||
constexpr u32 MAX_SWIZZLE_DATA_LENGTH = 4096;
|
||||
|
||||
using ProgramCode = std::array<u32, MAX_PROGRAM_CODE_LENGTH>;
|
||||
using SwizzleData = std::array<u32, MAX_SWIZZLE_DATA_LENGTH>;
|
||||
|
||||
/// Uniform register storage shared by the shader units: 96 f24 vec4 float uniforms,
/// 16 booleans and 4 integer vec4s. The *Offset helpers expose member byte offsets
/// for code that accesses this struct by raw offset.
struct Uniforms {
    alignas(16) std::array<Common::Vec4<f24>, 96> f;
    std::array<bool, 16> b;
    std::array<Common::Vec4<u8>, 4> i;

    /// Byte offset of float uniform `index` within Uniforms.
    static size_t GetFloatUniformOffset(u32 index) {
        return offsetof(Uniforms, f) + index * sizeof(Common::Vec4<f24>);
    }

    /// Byte offset of boolean uniform `index` within Uniforms.
    static size_t GetBoolUniformOffset(u32 index) {
        return offsetof(Uniforms, b) + index * sizeof(bool);
    }

    /// Byte offset of integer uniform `index` within Uniforms.
    static size_t GetIntUniformOffset(u32 index) {
        return offsetof(Uniforms, i) + index * sizeof(Common::Vec4<u8>);
    }

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& f;
        ar& b;
        ar& i;
    }
};
|
||||
|
||||
struct ShaderRegs;
|
||||
|
||||
/**
 * This structure contains the state information common for all shader units such as uniforms.
 * The geometry shader has a unique configuration so when enabled it has its own setup.
 */
struct ShaderSetup {
public:
    explicit ShaderSetup();
    ~ShaderSetup();

    /// Unpacks a boolean-uniform register value into `uniforms.b`.
    void WriteUniformBoolReg(u32 value);

    /// Writes integer uniform `index`.
    void WriteUniformIntReg(u32 index, const Common::Vec4<u8> values);

    /// Feeds one word of float-uniform upload data (see shader_setup.cpp).
    void WriteUniformFloatReg(ShaderRegs& config, u32 value);

    /// Hash of `program_code`, recomputed lazily when marked dirty.
    u64 GetProgramCodeHash();

    /// Hash of `swizzle_data`, recomputed lazily when marked dirty.
    u64 GetSwizzleDataHash();

    void MarkProgramCodeDirty() {
        program_code_hash_dirty = true;
    }

    void MarkSwizzleDataDirty() {
        swizzle_data_hash_dirty = true;
    }

public:
    Uniforms uniforms;
    PackedAttribute uniform_queue; // Accumulates partial float-uniform uploads
    ProgramCode program_code;
    SwizzleData swizzle_data;
    u32 entry_point;
    // Opaque backend handle for the compiled shader; not serialized.
    const void* cached_shader{};

private:
    bool program_code_hash_dirty{true};
    bool swizzle_data_hash_dirty{true};
    // Placeholder values; replaced on first Get*Hash() call since dirty flags start true.
    u64 program_code_hash{0xDEADC0DE};
    u64 swizzle_data_hash{0xDEADC0DE};

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& uniforms;
        ar& uniform_queue;
        ar& program_code;
        ar& swizzle_data;
        ar& program_code_hash_dirty;
        ar& swizzle_data_hash_dirty;
        ar& program_code_hash;
        ar& swizzle_data_hash;
    }
};
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,63 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/bit_set.h"
|
||||
#include "video_core/pica/regs_shader.h"
|
||||
#include "video_core/pica/shader_unit.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// `emitter` is non-null only for the unit capable of running geometry shaders.
ShaderUnit::ShaderUnit(GeometryEmitter* emitter) : emitter_ptr{emitter} {}

ShaderUnit::~ShaderUnit() = default;
|
||||
|
||||
void ShaderUnit::LoadInput(const ShaderRegs& config, const AttributeBuffer& buffer) {
|
||||
const u32 max_attribute = config.max_input_attribute_index;
|
||||
for (u32 attr = 0; attr <= max_attribute; ++attr) {
|
||||
const u32 reg = config.GetRegisterForAttribute(attr);
|
||||
input[reg] = buffer[attr];
|
||||
}
|
||||
}
|
||||
|
||||
void ShaderUnit::WriteOutput(const ShaderRegs& config, AttributeBuffer& buffer) {
|
||||
u32 output_index{};
|
||||
for (u32 reg : Common::BitSet<u32>(config.output_mask)) {
|
||||
buffer[output_index++] = output[reg];
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the enabled output registers into the vertex slot selected by `vertex_id` and,
/// when a primitive emission is flagged, hands all three buffered vertices to the pipeline.
void GeometryEmitter::Emit(std::span<Common::Vec4<f24>, 16> output_regs) {
    ASSERT(vertex_id < 3);

    u32 out_slot{};
    for (const u32 reg : Common::BitSet<u32>(output_mask)) {
        buffer[vertex_id][out_slot++] = output_regs[reg];
    }

    if (!prim_emit) {
        return;
    }

    // Optionally flip the winding order of the next triangle before emitting it.
    if (winding) {
        handlers->winding_setter();
    }
    for (const auto& emitted_vertex : buffer) {
        handlers->vertex_handler(emitted_vertex);
    }
}
|
||||
|
||||
/// The geometry-capable unit wires its own emitter into the base ShaderUnit.
GeometryShaderUnit::GeometryShaderUnit() : ShaderUnit{&emitter} {}

GeometryShaderUnit::~GeometryShaderUnit() = default;
|
||||
|
||||
/**
 * Installs the callbacks through which the emitter delivers finished vertices and
 * winding flips to the primitive pipeline.
 *
 * NOTE(review): `emitter.handlers` is an owning raw pointer that is never deleted, so
 * each call leaks the previous allocation — consider std::unique_ptr ownership in
 * GeometryEmitter. Not changed here because the member type lives in the header.
 */
void GeometryShaderUnit::SetVertexHandlers(VertexHandler vertex_handler,
                                           WindingSetter winding_setter) {
    emitter.handlers = new Handlers;
    // The parameters are sink values; move the std::functions instead of copying them.
    emitter.handlers->vertex_handler = std::move(vertex_handler);
    emitter.handlers->winding_setter = std::move(winding_setter);
}
|
||||
|
||||
/// Mirrors the geometry shader's output register mask into the emitter.
void GeometryShaderUnit::ConfigOutput(const ShaderRegs& config) {
    emitter.output_mask = config.output_mask;
}
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,120 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <span>
|
||||
#include <boost/serialization/base_object.hpp>
|
||||
|
||||
#include "video_core/pica/output_vertex.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// Handler type for receiving vertex outputs from vertex shader or geometry shader
|
||||
using VertexHandler = std::function<void(const AttributeBuffer&)>;
|
||||
|
||||
/// Handler type for signaling to invert the vertex order of the next triangle
|
||||
using WindingSetter = std::function<void()>;
|
||||
|
||||
struct ShaderRegs;
|
||||
struct GeometryEmitter;
|
||||
|
||||
/**
 * This structure contains the state information that needs to be unique for a shader unit. The 3DS
 * has four shader units that process shaders in parallel.
 */
struct ShaderUnit {
    explicit ShaderUnit(GeometryEmitter* emitter = nullptr);
    ~ShaderUnit();

    /// Copies `input` attributes into the input registers selected by `config`.
    void LoadInput(const ShaderRegs& config, const AttributeBuffer& input);

    /// Packs the registers enabled in `config.output_mask` densely into `output`.
    void WriteOutput(const ShaderRegs& config, AttributeBuffer& output);

    // Byte offsets of the register files below, for code that addresses unit state by raw
    // offset. NOTE(review): presumably consumed by a shader JIT backend — confirm.
    static constexpr size_t InputOffset(s32 register_index) {
        return offsetof(ShaderUnit, input) + register_index * sizeof(Common::Vec4<f24>);
    }

    static constexpr size_t OutputOffset(s32 register_index) {
        return offsetof(ShaderUnit, output) + register_index * sizeof(Common::Vec4<f24>);
    }

    static constexpr size_t TemporaryOffset(s32 register_index) {
        return offsetof(ShaderUnit, temporary) + register_index * sizeof(Common::Vec4<f24>);
    }

public:
    s32 address_registers[3];
    bool conditional_code[2];
    alignas(16) std::array<Common::Vec4<f24>, 16> input;
    alignas(16) std::array<Common::Vec4<f24>, 16> temporary;
    alignas(16) std::array<Common::Vec4<f24>, 16> output;
    // Non-owning; non-null only for the geometry-capable unit.
    GeometryEmitter* emitter_ptr;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& input;
        ar& temporary;
        ar& output;
        ar& conditional_code;
        ar& address_registers;
    }
};
|
||||
|
||||
/// Callbacks through which the geometry emitter delivers results to the pipeline.
struct Handlers {
    VertexHandler vertex_handler;
    WindingSetter winding_setter;
};
|
||||
|
||||
/// This structure contains state information for primitive emitting in geometry shader.
|
||||
struct GeometryEmitter {
|
||||
void Emit(std::span<Common::Vec4<f24>, 16> output_regs);
|
||||
|
||||
public:
|
||||
std::array<AttributeBuffer, 3> buffer;
|
||||
u8 vertex_id;
|
||||
bool prim_emit;
|
||||
bool winding;
|
||||
u32 output_mask;
|
||||
Handlers* handlers;
|
||||
|
||||
private:
|
||||
friend class boost::serialization::access;
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, const u32 file_version) {
|
||||
ar& buffer;
|
||||
ar& vertex_id;
|
||||
ar& prim_emit;
|
||||
ar& winding;
|
||||
ar& output_mask;
|
||||
}
|
||||
};
|
||||
|
||||
/**
 * This is an extended shader unit state that represents the special unit that can run both vertex
 * shader and geometry shader. It contains an additional primitive emitter and utilities for
 * geometry shader.
 */
struct GeometryShaderUnit : public ShaderUnit {
    GeometryShaderUnit();
    ~GeometryShaderUnit();

    /// Installs the emitter callbacks. NOTE(review): allocates `emitter.handlers` with
    /// `new` and never frees it — see shader_unit.cpp.
    void SetVertexHandlers(VertexHandler vertex_handler, WindingSetter winding_setter);
    /// Copies the geometry shader's output register mask into the emitter.
    void ConfigOutput(const ShaderRegs& config);

    GeometryEmitter emitter;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& boost::serialization::base_object<ShaderUnit>(*this);
        ar& emitter;
    }
};
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,109 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/pica/vertex_loader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// Precomputes, from the pipeline registers, the source offset, stride, format and
/// element count for every vertex attribute so LoadVertex can fetch them quickly.
VertexLoader::VertexLoader(Memory::MemorySystem& memory_, const PipelineRegs& regs)
    : memory{memory_} {
    const auto& attribute_config = regs.vertex_attributes;
    num_total_attributes = attribute_config.GetNumTotalAttributes();

    // Poison value so unconfigured attribute sources are obvious when debugging.
    vertex_attribute_sources.fill(0xdeadbeef);

    for (u32 i = 0; i < 16; i++) {
        vertex_attribute_is_default[i] = attribute_config.IsDefaultAttribute(i);
    }

    // Setup attribute data from loaders
    for (u32 loader = 0; loader < 12; ++loader) {
        const auto& loader_config = attribute_config.attribute_loaders[loader];

        // Byte offset of the next component within this loader's interleaved data.
        u32 offset = 0;

        // TODO: What happens if a loader overwrites a previous one's data?
        for (u32 component = 0; component < loader_config.component_count; ++component) {
            if (component >= 12) {
                LOG_ERROR(HW_GPU,
                          "Overflow in the vertex attribute loader {} trying to load component {}",
                          loader, component);
                continue;
            }

            u32 attribute_index = loader_config.GetComponent(component);
            if (attribute_index < 12) {
                // Components are aligned to their element size within the loader data.
                offset = Common::AlignUp(offset,
                                         attribute_config.GetElementSizeInBytes(attribute_index));
                vertex_attribute_sources[attribute_index] = loader_config.data_offset + offset;
                vertex_attribute_strides[attribute_index] =
                    static_cast<u32>(loader_config.byte_count);
                vertex_attribute_formats[attribute_index] =
                    attribute_config.GetFormat(attribute_index);
                vertex_attribute_elements[attribute_index] =
                    attribute_config.GetNumElements(attribute_index);
                offset += attribute_config.GetStride(attribute_index);
            } else if (attribute_index < 16) {
                // Attribute ids 12, 13, 14 and 15 signify 4, 8, 12 and 16-byte paddings,
                // respectively
                offset = Common::AlignUp(offset, 4);
                offset += (attribute_index - 11) * 4;
            } else {
                UNREACHABLE(); // This is truly unreachable due to the number of bits for each
                               // component
            }
        }
    }
}
|
||||
|
||||
// Defaulted out of line to keep the destructor definition with the rest of the class.
VertexLoader::~VertexLoader() = default;
|
||||
|
||||
/**
 * Fetches and decodes every configured attribute of one vertex into `input`.
 *
 * @param base_address Physical base address of the vertex arrays.
 * @param index Position in the submitted stream (unused in the visible fetch path).
 * @param vertex Vertex id used to index the attribute arrays.
 * @param input Destination attribute buffer.
 * @param input_default_attributes Fallback values for attributes flagged as default.
 */
void VertexLoader::LoadVertex(PAddr base_address, u32 index, u32 vertex, AttributeBuffer& input,
                              AttributeBuffer& input_default_attributes) const {
    for (s32 i = 0; i < num_total_attributes; ++i) {
        // Load the default attribute if we're configured to do so
        if (vertex_attribute_is_default[i]) {
            input[i] = input_default_attributes[i];
            continue;
        }

        // TODO(yuriks): In this case, no data gets loaded and the vertex
        // remains with the last value it had. This isn't currently maintained
        // as global state, however, and so won't work in Citra yet.
        if (vertex_attribute_elements[i] == 0) {
            // Fixed typo in the log message ("retension" -> "retention").
            LOG_ERROR(HW_GPU, "Vertex retention unimplemented");
            continue;
        }

        // Load per-vertex data from the loader arrays
        const PAddr source_addr =
            base_address + vertex_attribute_sources[i] + vertex_attribute_strides[i] * vertex;

        switch (vertex_attribute_formats[i]) {
        case PipelineRegs::VertexAttributeFormat::BYTE:
            LoadAttribute<s8>(source_addr, i, input);
            break;
        case PipelineRegs::VertexAttributeFormat::UBYTE:
            LoadAttribute<u8>(source_addr, i, input);
            break;
        case PipelineRegs::VertexAttributeFormat::SHORT:
            LoadAttribute<s16>(source_addr, i, input);
            break;
        case PipelineRegs::VertexAttributeFormat::FLOAT:
            LoadAttribute<f32>(source_addr, i, input);
            break;
        }

        // Default attribute values set if array elements have < 4 components. This
        // is *not* carried over from the default attribute settings even if they're
        // enabled for this attribute.
        for (u32 comp = vertex_attribute_elements[i]; comp < 4; comp++) {
            input[i][comp] = comp == 3 ? f24::One() : f24::Zero();
        }
    }
}
|
||||
|
||||
} // namespace Pica
|
@ -0,0 +1,47 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>

#include "core/memory.h"
#include "video_core/pica/output_vertex.h"
#include "video_core/pica/regs_pipeline.h"
|
||||
|
||||
namespace Memory {
|
||||
class MemorySystem;
|
||||
}
|
||||
|
||||
namespace Pica {
|
||||
|
||||
class VertexLoader {
|
||||
public:
|
||||
explicit VertexLoader(Memory::MemorySystem& memory_, const PipelineRegs& regs);
|
||||
~VertexLoader();
|
||||
|
||||
void LoadVertex(PAddr base_address, u32 index, u32 vertex, AttributeBuffer& input,
|
||||
AttributeBuffer& input_default_attributes) const;
|
||||
|
||||
template <typename T>
|
||||
void LoadAttribute(PAddr source_addr, u32 attrib, AttributeBuffer& out) const {
|
||||
const T* data = reinterpret_cast<const T*>(memory.GetPhysicalPointer(source_addr));
|
||||
for (u32 comp = 0; comp < vertex_attribute_elements[attrib]; ++comp) {
|
||||
out[attrib][comp] = f24::FromFloat32(data[comp]);
|
||||
}
|
||||
}
|
||||
|
||||
int GetNumTotalAttributes() const {
|
||||
return num_total_attributes;
|
||||
}
|
||||
|
||||
private:
|
||||
Memory::MemorySystem& memory;
|
||||
std::array<u32, 16> vertex_attribute_sources;
|
||||
std::array<u32, 16> vertex_attribute_strides{};
|
||||
std::array<PipelineRegs::VertexAttributeFormat, 16> vertex_attribute_formats;
|
||||
std::array<u32, 16> vertex_attribute_elements{};
|
||||
std::array<bool, 16> vertex_attribute_is_default;
|
||||
int num_total_attributes = 0;
|
||||
};
|
||||
|
||||
} // namespace Pica
|
@ -1,255 +0,0 @@
|
||||
// Copyright 2016 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <boost/serialization/array.hpp>
|
||||
#include <boost/serialization/split_member.hpp>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/geometry_pipeline.h"
|
||||
#include "video_core/primitive_assembly.h"
|
||||
#include "video_core/regs.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
// Boost::serialization doesn't like union types for some reason,
// so we need to mark arrays of union values with a special serialization method
template <typename Value, size_t Size>
struct UnionArray : public std::array<Value, Size> {
private:
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        // Each union element must occupy exactly one u32 so the whole array can be
        // (de)serialized in bulk as a raw u32 array through its storage.
        static_assert(sizeof(Value) == sizeof(u32));
        // Reinterpret the contiguous union storage as `u32[Size]` and hand the
        // dereferenced array to the archive in a single operation.
        ar&* static_cast<u32(*)[Size]>(static_cast<void*>(this->data()));
    }
    friend class boost::serialization::access;
};
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// Struct used to describe current Pica state
|
||||
struct State {
    State();

    /// Restores the state to its post-construction defaults.
    void Reset();

    /// Pica registers
    Regs regs;

    // Vertex and geometry shader setups (program code, swizzle data, uniforms).
    Shader::ShaderSetup vs;
    Shader::ShaderSetup gs;

    // Fallback values used for input attributes flagged as "default".
    Shader::AttributeBuffer input_default_attributes;

    /// Procedural texture lookup tables and color maps.
    struct ProcTex {
        union ValueEntry {
            u32 raw;

            // LUT value, encoded as 12-bit fixed point, with 12 fraction bits
            BitField<0, 12, u32> value; // 0.0.12 fixed point

            // Difference between two entry values. Used for efficient interpolation.
            // 0.0.12 fixed point with two's complement. The range is [-0.5, 0.5).
            // Note: the type of this is different from the one of lighting LUT
            BitField<12, 12, s32> difference;

            float ToFloat() const {
                return static_cast<float>(value) / 4095.f;
            }

            float DiffToFloat() const {
                return static_cast<float>(difference) / 4095.f;
            }
        };

        // RGBA8 color entry packed into a single u32.
        union ColorEntry {
            u32 raw;
            BitField<0, 8, u32> r;
            BitField<8, 8, u32> g;
            BitField<16, 8, u32> b;
            BitField<24, 8, u32> a;

            Common::Vec4<u8> ToVector() const {
                return {static_cast<u8>(r), static_cast<u8>(g), static_cast<u8>(b),
                        static_cast<u8>(a)};
            }
        };

        union ColorDifferenceEntry {
            u32 raw;
            BitField<0, 8, s32> r; // half of the difference between two ColorEntry
            BitField<8, 8, s32> g;
            BitField<16, 8, s32> b;
            BitField<24, 8, s32> a;

            Common::Vec4<s32> ToVector() const {
                // Stored halved; doubling recovers the full difference.
                return Common::Vec4<s32>{r, g, b, a} * 2;
            }
        };

        UnionArray<ValueEntry, 128> noise_table;
        UnionArray<ValueEntry, 128> color_map_table;
        UnionArray<ValueEntry, 128> alpha_map_table;
        UnionArray<ColorEntry, 256> color_table;
        UnionArray<ColorDifferenceEntry, 256> color_diff_table;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const unsigned int file_version) {
            // NOTE: field order here is the savestate wire format — do not reorder.
            ar& noise_table;
            ar& color_map_table;
            ar& alpha_map_table;
            ar& color_table;
            ar& color_diff_table;
        }
    } proctex;

    /// Fragment-lighting lookup tables.
    struct Lighting {
        union LutEntry {
            // Used for raw access
            u32 raw;

            // LUT value, encoded as 12-bit fixed point, with 12 fraction bits
            BitField<0, 12, u32> value; // 0.0.12 fixed point

            // Used for efficient interpolation.
            BitField<12, 11, u32> difference; // 0.0.11 fixed point
            BitField<23, 1, u32> neg_difference;

            float ToFloat() const {
                return static_cast<float>(value) / 4095.f;
            }

            float DiffToFloat() const {
                // Sign is carried separately in neg_difference (sign-magnitude).
                float diff = static_cast<float>(difference) / 2047.f;
                return neg_difference ? -diff : diff;
            }

            template <class Archive>
            void serialize(Archive& ar, const unsigned int file_version) {
                ar& raw;
            }
        };

        std::array<UnionArray<LutEntry, 256>, 24> luts;
    } lighting;

    /// Fog lookup table.
    struct {
        union LutEntry {
            // Used for raw access
            u32 raw;

            BitField<0, 13, s32> difference; // 1.1.11 fixed point
            BitField<13, 11, u32> value;     // 0.0.11 fixed point

            float ToFloat() const {
                return static_cast<float>(value) / 2047.0f;
            }

            float DiffToFloat() const {
                return static_cast<float>(difference) / 2047.0f;
            }
        };

        UnionArray<LutEntry, 128> lut;
    } fog;

    /// Current Pica command list
    struct {
        PAddr addr; // This exists only for serialization
        const u32* head_ptr;
        const u32* current_ptr;
        u32 length;
    } cmd_list;

    /// Struct used to describe immediate mode rendering state
    struct ImmediateModeState {
        // Used to buffer partial vertices for immediate-mode rendering.
        Shader::AttributeBuffer input_vertex;
        // Index of the next attribute to be loaded into `input_vertex`.
        u32 current_attribute = 0;
        // Indicates the immediate mode just started and the geometry pipeline needs to reconfigure
        bool reset_geometry_pipeline = true;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const unsigned int file_version) {
            ar& input_vertex;
            ar& current_attribute;
            ar& reset_geometry_pipeline;
        }

    } immediate;

    // the geometry shader needs to be kept in the global state because some shaders rely on
    // preserved register values across shader invocations.
    // TODO: also bring the three vertex shader units here and implement the shader scheduler.
    Shader::GSUnitState gs_unit;

    GeometryPipeline geometry_pipeline;

    // This is constructed with a dummy triangle topology
    PrimitiveAssembler<Shader::OutputVertex> primitive_assembler;

    // Staging buffers for multi-word uniform/attribute register writes: the counter
    // tracks how many words have been received so far for the current transfer.
    int vs_float_regs_counter = 0;
    std::array<u32, 4> vs_uniform_write_buffer{};

    int gs_float_regs_counter = 0;
    std::array<u32, 4> gs_uniform_write_buffer{};

    int default_attr_counter = 0;
    std::array<u32, 3> default_attr_write_buffer{};

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int file_version) {
        // NOTE: field order here is the savestate wire format — do not reorder.
        ar& regs.reg_array;
        ar& vs;
        ar& gs;
        ar& input_default_attributes;
        ar& proctex;
        ar& lighting.luts;
        ar& fog.lut;
        ar& cmd_list.addr;
        ar& cmd_list.length;
        ar& immediate;
        ar& gs_unit;
        ar& geometry_pipeline;
        ar& primitive_assembler;
        ar& vs_float_regs_counter;
        ar& boost::serialization::make_array(vs_uniform_write_buffer.data(),
                                             vs_uniform_write_buffer.size());
        ar& gs_float_regs_counter;
        ar& boost::serialization::make_array(gs_uniform_write_buffer.data(),
                                             gs_uniform_write_buffer.size());
        ar& default_attr_counter;
        ar& boost::serialization::make_array(default_attr_write_buffer.data(),
                                             default_attr_write_buffer.size());
        // Delegate the direction-dependent cmd_list pointer handling to save()/load().
        boost::serialization::split_member(ar, *this, file_version);
    }

    // Persists the command-list cursor as an offset from the head pointer, since
    // raw host pointers cannot be serialized.
    template <class Archive>
    void save(Archive& ar, const unsigned int file_version) const {
        ar << static_cast<u32>(cmd_list.current_ptr - cmd_list.head_ptr);
    }

    // Rebuilds the command-list pointers from the serialized physical address and
    // offset after the rest of the state has been loaded.
    template <class Archive>
    void load(Archive& ar, const unsigned int file_version) {
        u32 offset{};
        ar >> offset;
        cmd_list.head_ptr =
            reinterpret_cast<u32*>(VideoCore::g_memory->GetPhysicalPointer(cmd_list.addr));
        cmd_list.current_ptr = cmd_list.head_ptr + offset;
    }
};
|
||||
|
||||
extern State g_state; ///< Current Pica state
|
||||
|
||||
} // namespace Pica
|
@ -1,87 +0,0 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/primitive_assembly.h"
|
||||
#include "video_core/regs_pipeline.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
// Constructs the assembler with the triangle topology it will decode.
template <typename VertexType>
PrimitiveAssembler<VertexType>::PrimitiveAssembler(PipelineRegs::TriangleTopology topology)
    : topology(topology) {}
|
||||
|
||||
template <typename VertexType>
|
||||
void PrimitiveAssembler<VertexType>::SubmitVertex(const VertexType& vtx,
|
||||
const TriangleHandler& triangle_handler) {
|
||||
switch (topology) {
|
||||
case PipelineRegs::TriangleTopology::List:
|
||||
case PipelineRegs::TriangleTopology::Shader:
|
||||
if (buffer_index < 2) {
|
||||
buffer[buffer_index++] = vtx;
|
||||
} else {
|
||||
buffer_index = 0;
|
||||
if (topology == PipelineRegs::TriangleTopology::Shader && winding) {
|
||||
triangle_handler(buffer[1], buffer[0], vtx);
|
||||
winding = false;
|
||||
} else {
|
||||
triangle_handler(buffer[0], buffer[1], vtx);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case PipelineRegs::TriangleTopology::Strip:
|
||||
case PipelineRegs::TriangleTopology::Fan:
|
||||
if (strip_ready)
|
||||
triangle_handler(buffer[0], buffer[1], vtx);
|
||||
|
||||
buffer[buffer_index] = vtx;
|
||||
|
||||
strip_ready |= (buffer_index == 1);
|
||||
|
||||
if (topology == PipelineRegs::TriangleTopology::Strip)
|
||||
buffer_index = !buffer_index;
|
||||
else if (topology == PipelineRegs::TriangleTopology::Fan)
|
||||
buffer_index = 1;
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unknown triangle topology {:x}:", (int)topology);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Requests that the next emitted triangle be output with flipped winding order.
// NOTE(review): per SubmitVertex, the flag is only honored (and cleared) under
// TriangleTopology::Shader; in other topologies it has no effect until Reset().
template <typename VertexType>
void PrimitiveAssembler<VertexType>::SetWinding() {
    winding = true;
}
||||
|
||||
// Discards any buffered partial triangle and clears the strip/winding state.
template <typename VertexType>
void PrimitiveAssembler<VertexType>::Reset() {
    buffer_index = 0;
    strip_ready = false;
    winding = false;
}
|
||||
|
||||
// Resets the assembler and switches it to a new triangle topology.
template <typename VertexType>
void PrimitiveAssembler<VertexType>::Reconfigure(PipelineRegs::TriangleTopology topology) {
    Reset();
    this->topology = topology;
}
|
||||
|
||||
template <typename VertexType>
|
||||
bool PrimitiveAssembler<VertexType>::IsEmpty() const {
|
||||
return buffer_index == 0 && strip_ready == false;
|
||||
}
|
||||
|
||||
// Returns the currently configured triangle topology.
template <typename VertexType>
PipelineRegs::TriangleTopology PrimitiveAssembler<VertexType>::GetTopology() const {
    return topology;
}
|
||||
|
||||
// explicitly instantiate use cases
|
||||
template struct PrimitiveAssembler<Shader::OutputVertex>;
|
||||
|
||||
} // namespace Pica
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue