shader: SSA and dominance

master
ReinUsesLisp 2021-02-02 21:07:00 +07:00 committed by ameerj
parent 2d48a7b4d0
commit 6c4cc0cd06
24 changed files with 569 additions and 76 deletions

@ -11,6 +11,8 @@ add_executable(shader_recompiler
frontend/ir/condition.h
frontend/ir/flow_test.cpp
frontend/ir/flow_test.h
frontend/ir/function.cpp
frontend/ir/function.h
frontend/ir/ir_emitter.cpp
frontend/ir/ir_emitter.h
frontend/ir/microinstruction.cpp
@ -51,6 +53,7 @@ add_executable(shader_recompiler
ir_opt/get_set_elimination_pass.cpp
ir_opt/identity_removal_pass.cpp
ir_opt/passes.h
ir_opt/ssa_rewrite_pass.cpp
ir_opt/verification_pass.cpp
main.cpp
)

@ -37,6 +37,10 @@ Block::iterator Block::PrependNewInst(iterator insertion_point, Opcode op,
return result_it;
}
void Block::AddImmediatePredecessor(IR::Block* immediate_predecessor) {
imm_predecessors.push_back(immediate_predecessor);
}
u32 Block::LocationBegin() const noexcept {
return location_begin;
}
@ -53,6 +57,18 @@ const Block::InstructionList& Block::Instructions() const noexcept {
return instructions;
}
std::span<IR::Block* const> Block::ImmediatePredecessors() const noexcept {
return imm_predecessors;
}
static std::string BlockToIndex(const std::map<const Block*, size_t>& block_to_index,
Block* block) {
if (const auto it{block_to_index.find(block)}; it != block_to_index.end()) {
return fmt::format("{{Block ${}}}", it->second);
}
return fmt::format("$<unknown block {:016x}>", reinterpret_cast<u64>(block));
}
static std::string ArgToIndex(const std::map<const Block*, size_t>& block_to_index,
const std::map<const Inst*, size_t>& inst_to_index,
const Value& arg) {
@ -60,10 +76,7 @@ static std::string ArgToIndex(const std::map<const Block*, size_t>& block_to_ind
return "<null>";
}
if (arg.IsLabel()) {
if (const auto it{block_to_index.find(arg.Label())}; it != block_to_index.end()) {
return fmt::format("{{Block ${}}}", it->second);
}
return fmt::format("$<unknown block {:016x}>", reinterpret_cast<u64>(arg.Label()));
return BlockToIndex(block_to_index, arg.Label());
}
if (!arg.IsImmediate()) {
if (const auto it{inst_to_index.find(arg.Inst())}; it != inst_to_index.end()) {
@ -115,6 +128,15 @@ std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>&
} else {
ret += fmt::format(" {}", op); // '%00000 = ' -> 1 + 5 + 3 = 9 spaces
}
if (op == Opcode::Phi) {
size_t val_index{0};
for (const auto& [phi_block, phi_val] : inst.PhiOperands()) {
ret += val_index != 0 ? ", " : " ";
ret += fmt::format("[ {}, {} ]", ArgToIndex(block_to_index, inst_to_index, phi_val),
BlockToIndex(block_to_index, phi_block));
++val_index;
}
} else {
const size_t arg_count{NumArgsOf(op)};
for (size_t arg_index = 0; arg_index < arg_count; ++arg_index) {
const Value arg{inst.Arg(arg_index)};
@ -127,6 +149,7 @@ std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>&
ret += fmt::format("<type error: {} != {}>", actual_type, expected_type);
}
}
}
if (TypeOf(op) != Type::Void) {
ret += fmt::format(" (uses: {})\n", inst.UseCount());
} else {

@ -6,6 +6,8 @@
#include <initializer_list>
#include <map>
#include <span>
#include <vector>
#include <boost/intrusive/list.hpp>
#include <boost/pool/pool_alloc.hpp>
@ -36,7 +38,11 @@ public:
void AppendNewInst(Opcode op, std::initializer_list<Value> args);
/// Prepends a new instruction to this basic block before the insertion point.
iterator PrependNewInst(iterator insertion_point, Opcode op, std::initializer_list<Value> args);
iterator PrependNewInst(iterator insertion_point, Opcode op,
std::initializer_list<Value> args = {});
/// Adds a new immediate predecessor to the basic block.
void AddImmediatePredecessor(IR::Block* immediate_predecessor);
/// Gets the starting location of this basic block.
[[nodiscard]] u32 LocationBegin() const noexcept;
@ -44,9 +50,12 @@ public:
[[nodiscard]] u32 LocationEnd() const noexcept;
/// Gets a mutable reference to the instruction list for this basic block.
InstructionList& Instructions() noexcept;
[[nodiscard]] InstructionList& Instructions() noexcept;
/// Gets an immutable reference to the instruction list for this basic block.
const InstructionList& Instructions() const noexcept;
[[nodiscard]] const InstructionList& Instructions() const noexcept;
/// Gets an immutable span to the immediate predecessors.
[[nodiscard]] std::span<IR::Block* const> ImmediatePredecessors() const noexcept;
[[nodiscard]] bool empty() const {
return instructions.empty();
@ -115,13 +124,16 @@ private:
/// End location of this block
u32 location_end;
/// List of instructions in this block.
/// List of instructions in this block
InstructionList instructions;
/// Memory pool for instruction list
boost::fast_pool_allocator<Inst, boost::default_user_allocator_malloc_free,
boost::details::pool::null_mutex>
instruction_alloc_pool;
/// Block immediate predecessors
std::vector<IR::Block*> imm_predecessors;
};
[[nodiscard]] std::string DumpBlock(const Block& block);

@ -0,0 +1,5 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/frontend/ir/function.h"

@ -0,0 +1,25 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include "shader_recompiler/frontend/ir/basic_block.h"
namespace Shader::IR {
struct Function {
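// Blocks are constructed in place over pool-allocated storage (see Program's block_alloc_pool),
// so destroying a block must not free memory; the deleter below only runs the destructor and
// leaves ownership of the allocation with the pool.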
struct InplaceDelete {
void operator()(IR::Block* block) const noexcept {
std::destroy_at(block);
}
};
using UniqueBlock = std::unique_ptr<IR::Block, InplaceDelete>;
std::vector<UniqueBlock> blocks;
};
} // namespace Shader::IR

@ -30,6 +30,11 @@ static void RemovePseudoInstruction(IR::Inst*& inst, IR::Opcode expected_opcode)
bool Inst::MayHaveSideEffects() const noexcept {
switch (op) {
case Opcode::Branch:
case Opcode::BranchConditional:
case Opcode::Exit:
case Opcode::Return:
case Opcode::Unreachable:
case Opcode::SetAttribute:
case Opcode::SetAttributeIndexed:
case Opcode::WriteGlobalU8:
@ -113,6 +118,17 @@ void Inst::SetArg(size_t index, Value value) {
args[index] = value;
}
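// Phi operands are stored separately from the fixed argument array: a phi takes one
// (predecessor, value) pair per incoming edge instead of a fixed arity.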
std::span<const std::pair<Block*, Value>> Inst::PhiOperands() const noexcept {
return phi_operands;
}
void Inst::AddPhiOperand(Block* predecessor, const Value& value) {
if (!value.IsImmediate()) {
Use(value);
}
phi_operands.emplace_back(predecessor, value);
}
void Inst::Invalidate() {
ClearArgs();
op = Opcode::Void;
@ -125,6 +141,12 @@ void Inst::ClearArgs() {
}
value = {};
}
for (auto& [phi_block, phi_op] : phi_operands) {
if (!phi_op.IsImmediate()) {
UndoUse(phi_op);
}
}
phi_operands.clear();
}
void Inst::ReplaceUsesWith(Value replacement) {

@ -5,6 +5,8 @@
#pragma once
#include <array>
#include <span>
#include <vector>
#include <boost/intrusive/list.hpp>
@ -15,6 +17,8 @@
namespace Shader::IR {
class Block;
constexpr size_t MAX_ARG_COUNT = 4;
class Inst : public boost::intrusive::list_base_hook<> {
@ -59,6 +63,11 @@ public:
/// Set the value of a given argument index.
void SetArg(size_t index, Value value);
/// Get an immutable span to the phi operands.
[[nodiscard]] std::span<const std::pair<Block*, Value>> PhiOperands() const noexcept;
/// Add phi operand to a phi instruction.
void AddPhiOperand(Block* predecessor, const Value& value);
void Invalidate();
void ClearArgs();
@ -76,6 +85,7 @@ private:
Inst* carry_inst{};
Inst* overflow_inst{};
Inst* zsco_inst{};
std::vector<std::pair<Block*, Value>> phi_operands;
u64 flags{};
};

@ -5,6 +5,7 @@
// opcode name, return type, arg1 type, arg2 type, arg3 type, arg4 type, ...
OPCODE(Void, Void, )
OPCODE(Identity, Opaque, Opaque, )
OPCODE(Phi, Opaque, /*todo*/ )
// Control flow
OPCODE(Branch, Void, Label, )
@ -35,6 +36,13 @@ OPCODE(SetSFlag, Void, U1,
OPCODE(SetCFlag, Void, U1, )
OPCODE(SetOFlag, Void, U1, )
// Undefined
OPCODE(Undef1, U1, )
OPCODE(Undef8, U8, )
OPCODE(Undef16, U16, )
OPCODE(Undef32, U32, )
OPCODE(Undef64, U64, )
// Memory operations
OPCODE(WriteGlobalU8, Void, U64, U32, )
OPCODE(WriteGlobalS8, Void, U64, U32, )

@ -10,6 +10,13 @@ namespace Shader::IR {
enum class Pred { P0, P1, P2, P3, P4, P5, P6, PT };
constexpr size_t NUM_USER_PREDS = 6;
constexpr size_t NUM_PREDS = 7;
[[nodiscard]] constexpr size_t PredIndex(Pred pred) noexcept {
return static_cast<size_t>(pred);
}
} // namespace Shader::IR
template <>

@ -271,6 +271,9 @@ enum class Reg : u64 {
};
static_assert(static_cast<int>(Reg::RZ) == 255);
constexpr size_t NUM_USER_REGS = 255;
constexpr size_t NUM_REGS = 256;
[[nodiscard]] constexpr Reg operator+(Reg reg, int num) {
if (reg == Reg::RZ) {
// Adding or subtracting registers from RZ yields RZ
@ -290,8 +293,12 @@ static_assert(static_cast<int>(Reg::RZ) == 255);
return reg + (-num);
}
[[nodiscard]] constexpr size_t RegIndex(Reg reg) noexcept {
return static_cast<size_t>(reg);
}
[[nodiscard]] constexpr bool IsAligned(Reg reg, size_t align) {
return (static_cast<size_t>(reg) / align) * align == static_cast<size_t>(reg);
return (RegIndex(reg) / align) * align == RegIndex(reg);
}
} // namespace Shader::IR

@ -115,6 +115,43 @@ u64 Value::U64() const {
return imm_u64;
}
bool Value::operator==(const Value& other) const {
if (type != other.type) {
return false;
}
switch (type) {
case Type::Void:
return true;
case Type::Opaque:
return inst == other.inst;
case Type::Label:
return label == other.label;
case Type::Reg:
return reg == other.reg;
case Type::Pred:
return pred == other.pred;
case Type::Attribute:
return attribute == other.attribute;
case Type::U1:
return imm_u1 == other.imm_u1;
case Type::U8:
return imm_u8 == other.imm_u8;
case Type::U16:
return imm_u16 == other.imm_u16;
case Type::U32:
return imm_u32 == other.imm_u32;
case Type::U64:
return imm_u64 == other.imm_u64;
case Type::ZSCO:
throw NotImplementedException("ZSCO comparison");
}
throw LogicError("Invalid type {}", type);
}
bool Value::operator!=(const Value& other) const {
return !operator==(other);
}
void Value::ValidateAccess(IR::Type expected) const {
if (type != expected) {
throw LogicError("Reading {} out of {}", expected, type);

@ -48,6 +48,9 @@ public:
[[nodiscard]] u32 U32() const;
[[nodiscard]] u64 U64() const;
[[nodiscard]] bool operator==(const Value& other) const;
[[nodiscard]] bool operator!=(const Value& other) const;
private:
void ValidateAccess(IR::Type expected) const;

@ -36,6 +36,7 @@ static std::array<Block, 2> Split(Block&& block, Location pc, BlockId new_id) {
.cond{true},
.branch_true{new_id},
.branch_false{UNREACHABLE_BLOCK_ID},
.imm_predecessors{},
},
Block{
.begin{pc},
@ -46,6 +47,7 @@ static std::array<Block, 2> Split(Block&& block, Location pc, BlockId new_id) {
.cond{block.cond},
.branch_true{block.branch_true},
.branch_false{block.branch_false},
.imm_predecessors{},
},
};
}
@ -108,7 +110,7 @@ static bool HasFlowTest(Opcode opcode) {
}
}
static std::string Name(const Block& block) {
static std::string NameOf(const Block& block) {
if (block.begin.IsVirtual()) {
return fmt::format("\"Virtual {}\"", block.id);
} else {
@ -154,13 +156,127 @@ bool Block::Contains(Location pc) const noexcept {
}
Function::Function(Location start_address)
: entrypoint{start_address}, labels{Label{
: entrypoint{start_address}, labels{{
.address{start_address},
.block_id{0},
.stack{},
}} {}
void Function::BuildBlocksMap() {
const size_t num_blocks{NumBlocks()};
blocks_map.resize(num_blocks);
for (size_t block_index = 0; block_index < num_blocks; ++block_index) {
Block& block{blocks_data[block_index]};
blocks_map[block.id] = &block;
}
}
void Function::BuildImmediatePredecessors() {
for (const Block& block : blocks_data) {
if (block.branch_true != UNREACHABLE_BLOCK_ID) {
blocks_map[block.branch_true]->imm_predecessors.push_back(block.id);
}
if (block.branch_false != UNREACHABLE_BLOCK_ID) {
blocks_map[block.branch_false]->imm_predecessors.push_back(block.id);
}
}
}
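// Iterative depth-first traversal: a block is emitted once both of its branches have been
// exhausted, so the entry block lands last in post_order_blocks and walking that vector in
// reverse yields a reverse post order.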
void Function::BuildPostOrder() {
boost::container::small_vector<BlockId, 0x110> block_stack;
post_order_map.resize(NumBlocks());
Block& first_block{blocks_data[blocks.front()]};
first_block.post_order_visited = true;
block_stack.push_back(first_block.id);
const auto visit_branch = [&](BlockId block_id, BlockId branch_id) {
if (branch_id == UNREACHABLE_BLOCK_ID) {
return false;
}
if (blocks_map[branch_id]->post_order_visited) {
return false;
}
blocks_map[branch_id]->post_order_visited = true;
// Calling push_back twice is faster than insert on msvc
block_stack.push_back(block_id);
block_stack.push_back(branch_id);
return true;
};
while (!block_stack.empty()) {
const Block* const block{blocks_map[block_stack.back()]};
block_stack.pop_back();
if (!visit_branch(block->id, block->branch_true) &&
!visit_branch(block->id, block->branch_false)) {
post_order_map[block->id] = static_cast<u32>(post_order_blocks.size());
post_order_blocks.push_back(block->id);
}
}
}
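// Fixed-point immediate-dominator construction in the style of Cooper, Harvey and Kennedy's
// "A Simple, Fast Dominance Algorithm": blocks are visited in reverse post order (entry
// excluded) and the dominators of already-processed predecessors are intersected by walking
// post-order indices until nothing changes.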
void Function::BuildImmediateDominators() {
auto transform_block_id{std::views::transform([this](BlockId id) { return blocks_map[id]; })};
auto reverse_order_but_first{std::views::reverse | std::views::drop(1) | transform_block_id};
auto has_idom{std::views::filter([](Block* block) { return block->imm_dominator; })};
auto intersect{[this](Block* finger1, Block* finger2) {
while (finger1 != finger2) {
while (post_order_map[finger1->id] < post_order_map[finger2->id]) {
finger1 = finger1->imm_dominator;
}
while (post_order_map[finger2->id] < post_order_map[finger1->id]) {
finger2 = finger2->imm_dominator;
}
}
return finger1;
}};
for (Block& block : blocks_data) {
block.imm_dominator = nullptr;
}
Block* const start_block{&blocks_data[blocks.front()]};
start_block->imm_dominator = start_block;
bool changed{true};
while (changed) {
changed = false;
for (Block* const block : post_order_blocks | reverse_order_but_first) {
Block* new_idom{};
for (Block* predecessor : block->imm_predecessors | transform_block_id | has_idom) {
new_idom = new_idom ? intersect(predecessor, new_idom) : predecessor;
}
changed |= block->imm_dominator != new_idom;
block->imm_dominator = new_idom;
}
}
}
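// Standard dominance-frontier construction: only join blocks (two or more predecessors) can
// appear in a frontier, and each predecessor chain is walked up the dominator tree until the
// join block's immediate dominator is reached.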
void Function::BuildDominanceFrontier() {
auto transform_block_id{std::views::transform([this](BlockId id) { return blocks_map[id]; })};
auto has_enough_predecessors{[](Block& block) { return block.imm_predecessors.size() >= 2; }};
for (Block& block : blocks_data | std::views::filter(has_enough_predecessors)) {
for (Block* current : block.imm_predecessors | transform_block_id) {
while (current != block.imm_dominator) {
current->dominance_frontiers.push_back(block.id);
current = current->imm_dominator;
}
}
}
}
CFG::CFG(Environment& env_, Location start_address) : env{env_} {
VisitFunctions(start_address);
for (Function& function : functions) {
function.BuildBlocksMap();
function.BuildImmediatePredecessors();
function.BuildPostOrder();
function.BuildImmediateDominators();
function.BuildDominanceFrontier();
}
}
void CFG::VisitFunctions(Location start_address) {
functions.emplace_back(start_address);
for (FunctionId function_id = 0; function_id < functions.size(); ++function_id) {
while (!functions[function_id].labels.empty()) {
@ -202,6 +318,7 @@ void CFG::AnalyzeLabel(FunctionId function_id, Label& label) {
.cond{true},
.branch_true{UNREACHABLE_BLOCK_ID},
.branch_false{UNREACHABLE_BLOCK_ID},
.imm_predecessors{},
};
// Analyze instructions until it reaches an already visited block or there's a branch
bool is_branch{false};
@ -310,7 +427,7 @@ CFG::AnalysisState CFG::AnalyzeInst(Block& block, FunctionId function_id, Locati
// Technically CAL pushes into PRET, but that's implicit in the function call for us
// Insert the function into the list if it doesn't exist
if (std::ranges::find(functions, cal_pc, &Function::entrypoint) == functions.end()) {
functions.push_back(cal_pc);
functions.emplace_back(cal_pc);
}
// Handle CAL like a regular instruction
break;
@ -352,6 +469,7 @@ void CFG::AnalyzeCondInst(Block& block, FunctionId function_id, Location pc,
.cond{cond},
.branch_true{conditional_block_id},
.branch_false{UNREACHABLE_BLOCK_ID},
.imm_predecessors{},
})};
// Set the end properties of the conditional instruction and give it a new identity
Block& conditional_block{block};
@ -465,14 +583,14 @@ std::string CFG::Dot() const {
dot += fmt::format("\t\tnode [style=filled];\n");
for (const u32 block_index : function.blocks) {
const Block& block{function.blocks_data[block_index]};
const std::string name{Name(block)};
const std::string name{NameOf(block)};
const auto add_branch = [&](BlockId branch_id, bool add_label) {
const auto it{std::ranges::find(function.blocks_data, branch_id, &Block::id)};
dot += fmt::format("\t\t{}->", name);
if (it == function.blocks_data.end()) {
dot += fmt::format("\"Unknown label {}\"", branch_id);
} else {
dot += Name(*it);
dot += NameOf(*it);
};
if (add_label && block.cond != true && block.cond != false) {
dot += fmt::format(" [label=\"{}\"]", block.cond);
@ -520,7 +638,7 @@ std::string CFG::Dot() const {
if (functions.front().blocks.empty()) {
dot += "Start;\n";
} else {
dot += fmt::format("\tStart -> {};\n", Name(functions.front().blocks_data.front()));
dot += fmt::format("\tStart -> {};\n", NameOf(functions.front().blocks_data.front()));
}
dot += fmt::format("\tStart [shape=diamond];\n");
}

@ -70,6 +70,12 @@ struct Block {
IR::Condition cond;
BlockId branch_true;
BlockId branch_false;
boost::container::small_vector<BlockId, 4> imm_predecessors;
boost::container::small_vector<BlockId, 8> dominance_frontiers;
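// post_order_visited is only consulted while BuildPostOrder runs; BuildImmediateDominators
// resets and reuses the storage as imm_dominator afterwards, which is why both can share a
// union.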
union {
bool post_order_visited{false};
Block* imm_dominator;
};
};
struct Label {
@ -81,11 +87,30 @@ struct Label {
struct Function {
Function(Location start_address);
void BuildBlocksMap();
void BuildImmediatePredecessors();
void BuildPostOrder();
void BuildImmediateDominators();
void BuildDominanceFrontier();
[[nodiscard]] size_t NumBlocks() const noexcept {
return static_cast<size_t>(current_block_id) + 1;
}
Location entrypoint;
BlockId current_block_id{0};
boost::container::small_vector<Label, 16> labels;
boost::container::small_vector<u32, 0x130> blocks;
boost::container::small_vector<Block, 0x130> blocks_data;
// Translates from BlockId to a block pointer
boost::container::small_vector<Block*, 0x130> blocks_map;
boost::container::small_vector<u32, 0x130> post_order_blocks;
boost::container::small_vector<BlockId, 0x130> post_order_map;
};
class CFG {
@ -97,6 +122,12 @@ class CFG {
public:
explicit CFG(Environment& env, Location start_address);
CFG& operator=(const CFG&) = delete;
CFG(const CFG&) = delete;
CFG& operator=(CFG&&) = delete;
CFG(CFG&&) = delete;
[[nodiscard]] std::string Dot() const;
[[nodiscard]] std::span<const Function> Functions() const noexcept {
@ -104,20 +135,22 @@ public:
}
private:
void VisitFunctions(Location start_address);
void AnalyzeLabel(FunctionId function_id, Label& label);
/// Inspect already visited blocks.
/// Return true when the block has already been visited
[[nodiscard]] bool InspectVisitedBlocks(FunctionId function_id, const Label& label);
bool InspectVisitedBlocks(FunctionId function_id, const Label& label);
[[nodiscard]] AnalysisState AnalyzeInst(Block& block, FunctionId function_id, Location pc);
AnalysisState AnalyzeInst(Block& block, FunctionId function_id, Location pc);
void AnalyzeCondInst(Block& block, FunctionId function_id, Location pc, EndClass insn_end_class,
IR::Condition cond);
/// Return true when the branch instruction is confirmed to be a branch
[[nodiscard]] bool AnalyzeBranch(Block& block, FunctionId function_id, Location pc,
Instruction inst, Opcode opcode);
bool AnalyzeBranch(Block& block, FunctionId function_id, Location pc, Instruction inst,
Opcode opcode);
void AnalyzeBRA(Block& block, FunctionId function_id, Location pc, Instruction inst,
bool is_absolute);
@ -126,8 +159,7 @@ private:
AnalysisState AnalyzeEXIT(Block& block, FunctionId function_id, Location pc, Instruction inst);
/// Return the branch target block id
[[nodiscard]] BlockId AddLabel(const Block& block, Stack stack, Location pc,
FunctionId function_id);
BlockId AddLabel(const Block& block, Stack stack, Location pc, FunctionId function_id);
Environment& env;
boost::container::small_vector<Function, 1> functions;

@ -8,41 +8,54 @@
#include "shader_recompiler/frontend/maxwell/program.h"
#include "shader_recompiler/frontend/maxwell/termination_code.h"
#include "shader_recompiler/frontend/maxwell/translate/translate.h"
#include "shader_recompiler/ir_opt/passes.h"
namespace Shader::Maxwell {
Program::Function::~Function() {
std::ranges::for_each(blocks, &std::destroy_at<IR::Block>);
}
Program::Program(Environment& env, const Flow::CFG& cfg) {
std::vector<IR::Block*> block_map;
functions.reserve(cfg.Functions().size());
for (const Flow::Function& cfg_function : cfg.Functions()) {
Function& function{functions.emplace_back()};
namespace {
void TranslateCode(Environment& env, const Flow::Function& cfg_function, IR::Function& function,
std::span<IR::Block*> block_map, IR::Block* block_memory) {
const size_t num_blocks{cfg_function.blocks.size()};
IR::Block* block_memory{block_alloc_pool.allocate(num_blocks)};
function.blocks.reserve(num_blocks);
block_map.resize(cfg_function.blocks_data.size());
// Visit the instructions of all blocks
for (const Flow::BlockId block_id : cfg_function.blocks) {
const Flow::Block& flow_block{cfg_function.blocks_data[block_id]};
IR::Block* const block{std::construct_at(block_memory, Translate(env, flow_block))};
function.blocks.emplace_back(std::construct_at(block_memory, Translate(env, flow_block)));
block_map[flow_block.id] = function.blocks.back().get();
++block_memory;
function.blocks.push_back(block);
block_map[flow_block.id] = block;
}
// Now that all blocks are defined, emit the termination instructions
}
void EmitTerminationInsts(const Flow::Function& cfg_function,
std::span<IR::Block* const> block_map) {
for (const Flow::BlockId block_id : cfg_function.blocks) {
const Flow::Block& flow_block{cfg_function.blocks_data[block_id]};
EmitTerminationCode(flow_block, block_map);
}
}
void TranslateFunction(Environment& env, const Flow::Function& cfg_function, IR::Function& function,
IR::Block* block_memory) {
std::vector<IR::Block*> block_map;
block_map.resize(cfg_function.blocks_data.size());
TranslateCode(env, cfg_function, function, block_map, block_memory);
EmitTerminationInsts(cfg_function, block_map);
}
} // Anonymous namespace
Program::Program(Environment& env, const Flow::CFG& cfg) {
functions.reserve(cfg.Functions().size());
for (const Flow::Function& cfg_function : cfg.Functions()) {
TranslateFunction(env, cfg_function, functions.emplace_back(),
block_alloc_pool.allocate(cfg_function.blocks.size()));
}
std::ranges::for_each(functions, Optimization::SsaRewritePass);
for (IR::Function& function : functions) {
Optimization::Invoke(Optimization::DeadCodeEliminationPass, function);
Optimization::Invoke(Optimization::IdentityRemovalPass, function);
// Optimization::Invoke(Optimization::VerificationPass, function);
}
}
std::string DumpProgram(const Program& program) {
@ -50,16 +63,16 @@ std::string DumpProgram(const Program& program) {
std::map<const IR::Inst*, size_t> inst_to_index;
std::map<const IR::Block*, size_t> block_to_index;
for (const Program::Function& function : program.functions) {
for (const IR::Block* const block : function.blocks) {
block_to_index.emplace(block, index);
for (const IR::Function& function : program.functions) {
for (const auto& block : function.blocks) {
block_to_index.emplace(block.get(), index);
++index;
}
}
std::string ret;
for (const Program::Function& function : program.functions) {
for (const IR::Function& function : program.functions) {
ret += fmt::format("Function\n");
for (const IR::Block* const block : function.blocks) {
for (const auto& block : function.blocks) {
ret += IR::DumpBlock(*block, block_to_index, inst_to_index, index) + '\n';
}
}

@ -4,13 +4,16 @@
#pragma once
#include <memory>
#include <string>
#include <vector>
#include <boost/container/small_vector.hpp>
#include <boost/pool/pool_alloc.hpp>
#include "shader_recompiler/environment.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/function.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
namespace Shader::Maxwell {
@ -22,16 +25,10 @@ public:
explicit Program(Environment& env, const Flow::CFG& cfg);
private:
struct Function {
~Function();
std::vector<IR::Block*> blocks;
};
boost::pool_allocator<IR::Block, boost::default_user_allocator_new_delete,
boost::details::pool::null_mutex>
block_alloc_pool;
std::vector<Function> functions;
boost::container::small_vector<IR::Function, 1> functions;
};
[[nodiscard]] std::string DumpProgram(const Program& program);

@ -47,12 +47,19 @@ static IR::U1 GetCond(IR::Condition cond, IR::IREmitter& ir) {
static void EmitBranch(const Flow::Block& flow_block, std::span<IR::Block* const> block_map,
IR::IREmitter& ir) {
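// Record IR-level predecessor edges while branches are emitted so that
// Block::ImmediatePredecessors() is populated before the SSA rewrite pass runs.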
const auto add_immediate_predecessor = [&](Flow::BlockId label) {
block_map[label]->AddImmediatePredecessor(&ir.block);
};
if (flow_block.cond == true) {
add_immediate_predecessor(flow_block.branch_true);
return ir.Branch(block_map[flow_block.branch_true]);
}
if (flow_block.cond == false) {
add_immediate_predecessor(flow_block.branch_false);
return ir.Branch(block_map[flow_block.branch_false]);
}
add_immediate_predecessor(flow_block.branch_true);
add_immediate_predecessor(flow_block.branch_false);
return ir.BranchConditional(GetCond(flow_block.cond, ir), block_map[flow_block.branch_true],
block_map[flow_block.branch_false]);
}

@ -11,6 +11,7 @@
namespace Shader::Maxwell {
/// Emit termination instructions and collect immediate predecessors
void EmitTerminationCode(const Flow::Block& flow_block, std::span<IR::Block* const> block_map);
} // namespace Shader::Maxwell

@ -208,7 +208,7 @@ public:
void P2R_reg(u64 insn);
void P2R_cbuf(u64 insn);
void P2R_imm(u64 insn);
void PBK(u64 insn);
void PBK();
void PCNT(u64 insn);
void PEXIT(u64 insn);
void PIXLD(u64 insn);
@ -252,7 +252,7 @@ public:
void SHR_reg(u64 insn);
void SHR_cbuf(u64 insn);
void SHR_imm(u64 insn);
void SSY(u64 insn);
void SSY();
void ST(u64 insn);
void STG(u64 insn);
void STL(u64 insn);

@ -762,7 +762,7 @@ void TranslatorVisitor::P2R_imm(u64) {
ThrowNotImplemented(Opcode::P2R_imm);
}
void TranslatorVisitor::PBK(u64) {
void TranslatorVisitor::PBK() {
// PBK is a no-op
}
@ -938,8 +938,8 @@ void TranslatorVisitor::SHR_imm(u64) {
ThrowNotImplemented(Opcode::SHR_imm);
}
void TranslatorVisitor::SSY(u64) {
ThrowNotImplemented(Opcode::SSY);
void TranslatorVisitor::SSY() {
// SSY is a no-op
}
void TranslatorVisitor::ST(u64) {

@ -28,7 +28,6 @@ void IdentityRemovalPass(IR::Block& block) {
++inst;
}
}
for (IR::Inst* const inst : to_invalidate) {
inst->Invalidate();
}

@ -5,12 +5,21 @@
#pragma once
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/function.h"
namespace Shader::Optimization {
template <typename Func>
void Invoke(Func&& func, IR::Function& function) {
for (const auto& block : function.blocks) {
func(*block);
}
}
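// Example (as in program.cpp): Invoke(DeadCodeEliminationPass, function) applies the
// per-block pass to every block owned by the function.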
void DeadCodeEliminationPass(IR::Block& block);
void GetSetElimination(IR::Block& block);
void IdentityRemovalPass(IR::Block& block);
void SsaRewritePass(IR::Function& function);
void VerificationPass(const IR::Block& block);
} // namespace Shader::Optimization

@ -0,0 +1,155 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file implements the SSA rewriting algorithm proposed in
//
// Simple and Efficient Construction of Static Single Assignment Form.
// Braun M., Buchwald S., Hack S., Leißa R., Mallon C., Zwinkau A. (2013)
// In: Jhala R., De Bosschere K. (eds)
// Compiler Construction. CC 2013.
// Lecture Notes in Computer Science, vol 7791.
// Springer, Berlin, Heidelberg
//
// https://link.springer.com/chapter/10.1007/978-3-642-37051-9_6
//
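// Rough sketch of how the pass below maps onto the paper (illustrative only): writes to a
// register or predicate call WriteVariable(variable, block, value); reads call ReadVariable,
// which returns the local definition when one exists and otherwise recurses into the
// predecessors. For a single predecessor the value is forwarded directly; for a join block an
// operandless Phi is inserted first (to break cycles), its operands are then filled in from
// each predecessor, and phis that turn out to merge a single value are removed as trivial.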
#include <map>
#include <boost/container/flat_map.hpp>
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/function.h"
#include "shader_recompiler/frontend/ir/microinstruction.h"
#include "shader_recompiler/frontend/ir/opcode.h"
#include "shader_recompiler/frontend/ir/pred.h"
#include "shader_recompiler/frontend/ir/reg.h"
#include "shader_recompiler/ir_opt/passes.h"
namespace Shader::Optimization {
namespace {
using ValueMap = boost::container::flat_map<IR::Block*, IR::Value, std::less<IR::Block*>>;
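// "current_def" from the paper: for every general-purpose register and predicate, a map from
// block to its latest definition there. RZ and PT are never rewritten (SsaRewritePass skips
// them), so the user-visible counts are sufficient.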
struct DefTable {
[[nodiscard]] ValueMap& operator[](IR::Reg variable) noexcept {
return regs[IR::RegIndex(variable)];
}
[[nodiscard]] ValueMap& operator[](IR::Pred variable) noexcept {
return preds[IR::PredIndex(variable)];
}
std::array<ValueMap, IR::NUM_USER_REGS> regs;
std::array<ValueMap, IR::NUM_USER_PREDS> preds;
};
IR::Opcode UndefOpcode(IR::Reg) noexcept {
return IR::Opcode::Undef32;
}
IR::Opcode UndefOpcode(IR::Pred) noexcept {
return IR::Opcode::Undef1;
}
[[nodiscard]] bool IsPhi(const IR::Inst& inst) noexcept {
return inst.Opcode() == IR::Opcode::Phi;
}
class Pass {
public:
void WriteVariable(auto variable, IR::Block* block, const IR::Value& value) {
current_def[variable].insert_or_assign(block, value);
}
IR::Value ReadVariable(auto variable, IR::Block* block) {
auto& def{current_def[variable]};
if (const auto it{def.find(block)}; it != def.end()) {
return it->second;
}
return ReadVariableRecursive(variable, block);
}
private:
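// Reached when the block has no local definition: a single predecessor simply forwards its
// value; otherwise (joins, or the entry block with no predecessors at all) an operandless phi
// is written first to break cycles and then completed through AddPhiOperands.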
IR::Value ReadVariableRecursive(auto variable, IR::Block* block) {
IR::Value val;
if (const std::span preds{block->ImmediatePredecessors()}; preds.size() == 1) {
val = ReadVariable(variable, preds.front());
} else {
// Break potential cycles with operandless phi
val = IR::Value{&*block->PrependNewInst(block->begin(), IR::Opcode::Phi)};
WriteVariable(variable, block, val);
val = AddPhiOperands(variable, val, block);
}
WriteVariable(variable, block, val);
return val;
}
IR::Value AddPhiOperands(auto variable, const IR::Value& phi, IR::Block* block) {
for (IR::Block* const pred : block->ImmediatePredecessors()) {
phi.Inst()->AddPhiOperand(pred, ReadVariable(variable, pred));
}
return TryRemoveTrivialPhi(phi, block, UndefOpcode(variable));
}
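// A phi whose operands are all the same value (ignoring self-references) is trivial: every use
// is redirected to that value. If no such value exists the block is unreachable or the entry
// block, and an Undef* instruction is materialized instead.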
IR::Value TryRemoveTrivialPhi(const IR::Value& phi, IR::Block* block, IR::Opcode undef_opcode) {
IR::Value same;
for (const auto& pair : phi.Inst()->PhiOperands()) {
const IR::Value& op{pair.second};
if (op == same || op == phi) {
// Unique value or self-reference
continue;
}
if (!same.IsEmpty()) {
// The phi merges at least two values: not trivial
return phi;
}
same = op;
}
if (same.IsEmpty()) {
// The phi is unreachable or in the start block
const auto first_not_phi{std::ranges::find_if_not(block->Instructions(), IsPhi)};
same = IR::Value{&*block->PrependNewInst(first_not_phi, undef_opcode)};
}
// Reroute all uses of phi to same and remove phi
phi.Inst()->ReplaceUsesWith(same);
// TODO: Try to recursively remove all phi users, which might have become trivial
return same;
}
DefTable current_def;
};
} // Anonymous namespace
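// Driver: SetRegister/SetPred become SSA definitions and GetRegister/GetPred are replaced by
// ReadVariable results. RZ and PT are left untouched since they are hardwired to zero and
// true respectively.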
void SsaRewritePass(IR::Function& function) {
Pass pass;
for (const auto& block : function.blocks) {
for (IR::Inst& inst : block->Instructions()) {
switch (inst.Opcode()) {
case IR::Opcode::SetRegister:
if (const IR::Reg reg{inst.Arg(0).Reg()}; reg != IR::Reg::RZ) {
pass.WriteVariable(reg, block.get(), inst.Arg(1));
}
break;
case IR::Opcode::SetPred:
if (const IR::Pred pred{inst.Arg(0).Pred()}; pred != IR::Pred::PT) {
pass.WriteVariable(pred, block.get(), inst.Arg(1));
}
break;
case IR::Opcode::GetRegister:
if (const IR::Reg reg{inst.Arg(0).Reg()}; reg != IR::Reg::RZ) {
inst.ReplaceUsesWith(pass.ReadVariable(reg, block.get()));
}
break;
case IR::Opcode::GetPred:
if (const IR::Pred pred{inst.Arg(0).Pred()}; pred != IR::Pred::PT) {
inst.ReplaceUsesWith(pass.ReadVariable(pred, block.get()));
}
break;
default:
break;
}
}
}
}
} // namespace Shader::Optimization

@ -35,12 +35,12 @@ void RunDatabase() {
ForEachFile("D:\\Shaders\\Database", [&](const std::filesystem::path& path) {
map.emplace_back(std::make_unique<FileEnvironment>(path.string().c_str()));
});
for (int i = 0; i < 1; ++i) {
for (int i = 0; i < 300; ++i) {
for (auto& env : map) {
// fmt::print(stdout, "Decoding {}\n", path.string());
const Location start_address{0};
auto cfg{std::make_unique<Flow::CFG>(*env, start_address)};
// fmt::print(stdout, "{}\n", cfg.Dot());
// fmt::print(stdout, "{}\n", cfg->Dot());
// IR::Program program{env, cfg};
// Optimize(program);
// const std::string code{EmitGLASM(program)};