// Merge pull request #5266 from bunnei/kernel-synch (branch target: master)
// Rewrite KSynchronizationObject, KConditionVariable, and KAddressArbiter
// commit eb3cb54aa5
// --- First file in this extract (new file, +627 lines): Common intrusive red-black tree ---
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "common/parent_of_member.h"
|
||||||
|
#include "common/tree.h"
|
||||||
|
|
||||||
|
namespace Common {
|
||||||
|
|
||||||
|
namespace impl {
|
||||||
|
|
||||||
|
class IntrusiveRedBlackTreeImpl;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node that objects embed (as a base or as a member) so they can be linked
// into an IntrusiveRedBlackTree without any per-element allocation.
struct IntrusiveRedBlackTreeNode {
private:
    // Intrusive link storage declared via the BSD tree.h RB_ENTRY macro;
    // its exact fields are defined by tree.h and are private to the tree code.
    RB_ENTRY(IntrusiveRedBlackTreeNode) entry{};

    // Only the tree implementations may manipulate the link record.
    friend class impl::IntrusiveRedBlackTreeImpl;

    template <class, class, class>
    friend class IntrusiveRedBlackTree;

public:
    constexpr IntrusiveRedBlackTreeNode() = default;
};
|
||||||
|
|
||||||
|
template <class T, class Traits, class Comparator>
|
||||||
|
class IntrusiveRedBlackTree;
|
||||||
|
|
||||||
|
namespace impl {
|
||||||
|
|
||||||
|
// Type-erased red-black tree core that operates purely on
// IntrusiveRedBlackTreeNode. The typed wrapper
// IntrusiveRedBlackTree<T, Traits, Comparator> layers parent<->node
// translation and comparisons on top of this class.
class IntrusiveRedBlackTreeImpl {
private:
    template <class, class, class>
    friend class ::Common::IntrusiveRedBlackTree;

private:
    // Tree head type generated by the BSD tree.h macro set.
    RB_HEAD(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode);
    using RootType = IntrusiveRedBlackTreeRoot;

private:
    IntrusiveRedBlackTreeRoot root;

public:
    template <bool Const>
    class Iterator;

    using value_type = IntrusiveRedBlackTreeNode;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;

    // Bidirectional iterator over the raw nodes; Const selects the const view.
    template <bool Const>
    class Iterator {
    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveRedBlackTreeImpl::value_type;
        using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type;
        using pointer = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_pointer,
                                           IntrusiveRedBlackTreeImpl::pointer>;
        using reference = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_reference,
                                             IntrusiveRedBlackTreeImpl::reference>;

    private:
        pointer node;

    public:
        explicit Iterator(pointer n) : node(n) {}

        bool operator==(const Iterator& rhs) const {
            return this->node == rhs.node;
        }

        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return this->node;
        }

        reference operator*() const {
            return *this->node;
        }

        Iterator& operator++() {
            this->node = GetNext(this->node);
            return *this;
        }

        Iterator& operator--() {
            this->node = GetPrev(this->node);
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        // Mutable iterators implicitly convert to const iterators.
        operator Iterator<true>() const {
            return Iterator<true>(this->node);
        }
    };

protected:
    // Generate static implementations for non-comparison operations for IntrusiveRedBlackTreeRoot.
    RB_GENERATE_WITHOUT_COMPARE_STATIC(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode, entry);

private:
    // Define accessors using RB_* functions.
    constexpr void InitializeImpl() {
        RB_INIT(&this->root);
    }

    bool EmptyImpl() const {
        return RB_EMPTY(&this->root);
    }

    // const_cast is needed because the RB_* macros take a mutable root even
    // for read-only queries; logical constness is preserved.
    IntrusiveRedBlackTreeNode* GetMinImpl() const {
        return RB_MIN(IntrusiveRedBlackTreeRoot,
                      const_cast<IntrusiveRedBlackTreeRoot*>(&this->root));
    }

    IntrusiveRedBlackTreeNode* GetMaxImpl() const {
        return RB_MAX(IntrusiveRedBlackTreeRoot,
                      const_cast<IntrusiveRedBlackTreeRoot*>(&this->root));
    }

    IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_REMOVE(IntrusiveRedBlackTreeRoot, &this->root, node);
    }

public:
    // In-order successor/predecessor. These only follow the node's own links,
    // so no tree instance is required (the head argument is unused: nullptr).
    static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
        return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node);
    }

    static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
        return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node);
    }

    static IntrusiveRedBlackTreeNode const* GetNext(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

    static IntrusiveRedBlackTreeNode const* GetPrev(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

public:
    constexpr IntrusiveRedBlackTreeImpl() : root() {
        this->InitializeImpl();
    }

    // Iterator accessors.
    iterator begin() {
        return iterator(this->GetMinImpl());
    }

    const_iterator begin() const {
        return const_iterator(this->GetMinImpl());
    }

    // end() is represented by a null node pointer.
    iterator end() {
        return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
    }

    const_iterator end() const {
        return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    // O(1): an iterator is just a node pointer, so any linked node can be
    // wrapped directly.
    iterator iterator_to(reference ref) {
        return iterator(&ref);
    }

    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(&ref);
    }

    // Content management.
    bool empty() const {
        return this->EmptyImpl();
    }

    // back()/front() have undefined behavior on an empty tree (Get*Impl
    // returns null, which is then dereferenced) — callers must check empty().
    reference back() {
        return *this->GetMaxImpl();
    }

    const_reference back() const {
        return *this->GetMaxImpl();
    }

    reference front() {
        return *this->GetMinImpl();
    }

    const_reference front() const {
        return *this->GetMinImpl();
    }

    // Unlinks *it from the tree; returns an iterator to its in-order
    // successor. The node itself is not destroyed (intrusive container).
    iterator erase(iterator it) {
        auto cur = std::addressof(*it);
        auto next = GetNext(cur);
        this->RemoveImpl(cur);
        return iterator(next);
    }
};
|
||||||
|
|
||||||
|
} // namespace impl
|
||||||
|
|
||||||
|
// Satisfied when T declares a nested LightCompareType; used below to let
// comparator types opt in to key-only ("light") lookups.
template <typename T>
concept HasLightCompareType = requires {
    { std::is_same<typename T::LightCompareType, void>::value }
    ->std::convertible_to<bool>;
};
|
||||||
|
|
||||||
|
namespace impl {
|
||||||
|
|
||||||
|
// Returns a null pointer whose type is T::LightCompareType when T declares
// one, otherwise Default. Only the deduced pointee type is meaningful; the
// value is always nullptr (consumed via decltype below).
template <typename T, typename Default>
consteval auto* GetLightCompareType() {
    if constexpr (HasLightCompareType<T>) {
        return static_cast<typename T::LightCompareType*>(nullptr);
    } else {
        return static_cast<Default*>(nullptr);
    }
}
|
||||||
|
|
||||||
|
} // namespace impl
|
||||||
|
|
||||||
|
// T::LightCompareType if T declares one, else Default.
template <typename T, typename Default>
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
|
||||||
|
|
||||||
|
// Typed intrusive red-black tree. T is the element type; Traits translates
// between T* and its IntrusiveRedBlackTreeNode (see the *Traits classes
// below); Comparator::Compare(lhs, rhs) returns an int consumed by the RB_*
// machinery (conventionally <0 / 0 / >0, strcmp-style). The tree never owns
// or allocates elements.
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {

public:
    using ImplType = impl::IntrusiveRedBlackTreeImpl;

private:
    ImplType impl{};

public:
    // Distinct root type so the comparison-aware RB_GENERATE expansion below
    // does not collide with the compare-free expansion inside ImplType.
    struct IntrusiveRedBlackTreeRootWithCompare : ImplType::IntrusiveRedBlackTreeRoot {};

    template <bool Const>
    class Iterator;

    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = const T&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;

    // "Light" lookups compare against a key type the comparator declares
    // (Comparator::LightCompareType), avoiding construction of a full T.
    using light_value_type = LightCompareType<Comparator, value_type>;
    using const_light_pointer = const light_value_type*;
    using const_light_reference = const light_value_type&;

    // Bidirectional iterator over T; wraps the node-level iterator and applies
    // Traits::GetParent on dereference.
    template <bool Const>
    class Iterator {
    public:
        friend class IntrusiveRedBlackTree<T, Traits, Comparator>;

        using ImplIterator =
            std::conditional_t<Const, ImplType::const_iterator, ImplType::iterator>;

        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveRedBlackTree::value_type;
        using difference_type = typename IntrusiveRedBlackTree::difference_type;
        using pointer = std::conditional_t<Const, IntrusiveRedBlackTree::const_pointer,
                                           IntrusiveRedBlackTree::pointer>;
        using reference = std::conditional_t<Const, IntrusiveRedBlackTree::const_reference,
                                             IntrusiveRedBlackTree::reference>;

    private:
        ImplIterator iterator;

    private:
        explicit Iterator(ImplIterator it) : iterator(it) {}

        explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
                                                    ImplType::iterator>::type::pointer ptr)
            : iterator(ptr) {}

        ImplIterator GetImplIterator() const {
            return this->iterator;
        }

    public:
        bool operator==(const Iterator& rhs) const {
            return this->iterator == rhs.iterator;
        }

        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return Traits::GetParent(std::addressof(*this->iterator));
        }

        reference operator*() const {
            return *Traits::GetParent(std::addressof(*this->iterator));
        }

        Iterator& operator++() {
            ++this->iterator;
            return *this;
        }

        Iterator& operator--() {
            --this->iterator;
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++this->iterator;
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --this->iterator;
            return it;
        }

        // Mutable iterators implicitly convert to const iterators.
        operator Iterator<true>() const {
            return Iterator<true>(this->iterator);
        }
    };

private:
    // Generate static implementations for comparison operations for IntrusiveRedBlackTreeRoot.
    RB_GENERATE_WITH_COMPARE_STATIC(IntrusiveRedBlackTreeRootWithCompare, IntrusiveRedBlackTreeNode,
                                    entry, CompareImpl, LightCompareImpl);

private:
    // Node-level comparison shims handed to the RB_* machinery: translate
    // nodes back to their parents, then defer to Comparator.
    static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
                           const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
    }

    static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
    }

    // Define accessors using RB_* functions. The casts downcast the shared
    // impl root to the compare-aware root type the generated code expects;
    // const_cast is needed because RB_* take a mutable root even for lookups.
    IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_INSERT(IntrusiveRedBlackTreeRootWithCompare,
                         static_cast<IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root),
                         node);
    }

    IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_FIND(
            IntrusiveRedBlackTreeRootWithCompare,
            const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
                static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
            const_cast<IntrusiveRedBlackTreeNode*>(node));
    }

    IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_NFIND(
            IntrusiveRedBlackTreeRootWithCompare,
            const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
                static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
            const_cast<IntrusiveRedBlackTreeNode*>(node));
    }

    IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
        return RB_FIND_LIGHT(
            IntrusiveRedBlackTreeRootWithCompare,
            const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
                static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
            static_cast<const void*>(lelm));
    }

    IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
        return RB_NFIND_LIGHT(
            IntrusiveRedBlackTreeRootWithCompare,
            const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
                static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
            static_cast<const void*>(lelm));
    }

public:
    constexpr IntrusiveRedBlackTree() = default;

    // Iterator accessors.
    iterator begin() {
        return iterator(this->impl.begin());
    }

    const_iterator begin() const {
        return const_iterator(this->impl.begin());
    }

    iterator end() {
        return iterator(this->impl.end());
    }

    const_iterator end() const {
        return const_iterator(this->impl.end());
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    // O(1): wraps the element's embedded node directly.
    iterator iterator_to(reference ref) {
        return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    // Content management.
    bool empty() const {
        return this->impl.empty();
    }

    // back()/front() require a non-empty tree (see ImplType::back/front).
    reference back() {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    const_reference back() const {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    reference front() {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    const_reference front() const {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    // Unlinks *it; returns an iterator to its in-order successor. The element
    // itself is not destroyed.
    iterator erase(iterator it) {
        return iterator(this->impl.erase(it.GetImplIterator()));
    }

    // Links ref into the tree (no allocation) and returns an iterator to it.
    iterator insert(reference ref) {
        ImplType::pointer node = Traits::GetNode(std::addressof(ref));
        this->InsertImpl(node);
        return iterator(node);
    }

    // find: RB_FIND lookup comparing against ref via Comparator.
    iterator find(const_reference ref) const {
        return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
    }

    // nfind: RB_NFIND lookup (nearest-match semantics defined by tree.h —
    // confirm against the RB_NFIND implementation).
    iterator nfind(const_reference ref) const {
        return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
    }

    // Key-only variants: compare against a light_value_type key instead of a
    // fully-constructed T.
    iterator find_light(const_light_reference ref) const {
        return iterator(this->FindLightImpl(std::addressof(ref)));
    }

    iterator nfind_light(const_light_reference ref) const {
        return iterator(this->NFindLightImpl(std::addressof(ref)));
    }
};
|
||||||
|
|
||||||
|
// Traits for trees whose node lives as a data member of the element
// (selected by a pointer-to-member, e.g. &Derived::tree_node).
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
public:
    template <class Comparator>
    using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    // Element -> embedded node.
    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return std::addressof(parent->*Member);
    }

    // Embedded node -> owning element, via the parent_of_member offset machinery.
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

private:
    // Compile-time round-trip check: GetParent must invert GetNode.
    static constexpr TYPED_STORAGE(Derived) DerivedStorage = {};
    static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
};
|
||||||
|
|
||||||
|
// Same as IntrusiveRedBlackTreeMemberTraits, except the GetNode/GetParent
// round-trip check is exposed as IsValid() for callers to assert at a point
// of their choosing, instead of a static_assert at class scope.
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert<Member, Derived> {
public:
    template <class Comparator>
    using TreeType =
        IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

    // Deferred validity check: true iff GetParent inverts GetNode for Derived.
    static constexpr bool IsValid() {
        TYPED_STORAGE(Derived) DerivedStorage = {};
        return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
    }

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    // Element -> embedded node.
    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return std::addressof(parent->*Member);
    }

    // Embedded node -> owning element.
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }
};
|
||||||
|
|
||||||
|
// Node base for elements that inherit their tree node (CRTP): exposes typed
// in-order neighbor traversal directly on the element.
template <class Derived>
class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
public:
    constexpr Derived* GetPrev() {
        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
    }
    constexpr const Derived* GetPrev() const {
        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
    }

    constexpr Derived* GetNext() {
        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
    }
    constexpr const Derived* GetNext() const {
        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
    }
};
|
||||||
|
|
||||||
|
// Traits for trees whose elements derive from IntrusiveRedBlackTreeNode
// (e.g. via IntrusiveRedBlackTreeBaseNode): node and element are the same
// object, so translation is a static_cast in each direction.
template <class Derived>
class IntrusiveRedBlackTreeBaseTraits {
public:
    template <class Comparator>
    using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    // Element -> node: upcast.
    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return static_cast<IntrusiveRedBlackTreeNode*>(parent);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
    }

    // Node -> element: downcast (valid because every linked node is a Derived).
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return static_cast<Derived*>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const Derived*>(node);
    }
};
|
||||||
|
|
||||||
|
} // namespace Common
|
// --- Second file in this extract (new file, +189 lines): parent-of-member helpers
//     (TypedStorage / OffsetOf / GetParentPointer — presumably common/parent_of_member.h,
//     included above; confirm against the repository) ---
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <type_traits>
|
||||||
|
|
||||||
|
#include "common/assert.h"
|
||||||
|
#include "common/common_types.h"
|
||||||
|
|
||||||
|
namespace Common {
|
||||||
|
|
||||||
|
// Raw storage with the size and alignment of T, without constructing a T.
template <typename T, size_t Size, size_t Align>
struct TypedStorage {
    std::aligned_storage_t<Size, Align> storage_;
};

// Convenience spelling: TYPED_STORAGE(T) == TypedStorage<T, sizeof(T), alignof(T)>.
#define TYPED_STORAGE(...) TypedStorage<__VA_ARGS__, sizeof(__VA_ARGS__), alignof(__VA_ARGS__)>
|
||||||
|
|
||||||
|
// Views a TypedStorage<T> as a T* (the T may not have been constructed yet;
// callers are responsible for object lifetime).
template <typename T>
static constexpr T* GetPointer(TYPED_STORAGE(T) & ts) {
    return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}

template <typename T>
static constexpr const T* GetPointer(const TYPED_STORAGE(T) & ts) {
    return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
|
||||||
|
|
||||||
|
namespace impl {
|
||||||
|
|
||||||
|
// Union "ladder" used to recover a member's byte offset from a
// pointer-to-member at compile time. Level `Offset` overlays a packed struct
// of `Offset` padding bytes followed by an array of MemberType; the
// calculator below scans each level's array for the address the member
// pointer designates. Recursion stops at MaxDepth (== sizeof(MemberType)).
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
    template <typename ParentType, typename MemberType, size_t Offset>
    union UnionImpl {
        using PaddingMember = char;
        static constexpr size_t GetOffset() {
            return Offset;
        }

// pack(1) so the padding bytes shift `members` by exactly Offset bytes.
#pragma pack(push, 1)
        struct {
            PaddingMember padding[Offset];
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
#pragma pack(pop)
        UnionImpl<ParentType, MemberType, Offset + 1> next_union;
    };

    // Base level: zero padding (a zero-length array would be ill-formed).
    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, 0> {
        static constexpr size_t GetOffset() {
            return 0;
        }

        struct {
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
        UnionImpl<ParentType, MemberType, 1> next_union;
    };

    // Terminator: ends the recursive instantiation at MaxDepth.
    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
|
||||||
|
|
||||||
|
// Computes the byte offset of a MemberType member within ParentType from a
// pointer-to-member, usable in constant expressions (unlike offsetof, which
// does not accept member pointers). Works by overlaying the union ladder
// above on storage for ParentType and locating the member's address within
// the shifted MemberType arrays.
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
    using UnionHolder =
        typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
                                                                             0>;
    union Union {
        char c{};
        UnionHolder first_union;
        TYPED_STORAGE(ParentType) parent;

        // Initialize the char member so the union is constexpr-constructible.
        constexpr Union() : c() {}
    };
    static constexpr Union U = {};

    // Advances start to target one element at a time (pointer comparison only;
    // keeps the walk valid in constant evaluation).
    static constexpr const MemberType* GetNextAddress(const MemberType* start,
                                                      const MemberType* target) {
        while (start < target) {
            start++;
        }
        return start;
    }

    static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
                                                  const MemberType* target) {
        return (target - start) * sizeof(MemberType);
    }

    // Tries each ladder level: if the member's address falls on this level's
    // element grid, offset = whole elements walked * sizeof(MemberType) +
    // this level's byte shift; otherwise recurse to the next level.
    template <typename CurUnion>
    static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
                                                 CurUnion& cur_union) {
        constexpr size_t Offset = CurUnion::GetOffset();
        const auto target = std::addressof(GetPointer(U.parent)->*member);
        const auto start = std::addressof(cur_union.data.members[0]);
        const auto next = GetNextAddress(start, target);

        if (next != target) {
            if constexpr (Offset < sizeof(MemberType) - 1) {
                return OffsetOfImpl(member, cur_union.next_union);
            } else {
                // Exhausted all sizeof(MemberType) sub-element shifts without
                // landing on the member — should be impossible.
                UNREACHABLE();
            }
        }

        return (next - start) * sizeof(MemberType) + Offset;
    }

    static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
        return OffsetOfImpl(member, U.first_union);
    }
};
|
||||||
|
|
||||||
|
// Decomposes a pointer-to-member type M P::* into its class (Parent) and
// field (Member) types.
template <typename T>
struct GetMemberPointerTraits;

template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
    using Parent = P;
    using Member = M;
};
|
||||||
|
|
||||||
|
// Class type a member-pointer non-type template parameter belongs to.
template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;

// Field type designated by a member-pointer non-type template parameter.
template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
|
||||||
|
|
||||||
|
// Byte offset of the member designated by MemberPtr within RealParentType,
// computed once per instantiation via OffsetOfCalculator.
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
static inline std::ptrdiff_t OffsetOf = [] {
    using DeducedParentType = GetParentType<MemberPtr>;
    using MemberType = GetMemberType<MemberPtr>;
    // RealParentType may be a derived refinement of the deduced parent, but
    // must be related to it.
    static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
                  std::is_same<RealParentType, DeducedParentType>::value);

    return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
}();
|
||||||
|
|
||||||
|
} // namespace impl
|
||||||
|
|
||||||
|
// Recovers a reference to the object containing *member, given a pointer to
// the field selected by MemberPtr (inverse of taking the member's address).
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
    // Step back Offset bytes from the member to reach the enclosing object.
    return *static_cast<RealParentType*>(
        static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
    return *static_cast<const RealParentType*>(static_cast<const void*>(
        static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}
|
||||||
|
|
||||||
|
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||||
|
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
|
||||||
|
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||||
|
}
|
||||||
|
|
||||||
|
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||||
|
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
|
||||||
|
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference-taking conveniences: forward to the pointer overloads above.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
|
||||||
|
|
||||||
|
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||||
|
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
|
||||||
|
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||||
|
}
|
||||||
|
|
||||||
|
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||||
|
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
|
||||||
|
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace Common
|
/* --- Third file in this extract (new file, +822 lines): BSD sys/tree.h
       (splay / red-black tree macros — presumably common/tree.h, included
       above; truncated below at SPLAY_INITIALIZER) --- */
|
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
|
||||||
|
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
|
||||||
|
/* $FreeBSD$ */
|
||||||
|
|
||||||
|
/*-
|
||||||
|
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions
|
||||||
|
* are met:
|
||||||
|
* 1. Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* 2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer in the
|
||||||
|
* documentation and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||||
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||||
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||||
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||||
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||||
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||||
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Include guard for the vendored FreeBSD <sys/tree.h>. */
#ifndef _SYS_TREE_H_
#define _SYS_TREE_H_

/* FreeBSD <sys/cdefs.h> has a lot of defines we don't really want. */
/* tree.h only actually uses __inline and __unused, so we'll just define those. */

/* #include <sys/cdefs.h> */

/* Map the BSD __inline keyword spelling onto standard `inline`. */
#ifndef __inline
#define __inline inline
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This file defines data structures for different types of trees:
|
||||||
|
* splay trees and red-black trees.
|
||||||
|
*
|
||||||
|
* A splay tree is a self-organizing data structure. Every operation
|
||||||
|
* on the tree causes a splay to happen. The splay moves the requested
|
||||||
|
* node to the root of the tree and partly rebalances it.
|
||||||
|
*
|
||||||
|
* This has the benefit that request locality causes faster lookups as
|
||||||
|
* the requested nodes move to the top of the tree. On the other hand,
|
||||||
|
* every lookup causes memory writes.
|
||||||
|
*
|
||||||
|
* The Balance Theorem bounds the total access time for m operations
|
||||||
|
* and n inserts on an initially empty tree as O((m + n)lg n). The
|
||||||
|
 * amortized cost for a sequence of m accesses to a splay tree is O(lg n).
|
||||||
|
*
|
||||||
|
* A red-black tree is a binary search tree with the node color as an
|
||||||
|
* extra attribute. It fulfills a set of conditions:
|
||||||
|
* - every search path from the root to a leaf consists of the
|
||||||
|
* same number of black nodes,
|
||||||
|
* - each red node (except for the root) has a black parent,
|
||||||
|
* - each leaf node is black.
|
||||||
|
*
|
||||||
|
* Every operation on a red-black tree is bounded as O(lg n).
|
||||||
|
* The maximum height of a red-black tree is 2lg (n+1).
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Declares the splay-tree head structure: just a pointer to the root node. */
#define SPLAY_HEAD(name, type) \
    struct name { \
        struct type* sph_root; /* root of the tree */ \
    }

/* Static initializer for an empty SPLAY_HEAD. */
#define SPLAY_INITIALIZER(root) \
    { NULL }

/* Runtime initialization of a SPLAY_HEAD to the empty tree. */
#define SPLAY_INIT(root) \
    do { \
        (root)->sph_root = NULL; \
    } while (/*CONSTCOND*/ 0)

/* Per-node linkage embedded in the element type (left/right children only;
 * splay trees keep no parent pointers or balance metadata). */
#define SPLAY_ENTRY(type) \
    struct { \
        struct type* spe_left;  /* left element */ \
        struct type* spe_right; /* right element */ \
    }

/* Accessors for the embedded linkage and the tree head. */
#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
#define SPLAY_ROOT(head) (head)->sph_root
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
|
||||||
|
|
||||||
|
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
/* Single rotation at the root: tmp becomes the new root. */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) \
    do { \
        SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
        SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
        (head)->sph_root = tmp; \
    } while (/*CONSTCOND*/ 0)

#define SPLAY_ROTATE_LEFT(head, tmp, field) \
    do { \
        SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
        SPLAY_LEFT(tmp, field) = (head)->sph_root; \
        (head)->sph_root = tmp; \
    } while (/*CONSTCOND*/ 0)

/* Top-down splay helpers: detach the current root onto the right (LINKLEFT)
 * or left (LINKRIGHT) partial tree and descend one level. */
#define SPLAY_LINKLEFT(head, tmp, field) \
    do { \
        SPLAY_LEFT(tmp, field) = (head)->sph_root; \
        tmp = (head)->sph_root; \
        (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
    } while (/*CONSTCOND*/ 0)

#define SPLAY_LINKRIGHT(head, tmp, field) \
    do { \
        SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
        tmp = (head)->sph_root; \
        (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
    } while (/*CONSTCOND*/ 0)

/* Reattach the accumulated left/right partial trees around the final root
 * (`node` is the on-stack dummy header used during the top-down splay). */
#define SPLAY_ASSEMBLE(head, node, left, right, field) \
    do { \
        SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
        SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
        SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
        SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
    } while (/*CONSTCOND*/ 0)
|
||||||
|
|
||||||
|
/* Generates prototypes and inline functions */
/* Emits prototypes for the out-of-line splay operations (defined by
 * SPLAY_GENERATE) plus small inline wrappers FIND/NEXT/MIN_MAX. Note that
 * FIND and NEXT splay the tree, so lookups mutate it. */
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
    void name##_SPLAY(struct name*, struct type*); \
    void name##_SPLAY_MINMAX(struct name*, int); \
    struct type* name##_SPLAY_INSERT(struct name*, struct type*); \
    struct type* name##_SPLAY_REMOVE(struct name*, struct type*); \
    \
    /* Finds the node with the same key as elm */ \
    static __inline struct type* name##_SPLAY_FIND(struct name* head, struct type* elm) { \
        if (SPLAY_EMPTY(head)) \
            return (NULL); \
        name##_SPLAY(head, elm); \
        if ((cmp)(elm, (head)->sph_root) == 0) \
            return (head->sph_root); \
        return (NULL); \
    } \
    \
    /* In-order successor: splays elm to the root, then takes the minimum \
     * of its right subtree (NULL if elm is the maximum). */ \
    static __inline struct type* name##_SPLAY_NEXT(struct name* head, struct type* elm) { \
        name##_SPLAY(head, elm); \
        if (SPLAY_RIGHT(elm, field) != NULL) { \
            elm = SPLAY_RIGHT(elm, field); \
            while (SPLAY_LEFT(elm, field) != NULL) { \
                elm = SPLAY_LEFT(elm, field); \
            } \
        } else \
            elm = NULL; \
        return (elm); \
    } \
    \
    /* val < 0 splays the minimum to the root, otherwise the maximum. */ \
    static __inline struct type* name##_SPLAY_MIN_MAX(struct name* head, int val) { \
        name##_SPLAY_MINMAX(head, val); \
        return (SPLAY_ROOT(head)); \
    }
|
||||||
|
|
||||||
|
/* Main splay operation.
 * Moves node close to the key of elm to top
 */
/* Emits the out-of-line definitions declared by SPLAY_PROTOTYPE.
 * INSERT returns NULL on success or the existing node on a duplicate key;
 * REMOVE returns the removed node or NULL if not found.
 * name##_SPLAY is the classic top-down splay using an on-stack dummy node.
 * NOTE(review): in name##_SPLAY_MINMAX the inner `if (__comp < 0)` /
 * `if (__comp > 0)` tests repeat the enclosing condition (always true here,
 * unlike name##_SPLAY which re-compares against __tmp); this matches upstream
 * FreeBSD and is kept as-is. */
#define SPLAY_GENERATE(name, type, field, cmp) \
    struct type* name##_SPLAY_INSERT(struct name* head, struct type* elm) { \
        if (SPLAY_EMPTY(head)) { \
            SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
        } else { \
            int __comp; \
            name##_SPLAY(head, elm); \
            __comp = (cmp)(elm, (head)->sph_root); \
            if (__comp < 0) { \
                SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
                SPLAY_RIGHT(elm, field) = (head)->sph_root; \
                SPLAY_LEFT((head)->sph_root, field) = NULL; \
            } else if (__comp > 0) { \
                SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
                SPLAY_LEFT(elm, field) = (head)->sph_root; \
                SPLAY_RIGHT((head)->sph_root, field) = NULL; \
            } else \
                return ((head)->sph_root); \
        } \
        (head)->sph_root = (elm); \
        return (NULL); \
    } \
    \
    struct type* name##_SPLAY_REMOVE(struct name* head, struct type* elm) { \
        struct type* __tmp; \
        if (SPLAY_EMPTY(head)) \
            return (NULL); \
        name##_SPLAY(head, elm); \
        if ((cmp)(elm, (head)->sph_root) == 0) { \
            if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
                (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
            } else { \
                __tmp = SPLAY_RIGHT((head)->sph_root, field); \
                (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
                name##_SPLAY(head, elm); \
                SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
            } \
            return (elm); \
        } \
        return (NULL); \
    } \
    \
    void name##_SPLAY(struct name* head, struct type* elm) { \
        struct type __node, *__left, *__right, *__tmp; \
        int __comp; \
        \
        SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
        __left = __right = &__node; \
        \
        while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
            if (__comp < 0) { \
                __tmp = SPLAY_LEFT((head)->sph_root, field); \
                if (__tmp == NULL) \
                    break; \
                if ((cmp)(elm, __tmp) < 0) { \
                    SPLAY_ROTATE_RIGHT(head, __tmp, field); \
                    if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
                        break; \
                } \
                SPLAY_LINKLEFT(head, __right, field); \
            } else if (__comp > 0) { \
                __tmp = SPLAY_RIGHT((head)->sph_root, field); \
                if (__tmp == NULL) \
                    break; \
                if ((cmp)(elm, __tmp) > 0) { \
                    SPLAY_ROTATE_LEFT(head, __tmp, field); \
                    if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
                        break; \
                } \
                SPLAY_LINKRIGHT(head, __left, field); \
            } \
        } \
        SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
    } \
    \
    /* Splay with either the minimum or the maximum element \
     * Used to find minimum or maximum element in tree. \
     */ \
    void name##_SPLAY_MINMAX(struct name* head, int __comp) { \
        struct type __node, *__left, *__right, *__tmp; \
        \
        SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
        __left = __right = &__node; \
        \
        while (1) { \
            if (__comp < 0) { \
                __tmp = SPLAY_LEFT((head)->sph_root, field); \
                if (__tmp == NULL) \
                    break; \
                if (__comp < 0) { \
                    SPLAY_ROTATE_RIGHT(head, __tmp, field); \
                    if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
                        break; \
                } \
                SPLAY_LINKLEFT(head, __right, field); \
            } else if (__comp > 0) { \
                __tmp = SPLAY_RIGHT((head)->sph_root, field); \
                if (__tmp == NULL) \
                    break; \
                if (__comp > 0) { \
                    SPLAY_ROTATE_LEFT(head, __tmp, field); \
                    if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
                        break; \
                } \
                SPLAY_LINKRIGHT(head, __left, field); \
            } \
        } \
        SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
    }
|
||||||
|
|
||||||
|
/* Direction selectors for name##_SPLAY_MINMAX / MIN_MAX. */
#define SPLAY_NEGINF -1
#define SPLAY_INF 1

/* Convenience wrappers dispatching to the name##-prefixed functions. */
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_INF))

/* In-order traversal; splays on every step, and x must not be removed
 * inside the loop (no _SAFE variant exists for splay trees here). */
#define SPLAY_FOREACH(x, name, head) \
    for ((x) = SPLAY_MIN(name, head); (x) != NULL; (x) = SPLAY_NEXT(name, head, x))
|
||||||
|
|
||||||
|
/* Macros that define a red-black tree */
/* Tree head: a single root pointer. */
#define RB_HEAD(name, type) \
    struct name { \
        struct type* rbh_root; /* root of the tree */ \
    }

/* Static initializer for an empty RB_HEAD. */
#define RB_INITIALIZER(root) \
    { NULL }

/* Runtime initialization of an RB_HEAD to the empty tree. */
#define RB_INIT(root) \
    do { \
        (root)->rbh_root = NULL; \
    } while (/*CONSTCOND*/ 0)

/* Node colors. */
#define RB_BLACK 0
#define RB_RED 1
/* Per-node linkage embedded in the element type: child/parent pointers
 * plus the node color. */
#define RB_ENTRY(type) \
    struct { \
        struct type* rbe_left;   /* left element */ \
        struct type* rbe_right;  /* right element */ \
        struct type* rbe_parent; /* parent element */ \
        int rbe_color;           /* node color */ \
    }

/* Accessors for the embedded linkage and the tree head. */
#define RB_LEFT(elm, field) (elm)->field.rbe_left
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
#define RB_COLOR(elm, field) (elm)->field.rbe_color
#define RB_ROOT(head) (head)->rbh_root
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)

/* Initialize a freshly inserted leaf: given parent, no children, colored red. */
#define RB_SET(elm, parent, field) \
    do { \
        RB_PARENT(elm, field) = parent; \
        RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
        RB_COLOR(elm, field) = RB_RED; \
    } while (/*CONSTCOND*/ 0)

/* Recolor a pair of nodes in one step (used by the rebalancing loops). */
#define RB_SET_BLACKRED(black, red, field) \
    do { \
        RB_COLOR(black, field) = RB_BLACK; \
        RB_COLOR(red, field) = RB_RED; \
    } while (/*CONSTCOND*/ 0)

/* Augmentation hook invoked after structural changes; no-op unless the
 * including code defines RB_AUGMENT before this header. */
#ifndef RB_AUGMENT
#define RB_AUGMENT(x) \
    do { \
    } while (0)
#endif
|
||||||
|
|
||||||
|
#define RB_ROTATE_LEFT(head, elm, tmp, field) \
|
||||||
|
do { \
|
||||||
|
(tmp) = RB_RIGHT(elm, field); \
|
||||||
|
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
|
||||||
|
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
|
||||||
|
} \
|
||||||
|
RB_AUGMENT(elm); \
|
||||||
|
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
|
||||||
|
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
|
||||||
|
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
|
||||||
|
else \
|
||||||
|
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
|
||||||
|
} else \
|
||||||
|
(head)->rbh_root = (tmp); \
|
||||||
|
RB_LEFT(tmp, field) = (elm); \
|
||||||
|
RB_PARENT(elm, field) = (tmp); \
|
||||||
|
RB_AUGMENT(tmp); \
|
||||||
|
if ((RB_PARENT(tmp, field))) \
|
||||||
|
RB_AUGMENT(RB_PARENT(tmp, field)); \
|
||||||
|
} while (/*CONSTCOND*/ 0)
|
||||||
|
|
||||||
|
#define RB_ROTATE_RIGHT(head, elm, tmp, field) \
|
||||||
|
do { \
|
||||||
|
(tmp) = RB_LEFT(elm, field); \
|
||||||
|
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
|
||||||
|
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
|
||||||
|
} \
|
||||||
|
RB_AUGMENT(elm); \
|
||||||
|
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
|
||||||
|
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
|
||||||
|
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
|
||||||
|
else \
|
||||||
|
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
|
||||||
|
} else \
|
||||||
|
(head)->rbh_root = (tmp); \
|
||||||
|
RB_RIGHT(tmp, field) = (elm); \
|
||||||
|
RB_PARENT(elm, field) = (tmp); \
|
||||||
|
RB_AUGMENT(tmp); \
|
||||||
|
if ((RB_PARENT(tmp, field))) \
|
||||||
|
RB_AUGMENT(RB_PARENT(tmp, field)); \
|
||||||
|
} while (/*CONSTCOND*/ 0)
|
||||||
|
|
||||||
|
/* Generates prototypes and inline functions */
/* Declares (without defining) every generated RB function; the _LIGHT
 * variants look up by an opaque `const void*` key instead of a node. */
#define RB_PROTOTYPE(name, type, field, cmp) RB_PROTOTYPE_INTERNAL(name, type, field, cmp, )
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
    RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static)
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
    RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
    RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \
    RB_PROTOTYPE_INSERT(name, type, attr); \
    RB_PROTOTYPE_REMOVE(name, type, attr); \
    RB_PROTOTYPE_FIND(name, type, attr); \
    RB_PROTOTYPE_NFIND(name, type, attr); \
    RB_PROTOTYPE_FIND_LIGHT(name, type, attr); \
    RB_PROTOTYPE_NFIND_LIGHT(name, type, attr); \
    RB_PROTOTYPE_NEXT(name, type, attr); \
    RB_PROTOTYPE_PREV(name, type, attr); \
    RB_PROTOTYPE_MINMAX(name, type, attr);
#define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \
    attr void name##_RB_INSERT_COLOR(struct name*, struct type*)
#define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \
    attr void name##_RB_REMOVE_COLOR(struct name*, struct type*, struct type*)
#define RB_PROTOTYPE_REMOVE(name, type, attr) \
    attr struct type* name##_RB_REMOVE(struct name*, struct type*)
#define RB_PROTOTYPE_INSERT(name, type, attr) \
    attr struct type* name##_RB_INSERT(struct name*, struct type*)
#define RB_PROTOTYPE_FIND(name, type, attr) \
    attr struct type* name##_RB_FIND(struct name*, struct type*)
#define RB_PROTOTYPE_NFIND(name, type, attr) \
    attr struct type* name##_RB_NFIND(struct name*, struct type*)
#define RB_PROTOTYPE_FIND_LIGHT(name, type, attr) \
    attr struct type* name##_RB_FIND_LIGHT(struct name*, const void*)
#define RB_PROTOTYPE_NFIND_LIGHT(name, type, attr) \
    attr struct type* name##_RB_NFIND_LIGHT(struct name*, const void*)
#define RB_PROTOTYPE_NEXT(name, type, attr) attr struct type* name##_RB_NEXT(struct type*)
#define RB_PROTOTYPE_PREV(name, type, attr) attr struct type* name##_RB_PREV(struct type*)
#define RB_PROTOTYPE_MINMAX(name, type, attr) attr struct type* name##_RB_MINMAX(struct name*, int)
|
||||||
|
|
||||||
|
/* Main rb operation.
 * Moves node close to the key of elm to top
 */
/* Generation wrappers.
 * RB_GENERATE_WITHOUT_COMPARE emits the functions that need no comparator
 * (remove, next/prev iteration, min/max); RB_GENERATE_WITH_COMPARE emits the
 * comparator-dependent ones (insert, find/nfind and their _LIGHT variants).
 * `cmp` compares two nodes; `lcmp` is the "light" comparator taking
 * (const void* key, node*) used by the _LIGHT lookups; `attr` is an optional
 * linkage attribute (e.g. `static`). */
#define RB_GENERATE_WITHOUT_COMPARE(name, type, field) \
    RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, )
#define RB_GENERATE_WITHOUT_COMPARE_STATIC(name, type, field) \
    RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, static)
#define RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \
    RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
    RB_GENERATE_REMOVE(name, type, field, attr) \
    RB_GENERATE_NEXT(name, type, field, attr) \
    RB_GENERATE_PREV(name, type, field, attr) \
    RB_GENERATE_MINMAX(name, type, field, attr)

#define RB_GENERATE_WITH_COMPARE(name, type, field, cmp, lcmp) \
    RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, )
#define RB_GENERATE_WITH_COMPARE_STATIC(name, type, field, cmp, lcmp) \
    RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, static)
#define RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, attr) \
    RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
    RB_GENERATE_INSERT(name, type, field, cmp, attr) \
    RB_GENERATE_FIND(name, type, field, cmp, attr) \
    RB_GENERATE_NFIND(name, type, field, cmp, attr) \
    RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \
    RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr)

#define RB_GENERATE_ALL(name, type, field, cmp) RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, )
#define RB_GENERATE_ALL_STATIC(name, type, field, cmp) \
    RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, static)
/* FIX: RB_GENERATE_WITH_COMPARE_INTERNAL takes six arguments
 * (name, type, field, cmp, lcmp, attr); the original expansion passed only
 * five, so any use of RB_GENERATE_ALL* failed at preprocessing with an
 * argument-count error. Reuse `cmp` as the light comparator here; callers
 * that need a distinct lcmp should use RB_GENERATE_WITH_COMPARE directly. */
#define RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, attr) \
    RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \
    RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, cmp, attr)
|
||||||
|
|
||||||
|
/* Emits the post-insert rebalancing pass: walks up from the newly inserted
 * red node, recoloring and rotating until the red-black invariants hold,
 * then forces the root black. The two branches are mirror images keyed on
 * whether the parent is a left or right child of the grandparent. */
#define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
    attr void name##_RB_INSERT_COLOR(struct name* head, struct type* elm) { \
        struct type *parent, *gparent, *tmp; \
        while ((parent = RB_PARENT(elm, field)) != NULL && RB_COLOR(parent, field) == RB_RED) { \
            gparent = RB_PARENT(parent, field); \
            if (parent == RB_LEFT(gparent, field)) { \
                tmp = RB_RIGHT(gparent, field); \
                /* Red uncle: recolor and continue from the grandparent. */ \
                if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
                    RB_COLOR(tmp, field) = RB_BLACK; \
                    RB_SET_BLACKRED(parent, gparent, field); \
                    elm = gparent; \
                    continue; \
                } \
                /* Inner child: rotate to the outer configuration first. */ \
                if (RB_RIGHT(parent, field) == elm) { \
                    RB_ROTATE_LEFT(head, parent, tmp, field); \
                    tmp = parent; \
                    parent = elm; \
                    elm = tmp; \
                } \
                RB_SET_BLACKRED(parent, gparent, field); \
                RB_ROTATE_RIGHT(head, gparent, tmp, field); \
            } else { \
                tmp = RB_LEFT(gparent, field); \
                if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
                    RB_COLOR(tmp, field) = RB_BLACK; \
                    RB_SET_BLACKRED(parent, gparent, field); \
                    elm = gparent; \
                    continue; \
                } \
                if (RB_LEFT(parent, field) == elm) { \
                    RB_ROTATE_RIGHT(head, parent, tmp, field); \
                    tmp = parent; \
                    parent = elm; \
                    elm = tmp; \
                } \
                RB_SET_BLACKRED(parent, gparent, field); \
                RB_ROTATE_LEFT(head, gparent, tmp, field); \
            } \
        } \
        RB_COLOR(head->rbh_root, field) = RB_BLACK; \
    }
|
||||||
|
|
||||||
|
/* Emits the post-remove rebalancing pass: `elm` (possibly NULL) carries a
 * "double black" deficit under `parent`; the loop pushes the deficit upward
 * via the sibling `tmp`, recoloring/rotating per the standard four cases,
 * and terminates by painting the final node black. The else branch mirrors
 * the left-child branch. */
#define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
    attr void name##_RB_REMOVE_COLOR(struct name* head, struct type* parent, struct type* elm) { \
        struct type* tmp; \
        while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && elm != RB_ROOT(head)) { \
            if (RB_LEFT(parent, field) == elm) { \
                tmp = RB_RIGHT(parent, field); \
                /* Red sibling: rotate to get a black sibling. */ \
                if (RB_COLOR(tmp, field) == RB_RED) { \
                    RB_SET_BLACKRED(tmp, parent, field); \
                    RB_ROTATE_LEFT(head, parent, tmp, field); \
                    tmp = RB_RIGHT(parent, field); \
                } \
                /* Black sibling with black children: recolor, move up. */ \
                if ((RB_LEFT(tmp, field) == NULL || \
                     RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
                    (RB_RIGHT(tmp, field) == NULL || \
                     RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
                    RB_COLOR(tmp, field) = RB_RED; \
                    elm = parent; \
                    parent = RB_PARENT(elm, field); \
                } else { \
                    if (RB_RIGHT(tmp, field) == NULL || \
                        RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
                        struct type* oleft; \
                        if ((oleft = RB_LEFT(tmp, field)) != NULL) \
                            RB_COLOR(oleft, field) = RB_BLACK; \
                        RB_COLOR(tmp, field) = RB_RED; \
                        RB_ROTATE_RIGHT(head, tmp, oleft, field); \
                        tmp = RB_RIGHT(parent, field); \
                    } \
                    RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
                    RB_COLOR(parent, field) = RB_BLACK; \
                    if (RB_RIGHT(tmp, field)) \
                        RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
                    RB_ROTATE_LEFT(head, parent, tmp, field); \
                    elm = RB_ROOT(head); \
                    break; \
                } \
            } else { \
                tmp = RB_LEFT(parent, field); \
                if (RB_COLOR(tmp, field) == RB_RED) { \
                    RB_SET_BLACKRED(tmp, parent, field); \
                    RB_ROTATE_RIGHT(head, parent, tmp, field); \
                    tmp = RB_LEFT(parent, field); \
                } \
                if ((RB_LEFT(tmp, field) == NULL || \
                     RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
                    (RB_RIGHT(tmp, field) == NULL || \
                     RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
                    RB_COLOR(tmp, field) = RB_RED; \
                    elm = parent; \
                    parent = RB_PARENT(elm, field); \
                } else { \
                    if (RB_LEFT(tmp, field) == NULL || \
                        RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
                        struct type* oright; \
                        if ((oright = RB_RIGHT(tmp, field)) != NULL) \
                            RB_COLOR(oright, field) = RB_BLACK; \
                        RB_COLOR(tmp, field) = RB_RED; \
                        RB_ROTATE_LEFT(head, tmp, oright, field); \
                        tmp = RB_LEFT(parent, field); \
                    } \
                    RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
                    RB_COLOR(parent, field) = RB_BLACK; \
                    if (RB_LEFT(tmp, field)) \
                        RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
                    RB_ROTATE_RIGHT(head, parent, tmp, field); \
                    elm = RB_ROOT(head); \
                    break; \
                } \
            } \
        } \
        if (elm) \
            RB_COLOR(elm, field) = RB_BLACK; \
    }
|
||||||
|
|
||||||
|
/* Emits node removal. For a node with two children, the in-order successor
 * is unlinked and then *takes the removed node's place* by copying its entire
 * linkage struct ((elm)->field = (old)->field) — relinking neighbors rather
 * than copying user data, so external pointers to nodes stay valid. Ends by
 * running the recolor pass if a black node was unlinked; returns `old`. */
#define RB_GENERATE_REMOVE(name, type, field, attr) \
    attr struct type* name##_RB_REMOVE(struct name* head, struct type* elm) { \
        struct type *child, *parent, *old = elm; \
        int color; \
        if (RB_LEFT(elm, field) == NULL) \
            child = RB_RIGHT(elm, field); \
        else if (RB_RIGHT(elm, field) == NULL) \
            child = RB_LEFT(elm, field); \
        else { \
            struct type* left; \
            /* Two children: find the in-order successor (min of right). */ \
            elm = RB_RIGHT(elm, field); \
            while ((left = RB_LEFT(elm, field)) != NULL) \
                elm = left; \
            child = RB_RIGHT(elm, field); \
            parent = RB_PARENT(elm, field); \
            color = RB_COLOR(elm, field); \
            if (child) \
                RB_PARENT(child, field) = parent; \
            if (parent) { \
                if (RB_LEFT(parent, field) == elm) \
                    RB_LEFT(parent, field) = child; \
                else \
                    RB_RIGHT(parent, field) = child; \
                RB_AUGMENT(parent); \
            } else \
                RB_ROOT(head) = child; \
            if (RB_PARENT(elm, field) == old) \
                parent = elm; \
            /* Successor adopts old's children, parent, and color. */ \
            (elm)->field = (old)->field; \
            if (RB_PARENT(old, field)) { \
                if (RB_LEFT(RB_PARENT(old, field), field) == old) \
                    RB_LEFT(RB_PARENT(old, field), field) = elm; \
                else \
                    RB_RIGHT(RB_PARENT(old, field), field) = elm; \
                RB_AUGMENT(RB_PARENT(old, field)); \
            } else \
                RB_ROOT(head) = elm; \
            RB_PARENT(RB_LEFT(old, field), field) = elm; \
            if (RB_RIGHT(old, field)) \
                RB_PARENT(RB_RIGHT(old, field), field) = elm; \
            /* Re-run augmentation up to the root from the splice point. */ \
            if (parent) { \
                left = parent; \
                do { \
                    RB_AUGMENT(left); \
                } while ((left = RB_PARENT(left, field)) != NULL); \
            } \
            goto color; \
        } \
        parent = RB_PARENT(elm, field); \
        color = RB_COLOR(elm, field); \
        if (child) \
            RB_PARENT(child, field) = parent; \
        if (parent) { \
            if (RB_LEFT(parent, field) == elm) \
                RB_LEFT(parent, field) = child; \
            else \
                RB_RIGHT(parent, field) = child; \
            RB_AUGMENT(parent); \
        } else \
            RB_ROOT(head) = child; \
    color: \
        if (color == RB_BLACK) \
            name##_RB_REMOVE_COLOR(head, parent, child); \
        return (old); \
    }
|
||||||
|
|
||||||
|
#define RB_GENERATE_INSERT(name, type, field, cmp, attr) \
    /* Inserts a node into the RB tree */ \
    /* Standard BST descent to a leaf position, then link, then rebalance. \
     * Returns NULL on success, or the existing node if the key is already \
     * present (in which case elm is NOT inserted). */ \
    attr struct type* name##_RB_INSERT(struct name* head, struct type* elm) { \
        struct type* tmp; \
        struct type* parent = NULL; \
        int comp = 0; \
        tmp = RB_ROOT(head); \
        while (tmp) { \
            parent = tmp; \
            comp = (cmp)(elm, parent); \
            if (comp < 0) \
                tmp = RB_LEFT(tmp, field); \
            else if (comp > 0) \
                tmp = RB_RIGHT(tmp, field); \
            else \
                return (tmp); \
        } \
        RB_SET(elm, parent, field); \
        if (parent != NULL) { \
            /* comp still holds the last comparison against parent. */ \
            if (comp < 0) \
                RB_LEFT(parent, field) = elm; \
            else \
                RB_RIGHT(parent, field) = elm; \
            RB_AUGMENT(parent); \
        } else \
            RB_ROOT(head) = elm; \
        name##_RB_INSERT_COLOR(head, elm); \
        return (NULL); \
    }
|
||||||
|
|
||||||
|
#define RB_GENERATE_FIND(name, type, field, cmp, attr) \
    /* Finds the node with the same key as elm */ \
    /* Plain BST search; returns the matching node or NULL. */ \
    attr struct type* name##_RB_FIND(struct name* head, struct type* elm) { \
        struct type* tmp = RB_ROOT(head); \
        int comp; \
        while (tmp) { \
            comp = cmp(elm, tmp); \
            if (comp < 0) \
                tmp = RB_LEFT(tmp, field); \
            else if (comp > 0) \
                tmp = RB_RIGHT(tmp, field); \
            else \
                return (tmp); \
        } \
        return (NULL); \
    }

#define RB_GENERATE_NFIND(name, type, field, cmp, attr) \
    /* Finds the first node greater than or equal to the search key */ \
    /* Lower-bound search: `res` tracks the best >= candidate so far; \
     * returns NULL if every node is smaller than the key. */ \
    attr struct type* name##_RB_NFIND(struct name* head, struct type* elm) { \
        struct type* tmp = RB_ROOT(head); \
        struct type* res = NULL; \
        int comp; \
        while (tmp) { \
            comp = cmp(elm, tmp); \
            if (comp < 0) { \
                res = tmp; \
                tmp = RB_LEFT(tmp, field); \
            } else if (comp > 0) \
                tmp = RB_RIGHT(tmp, field); \
            else \
                return (tmp); \
        } \
        return (res); \
    }
|
||||||
|
|
||||||
|
#define RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \
    /* Finds the node with the same key as elm */ \
    /* "Light" search: identical to RB_FIND but keyed by an opaque \
     * const void* (lelm) via lcmp, so no temporary node is needed. */ \
    attr struct type* name##_RB_FIND_LIGHT(struct name* head, const void* lelm) { \
        struct type* tmp = RB_ROOT(head); \
        int comp; \
        while (tmp) { \
            comp = lcmp(lelm, tmp); \
            if (comp < 0) \
                tmp = RB_LEFT(tmp, field); \
            else if (comp > 0) \
                tmp = RB_RIGHT(tmp, field); \
            else \
                return (tmp); \
        } \
        return (NULL); \
    }

#define RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) \
    /* Finds the first node greater than or equal to the search key */ \
    /* Lower-bound variant of the light search above. */ \
    attr struct type* name##_RB_NFIND_LIGHT(struct name* head, const void* lelm) { \
        struct type* tmp = RB_ROOT(head); \
        struct type* res = NULL; \
        int comp; \
        while (tmp) { \
            comp = lcmp(lelm, tmp); \
            if (comp < 0) { \
                res = tmp; \
                tmp = RB_LEFT(tmp, field); \
            } else if (comp > 0) \
                tmp = RB_RIGHT(tmp, field); \
            else \
                return (tmp); \
        } \
        return (res); \
    }
|
||||||
|
|
||||||
|
#define RB_GENERATE_NEXT(name, type, field, attr) \
    /* ARGSUSED */ \
    /* In-order successor via parent pointers: the minimum of the right \
     * subtree if it exists, otherwise the nearest ancestor of which elm \
     * lies in the left subtree. Returns NULL past the maximum. */ \
    attr struct type* name##_RB_NEXT(struct type* elm) { \
        if (RB_RIGHT(elm, field)) { \
            elm = RB_RIGHT(elm, field); \
            while (RB_LEFT(elm, field)) \
                elm = RB_LEFT(elm, field); \
        } else { \
            if (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
                elm = RB_PARENT(elm, field); \
            else { \
                while (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
                    elm = RB_PARENT(elm, field); \
                elm = RB_PARENT(elm, field); \
            } \
        } \
        return (elm); \
    }

#define RB_GENERATE_PREV(name, type, field, attr) \
    /* ARGSUSED */ \
    /* Mirror of RB_NEXT: in-order predecessor; NULL before the minimum. */ \
    attr struct type* name##_RB_PREV(struct type* elm) { \
        if (RB_LEFT(elm, field)) { \
            elm = RB_LEFT(elm, field); \
            while (RB_RIGHT(elm, field)) \
                elm = RB_RIGHT(elm, field); \
        } else { \
            if (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
                elm = RB_PARENT(elm, field); \
            else { \
                while (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
                    elm = RB_PARENT(elm, field); \
                elm = RB_PARENT(elm, field); \
            } \
        } \
        return (elm); \
    }
|
||||||
|
|
||||||
|
#define RB_GENERATE_MINMAX(name, type, field, attr) \
    /* Walks to the leftmost (val < 0, i.e. RB_NEGINF) or rightmost \
     * (val >= 0, i.e. RB_INF) node; returns NULL for an empty tree. */ \
    attr struct type* name##_RB_MINMAX(struct name* head, int val) { \
        struct type* tmp = RB_ROOT(head); \
        struct type* parent = NULL; \
        while (tmp) { \
            parent = tmp; \
            if (val < 0) \
                tmp = RB_LEFT(tmp, field); \
            else \
                tmp = RB_RIGHT(tmp, field); \
        } \
        return (parent); \
    }
|
||||||
|
|
||||||
|
#define RB_NEGINF -1
|
||||||
|
#define RB_INF 1
|
||||||
|
|
||||||
|
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
|
||||||
|
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
|
||||||
|
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
|
||||||
|
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
|
||||||
|
#define RB_FIND_LIGHT(name, x, y) name##_RB_FIND_LIGHT(x, y)
|
||||||
|
#define RB_NFIND_LIGHT(name, x, y) name##_RB_NFIND_LIGHT(x, y)
|
||||||
|
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
|
||||||
|
#define RB_PREV(name, x, y) name##_RB_PREV(y)
|
||||||
|
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
|
||||||
|
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
|
||||||
|
|
||||||
|
#define RB_FOREACH(x, name, head) \
|
||||||
|
for ((x) = RB_MIN(name, head); (x) != NULL; (x) = name##_RB_NEXT(x))
|
||||||
|
|
||||||
|
#define RB_FOREACH_FROM(x, name, y) \
|
||||||
|
for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); (x) = (y))
|
||||||
|
|
||||||
|
#define RB_FOREACH_SAFE(x, name, head, y) \
|
||||||
|
for ((x) = RB_MIN(name, head); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
|
||||||
|
(x) = (y))
|
||||||
|
|
||||||
|
#define RB_FOREACH_REVERSE(x, name, head) \
|
||||||
|
for ((x) = RB_MAX(name, head); (x) != NULL; (x) = name##_RB_PREV(x))
|
||||||
|
|
||||||
|
#define RB_FOREACH_REVERSE_FROM(x, name, y) \
|
||||||
|
for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); (x) = (y))
|
||||||
|
|
||||||
|
#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
|
||||||
|
for ((x) = RB_MAX(name, head); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
|
||||||
|
(x) = (y))
|
||||||
|
|
||||||
|
#endif /* _SYS_TREE_H_ */
|
@ -1,317 +0,0 @@
|
|||||||
// Copyright 2018 yuzu emulator team
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "common/assert.h"
|
|
||||||
#include "common/common_types.h"
|
|
||||||
#include "core/arm/exclusive_monitor.h"
|
|
||||||
#include "core/core.h"
|
|
||||||
#include "core/hle/kernel/address_arbiter.h"
|
|
||||||
#include "core/hle/kernel/errors.h"
|
|
||||||
#include "core/hle/kernel/handle_table.h"
|
|
||||||
#include "core/hle/kernel/k_scheduler.h"
|
|
||||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
|
||||||
#include "core/hle/kernel/kernel.h"
|
|
||||||
#include "core/hle/kernel/thread.h"
|
|
||||||
#include "core/hle/kernel/time_manager.h"
|
|
||||||
#include "core/hle/result.h"
|
|
||||||
#include "core/memory.h"
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
// Wake up num_to_wake (or all) threads in a vector.
|
|
||||||
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
|
|
||||||
s32 num_to_wake) {
|
|
||||||
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
|
|
||||||
// them all.
|
|
||||||
std::size_t last = waiting_threads.size();
|
|
||||||
if (num_to_wake > 0) {
|
|
||||||
last = std::min(last, static_cast<std::size_t>(num_to_wake));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signal the waiting threads.
|
|
||||||
for (std::size_t i = 0; i < last; i++) {
|
|
||||||
waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
|
|
||||||
RemoveThread(waiting_threads[i]);
|
|
||||||
waiting_threads[i]->WaitForArbitration(false);
|
|
||||||
waiting_threads[i]->ResumeFromWait();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
|
|
||||||
AddressArbiter::~AddressArbiter() = default;
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
|
|
||||||
s32 num_to_wake) {
|
|
||||||
switch (type) {
|
|
||||||
case SignalType::Signal:
|
|
||||||
return SignalToAddressOnly(address, num_to_wake);
|
|
||||||
case SignalType::IncrementAndSignalIfEqual:
|
|
||||||
return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
|
|
||||||
case SignalType::ModifyByWaitingCountAndSignalIfEqual:
|
|
||||||
return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
|
|
||||||
default:
|
|
||||||
return ERR_INVALID_ENUM_VALUE;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
|
|
||||||
KScopedSchedulerLock lock(system.Kernel());
|
|
||||||
const std::vector<std::shared_ptr<Thread>> waiting_threads =
|
|
||||||
GetThreadsWaitingOnAddress(address);
|
|
||||||
WakeThreads(waiting_threads, num_to_wake);
|
|
||||||
return RESULT_SUCCESS;
|
|
||||||
}
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
|
|
||||||
s32 num_to_wake) {
|
|
||||||
KScopedSchedulerLock lock(system.Kernel());
|
|
||||||
auto& memory = system.Memory();
|
|
||||||
|
|
||||||
// Ensure that we can write to the address.
|
|
||||||
if (!memory.IsValidVirtualAddress(address)) {
|
|
||||||
return ERR_INVALID_ADDRESS_STATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
const std::size_t current_core = system.CurrentCoreIndex();
|
|
||||||
auto& monitor = system.Monitor();
|
|
||||||
u32 current_value;
|
|
||||||
do {
|
|
||||||
current_value = monitor.ExclusiveRead32(current_core, address);
|
|
||||||
|
|
||||||
if (current_value != static_cast<u32>(value)) {
|
|
||||||
return ERR_INVALID_STATE;
|
|
||||||
}
|
|
||||||
current_value++;
|
|
||||||
} while (!monitor.ExclusiveWrite32(current_core, address, current_value));
|
|
||||||
|
|
||||||
return SignalToAddressOnly(address, num_to_wake);
|
|
||||||
}
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
|
|
||||||
s32 num_to_wake) {
|
|
||||||
KScopedSchedulerLock lock(system.Kernel());
|
|
||||||
auto& memory = system.Memory();
|
|
||||||
|
|
||||||
// Ensure that we can write to the address.
|
|
||||||
if (!memory.IsValidVirtualAddress(address)) {
|
|
||||||
return ERR_INVALID_ADDRESS_STATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get threads waiting on the address.
|
|
||||||
const std::vector<std::shared_ptr<Thread>> waiting_threads =
|
|
||||||
GetThreadsWaitingOnAddress(address);
|
|
||||||
|
|
||||||
const std::size_t current_core = system.CurrentCoreIndex();
|
|
||||||
auto& monitor = system.Monitor();
|
|
||||||
s32 updated_value;
|
|
||||||
do {
|
|
||||||
updated_value = monitor.ExclusiveRead32(current_core, address);
|
|
||||||
|
|
||||||
if (updated_value != value) {
|
|
||||||
return ERR_INVALID_STATE;
|
|
||||||
}
|
|
||||||
// Determine the modified value depending on the waiting count.
|
|
||||||
if (num_to_wake <= 0) {
|
|
||||||
if (waiting_threads.empty()) {
|
|
||||||
updated_value = value + 1;
|
|
||||||
} else {
|
|
||||||
updated_value = value - 1;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (waiting_threads.empty()) {
|
|
||||||
updated_value = value + 1;
|
|
||||||
} else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
|
|
||||||
updated_value = value - 1;
|
|
||||||
} else {
|
|
||||||
updated_value = value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
|
|
||||||
|
|
||||||
WakeThreads(waiting_threads, num_to_wake);
|
|
||||||
return RESULT_SUCCESS;
|
|
||||||
}
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
|
|
||||||
s64 timeout_ns) {
|
|
||||||
switch (type) {
|
|
||||||
case ArbitrationType::WaitIfLessThan:
|
|
||||||
return WaitForAddressIfLessThan(address, value, timeout_ns, false);
|
|
||||||
case ArbitrationType::DecrementAndWaitIfLessThan:
|
|
||||||
return WaitForAddressIfLessThan(address, value, timeout_ns, true);
|
|
||||||
case ArbitrationType::WaitIfEqual:
|
|
||||||
return WaitForAddressIfEqual(address, value, timeout_ns);
|
|
||||||
default:
|
|
||||||
return ERR_INVALID_ENUM_VALUE;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
|
|
||||||
bool should_decrement) {
|
|
||||||
auto& memory = system.Memory();
|
|
||||||
auto& kernel = system.Kernel();
|
|
||||||
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
|
||||||
|
|
||||||
Handle event_handle = InvalidHandle;
|
|
||||||
{
|
|
||||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
|
|
||||||
|
|
||||||
if (current_thread->IsPendingTermination()) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return ERR_THREAD_TERMINATING;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure that we can read the address.
|
|
||||||
if (!memory.IsValidVirtualAddress(address)) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return ERR_INVALID_ADDRESS_STATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
s32 current_value = static_cast<s32>(memory.Read32(address));
|
|
||||||
if (current_value >= value) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return ERR_INVALID_STATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
|
||||||
|
|
||||||
s32 decrement_value;
|
|
||||||
|
|
||||||
const std::size_t current_core = system.CurrentCoreIndex();
|
|
||||||
auto& monitor = system.Monitor();
|
|
||||||
do {
|
|
||||||
current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
|
||||||
if (should_decrement) {
|
|
||||||
decrement_value = current_value - 1;
|
|
||||||
} else {
|
|
||||||
decrement_value = current_value;
|
|
||||||
}
|
|
||||||
} while (
|
|
||||||
!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
|
|
||||||
|
|
||||||
// Short-circuit without rescheduling, if timeout is zero.
|
|
||||||
if (timeout == 0) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return RESULT_TIMEOUT;
|
|
||||||
}
|
|
||||||
|
|
||||||
current_thread->SetArbiterWaitAddress(address);
|
|
||||||
InsertThread(SharedFrom(current_thread));
|
|
||||||
current_thread->SetStatus(ThreadStatus::WaitArb);
|
|
||||||
current_thread->WaitForArbitration(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (event_handle != InvalidHandle) {
|
|
||||||
auto& time_manager = kernel.TimeManager();
|
|
||||||
time_manager.UnscheduleTimeEvent(event_handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
KScopedSchedulerLock lock(kernel);
|
|
||||||
if (current_thread->IsWaitingForArbitration()) {
|
|
||||||
RemoveThread(SharedFrom(current_thread));
|
|
||||||
current_thread->WaitForArbitration(false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return current_thread->GetSignalingResult();
|
|
||||||
}
|
|
||||||
|
|
||||||
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
|
|
||||||
auto& memory = system.Memory();
|
|
||||||
auto& kernel = system.Kernel();
|
|
||||||
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
|
||||||
|
|
||||||
Handle event_handle = InvalidHandle;
|
|
||||||
{
|
|
||||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
|
|
||||||
|
|
||||||
if (current_thread->IsPendingTermination()) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return ERR_THREAD_TERMINATING;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure that we can read the address.
|
|
||||||
if (!memory.IsValidVirtualAddress(address)) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return ERR_INVALID_ADDRESS_STATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
s32 current_value = static_cast<s32>(memory.Read32(address));
|
|
||||||
if (current_value != value) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return ERR_INVALID_STATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Short-circuit without rescheduling, if timeout is zero.
|
|
||||||
if (timeout == 0) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return RESULT_TIMEOUT;
|
|
||||||
}
|
|
||||||
|
|
||||||
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
|
||||||
current_thread->SetArbiterWaitAddress(address);
|
|
||||||
InsertThread(SharedFrom(current_thread));
|
|
||||||
current_thread->SetStatus(ThreadStatus::WaitArb);
|
|
||||||
current_thread->WaitForArbitration(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (event_handle != InvalidHandle) {
|
|
||||||
auto& time_manager = kernel.TimeManager();
|
|
||||||
time_manager.UnscheduleTimeEvent(event_handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
KScopedSchedulerLock lock(kernel);
|
|
||||||
if (current_thread->IsWaitingForArbitration()) {
|
|
||||||
RemoveThread(SharedFrom(current_thread));
|
|
||||||
current_thread->WaitForArbitration(false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return current_thread->GetSignalingResult();
|
|
||||||
}
|
|
||||||
|
|
||||||
void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
|
|
||||||
const VAddr arb_addr = thread->GetArbiterWaitAddress();
|
|
||||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
|
|
||||||
|
|
||||||
const auto iter =
|
|
||||||
std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
|
|
||||||
return entry->GetPriority() >= thread->GetPriority();
|
|
||||||
});
|
|
||||||
|
|
||||||
if (iter == thread_list.cend()) {
|
|
||||||
thread_list.push_back(std::move(thread));
|
|
||||||
} else {
|
|
||||||
thread_list.insert(iter, std::move(thread));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
|
|
||||||
const VAddr arb_addr = thread->GetArbiterWaitAddress();
|
|
||||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
|
|
||||||
|
|
||||||
const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
|
|
||||||
[&thread](const auto& entry) { return thread == entry; });
|
|
||||||
|
|
||||||
if (iter != thread_list.cend()) {
|
|
||||||
thread_list.erase(iter);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
|
|
||||||
VAddr address) const {
|
|
||||||
const auto iter = arb_threads.find(address);
|
|
||||||
if (iter == arb_threads.cend()) {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
|
|
||||||
return {thread_list.cbegin(), thread_list.cend()};
|
|
||||||
}
|
|
||||||
} // namespace Kernel
|
|
@ -1,91 +0,0 @@
|
|||||||
// Copyright 2018 yuzu emulator team
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <list>
|
|
||||||
#include <memory>
|
|
||||||
#include <unordered_map>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "common/common_types.h"
|
|
||||||
|
|
||||||
union ResultCode;
|
|
||||||
|
|
||||||
namespace Core {
|
|
||||||
class System;
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
class Thread;
|
|
||||||
|
|
||||||
class AddressArbiter {
|
|
||||||
public:
|
|
||||||
enum class ArbitrationType {
|
|
||||||
WaitIfLessThan = 0,
|
|
||||||
DecrementAndWaitIfLessThan = 1,
|
|
||||||
WaitIfEqual = 2,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum class SignalType {
|
|
||||||
Signal = 0,
|
|
||||||
IncrementAndSignalIfEqual = 1,
|
|
||||||
ModifyByWaitingCountAndSignalIfEqual = 2,
|
|
||||||
};
|
|
||||||
|
|
||||||
explicit AddressArbiter(Core::System& system);
|
|
||||||
~AddressArbiter();
|
|
||||||
|
|
||||||
AddressArbiter(const AddressArbiter&) = delete;
|
|
||||||
AddressArbiter& operator=(const AddressArbiter&) = delete;
|
|
||||||
|
|
||||||
AddressArbiter(AddressArbiter&&) = default;
|
|
||||||
AddressArbiter& operator=(AddressArbiter&&) = delete;
|
|
||||||
|
|
||||||
/// Signals an address being waited on with a particular signaling type.
|
|
||||||
ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
|
|
||||||
|
|
||||||
/// Waits on an address with a particular arbitration type.
|
|
||||||
ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
|
|
||||||
|
|
||||||
private:
|
|
||||||
/// Signals an address being waited on.
|
|
||||||
ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
|
|
||||||
|
|
||||||
/// Signals an address being waited on and increments its value if equal to the value argument.
|
|
||||||
ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
|
|
||||||
|
|
||||||
/// Signals an address being waited on and modifies its value based on waiting thread count if
|
|
||||||
/// equal to the value argument.
|
|
||||||
ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
|
|
||||||
s32 num_to_wake);
|
|
||||||
|
|
||||||
/// Waits on an address if the value passed is less than the argument value,
|
|
||||||
/// optionally decrementing.
|
|
||||||
ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
|
|
||||||
bool should_decrement);
|
|
||||||
|
|
||||||
/// Waits on an address if the value passed is equal to the argument value.
|
|
||||||
ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
|
|
||||||
|
|
||||||
/// Wake up num_to_wake (or all) threads in a vector.
|
|
||||||
void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
|
|
||||||
|
|
||||||
/// Insert a thread into the address arbiter container
|
|
||||||
void InsertThread(std::shared_ptr<Thread> thread);
|
|
||||||
|
|
||||||
/// Removes a thread from the address arbiter container
|
|
||||||
void RemoveThread(std::shared_ptr<Thread> thread);
|
|
||||||
|
|
||||||
// Gets the threads waiting on an address.
|
|
||||||
std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
|
|
||||||
|
|
||||||
/// List of threads waiting for a address arbiter
|
|
||||||
std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
|
|
||||||
|
|
||||||
Core::System& system;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace Kernel
|
|
@ -0,0 +1,367 @@
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include "core/arm/exclusive_monitor.h"
|
||||||
|
#include "core/core.h"
|
||||||
|
#include "core/hle/kernel/k_address_arbiter.h"
|
||||||
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
|
#include "core/hle/kernel/kernel.h"
|
||||||
|
#include "core/hle/kernel/svc_results.h"
|
||||||
|
#include "core/hle/kernel/thread.h"
|
||||||
|
#include "core/hle/kernel/time_manager.h"
|
||||||
|
#include "core/memory.h"
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
KAddressArbiter::KAddressArbiter(Core::System& system_)
|
||||||
|
: system{system_}, kernel{system.Kernel()} {}
|
||||||
|
KAddressArbiter::~KAddressArbiter() = default;
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
|
||||||
|
*out = system.Memory().Read32(address);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
|
||||||
|
auto& monitor = system.Monitor();
|
||||||
|
const auto current_core = system.CurrentCoreIndex();
|
||||||
|
|
||||||
|
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||||
|
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||||
|
|
||||||
|
// Load the value from the address.
|
||||||
|
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
||||||
|
|
||||||
|
// Compare it to the desired one.
|
||||||
|
if (current_value < value) {
|
||||||
|
// If less than, we want to try to decrement.
|
||||||
|
const s32 decrement_value = current_value - 1;
|
||||||
|
|
||||||
|
// Decrement and try to store.
|
||||||
|
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
|
||||||
|
// If we failed to store, try again.
|
||||||
|
DecrementIfLessThan(system, out, address, value);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Otherwise, clear our exclusive hold and finish
|
||||||
|
monitor.ClearExclusive();
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're done.
|
||||||
|
*out = current_value;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
|
||||||
|
auto& monitor = system.Monitor();
|
||||||
|
const auto current_core = system.CurrentCoreIndex();
|
||||||
|
|
||||||
|
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||||
|
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||||
|
|
||||||
|
// Load the value from the address.
|
||||||
|
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
||||||
|
|
||||||
|
// Compare it to the desired one.
|
||||||
|
if (current_value == value) {
|
||||||
|
// If equal, we want to try to write the new value.
|
||||||
|
|
||||||
|
// Try to store.
|
||||||
|
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
|
||||||
|
// If we failed to store, try again.
|
||||||
|
UpdateIfEqual(system, out, address, value, new_value);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Otherwise, clear our exclusive hold and finish.
|
||||||
|
monitor.ClearExclusive();
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're done.
|
||||||
|
*out = current_value;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
|
||||||
|
// Perform signaling.
|
||||||
|
s32 num_waiters{};
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
auto it = thread_tree.nfind_light({addr, -1});
|
||||||
|
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||||
|
(it->GetAddressArbiterKey() == addr)) {
|
||||||
|
Thread* target_thread = std::addressof(*it);
|
||||||
|
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||||
|
|
||||||
|
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||||
|
target_thread->Wakeup();
|
||||||
|
|
||||||
|
it = thread_tree.erase(it);
|
||||||
|
target_thread->ClearAddressArbiter();
|
||||||
|
++num_waiters;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return RESULT_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
|
||||||
|
// Perform signaling.
|
||||||
|
s32 num_waiters{};
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
// Check the userspace value.
|
||||||
|
s32 user_value{};
|
||||||
|
R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
|
||||||
|
Svc::ResultInvalidCurrentMemory);
|
||||||
|
R_UNLESS(user_value == value, Svc::ResultInvalidState);
|
||||||
|
|
||||||
|
auto it = thread_tree.nfind_light({addr, -1});
|
||||||
|
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||||
|
(it->GetAddressArbiterKey() == addr)) {
|
||||||
|
Thread* target_thread = std::addressof(*it);
|
||||||
|
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||||
|
|
||||||
|
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||||
|
target_thread->Wakeup();
|
||||||
|
|
||||||
|
it = thread_tree.erase(it);
|
||||||
|
target_thread->ClearAddressArbiter();
|
||||||
|
++num_waiters;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return RESULT_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
|
||||||
|
// Perform signaling.
|
||||||
|
s32 num_waiters{};
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
auto it = thread_tree.nfind_light({addr, -1});
|
||||||
|
// Determine the updated value.
|
||||||
|
s32 new_value{};
|
||||||
|
if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
|
||||||
|
if (count <= 0) {
|
||||||
|
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
|
||||||
|
new_value = value - 2;
|
||||||
|
} else {
|
||||||
|
new_value = value + 1;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
|
||||||
|
auto tmp_it = it;
|
||||||
|
s32 tmp_num_waiters{};
|
||||||
|
while ((++tmp_it != thread_tree.end()) &&
|
||||||
|
(tmp_it->GetAddressArbiterKey() == addr)) {
|
||||||
|
if ((tmp_num_waiters++) >= count) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tmp_num_waiters < count) {
|
||||||
|
new_value = value - 1;
|
||||||
|
} else {
|
||||||
|
new_value = value;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
new_value = value + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (count <= 0) {
|
||||||
|
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
|
||||||
|
new_value = value - 1;
|
||||||
|
} else {
|
||||||
|
new_value = value + 1;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
auto tmp_it = it;
|
||||||
|
s32 tmp_num_waiters{};
|
||||||
|
while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
|
||||||
|
(tmp_num_waiters < count + 1)) {
|
||||||
|
++tmp_num_waiters;
|
||||||
|
++tmp_it;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tmp_num_waiters == 0) {
|
||||||
|
new_value = value + 1;
|
||||||
|
} else if (tmp_num_waiters <= count) {
|
||||||
|
new_value = value - 1;
|
||||||
|
} else {
|
||||||
|
new_value = value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the userspace value.
|
||||||
|
s32 user_value{};
|
||||||
|
bool succeeded{};
|
||||||
|
if (value != new_value) {
|
||||||
|
succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
|
||||||
|
} else {
|
||||||
|
succeeded = ReadFromUser(system, std::addressof(user_value), addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);
|
||||||
|
R_UNLESS(user_value == value, Svc::ResultInvalidState);
|
||||||
|
|
||||||
|
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||||
|
(it->GetAddressArbiterKey() == addr)) {
|
||||||
|
Thread* target_thread = std::addressof(*it);
|
||||||
|
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||||
|
|
||||||
|
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||||
|
target_thread->Wakeup();
|
||||||
|
|
||||||
|
it = thread_tree.erase(it);
|
||||||
|
target_thread->ClearAddressArbiter();
|
||||||
|
++num_waiters;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return RESULT_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
|
||||||
|
// Prepare to wait.
|
||||||
|
Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||||
|
Handle timer = InvalidHandle;
|
||||||
|
|
||||||
|
{
|
||||||
|
KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
|
||||||
|
|
||||||
|
// Check that the thread isn't terminating.
|
||||||
|
if (cur_thread->IsTerminationRequested()) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultTerminationRequested;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the synced object.
|
||||||
|
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||||
|
|
||||||
|
// Read the value from userspace.
|
||||||
|
s32 user_value{};
|
||||||
|
bool succeeded{};
|
||||||
|
if (decrement) {
|
||||||
|
succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
|
||||||
|
} else {
|
||||||
|
succeeded = ReadFromUser(system, std::addressof(user_value), addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!succeeded) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultInvalidCurrentMemory;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the value is less than the specified one.
|
||||||
|
if (user_value >= value) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultInvalidState;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the timeout is non-zero.
|
||||||
|
if (timeout == 0) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultTimedOut;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the arbiter.
|
||||||
|
cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
|
||||||
|
thread_tree.insert(*cur_thread);
|
||||||
|
cur_thread->SetState(ThreadState::Waiting);
|
||||||
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel the timer wait.
|
||||||
|
if (timer != InvalidHandle) {
|
||||||
|
auto& time_manager = kernel.TimeManager();
|
||||||
|
time_manager.UnscheduleTimeEvent(timer);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from the address arbiter.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
if (cur_thread->IsWaitingForAddressArbiter()) {
|
||||||
|
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
||||||
|
cur_thread->ClearAddressArbiter();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the result.
|
||||||
|
KSynchronizationObject* dummy{};
|
||||||
|
return cur_thread->GetWaitResult(std::addressof(dummy));
|
||||||
|
}
|
||||||
|
|
||||||
|
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
||||||
|
// Prepare to wait.
|
||||||
|
Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||||
|
Handle timer = InvalidHandle;
|
||||||
|
|
||||||
|
{
|
||||||
|
KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
|
||||||
|
|
||||||
|
// Check that the thread isn't terminating.
|
||||||
|
if (cur_thread->IsTerminationRequested()) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultTerminationRequested;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the synced object.
|
||||||
|
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||||
|
|
||||||
|
// Read the value from userspace.
|
||||||
|
s32 user_value{};
|
||||||
|
if (!ReadFromUser(system, std::addressof(user_value), addr)) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultInvalidCurrentMemory;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the value is equal.
|
||||||
|
if (value != user_value) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultInvalidState;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the timeout is non-zero.
|
||||||
|
if (timeout == 0) {
|
||||||
|
slp.CancelSleep();
|
||||||
|
return Svc::ResultTimedOut;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the arbiter.
|
||||||
|
cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
|
||||||
|
thread_tree.insert(*cur_thread);
|
||||||
|
cur_thread->SetState(ThreadState::Waiting);
|
||||||
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel the timer wait.
|
||||||
|
if (timer != InvalidHandle) {
|
||||||
|
auto& time_manager = kernel.TimeManager();
|
||||||
|
time_manager.UnscheduleTimeEvent(timer);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from the address arbiter.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
if (cur_thread->IsWaitingForAddressArbiter()) {
|
||||||
|
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
||||||
|
cur_thread->ClearAddressArbiter();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the result.
|
||||||
|
KSynchronizationObject* dummy{};
|
||||||
|
return cur_thread->GetWaitResult(std::addressof(dummy));
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace Kernel
|
@ -0,0 +1,70 @@
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "common/assert.h"
|
||||||
|
#include "common/common_types.h"
|
||||||
|
#include "core/hle/kernel/k_condition_variable.h"
|
||||||
|
#include "core/hle/kernel/svc_types.h"
|
||||||
|
|
||||||
|
union ResultCode;
|
||||||
|
|
||||||
|
namespace Core {
|
||||||
|
class System;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
class KernelCore;
|
||||||
|
|
||||||
|
class KAddressArbiter {
|
||||||
|
public:
|
||||||
|
using ThreadTree = KConditionVariable::ThreadTree;
|
||||||
|
|
||||||
|
explicit KAddressArbiter(Core::System& system_);
|
||||||
|
~KAddressArbiter();
|
||||||
|
|
||||||
|
[[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
|
||||||
|
s32 count) {
|
||||||
|
switch (type) {
|
||||||
|
case Svc::SignalType::Signal:
|
||||||
|
return Signal(addr, count);
|
||||||
|
case Svc::SignalType::SignalAndIncrementIfEqual:
|
||||||
|
return SignalAndIncrementIfEqual(addr, value, count);
|
||||||
|
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
|
||||||
|
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
|
||||||
|
}
|
||||||
|
UNREACHABLE();
|
||||||
|
return RESULT_UNKNOWN;
|
||||||
|
}
|
||||||
|
|
||||||
|
[[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
|
||||||
|
s64 timeout) {
|
||||||
|
switch (type) {
|
||||||
|
case Svc::ArbitrationType::WaitIfLessThan:
|
||||||
|
return WaitIfLessThan(addr, value, false, timeout);
|
||||||
|
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
|
||||||
|
return WaitIfLessThan(addr, value, true, timeout);
|
||||||
|
case Svc::ArbitrationType::WaitIfEqual:
|
||||||
|
return WaitIfEqual(addr, value, timeout);
|
||||||
|
}
|
||||||
|
UNREACHABLE();
|
||||||
|
return RESULT_UNKNOWN;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
[[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
|
||||||
|
[[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
|
||||||
|
[[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
|
||||||
|
[[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
|
||||||
|
[[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
|
||||||
|
|
||||||
|
ThreadTree thread_tree;
|
||||||
|
|
||||||
|
Core::System& system;
|
||||||
|
KernelCore& kernel;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace Kernel
|
@ -0,0 +1,349 @@
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "core/arm/exclusive_monitor.h"
|
||||||
|
#include "core/core.h"
|
||||||
|
#include "core/hle/kernel/k_condition_variable.h"
|
||||||
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
|
#include "core/hle/kernel/k_synchronization_object.h"
|
||||||
|
#include "core/hle/kernel/kernel.h"
|
||||||
|
#include "core/hle/kernel/process.h"
|
||||||
|
#include "core/hle/kernel/svc_common.h"
|
||||||
|
#include "core/hle/kernel/svc_results.h"
|
||||||
|
#include "core/hle/kernel/thread.h"
|
||||||
|
#include "core/memory.h"
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Reads a 32-bit value from guest memory at the given address.
// Always reports success; accessibility checking is not implemented here.
bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
    *out = system.Memory().Read32(address);
    return true;
}
|
||||||
|
|
||||||
|
// Writes a 32-bit value to guest memory at the given address.
// Always reports success; accessibility checking is not implemented here.
bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
    system.Memory().Write32(address, *p);
    return true;
}
|
||||||
|
|
||||||
|
// Atomically updates the mutex tag at address using the exclusive monitor:
// if the current value is zero the tag becomes if_zero, otherwise the current
// value is OR'd with new_orr_mask. The previous value is returned via out.
//
// Note: the original retried a failed exclusive store via self-recursion;
// under heavy contention that grows the stack without bound. A loop expresses
// the same retry semantics with constant stack usage.
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                      u32 new_orr_mask) {
    auto& monitor = system.Monitor();
    const auto current_core = system.CurrentCoreIndex();

    u32 expected{};
    while (true) {
        // Load the value from the address.
        expected = monitor.ExclusiveRead32(current_core, address);

        // Orr in the new mask.
        u32 value = expected | new_orr_mask;

        // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
        if (!expected) {
            value = if_zero;
        }

        // Try to store; on success we're done, otherwise the exclusive
        // reservation was lost and we retry.
        if (monitor.ExclusiveWrite32(current_core, address, value)) {
            break;
        }
    }

    // Report the previously observed value.
    *out = expected;
    return true;
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Caches the owning system and its kernel for use by the wait/signal paths.
KConditionVariable::KConditionVariable(Core::System& system_)
    : system{system_}, kernel{system_.Kernel()} {}

KConditionVariable::~KConditionVariable() = default;
|
||||||
|
|
||||||
|
// Releases the mutex at addr held by the current thread, handing ownership to
// the highest-priority waiter (if any) and publishing the new tag to userspace.
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    Thread* cur_owner = kernel.CurrentScheduler()->GetCurrentThread();

    // Signal the address.
    {
        KScopedSchedulerLock sl(kernel);

        // Pop the best waiter for this key off the current owner.
        s32 waiter_count{};
        Thread* next_owner = cur_owner->RemoveWaiterByKey(std::addressof(waiter_count), addr);

        // Compute the tag the new owner should see; flag it if others still wait.
        u32 next_tag{};
        if (next_owner != nullptr) {
            next_tag = next_owner->GetAddressKeyValue();
            if (waiter_count > 1) {
                next_tag |= Svc::HandleWaitMask;
            }

            // Wake the new owner with a success result.
            next_owner->SetSyncedObject(nullptr, RESULT_SUCCESS);
            next_owner->Wakeup();
        }

        // Publish the tag to userspace; on failure, the woken thread must also
        // observe the memory error.
        if (!WriteToUser(system, addr, std::addressof(next_tag))) {
            if (next_owner != nullptr) {
                next_owner->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
            }
            return Svc::ResultInvalidCurrentMemory;
        }
    }

    return RESULT_SUCCESS;
}
|
||||||
|
|
||||||
|
// Blocks the current thread on the mutex at addr if its userspace tag shows
// the lock (identified by handle) is held with waiters; otherwise returns
// immediately. On wake-up, detaches from the previous owner and reports the
// result set by whoever woke us.
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    // Wait for the address.
    {
        std::shared_ptr<Thread> owner_thread;
        ASSERT(!owner_thread);
        {
            KScopedSchedulerLock sl(kernel);
            cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

            // Check if the thread should terminate.
            R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);

            {
                // Read the tag from userspace.
                u32 test_tag{};
                R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
                         Svc::ResultInvalidCurrentMemory);

                // If the tag isn't the handle (with wait mask), we're done.
                R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);

                // Look up the lock owner thread from the handle.
                owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle);
                R_UNLESS(owner_thread, Svc::ResultInvalidHandle);

                // Register as a waiter on the owner and go to sleep.
                cur_thread->SetAddressKey(addr, value);
                owner_thread->AddWaiter(cur_thread);
                cur_thread->SetState(ThreadState::Waiting);
                cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
                cur_thread->SetMutexWaitAddressForDebugging(addr);
            }
        }
        ASSERT(owner_thread);
    }

    // Remove the thread as a waiter from the lock owner.
    {
        KScopedSchedulerLock sl(kernel);
        if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(cur_thread);
        }
    }

    // Get the wait result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}
|
||||||
|
|
||||||
|
// Signals a single condition-variable waiter: atomically re-tags its mutex in
// userspace and either wakes the thread (lock was free) or re-queues it as a
// waiter on the current lock owner. Returns a thread whose reference must be
// closed by the caller, or nullptr.
Thread* KConditionVariable::SignalImpl(Thread* thread) {
    // Check pre-conditions.
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Fetch the mutex address/tag the waiter is associated with.
    const VAddr address = thread->GetAddressKey();
    const u32 own_tag = thread->GetAddressKeyValue();

    u32 prev_tag{};
    bool can_access{};
    {
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        // TODO(bunnei): We should call CanAccessAtomic(..) here.
        can_access = true;
        if (can_access) {
            UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                             Svc::HandleWaitMask);
        }
    }

    Thread* thread_to_close = nullptr;
    if (!can_access) {
        // If the address wasn't accessible, note so and wake the thread.
        thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
        thread->Wakeup();
        return thread_to_close;
    }

    if (prev_tag == InvalidHandle) {
        // If nobody held the lock previously, we're all good.
        thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
        thread->Wakeup();
    } else {
        // Resolve the previous owner from the tag (wait bit stripped).
        auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(
            prev_tag & ~Svc::HandleWaitMask);

        if (owner_thread) {
            // Add the thread as a waiter on the owner; caller must close the owner ref.
            owner_thread->AddWaiter(thread);
            thread_to_close = owner_thread.get();
        } else {
            // The lock was tagged with a thread that doesn't exist.
            thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
            thread->Wakeup();
        }
    }

    return thread_to_close;
}
|
||||||
|
|
||||||
|
// Wakes up to count waiters registered under cv_key (count <= 0 means all),
// then closes any owner-thread references produced by SignalImpl outside the
// scheduler lock. Clears the userspace has-waiters flag when none remain.
void KConditionVariable::Signal(u64 cv_key, s32 count) {
    // Prepare for signaling.
    constexpr int MaxThreads = 16;

    // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
    // std::shared_ptr.
    std::vector<std::shared_ptr<Thread>> thread_list;
    std::array<Thread*, MaxThreads> thread_array;
    s32 num_to_close{};

    // Perform signaling.
    s32 num_waiters{};
    {
        KScopedSchedulerLock sl(kernel);

        // Walk all tree entries for this key, up to the requested count.
        auto it = thread_tree.nfind_light({cv_key, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetConditionVariableKey() == cv_key)) {
            Thread* target_thread = std::addressof(*it);

            // Collect threads whose references we must close later; overflow
            // past the fixed array spills into the heap list.
            if (Thread* thread = SignalImpl(target_thread); thread != nullptr) {
                if (num_to_close < MaxThreads) {
                    thread_array[num_to_close++] = thread;
                } else {
                    thread_list.push_back(SharedFrom(thread));
                }
            }

            it = thread_tree.erase(it);
            target_thread->ClearConditionVariable();
            ++num_waiters;
        }

        // If we have no waiters left, clear the has-waiter flag in userspace.
        if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
            const u32 has_waiter_flag{};
            WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
        }
    }

    // Close threads collected in the fixed array.
    for (auto i = 0; i < num_to_close; ++i) {
        thread_array[i]->Close();
    }

    // Close threads collected in the overflow list.
    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
        (*it)->Close();
    }
}
|
||||||
|
|
||||||
|
// Atomically releases the mutex at addr (waking its next owner), registers the
// current thread on the condition variable keyed by key, and sleeps for up to
// timeout nanoseconds. On wake-up (or timeout) the thread is detached from any
// owner and from the tree, and the result set by the waker is returned.
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    Handle timer = InvalidHandle;

    {
        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);

        // Default result if nothing wakes us explicitly.
        cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Release the mutex and hand it to the next owner.
        {
            // Pop the best waiter for this key off the current thread.
            s32 waiter_count{};
            Thread* next_owner = cur_thread->RemoveWaiterByKey(std::addressof(waiter_count), addr);

            // Compute the tag the next owner should observe.
            u32 next_tag{};
            if (next_owner != nullptr) {
                next_tag = next_owner->GetAddressKeyValue();
                if (waiter_count > 1) {
                    next_tag |= Svc::HandleWaitMask;
                }

                // Wake up the next owner.
                next_owner->SetSyncedObject(nullptr, RESULT_SUCCESS);
                next_owner->Wakeup();
            }

            // Mark the cv key as having waiters.
            {
                const u32 has_waiter_flag = 1;
                WriteToUser(system, key, std::addressof(has_waiter_flag));
                // TODO(bunnei): We should call DataMemoryBarrier(..) here.
            }

            // Publish the mutex tag to userspace.
            if (!WriteToUser(system, addr, std::addressof(next_tag))) {
                slp.CancelSleep();
                return Svc::ResultInvalidCurrentMemory;
            }
        }

        // Register with the condition-variable tree.
        cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
        thread_tree.insert(*cur_thread);

        // If the timeout is non-zero, actually put the thread to sleep.
        if (timeout != 0) {
            cur_thread->SetState(ThreadState::Waiting);
            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
            cur_thread->SetMutexWaitAddressForDebugging(addr);
        }
    }

    // Cancel the timer wait.
    if (timer != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(timer);
    }

    // Remove from the condition variable.
    {
        KScopedSchedulerLock sl(kernel);

        if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(cur_thread);
        }

        if (cur_thread->IsWaitingForConditionVariable()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearConditionVariable();
        }
    }

    // Get the result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}
|
||||||
|
|
||||||
|
} // namespace Kernel
|
@ -0,0 +1,59 @@
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "common/assert.h"
|
||||||
|
#include "common/common_types.h"
|
||||||
|
|
||||||
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
|
#include "core/hle/kernel/kernel.h"
|
||||||
|
#include "core/hle/kernel/thread.h"
|
||||||
|
#include "core/hle/result.h"
|
||||||
|
|
||||||
|
namespace Core {
|
||||||
|
class System;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
class KConditionVariable {
|
||||||
|
public:
|
||||||
|
using ThreadTree = typename Thread::ConditionVariableThreadTreeType;
|
||||||
|
|
||||||
|
explicit KConditionVariable(Core::System& system_);
|
||||||
|
~KConditionVariable();
|
||||||
|
|
||||||
|
// Arbitration
|
||||||
|
[[nodiscard]] ResultCode SignalToAddress(VAddr addr);
|
||||||
|
[[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
|
||||||
|
|
||||||
|
// Condition variable
|
||||||
|
void Signal(u64 cv_key, s32 count);
|
||||||
|
[[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
|
||||||
|
|
||||||
|
private:
|
||||||
|
[[nodiscard]] Thread* SignalImpl(Thread* thread);
|
||||||
|
|
||||||
|
ThreadTree thread_tree;
|
||||||
|
|
||||||
|
Core::System& system;
|
||||||
|
KernelCore& kernel;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Detaches a thread from its condition-variable tree prior to a priority
// change (the tree is ordered by priority, so the node must be re-inserted
// afterwards). Requires the global scheduler lock to be held.
inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                 Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->erase(tree->iterator_to(*thread));
}
|
||||||
|
|
||||||
|
// Re-inserts a thread into its condition-variable tree after a priority
// change, restoring the tree's ordering invariant. Requires the global
// scheduler lock to be held.
inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->insert(*thread);
}
|
||||||
|
|
||||||
|
} // namespace Kernel
|
@ -0,0 +1,172 @@
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include "common/assert.h"
|
||||||
|
#include "common/common_types.h"
|
||||||
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
|
#include "core/hle/kernel/k_synchronization_object.h"
|
||||||
|
#include "core/hle/kernel/kernel.h"
|
||||||
|
#include "core/hle/kernel/svc_results.h"
|
||||||
|
#include "core/hle/kernel/thread.h"
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
// Waits on up to num_objects synchronization objects for at most timeout
// nanoseconds. Returns the signaled object's index via out_index (-1 on
// timeout/cancel) and the wait result. The current thread is linked into each
// object's intrusive waiter list for the duration of the wait.
ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
                                        KSynchronizationObject** objects, const s32 num_objects,
                                        s64 timeout) {
    // One waiter-list node per object (heap-backed; nodes must stay at a
    // stable address while linked into the objects' lists).
    std::vector<ThreadListNode> thread_nodes(num_objects);

    // Prepare for wait.
    Thread* thread = kernel.CurrentScheduler()->GetCurrentThread();
    Handle timer = InvalidHandle;

    {
        // Setup the scheduling lock and sleep.
        KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);

        // If any object is already signaled, report it and return immediately.
        for (auto i = 0; i < num_objects; ++i) {
            ASSERT(objects[i] != nullptr);

            if (objects[i]->IsSignaled()) {
                *out_index = i;
                slp.CancelSleep();
                return RESULT_SUCCESS;
            }
        }

        // A zero timeout is a pure poll; nothing was signaled.
        if (timeout == 0) {
            slp.CancelSleep();
            return Svc::ResultTimedOut;
        }

        // Check if the thread should terminate.
        if (thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Check if waiting was canceled.
        if (thread->IsWaitCancelled()) {
            slp.CancelSleep();
            thread->ClearWaitCancelled();
            return Svc::ResultCancelled;
        }

        // Append this thread's node to each object's singly-linked waiter list.
        for (auto i = 0; i < num_objects; ++i) {
            thread_nodes[i].thread = thread;
            thread_nodes[i].next = nullptr;

            if (objects[i]->thread_list_tail == nullptr) {
                objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
            } else {
                objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
            }

            objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
        }

        // For debugging only.
        thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});

        // Mark the thread as waiting.
        thread->SetCancellable();
        thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
        thread->SetState(ThreadState::Waiting);
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
    }

    // The lock/sleep is done, so we should be able to get our result.

    // Thread is no longer cancellable.
    thread->ClearCancellable();

    // For debugging only.
    thread->SetWaitObjectsForDebugging({});

    // Cancel the timer as needed.
    if (timer != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(timer);
    }

    // Get the wait result.
    ResultCode wait_result{RESULT_SUCCESS};
    s32 sync_index = -1;
    {
        KScopedSchedulerLock lock(kernel);
        KSynchronizationObject* synced_obj;
        wait_result = thread->GetWaitResult(std::addressof(synced_obj));

        for (auto i = 0; i < num_objects; ++i) {
            // Unlink our node from the object's list. The head pointer's
            // address is treated as a pseudo-node so the walk below can handle
            // removal at the head uniformly.
            ThreadListNode* prev_ptr =
                reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
            ThreadListNode* prev_val = nullptr;
            ThreadListNode *prev, *tail_prev;

            do {
                prev = prev_ptr;
                prev_ptr = prev_ptr->next;
                tail_prev = prev_val;
                prev_val = prev_ptr;
            } while (prev_ptr != std::addressof(thread_nodes[i]));

            // Fix up the tail if we were the last node.
            if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
                objects[i]->thread_list_tail = tail_prev;
            }

            prev->next = thread_nodes[i].next;

            // Record which object actually woke us.
            if (objects[i] == synced_obj) {
                sync_index = i;
            }
        }
    }

    // Set output.
    *out_index = sync_index;
    return wait_result;
}
|
||||||
|
|
||||||
|
// A synchronization object is just a kernel Object with an intrusive waiter list.
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}

KSynchronizationObject::~KSynchronizationObject() = default;
|
||||||
|
|
||||||
|
// Wakes every waiting thread on this object with the given result, but only
// if the object is currently signaled.
void KSynchronizationObject::NotifyAvailable(ResultCode result) {
    KScopedSchedulerLock lock(kernel);

    // If we're not signaled, we've nothing to notify.
    if (!this->IsSignaled()) {
        return;
    }

    // Walk the waiter list and make each waiting thread runnable.
    for (auto* node = thread_list_head; node != nullptr; node = node->next) {
        if (Thread* waiter = node->thread; waiter->GetState() == ThreadState::Waiting) {
            waiter->SetSyncedObject(this, result);
            waiter->SetState(ThreadState::Runnable);
        }
    }
}
|
||||||
|
|
||||||
|
// Snapshots the current waiter list under the scheduler lock. Debug-only:
// the returned pointers may dangle once the lock is released.
std::vector<Thread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
    std::vector<Thread*> threads;

    {
        KScopedSchedulerLock lock(kernel);
        for (auto* node = thread_list_head; node != nullptr; node = node->next) {
            threads.emplace_back(node->thread);
        }
    }

    return threads;
}
|
||||||
|
} // namespace Kernel
|
@ -0,0 +1,58 @@
|
|||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "core/hle/kernel/object.h"
|
||||||
|
#include "core/hle/result.h"
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
class KernelCore;
|
||||||
|
class Synchronization;
|
||||||
|
class Thread;
|
||||||
|
|
||||||
|
/// Class that represents a Kernel object that a thread can be waiting on
|
||||||
|
class KSynchronizationObject : public Object {
|
||||||
|
public:
|
||||||
|
struct ThreadListNode {
|
||||||
|
ThreadListNode* next{};
|
||||||
|
Thread* thread{};
|
||||||
|
};
|
||||||
|
|
||||||
|
[[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
|
||||||
|
KSynchronizationObject** objects, const s32 num_objects,
|
||||||
|
s64 timeout);
|
||||||
|
|
||||||
|
[[nodiscard]] virtual bool IsSignaled() const = 0;
|
||||||
|
|
||||||
|
[[nodiscard]] std::vector<Thread*> GetWaitingThreadsForDebugging() const;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
explicit KSynchronizationObject(KernelCore& kernel);
|
||||||
|
virtual ~KSynchronizationObject();
|
||||||
|
|
||||||
|
void NotifyAvailable(ResultCode result);
|
||||||
|
void NotifyAvailable() {
|
||||||
|
return this->NotifyAvailable(RESULT_SUCCESS);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
ThreadListNode* thread_list_head{};
|
||||||
|
ThreadListNode* thread_list_tail{};
|
||||||
|
};
|
||||||
|
|
||||||
|
// Specialization of DynamicObjectCast for KSynchronizationObjects
|
||||||
|
template <>
|
||||||
|
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
|
||||||
|
std::shared_ptr<Object> object) {
|
||||||
|
if (object != nullptr && object->IsWaitable()) {
|
||||||
|
return std::static_pointer_cast<KSynchronizationObject>(object);
|
||||||
|
}
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace Kernel
|
@ -1,170 +0,0 @@
|
|||||||
// Copyright 2014 Citra Emulator Project
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
#include <utility>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "common/assert.h"
|
|
||||||
#include "common/logging/log.h"
|
|
||||||
#include "core/core.h"
|
|
||||||
#include "core/hle/kernel/errors.h"
|
|
||||||
#include "core/hle/kernel/handle_table.h"
|
|
||||||
#include "core/hle/kernel/k_scheduler.h"
|
|
||||||
#include "core/hle/kernel/kernel.h"
|
|
||||||
#include "core/hle/kernel/mutex.h"
|
|
||||||
#include "core/hle/kernel/object.h"
|
|
||||||
#include "core/hle/kernel/process.h"
|
|
||||||
#include "core/hle/kernel/thread.h"
|
|
||||||
#include "core/hle/result.h"
|
|
||||||
#include "core/memory.h"
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
|
|
||||||
/// those.
|
|
||||||
/// Returns the highest-priority thread waiting on the given mutex address,
/// together with the total number of threads waiting on it.
static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
    const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {

    std::shared_ptr<Thread> best_candidate;
    u32 waiter_count = 0;

    for (const auto& waiter : current_thread->GetMutexWaitingThreads()) {
        // Skip threads waiting on a different mutex.
        if (waiter->GetMutexWaitAddress() != mutex_addr) {
            continue;
        }

        ++waiter_count;
        // Lower numeric priority value means higher priority.
        if (best_candidate == nullptr || waiter->GetPriority() < best_candidate->GetPriority()) {
            best_candidate = waiter;
        }
    }

    return {best_candidate, waiter_count};
}
|
|
||||||
|
|
||||||
/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
|
|
||||||
/// Re-parents every thread waiting on mutex_addr from the current owner to the
/// new owner, so priority inheritance tracks the new holder.
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
                                   std::shared_ptr<Thread> new_owner) {
    current_thread->RemoveMutexWaiter(new_owner);
    const auto threads = current_thread->GetMutexWaitingThreads();
    for (const auto& waiter : threads) {
        // Only move waiters of this particular mutex.
        if (waiter->GetMutexWaitAddress() != mutex_addr) {
            continue;
        }

        ASSERT(waiter->GetLockOwner() == current_thread.get());
        current_thread->RemoveMutexWaiter(waiter);
        if (new_owner != waiter) {
            new_owner->AddMutexWaiter(waiter);
        }
    }
}
|
|
||||||
|
|
||||||
// A mutex only needs the owning system to reach guest memory and the kernel.
Mutex::Mutex(Core::System& system) : system{system} {}

Mutex::~Mutex() = default;
|
|
||||||
|
|
||||||
// Attempts to acquire the guest mutex at address on behalf of the requesting
// thread. If the mutex is held with waiters, the current thread blocks on the
// holder (with priority inheritance) until released.
ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
                             Handle requesting_thread_handle) {
    // The mutex address must be 4-byte aligned.
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    auto& kernel = system.Kernel();
    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
    {
        KScopedSchedulerLock lock(kernel);

        // Alignment re-checked under the lock (kept from the original logic;
        // it cannot change, so this can never actually trigger).
        if ((address % sizeof(u32)) != 0) {
            return ERR_INVALID_ADDRESS;
        }

        const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
        std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
        std::shared_ptr<Thread> requesting_thread =
            handle_table.Get<Thread>(requesting_thread_handle);

        // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
        // another thread.
        ASSERT(requesting_thread == current_thread);

        current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);

        const u32 addr_value = system.Memory().Read32(address);

        // If the mutex isn't being held (with waiters), just return success.
        if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
            return RESULT_SUCCESS;
        }

        if (holding_thread == nullptr) {
            return ERR_INVALID_HANDLE;
        }

        // Block until the mutex is released.
        current_thread->SetMutexWaitAddress(address);
        current_thread->SetWaitHandle(requesting_thread_handle);

        current_thread->SetStatus(ThreadStatus::WaitMutex);

        // Register as a waiter so the holder's priority is boosted
        // (prevents priority inversion).
        holding_thread->AddMutexWaiter(current_thread);
    }

    // After waking, detach from whichever thread still owns us, if any.
    {
        KScopedSchedulerLock lock(kernel);
        if (auto* owner = current_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveMutexWaiter(current_thread);
        }
    }
    return current_thread->GetSignalingResult();
}
|
|
||||||
|
|
||||||
// Releases the guest mutex at address held by owner, transferring it to the
// highest-priority waiter if one exists. Returns the result together with the
// new owner (nullptr when the mutex is simply freed).
std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
                                                             VAddr address) {
    // The mutex address must be 4-byte aligned.
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return {ERR_INVALID_ADDRESS, nullptr};
    }

    auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
    if (new_owner == nullptr) {
        // Nobody is waiting: clear the guest word and we're done.
        system.Memory().Write32(address, 0);
        return {RESULT_SUCCESS, nullptr};
    }

    // Transfer the ownership of the mutex from the previous owner to the new one.
    TransferMutexOwnership(address, owner, new_owner);

    u32 mutex_value = new_owner->GetWaitHandle();
    if (num_waiters >= 2) {
        // Notify the guest that there are still some threads waiting for the mutex.
        mutex_value |= Mutex::MutexHasWaitersFlag;
    }

    // Wake the new owner with a success result.
    new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
    new_owner->SetLockOwner(nullptr);
    new_owner->ResumeFromWait();

    system.Memory().Write32(address, mutex_value);
    return {RESULT_SUCCESS, new_owner};
}
|
|
||||||
|
|
||||||
ResultCode Mutex::Release(VAddr address) {
|
|
||||||
auto& kernel = system.Kernel();
|
|
||||||
KScopedSchedulerLock lock(kernel);
|
|
||||||
|
|
||||||
std::shared_ptr<Thread> current_thread =
|
|
||||||
SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
|
|
||||||
|
|
||||||
auto [result, new_owner] = Unlock(current_thread, address);
|
|
||||||
|
|
||||||
if (result != RESULT_SUCCESS && new_owner != nullptr) {
|
|
||||||
new_owner->SetSynchronizationResults(nullptr, result);
|
|
||||||
}
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace Kernel
|
|
@ -1,42 +0,0 @@
|
|||||||
// Copyright 2014 Citra Emulator Project
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "common/common_types.h"
|
|
||||||
|
|
||||||
union ResultCode;
|
|
||||||
|
|
||||||
namespace Core {
|
|
||||||
class System;
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
class Mutex final {
|
|
||||||
public:
|
|
||||||
explicit Mutex(Core::System& system);
|
|
||||||
~Mutex();
|
|
||||||
|
|
||||||
/// Flag that indicates that a mutex still has threads waiting for it.
|
|
||||||
static constexpr u32 MutexHasWaitersFlag = 0x40000000;
|
|
||||||
/// Mask of the bits in a mutex address value that contain the mutex owner.
|
|
||||||
static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
|
|
||||||
|
|
||||||
/// Attempts to acquire a mutex at the specified address.
|
|
||||||
ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
|
|
||||||
Handle requesting_thread_handle);
|
|
||||||
|
|
||||||
/// Unlocks a mutex for owner at address
|
|
||||||
std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
|
|
||||||
VAddr address);
|
|
||||||
|
|
||||||
/// Releases the mutex at the specified address.
|
|
||||||
ResultCode Release(VAddr address);
|
|
||||||
|
|
||||||
private:
|
|
||||||
Core::System& system;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace Kernel
|
|
@ -0,0 +1,14 @@
|
|||||||
|
// Copyright 2020 yuzu emulator team
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "common/common_types.h"
|
||||||
|
|
||||||
|
namespace Kernel::Svc {
|
||||||
|
|
||||||
|
constexpr s32 ArgumentHandleCountMax = 0x40;
|
||||||
|
constexpr u32 HandleWaitMask{1u << 30};
|
||||||
|
|
||||||
|
} // namespace Kernel::Svc
|
@ -0,0 +1,20 @@
|
|||||||
|
// Copyright 2020 yuzu emulator team
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "core/hle/result.h"
|
||||||
|
|
||||||
|
namespace Kernel::Svc {
|
||||||
|
|
||||||
|
constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
|
||||||
|
constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
|
||||||
|
constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
|
||||||
|
constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
|
||||||
|
constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
|
||||||
|
constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
|
||||||
|
constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
|
||||||
|
constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
|
||||||
|
|
||||||
|
} // namespace Kernel::Svc
|
@ -1,116 +0,0 @@
|
|||||||
// Copyright 2020 yuzu Emulator Project
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#include "core/core.h"
|
|
||||||
#include "core/hle/kernel/errors.h"
|
|
||||||
#include "core/hle/kernel/handle_table.h"
|
|
||||||
#include "core/hle/kernel/k_scheduler.h"
|
|
||||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
|
||||||
#include "core/hle/kernel/kernel.h"
|
|
||||||
#include "core/hle/kernel/synchronization.h"
|
|
||||||
#include "core/hle/kernel/synchronization_object.h"
|
|
||||||
#include "core/hle/kernel/thread.h"
|
|
||||||
#include "core/hle/kernel/time_manager.h"
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
Synchronization::Synchronization(Core::System& system) : system{system} {}
|
|
||||||
|
|
||||||
void Synchronization::SignalObject(SynchronizationObject& obj) const {
|
|
||||||
auto& kernel = system.Kernel();
|
|
||||||
KScopedSchedulerLock lock(kernel);
|
|
||||||
if (obj.IsSignaled()) {
|
|
||||||
for (auto thread : obj.GetWaitingThreads()) {
|
|
||||||
if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
|
|
||||||
if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
|
|
||||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
|
|
||||||
ASSERT(thread->IsWaitingSync());
|
|
||||||
}
|
|
||||||
thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
|
|
||||||
thread->ResumeFromWait();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
obj.ClearWaitingThreads();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::pair<ResultCode, Handle> Synchronization::WaitFor(
|
|
||||||
std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
|
|
||||||
auto& kernel = system.Kernel();
|
|
||||||
auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
|
|
||||||
Handle event_handle = InvalidHandle;
|
|
||||||
{
|
|
||||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
|
|
||||||
const auto itr =
|
|
||||||
std::find_if(sync_objects.begin(), sync_objects.end(),
|
|
||||||
[thread](const std::shared_ptr<SynchronizationObject>& object) {
|
|
||||||
return object->IsSignaled();
|
|
||||||
});
|
|
||||||
|
|
||||||
if (itr != sync_objects.end()) {
|
|
||||||
// We found a ready object, acquire it and set the result value
|
|
||||||
SynchronizationObject* object = itr->get();
|
|
||||||
object->Acquire(thread);
|
|
||||||
const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
|
|
||||||
lock.CancelSleep();
|
|
||||||
return {RESULT_SUCCESS, index};
|
|
||||||
}
|
|
||||||
|
|
||||||
if (nano_seconds == 0) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return {RESULT_TIMEOUT, InvalidHandle};
|
|
||||||
}
|
|
||||||
|
|
||||||
if (thread->IsPendingTermination()) {
|
|
||||||
lock.CancelSleep();
|
|
||||||
return {ERR_THREAD_TERMINATING, InvalidHandle};
|
|
||||||
}
|
|
||||||
|
|
||||||
if (thread->IsSyncCancelled()) {
|
|
||||||
thread->SetSyncCancelled(false);
|
|
||||||
lock.CancelSleep();
|
|
||||||
return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
|
|
||||||
}
|
|
||||||
|
|
||||||
for (auto& object : sync_objects) {
|
|
||||||
object->AddWaitingThread(SharedFrom(thread));
|
|
||||||
}
|
|
||||||
|
|
||||||
thread->SetSynchronizationObjects(&sync_objects);
|
|
||||||
thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
|
||||||
thread->SetStatus(ThreadStatus::WaitSynch);
|
|
||||||
thread->SetWaitingSync(true);
|
|
||||||
}
|
|
||||||
thread->SetWaitingSync(false);
|
|
||||||
|
|
||||||
if (event_handle != InvalidHandle) {
|
|
||||||
auto& time_manager = kernel.TimeManager();
|
|
||||||
time_manager.UnscheduleTimeEvent(event_handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
KScopedSchedulerLock lock(kernel);
|
|
||||||
ResultCode signaling_result = thread->GetSignalingResult();
|
|
||||||
SynchronizationObject* signaling_object = thread->GetSignalingObject();
|
|
||||||
thread->SetSynchronizationObjects(nullptr);
|
|
||||||
auto shared_thread = SharedFrom(thread);
|
|
||||||
for (auto& obj : sync_objects) {
|
|
||||||
obj->RemoveWaitingThread(shared_thread);
|
|
||||||
}
|
|
||||||
if (signaling_object != nullptr) {
|
|
||||||
const auto itr = std::find_if(
|
|
||||||
sync_objects.begin(), sync_objects.end(),
|
|
||||||
[signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
|
|
||||||
return object.get() == signaling_object;
|
|
||||||
});
|
|
||||||
ASSERT(itr != sync_objects.end());
|
|
||||||
signaling_object->Acquire(thread);
|
|
||||||
const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
|
|
||||||
return {signaling_result, index};
|
|
||||||
}
|
|
||||||
return {signaling_result, -1};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace Kernel
|
|
@ -1,44 +0,0 @@
|
|||||||
// Copyright 2020 yuzu Emulator Project
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
#include <utility>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "core/hle/kernel/object.h"
|
|
||||||
#include "core/hle/result.h"
|
|
||||||
|
|
||||||
namespace Core {
|
|
||||||
class System;
|
|
||||||
} // namespace Core
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
class SynchronizationObject;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The 'Synchronization' class is an interface for handling synchronization methods
|
|
||||||
* used by Synchronization objects and synchronization SVCs. This centralizes processing of
|
|
||||||
* such
|
|
||||||
*/
|
|
||||||
class Synchronization {
|
|
||||||
public:
|
|
||||||
explicit Synchronization(Core::System& system);
|
|
||||||
|
|
||||||
/// Signals a synchronization object, waking up all its waiting threads
|
|
||||||
void SignalObject(SynchronizationObject& obj) const;
|
|
||||||
|
|
||||||
/// Tries to see if waiting for any of the sync_objects is necessary, if not
|
|
||||||
/// it returns Success and the handle index of the signaled sync object. In
|
|
||||||
/// case not, the current thread will be locked and wait for nano_seconds or
|
|
||||||
/// for a synchronization object to signal.
|
|
||||||
std::pair<ResultCode, Handle> WaitFor(
|
|
||||||
std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);
|
|
||||||
|
|
||||||
private:
|
|
||||||
Core::System& system;
|
|
||||||
};
|
|
||||||
} // namespace Kernel
|
|
@ -1,49 +0,0 @@
|
|||||||
// Copyright 2014 Citra Emulator Project
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include "common/assert.h"
|
|
||||||
#include "common/common_types.h"
|
|
||||||
#include "common/logging/log.h"
|
|
||||||
#include "core/core.h"
|
|
||||||
#include "core/hle/kernel/kernel.h"
|
|
||||||
#include "core/hle/kernel/object.h"
|
|
||||||
#include "core/hle/kernel/process.h"
|
|
||||||
#include "core/hle/kernel/synchronization.h"
|
|
||||||
#include "core/hle/kernel/synchronization_object.h"
|
|
||||||
#include "core/hle/kernel/thread.h"
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
|
|
||||||
SynchronizationObject::~SynchronizationObject() = default;
|
|
||||||
|
|
||||||
void SynchronizationObject::Signal() {
|
|
||||||
kernel.Synchronization().SignalObject(*this);
|
|
||||||
}
|
|
||||||
|
|
||||||
void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
|
|
||||||
auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
|
|
||||||
if (itr == waiting_threads.end())
|
|
||||||
waiting_threads.push_back(std::move(thread));
|
|
||||||
}
|
|
||||||
|
|
||||||
void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
|
|
||||||
auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
|
|
||||||
// If a thread passed multiple handles to the same object,
|
|
||||||
// the kernel might attempt to remove the thread from the object's
|
|
||||||
// waiting threads list multiple times.
|
|
||||||
if (itr != waiting_threads.end())
|
|
||||||
waiting_threads.erase(itr);
|
|
||||||
}
|
|
||||||
|
|
||||||
void SynchronizationObject::ClearWaitingThreads() {
|
|
||||||
waiting_threads.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
|
|
||||||
return waiting_threads;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace Kernel
|
|
@ -1,77 +0,0 @@
|
|||||||
// Copyright 2014 Citra Emulator Project
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <atomic>
|
|
||||||
#include <memory>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "core/hle/kernel/object.h"
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
class KernelCore;
|
|
||||||
class Synchronization;
|
|
||||||
class Thread;
|
|
||||||
|
|
||||||
/// Class that represents a Kernel object that a thread can be waiting on
|
|
||||||
class SynchronizationObject : public Object {
|
|
||||||
public:
|
|
||||||
explicit SynchronizationObject(KernelCore& kernel);
|
|
||||||
~SynchronizationObject() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check if the specified thread should wait until the object is available
|
|
||||||
* @param thread The thread about which we're deciding.
|
|
||||||
* @return True if the current thread should wait due to this object being unavailable
|
|
||||||
*/
|
|
||||||
virtual bool ShouldWait(const Thread* thread) const = 0;
|
|
||||||
|
|
||||||
/// Acquire/lock the object for the specified thread if it is available
|
|
||||||
virtual void Acquire(Thread* thread) = 0;
|
|
||||||
|
|
||||||
/// Signal this object
|
|
||||||
virtual void Signal();
|
|
||||||
|
|
||||||
virtual bool IsSignaled() const {
|
|
||||||
return is_signaled;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Add a thread to wait on this object
|
|
||||||
* @param thread Pointer to thread to add
|
|
||||||
*/
|
|
||||||
void AddWaitingThread(std::shared_ptr<Thread> thread);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Removes a thread from waiting on this object (e.g. if it was resumed already)
|
|
||||||
* @param thread Pointer to thread to remove
|
|
||||||
*/
|
|
||||||
void RemoveWaitingThread(std::shared_ptr<Thread> thread);
|
|
||||||
|
|
||||||
/// Get a const reference to the waiting threads list for debug use
|
|
||||||
const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
|
|
||||||
|
|
||||||
void ClearWaitingThreads();
|
|
||||||
|
|
||||||
protected:
|
|
||||||
std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
|
|
||||||
|
|
||||||
private:
|
|
||||||
/// Threads waiting for this object to become available
|
|
||||||
std::vector<std::shared_ptr<Thread>> waiting_threads;
|
|
||||||
};
|
|
||||||
|
|
||||||
// Specialization of DynamicObjectCast for SynchronizationObjects
|
|
||||||
template <>
|
|
||||||
inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
|
|
||||||
std::shared_ptr<Object> object) {
|
|
||||||
if (object != nullptr && object->IsWaitable()) {
|
|
||||||
return std::static_pointer_cast<SynchronizationObject>(object);
|
|
||||||
}
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace Kernel
|
|
Loading…
Reference in New Issue