author     Lioncash <[email protected]>   2021-05-08 12:11:36 -0400
committer  Lioncash <[email protected]>   2021-05-08 12:33:26 -0400
commit     2f62bae9e3bbdd80cd374aaf0b93890e937d5b3d (patch)
tree       da5cbd25dcb367559c1bf619a7438fa1e18ff600
parent     faa067f175cbf5e916ed75776817f0046e6731c4 (diff)
kernel: Eliminate variable shadowing
Now that the large kernel refactor is merged, we can eliminate the
remaining variable shadowing cases.
40 files changed, 138 insertions, 140 deletions
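
The pattern behind most of the renames below is simple: a constructor parameter or local that reused the name of a class member, or of a variable in an enclosing scope, is given a distinct name, typically by appending a trailing underscore. A minimal sketch of the before/after shape follows; it is illustrative only, with hypothetical class names, and is not code from this repository.

// Stand-in for the real kernel context type; KExampleBefore/KExampleAfter are
// hypothetical and exist only to illustrate the shadowing pattern.
class KernelCore {};

class KExampleBefore {
public:
    // The parameter `kernel` shadows the member `kernel`. Compilers flag this
    // under shadowing warnings (e.g. GCC's -Wshadow, Clang's -Wshadow-all).
    explicit KExampleBefore(KernelCore& kernel) : kernel{kernel} {}

private:
    KernelCore& kernel;
};

class KExampleAfter {
public:
    // Renaming the parameter with a trailing underscore removes the shadowing
    // while leaving the member name unchanged.
    explicit KExampleAfter(KernelCore& kernel_) : kernel{kernel_} {}

private:
    KernelCore& kernel;
};

The same convention is applied in the hunks below to locals and parameters that collided with names in an enclosing scope (for example, end_addr becomes update_end_addr and core_id becomes cpu_core_id).
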
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 0906b8cfb..d136be452 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h @@ -69,16 +69,16 @@ public: AlwaysMoveHandles = 1, }; - explicit ResponseBuilder(Kernel::HLERequestContext& context, u32 normal_params_size, + explicit ResponseBuilder(Kernel::HLERequestContext& ctx, u32 normal_params_size, u32 num_handles_to_copy = 0, u32 num_objects_to_move = 0, Flags flags = Flags::None) - : RequestHelperBase(context), normal_params_size(normal_params_size), + : RequestHelperBase(ctx), normal_params_size(normal_params_size), num_handles_to_copy(num_handles_to_copy), - num_objects_to_move(num_objects_to_move), kernel{context.kernel} { + num_objects_to_move(num_objects_to_move), kernel{ctx.kernel} { memset(cmdbuf, 0, sizeof(u32) * IPC::COMMAND_BUFFER_LENGTH); - context.ClearIncomingObjects(); + ctx.ClearIncomingObjects(); IPC::CommandHeader header{}; @@ -90,13 +90,13 @@ public: u32 num_domain_objects{}; const bool always_move_handles{ (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0}; - if (!context.Session()->IsDomain() || always_move_handles) { + if (!ctx.Session()->IsDomain() || always_move_handles) { num_handles_to_move = num_objects_to_move; } else { num_domain_objects = num_objects_to_move; } - if (context.Session()->IsDomain()) { + if (ctx.Session()->IsDomain()) { raw_data_size += sizeof(DomainMessageHeader) / 4 + num_domain_objects; } @@ -116,7 +116,7 @@ public: AlignWithPadding(); - if (context.Session()->IsDomain() && context.HasDomainMessageHeader()) { + if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) { IPC::DomainMessageHeader domain_header{}; domain_header.num_objects = num_domain_objects; PushRaw(domain_header); @@ -341,9 +341,9 @@ class RequestParser : public RequestHelperBase { public: explicit RequestParser(u32* command_buffer) : RequestHelperBase(command_buffer) {} - explicit RequestParser(Kernel::HLERequestContext& context) : RequestHelperBase(context) { - ASSERT_MSG(context.GetDataPayloadOffset(), "context is incomplete"); - Skip(context.GetDataPayloadOffset(), false); + explicit RequestParser(Kernel::HLERequestContext& ctx) : RequestHelperBase(ctx) { + ASSERT_MSG(ctx.GetDataPayloadOffset(), "context is incomplete"); + Skip(ctx.GetDataPayloadOffset(), false); // Skip the u64 command id, it's already stored in the context static constexpr u32 CommandIdSize = 2; Skip(CommandIdSize, false); diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index b505d20a6..93907f75e 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -180,12 +180,12 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTab return RESULT_SUCCESS; } -ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) { - auto& owner_process = *thread.GetOwnerProcess(); +ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) { + auto& owner_process = *requesting_thread.GetOwnerProcess(); auto& handle_table = owner_process.GetHandleTable(); std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf; - memory.ReadBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(), + memory.ReadBlock(owner_process, requesting_thread.GetTLSAddress(), dst_cmdbuf.data(), dst_cmdbuf.size() * sizeof(u32)); // The header was already built in the internal command buffer. 
Attempt to parse it to verify @@ -242,7 +242,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) { } // Copy the translated command buffer back into the thread's command buffer area. - memory.WriteBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(), + memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), dst_cmdbuf.data(), dst_cmdbuf.size() * sizeof(u32)); return RESULT_SUCCESS; diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index fa031c121..21e384706 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -126,7 +126,7 @@ public: u32_le* src_cmdbuf); /// Writes data from this context back to the requesting process/thread. - ResultCode WriteToOutgoingCommandBuffer(KThread& thread); + ResultCode WriteToOutgoingCommandBuffer(KThread& requesting_thread); u32_le GetCommand() const { return command; diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index 765e46670..bc18582be 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h @@ -177,7 +177,7 @@ class KAutoObjectWithListContainer; class KAutoObjectWithList : public KAutoObject { public: - explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_), kernel(kernel_) {} + explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {} static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) { const u64 lid = lhs.GetId(); @@ -204,11 +204,7 @@ public: private: friend class KAutoObjectWithListContainer; -private: Common::IntrusiveRedBlackTreeNode list_node; - -protected: - KernelCore& kernel; }; template <typename T> diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp index b6f1d713f..e14b915b9 100644 --- a/src/core/hle/kernel/k_client_port.cpp +++ b/src/core/hle/kernel/k_client_port.cpp @@ -13,7 +13,7 @@ namespace Kernel { -KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} KClientPort::~KClientPort() = default; void KClientPort::Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_) { diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h index ec1d7e12e..d00ce3ddd 100644 --- a/src/core/hle/kernel/k_client_port.h +++ b/src/core/hle/kernel/k_client_port.h @@ -21,7 +21,7 @@ class KClientPort final : public KSynchronizationObject { KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); public: - explicit KClientPort(KernelCore& kernel); + explicit KClientPort(KernelCore& kernel_); virtual ~KClientPort() override; void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_); diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp index 0618dc246..8ad1be762 100644 --- a/src/core/hle/kernel/k_client_session.cpp +++ b/src/core/hle/kernel/k_client_session.cpp @@ -12,7 +12,8 @@ namespace Kernel { -KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} +KClientSession::KClientSession(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_} {} KClientSession::~KClientSession() = default; void KClientSession::Destroy() { diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h index 6476a588b..720a8c243 100644 --- a/src/core/hle/kernel/k_client_session.h +++ 
b/src/core/hle/kernel/k_client_session.h @@ -33,7 +33,7 @@ class KClientSession final KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject); public: - explicit KClientSession(KernelCore& kernel); + explicit KClientSession(KernelCore& kernel_); virtual ~KClientSession(); void Initialize(KSession* parent_, std::string&& name_) { diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index f51cf3e7b..ce3bade60 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -254,8 +254,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { } // Close threads in the list. - for (auto it = thread_list.begin(); it != thread_list.end(); - it = thread_list.erase(kernel, it)) { + for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { (*it).Close(); } } diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index 986355b78..0720efece 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -8,8 +8,9 @@ namespace Kernel { -KEvent::KEvent(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel}, readable_event{kernel}, writable_event{kernel} {} +KEvent::KEvent(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, readable_event{kernel_}, writable_event{ + kernel_} {} KEvent::~KEvent() = default; diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h index 4ca869930..9a59ffb70 100644 --- a/src/core/hle/kernel/k_event.h +++ b/src/core/hle/kernel/k_event.h @@ -19,7 +19,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject); public: - explicit KEvent(KernelCore& kernel); + explicit KEvent(KernelCore& kernel_); virtual ~KEvent(); void Initialize(std::string&& name); diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h index 500f44685..540e518cd 100644 --- a/src/core/hle/kernel/k_linked_list.h +++ b/src/core/hle/kernel/k_linked_list.h @@ -124,7 +124,7 @@ public: ~KLinkedList() { // Erase all elements. - for (auto it = this->begin(); it != this->end(); it = this->erase(kernel, it)) { + for (auto it = begin(); it != end(); it = erase(it)) { } // Ensure we succeeded. 
@@ -223,7 +223,7 @@ public: this->erase(this->begin()); } - iterator erase(KernelCore& kernel, const iterator pos) { + iterator erase(const iterator pos) { KLinkedListNode* freed_node = std::addressof(*pos.m_base_it); iterator ret = iterator(BaseList::erase(pos.m_base_it)); KLinkedListNode::Free(kernel, freed_node); diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index 4a2d88008..44bfeb0d5 100644 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -17,8 +17,8 @@ KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr, VAddr end_addr) KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { auto node{memory_block_tree.begin()}; while (node != end()) { - const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; - if (node->GetAddress() <= addr && end_addr - 1 >= addr) { + const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; + if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { return node; } node = std::next(node); @@ -67,7 +67,7 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attribute) { - const VAddr end_addr{addr + num_pages * PageSize}; + const VAddr update_end_addr{addr + num_pages * PageSize}; iterator node{memory_block_tree.begin()}; prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; @@ -78,7 +78,7 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState const VAddr cur_addr{block->GetAddress()}; const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; - if (addr < cur_end_addr && cur_addr < end_addr) { + if (addr < cur_end_addr && cur_addr < update_end_addr) { if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { node = next_node; continue; @@ -89,8 +89,8 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState memory_block_tree.insert(node, block->Split(addr)); } - if (end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(end_addr)); + if (update_end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); } new_node->Update(state, perm, attribute); @@ -98,7 +98,7 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState MergeAdjacent(new_node, next_node); } - if (cur_end_addr - 1 >= end_addr - 1) { + if (cur_end_addr - 1 >= update_end_addr - 1) { break; } @@ -108,7 +108,7 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attribute) { - const VAddr end_addr{addr + num_pages * PageSize}; + const VAddr update_end_addr{addr + num_pages * PageSize}; iterator node{memory_block_tree.begin()}; while (node != memory_block_tree.end()) { @@ -117,15 +117,15 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState const VAddr cur_addr{block->GetAddress()}; const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; - if (addr < cur_end_addr && cur_addr < end_addr) { + if (addr < cur_end_addr && cur_addr < update_end_addr) { iterator new_node{node}; if (addr > cur_addr) { memory_block_tree.insert(node, block->Split(addr)); } - if (end_addr < cur_end_addr) { - new_node = 
memory_block_tree.insert(node, block->Split(end_addr)); + if (update_end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); } new_node->Update(state, perm, attribute); @@ -133,7 +133,7 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState MergeAdjacent(new_node, next_node); } - if (cur_end_addr - 1 >= end_addr - 1) { + if (cur_end_addr - 1 >= update_end_addr - 1) { break; } @@ -143,7 +143,7 @@ void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, KMemoryPermission perm) { - const VAddr end_addr{addr + num_pages * PageSize}; + const VAddr update_end_addr{addr + num_pages * PageSize}; iterator node{memory_block_tree.begin()}; while (node != memory_block_tree.end()) { @@ -152,15 +152,15 @@ void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc const VAddr cur_addr{block->GetAddress()}; const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; - if (addr < cur_end_addr && cur_addr < end_addr) { + if (addr < cur_end_addr && cur_addr < update_end_addr) { iterator new_node{node}; if (addr > cur_addr) { memory_block_tree.insert(node, block->Split(addr)); } - if (end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(end_addr)); + if (update_end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); } lock_func(new_node, perm); @@ -168,7 +168,7 @@ void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc MergeAdjacent(new_node, next_node); } - if (cur_end_addr - 1 >= end_addr - 1) { + if (cur_end_addr - 1 >= update_end_addr - 1) { break; } diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index a861c04ab..90ab8fd62 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -82,9 +82,9 @@ public: type_id = type; } - constexpr bool Contains(u64 address) const { + constexpr bool Contains(u64 addr) const { ASSERT(this->GetEndAddress() != 0); - return this->GetAddress() <= address && address <= this->GetLastAddress(); + return this->GetAddress() <= addr && addr <= this->GetLastAddress(); } constexpr bool IsDerivedFrom(u32 type) const { diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp index 734aa2a8c..feb2bb11f 100644 --- a/src/core/hle/kernel/k_port.cpp +++ b/src/core/hle/kernel/k_port.cpp @@ -9,8 +9,8 @@ namespace Kernel { -KPort::KPort(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {} +KPort::KPort(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} KPort::~KPort() = default; diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h index f1b2838d8..960f1f3a3 100644 --- a/src/core/hle/kernel/k_port.h +++ b/src/core/hle/kernel/k_port.h @@ -21,7 +21,7 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject); public: - explicit KPort(KernelCore& kernel); + explicit KPort(KernelCore& kernel_); virtual ~KPort(); static void PostDestroy([[maybe_unused]] uintptr_t arg) {} diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 174318180..bdcbaeeaa 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -118,11 +118,11 
@@ private: std::bitset<num_slot_entries> is_slot_used; }; -ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string name, +ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name, ProcessType type) { auto& kernel = system.Kernel(); - process->name = std::move(name); + process->name = std::move(process_name); process->resource_limit = kernel.GetSystemResourceLimit(); process->status = ProcessStatus::Created; @@ -373,8 +373,8 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) { void KProcess::PrepareForTermination() { ChangeStatus(ProcessStatus::Exiting); - const auto stop_threads = [this](const std::vector<KThread*>& thread_list) { - for (auto& thread : thread_list) { + const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { + for (auto& thread : in_thread_list) { if (thread->GetOwnerProcess() != this) continue; @@ -491,10 +491,10 @@ bool KProcess::IsSignaled() const { return is_signaled; } -KProcess::KProcess(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel}, - page_table{std::make_unique<KPageTable>(kernel.System())}, handle_table{kernel}, - address_arbiter{kernel.System()}, condition_var{kernel.System()}, state_lock{kernel} {} +KProcess::KProcess(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, + page_table{std::make_unique<KPageTable>(kernel_.System())}, handle_table{kernel_}, + address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_} {} KProcess::~KProcess() = default; diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index 62ab26b05..123d71cd3 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -67,7 +67,7 @@ class KProcess final KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); public: - explicit KProcess(KernelCore& kernel); + explicit KProcess(KernelCore& kernel_); ~KProcess() override; enum : u64 { @@ -90,7 +90,7 @@ public: static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; - static ResultCode Initialize(KProcess* process, Core::System& system, std::string name, + static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name, ProcessType type); /// Gets a reference to the process' page table. 
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp index 8fef4bb00..0ea2d0275 100644 --- a/src/core/hle/kernel/k_readable_event.cpp +++ b/src/core/hle/kernel/k_readable_event.cpp @@ -12,7 +12,7 @@ namespace Kernel { -KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} KReadableEvent::~KReadableEvent() = default; diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h index 1783ef0b8..33cd1dd3e 100644 --- a/src/core/hle/kernel/k_readable_event.h +++ b/src/core/hle/kernel/k_readable_event.h @@ -18,7 +18,7 @@ class KReadableEvent : public KSynchronizationObject { KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject); public: - explicit KReadableEvent(KernelCore& kernel); + explicit KReadableEvent(KernelCore& kernel_); ~KReadableEvent() override; void Initialize(KEvent* parent_, std::string&& name_) { diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index ad5095bfd..bf20bf7d0 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp @@ -10,8 +10,8 @@ namespace Kernel { constexpr s64 DefaultTimeout = 10000000000; // 10 seconds -KResourceLimit::KResourceLimit(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {} +KResourceLimit::KResourceLimit(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {} KResourceLimit::~KResourceLimit() = default; void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) { diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index 66ebf32df..0debbbb51 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h @@ -36,7 +36,7 @@ class KResourceLimit final KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject); public: - explicit KResourceLimit(KernelCore& kernel); + explicit KResourceLimit(KernelCore& kernel_); virtual ~KResourceLimit(); void Initialize(const Core::Timing::CoreTiming* core_timing_); diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 0115fe6d1..e256e9415 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -259,7 +259,7 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread } } -void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { +void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { ASSERT(system.GlobalSchedulerContext().IsLocked()); // Get a reference to the priority queue. @@ -267,7 +267,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { auto& priority_queue = GetPriorityQueue(kernel); // Rotate the front of the queue to the end. - KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority); + KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority); KThread* next_thread = nullptr; if (top_thread != nullptr) { next_thread = priority_queue.MoveToScheduledBack(top_thread); @@ -279,7 +279,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { // While we have a suggested thread, try to migrate it! 
{ - KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority); + KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority); while (suggested != nullptr) { // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); @@ -300,7 +300,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { // to the front of the queue. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { - suggested->SetActiveCore(core_id); + suggested->SetActiveCore(cpu_core_id); priority_queue.ChangeCore(suggested_core, suggested, true); IncrementScheduledCount(suggested); break; @@ -308,22 +308,22 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } // Get the next suggestion. - suggested = priority_queue.GetSamePriorityNext(core_id, suggested); + suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested); } } // Now that we might have migrated a thread with the same priority, check if we can do better. { - KThread* best_thread = priority_queue.GetScheduledFront(core_id); + KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id); if (best_thread == GetCurrentThread()) { - best_thread = priority_queue.GetScheduledNext(core_id, best_thread); + best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread); } // If the best thread we can choose has a priority the same or worse than ours, try to // migrate a higher priority thread. if (best_thread != nullptr && best_thread->GetPriority() >= priority) { - KThread* suggested = priority_queue.GetSuggestedFront(core_id); + KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id); while (suggested != nullptr) { // If the suggestion's priority is the same as ours, don't bother. if (suggested->GetPriority() >= best_thread->GetPriority()) { @@ -342,7 +342,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { - suggested->SetActiveCore(core_id); + suggested->SetActiveCore(cpu_core_id); priority_queue.ChangeCore(suggested_core, suggested, true); IncrementScheduledCount(suggested); break; @@ -350,7 +350,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } // Get the next suggestion. 
- suggested = priority_queue.GetSuggestedNext(core_id, suggested); + suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested); } } } diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index b789a64a4..13a2414e6 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -141,7 +141,7 @@ private: [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); - void RotateScheduledQueue(s32 core_id, s32 priority); + void RotateScheduledQueue(s32 cpu_core_id, s32 priority); void Schedule() { ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1); diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp index 5e44c48e2..8cbde177a 100644 --- a/src/core/hle/kernel/k_server_port.cpp +++ b/src/core/hle/kernel/k_server_port.cpp @@ -14,7 +14,7 @@ namespace Kernel { -KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} KServerPort::~KServerPort() = default; void KServerPort::Initialize(KPort* parent_, std::string&& name_) { diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h index 558c8ed4d..e76792253 100644 --- a/src/core/hle/kernel/k_server_port.h +++ b/src/core/hle/kernel/k_server_port.h @@ -29,7 +29,7 @@ private: using SessionList = boost::intrusive::list<KServerSession>; public: - explicit KServerPort(KernelCore& kernel); + explicit KServerPort(KernelCore& kernel_); virtual ~KServerPort() override; using HLEHandler = std::shared_ptr<SessionRequestHandler>; diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index c8acaa453..b28cc2499 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -23,7 +23,7 @@ namespace Kernel { -KServerSession::KServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} KServerSession::~KServerSession() { kernel.ReleaseServiceThread(service_thread); diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 77095bb85..597d76d38 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h @@ -40,7 +40,7 @@ class KServerSession final : public KSynchronizationObject, friend class ServiceThread; public: - explicit KServerSession(KernelCore& kernel); + explicit KServerSession(KernelCore& kernel_); virtual ~KServerSession() override; virtual void Destroy() override; diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp index 7b0bc177d..025b8b555 100644 --- a/src/core/hle/kernel/k_session.cpp +++ b/src/core/hle/kernel/k_session.cpp @@ -11,8 +11,8 @@ namespace Kernel { -KSession::KSession(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {} +KSession::KSession(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} KSession::~KSession() = default; void KSession::Initialize(KClientPort* port_, const std::string& name_) { diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h index 4321b7885..16901e19c 100644 --- a/src/core/hle/kernel/k_session.h +++ b/src/core/hle/kernel/k_session.h @@ -17,7 +17,7 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut 
KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject); public: - explicit KSession(KernelCore& kernel); + explicit KSession(KernelCore& kernel_); virtual ~KSession() override; void Initialize(KClientPort* port_, const std::string& name_); diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 1da57a4c3..7770b1868 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp @@ -12,14 +12,14 @@ namespace Kernel { -KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} +KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} KSharedMemory::~KSharedMemory() { kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); } -ResultCode KSharedMemory::Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_, - KProcess* owner_process_, KPageLinkedList&& page_list_, +ResultCode KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, + KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_, Svc::MemoryPermission user_permission_, PAddr physical_address_, std::size_t size_, @@ -32,7 +32,7 @@ ResultCode KSharedMemory::Initialize(KernelCore& kernel_, Core::DeviceMemory& de user_permission = user_permission_; physical_address = physical_address_; size = size_; - name = name_; + name = std::move(name_); // Get the resource limit. KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); @@ -67,9 +67,9 @@ void KSharedMemory::Finalize() { KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize(); } -ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t size, +ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size, Svc::MemoryPermission permissions) { - const u64 page_count{(size + PageSize - 1) / PageSize}; + const u64 page_count{(map_size + PageSize - 1) / PageSize}; if (page_list.GetNumPages() != page_count) { UNIMPLEMENTED_MSG("Page count does not match"); @@ -86,8 +86,8 @@ ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size ConvertToKMemoryPermission(permissions)); } -ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t size) { - const u64 page_count{(size + PageSize - 1) / PageSize}; +ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { + const u64 page_count{(unmap_size + PageSize - 1) / PageSize}; if (page_list.GetNumPages() != page_count) { UNIMPLEMENTED_MSG("Page count does not match"); diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h index 28939c93c..553a56327 100644 --- a/src/core/hle/kernel/k_shared_memory.h +++ b/src/core/hle/kernel/k_shared_memory.h @@ -24,12 +24,11 @@ class KSharedMemory final KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); public: - explicit KSharedMemory(KernelCore& kernel); + explicit KSharedMemory(KernelCore& kernel_); ~KSharedMemory() override; - ResultCode Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_, - KProcess* owner_process_, KPageLinkedList&& page_list_, - Svc::MemoryPermission owner_permission_, + ResultCode Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, + KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_, Svc::MemoryPermission user_permission_, PAddr physical_address_, std::size_t size_, std::string name_); @@ 
-37,19 +36,19 @@ public: * Maps a shared memory block to an address in the target process' address space * @param target_process Process on which to map the memory block * @param address Address in system memory to map shared memory block to - * @param size Size of the shared memory block to map + * @param map_size Size of the shared memory block to map * @param permissions Memory block map permissions (specified by SVC field) */ - ResultCode Map(KProcess& target_process, VAddr address, std::size_t size, + ResultCode Map(KProcess& target_process, VAddr address, std::size_t map_size, Svc::MemoryPermission permissions); /** * Unmaps a shared memory block from an address in the target process' address space * @param target_process Process on which to unmap the memory block * @param address Address in system memory to unmap shared memory block - * @param size Size of the shared memory block to unmap + * @param unmap_size Size of the shared memory block to unmap */ - ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t size); + ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size); /** * Gets a pointer to the shared memory block diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index 460b8a714..45380dea0 100644 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -18,18 +18,18 @@ void KSynchronizationObject::Finalize() { KAutoObject::Finalize(); } -ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, +ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, KSynchronizationObject** objects, const s32 num_objects, s64 timeout) { // Allocate space on stack for thread nodes. std::vector<ThreadListNode> thread_nodes(num_objects); // Prepare for wait. - KThread* thread = kernel.CurrentScheduler()->GetCurrentThread(); + KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread(); { // Setup the scheduling lock and sleep. - KScopedSchedulerLockAndSleep slp{kernel, thread, timeout}; + KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout}; // Check if any of the objects are already signaled. for (auto i = 0; i < num_objects; ++i) { @@ -94,13 +94,13 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, thread->SetWaitObjectsForDebugging({}); // Cancel the timer as needed. - kernel.TimeManager().UnscheduleTimeEvent(thread); + kernel_ctx.TimeManager().UnscheduleTimeEvent(thread); // Get the wait result. 
ResultCode wait_result{RESULT_SUCCESS}; s32 sync_index = -1; { - KScopedSchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel_ctx); KSynchronizationObject* synced_obj; wait_result = thread->GetWaitResult(std::addressof(synced_obj)); @@ -135,7 +135,8 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, return wait_result; } -KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {} +KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) + : KAutoObjectWithList{kernel_} {} KSynchronizationObject::~KSynchronizationObject() = default; diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index ef6dfeeca..e3f08f256 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -60,8 +60,8 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, namespace Kernel { -KThread::KThread(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {} +KThread::KThread(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} KThread::~KThread() = default; ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, @@ -479,7 +479,7 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m return RESULT_SUCCESS; } -ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { +ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { ASSERT(parent != nullptr); ASSERT(v_affinity_mask != 0); KScopedLightLock lk{activity_pause_lock}; @@ -491,18 +491,18 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { ASSERT(num_core_migration_disables >= 0); // If the core id is no-update magic, preserve the ideal core id. - if (core_id == Svc::IdealCoreNoUpdate) { - core_id = virtual_ideal_core_id; - R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination); + if (cpu_core_id == Svc::IdealCoreNoUpdate) { + cpu_core_id = virtual_ideal_core_id; + R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination); } // Set the virtual core/affinity mask. - virtual_ideal_core_id = core_id; + virtual_ideal_core_id = cpu_core_id; virtual_affinity_mask = v_affinity_mask; // Translate the virtual core to a physical core. - if (core_id >= 0) { - core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id]; + if (cpu_core_id >= 0) { + cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id]; } // Translate the virtual affinity mask to a physical one. @@ -517,7 +517,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { const KAffinityMask old_mask = physical_affinity_mask; // Set our new ideals. - physical_ideal_core_id = core_id; + physical_ideal_core_id = cpu_core_id; physical_affinity_mask.SetAffinityMask(p_affinity_mask); if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { @@ -535,7 +535,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { } } else { // Otherwise, we edit the original affinity for restoration later. 
- original_physical_ideal_core_id = core_id; + original_physical_ideal_core_id = cpu_core_id; original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); } } @@ -851,8 +851,8 @@ void KThread::RemoveWaiterImpl(KThread* thread) { thread->SetLockOwner(nullptr); } -void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); +void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) { + ASSERT(kernel_ctx.GlobalSchedulerContext().IsLocked()); while (true) { // We want to inherit priority where possible. @@ -868,7 +868,7 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { // Ensure we don't violate condition variable red black tree invariants. if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { - BeforeUpdatePriority(kernel, cv_tree, thread); + BeforeUpdatePriority(kernel_ctx, cv_tree, thread); } // Change the priority. @@ -877,11 +877,11 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { // Restore the condition variable, if relevant. if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { - AfterUpdatePriority(kernel, cv_tree, thread); + AfterUpdatePriority(kernel_ctx, cv_tree, thread); } // Update the scheduler. - KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority); + KScheduler::OnThreadPriorityChanged(kernel_ctx, thread, old_priority); // Keep the lock owner up to date. KThread* lock_owner = thread->GetLockOwner(); diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 4145ef56c..4abfc2b49 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -111,7 +111,7 @@ public: static constexpr s32 DefaultThreadPriority = 44; static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1; - explicit KThread(KernelCore& kernel); + explicit KThread(KernelCore& kernel_); ~KThread() override; public: @@ -318,7 +318,7 @@ public: [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask); - [[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask); + [[nodiscard]] ResultCode SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask); [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity); @@ -649,7 +649,7 @@ private: std::function<void(void*)>&& init_func, void* init_func_parameter); - static void RestorePriority(KernelCore& kernel, KThread* thread); + static void RestorePriority(KernelCore& kernel_ctx, KThread* thread); // For core KThread implementation ThreadContext32 thread_context_32{}; diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp index 201617d32..5bc33706d 100644 --- a/src/core/hle/kernel/k_transfer_memory.cpp +++ b/src/core/hle/kernel/k_transfer_memory.cpp @@ -9,8 +9,8 @@ namespace Kernel { -KTransferMemory::KTransferMemory(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer{kernel} {} +KTransferMemory::KTransferMemory(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_} {} KTransferMemory::~KTransferMemory() = default; diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h index f56398b9c..838fd2b18 100644 --- a/src/core/hle/kernel/k_transfer_memory.h +++ b/src/core/hle/kernel/k_transfer_memory.h @@ -26,7 +26,7 @@ class KTransferMemory final KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); public: - explicit KTransferMemory(KernelCore& kernel); + explicit KTransferMemory(KernelCore& 
kernel_); virtual ~KTransferMemory() override; ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_); diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp index a430e0661..b7b83c151 100644 --- a/src/core/hle/kernel/k_writable_event.cpp +++ b/src/core/hle/kernel/k_writable_event.cpp @@ -8,7 +8,8 @@ namespace Kernel { -KWritableEvent::KWritableEvent(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} +KWritableEvent::KWritableEvent(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_} {} KWritableEvent::~KWritableEvent() = default; diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h index 154d2382c..607b0eadb 100644 --- a/src/core/hle/kernel/k_writable_event.h +++ b/src/core/hle/kernel/k_writable_event.h @@ -18,7 +18,7 @@ class KWritableEvent final KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject); public: - explicit KWritableEvent(KernelCore& kernel); + explicit KWritableEvent(KernelCore& kernel_); ~KWritableEvent() override; virtual void Destroy() override; diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 32bbf2d9b..bd4e4d350 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -51,11 +51,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); namespace Kernel { struct KernelCore::Impl { - explicit Impl(Core::System& system, KernelCore& kernel) - : time_manager{system}, object_list_container{kernel}, system{system} {} + explicit Impl(Core::System& system_, KernelCore& kernel_) + : time_manager{system_}, object_list_container{kernel_}, system{system_} {} - void SetMulticore(bool is_multicore) { - this->is_multicore = is_multicore; + void SetMulticore(bool is_multi) { + is_multicore = is_multi; } void Initialize(KernelCore& kernel) { @@ -599,19 +599,19 @@ struct KernelCore::Impl { irs_shared_mem = KSharedMemory::Create(system.Kernel()); time_shared_mem = KSharedMemory::Create(system.Kernel()); - hid_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, Svc::MemoryPermission::None, Svc::MemoryPermission::Read, hid_phys_addr, hid_size, "HID:SharedMemory"); - font_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + font_shared_mem->Initialize(system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, Svc::MemoryPermission::None, Svc::MemoryPermission::Read, font_phys_addr, font_size, "Font:SharedMemory"); - irs_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, Svc::MemoryPermission::None, Svc::MemoryPermission::Read, irs_phys_addr, irs_size, "IRS:SharedMemory"); - time_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + time_shared_mem->Initialize(system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, Svc::MemoryPermission::None, Svc::MemoryPermission::Read, time_phys_addr, time_size, "Time:SharedMemory"); |