// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <tuple>
#include <utility>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
ServerSession::ServerSession(KernelCore& kernel) : WaitObject{kernel} {}

ServerSession::~ServerSession() {
    // This destructor will be called automatically when the last ServerSession handle is closed by
    // the emulated application.

    // Decrease the port's connection count.
    if (parent->port)
        parent->port->ConnectionClosed();

    // TODO(Subv): Wake up all the ClientSession's waiting threads and set
    // the SendSyncRequest result to 0xC920181A.

    parent->server = nullptr;
}
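
// Creates a ServerSession with no parent Session attached; CreateSessionPair is responsible for
// linking the result with its ClientSession counterpart.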
ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, std::string name) {
    SharedPtr<ServerSession> server_session(new ServerSession(kernel));

    server_session->name = std::move(name);
    server_session->parent = nullptr;

    return MakeResult(std::move(server_session));
}
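
// A server endpoint is acquirable when at least one request is pending and no request is currently
// being handled; closed sessions never wait, so that svcReplyAndReceive can report the error.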
bool ServerSession::ShouldWait(Thread* thread) const {
    // Closed sessions should never wait; an error will be returned from svcReplyAndReceive.
    if (parent->client == nullptr)
        return false;

    // Wait if we have no pending requests, or if we're currently handling a request.
    return pending_requesting_threads.empty() || currently_handling != nullptr;
}

void ServerSession::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // We are now handling a request; pop it from the stack.
    // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
    ASSERT(!pending_requesting_threads.empty());
    currently_handling = pending_requesting_threads.back();
    pending_requesting_threads.pop_back();
}
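
// Dispatches an IPC request made on a domain-converted session. The domain message header selects
// the command, and object_id is a 1-based index into domain_request_handlers that identifies the
// target interface.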
ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
    auto* const domain_message_header = context.GetDomainMessageHeader();
    if (domain_message_header) {
        // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
        context.SetDomainRequestHandlers(domain_request_handlers);

        // If there is a DomainMessageHeader, then this is CommandType "Request"
        const u32 object_id{context.GetDomainMessageHeader()->object_id};
        switch (domain_message_header->command) {
        case IPC::DomainMessageHeader::CommandType::SendMessage:
            if (object_id > domain_request_handlers.size()) {
                LOG_CRITICAL(IPC,
                             "object_id {} is too big! This probably means a recent service call "
                             "to {} needed to return a new interface!",
                             object_id, name);
                UNREACHABLE();
                return RESULT_SUCCESS; // Ignore error if asserts are off
            }
            return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);

        case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
            LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

            domain_request_handlers[object_id - 1] = nullptr;

            IPC::ResponseBuilder rb{context, 2};
            rb.Push(RESULT_SUCCESS);
            return RESULT_SUCCESS;
        }
        }

        LOG_CRITICAL(IPC, "Unknown domain command={}",
                     static_cast<int>(domain_message_header->command.Value()));
        ASSERT(false);
    }

    return RESULT_SUCCESS;
}
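
// Handles a sync request on this session: the command buffer is read from the requesting thread's
// TLS block, dispatched to the domain or HLE handler, and the thread is either woken after a
// simulated IPC delay (HLE) or queued until the server replies (LLE).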
ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
    // The ServerSession received a sync request; this means that there is new data available
    // from its ClientSession, so wake up any threads that may be waiting on svcReplyAndReceive or
    // similar.
    Kernel::HLERequestContext context(this);
    u32* cmd_buf = reinterpret_cast<u32*>(Memory::GetPointer(thread->GetTLSAddress()));
    context.PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);

    ResultCode result = RESULT_SUCCESS;
    // If the session has been converted to a domain, handle the domain request
    if (IsDomain() && context.GetDomainMessageHeader()) {
        result = HandleDomainSyncRequest(context);
        // If there is no domain header, the regular session handler is used
    } else if (hle_handler != nullptr) {
        // If this ServerSession has an associated HLE handler, forward the request to it.
        result = hle_handler->HandleSyncRequest(context);
    }

    if (thread->GetStatus() == ThreadStatus::Running) {
        // Put the thread to sleep until the server replies; it will be awoken in
        // svcReplyAndReceive for LLE servers.
        thread->SetStatus(ThreadStatus::WaitIPC);

        if (hle_handler != nullptr) {
            // For HLE services, we put the request threads to sleep for a short duration to
            // simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep
            // for other reasons like an async callback. The IPC overhead is needed to prevent
            // starvation when a thread only does sync requests to HLE services while a
            // lower-priority thread is waiting to run.

            // This delay was approximated in a homebrew application by measuring the average time
            // it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
            // request to the GSP:GPU service on an n3DS with firmware 11.6. The measured values
            // have a high variance and vary between models.
            static constexpr u64 IPCDelayNanoseconds = 39000;
            thread->WakeAfterDelay(IPCDelayNanoseconds);
        } else {
            // Add the thread to the list of threads that have issued a sync request with this
            // server.
            pending_requesting_threads.push_back(std::move(thread));
        }
    }

    // If this ServerSession does not have an HLE implementation, just wake up the threads waiting
    // on it.
    WakeupAllWaitingThreads();

    // Handle the case where a ConvertToDomain command was issued: the conversion must be done at
    // the end of the command, so that only commands following this one are handled as domains.
    if (convert_to_domain) {
        ASSERT_MSG(IsSession(), "ServerSession is already a domain instance.");
        domain_request_handlers = {hle_handler};
        convert_to_domain = false;
    }

    return result;
}
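
// Creates the two endpoints of a session: a ServerSession and a ClientSession joined by a shared
// Session parent, optionally associated with the ClientPort the connection was made through.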
ServerSession::SessionPair ServerSession::CreateSessionPair(KernelCore& kernel,
                                                            const std::string& name,
                                                            SharedPtr<ClientPort> port) {
    auto server_session = ServerSession::Create(kernel, name + "_Server").Unwrap();
    SharedPtr<ClientSession> client_session(new ClientSession(kernel));
    client_session->name = name + "_Client";

    std::shared_ptr<Session> parent(new Session);
    parent->client = client_session.get();
    parent->server = server_session.get();
    parent->port = std::move(port);

    client_session->parent = parent;
    server_session->parent = parent;

    return std::make_tuple(std::move(server_session), std::move(client_session));
}

} // namespace Kernel