Flow-IPC 1.0.0
Flow-IPC project: Full implementation reference.
native_socket_stream_acceptor.cpp
/* Flow-IPC: Core
 * Copyright 2023 Akamai Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy
 * of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in
 * writing, software distributed under the License is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing
 * permissions and limitations under the License. */

/// @file
#include "ipc/transport/native_socket_stream_acceptor.hpp"
#include "ipc/transport/error.hpp"
#include <flow/error/error.hpp>
#include <flow/common.hpp>
#include <boost/move/make_unique.hpp>

namespace ipc::transport
{

// Initializers.

// It's a reference due to `static` things being initialized in unknown order before main().
const Shared_name& Native_socket_stream_acceptor::S_RESOURCE_TYPE_ID = Sync_io_obj::S_RESOURCE_TYPE_ID;

// Implementations.

Native_socket_stream_acceptor::Native_socket_stream_acceptor(flow::log::Logger* logger_ptr,
                                                             const Shared_name& absolute_name_arg,
                                                             Error_code* err_code) :
  flow::log::Log_context(logger_ptr, Log_component::S_TRANSPORT),
  m_absolute_name(absolute_name_arg),
  m_worker(get_logger(), flow::util::ostream_op_string(*this)), // Start the 1 thread.
  m_next_peer_socket(*(m_worker.task_engine())) // Steady state: start it as empty, per doc header.
{
  using asio_local_stream_socket::endpoint_at_shared_name;
  using asio_local_stream_socket::Endpoint;
  using asio_local_stream_socket::Acceptor;
  using flow::error::Runtime_error;
  using boost::system::system_error;

  /* For simplicity we'll just do all the work in thread W, which we're about to start. Whether to do some initial
   * stuff in this thread instead doesn't matter much. We have promised that,
   * upon our return -- no later -- the Native_socket_stream_acceptor
   * will be listening (assuming no errors). So we wait for that startup to finish via the start() arg below. */
  Error_code sys_err_code;

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Awaiting initial setup/listening in worker thread.");
  m_worker.start([&]() // Execute all this synchronously in the thread.
  {
    auto const asio_engine = m_worker.task_engine();

    FLOW_LOG_INFO("Acceptor [" << *this << "]: Starting (am in worker thread).");

    const auto local_endpoint = endpoint_at_shared_name(get_logger(), m_absolute_name, &sys_err_code);
    assert((local_endpoint == Endpoint()) == bool(sys_err_code));
    if (sys_err_code) // It logged.
    {
      return; // Escape the post() callback, that is.
    }
    // else

    // Start a listening acceptor socket at that endpoint!

    try
    {
      // Throws on error. (It's annoying there's no error-code-returning API; but it's normal in boost.asio ctors.)
      m_acceptor.reset(new Acceptor(*asio_engine, local_endpoint));
      // @todo Is reuse_addr appropriate? Do we run into the already-exists error in practice? Revisit.
    }
    catch (const system_error& exc)
    {
      assert(!m_acceptor);
      FLOW_LOG_WARNING("Acceptor [" << *this << "]: Unable to open/bind/listen native local stream socket; could "
                       "be due to address/name clash; details logged below.");
      sys_err_code = exc.code();
      FLOW_ERROR_SYS_ERROR_LOG_WARNING();
      return; // Escape the post() callback, that is.
    }

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "Successfully made endpoint and open/bind/listen-ed on it. Ready for connections.");

    /* OK! Background-wait for the first incoming connection. As explained in the m_next_peer_socket doc header,
     * m_next_peer_socket is empty/unconnected at entry to each such background-wait; set to connected state
     * by boost.asio as it launches our callback; and moved/emptied again by that callback, as it starts the next
     * background-wait. We have just the one thread, so it's a simple serial sequence. */

    m_acceptor->async_accept(m_next_peer_socket,
                             [this](const Error_code& async_err_code)
    {
      // We are in thread W.
      on_next_peer_socket_or_error(async_err_code);
    });

    assert(!sys_err_code); // Success.
  }); // m_worker.start()

  if (sys_err_code)
  {
    /* Just keep the thread going; even though it's not gonna be doing any listening.
     * @todo We could stop it here no problem. And it could help something, somewhere, marginally to not have an
     * extra thread around. But then this has to be coded for all over the place;
     * didn't seem essential, particularly since these error conditions are highly irregular and likely to be very
     * bad anyway. */

    if (err_code)
    {
      *err_code = sys_err_code;
      return;
    }
    // else
    throw Runtime_error(sys_err_code, FLOW_UTIL_WHERE_AM_I_STR());
  }
  // else
  assert(!sys_err_code);

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Ready for incoming connections.");
} // Native_socket_stream_acceptor::Native_socket_stream_acceptor()
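
/* A minimal usage sketch of the contract just implemented (illustrative only, not part of this file; assumes a
 * flow::log::Logger* `logger`, plus the async_accept() template declared in our .hpp, which forwards to
 * async_accept_impl() below; the name "my_acceptor" is made up for the example):
 *
 *   Error_code err;
 *   Native_socket_stream_acceptor acc(logger, Shared_name::ct("my_acceptor"), &err);
 *   if (err) { ... open/bind/listen failed; acc is listening to nothing ... }
 *   else
 *   {
 *     Native_socket_stream target; // NULL-state peer object, to be filled out on success.
 *     acc.async_accept(&target, [&](const Error_code& async_err)
 *     {
 *       // On success `target` is now a connected PEER-state stream; on acceptor shutdown,
 *       // async_err indicates the operation was aborted (see dtor below).
 *     });
 *   }
 */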

Native_socket_stream_acceptor::~Native_socket_stream_acceptor()
{
  using flow::async::Single_thread_task_loop;

  // We are in thread U. By contract in doc header, they must not call us from a completion handler (thread W).

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Shutting down. Next acceptor socket will close; all our internal "
                "async handlers will be canceled; and worker thread will be joined.");

  // stop() logic is similar to what happens in Native_socket_stream::Impl dtor. Keeping cmnts light.
  m_worker.stop();
  // Thread W is (synchronously!) no more.

  // Post-stop() poll() logic is similar to what happens in Native_socket_stream::Impl dtor. Keeping cmnts light.

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Continuing shutdown. Next we will run pending handlers from some "
                "other thread. In this user thread we will await those handlers' completion and then return.");
  Single_thread_task_loop one_thread(get_logger(), "temp_deinit");
  one_thread.start([&]()
  {
    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "In transient finisher thread: Shall run all pending internal handlers (typically none).");

    const auto task_engine = m_worker.task_engine();
    task_engine->restart();
    const auto count = task_engine->poll();
    if (count != 0)
    {
      FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                    "In transient finisher thread: Ran [" << count << "] internal handlers after all.");
    }
    task_engine->stop();

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "In transient finisher thread: Shall run all pending user handlers (feeding operation-aborted).");

    while (!m_pending_user_requests_q.empty())
    {
      FLOW_LOG_TRACE("Running a queued async-accept completion handler.");
      m_pending_user_requests_q.front()
        ->m_on_done_func(error::Code::S_OBJECT_SHUTDOWN_ABORTED_COMPLETION_HANDLER);
      m_pending_user_requests_q.pop();
      FLOW_LOG_TRACE("User accept handler finished. Popped from user request deficit queue.");
    } // while (!m_pending_user_requests_q.empty())

    FLOW_LOG_INFO("Transient finisher exiting.");
  }); // one_thread.start()
  // Here thread exits/joins synchronously.
} // Native_socket_stream_acceptor::~Native_socket_stream_acceptor()
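
/* Illustration of the resulting contract (hypothetical caller code): an async_accept() handler still outstanding
 * at dtor time is not dropped; it fires exactly once, from the transient finisher thread, with the shutdown code
 * fed above:
 *
 *   {
 *     Native_socket_stream_acceptor acc(logger, name, &err);
 *     Native_socket_stream target;
 *     acc.async_accept(&target, [](const Error_code& async_err)
 *     {
 *       // Reached before the dtor below returns, with
 *       // async_err == error::Code::S_OBJECT_SHUTDOWN_ABORTED_COMPLETION_HANDLER.
 *     });
 *   } // Acceptor dtor runs here.
 */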

void Native_socket_stream_acceptor::on_next_peer_socket_or_error(const Error_code& sys_err_code)
{
  using flow::util::ostream_op_string;
  using std::holds_alternative;

  // We are in thread W.
  if (sys_err_code == boost::asio::error::operation_aborted)
  {
    return; // Stuff is shutting down. GTFO.
  }
  // else
  assert(sys_err_code != boost::asio::error::would_block); // Not possible for async handlers.

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Incoming connection, or error when trying to accept one.");
  if (sys_err_code)
  {
    // Close/empty the potentially-almost-kinda-accepted socket. Probably unnecessary but can't hurt.
    Error_code dummy;
    m_next_peer_socket.close(dummy);

    if (sys_err_code == boost::asio::error::connection_aborted)
    {
      FLOW_LOG_WARNING("Incoming connection was aborted partway through being established; this is quite weird "
                       "but should not be fatal. Ignoring. Still listening.");
      // Fall through.
    }
    else
    {
      FLOW_LOG_WARNING("Acceptor [" << *this << "]: The background accept failed fatally. "
                       "Closing acceptor; no longer listening. Details follow.");
      FLOW_ERROR_SYS_ERROR_LOG_WARNING();

      // Close/destroy the acceptor. And of course don't call async_accept() on it again as we normally would below.
      m_acceptor->close(dummy);

      // Queue up and handle result.

      /* Shouldn't have gotten here if any other error had been emitted. We would've closed m_acceptor and certainly
       * not run m_acceptor->async_accept() again. */
      assert(m_pending_results_q.empty() || (!holds_alternative<Error_code>(m_pending_results_q.back())));

      // We are in steady state. Make this one change...
      m_pending_results_q.push(sys_err_code);
      // ...and immediately handle it appropriately to get back into steady state.
      finalize_q_surplus_on_error();

      // Do *not* m_acceptor->async_accept() again. (It's closed actually, so we couldn't anyway.)
      return;
    }
  } // if (sys_err_code)
  else // if (!sys_err_code)
  {
    /* We'll enqueue the resulting peer socket handle from the new socket. There are a few subtleties:
     * - We need to make a new Peer, which must be passed the boost.asio Peer_socket, or similar,
     *   to wrap. On that guy, they will internally call things like Peer_socket::async_read_some().
     * - m_next_peer_socket, basically, stores two pieces of data: the raw native socket handle (of new peer socket),
     *   and the Task_engine that is to be used to execute the logic of all async_ calls on it (e.g.,
     *   Peer_socket::async_read_some()) (basically 1-1 to our m_worker).
     * - Can we pass m_next_peer_socket into the Native_socket_stream ctor then? Well, no. Why? Answer:
     *   Suppose we did. Now suppose Native_socket_stream calls async_read_some() on it. Suppose some bytes are
     *   indeed read from the opposing peer. Now some callback internal to the Native_socket_stream must be called.
     *   On what thread would it be called? Answer: per above, on m_worker. Do we want that? Well, no, because
     *   that class sets up its own thread for that. (Briefly: Actually, why not use one thread? Answer: Even
     *   if we thought that was a good design for perf or something, consider that m_worker goes away in
     *   our dtor, so we'd have to actively share m_worker (maybe via shared_ptr) with the offshoot
     *   Native_socket_stream. Simply, we just aren't designing it that way. We want them to run an independent
     *   thread, though this is not where I will justify that.)
     * - So what to do? I was hoping there'd be a move-constructor-ish thing in Peer_socket,
     *   but there isn't one that lets one also specify the Task_engine; and it isn't apparently possible to change
     *   a Peer_socket's Task_engine (thing returned by .get_executor()) after construction.
     *   Worst-case, we could suck out the native socket handle (see just above) and then Native_socket_stream
     *   ctor could construct and take over that guy. It should be entirely safe, since it is a new socket, and
     *   we haven't started any async ops on it yet, but it still feels dodgy. Turns out the safe way to do it
     *   is basically that, but one can Peer_socket::release() to "officially" safely "eject" an open
     *   socket. So we do that, and Peer_socket can take over the "ejected" native socket handle in
     *   its ctor.
     * - We promise in our contract to propagate get_logger() to any child peer sockets. This is where it happens. */

    /* .release() won't throw except in Windows <8.1, where it'll always throw (unsupported; per boost.asio docs).
     * We don't worry about Windows generally; and anyway in .hpp somewhere we already should've ensured Linux in
     * particular. Just re-check that for sanity for now. (This is a bit of future-proofing, so that the problem is
     * obvious if porting the code.) */
#ifndef FLOW_OS_LINUX
#  error "Should not have gotten to this line; should have required Linux; the next thing assumes not-Win-<8.1."
#endif
    // Could store a raw handle too, but this is exactly as fast and adds some logging niceties.
    Native_handle native_peer_socket(m_next_peer_socket.release());
    assert(!m_next_peer_socket.is_open()); // Non-exhaustive sanity check that it's back in empty/unconnected state.
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: "
                   "Ejected ownership of new incoming peer socket [" << native_peer_socket << "].");

    auto new_peer
      = boost::movelib::make_unique<Peer>
          (get_logger(),
           // Nickname is, like, "_pathName_of_this_acceptor=>native_hndl[35]" (as I write this).
           ostream_op_string(m_absolute_name.str(), "=>", native_peer_socket),
           std::move(native_peer_socket));
    // Caution: native_peer_socket is now invalid.
    assert(native_peer_socket.null());

    // Queue up and handle result.

    // As above -- on error we wouldn't have kept trying to accept more.
    assert(m_pending_results_q.empty() || (!holds_alternative<Error_code>(m_pending_results_q.back())));

    // We are in steady state. Make this one change...
    m_pending_results_q.emplace(std::move(new_peer)); // (new_peer may now be hosed.)
    // ...and immediately handle it appropriately to get back into steady state.
    finalize_q_surplus_on_success();
  } // else if (!sys_err_code)

  // Either there was success (!sys_err_code), or a non-fatal error (otherwise). Keep the async chain going.

  /* @todo Does it help perf-wise to spin through non-blocking accepts here (in case more incoming peers have been
   * queued up by OS) until would-block? I (ygoldfel) have done it in the past when doing TCP/UDP reads, but I never
   * really checked whether it's beneficial, and anyway this situation is not really the same (incoming load
   * should be much less intense here). */

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Starting the next background accept.");
  m_acceptor->async_accept(m_next_peer_socket,
                           [this](const Error_code& async_err_code)
  {
    // We are in thread W.
    on_next_peer_socket_or_error(async_err_code);
  });
} // Native_socket_stream_acceptor::on_next_peer_socket_or_error()
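
/* The .release()-then-adopt hand-off used above, reduced to bare boost.asio (illustrative sketch; only
 * basic_socket::release() and the adopting socket ctor are used, and Linux is assumed, per the #error guard
 * above):
 *
 *   namespace asio = boost::asio;
 *   using Proto = asio::local::stream_protocol;
 *   asio::io_context loop_a; // The accepting loop (for us: thread W's Task_engine).
 *   asio::io_context loop_b; // The loop the new peer object runs privately (for us: inside Peer).
 *   Proto::socket accepted(loop_a);
 *   // ... async_accept() completes; `accepted` is now connected ...
 *   const auto raw_fd = accepted.release();         // Safely eject the native handle; `accepted` is empty again.
 *   Proto::socket adopted(loop_b, Proto(), raw_fd); // Re-wrap on the other loop; its async ops now run there.
 */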

void Native_socket_stream_acceptor::async_accept_impl(Peer* target_peer, On_peer_accepted_func&& on_done_func)
{
  using boost::movelib::make_unique;
  using std::get;
  using std::holds_alternative;

  // We are in thread U/W. (They *are* allowed to invoke async_accept() from within their completion handler.)

  /* We don't lock our state, hence we do everything in thread W.
   *
   * If we are in thread U: Post on thread W.
   *
   * If we are in thread W already (being invoked from an earlier user completion handler): Still post on thread W.
   * Otherwise we may well invoke the handler synchronously (if surplus is available at the moment), which would
   * mean nested handler invocation, which we promised not to do (for good reason: if, say, their handler
   * is bracketed by a non-recursive lock, then they would get a deadlock trying to acquire the lock in
   * the 2nd -- inner -- handler execution). */
  m_worker.post([this, target_peer, on_done_func = std::move(on_done_func)]
                  () mutable // To allow for the on_done_func to be move()d again.
  {
    // We are in thread W.

    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Handling async-accept request.");

    auto new_req = make_unique<User_request>();
    new_req->m_target_peer = target_peer;
    new_req->m_on_done_func = std::move(on_done_func);

    // We are in steady state. Make this one change...
    m_pending_user_requests_q.emplace(std::move(new_req)); // (new_req may now be hosed.)
    // ...and immediately handle it appropriately to get back into steady state:

    if (m_pending_results_q.empty())
    {
      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-accept request pushed onto deficit queue; "
                     "but there is no surplus (no pending results). Will await results.");
      return;
    }
    // else if (!m_pending_results_q.empty())

    /* If deficit existed *before* the request was pushed, and there's surplus too, then it wasn't steady state
     * pre-push. Violates our invariant (see data member doc headers). */
    assert(m_pending_user_requests_q.size() == 1);

    auto& peer_or_err_code = m_pending_results_q.front();
    if (holds_alternative<Error_code>(peer_or_err_code))
    {
      assert(m_pending_results_q.size() == 1); // An error always caps the queue (and never leaves it).
      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-accept request pushed onto deficit queue; "
                     "and there is surplus in the form of a fatal error code. Will feed error to the request "
                     "*without* popping it from surplus queue (size remains 1).");
      feed_error_result_to_deficit(get<Error_code>(peer_or_err_code));
    }
    else
    {
      assert(holds_alternative<Peer_ptr>(peer_or_err_code));

      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-accept request pushed onto deficit queue; "
                     "and there is surplus in the form of a new peer handle. Will feed handle to the request. "
                     "Queue size will become [" << (m_pending_results_q.size() - 1) << "].");

      Peer_ptr peer(std::move(get<Peer_ptr>(peer_or_err_code)));
      m_pending_results_q.pop();
      feed_success_result_to_deficit(std::move(peer));
    }
  }); // m_worker.post()
} // Native_socket_stream_acceptor::async_accept_impl()
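
/* The steady-state invariant that the above and the finalize_*()/feed_*() helpers below restore after every push
 * (restated from the data-member doc headers; assert_steady_state() is hypothetical, not a real member):
 *
 *   void assert_steady_state() const
 *   {
 *     // Surplus and deficit never coexist: at most one of the two queues is non-empty.
 *     assert(m_pending_results_q.empty() || m_pending_user_requests_q.empty());
 *     // An error, if present, caps m_pending_results_q: it is the last element and never leaves.
 *     assert(m_pending_results_q.empty()
 *            || !std::holds_alternative<Error_code>(m_pending_results_q.front())
 *            || (m_pending_results_q.size() == 1));
 *   }
 */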

void Native_socket_stream_acceptor::finalize_q_surplus_on_error()
{
  using std::get;
  using std::holds_alternative;

  // We are in thread W.

  assert((!m_pending_results_q.empty()) && holds_alternative<Error_code>(m_pending_results_q.back()));

  if (m_pending_user_requests_q.empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Fatal error pushed onto surplus queue; "
                   "but there is no deficit (no pending requests). Will await async-accept request(s).");
    return;
  }
  // else if (!m_pending_user_requests_q.empty())

  /* If surplus existed *before* the error was pushed, and there's deficit too, then it wasn't steady state pre-push.
   * Violates our pre-condition. */
  assert(m_pending_results_q.size() == 1);

  const auto err_code = get<Error_code>(m_pending_results_q.front());
  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Fatal error pushed onto surplus queue; "
                 "and there is deficit (1+ pending requests). Will feed error to all pending requests *without* "
                 "popping surplus queue, whose size remains 1.");
  feed_error_result_to_deficit(err_code);
} // Native_socket_stream_acceptor::finalize_q_surplus_on_error()

void Native_socket_stream_acceptor::finalize_q_surplus_on_success()
{
  using std::get;
  using std::holds_alternative;

  // We are in thread W.

  assert((!m_pending_results_q.empty()) && holds_alternative<Peer_ptr>(m_pending_results_q.back()));

  if (m_pending_user_requests_q.empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: New peer socket handle pushed onto surplus queue; "
                   "but there is no deficit (no pending requests). Will await async-accept request(s).");
    return;
  }
  // else if (!m_pending_user_requests_q.empty())

  /* If surplus existed *before* the handle was pushed, and there's deficit too, then it wasn't steady state pre-push.
   * Violates our pre-condition. */
  assert(m_pending_results_q.size() == 1);

  Peer_ptr peer(std::move(get<Peer_ptr>(m_pending_results_q.front())));
  m_pending_results_q.pop();
  FLOW_LOG_TRACE("Acceptor [" << *this << "]: New peer socket handle pushed onto surplus queue; "
                 "and there is deficit (1+ pending requests). Will feed to next pending request, having "
                 "popped it from surplus queue (size is now 0).");
  feed_success_result_to_deficit(std::move(peer));
} // Native_socket_stream_acceptor::finalize_q_surplus_on_success()

void Native_socket_stream_acceptor::feed_error_result_to_deficit(const Error_code& err_code)
{
  assert(!m_pending_user_requests_q.empty());

  size_t idx = 0;
  do // while (!empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Feeding to user async-accept request handler [" << idx << "]: "
                   "Error code [" << err_code << "] [" << err_code.message() << "].");
    m_pending_user_requests_q.front()->m_on_done_func(err_code);
    m_pending_user_requests_q.pop();

    ++idx;
  }
  while (!m_pending_user_requests_q.empty());
} // Native_socket_stream_acceptor::feed_error_result_to_deficit()

void Native_socket_stream_acceptor::feed_success_result_to_deficit(Peer_ptr&& peer)
{
  assert(!m_pending_user_requests_q.empty());

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Feeding to user async-accept request handler: "
                 "Socket stream [" << *peer << "]. User request queue size post-pop is "
                 "[" << (m_pending_user_requests_q.size() - 1) << "].");
  auto& head_request = m_pending_user_requests_q.front();
  *head_request->m_target_peer = std::move(*peer);
  head_request->m_on_done_func(Error_code());
  m_pending_user_requests_q.pop();
} // Native_socket_stream_acceptor::feed_success_result_to_deficit()

const Shared_name& Native_socket_stream_acceptor::absolute_name() const
{
  return m_absolute_name;
}

std::ostream& operator<<(std::ostream& os, const Native_socket_stream_acceptor& val)
{
  return os << "sh_name[" << val.absolute_name() << "]@" << static_cast<const void*>(&val);
}

} // namespace ipc::transport