Flow-IPC 1.0.2
Flow-IPC project: Full implementation reference.
native_socket_stream_acceptor.cpp
/* Flow-IPC: Core
 * Copyright 2023 Akamai Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy
 * of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in
 * writing, software distributed under the License is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing
 * permissions and limitations under the License. */

/// @file

#include <flow/error/error.hpp>
#include <flow/common.hpp>
#include <boost/move/make_unique.hpp>

namespace ipc::transport
{

// Initializers.

// It's a reference due to `static` things being initialized in unknown order before main().
const Shared_name& Native_socket_stream_acceptor::S_RESOURCE_TYPE_ID = Sync_io_obj::S_RESOURCE_TYPE_ID;

// Implementations.

Native_socket_stream_acceptor::Native_socket_stream_acceptor(flow::log::Logger* logger_ptr,
                                                             const Shared_name& absolute_name_arg,
                                                             Error_code* err_code) :
  flow::log::Log_context(logger_ptr, Log_component::S_TRANSPORT),
  m_absolute_name(absolute_name_arg),
  m_worker(get_logger(), flow::util::ostream_op_string(*this)), // Start the 1 thread.
  m_next_peer_socket(*(m_worker.task_engine())) // Steady state: start it as empty, per doc header.
{
  using asio_local_stream_socket::endpoint_at_shared_name;
  using asio_local_stream_socket::Endpoint;
  using asio_local_stream_socket::Acceptor;
  using flow::error::Runtime_error;
  using boost::system::system_error;

  /* For simplicity we'll just do all the work in thread W we're about to start. Whether to do some initial stuff
   * in this thread or not doesn't matter much. We have promised that,
   * upon our return -- no later -- the Native_socket_stream_acceptor
   * will be listening (assuming no errors). So wait for that (the startup) to finish using start() arg. */
  Error_code sys_err_code;

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Awaiting initial setup/listening in worker thread.");
  m_worker.start([&]() // Execute all this synchronously in the thread.
  {
    auto const asio_engine = m_worker.task_engine();

    FLOW_LOG_INFO("Acceptor [" << *this << "]: Starting (am in worker thread).");

    const auto local_endpoint = endpoint_at_shared_name(get_logger(), m_absolute_name, &sys_err_code);
    assert((local_endpoint == Endpoint()) == bool(sys_err_code));
    if (sys_err_code) // It logged.
    {
      return; // Escape the post() callback, that is.
    }
    // else

    // Start a listening acceptor socket at that endpoint!

    try
    {
      // Throws on error. (It's annoying there's no error-code-returning API; but it's normal in boost.asio ctors.)
      m_acceptor.reset(new Acceptor(*asio_engine, local_endpoint));
      // @todo Is reuse_addr appropriate? Do we run into the already-exists error in practice? Revisit.
    }
    catch (const system_error& exc)
    {
      assert(!m_acceptor);
      FLOW_LOG_WARNING("Acceptor [" << *this << "]: Unable to open/bind/listen native local stream socket; could "
                       "be due to address/name clash; details logged below.");
      sys_err_code = exc.code();
      FLOW_ERROR_SYS_ERROR_LOG_WARNING();
      return; // Escape the post() callback, that is.
    }

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "Successfully made endpoint and open/bind/listen-ed on it. Ready for connections.");

    /* OK! Background-wait for the first incoming connection. As explained in the m_next_peer_socket doc header,
     * m_next_peer_socket is empty/unconnected at entry to each such background-wait; set to connected state
     * by boost.asio as it launches our callback; and moved/emptied again by that callback, as it starts the next
     * background-wait. We have just the one thread, so it's just a simple serial sequence. */

    m_acceptor->async_accept(m_next_peer_socket,
                             [this](const Error_code& async_err_code)
    {
      // We are in thread W.
      on_next_peer_socket_or_error(async_err_code);
    });

    assert(!sys_err_code); // Success.
  }); // m_worker.start()

  if (sys_err_code)
  {
    /* Just keep the thread going, even though it's not gonna be doing any listening.
     * @todo We could stop it here no problem. And it could help something, somewhere, marginally to not have an
     * extra thread around. But then this has to be coded for all over the place;
     * didn't seem essential, particularly since these error conditions are highly irregular and likely to be very
     * bad anyway. */

    if (err_code)
    {
      *err_code = sys_err_code;
      return;
    }
    // else
    throw Runtime_error(sys_err_code, FLOW_UTIL_WHERE_AM_I_STR());
  }
  // else
  assert(!sys_err_code);

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Ready for incoming connections.");
} // Native_socket_stream_acceptor::Native_socket_stream_acceptor()
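
/* A minimal usage sketch (illustrative only; not part of this file). Assuming a valid `flow::log::Logger* logger`
 * and a `Shared_name name` -- both hypothetical identifiers -- construction either throws or reports via the
 * optional Error_code*, per the ctor contract above:
 *
 *   Error_code err;
 *   Native_socket_stream_acceptor acceptor(logger, name, &err); // Begins listening in the background.
 *   if (err)
 *   {
 *     // Could not open/bind/listen -- e.g., a name clash; handle/report.
 *   }
 *   else
 *   {
 *     // Listening; acceptor.async_accept(...) may now be invoked (see async_accept_impl() below).
 *   }
 */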

Native_socket_stream_acceptor::~Native_socket_stream_acceptor()
{
  using flow::async::Single_thread_task_loop;

  // We are in thread U. By contract in doc header, they must not call us from a completion handler (thread W).

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Shutting down. Next acceptor socket will close; all our internal "
                "async handlers will be canceled; and the worker thread will be joined.");

  // stop() logic is similar to what happens in Native_socket_stream::Impl dtor. Keeping cmnts light.
  m_worker.stop();
  // Thread W is (synchronously!) no more.

  // Post-stop() poll() logic is similar to what happens in Native_socket_stream::Impl dtor. Keeping cmnts light.

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Continuing shutdown. Next we will run pending handlers from some "
                "other thread. In this user thread we will await those handlers' completion and then return.");
  Single_thread_task_loop one_thread(get_logger(), "temp_deinit");
  one_thread.start([&]()
  {
    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "In transient finisher thread: Shall run all pending internal handlers (typically none).");

    const auto task_engine = m_worker.task_engine();
    task_engine->restart();
    const auto count = task_engine->poll();
    if (count != 0)
    {
      FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                    "In transient finisher thread: Ran [" << count << "] internal handlers after all.");
    }
    task_engine->stop();

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "In transient finisher thread: Shall run all pending user handlers (feeding operation-aborted).");

    while (!m_pending_user_requests_q.empty())
    {
      FLOW_LOG_TRACE("Running a queued async-accept completion handler.");
      m_pending_user_requests_q.front()
        ->m_on_done_func(error::Code::S_OBJECT_SHUTDOWN_ABORTED_COMPLETION_HANDLER);
      m_pending_user_requests_q.pop();
      FLOW_LOG_TRACE("User accept handler finished. Popped from user request deficit queue.");
    } // while (!m_pending_user_requests_q.empty())

    FLOW_LOG_INFO("Transient finisher exiting.");
  }); // one_thread.start()
  // Here thread exits/joins synchronously.
} // Native_socket_stream_acceptor::~Native_socket_stream_acceptor()
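
/* Aside (illustrative, not from this codebase): the restart()/poll()/stop() sequence above is the stock
 * boost.asio idiom for draining, without blocking, any handlers that were ready when an io_context was stopped:
 *
 *   boost::asio::io_context ctx;
 *   // ... work is posted; ctx.run() executes in some thread; then: ...
 *   ctx.stop();     // run() returns in that thread; ready handlers may remain queued.
 *   ctx.restart();  // Clear the stopped state so handlers may run again.
 *   ctx.poll();     // Run all ready handlers; return without blocking for more work.
 *   ctx.stop();     // Back to stopped state.
 */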

void Native_socket_stream_acceptor::on_next_peer_socket_or_error(const Error_code& sys_err_code)
{
  using flow::util::ostream_op_string;
  using std::holds_alternative;

  // We are in thread W.
  if (sys_err_code == boost::asio::error::operation_aborted)
  {
    return; // Stuff is shutting down. GTFO.
  }
  // else
  assert(sys_err_code != boost::asio::error::would_block); // Not possible for async handlers.

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Incoming connection, or error when trying to accept one.");
  if (sys_err_code)
  {
    // Close/empty the potentially-almost-kinda-accepted socket. Probably unnecessary but can't hurt.
    Error_code dummy;
    m_next_peer_socket.close(dummy);

    if (sys_err_code == boost::asio::error::connection_aborted)
    {
      FLOW_LOG_WARNING("Incoming connection aborted halfway during connection; this is quite weird but "
                       "should not be fatal. Ignoring. Still listening.");
      // Fall through.
    }
    else
    {
      FLOW_LOG_WARNING("Acceptor [" << *this << "]: The background accept failed fatally. "
                       "Closing acceptor; no longer listening. Details follow.");
      FLOW_ERROR_SYS_ERROR_LOG_WARNING();

      // Close/destroy the acceptor. And of course don't call async_accept() on it again as we normally would below.
      m_acceptor->close(dummy);

      // Queue up and handle result.

      /* Shouldn't have gotten here if any other error had been emitted. We would've closed m_acceptor and certainly
       * not run m_acceptor->async_accept() again. */
      assert(m_pending_results_q.empty() || (!holds_alternative<Error_code>(m_pending_results_q.back())));

      // We are in steady state. Make this one change...
      m_pending_results_q.push(sys_err_code);
      // ...and immediately handle it appropriately to get back into steady state.
      finalize_q_surplus_on_error();

      // Do *not* m_acceptor->async_accept() again. (It's closed actually, so we couldn't anyway.)
      return;
    }
  } // if (sys_err_code)
  else // if (!sys_err_code)
  {
    /* We'll enqueue the resulting peer socket handle from the new socket. There are a few subtleties:
     * - We need to make a new Peer, which must be passed the boost.asio Peer_socket, or similar,
     *   to wrap. On that guy, they will internally call things like Peer_socket::async_read_some().
     * - m_next_peer_socket, basically, stores two pieces of data: the raw native socket handle (of new peer socket),
     *   and the Task_engine that is to be used to execute the logic of all async_ calls on it (e.g.,
     *   Peer_socket::async_read_some()) (basically 1-1 to our m_worker).
     * - Can we pass m_next_peer_socket into the Native_socket_stream ctor then? Well, no. Why? Answer:
     *   Suppose we did. Now suppose Native_socket_stream calls async_read_some() on it. Suppose some bytes are
     *   indeed read from the opposing peer. Now some callback internal to the Native_socket_stream must be called.
     *   On what thread would it be called? Answer: per above, on m_worker. Do we want that? Well, no, because
     *   that class sets up its own thread for that. (Briefly: Actually, why not use one thread? Answer: Even
     *   if we thought that was a good design for perf or something, consider that m_worker goes away in
     *   our dtor, so we'd have to actively share m_worker (maybe via shared_ptr) with the offshoot
     *   Native_socket_stream. Simply, we just aren't designing it that way. We want them to run an independent
     *   thread, though this is not where I will justify that.)
     * - So what to do? I was hoping there'd be a move-constructor-ish thing in Peer_socket,
     *   but there isn't one that lets one also specify the Task_engine; and it isn't apparently possible to change
     *   a Peer_socket's Task_engine (thing returned by .get_executor()) after construction.
     *   Worst-case, we could suck out the native socket handle (see just above) and then Native_socket_stream
     *   ctor could construct and take over that guy. It should be entirely safe, since it is a new socket, and
     *   we haven't started any async ops on it yet, but it still feels dodgy. Turns out the safe way to do it
     *   is basically that, but one can Peer_socket::release() to "officially" safely "eject" an open
     *   socket. So we do that, and Peer_socket can take over the "ejected" native socket handle in
     *   its ctor.
     * - We promise in our contract to propagate get_logger() to any child peer sockets. This is where it happens. */

    /* .release() won't throw except in Windows <8.1, where it'll always throw (unsupported; per boost.asio docs).
     * We don't worry about Windows generally; and anyway in .hpp somewhere we already should've ensured Linux in
     * particular. Just re-check that for sanity for now. (This is a bit of future-proofing, so that the problem is
     * obvious if porting the code.) */
#ifndef FLOW_OS_LINUX
    static_assert(false, "Should not have gotten to this line; should have required Linux; "
                         "the next thing assumes not-Win-<8.1.");
#endif
    // Could store a raw handle too, but this is exactly as fast and adds some logging niceties.
    Native_handle native_peer_socket(m_next_peer_socket.release());
    assert(!m_next_peer_socket.is_open()); // Non-exhaustive sanity check that it's back in empty/unconnected state.
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: "
                   "Ejected ownership of new incoming peer socket [" << native_peer_socket << "].");

    auto new_peer
      = boost::movelib::make_unique<Peer>
          (get_logger(),
           // Nickname is, like, "_pathName_of_this_acceptor=>native_hndl[35]" (as I write this).
           ostream_op_string(m_absolute_name.str(), "=>", native_peer_socket),
           std::move(native_peer_socket));
    // Caution: native_peer_socket is now invalid.
    assert(native_peer_socket.null());

    // Queue up and handle result.

    // As above -- on error we wouldn't have kept trying to accept more.
    assert(m_pending_results_q.empty() || (!holds_alternative<Error_code>(m_pending_results_q.back())));

    // We are in steady state. Make this one change...
    m_pending_results_q.emplace(std::move(new_peer)); // (new_peer may now be hosed.)
    // ...and immediately handle it appropriately to get back into steady state.
    finalize_q_surplus_on_success();
  } // else if (!sys_err_code)

  // Either there was success (!sys_err_code), or a non-fatal error (otherwise). Keep the async chain going.

  /* @todo Does it help perf-wise to spin through non-blocking accepts here (in case more incoming peers have been
   * queued up by OS) until would-block? I (ygoldfel) have done it in the past when doing TCP/UDP reads, but I never
   * really checked whether it's beneficial, and anyway this situation is not really the same (incoming load
   * should be much less intense here). */

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Starting the next background accept.");
  m_acceptor->async_accept(m_next_peer_socket,
                           [this](const Error_code& async_err_code)
  {
    // We are in thread W.
    on_next_peer_socket_or_error(async_err_code);
  });
} // Native_socket_stream_acceptor::on_next_peer_socket_or_error()
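
/* Aside (illustrative, not from this codebase): the generic boost.asio pattern used above -- ejecting a
 * connected socket's native handle via release() so another event loop can adopt it -- looks roughly like:
 *
 *   namespace asio = boost::asio;
 *   using Proto = asio::local::stream_protocol;
 *   asio::io_context loop_a, loop_b;
 *   Proto::socket sock_a(loop_a);
 *   // ... sock_a becomes connected (e.g., via an acceptor running on loop_a) ...
 *   auto raw_handle = sock_a.release();                // sock_a is now closed/empty; we own the handle.
 *   Proto::socket sock_b(loop_b, Proto(), raw_handle); // Adopt the open handle on a different loop.
 *
 * (As noted above, release() is unavailable on Windows before 8.1 per boost.asio docs.) */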

void Native_socket_stream_acceptor::async_accept_impl(Peer* target_peer, On_peer_accepted_func&& on_done_func)
{
  using boost::movelib::make_unique;
  using std::get;
  using std::holds_alternative;

  // We are in thread U/W. (They *are* allowed to invoke async_accept() from within their completion handler.)

  /* We don't lock our state, hence we do everything in thread W.
   *
   * If we are in thread U: Post on thread W.
   *
   * If we are in thread W already (being invoked from earlier user completion handler): Still post on thread W.
   * Otherwise we may well invoke the handler synchronously (if surplus is available at the moment), which would
   * mean nested handler invocation, which we promised not to do (for good reason: if, say, their handler
   * is bracketed by a non-recursive lock, then they would get a deadlock trying to acquire the lock in
   * the 2nd -- inner -- handler execution). */
  m_worker.post([this, target_peer, on_done_func = std::move(on_done_func)]
                  () mutable // To allow for the on_done_func to be move()d again.
  {
    // We are in thread W.

    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Handling async-accept request.");

    auto new_req = make_unique<User_request>();
    new_req->m_target_peer = target_peer;
    new_req->m_on_done_func = std::move(on_done_func);

    // We are in steady state. Make this one change....
    m_pending_user_requests_q.emplace(std::move(new_req)); // (new_req may now be hosed.)
    // ...and immediately handle it appropriately to get back into steady state:

    if (m_pending_results_q.empty())
    {
      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-accept request pushed onto deficit queue; "
                     "but there is no surplus (no pending results). Will await results.");
      return;
    }
    // else if (!m_pending_results_q.empty())

    /* If deficit existed *before* the request was pushed, and there's surplus too, then it wasn't steady state
     * pre-push. Violates our invariant (see data member doc headers). */
    assert(m_pending_user_requests_q.size() == 1);

    auto& peer_or_err_code = m_pending_results_q.front();
    if (holds_alternative<Error_code>(peer_or_err_code))
    {
      assert(m_pending_results_q.size() == 1); // An error always caps the queue (and never leaves it).
      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-request pushed onto deficit queue; "
                     "and there is surplus in the form of a fatal error code. Will feed error to the request "
                     "*without* popping it from surplus queue (size remains 1).");
      feed_error_result_to_deficit(get<Error_code>(peer_or_err_code));
    }
    else
    {
      assert(holds_alternative<Peer_ptr>(peer_or_err_code));

      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-request pushed onto deficit queue; "
                     "and there is surplus in the form of a new peer handle. Will feed handle to the request. "
                     "Queue size will become [" << (m_pending_results_q.size() - 1) << "].");

      Peer_ptr peer(std::move(get<Peer_ptr>(peer_or_err_code)));
      m_pending_results_q.pop();
      feed_success_result_to_deficit(std::move(peer));
    }
  }); // m_worker.post()
} // Native_socket_stream_acceptor::async_accept_impl()
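
/* A usage sketch (illustrative only): because async_accept_impl() always post()s onto thread W, a user may
 * safely start the next accept from within a completion handler without risking nested handler invocation.
 * Assuming `acceptor` outlives the chain, with `Peer` = sync_io::Native_socket_stream as above, and with all
 * local names hypothetical:
 *
 *   void accept_chain(Native_socket_stream_acceptor* acceptor, Peer* target)
 *   {
 *     acceptor->async_accept(target, [acceptor, target](const Error_code& err)
 *     {
 *       if (err) { return; } // E.g., fatal listen error, or object-shutdown during dtor; stop the chain.
 *       // ... hand off *target (now a connected peer stream) ...
 *       accept_chain(acceptor, target); // Re-arm; runs via post(), so no recursion/deadlock concern.
 *     });
 *   }
 */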

void Native_socket_stream_acceptor::finalize_q_surplus_on_error()
{
  using std::get;
  using std::holds_alternative;

  // We are in thread W.

  assert((!m_pending_results_q.empty()) && holds_alternative<Error_code>(m_pending_results_q.back()));

  if (m_pending_user_requests_q.empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Fatal error pushed onto surplus queue; "
                   "but there is no deficit (no pending requests). Will await async-accept request(s).");
    return;
  }
  // else if (!m_pending_user_requests_q.empty())

  /* If surplus existed *before* the error was pushed, and there's deficit too, then it wasn't steady state pre-push.
   * Violates our pre-condition. */
  assert(m_pending_results_q.size() == 1);

  const auto err_code = get<Error_code>(m_pending_results_q.front());
  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Fatal error pushed onto surplus queue; "
                 "and there is deficit (1+ pending requests). Will feed error to all pending requests *without* "
                 "popping surplus queue, whose size remains 1.");
  feed_error_result_to_deficit(err_code);
} // Native_socket_stream_acceptor::finalize_q_surplus_on_error()
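
/* Aside (illustrative restatement of the deficit/surplus state machine these finalize_q_*() functions maintain).
 * Writing a state as (deficit = #pending requests, surplus = #pending results or ERR), steady states satisfy
 * deficit == 0 || surplus <= 1 with at most one of the two queues non-empty:
 *
 *   (0, 2) --async_accept()--> (1, 2) --feed 1 result, pop--> (0, 1)   // Success results are popped when fed.
 *   (3, 0) --fatal error-----> (3, ERR) --feed to all, no pop--> (0, ERR) // An error caps the queue and stays.
 */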

void Native_socket_stream_acceptor::finalize_q_surplus_on_success()
{
  using std::get;
  using std::holds_alternative;

  // We are in thread W.

  assert((!m_pending_results_q.empty()) && holds_alternative<Peer_ptr>(m_pending_results_q.back()));

  if (m_pending_user_requests_q.empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: New peer socket handle pushed onto surplus queue; "
                   "but there is no deficit (no pending requests). Will await async-accept request(s).");
    return;
  }
  // else if (!m_pending_user_requests_q.empty())

  /* If surplus existed *before* the handle was pushed, and there's deficit too, then it wasn't steady state pre-push.
   * Violates our pre-condition. */
  assert(m_pending_results_q.size() == 1);

  Peer_ptr peer(std::move(get<Peer_ptr>(m_pending_results_q.front())));
  m_pending_results_q.pop();
  FLOW_LOG_TRACE("Acceptor [" << *this << "]: New peer socket handle pushed onto surplus queue; "
                 "and there is deficit (1+ pending requests). Will feed to next pending request, having "
                 "popped it from surplus queue (size is now 0).");
  feed_success_result_to_deficit(std::move(peer));
} // Native_socket_stream_acceptor::finalize_q_surplus_on_success()

void Native_socket_stream_acceptor::feed_error_result_to_deficit(const Error_code& err_code)
{
  assert(!m_pending_user_requests_q.empty());

  size_t idx = 0;
  do // while (!empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Feeding to user async-accept request handler [" << idx << "]: "
                   "Error code [" << err_code << "] [" << err_code.message() << "].");
    m_pending_user_requests_q.front()->m_on_done_func(err_code);
    m_pending_user_requests_q.pop();

    ++idx;
  }
  while (!m_pending_user_requests_q.empty());
} // Native_socket_stream_acceptor::feed_error_result_to_deficit()

void Native_socket_stream_acceptor::feed_success_result_to_deficit(Peer_ptr&& peer)
{
  assert(!m_pending_user_requests_q.empty());

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Feeding to user async-accept request handler: "
                 "Socket stream [" << *peer << "]. User request queue size post-pop is "
                 "[" << (m_pending_user_requests_q.size() - 1) << "].");
  auto& head_request = m_pending_user_requests_q.front();
  *head_request->m_target_peer = std::move(*peer);
  head_request->m_on_done_func(Error_code());
  m_pending_user_requests_q.pop();
} // Native_socket_stream_acceptor::feed_success_result_to_deficit()

const Shared_name& Native_socket_stream_acceptor::absolute_name() const
{
  return m_absolute_name;
}

std::ostream& operator<<(std::ostream& os, const Native_socket_stream_acceptor& val)
{
  return os << "sh_name[" << val.absolute_name() << "]@" << static_cast<const void*>(&val);
}
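// (Illustrative: this prints, e.g., "sh_name[/some/shared/name]@0x55d0f00dbeef" -- hypothetical values.)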

} // namespace ipc::transport
A server object that binds to a Shared_name and listens for incoming Native_socket_stream connect att...
static const Shared_name & S_RESOURCE_TYPE_ID
Shared_name relative-folder fragment (no separators) identifying this resource type.
void async_accept_impl(Peer *target_peer, On_peer_accepted_func &&on_done_func)
Non-template impl of async_accept().
flow::async::Single_thread_task_loop m_worker
A single-threaded async task loop that starts in constructor and ends in destructor.
boost::movelib::unique_ptr< Peer > Peer_ptr
Short-hand for internally stored PEER-state sync_io::Native_socket_stream in m_pending_results_q.
std::queue< User_request::Ptr > m_pending_user_requests_q
Queue storing deficit async-accept requests queued up due to lacking pending ready peer socket handle...
void finalize_q_surplus_on_error()
In thread W, in steady state except for an Error_code just pushed to the back of m_pending_results_q ...
~Native_socket_stream_acceptor()
Destroys this acceptor which will stop listening in the background and cancel any pending completion ...
Native_socket_stream_acceptor(flow::log::Logger *logger_ptr, const Shared_name &absolute_name, Error_code *err_code=0)
Creates the Native_socket_stream_acceptor and immediately begins listening in the background,...
asio_local_stream_socket::Peer_socket m_next_peer_socket
Unix domain peer socket, always empty/unconnected while a background m_acceptor.async_accept() is pro...
void feed_error_result_to_deficit(const Error_code &err_code)
In thread W, gets back to steady state by feeding the given Error_code (which must be the sole elemen...
boost::movelib::unique_ptr< asio_local_stream_socket::Acceptor > m_acceptor
Unix domain socket acceptor.
flow::async::Task_asio_err On_peer_accepted_func
Short-hand for callback called on new peer-to-peer connection; or on unrecoverable error.
void finalize_q_surplus_on_success()
In thread W, in steady state, introduces the just-established peer socket handle into the state machi...
std::queue< std::variant< Peer_ptr, Error_code > > m_pending_results_q
Queue storing surplus finalized async-accept results queued up due to lacking async_accept() requests...
const Shared_name & absolute_name() const
Returns the full name/address to which the constructor bound, or attempted to bind,...
void feed_success_result_to_deficit(Peer_ptr &&peer)
In thread W, gets back to steady state by feeding the given just-connected peer socket (which must ha...
void on_next_peer_socket_or_error(const Error_code &sys_err_code)
Handler for incoming connection on m_acceptor.
Implements both sync_io::Native_handle_sender and sync_io::Native_handle_receiver concepts by using a...
String-wrapping abstraction representing a name uniquely distinguishing a kernel-persistent entity fr...
const std::string & str() const
Returns (sans copying) ref to immutable entire wrapped name string, suitable to pass into sys calls w...
Protocol::endpoint Endpoint
Short-hand for boost.asio Unix domain peer stream-socket endpoint.
Protocol::acceptor Acceptor
Short-hand for boost.asio Unix domain stream-socket acceptor (listening guy) socket.
Protocol::socket Peer_socket
Short-hand for boost.asio Unix domain peer stream-socket (usually-connected-or-empty guy).
Endpoint endpoint_at_shared_name(flow::log::Logger *logger_ptr, const Shared_name &absolute_name, Error_code *err_code)
Returns an Endpoint corresponding to the given absolute Shared_name, so that an Acceptor or Peer_sock...
@ S_OBJECT_SHUTDOWN_ABORTED_COMPLETION_HANDLER
Async completion handler is being called prematurely, because underlying object is shutting down,...
Flow-IPC module providing transmission of structured messages and/or low-level blobs (and more) betwe...
util::Shared_name Shared_name
Convenience alias for the commonly used type util::Shared_name.
std::ostream & operator<<(std::ostream &os, const Bipc_mq_handle &val)
Prints string representation of the given Bipc_mq_handle to the given ostream.
flow::util::String_view String_view
Short-hand for Flow's String_view.
Definition: util_fwd.hpp:115
Log_component
The flow::log::Component payload enumeration containing various log components used by Flow-IPC inter...
Definition: common.hpp:323
flow::Error_code Error_code
Short-hand for flow::Error_code which is very common.
Definition: common.hpp:298
A monolayer-thin wrapper around a native handle, a/k/a descriptor a/k/a FD.
bool null() const
Returns true if and only if m_native_handle equals S_NULL_HANDLE.