Flow-IPC 2.0.0
Flow-IPC project: Full implementation reference.
native_socket_stream_acceptor.cpp
/* Flow-IPC: Core
 * Copyright 2023 Akamai Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy
 * of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in
 * writing, software distributed under the License is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing
 * permissions and limitations under the License. */

/// @file
#include "ipc/transport/native_socket_stream_acceptor.hpp"
#include "ipc/transport/error.hpp"
#include <flow/error/error.hpp>
#include <flow/common.hpp>
#include <boost/move/make_unique.hpp>

namespace ipc::transport
{

// Initializers.

// It's a reference due to `static` things being initialized in unknown order before main().
const Shared_name& Native_socket_stream_acceptor::S_RESOURCE_TYPE_ID = Sync_io_obj::S_RESOURCE_TYPE_ID;
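/* (Sketch of the hazard being avoided -- an illustration, not code from this project: had this been a by-value
 * copy, e.g. `const Shared_name S_RESOURCE_TYPE_ID = Sync_io_obj::S_RESOURCE_TYPE_ID;`, then -- since C++ does not
 * define cross-translation-unit order of static initialization -- the copy could run before
 * Sync_io_obj::S_RESOURCE_TYPE_ID is itself initialized, capturing an empty Shared_name. A reference merely binds
 * to the object and defers any read of its value until use, well after static-init time.) */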

// Implementations.

Native_socket_stream_acceptor::Native_socket_stream_acceptor(flow::log::Logger* logger_ptr,
                                                             const Shared_name& absolute_name_arg,
                                                             Error_code* err_code) :
  flow::log::Log_context(logger_ptr, Log_component::S_TRANSPORT),
  m_absolute_name(absolute_name_arg),
  m_worker(get_logger(), // Start the 1 thread.
           /* (Linux) OS thread name will truncate m_absolute_name to 15-5=10 chars here; high chance that'll include
            * something decently useful; probably not everything though. It's a decent attempt. */
           flow::util::ostream_op_string("NSSA-", m_absolute_name)),
  m_next_peer_socket(*(m_worker.task_engine())) // Steady state: start it as empty, per doc header.
{
  using asio_local_stream_socket::endpoint_at_shared_name;
  using asio_local_stream_socket::Endpoint;
  using asio_local_stream_socket::Acceptor;
  using flow::error::Runtime_error;
  using flow::async::reset_thread_pinning;
  using boost::system::system_error;

  /* For simplicity we'll just do all the work in thread W we're about to start. Whether to do some initial stuff
   * in this thread or not doesn't matter much. We have promised that,
   * upon our return -- no later -- the Native_socket_stream_acceptor
   * will be listening (assuming no errors). So wait for that (the startup) to finish using start() arg. */
  Error_code sys_err_code;

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Awaiting initial setup/listening in worker thread.");
  m_worker.start([&]() // Execute all this synchronously in the thread.
  {
    reset_thread_pinning(get_logger()); // Don't inherit any strange core-affinity! Worker must float free.

    auto const asio_engine = m_worker.task_engine();

    FLOW_LOG_INFO("Acceptor [" << *this << "]: Starting (am in worker thread).");

    const auto local_endpoint = endpoint_at_shared_name(get_logger(), m_absolute_name, &sys_err_code);
    assert((local_endpoint == Endpoint()) == bool(sys_err_code));
    if (sys_err_code) // It logged.
    {
      return; // Escape the start() callback, that is.
    }
    // else

    // Start a listening acceptor socket at that endpoint!

    try
    {
      // Throws on error. (It's annoying there's no error-code-returning API; but it's normal in boost.asio ctors.)
      m_acceptor.reset(new Acceptor(*asio_engine, local_endpoint));
      // @todo Is reuse_addr appropriate? Do we run into the already-exists error in practice? Revisit.
    }
    catch (const system_error& exc)
    {
      assert(!m_acceptor);
      FLOW_LOG_WARNING("Acceptor [" << *this << "]: Unable to open/bind/listen native local stream socket; could "
                       "be due to address/name clash; details logged below.");
      sys_err_code = exc.code();
      FLOW_ERROR_SYS_ERROR_LOG_WARNING();
      return; // Escape the start() callback, that is.
    }

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "Successfully made endpoint and open/bind/listen-ed on it. Ready for connections.");

    /* OK! Background-wait for the first incoming connection. As explained in m_next_peer_socket doc header,
     * m_next_peer_socket is empty/unconnected at entry to each such background-wait; set to connected state
     * by boost.asio as it launches our callback; and moved/emptied again by that callback, as it starts the next
     * background-wait. We have just the one thread, so it's just a simple serial sequence. */

    m_acceptor->async_accept(m_next_peer_socket,
                             [this](const Error_code& async_err_code)
    {
      // We are in thread W.
      on_next_peer_socket_or_error(async_err_code);
    });

    assert(!sys_err_code); // Success.
  }); // m_worker.start()

  if (sys_err_code)
  {
    /* Just keep the thread going; even though it's not gonna be doing any listening.
     * @todo We could stop it here no problem. And it could help something, somewhere, marginally to not have an
     * extra thread around. But then this has to be coded for all over the place;
     * didn't seem essential, particularly since these error conditions are highly irregular and likely to be very
     * bad anyway. */

    if (err_code)
    {
      *err_code = sys_err_code;
      return;
    }
    // else
    throw Runtime_error(sys_err_code, FLOW_UTIL_WHERE_AM_I_STR());
  }
  // else
  assert(!sys_err_code);

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Ready for incoming connections.");
} // Native_socket_stream_acceptor::Native_socket_stream_acceptor()

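/* Illustrative usage sketch (not part of this .cpp; `logger`, the name, and the handler body are hypothetical).
 * It shows the contract implemented above: the ctor returns only once listening is live -- or reports failure
 * via *err_code (or throws, if err_code is null) -- after which async_accept() may be invoked:
 *
 *   Error_code err;
 *   Native_socket_stream_acceptor acc(&logger, Shared_name::ct("my_srv"), &err);
 *   if (!err)
 *   {
 *     // `peer` = a default-cted (NULL-state) stream object that must live until the handler runs.
 *     acc.async_accept(&peer, [&](const Error_code& async_err)
 *     {
 *       // On success `peer` is now in PEER (connected) state; otherwise async_err explains why not.
 *     });
 *   }
 */
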
Native_socket_stream_acceptor::~Native_socket_stream_acceptor()
{
  using flow::async::Single_thread_task_loop;
  using flow::async::reset_thread_pinning;
  using flow::util::ostream_op_string;

  // We are in thread U. By contract in doc header, they must not call us from a completion handler (thread W).

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Shutting down. Next acceptor socket will close; all our internal "
                "async handlers will be canceled; and worker thread will be joined.");

  // stop() logic is similar to what happens in Native_socket_stream::Impl dtor. Keeping cmnts light.
  m_worker.stop();
  // Thread W is (synchronously!) no more.

  // Post-stop() poll() logic is similar to what happens in Native_socket_stream::Impl dtor. Keeping cmnts light.

  FLOW_LOG_INFO("Acceptor [" << *this << "]: Continuing shutdown. Next we will run pending handlers from some "
                "other thread. In this user thread we will await those handlers' completion and then return.");
  Single_thread_task_loop one_thread(get_logger(), ostream_op_string("NSSADeinit-", m_absolute_name));

  one_thread.start([&]()
  {
    reset_thread_pinning(get_logger()); // Don't inherit any strange core-affinity. Float free.

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "In transient finisher thread: Shall run all pending internal handlers (typically none).");

    const auto task_engine = m_worker.task_engine();
    task_engine->restart();
    const auto count = task_engine->poll();
    if (count != 0)
    {
      FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                    "In transient finisher thread: Ran [" << count << "] internal handlers after all.");
    }
    task_engine->stop();
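
    /* (For the reader: that is the standard boost.asio drain idiom. m_worker.stop() above stopped the
     * io_context, possibly with handlers still queued. restart() clears the stopped flag; poll() -- unlike
     * run() -- executes only the handlers that are already ready, never blocking for new work; stop()
     * then re-quiesces the Task_engine.) */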

    FLOW_LOG_INFO("Acceptor [" << *this << "]: "
                  "In transient finisher thread: Shall run all pending user handlers (feeding operation-aborted).");

    while (!m_pending_user_requests_q.empty())
    {
      FLOW_LOG_TRACE("Running a queued async-accept completion handler.");
      m_pending_user_requests_q.front()
        ->m_on_done_func(error::Code::S_OBJECT_SHUTDOWN_ABORTED_COMPLETION_HANDLER);
      m_pending_user_requests_q.pop();
      FLOW_LOG_TRACE("User accept handler finished. Popped from user request deficit queue.");
    } // while (!m_pending_user_requests_q.empty())

    FLOW_LOG_INFO("Transient finisher exiting.");
  }); // one_thread.start()
  // Here thread exits/joins synchronously.
} // Native_socket_stream_acceptor::~Native_socket_stream_acceptor()

void Native_socket_stream_acceptor::on_next_peer_socket_or_error(const Error_code& sys_err_code)
{
  using flow::util::ostream_op_string;
  using std::holds_alternative;

  // We are in thread W.
  if (sys_err_code == boost::asio::error::operation_aborted)
  {
    return; // Stuff is shutting down. GTFO.
  }
  // else
  assert(sys_err_code != boost::asio::error::would_block); // Not possible for async handlers.

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Incoming connection, or error when trying to accept one.");
  if (sys_err_code)
  {
    // Close/empty the potentially-almost-kinda-accepted socket. Probably unnecessary but can't hurt.
    Error_code dummy;
    m_next_peer_socket.close(dummy);

    if (sys_err_code == boost::asio::error::connection_aborted)
    {
      FLOW_LOG_WARNING("Incoming connection aborted halfway during connection; this is quite weird but "
                       "should not be fatal. Ignoring. Still listening.");
      // Fall through.
    }
    else
    {
      FLOW_LOG_WARNING("Acceptor [" << *this << "]: The background accept failed fatally. "
                       "Closing acceptor; no longer listening. Details follow.");
      FLOW_ERROR_SYS_ERROR_LOG_WARNING();

      // Close/destroy the acceptor. And of course don't call async_accept() on it again as we normally would below.
      m_acceptor->close(dummy);

      // Queue up and handle result.

      /* Shouldn't have gotten here if any other error had been emitted. We would've closed m_acceptor and certainly
       * not run m_acceptor->async_accept() again. */
      assert(m_pending_results_q.empty() || (!holds_alternative<Error_code>(m_pending_results_q.back())));

      // We are in steady state. Make this one change...
      m_pending_results_q.push(sys_err_code);
      // ...and immediately handle it appropriately to get back into steady state.
      finalize_q_surplus_on_error();

      // Do *not* m_acceptor->async_accept() again. (It's closed actually, so we couldn't anyway.)
      return;
    }
  } // if (sys_err_code)
  else // if (!sys_err_code)
  {
    /* We'll enqueue the resulting peer socket handle from the new socket. There are a few subtleties:
     * - We need to make a new Peer, which must be passed the boost.asio Peer_socket, or similar,
     *   to wrap. On that guy, they will internally call things like Peer_socket::async_read_some().
     * - m_next_peer_socket, basically, stores two pieces of data: the raw native socket handle (of new peer socket),
     *   and the Task_engine that is to be used to execute the logic of all async_ calls on it (e.g.,
     *   Peer_socket::async_read_some()) (basically 1-1 to our m_worker).
     * - Can we pass m_next_peer_socket into the Native_socket_stream ctor then? Well, no. Why? Answer:
     *   Suppose we did. Now suppose Native_socket_stream calls async_read_some() on it. Suppose some bytes are
     *   indeed read from the opposing peer. Now some callback internal to the Native_socket_stream must be called.
     *   On what thread would it be called? Answer: per above, on m_worker. Do we want that? Well, no, because
     *   that class sets up its own thread for that. (Briefly: Actually, why not use one thread? Answer: Even
     *   if we thought that was a good design for perf or something, consider that m_worker goes away in
     *   our dtor, so we'd have to actively share m_worker (maybe via shared_ptr) with the offshoot
     *   Native_socket_stream. Simply, we just aren't designing it that way. We want them to run an independent
     *   thread, though this is not where I will justify that.)
     * - So what to do? I was hoping there'd be a move-constructor-ish thing in Peer_socket,
     *   but there isn't one that lets one also specify the Task_engine; and it isn't apparently possible to change
     *   a Peer_socket's Task_engine (thing returned by .get_executor()) after construction.
     *   Worst-case, we could suck out the native socket handle (see just above) and then Native_socket_stream
     *   ctor could construct and take over that guy. It should be entirely safe, since it is a new socket, and
     *   we haven't started any async ops on it yet, but it still feels dodgy. Turns out the safe way to do it
     *   is basically that, but one can Peer_socket::release() to "officially" safely "eject" an open
     *   socket. So we do that, and Peer_socket can take over the "ejected" native socket handle in
     *   its ctor.
     * - We promise in our contract to propagate get_logger() to any child peer sockets. This is where it happens. */
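
    /* (Generic shape of that release()-based hand-off, as a sketch assuming plain boost.asio types, with
     * Protocol = boost::asio::local::stream_protocol; the real transfer below additionally goes through our
     * Native_handle wrapper and the Peer ctor:
     *
     *   Protocol::socket::native_handle_type raw_fd = sock_a.release(); // Eject native handle; sock_a now !is_open().
     *   Protocol::socket sock_b(other_io_ctx, Protocol(), raw_fd);      // Adopt it under another Task_engine.
     *
     * I.e., the kernel-level socket migrates between executors, safely, because no async op has yet run on it.) */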

    /* .release() won't throw except in Windows <8.1, where it'll always throw (unsupported; per boost.asio docs).
     * We don't worry about Windows generally; and anyway in .hpp somewhere we already should've ensured Linux in
     * particular. Just re-check that for sanity for now. (This is a bit of future-proofing, so that the problem is
     * obvious if porting the code.) */
#ifndef FLOW_OS_LINUX
    static_assert(false, "Should not have gotten to this line; should have required Linux; "
                         "the next thing assumes not-Win-<8.1.");
#endif
    // Could store a raw handle too, but this is exactly as fast and adds some logging niceties.
    Native_handle native_peer_socket(m_next_peer_socket.release());
    assert(!m_next_peer_socket.is_open()); // Non-exhaustive sanity check that it's back in empty/unconnected state.
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: "
                   "Ejected ownership of new incoming peer socket [" << native_peer_socket << "].");

    auto new_peer
      = boost::movelib::make_unique<Peer>
          (get_logger(),
           // Nickname is, like, "_pathName_of_this_acceptor=>native_hndl[35]" (as I write this).
           ostream_op_string(m_absolute_name.str(), "=>", native_peer_socket),
           std::move(native_peer_socket));
    // Caution: native_peer_socket is now invalid.
    assert(native_peer_socket.null());

    // Queue up and handle result.

    // As above -- on error we wouldn't have kept trying to accept more.
    assert(m_pending_results_q.empty() || (!holds_alternative<Error_code>(m_pending_results_q.back())));

    // We are in steady state. Make this one change...
    m_pending_results_q.emplace(std::move(new_peer)); // (new_peer may now be hosed.)
    // ...and immediately handle it appropriately to get back into steady state.
    finalize_q_surplus_on_success();
  } // else if (!sys_err_code)

  // Either there was success (!sys_err_code), or a non-fatal error (otherwise). Keep the async chain going.

  /* @todo Does it help perf-wise to spin through non-blocking accepts here (in case more incoming peers have been
   * queued up by OS) until would-block? I (ygoldfel) have done it in the past when doing TCP/UDP reads, but I never
   * really checked whether it's beneficial, and anyway this situation is not really the same (incoming load
   * should be much less intense here). */

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Starting the next background accept.");
  m_acceptor->async_accept(m_next_peer_socket,
                           [this](const Error_code& async_err_code)
  {
    // We are in thread W.
    on_next_peer_socket_or_error(async_err_code);
  });
} // Native_socket_stream_acceptor::on_next_peer_socket_or_error()

void Native_socket_stream_acceptor::async_accept_impl(Peer* target_peer, On_peer_accepted_func&& on_done_func)
{
  using boost::movelib::make_unique;
  using std::get;
  using std::holds_alternative;

  // We are in thread U/W. (They *are* allowed to invoke async_accept() from within their completion handler.)

  /* We don't lock our state, hence we do everything in thread W.
   *
   * If we are in thread U: Post on thread W.
   *
   * If we are in thread W already (being invoked from earlier user completion handler): Still post on thread W.
   * Otherwise we may well invoke handler synchronously (if surplus is available at the moment) which would
   * mean nested handler invocation, which we promised not to do (for good reason: if, say, their handler
   * is bracketed by a non-recursive lock, then they would get a deadlock trying to acquire the lock in
   * the 2nd -- inner -- handler execution). */
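  /* (Concrete illustration of that deadlock -- hypothetical user code, not from this file:
   *
   *   std::mutex m;
   *   acc.async_accept(&peer1, [&](const Error_code&)
   *   {
   *     std::lock_guard<std::mutex> lock(m);       // Non-recursive lock bracketing the handler.
   *     acc.async_accept(&peer2, on_peer2_func);   // Suppose a surplus result is already available...
   *   });
   *
   * If the inner async_accept() invoked on_peer2_func synchronously, and on_peer2_func also locked `m`, the
   * thread would deadlock against itself. Hence: always post() onto thread W, even when already in thread W.) */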
  m_worker.post([this, target_peer, on_done_func = std::move(on_done_func)]
                () mutable // To allow for the on_done_func to be move()d again.
  {
    // We are in thread W.

    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Handling async-accept request.");

    auto new_req = make_unique<User_request>();
    new_req->m_target_peer = target_peer;
    new_req->m_on_done_func = std::move(on_done_func);

    // We are in steady state. Make this one change....
    m_pending_user_requests_q.emplace(std::move(new_req)); // (new_req may now be hosed.)
    // ...and immediately handle it appropriately to get back into steady state:

    if (m_pending_results_q.empty())
    {
      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-accept request pushed onto deficit queue; "
                     "but there is no surplus (no pending results). Will await results.");
      return;
    }
    // else if (!m_pending_results_q.empty())

    /* If deficit existed *before* the request was pushed, and there's surplus too, then it wasn't steady state
     * pre-push. Violates our invariant (see data member doc headers). */
    assert(m_pending_user_requests_q.size() == 1);

    auto& peer_or_err_code = m_pending_results_q.front();
    if (holds_alternative<Error_code>(peer_or_err_code))
    {
      assert(m_pending_results_q.size() == 1); // An error always caps the queue (and never leaves it).
      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-request pushed onto deficit queue; "
                     "and there is surplus in the form of a fatal error code. Will feed error to the request "
                     "*without* popping it from surplus queue (size remains 1).");
      feed_error_result_to_deficit(get<Error_code>(peer_or_err_code));
    }
    else
    {
      assert(holds_alternative<Peer_ptr>(peer_or_err_code));

      FLOW_LOG_TRACE("Acceptor [" << *this << "]: New async-request pushed onto deficit queue; "
                     "and there is surplus in the form of a new peer handle. Will feed handle to the request. "
                     "Queue size will become [" << (m_pending_results_q.size() - 1) << "].");

      Peer_ptr peer(std::move(get<Peer_ptr>(peer_or_err_code)));
      m_pending_results_q.pop();
      feed_success_result_to_deficit(std::move(peer));
    }
  }); // m_worker.post()
} // Native_socket_stream_acceptor::async_accept_impl()

void Native_socket_stream_acceptor::finalize_q_surplus_on_error()
{
  using std::get;
  using std::holds_alternative;

  // We are in thread W.

  assert((!m_pending_results_q.empty()) && holds_alternative<Error_code>(m_pending_results_q.back()));

  if (m_pending_user_requests_q.empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Fatal error pushed onto surplus queue; "
                   "but there is no deficit (no pending requests). Will await async-accept request(s).");
    return;
  }
  // else if (!m_pending_user_requests_q.empty())

  /* If surplus existed *before* the error was pushed, and there's deficit too, then it wasn't steady state pre-push.
   * Violates our pre-condition. */
  assert(m_pending_results_q.size() == 1);

  const auto err_code = get<Error_code>(m_pending_results_q.front());
  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Fatal error pushed onto surplus queue; "
                 "and there is deficit (1+ pending requests). Will feed error to all pending requests *without* "
                 "popping surplus queue, whose size remains 1.");
  feed_error_result_to_deficit(err_code);
} // Native_socket_stream_acceptor::finalize_q_surplus_on_error()

void Native_socket_stream_acceptor::finalize_q_surplus_on_success()
{
  using std::get;
  using std::holds_alternative;

  // We are in thread W.

  assert((!m_pending_results_q.empty()) && holds_alternative<Peer_ptr>(m_pending_results_q.back()));

  if (m_pending_user_requests_q.empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: New peer socket handle pushed onto surplus queue; "
                   "but there is no deficit (no pending requests). Will await async-accept request(s).");
    return;
  }
  // else if (!m_pending_user_requests_q.empty())

  /* If surplus existed *before* the handle was pushed, and there's deficit too, then it wasn't steady state pre-push.
   * Violates our pre-condition. */
  assert(m_pending_results_q.size() == 1);

  Peer_ptr peer(std::move(get<Peer_ptr>(m_pending_results_q.front())));
  m_pending_results_q.pop();
  FLOW_LOG_TRACE("Acceptor [" << *this << "]: New peer socket handle pushed onto surplus queue; "
                 "and there is deficit (1+ pending requests). Will feed to next pending request, having "
                 "popped it from surplus queue (size is now 0).");
  feed_success_result_to_deficit(std::move(peer));
} // Native_socket_stream_acceptor::finalize_q_surplus_on_success()

void Native_socket_stream_acceptor::feed_error_result_to_deficit(const Error_code& err_code)
{
  assert(!m_pending_user_requests_q.empty());

  size_t idx = 0;
  do // while (!empty())
  {
    FLOW_LOG_TRACE("Acceptor [" << *this << "]: Feeding to user async-accept request handler [" << idx << "]: "
                   "Error code [" << err_code << "] [" << err_code.message() << "].");
    m_pending_user_requests_q.front()->m_on_done_func(err_code);
    m_pending_user_requests_q.pop();

    ++idx;
  }
  while (!m_pending_user_requests_q.empty());
} // Native_socket_stream_acceptor::feed_error_result_to_deficit()

void Native_socket_stream_acceptor::feed_success_result_to_deficit(Peer_ptr&& peer)
{
  assert(!m_pending_user_requests_q.empty());

  FLOW_LOG_TRACE("Acceptor [" << *this << "]: Feeding to user async-accept request handler: "
                 "Socket stream [" << *peer << "]. User request queue size post-pop is "
                 "[" << (m_pending_user_requests_q.size() - 1) << "].");
  auto& head_request = m_pending_user_requests_q.front();
  *head_request->m_target_peer = std::move(*peer);
  head_request->m_on_done_func(Error_code());
  m_pending_user_requests_q.pop();
} // Native_socket_stream_acceptor::feed_success_result_to_deficit()

const Shared_name& Native_socket_stream_acceptor::absolute_name() const
{
  return m_absolute_name;
}

std::ostream& operator<<(std::ostream& os, const Native_socket_stream_acceptor& val)
{
  return os << "sh_name[" << val.absolute_name() << "]@" << static_cast<const void*>(&val);
}

} // namespace ipc::transport