use crate::{
    api::{ConnectAddr, ListenAddr, NetworkConnectError, Participant},
    channel::Protocols,
    metrics::{NetworkMetrics, ProtocolInfo},
    participant::{B2sPrioStatistic, BParticipant, S2bCreateChannel, S2bShutdownBparticipant},
};
use futures_util::StreamExt;
use hashbrown::HashMap;
use network_protocol::{Cid, Pid, ProtocolMetricCache, ProtocolMetrics};
#[cfg(feature = "metrics")]
use prometheus::Registry;
use rand::Rng;
use std::{
    sync::{
        atomic::{AtomicBool, AtomicU64, Ordering},
        Arc,
    },
    time::Duration,
};
use tokio::{
    io,
    sync::{mpsc, oneshot, Mutex},
};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::*;

// Naming of Channels `x2x`
// - a: api
// - s: scheduler
// - b: bparticipant
// - p: prios
// - r: protocol
// - w: wire
// - c: channel/handshake
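
/// Information the scheduler keeps per connected participant: the secret
/// negotiated during the handshake (used to verify additional channels) and
/// the senders for adding channels to / shutting down its `BParticipant`.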
#[derive(Debug)]
struct ParticipantInfo {
    secret: u128,
    #[allow(dead_code)]
    s2b_create_channel_s: mpsc::UnboundedSender<S2bCreateChannel>,
    s2b_shutdown_bparticipant_s: Option<oneshot::Sender<S2bShutdownBparticipant>>,
}
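
// Requests sent from the api (`a`) to the scheduler (`s`); see the channel
// naming scheme above.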
type A2sListen = (ListenAddr, oneshot::Sender<io::Result<()>>);
pub(crate) type A2sConnect = (
    ConnectAddr,
    oneshot::Sender<Result<Participant, NetworkConnectError>>,
);
type A2sDisconnect = (Pid, S2bShutdownBparticipant);
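
/// Receiver halves drained by the scheduler's managers; moved out of
/// `run_channels` exactly once when [`Scheduler::run`] starts.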
#[derive(Debug)]
struct ControlChannels {
    a2s_listen_r: mpsc::UnboundedReceiver<A2sListen>,
    a2s_connect_r: mpsc::UnboundedReceiver<A2sConnect>,
    a2s_scheduler_shutdown_r: oneshot::Receiver<()>,
    a2s_disconnect_r: mpsc::UnboundedReceiver<A2sDisconnect>,
    b2s_prio_statistic_r: mpsc::UnboundedReceiver<B2sPrioStatistic>,
}

#[derive(Debug, Clone)]
struct ParticipantChannels {
    s2a_connected_s: mpsc::UnboundedSender<Participant>,
    a2s_disconnect_s: mpsc::UnboundedSender<A2sDisconnect>,
    b2s_prio_statistic_s: mpsc::UnboundedSender<B2sPrioStatistic>,
}
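
/// Central orchestrator of this network crate: it owns the protocol
/// listeners, tracks all connected participants and drives the handshake for
/// every new channel. Its managers run concurrently inside [`Scheduler::run`]
/// until a shutdown is requested.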
#[derive(Debug)]
pub struct Scheduler {
    local_pid: Pid,
    local_secret: u128,
    closed: AtomicBool,
    run_channels: Option<ControlChannels>,
    participant_channels: Arc<Mutex<Option<ParticipantChannels>>>,
    participants: Arc<Mutex<HashMap<Pid, ParticipantInfo>>>,
    channel_ids: Arc<AtomicU64>,
    channel_listener: Mutex<HashMap<ProtocolInfo, oneshot::Sender<()>>>,
    metrics: Arc<NetworkMetrics>,
    protocol_metrics: Arc<ProtocolMetrics>,
}

impl Scheduler {
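    /// Creates the scheduler and returns, alongside it, the senders used by
    /// the api side for listen/connect/shutdown requests and the receiver on
    /// which newly connected [`Participant`]s are handed back.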
    pub fn new(
        local_pid: Pid,
        #[cfg(feature = "metrics")] registry: Option<&Registry>,
    ) -> (
        Self,
        mpsc::UnboundedSender<A2sListen>,
        mpsc::UnboundedSender<A2sConnect>,
        mpsc::UnboundedReceiver<Participant>,
        oneshot::Sender<()>,
    ) {
        let (a2s_listen_s, a2s_listen_r) = mpsc::unbounded_channel::<A2sListen>();
        let (a2s_connect_s, a2s_connect_r) = mpsc::unbounded_channel::<A2sConnect>();
        let (s2a_connected_s, s2a_connected_r) = mpsc::unbounded_channel::<Participant>();
        let (a2s_scheduler_shutdown_s, a2s_scheduler_shutdown_r) = oneshot::channel::<()>();
        let (a2s_disconnect_s, a2s_disconnect_r) = mpsc::unbounded_channel::<A2sDisconnect>();
        let (b2s_prio_statistic_s, b2s_prio_statistic_r) =
            mpsc::unbounded_channel::<B2sPrioStatistic>();

        let run_channels = Some(ControlChannels {
            a2s_listen_r,
            a2s_connect_r,
            a2s_scheduler_shutdown_r,
            a2s_disconnect_r,
            b2s_prio_statistic_r,
        });

        let participant_channels = ParticipantChannels {
            s2a_connected_s,
            a2s_disconnect_s,
            b2s_prio_statistic_s,
        };

        let metrics = Arc::new(NetworkMetrics::new(&local_pid).unwrap());
        let protocol_metrics = Arc::new(ProtocolMetrics::new().unwrap());

        #[cfg(feature = "metrics")]
        {
            if let Some(registry) = registry {
                metrics.register(registry).unwrap();
                protocol_metrics.register(registry).unwrap();
            }
        }

        let mut rng = rand::thread_rng();
        let local_secret: u128 = rng.gen();

        (
            Self {
                local_pid,
                local_secret,
                closed: AtomicBool::new(false),
                run_channels,
                participant_channels: Arc::new(Mutex::new(Some(participant_channels))),
                participants: Arc::new(Mutex::new(HashMap::new())),
                channel_ids: Arc::new(AtomicU64::new(0)),
                channel_listener: Mutex::new(HashMap::new()),
                metrics,
                protocol_metrics,
            },
            a2s_listen_s,
            a2s_connect_s,
            s2a_connected_r,
            a2s_scheduler_shutdown_s,
        )
    }
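
    /// Drives all managers concurrently until a scheduler shutdown is
    /// requested. May only be called once, as it takes `run_channels` out of
    /// the struct.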
    pub async fn run(mut self) {
        let run_channels = self
            .run_channels
            .take()
            .expect("run() can only be called once");

        tokio::join!(
            self.listen_mgr(run_channels.a2s_listen_r),
            self.connect_mgr(run_channels.a2s_connect_r),
            self.disconnect_mgr(run_channels.a2s_disconnect_r),
            self.prio_adj_mgr(run_channels.b2s_prio_statistic_r),
            self.scheduler_shutdown_mgr(run_channels.a2s_scheduler_shutdown_r),
        );
    }
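
    /// Handles api requests to listen on an address: each request spawns a
    /// protocol listener that can be stopped via `channel_listener`, and every
    /// channel it accepts is passed on to `init_protocol`. Requests are
    /// processed concurrently.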
    async fn listen_mgr(&self, a2s_listen_r: mpsc::UnboundedReceiver<A2sListen>) {
        trace!("Start listen_mgr");
        let a2s_listen_r = UnboundedReceiverStream::new(a2s_listen_r);
        a2s_listen_r
            .for_each_concurrent(None, |(address, s2a_listen_result_s)| {
                let address = address;
                let cids = Arc::clone(&self.channel_ids);

                #[cfg(feature = "metrics")]
                let mcache = self.metrics.connect_requests_cache(&address);

                debug!(?address, "Got request to open a channel_creator");
                self.metrics.listen_request(&address);
                let (s2s_stop_listening_s, s2s_stop_listening_r) = oneshot::channel::<()>();
                let (c2s_protocol_s, mut c2s_protocol_r) = mpsc::unbounded_channel();
                let metrics = Arc::clone(&self.protocol_metrics);

                async move {
                    self.channel_listener
                        .lock()
                        .await
                        .insert(address.clone().into(), s2s_stop_listening_s);

                    #[cfg(feature = "metrics")]
                    mcache.inc();

                    let res = match address {
                        ListenAddr::Tcp(addr) => {
                            Protocols::with_tcp_listen(
                                addr,
                                cids,
                                metrics,
                                s2s_stop_listening_r,
                                c2s_protocol_s,
                            )
                            .await
                        },
                        #[cfg(feature = "quic")]
                        ListenAddr::Quic(addr, ref server_config) => {
                            Protocols::with_quic_listen(
                                addr,
                                server_config.clone(),
                                cids,
                                metrics,
                                s2s_stop_listening_r,
                                c2s_protocol_s,
                            )
                            .await
                        },
                        ListenAddr::Mpsc(addr) => {
                            Protocols::with_mpsc_listen(
                                addr,
                                cids,
                                metrics,
                                s2s_stop_listening_r,
                                c2s_protocol_s,
                            )
                            .await
                        },
                        _ => unimplemented!(),
                    };
                    let _ = s2a_listen_result_s.send(res);

                    while let Some((prot, cid)) = c2s_protocol_r.recv().await {
                        self.init_protocol(prot, cid, None, true).await;
                    }
                }
            })
            .await;
        trace!("Stop listen_mgr");
    }
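
    /// Handles api `connect` requests one by one: builds the protocol for the
    /// requested address and runs the handshake via `init_protocol`. Failures
    /// are reported back on the provided oneshot sender.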
    async fn connect_mgr(&self, mut a2s_connect_r: mpsc::UnboundedReceiver<A2sConnect>) {
        trace!("Start connect_mgr");
        while let Some((addr, pid_sender)) = a2s_connect_r.recv().await {
            let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
            let metrics =
                ProtocolMetricCache::new(&cid.to_string(), Arc::clone(&self.protocol_metrics));
            self.metrics.connect_request(&addr);
            let protocol = match addr {
                ConnectAddr::Tcp(addr) => Protocols::with_tcp_connect(addr, metrics).await,
                #[cfg(feature = "quic")]
                ConnectAddr::Quic(addr, ref config, name) => {
                    Protocols::with_quic_connect(addr, config.clone(), name, metrics).await
                },
                ConnectAddr::Mpsc(addr) => Protocols::with_mpsc_connect(addr, metrics).await,
                _ => unimplemented!(),
            };
            let protocol = match protocol {
                Ok(p) => p,
                Err(e) => {
                    pid_sender.send(Err(e)).unwrap();
                    continue;
                },
            };
            self.init_protocol(protocol, cid, Some(pid_sender), false)
                .await;
        }
        trace!("Stop connect_mgr");
    }
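
    /// Handles api requests to disconnect a participant: removes it from the
    /// participant map, asks its `BParticipant` to shut down within the given
    /// timeout and reports the result back to the api.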
    async fn disconnect_mgr(&self, a2s_disconnect_r: mpsc::UnboundedReceiver<A2sDisconnect>) {
        trace!("Start disconnect_mgr");

        let a2s_disconnect_r = UnboundedReceiverStream::new(a2s_disconnect_r);
        a2s_disconnect_r
            .for_each_concurrent(
                None,
                |(pid, (timeout_time, return_once_successful_shutdown))| {
                    // Closing Participants is done the following way:
                    // 1. We drop our senders and receivers.
                    // 2. We close the BParticipant; this drops its senders and receivers.
                    // 3. The Participant will try to access the BParticipant senders and
                    //    receivers on its next api action; that fails, and the Participant is
                    //    closed then.
                    let participants = Arc::clone(&self.participants);
                    async move {
                        trace!(?pid, "Got request to close participant");
                        let pi = participants.lock().await.remove(&pid);
                        trace!(?pid, "dropped participants lock");
                        let r = if let Some(mut pi) = pi {
                            let (finished_sender, finished_receiver) = oneshot::channel();
                            // NOTE: If there's nothing to synchronize on (because the send failed)
                            // we can assume everything relevant was shut down.
                            let _ = pi
                                .s2b_shutdown_bparticipant_s
                                .take()
                                .unwrap()
                                .send((timeout_time, finished_sender));
                            drop(pi);
                            trace!(?pid, "dropped bparticipant, waiting for finish");
                            // If the await fails, everything is already shut down, so send Ok(()).
                            let e = finished_receiver.await.unwrap_or(Ok(()));
                            trace!(?pid, "waiting completed");
                            // This can fail, as api.rs has a timeout.
                            return_once_successful_shutdown.send(e)
                        } else {
                            debug!(?pid, "Looks like participant is already dropped");
                            return_once_successful_shutdown.send(Ok(()))
                        };
                        if r.is_err() {
                            trace!(?pid, "Closed participant with timeout");
                        } else {
                            trace!(?pid, "Closed participant");
                        }
                    }
                },
            )
            .await;
        trace!("Stop disconnect_mgr");
    }
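
    /// Drains the priority statistics sent by the `BParticipant`s. Adjusting
    /// priorities based on these numbers is still a TODO.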
    async fn prio_adj_mgr(
        &self,
        mut b2s_prio_statistic_r: mpsc::UnboundedReceiver<B2sPrioStatistic>,
    ) {
        trace!("Start prio_adj_mgr");
        while let Some((_pid, _frame_cnt, _unused)) = b2s_prio_statistic_r.recv().await {
            //TODO adjust prios in participants here!
        }
        trace!("Stop prio_adj_mgr");
    }
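
    /// Waits for the shutdown signal from the api, then gracefully shuts down
    /// all `BParticipant`s (with a generous timeout), stops every protocol
    /// listener and finally drops the participant channels so no new
    /// participants can be created.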
    async fn scheduler_shutdown_mgr(&self, a2s_scheduler_shutdown_r: oneshot::Receiver<()>) {
        trace!("Start scheduler_shutdown_mgr");
        if a2s_scheduler_shutdown_r.await.is_err() {
            warn!("Scheduler shutdown got triggered because a2s_scheduler_shutdown_r failed");
        };
        info!("Shutdown of scheduler requested");
        self.closed.store(true, Ordering::SeqCst);
        debug!("Shutting down all BParticipants gracefully");
        let mut participants = self.participants.lock().await;
        let waitings = participants
            .drain()
            .map(|(pid, mut pi)| {
                trace!(?pid, "Shutting down BParticipants");
                let (finished_sender, finished_receiver) = oneshot::channel();
                pi.s2b_shutdown_bparticipant_s
                    .take()
                    .unwrap()
                    .send((Duration::from_secs(120), finished_sender))
                    .unwrap();
                (pid, finished_receiver)
            })
            .collect::<Vec<_>>();
        drop(participants);
        debug!("Wait for participants to be shut down");
        for (pid, recv) in waitings {
            if let Err(e) = recv.await {
                error!(
                    ?pid,
                    ?e,
                    "Failed to finish sending all remaining messages to participant when shutting \
                     down"
                );
            };
        }
        debug!("shutting down protocol listeners");
        for (addr, end_channel_sender) in self.channel_listener.lock().await.drain() {
            trace!(?addr, "stopping listen on protocol");
            if let Err(e) = end_channel_sender.send(()) {
                warn!(?addr, ?e, "listener crashed/disconnected already");
            }
        }
        debug!("Scheduler shut down gracefully");
        // Remove the possibility to create new participants; this is needed to close
        // down some of the managers.
        self.participant_channels.lock().await.take();

        trace!("Stop scheduler_shutdown_mgr");
    }
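
    /// Runs the handshake on a freshly created channel in a spawned task, so
    /// a slow or malicious peer cannot block the scheduler. On success the
    /// channel is either attached to the existing participant (after the
    /// secret has been verified) or a new `BParticipant` is spawned and the
    /// resulting [`Participant`] is returned to the caller of `connect`, or
    /// handed to the `Network` for incoming connections.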
    async fn init_protocol(
        &self,
        mut protocol: Protocols,
        cid: Cid,
        s2a_return_pid_s: Option<oneshot::Sender<Result<Participant, NetworkConnectError>>>,
        send_handshake: bool,
    ) {
        // Channels are unknown until the PID is known!
        /* When A connects to a NETWORK, we, the listener, answer with a Handshake.
          Pro:    - It's easier to debug, as someone who opens a port gets a magic number back!
          Contra: - DoS possibility, because we answer first
                  - Speed, because otherwise the message could be sent with the creation
        */
        let participant_channels = self.participant_channels.lock().await.clone().unwrap();
        // The spawn is needed here: for a TCP connect it would otherwise mean that
        // only one participant could ever be in the handshake phase, so someone could
        // easily deadlock the whole server for new clients. UDP doesn't work at all
        // without it, as the UDP listening is done in another place.
        let participants = Arc::clone(&self.participants);
        let metrics = Arc::clone(&self.metrics);
        let local_pid = self.local_pid;
        let local_secret = self.local_secret;
        // This is necessary for UDP to work at all and to remove code duplication.
        tokio::spawn(
            async move {
                trace!(?cid, "Open channel and be ready for Handshake");
                use network_protocol::InitProtocol;
                let init_result = protocol
                    .initialize(send_handshake, local_pid, local_secret)
                    .instrument(tracing::info_span!("handshake", ?cid))
                    .await;
                match init_result {
                    Ok((pid, sid, secret)) => {
                        trace!(
                            ?cid,
                            ?pid,
                            "Detected that my channel is ready, activating it :)"
                        );
                        let mut participants = participants.lock().await;
                        if !participants.contains_key(&pid) {
                            debug!(?cid, "New participant connected via a channel");
                            let (
                                bparticipant,
                                a2b_open_stream_s,
                                b2a_stream_opened_r,
                                s2b_create_channel_s,
                                s2b_shutdown_bparticipant_s,
                                b2a_bandwidth_stats_r,
                            ) = BParticipant::new(local_pid, pid, sid, Arc::clone(&metrics));

                            let participant = Participant::new(
                                local_pid,
                                pid,
                                a2b_open_stream_s,
                                b2a_stream_opened_r,
                                b2a_bandwidth_stats_r,
                                participant_channels.a2s_disconnect_s,
                            );

                            #[cfg(feature = "metrics")]
                            metrics.participants_connected_total.inc();
                            participants.insert(pid, ParticipantInfo {
                                secret,
                                s2b_create_channel_s: s2b_create_channel_s.clone(),
                                s2b_shutdown_bparticipant_s: Some(s2b_shutdown_bparticipant_s),
                            });
                            drop(participants);
                            trace!("dropped participants lock");
                            let p = pid;
                            tokio::spawn(
                                bparticipant
                                    .run(participant_channels.b2s_prio_statistic_s)
                                    .instrument(tracing::info_span!("remote", ?p)),
                            );
                            // Create a new channel within BParticipant and wait for it to run.
                            let (b2s_create_channel_done_s, b2s_create_channel_done_r) =
                                oneshot::channel();
                            // From now on the wire connects directly with the bparticipant!
                            s2b_create_channel_s
                                .send((cid, sid, protocol, b2s_create_channel_done_s))
                                .unwrap();
                            b2s_create_channel_done_r.await.unwrap();
                            if let Some(pid_oneshot) = s2a_return_pid_s {
                                // someone is waiting with `connect`, so give them their PID
                                pid_oneshot.send(Ok(participant)).unwrap();
                            } else {
                                // no one is waiting on this Participant, return it to the Network
                                if participant_channels
                                    .s2a_connected_s
                                    .send(participant)
                                    .is_err()
                                {
                                    warn!("seems like Network already got closed");
                                };
                            }
                        } else {
                            let pi = &participants[&pid];
                            trace!(
                                ?cid,
                                "2nd+ channel of participant, going to compare security ids"
                            );
                            if pi.secret != secret {
                                warn!(
                                    ?cid,
                                    ?pid,
                                    ?secret,
                                    "Detected incompatible Secret! This is probably an attack!"
                                );
                                error!(?cid, "Just dropping here, TODO handle this correctly!");
                                //TODO
                                if let Some(pid_oneshot) = s2a_return_pid_s {
                                    // someone is waiting with `connect`, so give them their Error
                                    pid_oneshot
                                        .send(Err(NetworkConnectError::InvalidSecret))
                                        .unwrap();
                                }
                                return;
                            }
                            error!(
                                ?cid,
                                "Can't answer the pid_oneshot, as the SAME participant would \
                                 need to be created; maybe switch to Arc"
                            );
                        }
                        // From now on this CHANNEL can receive other frames!
                        // move directly to participant!
                    },
                    Err(e) => {
                        debug!(?cid, ?e, "Handshake from a new connection failed");
                        #[cfg(feature = "metrics")]
                        metrics.failed_handshakes_total.inc();
                        if let Some(pid_oneshot) = s2a_return_pid_s {
                            // someone is waiting with `connect`, so give them their Error
                            trace!(?cid, "returning the Err to the api that requested the connect");
                            pid_oneshot
                                .send(Err(NetworkConnectError::Handshake(e)))
                                .unwrap();
                        }
                    },
                }
            }
            .instrument(tracing::info_span!("")),
        ); /* WORKAROUND FOR SPAN NOT TO GET LOST */
    }
}