use crate::{
    channel::{Channel, ChannelProtocols},
    controller::Controller,
    message::{self, InCommingMessage, OutGoingMessage},
    metrics::NetworkMetrics,
    mpsc::MpscChannel,
    tcp::TcpChannel,
    types::{CtrlMsg, Pid, Sid, TokenObjects},
};
use enumset::*;
use futures::stream::StreamExt;
use mio::{
    self,
    net::{TcpListener, TcpStream},
    PollOpt, Ready,
};
use mio_extras;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{
    collections::HashMap,
    sync::{atomic::AtomicBool, mpsc, Arc, Mutex, RwLock},
};
use tlid;
use tracing::*;
use uuid::Uuid;
use uvth::ThreadPool;

#[derive(Clone, Debug)]
pub enum Address {
    Tcp(std::net::SocketAddr),
    Udp(std::net::SocketAddr),
    Mpsc(u64),
}

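/// Properties that can be requested for a `Stream` when opening it; thanks to
/// `EnumSetType` they combine into an `EnumSet<Promise>` via `|`.
///
/// A minimal sketch of combining promises (the chosen combination is
/// illustrative only):
///
/// ```ignore
/// let promises: EnumSet<Promise> = Promise::InOrder | Promise::NoCorrupt;
/// ```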
#[derive(Serialize, Deserialize, EnumSetType, Debug)]
#[enumset(serialize_repr = "u8")]
pub enum Promise {
    InOrder,
    NoCorrupt,
    GuaranteedDelivery,
    Encrypted,
}

#[derive(Clone)]
pub struct Participant {
    remote_pid: Pid,
    network_controller: Arc<Vec<Controller>>,
}

pub struct Stream {
    sid: Sid,
    remote_pid: Pid,
    closed: AtomicBool,
    closed_rx: mpsc::Receiver<()>,
    msg_rx: futures::channel::mpsc::UnboundedReceiver<InCommingMessage>,
    ctr_tx: mio_extras::channel::Sender<CtrlMsg>,
}

pub struct Network {
    _token_pool: tlid::Pool<tlid::Wrapping<usize>>,
    _worker_pool: tlid::Pool<tlid::Wrapping<u64>>,
    controller: Arc<Vec<Controller>>,
    _thread_pool: Arc<ThreadPool>,
    participant_id: Pid,
    sid_backup_per_participant: Arc<RwLock<HashMap<Pid, tlid::Pool<tlid::Checked<Sid>>>>>,
    participants: RwLock<Vec<Participant>>,
    _metrics: Arc<Option<NetworkMetrics>>,
}

impl Network {
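    /// Creates a new `Network` with a single worker `Controller` running on
    /// the given thread pool.
    ///
    /// A minimal construction sketch; `Uuid::new_v4()` and the
    /// `uvth::ThreadPoolBuilder` call are assumptions for illustration, not
    /// taken from this file:
    ///
    /// ```ignore
    /// let thread_pool = Arc::new(uvth::ThreadPoolBuilder::new().build());
    /// let network = Network::new(Uuid::new_v4(), thread_pool);
    /// ```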
    pub fn new(participant_id: Uuid, thread_pool: Arc<ThreadPool>) -> Self {
        let mut token_pool = tlid::Pool::new_full();
        let mut worker_pool = tlid::Pool::new_full();
        let sid_backup_per_participant = Arc::new(RwLock::new(HashMap::new()));
        for _ in 0..participant_id.as_u128().rem_euclid(64) {
            worker_pool.next();
            // random offset from 0 for tests where multiple networks are
            // created and we do not want to pollute the traces with the
            // network pid everywhere
        }
        let metrics = Arc::new(None);
        let controller = Arc::new(vec![Controller::new(
            worker_pool.next(),
            participant_id,
            thread_pool.clone(),
            token_pool.subpool(1000000).unwrap(),
            metrics.clone(),
            sid_backup_per_participant.clone(),
        )]);
        let participants = RwLock::new(vec![]);
        Self {
            _token_pool: token_pool,
            _worker_pool: worker_pool,
            controller,
            _thread_pool: thread_pool,
            participant_id,
            sid_backup_per_participant,
            participants,
            _metrics: metrics,
        }
    }

    fn get_lowest_worker<'a: 'b, 'b>(list: &'a Arc<Vec<Controller>>) -> &'a Controller { &list[0] }

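    /// Registers a listener for the given `Address` on the lowest worker;
    /// incoming connections are later picked up via `connected()`.
    ///
    /// A minimal usage sketch, assuming an already constructed `network`; the
    /// address and port are placeholders:
    ///
    /// ```ignore
    /// network.listen(&Address::Tcp("127.0.0.1:2999".parse().unwrap()))?;
    /// ```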
    pub fn listen(&self, address: &Address) -> Result<(), NetworkError> {
        let span = span!(Level::TRACE, "listen", ?address);
        let worker = Self::get_lowest_worker(&self.controller);
        let _enter = span.enter();
        match address {
            Address::Tcp(a) => {
                let tcp_listener = TcpListener::bind(&a)?;
                info!("listening");
                worker.get_tx().send(CtrlMsg::Register(
                    TokenObjects::TcpListener(tcp_listener),
                    Ready::readable(),
                    PollOpt::edge(),
                ))?;
            },
            Address::Udp(_) => unimplemented!(
                "UDP is currently not supported; the problem is in the internal worker - \
                 channel view. I expect every Channel to have its own socket, but UDP \
                 shares a socket with everyone on it. So there needs to be an instance \
                 that detects new connections inside the worker and then creates a new \
                 channel for them, while handling needs to be done in the UDP layer... \
                 however I am too lazy to build it yet."
            ),
            Address::Mpsc(a) => {
                let (listen_tx, listen_rx) = mio_extras::channel::channel();
                let (connect_tx, connect_rx) = mio_extras::channel::channel();
                let mut registry = (*crate::mpsc::MPSC_REGISTRY).write().unwrap();
                registry.insert(*a, Mutex::new((listen_tx, connect_rx)));
                info!("listening");
                let mpsc_channel = MpscChannel::new(connect_tx, listen_rx);
                let mut channel = Channel::new(
                    self.participant_id,
                    ChannelProtocols::Mpsc(mpsc_channel),
                    self.sid_backup_per_participant.clone(),
                    None,
                );
                channel.handshake();
                channel.tick_send();
                worker.get_tx().send(CtrlMsg::Register(
                    TokenObjects::Channel(channel),
                    Ready::readable() | Ready::writable(),
                    PollOpt::edge(),
                ))?;
            },
        };
        Ok(())
    }

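    /// Connects to a remote `Network` at the given `Address` and returns the
    /// new `Participant` once the handshake has delivered its `Pid`.
    ///
    /// A minimal usage sketch; driving the future with
    /// `futures::executor::block_on` is an illustrative choice:
    ///
    /// ```ignore
    /// let address = Address::Tcp("127.0.0.1:2999".parse().unwrap());
    /// let participant = futures::executor::block_on(network.connect(&address))?;
    /// ```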
    pub async fn connect(&self, address: &Address) -> Result<Participant, NetworkError> {
        let worker = Self::get_lowest_worker(&self.controller);
        let sid_backup_per_participant = self.sid_backup_per_participant.clone();
        let span = span!(Level::INFO, "connect", ?address);
        let _enter = span.enter();
        match address {
            Address::Tcp(a) => {
                info!("connecting");
                let tcp_stream = TcpStream::connect(&a)?;
                let tcp_channel = TcpChannel::new(tcp_stream);
                let (ctrl_tx, ctrl_rx) = mpsc::channel::<Pid>();
                let channel = Channel::new(
                    self.participant_id,
                    ChannelProtocols::Tcp(tcp_channel),
                    sid_backup_per_participant,
                    Some(ctrl_tx),
                );
                worker.get_tx().send(CtrlMsg::Register(
                    TokenObjects::Channel(channel),
                    Ready::readable() | Ready::writable(),
                    PollOpt::edge(),
                ))?;
                let remote_pid = ctrl_rx.recv().unwrap();
                info!(?remote_pid, "successfully connected to");
                let part = Participant {
                    remote_pid,
                    network_controller: self.controller.clone(),
                };
                self.participants.write().unwrap().push(part.clone());
                return Ok(part);
            },
            Address::Udp(_) => unimplemented!("UDP is currently not supported"),
            Address::Mpsc(a) => {
                let mut registry = (*crate::mpsc::MPSC_REGISTRY).write().unwrap();
                let (listen_tx, connect_rx) = match registry.remove(a) {
                    Some(x) => x.into_inner().unwrap(),
                    None => {
                        error!("could not connect to mpsc");
                        return Err(NetworkError::NetworkDestroyed);
                    },
                };
                info!("connecting to mpsc");
                let mpsc_channel = MpscChannel::new(listen_tx, connect_rx);
                let (ctrl_tx, ctrl_rx) = mpsc::channel::<Pid>();
                let channel = Channel::new(
                    self.participant_id,
                    ChannelProtocols::Mpsc(mpsc_channel),
                    self.sid_backup_per_participant.clone(),
                    Some(ctrl_tx),
                );
                worker.get_tx().send(CtrlMsg::Register(
                    TokenObjects::Channel(channel),
                    Ready::readable() | Ready::writable(),
                    PollOpt::edge(),
                ))?;

                let remote_pid = ctrl_rx.recv().unwrap();
                info!(?remote_pid, "successfully connected to");
                let part = Participant {
                    remote_pid,
                    network_controller: self.controller.clone(),
                };
                self.participants.write().unwrap().push(part.clone());
                return Ok(part);
            },
        }
    }

    pub fn disconnect(&self, _participant: Participant) -> Result<(), NetworkError> {
        //TODO: close all channels to a participant!
        unimplemented!("disconnecting a participant is not implemented yet");
    }

    pub fn participants(&self) -> std::sync::RwLockReadGuard<Vec<Participant>> {
        self.participants.read().unwrap()
    }

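    /// Returns the next `Participant` that connected to this `Network`, e.g.
    /// over a listener registered with `listen()`.
    ///
    /// A minimal usage sketch for the accepting side; `block_on` is an
    /// illustrative choice of executor:
    ///
    /// ```ignore
    /// network.listen(&Address::Tcp("127.0.0.1:2999".parse().unwrap()))?;
    /// let participant = futures::executor::block_on(network.connected())?;
    /// ```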
    pub async fn connected(&self) -> Result<Participant, NetworkError> {
        // returns once a Participant has connected and is ready
        loop {
            for worker in self.controller.iter() {
                //TODO: harden!
                worker.tick();
                if let Ok(remote_pid) = worker.get_participant_connect_rx().try_recv() {
                    let part = Participant {
                        remote_pid,
                        network_controller: self.controller.clone(),
                    };
                    self.participants.write().unwrap().push(part.clone());
                    return Ok(part);
                };
            }
            std::thread::sleep(std::time::Duration::from_millis(1));
        }
    }

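    /// Serializes `msg` once and queues it on every `Stream` in `streams`,
    /// consuming the streams.
    ///
    /// A minimal usage sketch; the two streams are assumed to have been
    /// opened beforehand:
    ///
    /// ```ignore
    /// network.multisend(vec![stream_a, stream_b], "broadcast me")?;
    /// ```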
    pub fn multisend<M: Serialize>(
        &self,
        streams: Vec<Stream>,
        msg: M,
    ) -> Result<(), NetworkError> {
        let messagebuffer = Arc::new(message::serialize(&msg));
        //TODO: why do we need a lock here? i want my own local directory which is
        // updated by workers via a channel and needs to be interpreted on a send, but
        // it should almost always be empty except for new channel creations and stream
        // creations!
        for stream in streams {
            stream
                .ctr_tx
                .send(CtrlMsg::Send(OutGoingMessage {
                    buffer: messagebuffer.clone(),
                    cursor: 0,
                    mid: None,
                    sid: stream.sid,
                }))
                .unwrap();
        }
        Ok(())
    }
}

impl Participant {
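    /// Opens a new `Stream` to this `Participant` with the given priority and
    /// set of `Promise`s.
    ///
    /// A minimal usage sketch; the priority value and promise combination are
    /// illustrative only:
    ///
    /// ```ignore
    /// let stream = participant.open(16, Promise::InOrder | Promise::NoCorrupt)?;
    /// ```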
    pub fn open(&self, prio: u8, promises: EnumSet<Promise>) -> Result<Stream, ParticipantError> {
        let (msg_tx, msg_rx) = futures::channel::mpsc::unbounded::<InCommingMessage>();
        for controller in self.network_controller.iter() {
            //trigger tick:
            controller.tick();
            let parts = controller.participants();
            let (stream_close_tx, stream_close_rx) = mpsc::channel();
            let sid = match parts.get(&self.remote_pid) {
                Some(p) => {
                    let sid = p.sid_pool.write().unwrap().next();
                    //prepare the closing of the new stream already
                    p.stream_close_txs
                        .write()
                        .unwrap()
                        .insert(sid, stream_close_tx);
                    sid
                },
                None => return Err(ParticipantError::ParticipantDisconected), /* TODO: maybe the participant was never connected in the first place... */
            };
            let tx = controller.get_tx();
            tx.send(CtrlMsg::OpenStream {
                pid: self.remote_pid,
                sid,
                prio,
                promises,
                msg_tx,
            })
            .unwrap();
            info!(?sid, "successfully opened stream");
            return Ok(Stream::new(
                sid,
                self.remote_pid,
                stream_close_rx,
                msg_rx,
                tx,
            ));
        }
        Err(ParticipantError::ParticipantDisconected)
    }

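    /// Returns the next `Stream` that the remote side of this `Participant`
    /// has opened.
    ///
    /// A minimal usage sketch for the receiving side; `block_on` and the
    /// `String` payload type are illustrative assumptions:
    ///
    /// ```ignore
    /// let mut stream = futures::executor::block_on(participant.opened()).unwrap();
    /// let text: String = futures::executor::block_on(stream.recv()).unwrap();
    /// ```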
    pub async fn opened(&self) -> Result<Stream, ParticipantError> {
        //TODO: make this async native!
        loop {
            // go over all workers in the network, but only receive on specific channels!
            for worker in self.network_controller.iter() {
                worker.tick();
                let parts = worker.participants();
                if let Some(p) = parts.get(&self.remote_pid) {
                    if let Ok(stream) = p.stream_open_rx.try_recv() {
                        //need a try_recv, as we depend on the tick and it's the same thread...
                        debug!("delivering a stream");
                        return Ok(stream);
                    };
                }
            }
        }
    }
}

impl Stream {
    //TODO: what about Send instead of Serializable if it goes via a PIPE?
    //TODO: timeout per message or per stream? e.g. for position data,
    // if not transmitted within 1 second, throw it away...
    pub(crate) fn new(
        sid: Sid,
        remote_pid: Pid,
        closed_rx: mpsc::Receiver<()>,
        msg_rx: futures::channel::mpsc::UnboundedReceiver<InCommingMessage>,
        ctr_tx: mio_extras::channel::Sender<CtrlMsg>,
    ) -> Self {
        Self {
            sid,
            remote_pid,
            closed: AtomicBool::new(false),
            closed_rx,
            msg_rx,
            ctr_tx,
        }
    }

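    /// Serializes `msg` and queues it on this stream as an `OutGoingMessage`.
    ///
    /// A minimal usage sketch; any `Serialize` payload works, the values here
    /// are illustrative:
    ///
    /// ```ignore
    /// stream.send("hello world")?;
    /// stream.send(42u64)?;
    /// ```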
    pub fn send<M: Serialize>(&self, msg: M) -> Result<(), StreamError> {
        if self.is_closed() {
            return Err(StreamError::StreamClosed);
        }
        let messagebuffer = Arc::new(message::serialize(&msg));
        self.ctr_tx
            .send(CtrlMsg::Send(OutGoingMessage {
                buffer: messagebuffer,
                cursor: 0,
                mid: None,
                sid: self.sid,
            }))
            .unwrap();
        Ok(())
    }

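    /// Awaits the next incoming message on this stream and deserializes it
    /// into `M`.
    ///
    /// A minimal usage sketch; the receiver has to ask for the same type the
    /// sender serialized (`String` here is illustrative):
    ///
    /// ```ignore
    /// let text: String = stream.recv().await?;
    /// ```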
    pub async fn recv<M: DeserializeOwned>(&mut self) -> Result<M, StreamError> {
        if self.is_closed() {
            return Err(StreamError::StreamClosed);
        }
        match self.msg_rx.next().await {
            Some(msg) => {
                info!(?msg, "delivering a message");
                Ok(message::deserialize(msg.buffer))
            },
            None => panic!(
                "Unexpected error, the stream was probably destroyed... maybe, I don't know \
                 yet; no idea of async stuff"
            ),
        }
    }

    pub fn close(mut self) -> Result<(), StreamError> { self.intclose() }

    fn is_closed(&self) -> bool {
        use core::sync::atomic::Ordering;
        if self.closed.load(Ordering::Relaxed) {
            true
        } else if let Ok(()) = self.closed_rx.try_recv() {
            self.closed.store(true, Ordering::SeqCst); //TODO: Is this the right Ordering?
            true
        } else {
            false
        }
    }

    fn intclose(&mut self) -> Result<(), StreamError> {
        use core::sync::atomic::Ordering;
        if self.is_closed() {
            return Err(StreamError::StreamClosed);
        }
        self.ctr_tx
            .send(CtrlMsg::CloseStream {
                pid: self.remote_pid,
                sid: self.sid,
            })
            .unwrap();
        self.closed.store(true, Ordering::SeqCst); //TODO: Is this the right Ordering?
        Ok(())
    }
}

impl Drop for Stream {
    fn drop(&mut self) {
        let _ = self.intclose().map_err(
            |e| error!(?self.sid, ?e, "could not properly shut down stream, which went out of scope"),
        );
    }
}

#[derive(Debug)]
pub enum NetworkError {
    NetworkDestroyed,
    WorkerDestroyed,
    IoError(std::io::Error),
}

#[derive(Debug, PartialEq)]
pub enum ParticipantError {
    ParticipantDisconected,
}

#[derive(Debug, PartialEq)]
pub enum StreamError {
    StreamClosed,
}

impl From<std::io::Error> for NetworkError {
    fn from(err: std::io::Error) -> Self { NetworkError::IoError(err) }
}

impl<T> From<mio_extras::channel::SendError<T>> for NetworkError {
    fn from(_err: mio_extras::channel::SendError<T>) -> Self { NetworkError::WorkerDestroyed }
}