2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
|
|
|
use crate::metrics::{CidFrameCache, NetworkMetrics};
|
2020-08-21 12:01:49 +00:00
|
|
|
use crate::{
|
|
|
|
participant::C2pFrame,
|
|
|
|
types::{Cid, Frame, Mid, Pid, Sid},
|
|
|
|
};
|
2020-04-08 14:26:42 +00:00
|
|
|
use async_std::{
|
|
|
|
net::{TcpStream, UdpSocket},
|
|
|
|
prelude::*,
|
|
|
|
};
|
2020-05-04 13:27:58 +00:00
|
|
|
use futures::{
|
|
|
|
channel::{mpsc, oneshot},
|
2020-07-10 13:31:26 +00:00
|
|
|
future::{Fuse, FutureExt},
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
lock::Mutex,
|
2020-05-04 13:27:58 +00:00
|
|
|
select,
|
|
|
|
sink::SinkExt,
|
|
|
|
stream::StreamExt,
|
|
|
|
};
|
2020-07-09 11:42:38 +00:00
|
|
|
use std::{convert::TryFrom, net::SocketAddr, sync::Arc};
|
2020-04-08 14:26:42 +00:00
|
|
|
use tracing::*;
|
|
|
|
|
2020-04-24 10:56:04 +00:00
|
|
|
// Wire-level frame type discriminants (first byte of every frame on the wire).
// Reserving bytes 0, 10, 13 as i have enough space and want to make it easy to
// detect a invalid client, e.g. sending an empty line would make 10 first char
// const FRAME_RESERVED_1: u8 = 0;
const FRAME_HANDSHAKE: u8 = 1;
const FRAME_INIT: u8 = 2;
const FRAME_SHUTDOWN: u8 = 3;
const FRAME_OPEN_STREAM: u8 = 4;
const FRAME_CLOSE_STREAM: u8 = 5;
const FRAME_DATA_HEADER: u8 = 6;
const FRAME_DATA: u8 = 7;
const FRAME_RAW: u8 = 8;
//const FRAME_RESERVED_2: u8 = 10;
//const FRAME_RESERVED_3: u8 = 13;
|
|
|
|
|
2020-04-08 14:26:42 +00:00
|
|
|
/// The set of transport protocols a channel can run on.
/// Each variant wraps the protocol-specific reader/writer state.
#[derive(Debug)]
pub(crate) enum Protocols {
    Tcp(TcpProtocol),
    Udp(UdpProtocol),
    //Mpsc(MpscChannel),
}
|
|
|
|
|
|
|
|
/// TCP transport state for one channel: the connected stream plus
/// (feature-gated) metrics handles used by the read/write loops.
#[derive(Debug)]
pub(crate) struct TcpProtocol {
    // cloned by read_from_wire/write_to_wire so both halves can run concurrently
    stream: TcpStream,
    #[cfg(feature = "metrics")]
    metrics: Arc<NetworkMetrics>,
}
|
|
|
|
|
|
|
|
/// UDP transport state for one channel. Unlike TCP there is no stream;
/// incoming datagrams are delivered through `data_in` by an external receiver
/// task, and outgoing datagrams are sent on `socket` to `remote_addr`.
#[derive(Debug)]
pub(crate) struct UdpProtocol {
    socket: Arc<UdpSocket>,
    remote_addr: SocketAddr,
    #[cfg(feature = "metrics")]
    metrics: Arc<NetworkMetrics>,
    // Mutex (not RwLock): only ever one reader at a time — see commit note
    // "switched Protocol RwLock to Mutex, as it's only ever 1"
    data_in: Mutex<mpsc::UnboundedReceiver<Vec<u8>>>,
}
|
|
|
|
|
2020-05-27 11:43:29 +00:00
|
|
|
//TODO: PERFORMACE: Use BufWriter and BufReader from std::io!
|
2020-04-08 14:26:42 +00:00
|
|
|
impl TcpProtocol {
|
2020-07-14 23:34:41 +00:00
|
|
|
/// Creates a new [`TcpProtocol`] over an already-connected `stream`.
pub(crate) fn new(
    stream: TcpStream,
    #[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
) -> Self {
    Self {
        stream,
        #[cfg(feature = "metrics")]
        metrics,
    }
}
|
2020-04-08 14:26:42 +00:00
|
|
|
|
2020-05-27 15:58:57 +00:00
|
|
|
/// read_except and if it fails, close the protocol
|
2020-07-10 13:31:26 +00:00
|
|
|
async fn read_or_close(
|
2020-08-21 12:01:49 +00:00
|
|
|
cid: Cid,
|
2020-05-27 15:58:57 +00:00
|
|
|
mut stream: &TcpStream,
|
|
|
|
mut bytes: &mut [u8],
|
2020-07-10 13:31:26 +00:00
|
|
|
mut end_receiver: &mut Fuse<oneshot::Receiver<()>>,
|
2020-08-21 12:01:49 +00:00
|
|
|
w2c_cid_frame_s: &mut mpsc::UnboundedSender<C2pFrame>,
|
2020-07-10 13:31:26 +00:00
|
|
|
) -> bool {
|
|
|
|
match select! {
|
|
|
|
r = stream.read_exact(&mut bytes).fuse() => Some(r),
|
|
|
|
_ = end_receiver => None,
|
|
|
|
} {
|
|
|
|
Some(Ok(_)) => false,
|
|
|
|
Some(Err(e)) => {
|
2020-08-18 15:52:19 +00:00
|
|
|
info!(?e, "Closing tcp protocol due to read error");
|
2020-08-21 12:01:49 +00:00
|
|
|
//w2c_cid_frame_s is shared, dropping it wouldn't notify the receiver as every
|
|
|
|
// channel is holding a sender! thats why Ne need a explicit
|
|
|
|
// STOP here
|
|
|
|
w2c_cid_frame_s
|
|
|
|
.send((cid, Err(())))
|
|
|
|
.await
|
|
|
|
.expect("Channel or Participant seems no longer to exist");
|
2020-07-10 13:31:26 +00:00
|
|
|
true
|
|
|
|
},
|
|
|
|
None => {
|
2020-08-18 11:48:26 +00:00
|
|
|
trace!("shutdown requested");
|
2020-07-10 13:31:26 +00:00
|
|
|
true
|
|
|
|
},
|
2020-05-27 15:58:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
/// Read loop of the TCP protocol: parses length-prefixed frames off the wire
/// and forwards each as `(cid, Ok(frame))` on `w2c_cid_frame_s` until either
/// `end_r` signals shutdown or a read error closes the protocol (both handled
/// inside `read_or_close`).
pub async fn read_from_wire(
    &self,
    cid: Cid,
    w2c_cid_frame_s: &mut mpsc::UnboundedSender<C2pFrame>,
    end_r: oneshot::Receiver<()>,
) {
    trace!("Starting up tcp read()");
    #[cfg(feature = "metrics")]
    let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_in_total.clone(), cid);
    #[cfg(feature = "metrics")]
    let throughput_cache = self
        .metrics
        .wire_in_throughput
        .with_label_values(&[&cid.to_string()]);
    let stream = self.stream.clone();
    let mut end_r = end_r.fuse();

    // Reads into the given buffer; `break`s out of the enclosing `loop` when
    // read_or_close reports a failure or a requested shutdown.
    macro_rules! read_or_close {
        ($x:expr) => {
            if TcpProtocol::read_or_close(cid, &stream, $x, &mut end_r, w2c_cid_frame_s).await {
                trace!("read_or_close requested a shutdown");
                break;
            }
        };
    }

    loop {
        // First byte of every frame is its type discriminant.
        let frame_no = {
            let mut bytes = [0u8; 1];
            read_or_close!(&mut bytes);
            bytes[0]
        };
        let frame = match frame_no {
            FRAME_HANDSHAKE => {
                // 7 bytes magic number + 3x u32 version (little endian)
                let mut bytes = [0u8; 19];
                read_or_close!(&mut bytes);
                let magic_number = *<&[u8; 7]>::try_from(&bytes[0..7]).unwrap();
                Frame::Handshake {
                    magic_number,
                    version: [
                        u32::from_le_bytes(*<&[u8; 4]>::try_from(&bytes[7..11]).unwrap()),
                        u32::from_le_bytes(*<&[u8; 4]>::try_from(&bytes[11..15]).unwrap()),
                        u32::from_le_bytes(*<&[u8; 4]>::try_from(&bytes[15..19]).unwrap()),
                    ],
                }
            },
            FRAME_INIT => {
                // 16 bytes pid followed by 16 bytes secret; the buffer is reused
                let mut bytes = [0u8; 16];
                read_or_close!(&mut bytes);
                let pid = Pid::from_le_bytes(bytes);
                read_or_close!(&mut bytes);
                let secret = u128::from_le_bytes(bytes);
                Frame::Init { pid, secret }
            },
            FRAME_SHUTDOWN => Frame::Shutdown,
            FRAME_OPEN_STREAM => {
                // 8 bytes sid + 1 byte prio + 1 byte promises
                let mut bytes = [0u8; 10];
                read_or_close!(&mut bytes);
                let sid = Sid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
                let prio = bytes[8];
                let promises = bytes[9];
                Frame::OpenStream {
                    sid,
                    prio,
                    promises,
                }
            },
            FRAME_CLOSE_STREAM => {
                let mut bytes = [0u8; 8];
                read_or_close!(&mut bytes);
                let sid = Sid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
                Frame::CloseStream { sid }
            },
            FRAME_DATA_HEADER => {
                // 8 bytes mid + 8 bytes sid + 8 bytes total message length
                let mut bytes = [0u8; 24];
                read_or_close!(&mut bytes);
                let mid = Mid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
                let sid = Sid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[8..16]).unwrap());
                let length = u64::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[16..24]).unwrap());
                Frame::DataHeader { mid, sid, length }
            },
            FRAME_DATA => {
                // 8 bytes mid + 8 bytes start offset + 2 bytes payload length,
                // then `length` bytes of payload
                let mut bytes = [0u8; 18];
                read_or_close!(&mut bytes);
                let mid = Mid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
                let start = u64::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[8..16]).unwrap());
                let length = u16::from_le_bytes(*<&[u8; 2]>::try_from(&bytes[16..18]).unwrap());
                let mut data = vec![0; length as usize];
                #[cfg(feature = "metrics")]
                throughput_cache.inc_by(length as i64);
                read_or_close!(&mut data);
                Frame::Data { mid, start, data }
            },
            FRAME_RAW => {
                // 2 bytes payload length, then `length` bytes of payload
                let mut bytes = [0u8; 2];
                read_or_close!(&mut bytes);
                let length = u16::from_le_bytes([bytes[0], bytes[1]]);
                let mut data = vec![0; length as usize];
                read_or_close!(&mut data);
                Frame::Raw(data)
            },
            other => {
                // report a RAW frame, but cannot rely on the next 2 bytes to be a size.
                // guessing 32 bytes, which might help to sort down issues
                let mut data = vec![0; 32];
                //keep the first byte!
                read_or_close!(&mut data[1..]);
                data[0] = other;
                warn!(?data, "got a unexpected RAW msg");
                Frame::Raw(data)
            },
        };
        #[cfg(feature = "metrics")]
        metrics_cache.with_label_values(&frame).inc();
        w2c_cid_frame_s
            .send((cid, Ok(frame)))
            .await
            .expect("Channel or Participant seems no longer to exist");
    }
    trace!("Shutting down tcp read()");
}
|
|
|
|
|
2020-05-27 15:58:57 +00:00
|
|
|
/// read_except and if it fails, close the protocol
|
|
|
|
async fn write_or_close(
|
|
|
|
stream: &mut TcpStream,
|
|
|
|
bytes: &[u8],
|
2020-07-10 13:31:26 +00:00
|
|
|
c2w_frame_r: &mut mpsc::UnboundedReceiver<Frame>,
|
2020-05-27 15:58:57 +00:00
|
|
|
) -> bool {
|
|
|
|
match stream.write_all(&bytes).await {
|
|
|
|
Err(e) => {
|
2020-08-18 11:48:26 +00:00
|
|
|
info!(
|
2020-05-27 15:58:57 +00:00
|
|
|
?e,
|
2020-07-05 22:13:53 +00:00
|
|
|
"Got an error writing to tcp, going to close this channel"
|
2020-05-27 15:58:57 +00:00
|
|
|
);
|
2020-07-10 13:31:26 +00:00
|
|
|
c2w_frame_r.close();
|
2020-05-27 15:58:57 +00:00
|
|
|
true
|
|
|
|
},
|
|
|
|
_ => false,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
/// Write loop of the TCP protocol: serializes every frame received on
/// `c2w_frame_r` onto the wire (type byte, then the frame's fields in
/// little-endian encoding) until the channel is exhausted or a write error
/// closes it (handled inside `write_or_close`).
pub async fn write_to_wire(&self, cid: Cid, mut c2w_frame_r: mpsc::UnboundedReceiver<Frame>) {
    trace!("Starting up tcp write()");
    let mut stream = self.stream.clone();
    #[cfg(feature = "metrics")]
    let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_out_total.clone(), cid);
    #[cfg(feature = "metrics")]
    let throughput_cache = self
        .metrics
        .wire_out_throughput
        .with_label_values(&[&cid.to_string()]);
    // without metrics, cid is only used for labeling — silence the unused warning
    #[cfg(not(feature = "metrics"))]
    let _cid = cid;

    // Writes the given bytes; `break`s out of the enclosing `while` when
    // write_or_close reports a failure.
    macro_rules! write_or_close {
        ($x:expr) => {
            if TcpProtocol::write_or_close(&mut stream, $x, &mut c2w_frame_r).await {
                trace!("write_or_close requested a shutdown");
                break;
            }
        };
    }

    while let Some(frame) = c2w_frame_r.next().await {
        #[cfg(feature = "metrics")]
        metrics_cache.with_label_values(&frame).inc();
        // NOTE: the encodings below must stay byte-exact mirrors of the
        // decoders in read_from_wire.
        match frame {
            Frame::Handshake {
                magic_number,
                version,
            } => {
                write_or_close!(&FRAME_HANDSHAKE.to_be_bytes());
                write_or_close!(&magic_number);
                write_or_close!(&version[0].to_le_bytes());
                write_or_close!(&version[1].to_le_bytes());
                write_or_close!(&version[2].to_le_bytes());
            },
            Frame::Init { pid, secret } => {
                write_or_close!(&FRAME_INIT.to_be_bytes());
                write_or_close!(&pid.to_le_bytes());
                write_or_close!(&secret.to_le_bytes());
            },
            Frame::Shutdown => {
                write_or_close!(&FRAME_SHUTDOWN.to_be_bytes());
            },
            Frame::OpenStream {
                sid,
                prio,
                promises,
            } => {
                write_or_close!(&FRAME_OPEN_STREAM.to_be_bytes());
                write_or_close!(&sid.to_le_bytes());
                write_or_close!(&prio.to_le_bytes());
                write_or_close!(&promises.to_le_bytes());
            },
            Frame::CloseStream { sid } => {
                write_or_close!(&FRAME_CLOSE_STREAM.to_be_bytes());
                write_or_close!(&sid.to_le_bytes());
            },
            Frame::DataHeader { mid, sid, length } => {
                write_or_close!(&FRAME_DATA_HEADER.to_be_bytes());
                write_or_close!(&mid.to_le_bytes());
                write_or_close!(&sid.to_le_bytes());
                write_or_close!(&length.to_le_bytes());
            },
            Frame::Data { mid, start, data } => {
                #[cfg(feature = "metrics")]
                throughput_cache.inc_by(data.len() as i64);
                write_or_close!(&FRAME_DATA.to_be_bytes());
                write_or_close!(&mid.to_le_bytes());
                write_or_close!(&start.to_le_bytes());
                write_or_close!(&(data.len() as u16).to_le_bytes());
                write_or_close!(&data);
            },
            Frame::Raw(data) => {
                write_or_close!(&FRAME_RAW.to_be_bytes());
                write_or_close!(&(data.len() as u16).to_le_bytes());
                write_or_close!(&data);
            },
        }
    }
    trace!("shutting down tcp write()");
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl UdpProtocol {
|
|
|
|
/// Creates a new [`UdpProtocol`] for the peer at `remote_addr`.
/// `data_in` delivers raw incoming datagrams and is wrapped in a `Mutex`
/// because only one read loop consumes it at a time.
pub(crate) fn new(
    socket: Arc<UdpSocket>,
    remote_addr: SocketAddr,
    #[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
    data_in: mpsc::UnboundedReceiver<Vec<u8>>,
) -> Self {
    Self {
        socket,
        remote_addr,
        #[cfg(feature = "metrics")]
        metrics,
        data_in: Mutex::new(data_in),
    }
}
|
|
|
|
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
pub async fn read_from_wire(
|
2020-05-04 13:27:58 +00:00
|
|
|
&self,
|
|
|
|
cid: Cid,
|
2020-08-21 12:01:49 +00:00
|
|
|
w2c_cid_frame_s: &mut mpsc::UnboundedSender<C2pFrame>,
|
2020-07-10 13:31:26 +00:00
|
|
|
end_r: oneshot::Receiver<()>,
|
2020-05-04 13:27:58 +00:00
|
|
|
) {
|
2020-07-05 22:13:53 +00:00
|
|
|
trace!("Starting up udp read()");
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-24 23:17:03 +00:00
|
|
|
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_in_total.clone(), cid);
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-27 15:58:57 +00:00
|
|
|
let throughput_cache = self
|
|
|
|
.metrics
|
|
|
|
.wire_in_throughput
|
|
|
|
.with_label_values(&[&cid.to_string()]);
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
let mut data_in = self.data_in.lock().await;
|
2020-07-10 13:31:26 +00:00
|
|
|
let mut end_r = end_r.fuse();
|
2020-05-04 13:27:58 +00:00
|
|
|
while let Some(bytes) = select! {
|
2020-08-21 12:01:49 +00:00
|
|
|
r = data_in.next().fuse() => match r {
|
|
|
|
Some(r) => Some(r),
|
|
|
|
None => {
|
|
|
|
info!("Udp read ended");
|
|
|
|
w2c_cid_frame_s.send((cid, Err(()))).await.expect("Channel or Participant seems no longer to exist");
|
|
|
|
None
|
|
|
|
}
|
|
|
|
},
|
2020-07-10 13:31:26 +00:00
|
|
|
_ = end_r => None,
|
2020-05-04 13:27:58 +00:00
|
|
|
} {
|
2020-07-05 22:13:53 +00:00
|
|
|
trace!("Got raw UDP message with len: {}", bytes.len());
|
2020-04-24 10:56:04 +00:00
|
|
|
let frame_no = bytes[0];
|
|
|
|
let frame = match frame_no {
|
|
|
|
FRAME_HANDSHAKE => {
|
|
|
|
let bytes = &bytes[1..20];
|
|
|
|
let magic_number = [
|
|
|
|
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
|
|
|
|
];
|
|
|
|
Frame::Handshake {
|
|
|
|
magic_number,
|
|
|
|
version: [
|
|
|
|
u32::from_le_bytes([bytes[7], bytes[8], bytes[9], bytes[10]]),
|
|
|
|
u32::from_le_bytes([bytes[11], bytes[12], bytes[13], bytes[14]]),
|
|
|
|
u32::from_le_bytes([bytes[15], bytes[16], bytes[17], bytes[18]]),
|
|
|
|
],
|
|
|
|
}
|
|
|
|
},
|
2020-05-26 13:06:03 +00:00
|
|
|
FRAME_INIT => {
|
2020-04-24 10:56:04 +00:00
|
|
|
let pid = Pid::from_le_bytes([
|
|
|
|
bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
|
|
|
|
bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14],
|
|
|
|
bytes[15], bytes[16],
|
|
|
|
]);
|
2020-05-26 13:06:03 +00:00
|
|
|
let secret = u128::from_le_bytes([
|
|
|
|
bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], bytes[22],
|
|
|
|
bytes[23], bytes[24], bytes[25], bytes[26], bytes[27], bytes[28],
|
|
|
|
bytes[29], bytes[30], bytes[31], bytes[32],
|
|
|
|
]);
|
|
|
|
Frame::Init { pid, secret }
|
2020-04-24 10:56:04 +00:00
|
|
|
},
|
|
|
|
FRAME_SHUTDOWN => Frame::Shutdown,
|
|
|
|
FRAME_OPEN_STREAM => {
|
|
|
|
let bytes = &bytes[1..11];
|
|
|
|
let sid = Sid::from_le_bytes([
|
|
|
|
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
|
|
|
|
bytes[7],
|
|
|
|
]);
|
|
|
|
let prio = bytes[8];
|
|
|
|
let promises = bytes[9];
|
|
|
|
Frame::OpenStream {
|
|
|
|
sid,
|
|
|
|
prio,
|
|
|
|
promises,
|
|
|
|
}
|
|
|
|
},
|
|
|
|
FRAME_CLOSE_STREAM => {
|
|
|
|
let bytes = &bytes[1..9];
|
|
|
|
let sid = Sid::from_le_bytes([
|
|
|
|
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
|
|
|
|
bytes[7],
|
|
|
|
]);
|
|
|
|
Frame::CloseStream { sid }
|
|
|
|
},
|
|
|
|
FRAME_DATA_HEADER => {
|
|
|
|
let bytes = &bytes[1..25];
|
|
|
|
let mid = Mid::from_le_bytes([
|
|
|
|
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
|
|
|
|
bytes[7],
|
|
|
|
]);
|
|
|
|
let sid = Sid::from_le_bytes([
|
|
|
|
bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14],
|
|
|
|
bytes[15],
|
|
|
|
]);
|
|
|
|
let length = u64::from_le_bytes([
|
|
|
|
bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21],
|
|
|
|
bytes[22], bytes[23],
|
|
|
|
]);
|
|
|
|
Frame::DataHeader { mid, sid, length }
|
|
|
|
},
|
|
|
|
FRAME_DATA => {
|
|
|
|
let mid = Mid::from_le_bytes([
|
|
|
|
bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
|
|
|
|
bytes[8],
|
|
|
|
]);
|
|
|
|
let start = u64::from_le_bytes([
|
|
|
|
bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14], bytes[15],
|
|
|
|
bytes[16],
|
|
|
|
]);
|
|
|
|
let length = u16::from_le_bytes([bytes[17], bytes[18]]);
|
|
|
|
let mut data = vec![0; length as usize];
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-27 15:58:57 +00:00
|
|
|
throughput_cache.inc_by(length as i64);
|
2020-04-24 10:56:04 +00:00
|
|
|
data.copy_from_slice(&bytes[19..]);
|
|
|
|
Frame::Data { mid, start, data }
|
|
|
|
},
|
|
|
|
FRAME_RAW => {
|
|
|
|
let length = u16::from_le_bytes([bytes[1], bytes[2]]);
|
|
|
|
let mut data = vec![0; length as usize];
|
|
|
|
data.copy_from_slice(&bytes[3..]);
|
|
|
|
Frame::Raw(data)
|
|
|
|
},
|
|
|
|
_ => Frame::Raw(bytes),
|
|
|
|
};
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-24 23:17:03 +00:00
|
|
|
metrics_cache.with_label_values(&frame).inc();
|
2020-08-21 12:01:49 +00:00
|
|
|
w2c_cid_frame_s.send((cid, Ok(frame))).await.unwrap();
|
2020-04-24 10:56:04 +00:00
|
|
|
}
|
2020-07-05 22:13:53 +00:00
|
|
|
trace!("Shutting down udp read()");
|
2020-04-08 14:26:42 +00:00
|
|
|
}
|
|
|
|
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixed a bug in api_send_send_main, i need to store the stream_p otherwise it's immediately closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
pub async fn write_to_wire(&self, cid: Cid, mut c2w_frame_r: mpsc::UnboundedReceiver<Frame>) {
|
2020-07-05 22:13:53 +00:00
|
|
|
trace!("Starting up udp write()");
|
2020-04-24 10:56:04 +00:00
|
|
|
let mut buffer = [0u8; 2000];
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-24 23:17:03 +00:00
|
|
|
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_out_total.clone(), cid);
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-27 15:58:57 +00:00
|
|
|
let throughput_cache = self
|
|
|
|
.metrics
|
|
|
|
.wire_out_throughput
|
|
|
|
.with_label_values(&[&cid.to_string()]);
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(not(feature = "metrics"))]
|
|
|
|
let _cid = cid;
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
while let Some(frame) = c2w_frame_r.next().await {
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-24 23:17:03 +00:00
|
|
|
metrics_cache.with_label_values(&frame).inc();
|
2020-04-24 10:56:04 +00:00
|
|
|
let len = match frame {
|
|
|
|
Frame::Handshake {
|
|
|
|
magic_number,
|
|
|
|
version,
|
|
|
|
} => {
|
|
|
|
let x = FRAME_HANDSHAKE.to_be_bytes();
|
|
|
|
buffer[0] = x[0];
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[1..8].copy_from_slice(&magic_number);
|
|
|
|
buffer[8..12].copy_from_slice(&version[0].to_le_bytes());
|
|
|
|
buffer[12..16].copy_from_slice(&version[1].to_le_bytes());
|
|
|
|
buffer[16..20].copy_from_slice(&version[2].to_le_bytes());
|
2020-04-24 10:56:04 +00:00
|
|
|
20
|
|
|
|
},
|
2020-05-26 13:06:03 +00:00
|
|
|
Frame::Init { pid, secret } => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_INIT.to_be_bytes()[0];
|
|
|
|
buffer[1..17].copy_from_slice(&pid.to_le_bytes());
|
|
|
|
buffer[17..33].copy_from_slice(&secret.to_le_bytes());
|
2020-05-26 13:06:03 +00:00
|
|
|
33
|
2020-04-24 10:56:04 +00:00
|
|
|
},
|
|
|
|
Frame::Shutdown => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_SHUTDOWN.to_be_bytes()[0];
|
2020-04-24 10:56:04 +00:00
|
|
|
1
|
|
|
|
},
|
|
|
|
Frame::OpenStream {
|
|
|
|
sid,
|
|
|
|
prio,
|
|
|
|
promises,
|
|
|
|
} => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_OPEN_STREAM.to_be_bytes()[0];
|
|
|
|
buffer[1..9].copy_from_slice(&sid.to_le_bytes());
|
|
|
|
buffer[9] = prio.to_le_bytes()[0];
|
|
|
|
buffer[10] = promises.to_le_bytes()[0];
|
2020-04-24 10:56:04 +00:00
|
|
|
11
|
|
|
|
},
|
|
|
|
Frame::CloseStream { sid } => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_CLOSE_STREAM.to_be_bytes()[0];
|
|
|
|
buffer[1..9].copy_from_slice(&sid.to_le_bytes());
|
2020-04-24 10:56:04 +00:00
|
|
|
9
|
|
|
|
},
|
|
|
|
Frame::DataHeader { mid, sid, length } => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_DATA_HEADER.to_be_bytes()[0];
|
|
|
|
buffer[1..9].copy_from_slice(&mid.to_le_bytes());
|
|
|
|
buffer[9..17].copy_from_slice(&sid.to_le_bytes());
|
|
|
|
buffer[17..25].copy_from_slice(&length.to_le_bytes());
|
2020-04-24 10:56:04 +00:00
|
|
|
25
|
|
|
|
},
|
|
|
|
Frame::Data { mid, start, data } => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_DATA.to_be_bytes()[0];
|
|
|
|
buffer[1..9].copy_from_slice(&mid.to_le_bytes());
|
|
|
|
buffer[9..17].copy_from_slice(&start.to_le_bytes());
|
|
|
|
buffer[17..19].copy_from_slice(&(data.len() as u16).to_le_bytes());
|
2020-06-08 09:47:39 +00:00
|
|
|
buffer[19..(data.len() + 19)].clone_from_slice(&data[..]);
|
2020-07-14 23:34:41 +00:00
|
|
|
#[cfg(feature = "metrics")]
|
2020-05-27 15:58:57 +00:00
|
|
|
throughput_cache.inc_by(data.len() as i64);
|
2020-04-24 10:56:04 +00:00
|
|
|
19 + data.len()
|
|
|
|
},
|
|
|
|
Frame::Raw(data) => {
|
2020-07-04 00:04:33 +00:00
|
|
|
buffer[0] = FRAME_RAW.to_be_bytes()[0];
|
|
|
|
buffer[1..3].copy_from_slice(&(data.len() as u16).to_le_bytes());
|
2020-06-08 09:47:39 +00:00
|
|
|
buffer[3..(data.len() + 3)].clone_from_slice(&data[..]);
|
2020-04-24 10:56:04 +00:00
|
|
|
3 + data.len()
|
|
|
|
},
|
|
|
|
};
|
|
|
|
let mut start = 0;
|
|
|
|
while start < len {
|
2020-07-05 22:13:53 +00:00
|
|
|
trace!(?start, ?len, "Splitting up udp frame in multiple packages");
|
2020-04-24 10:56:04 +00:00
|
|
|
match self
|
|
|
|
.socket
|
|
|
|
.send_to(&buffer[start..len], self.remote_addr)
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
Ok(n) => {
|
|
|
|
start += n;
|
|
|
|
if n != len {
|
|
|
|
error!(
|
|
|
|
"THIS DOESNT WORK, as RECEIVER CURRENLTY ONLY HANDLES 1 FRAME per \
|
|
|
|
UDP message. splitting up will fail!"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
},
|
2020-07-05 22:13:53 +00:00
|
|
|
Err(e) => error!(?e, "Need to handle that error!"),
|
2020-04-24 10:56:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-07-05 22:13:53 +00:00
|
|
|
trace!("Shutting down udp write()");
|
2020-04-08 14:26:42 +00:00
|
|
|
}
|
|
|
|
}
|
2020-07-10 13:31:26 +00:00
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{metrics::NetworkMetrics, types::Pid};
    use async_std::net;
    use futures::{executor::block_on, stream::StreamExt};
    use std::sync::Arc;

    /// Feeds a valid handshake frame into a real loopback TCP connection and
    /// checks that `read_from_wire` decodes it into `Frame::Handshake`.
    #[test]
    fn tcp_read_handshake() {
        let pid = Pid::new();
        let cid = 80085;
        let metrics = Arc::new(NetworkMetrics::new(&pid).unwrap());
        let addr = std::net::SocketAddrV4::new(std::net::Ipv4Addr::new(127, 0, 0, 1), 50500);
        block_on(async {
            let server = net::TcpListener::bind(addr).await.unwrap();
            let mut client = net::TcpStream::connect(addr).await.unwrap();

            let s_stream = server.incoming().next().await.unwrap().unwrap();
            let prot = TcpProtocol::new(s_stream, metrics);

            //Send Handshake: frame byte, 7-byte magic number, 3x u32 version
            client.write_all(&[FRAME_HANDSHAKE]).await.unwrap();
            client.write_all(b"HELLOWO").await.unwrap();
            client.write_all(&1337u32.to_le_bytes()).await.unwrap();
            client.write_all(&0u32.to_le_bytes()).await.unwrap();
            client.write_all(&42u32.to_le_bytes()).await.unwrap();
            // BUGFIX: `flush()` returns a future; without `.await` it was
            // dropped unpolled and the flush never actually ran.
            client.flush().await.unwrap();

            //handle data
            let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<C2pFrame>();
            let (read_stop_sender, read_stop_receiver) = oneshot::channel();
            let cid2 = cid;
            // Run the reader on a dedicated thread so the test's executor
            // stays free to poll the receiving end below.
            let t = std::thread::spawn(move || {
                block_on(async {
                    prot.read_from_wire(cid2, &mut w2c_cid_frame_s, read_stop_receiver)
                        .await;
                })
            });
            // Assert that we get some value back! It's a Handshake!
            let (cid_r, frame) = w2c_cid_frame_r.next().await.unwrap();
            assert_eq!(cid, cid_r);
            if let Ok(Frame::Handshake {
                magic_number,
                version,
            }) = frame
            {
                assert_eq!(&magic_number, b"HELLOWO");
                assert_eq!(version, [1337, 0, 42]);
            } else {
                panic!("wrong handshake");
            }
            read_stop_sender.send(()).unwrap();
            t.join().unwrap();
        });
    }

    /// Writes garbage bytes and checks that the reader falls back to
    /// `Frame::Raw` carrying the first chunk of the payload.
    #[test]
    fn tcp_read_garbage() {
        let pid = Pid::new();
        let cid = 80085;
        let metrics = Arc::new(NetworkMetrics::new(&pid).unwrap());
        let addr = std::net::SocketAddrV4::new(std::net::Ipv4Addr::new(127, 0, 0, 1), 50501);
        block_on(async {
            let server = net::TcpListener::bind(addr).await.unwrap();
            let mut client = net::TcpStream::connect(addr).await.unwrap();

            let s_stream = server.incoming().next().await.unwrap().unwrap();
            let prot = TcpProtocol::new(s_stream, metrics);

            //Send garbage (no valid frame-type byte at the front)
            client
                .write_all("x4hrtzsektfhxugzdtz5r78gzrtzfhxfdthfthuzhfzzufasgasdfg".as_bytes())
                .await
                .unwrap();
            // BUGFIX: `flush()` returns a future; without `.await` it was
            // dropped unpolled and the flush never actually ran.
            client.flush().await.unwrap();

            //handle data
            let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<C2pFrame>();
            let (read_stop_sender, read_stop_receiver) = oneshot::channel();
            let cid2 = cid;
            let t = std::thread::spawn(move || {
                block_on(async {
                    prot.read_from_wire(cid2, &mut w2c_cid_frame_s, read_stop_receiver)
                        .await;
                })
            });
            // Assert that we get some value back! It's a Raw!
            let (cid_r, frame) = w2c_cid_frame_r.next().await.unwrap();
            assert_eq!(cid, cid_r);
            if let Ok(Frame::Raw(data)) = frame {
                assert_eq!(&data.as_slice(), b"x4hrtzsektfhxugzdtz5r78gzrtzfhxf");
            } else {
                panic!("wrong frame type");
            }
            read_stop_sender.send(()).unwrap();
            t.join().unwrap();
        });
    }
}
|