#[cfg(feature = "metrics")]
use crate::metrics::NetworkMetrics;
use crate::{
    participant::C2pFrame,
    protocols::Protocols,
    types::{
        Cid, Frame, Pid, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2, VELOREN_MAGIC_NUMBER,
        VELOREN_NETWORK_VERSION,
    },
};
use futures::{
    channel::{mpsc, oneshot},
    join,
    sink::SinkExt,
    stream::StreamExt,
    FutureExt,
};
#[cfg(feature = "metrics")] use std::sync::Arc;
use tracing::*;
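
/// Wraps one established low-level connection (TCP or UDP). `new()` hands out
/// the sender/stop handles, `run()` then drives the read and write halves of
/// the protocol until shutdown.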
pub(crate) struct Channel {
    cid: Cid,
    c2w_frame_r: Option<mpsc::UnboundedReceiver<Frame>>,
    read_stop_receiver: Option<oneshot::Receiver<()>>,
}

impl Channel {
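    /// Creates the channel state plus the two handles its owner keeps: a
    /// sender for frames that must go out on this connection, and a oneshot
    /// to stop the protocol reader.
    ///
    /// Minimal usage sketch (not a doctest; `protocol`, `w2c_cid_frame_s` and
    /// `leftover_cid_frame` are assumed to come from the scheduler and
    /// handshake):
    /// ```ignore
    /// let (channel, c2w_frame_s, read_stop_sender) = Channel::new(cid);
    /// // hand `c2w_frame_s` / `read_stop_sender` to the bparticipant, then:
    /// channel.run(protocol, w2c_cid_frame_s, leftover_cid_frame).await;
    /// ```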
    pub fn new(cid: u64) -> (Self, mpsc::UnboundedSender<Frame>, oneshot::Sender<()>) {
        let (c2w_frame_s, c2w_frame_r) = mpsc::unbounded::<Frame>();
        let (read_stop_sender, read_stop_receiver) = oneshot::channel();
        (
            Self {
                cid,
                c2w_frame_r: Some(c2w_frame_r),
                read_stop_receiver: Some(read_stop_receiver),
            },
            c2w_frame_s,
            read_stop_sender,
        )
    }
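
    /// Consumes the channel and runs the protocol: leftover frames captured
    /// during the handshake are replayed first, then the read and write
    /// halves are driven until both shut down.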
    pub async fn run(
        mut self,
        protocol: Protocols,
        mut w2c_cid_frame_s: mpsc::UnboundedSender<C2pFrame>,
        mut leftover_cid_frame: Vec<C2pFrame>,
    ) {
        let c2w_frame_r = self.c2w_frame_r.take().unwrap();
        let read_stop_receiver = self.read_stop_receiver.take().unwrap();

        // Reapply frames that were already received during the handshake, so
        // none of them are lost when the new sender/receiver pair takes over.
        let cnt = leftover_cid_frame.len();
        trace!(?cnt, "Reapplying leftovers");
        for cid_frame in leftover_cid_frame.drain(..) {
            w2c_cid_frame_s.send(cid_frame).await.unwrap();
        }
        trace!(?cnt, "All leftovers reapplied");

        trace!("Start up channel");
        match protocol {
            Protocols::Tcp(tcp) => {
                join!(
                    tcp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver),
                    tcp.write_to_wire(self.cid, c2w_frame_r),
                );
            },
            Protocols::Udp(udp) => {
                join!(
                    udp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver),
                    udp.write_to_wire(self.cid, c2w_frame_r),
                );
            },
        }

        trace!("Shut down channel");
    }
}
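
/// Performs the initial exchange on a fresh connection: first
/// `Frame::Handshake` (magic number + network version), then `Frame::Init`
/// (`Pid` + secret). Only afterwards is the connection handed to a `Channel`.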
#[derive(Debug)]
pub(crate) struct Handshake {
    cid: Cid,
    local_pid: Pid,
    secret: u128,
    init_handshake: bool,
    #[cfg(feature = "metrics")]
    metrics: Arc<NetworkMetrics>,
}

impl Handshake {
    #[cfg(debug_assertions)]
    const WRONG_NUMBER: &'static [u8] = "Handshake does not contain the magic number required by \
                                         a veloren server.\nWe are not sure if you are a valid \
                                         veloren client.\nClosing the connection"
        .as_bytes();
    #[cfg(debug_assertions)]
    const WRONG_VERSION: &'static str = "Handshake does contain a correct magic number, but an \
                                         invalid version.\nWe don't know how to communicate with \
                                         you.\nClosing the connection";
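
    /// Plain constructor. `init_handshake` marks the side that initiated the
    /// connection; that side sends its `Frame::Handshake` first.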
    pub fn new(
        cid: u64,
        local_pid: Pid,
        secret: u128,
        #[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
        init_handshake: bool,
    ) -> Self {
        Self {
            cid,
            local_pid,
            secret,
            #[cfg(feature = "metrics")]
            metrics,
            init_handshake,
        }
    }
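
    /// Runs the handshake on `protocol`. On success returns the remote `Pid`,
    /// a stream id offset (`STREAM_ID_OFFSET1` or `STREAM_ID_OFFSET2`,
    /// depending on which side initiated), the remote secret, and all frames
    /// that arrived behind the handshake and must be replayed via
    /// [`Channel::run`].
    ///
    /// A sketch of the expected call site (assumed surrounding scheduler
    /// code, not a doctest):
    /// ```ignore
    /// let (pid, sid_offset, secret, leftover) = handshake.setup(&protocol).await?;
    /// // later: channel.run(protocol, w2c_cid_frame_s, leftover).await;
    /// ```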
    pub async fn setup(self, protocol: &Protocols) -> Result<(Pid, Sid, u128, Vec<C2pFrame>), ()> {
        let (c2w_frame_s, c2w_frame_r) = mpsc::unbounded::<Frame>();
        let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<C2pFrame>();
        let (read_stop_sender, read_stop_receiver) = oneshot::channel();
        let handler_future =
            self.frame_handler(&mut w2c_cid_frame_r, c2w_frame_s, read_stop_sender);
        let res = match protocol {
            Protocols::Tcp(tcp) => {
                (join! {
                    tcp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver),
                    tcp.write_to_wire(self.cid, c2w_frame_r).fuse(),
                    handler_future,
                })
                .2
            },
            Protocols::Udp(udp) => {
                (join! {
                    udp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver),
                    udp.write_to_wire(self.cid, c2w_frame_r),
                    handler_future,
                })
                .2
            },
        };
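
        // After the handshake futures finish, any frames the protocol pushed
        // behind the handshake are still buffered in `w2c_cid_frame_r`; drain
        // them so they can be replayed as leftovers (see `Channel::run`).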
        match res {
            Ok(res) => {
                let mut leftover_frames = vec![];
                while let Ok(Some(cid_frame)) = w2c_cid_frame_r.try_next() {
                    leftover_frames.push(cid_frame);
                }
                let cnt = leftover_frames.len();
                if cnt > 0 {
                    debug!(
                        ?cnt,
                        "Some additional frames were already transferred, piping them to the \
                         bparticipant as leftover_frames"
                    );
                }
                Ok((res.0, res.1, res.2, leftover_frames))
            },
            Err(()) => Err(()),
        }
    }
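
    /// The handshake state machine itself: step 1 validates the remote
    /// `Frame::Handshake` (magic number, version), step 2 exchanges
    /// `Frame::Init`. Any unexpected frame, protocol error, or closed stream
    /// aborts and stops the protocol reader.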
    async fn frame_handler(
        &self,
        w2c_cid_frame_r: &mut mpsc::UnboundedReceiver<C2pFrame>,
        mut c2w_frame_s: mpsc::UnboundedSender<Frame>,
        read_stop_sender: oneshot::Sender<()>,
    ) -> Result<(Pid, Sid, u128), ()> {
        const ERR_S: &str = "Got a raw message; these are usually debug messages indicating that \
                             something went wrong on the network layer and the connection will be \
                             closed";
        #[cfg(feature = "metrics")]
        let cid_string = self.cid.to_string();
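
        // The initiating side speaks first; the accepting side answers after
        // validating the handshake it receives below.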
        if self.init_handshake {
            self.send_handshake(&mut c2w_frame_s).await;
        }
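
        // Step 1: await and validate the remote `Frame::Handshake`.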
        let frame = w2c_cid_frame_r.next().await.map(|(_cid, frame)| frame);
        #[cfg(feature = "metrics")]
        {
            if let Some(Ok(ref frame)) = frame {
                self.metrics
                    .frames_in_total
                    .with_label_values(&[&cid_string, &frame.get_string()])
                    .inc();
            }
        }
        let r = match frame {
            Some(Ok(Frame::Handshake {
                magic_number,
                version,
            })) => {
                trace!(?magic_number, ?version, "Recv handshake");
                if magic_number != VELOREN_MAGIC_NUMBER {
                    error!(?magic_number, "Connection with invalid magic_number");
                    #[cfg(debug_assertions)]
                    self.send_raw_and_shutdown(&mut c2w_frame_s, Self::WRONG_NUMBER.to_vec())
                        .await;
                    Err(())
                } else if version != VELOREN_NETWORK_VERSION {
                    error!(?version, "Connection with wrong network version");
                    #[cfg(debug_assertions)]
                    self.send_raw_and_shutdown(
                        &mut c2w_frame_s,
                        format!(
                            "{} Our Version: {:?}\nYour Version: {:?}\nClosing the connection",
                            Self::WRONG_VERSION,
                            VELOREN_NETWORK_VERSION,
                            version,
                        )
                        .as_bytes()
                        .to_vec(),
                    )
                    .await;
                    Err(())
                } else {
                    debug!("Handshake completed");
                    if self.init_handshake {
                        self.send_init(&mut c2w_frame_s).await;
                    } else {
                        self.send_handshake(&mut c2w_frame_s).await;
                    }
                    Ok(())
                }
            },
            Some(Ok(frame)) => {
                #[cfg(feature = "metrics")]
                self.metrics
                    .frames_in_total
                    .with_label_values(&[&cid_string, frame.get_string()])
                    .inc();
                if let Frame::Raw(bytes) = frame {
                    match std::str::from_utf8(bytes.as_slice()) {
                        Ok(string) => error!(?string, ERR_S),
                        _ => error!(?bytes, ERR_S),
                    }
                }
                Err(())
            },
            Some(Err(())) => {
                info!("Protocol got interrupted");
                Err(())
            },
            None => Err(()),
        };
        if let Err(()) = r {
            if let Err(e) = read_stop_sender.send(()) {
                trace!(
                    ?e,
                    "Couldn't stop the protocol; probably it encountered a Protocol Stop and \
                     closed itself already, which is fine"
                );
            }
            return Err(());
        }
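
        // Step 2: await the remote `Frame::Init` carrying `Pid` and secret.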
        let frame = w2c_cid_frame_r.next().await.map(|(_cid, frame)| frame);
        let r = match frame {
            Some(Ok(Frame::Init { pid, secret })) => {
                debug!(?pid, "Participant sent their ID");
                #[cfg(feature = "metrics")]
                self.metrics
                    .frames_in_total
                    .with_label_values(&[&cid_string, "ParticipantId"])
                    .inc();
                let stream_id_offset = if self.init_handshake {
                    STREAM_ID_OFFSET1
                } else {
                    self.send_init(&mut c2w_frame_s).await;
                    STREAM_ID_OFFSET2
                };
                info!(?pid, "This Handshake is now configured!");
                Ok((pid, stream_id_offset, secret))
            },
            Some(Ok(frame)) => {
                #[cfg(feature = "metrics")]
                self.metrics
                    .frames_in_total
                    .with_label_values(&[&cid_string, frame.get_string()])
                    .inc();
                if let Frame::Raw(bytes) = frame {
                    match std::str::from_utf8(bytes.as_slice()) {
                        Ok(string) => error!(?string, ERR_S),
                        _ => error!(?bytes, ERR_S),
                    }
                }
                Err(())
            },
            Some(Err(())) => {
                info!("Protocol got interrupted");
                Err(())
            },
            None => Err(()),
        };
        if r.is_err() {
            if let Err(e) = read_stop_sender.send(()) {
                trace!(
                    ?e,
                    "Couldn't stop the protocol; probably it encountered a Protocol Stop and \
                     closed itself already, which is fine"
                );
            }
        }
        r
    }
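
    /// Sends our `Frame::Handshake` (magic number + network version).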
    async fn send_handshake(&self, c2w_frame_s: &mut mpsc::UnboundedSender<Frame>) {
        #[cfg(feature = "metrics")]
        self.metrics
            .frames_out_total
            .with_label_values(&[&self.cid.to_string(), "Handshake"])
            .inc();
        c2w_frame_s
            .send(Frame::Handshake {
                magic_number: VELOREN_MAGIC_NUMBER,
                version: VELOREN_NETWORK_VERSION,
            })
            .await
            .unwrap();
    }
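
    /// Sends our `Frame::Init` with the local `Pid` and secret.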
    async fn send_init(&self, c2w_frame_s: &mut mpsc::UnboundedSender<Frame>) {
        #[cfg(feature = "metrics")]
        self.metrics
            .frames_out_total
            .with_label_values(&[&self.cid.to_string(), "ParticipantId"])
            .inc();
        c2w_frame_s
            .send(Frame::Init {
                pid: self.local_pid,
                secret: self.secret,
            })
            .await
            .unwrap();
    }
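
    /// Debug-only helper: sends a human-readable `Frame::Raw` explaining the
    /// failure, followed by `Frame::Shutdown`.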
    #[cfg(debug_assertions)]
    async fn send_raw_and_shutdown(
        &self,
        c2w_frame_s: &mut mpsc::UnboundedSender<Frame>,
        data: Vec<u8>,
    ) {
        debug!("Sending client instructions before killing");
        #[cfg(feature = "metrics")]
        {
            let cid_string = self.cid.to_string();
            self.metrics
                .frames_out_total
                .with_label_values(&[&cid_string, "Raw"])
                .inc();
            self.metrics
                .frames_out_total
                .with_label_values(&[&cid_string, "Shutdown"])
                .inc();
        }
        c2w_frame_s.send(Frame::Raw(data)).await.unwrap();
        c2w_frame_s.send(Frame::Shutdown).await.unwrap();
    }
}