use bincode;
use serde::{de::DeserializeOwned, Serialize};
//use std::collections::VecDeque;
use crate::types::{Mid, Sid};
use std::sync::Arc;

//TODO: Evaluate switching to VecDeque for quickly adding and removing data
// from the front and the back.
// - It would probably require custom bincode code, but that's possible.
/// Support struct used for optimising sending the same Message to multiple
/// [`Stream`]s
///
/// For an example usage see: [`send_raw`]
///
/// [`Stream`]: crate::api::Stream
/// [`send_raw`]: crate::api::Stream::send_raw
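/// # Examples
/// A minimal sketch of the sharing pattern (illustrative only; the real sending
/// path goes through [`send_raw`]): the payload is serialized once and the same
/// buffer is then shared via an `Arc` instead of being copied for every [`Stream`].
/// ```ignore
/// use std::sync::Arc;
///
/// let buffer = Arc::new(MessageBuffer { data: vec![1, 2, 3] });
/// let for_stream_a = Arc::clone(&buffer);
/// let for_stream_b = Arc::clone(&buffer);
/// // both handles point at the same bytes; the payload is not copied
/// assert_eq!(for_stream_a.data, for_stream_b.data);
/// ```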
pub struct MessageBuffer {
    pub data: Vec<u8>,
}

#[derive(Debug)]
pub(crate) struct OutgoingMessage {
    pub buffer: Arc<MessageBuffer>,
    pub cursor: u64,
    pub mid: Mid,
    pub sid: Sid,
}

#[derive(Debug)]
pub(crate) struct IncomingMessage {
    pub buffer: MessageBuffer,
    pub length: u64,
    pub mid: Mid,
    pub sid: Sid,
}

pub(crate) fn serialize<M: Serialize>(message: &M) -> MessageBuffer {
    //this will never fail: https://docs.rs/bincode/0.8.0/bincode/fn.serialize.html
    let writer = bincode::serialize(message).unwrap();
    MessageBuffer { data: writer }
}

pub(crate) fn deserialize<M: DeserializeOwned>(buffer: MessageBuffer) -> M {
    let span = buffer.data;
    //This might fail if you choose the wrong type for M: in that case probably type X
    // got transferred while you assumed type Y. This usually means your application
    // logic is wrong, e.g. you expect a String but the remote just sent a u8
    // (see the round-trip test in the tests module below).
    let decoded: M = bincode::deserialize(span.as_slice()).expect(
        "deserialisation failed, this is probably due to a programming error on YOUR side, \
         probably the type sent by the remote isn't what you are expecting. change the type of `M`",
    );
    decoded
}

impl std::fmt::Debug for MessageBuffer {
    #[inline]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        //TODO: small messages!
        let len = self.data.len();
        if len > 20 {
            write!(
                f,
                "MessageBuffer(len: {}, {}, {}, {}, {:?}..{:?})",
                len,
                u32::from_le_bytes([self.data[0], self.data[1], self.data[2], self.data[3]]),
                u32::from_le_bytes([self.data[4], self.data[5], self.data[6], self.data[7]]),
                u32::from_le_bytes([self.data[8], self.data[9], self.data[10], self.data[11]]),
                &self.data[13..16],
                &self.data[len - 8..len]
            )
        } else {
            write!(f, "MessageBuffer(len: {}, {:?})", len, &self.data[..])
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::message::*;

    #[test]
    fn serialize_test() {
        let msg = "abc";
        let mb = serialize(&msg);
        assert_eq!(mb.data.len(), 11);
        assert_eq!(mb.data[0], 3);
        assert_eq!(mb.data[1], 0);
        assert_eq!(mb.data[7], 0);
        assert_eq!(mb.data[8], b'a');
        assert_eq!(mb.data[8], 97);
        assert_eq!(mb.data[9], b'b');
        assert_eq!(mb.data[10], b'c');
    }
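
    // Example (added sketch): round-tripping a `String` through `serialize` and
    // `deserialize`. `M` must be the same type on both ends; choosing a different
    // type is the failure case described in the comment inside `deserialize`.
    #[test]
    fn serialize_deserialize_test() {
        let msg = "hello".to_string();
        let mb = serialize(&msg);
        let decoded: String = deserialize(mb);
        assert_eq!(decoded, "hello");
    }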
}