//! Priorities are handled the following way:
//! Prios from 0-63 are allowed.
//! Every 5 prio levels the throughput is halved.
//! E.g. in the same time 100 prio0 messages are sent, only 50 prio5, 25 prio10,
//! 12 prio15 or 6 prio20 messages are sent. Note: TODO: prio0 will be sent
//! immediately when found!
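//!
//! A minimal sketch of the weighting rule, assuming the `PRIOS` table below
//! (which follows roughly `100 * 2^(prio / 5)` points charged per scheduled
//! frame); `cost` is a hypothetical helper for illustration only:
//!
//! ```ignore
//! fn cost(prio: u8) -> u32 {
//!     (100.0 * 2f64.powf(f64::from(prio) / 5.0)).round() as u32
//! }
//! assert_eq!(cost(0), 100); // prio 0 pays 100 points per frame
//! assert_eq!(cost(5), 200); // prio 5 pays twice as much -> half the throughput
//! assert_eq!(cost(10), 400); // prio 10 gets a quarter of prio 0's throughput
//! ```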

use crate::{
    message::OutgoingMessage,
    metrics::NetworkMetrics,
    types::{Frame, Prio, Sid},
};
use futures::channel::oneshot;
use std::{
    collections::{HashMap, HashSet, VecDeque},
    sync::{
        mpsc::{channel, Receiver, Sender},
        Arc,
    },
};
use tracing::*;

const PRIO_MAX: usize = 64;

struct PidSidInfo {
    len: u64,
    empty_notify: Option<oneshot::Sender<()>>,
}

pub(crate) struct PrioManager {
    points: [u32; PRIO_MAX],
    messages: [VecDeque<(Sid, OutgoingMessage)>; PRIO_MAX],
    messages_rx: Receiver<(Prio, Sid, OutgoingMessage)>,
    sid_owned: HashMap<Sid, PidSidInfo>,
    //you can register here to be notified once a pid_sid combination is flushed completely
    sid_flushed_rx: Receiver<(Sid, oneshot::Sender<()>)>,
    queued: HashSet<u8>,
    metrics: Arc<NetworkMetrics>,
    pid: String,
}

impl PrioManager {
    const FRAME_DATA_SIZE: u64 = 1400;
    const PRIOS: [u32; PRIO_MAX] = [
        100, 115, 132, 152, 174, 200, 230, 264, 303, 348, 400, 459, 528, 606, 696, 800, 919, 1056,
        1213, 1393, 1600, 1838, 2111, 2425, 2786, 3200, 3676, 4222, 4850, 5572, 6400, 7352, 8445,
        9701, 11143, 12800, 14703, 16890, 19401, 22286, 25600, 29407, 33779, 38802, 44572, 51200,
        58813, 67559, 77605, 89144, 102400, 117627, 135118, 155209, 178289, 204800, 235253, 270235,
        310419, 356578, 409600, 470507, 540470, 620838,
    ];

    #[allow(clippy::type_complexity)]
    pub fn new(
        metrics: Arc<NetworkMetrics>,
        pid: String,
    ) -> (
        Self,
        Sender<(Prio, Sid, OutgoingMessage)>,
        Sender<(Sid, oneshot::Sender<()>)>,
    ) {
        // (a2p_msg_s, a2p_msg_r)
        let (messages_tx, messages_rx) = channel();
        let (sid_flushed_tx, sid_flushed_rx) = channel();
        (
            Self {
                points: [0; PRIO_MAX],
                messages: [
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                    VecDeque::new(),
                ],
                messages_rx,
                queued: HashSet::new(), //TODO: optimize with u64 and 64 bits
                sid_flushed_rx,
                sid_owned: HashMap::new(),
                metrics,
                pid,
            },
            messages_tx,
            sid_flushed_tx,
        )
    }

    async fn tick(&mut self) {
        // Check Range
        let mut messages = 0;
        let mut closed = 0;
        for (prio, sid, msg) in self.messages_rx.try_iter() {
            debug_assert!(prio as usize <= PRIO_MAX);
            messages += 1;
            self.metrics
                .message_out_total
                .with_label_values(&[&self.pid, &sid.to_string()])
                .inc();
            self.metrics
                .message_out_throughput
                .with_label_values(&[&self.pid, &sid.to_string()])
                .inc_by(msg.buffer.data.len() as i64);
            //trace!(?prio, ?sid, "tick");
            self.queued.insert(prio);
            self.messages[prio as usize].push_back((sid, msg));
            if let Some(cnt) = self.sid_owned.get_mut(&sid) {
                cnt.len += 1;
            } else {
                self.sid_owned.insert(sid, PidSidInfo {
                    len: 1,
                    empty_notify: None,
                });
            }
        }
        //this must happen AFTER the messages above have been drained
        for (sid, return_sender) in self.sid_flushed_rx.try_iter() {
            closed += 1;
            if let Some(cnt) = self.sid_owned.get_mut(&sid) {
                // register sender
                cnt.empty_notify = Some(return_sender);
            } else {
                // return immediately
                return_sender.send(()).unwrap();
            }
        }
        if messages > 0 || closed > 0 {
            trace!(?messages, ?closed, "tick");
        }
    }

    //if None is returned, we are empty!
    fn calc_next_prio(&self) -> Option<u8> {
        // compare all queued prios, max 64 operations
        let mut lowest = std::u32::MAX;
        let mut lowest_id = None;
        for &n in &self.queued {
            let n_points = self.points[n as usize];
            if n_points < lowest {
                lowest = n_points;
                lowest_id = Some(n)
            } else if n_points == lowest && lowest_id.is_some() && n < lowest_id.unwrap() {
                //on equal points the lowest prio wins!
                lowest_id = Some(n)
            }
        }
        lowest_id
    }
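
    // A small worked example of the selection rule above (comment only, numbers
    // illustrative): with points[16] = 1838, points[20] = 1600 and both prios
    // queued, prio 20 is picked next because it has accumulated fewer points;
    // on a tie the numerically lower prio wins.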

    /// returns true once the msg has been fully written out (is empty)
    fn tick_msg<E: Extend<(Sid, Frame)>>(
        msg: &mut OutgoingMessage,
        msg_sid: Sid,
        frames: &mut E,
    ) -> bool {
        let to_send = std::cmp::min(
            msg.buffer.data.len() as u64 - msg.cursor,
            Self::FRAME_DATA_SIZE,
        );
        if to_send > 0 {
            if msg.cursor == 0 {
                frames.extend(std::iter::once((msg_sid, Frame::DataHeader {
                    mid: msg.mid,
                    sid: msg.sid,
                    length: msg.buffer.data.len() as u64,
                })));
            }
            frames.extend(std::iter::once((msg_sid, Frame::Data {
                mid: msg.mid,
                start: msg.cursor,
                data: msg.buffer.data[msg.cursor as usize..(msg.cursor + to_send) as usize]
                    .to_vec(),
            })));
        };
        msg.cursor += to_send;
        msg.cursor >= msg.buffer.data.len() as u64
    }
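
    // Rough illustration of the chunking above: a message of 2 * FRAME_DATA_SIZE + 20
    // bytes is emitted as one `Frame::DataHeader` followed by `Frame::Data` frames of
    // 1400, 1400 and 20 bytes over three calls (see `single_large_p16` in the tests).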

    /// no_of_frames = frames.len()
    /// Your goal is to try to find a realistic no_of_frames!
    /// no_of_frames should be chosen so that all Frames can be sent out before
    /// the next tick!
    /// - if no_of_frames is too high you will fill either the Socket buffer,
    ///   or your internal buffer. In that case you will increase latency for
    ///   high prio messages!
    /// - if no_of_frames is too low you won't saturate your Socket fully, and
    ///   thus get a lower bandwidth than possible
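    ///
    /// A minimal usage sketch (mirroring the `single_p16` test below; `metrics`,
    /// `pid`, `sid` and `msg` are placeholders):
    ///
    /// ```ignore
    /// let (mut mgr, msg_tx, _flush_tx) = PrioManager::new(metrics, pid);
    /// msg_tx.send((16, sid, msg)).unwrap();
    /// let mut frames = VecDeque::new();
    /// // ask for at most 100 frames this tick; fewer are produced if the queues run dry
    /// block_on(mgr.fill_frames(100, &mut frames));
    /// ```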
    pub async fn fill_frames<E: Extend<(Sid, Frame)>>(
        &mut self,
        no_of_frames: usize,
        frames: &mut E,
    ) {
        for v in self.messages.iter_mut() {
            v.reserve_exact(no_of_frames)
        }
        self.tick().await;
        for _ in 0..no_of_frames {
            match self.calc_next_prio() {
                Some(prio) => {
                    //let prio2 = self.calc_next_prio().unwrap();
                    //trace!(?prio, "handle next prio");
                    self.points[prio as usize] += Self::PRIOS[prio as usize];
                    //pop message from front of VecDeque, handle it and push it back, so that all
                    // => messages with same prio get a fair chance :)
                    //TODO: evaluate not popping every time
                    match self.messages[prio as usize].pop_front() {
                        Some((sid, mut msg)) => {
                            if Self::tick_msg(&mut msg, sid, frames) {
                                //debug!(?m.mid, "finish message");
                                //check if prio is empty
                                if self.messages[prio as usize].is_empty() {
                                    self.queued.remove(&prio);
                                }
                                //decrease pid_sid counter by 1 again
                                let cnt = self.sid_owned.get_mut(&sid).expect(
                                    "the pid_sid_owned counter works wrong, more pid,sid removed \
                                     than inserted",
                                );
                                cnt.len -= 1;
                                if cnt.len == 0 {
                                    let cnt = self.sid_owned.remove(&sid).unwrap();
                                    if let Some(empty_notify) = cnt.empty_notify {
                                        empty_notify.send(()).unwrap();
                                    }
                                }
                            } else {
                                trace!(?msg.mid, "repush message");
                                self.messages[prio as usize].push_front((sid, msg));
                            }
                        },
                        None => unreachable!("msg not in VecDeque, but queued"),
                    }
                },
                None => {
                    //QUEUE is empty, we are clearing the POINTS to not build up huge piles of
                    // POINTS on a prio from the past
                    self.points = [0; PRIO_MAX];
                    break;
                },
            }
        }
    }
}

impl std::fmt::Debug for PrioManager {
    #[inline]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut cnt = 0;
        for m in self.messages.iter() {
            cnt += m.len();
        }
        write!(f, "PrioManager(len: {}, queued: {:?})", cnt, &self.queued,)
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        message::{MessageBuffer, OutgoingMessage},
        metrics::NetworkMetrics,
        prios::*,
        types::{Frame, Pid, Prio, Sid},
    };
    use futures::{channel::oneshot, executor::block_on};
    use std::{
        collections::VecDeque,
        sync::{mpsc::Sender, Arc},
    };

    const SIZE: u64 = PrioManager::FRAME_DATA_SIZE;
    const USIZE: usize = PrioManager::FRAME_DATA_SIZE as usize;

    fn mock_new() -> (
        PrioManager,
        Sender<(Prio, Sid, OutgoingMessage)>,
        Sender<(Sid, oneshot::Sender<()>)>,
    ) {
        let pid = Pid::fake(1);
        PrioManager::new(
            Arc::new(NetworkMetrics::new(&pid).unwrap()),
            pid.to_string(),
        )
    }

    fn mock_out(prio: Prio, sid: u64) -> (Prio, Sid, OutgoingMessage) {
        let sid = Sid::new(sid);
        (prio, sid, OutgoingMessage {
            buffer: Arc::new(MessageBuffer {
                data: vec![48, 49, 50],
            }),
            cursor: 0,
            mid: 1,
            sid,
        })
    }

    fn mock_out_large(prio: Prio, sid: u64) -> (Prio, Sid, OutgoingMessage) {
        let sid = Sid::new(sid);
        let mut data = vec![48; USIZE];
        data.append(&mut vec![49; USIZE]);
        data.append(&mut vec![50; 20]);
        (prio, sid, OutgoingMessage {
            buffer: Arc::new(MessageBuffer { data }),
            cursor: 0,
            mid: 1,
            sid,
        })
    }

    fn assert_header(frames: &mut VecDeque<(Sid, Frame)>, f_sid: u64, f_length: u64) {
        let frame = frames
            .pop_front()
            .expect("frames vecdeque doesn't contain enough frames!")
            .1;
        if let Frame::DataHeader { mid, sid, length } = frame {
            assert_eq!(mid, 1);
            assert_eq!(sid, Sid::new(f_sid));
            assert_eq!(length, f_length);
        } else {
            panic!("wrong frame type!, expected DataHeader");
        }
    }

    fn assert_data(frames: &mut VecDeque<(Sid, Frame)>, f_start: u64, f_data: Vec<u8>) {
        let frame = frames
            .pop_front()
            .expect("frames vecdeque doesn't contain enough frames!")
            .1;
        if let Frame::Data { mid, start, data } = frame {
            assert_eq!(mid, 1);
            assert_eq!(start, f_start);
            assert_eq!(data, f_data);
        } else {
            panic!("wrong frame type!, expected Data");
        }
    }

    #[test]
    fn single_p16() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out(16, 1337)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(100, &mut frames));

        assert_header(&mut frames, 1337, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert!(frames.is_empty());
    }

    #[test]
    fn single_p16_p20() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out(16, 1337)).unwrap();
        msg_tx.send(mock_out(20, 42)).unwrap();
        let mut frames = VecDeque::new();

        block_on(mgr.fill_frames(100, &mut frames));
        assert_header(&mut frames, 1337, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 42, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert!(frames.is_empty());
    }

    #[test]
    fn single_p20_p16() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out(20, 42)).unwrap();
        msg_tx.send(mock_out(16, 1337)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(100, &mut frames));

        assert_header(&mut frames, 1337, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 42, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert!(frames.is_empty());
    }

    #[test]
    fn multiple_p16_p20() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out(20, 2)).unwrap();
        msg_tx.send(mock_out(16, 1)).unwrap();
        msg_tx.send(mock_out(16, 3)).unwrap();
        msg_tx.send(mock_out(16, 5)).unwrap();
        msg_tx.send(mock_out(20, 4)).unwrap();
        msg_tx.send(mock_out(20, 7)).unwrap();
        msg_tx.send(mock_out(16, 6)).unwrap();
        msg_tx.send(mock_out(20, 10)).unwrap();
        msg_tx.send(mock_out(16, 8)).unwrap();
        msg_tx.send(mock_out(20, 12)).unwrap();
        msg_tx.send(mock_out(16, 9)).unwrap();
        msg_tx.send(mock_out(16, 11)).unwrap();
        msg_tx.send(mock_out(20, 13)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(100, &mut frames));

        for i in 1..14 {
            assert_header(&mut frames, i, 3);
            assert_data(&mut frames, 0, vec![48, 49, 50]);
        }
        assert!(frames.is_empty());
    }

    #[test]
    fn multiple_fill_frames_p16_p20() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out(20, 2)).unwrap();
        msg_tx.send(mock_out(16, 1)).unwrap();
        msg_tx.send(mock_out(16, 3)).unwrap();
        msg_tx.send(mock_out(16, 5)).unwrap();
        msg_tx.send(mock_out(20, 4)).unwrap();
        msg_tx.send(mock_out(20, 7)).unwrap();
        msg_tx.send(mock_out(16, 6)).unwrap();
        msg_tx.send(mock_out(20, 10)).unwrap();
        msg_tx.send(mock_out(16, 8)).unwrap();
        msg_tx.send(mock_out(20, 12)).unwrap();
        msg_tx.send(mock_out(16, 9)).unwrap();
        msg_tx.send(mock_out(16, 11)).unwrap();
        msg_tx.send(mock_out(20, 13)).unwrap();

        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(3, &mut frames));
        for i in 1..4 {
            assert_header(&mut frames, i, 3);
            assert_data(&mut frames, 0, vec![48, 49, 50]);
        }
        assert!(frames.is_empty());
        block_on(mgr.fill_frames(11, &mut frames));
        for i in 4..14 {
            assert_header(&mut frames, i, 3);
            assert_data(&mut frames, 0, vec![48, 49, 50]);
        }
        assert!(frames.is_empty());
    }

    #[test]
    fn single_large_p16() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out_large(16, 1)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(100, &mut frames));

        assert_header(&mut frames, 1, SIZE * 2 + 20);
        assert_data(&mut frames, 0, vec![48; USIZE]);
        assert_data(&mut frames, SIZE, vec![49; USIZE]);
        assert_data(&mut frames, SIZE * 2, vec![50; 20]);
        assert!(frames.is_empty());
    }

    #[test]
    fn multiple_large_p16() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out_large(16, 1)).unwrap();
        msg_tx.send(mock_out_large(16, 2)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(100, &mut frames));

        assert_header(&mut frames, 1, SIZE * 2 + 20);
        assert_data(&mut frames, 0, vec![48; USIZE]);
        assert_data(&mut frames, SIZE, vec![49; USIZE]);
        assert_data(&mut frames, SIZE * 2, vec![50; 20]);
        assert_header(&mut frames, 2, SIZE * 2 + 20);
        assert_data(&mut frames, 0, vec![48; USIZE]);
        assert_data(&mut frames, SIZE, vec![49; USIZE]);
        assert_data(&mut frames, SIZE * 2, vec![50; 20]);
        assert!(frames.is_empty());
    }

    #[test]
    fn multiple_large_p16_sudden_p0() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        msg_tx.send(mock_out_large(16, 1)).unwrap();
        msg_tx.send(mock_out_large(16, 2)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2, &mut frames));

        assert_header(&mut frames, 1, SIZE * 2 + 20);
        assert_data(&mut frames, 0, vec![48; USIZE]);
        assert_data(&mut frames, SIZE, vec![49; USIZE]);

        msg_tx.send(mock_out(0, 3)).unwrap();
        block_on(mgr.fill_frames(100, &mut frames));

        assert_header(&mut frames, 3, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);

        assert_data(&mut frames, SIZE * 2, vec![50; 20]);
        assert_header(&mut frames, 2, SIZE * 2 + 20);
        assert_data(&mut frames, 0, vec![48; USIZE]);
        assert_data(&mut frames, SIZE, vec![49; USIZE]);
        assert_data(&mut frames, SIZE * 2, vec![50; 20]);
        assert!(frames.is_empty());
    }

    #[test]
    fn single_p20_thousand_p16_at_once() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        for _ in 0..998 {
            msg_tx.send(mock_out(16, 2)).unwrap();
        }
        msg_tx.send(mock_out(20, 1)).unwrap();
        msg_tx.send(mock_out(16, 2)).unwrap();
        msg_tx.send(mock_out(16, 2)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2000, &mut frames));

        assert_header(&mut frames, 2, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 1, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 2, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 2, 3);
        //unimportant
    }

    #[test]
    fn single_p20_thousand_p16_later() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        for _ in 0..998 {
            msg_tx.send(mock_out(16, 2)).unwrap();
        }
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2000, &mut frames));
        //^unimportant frames, gonna be dropped
        msg_tx.send(mock_out(20, 1)).unwrap();
        msg_tx.send(mock_out(16, 2)).unwrap();
        msg_tx.send(mock_out(16, 2)).unwrap();
        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2000, &mut frames));

        //important in this test: after the first frames got cleared the points are
        // reset, even though 998 prio16 messages and 0 prio20 messages have been
        // sent at this point. The next message is a prio16 message again, and only
        // then prio20! We don't want to build up debt over an idling connection.
        assert_header(&mut frames, 2, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 1, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_header(&mut frames, 2, 3);
        //unimportant
    }

    #[test]
    fn gigantic_message() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        let mut data = vec![1; USIZE];
        data.extend_from_slice(&vec![2; USIZE]);
        data.extend_from_slice(&vec![3; USIZE]);
        data.extend_from_slice(&vec![4; USIZE]);
        data.extend_from_slice(&vec![5; USIZE]);
        let sid = Sid::new(2);
        msg_tx
            .send((16, sid, OutgoingMessage {
                buffer: Arc::new(MessageBuffer { data }),
                cursor: 0,
                mid: 1,
                sid,
            }))
            .unwrap();

        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2000, &mut frames));

        assert_header(&mut frames, 2, 7000);
        assert_data(&mut frames, 0, vec![1; USIZE]);
        assert_data(&mut frames, 1400, vec![2; USIZE]);
        assert_data(&mut frames, 2800, vec![3; USIZE]);
        assert_data(&mut frames, 4200, vec![4; USIZE]);
        assert_data(&mut frames, 5600, vec![5; USIZE]);
    }

    #[test]
    fn gigantic_message_order() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        let mut data = vec![1; USIZE];
        data.extend_from_slice(&vec![2; USIZE]);
        data.extend_from_slice(&vec![3; USIZE]);
        data.extend_from_slice(&vec![4; USIZE]);
        data.extend_from_slice(&vec![5; USIZE]);
        let sid = Sid::new(2);
        msg_tx
            .send((16, sid, OutgoingMessage {
                buffer: Arc::new(MessageBuffer { data }),
                cursor: 0,
                mid: 1,
                sid,
            }))
            .unwrap();
        msg_tx.send(mock_out(16, 8)).unwrap();

        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2000, &mut frames));

        assert_header(&mut frames, 2, 7000);
        assert_data(&mut frames, 0, vec![1; USIZE]);
        assert_data(&mut frames, 1400, vec![2; USIZE]);
        assert_data(&mut frames, 2800, vec![3; USIZE]);
        assert_data(&mut frames, 4200, vec![4; USIZE]);
        assert_data(&mut frames, 5600, vec![5; USIZE]);
        assert_header(&mut frames, 8, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
    }

    #[test]
    fn gigantic_message_order_other_prio() {
        let (mut mgr, msg_tx, _flush_tx) = mock_new();
        let mut data = vec![1; USIZE];
        data.extend_from_slice(&vec![2; USIZE]);
        data.extend_from_slice(&vec![3; USIZE]);
        data.extend_from_slice(&vec![4; USIZE]);
        data.extend_from_slice(&vec![5; USIZE]);
        let sid = Sid::new(2);
        msg_tx
            .send((16, sid, OutgoingMessage {
                buffer: Arc::new(MessageBuffer { data }),
                cursor: 0,
                mid: 1,
                sid,
            }))
            .unwrap();
        msg_tx.send(mock_out(20, 8)).unwrap();

        let mut frames = VecDeque::new();
        block_on(mgr.fill_frames(2000, &mut frames));

        assert_header(&mut frames, 2, 7000);
        assert_data(&mut frames, 0, vec![1; USIZE]);
        assert_header(&mut frames, 8, 3);
        assert_data(&mut frames, 0, vec![48, 49, 50]);
        assert_data(&mut frames, 1400, vec![2; USIZE]);
        assert_data(&mut frames, 2800, vec![3; USIZE]);
        assert_data(&mut frames, 4200, vec![4; USIZE]);
        assert_data(&mut frames, 5600, vec![5; USIZE]);
    }
}