#![feature(assert_matches)]
//! How to read these tests:
//! - in the first line we call the helper; this is only debug code. In case
//!   you want to have tracing for a specific test, set the bool to `true`
//!   and the sleep to 10000, and your test will start 10 seconds delayed with
//!   tracing. You need a delay, as otherwise the other tests pollute your trace.
//! - the second line simulates a client and a server:
//!   `network_participant_stream` will return
//!   - 2 networks
//!   - 2 participants
//!   - 2 streams
//!   each one `linked` to their counterpart.
//!   You will see a cryptic use of Rust's `_`; this is because we are testing the
//!   `drop` behavior here.
//!   - A `_` means this is directly dropped after the line executes, thus
//!     immediately executing its `Drop` impl.
//!   - A `_p1_a`, e.g., means we don't use that Participant yet, but we must
//!     not `drop` it yet as we might want to use the Streams.
//! - You sometimes see sleep(1000ms); this is used when we rely on the
//!   underlying TCP functionality, as this simulates client and server.
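//!
//! A minimal sketch of that pattern, reusing the local `helper` module from this
//! test directory (illustrative only, not run as a doctest):
//! ```ignore
//! let (_, _) = helper::setup(false, 0);
//! let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
//! s1_a.send("ping").unwrap();
//! assert_eq!(r.block_on(s1_b.recv()), Ok("ping".to_string()));
//! drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
//! ```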

use std::{assert_matches::assert_matches, sync::Arc};
use tokio::runtime::Runtime;
use veloren_network::{Network, ParticipantError, ParticipantEvent, Pid, Promises, StreamError};
mod helper;
use helper::{network_participant_stream, tcp, SLEEP_EXTERNAL, SLEEP_INTERNAL};

#[test]
fn close_network() {
    let (_, _) = helper::setup(false, 0);
    let (r, _, _p1_a, s1_a, _, _p1_b, mut s1_b) = network_participant_stream(tcp());

    std::thread::sleep(SLEEP_INTERNAL);

    assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
    let msg1: Result<String, _> = r.block_on(s1_b.recv());
    assert_eq!(msg1, Err(StreamError::StreamClosed));
}

#[test]
fn close_participant() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, p1_a, s1_a, _n_b, p1_b, mut s1_b) = network_participant_stream(tcp());

    r.block_on(p1_a.disconnect()).unwrap();
    r.block_on(p1_b.disconnect()).unwrap();

    assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
    assert_eq!(
        r.block_on(s1_b.recv::<String>()),
        Err(StreamError::StreamClosed)
    );
}

#[test]
fn close_stream() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _, mut s1_a, _n_b, _, _) = network_participant_stream(tcp());

    // s1_b is dropped directly while s1_a isn't
    std::thread::sleep(SLEEP_INTERNAL);

    assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
    assert_eq!(
        r.block_on(s1_a.recv::<String>()),
        Err(StreamError::StreamClosed)
    );
}

/// We must NOT create runtimes inside a Runtime; this check needs to verify
/// that we don't panic there.
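///
/// A sketch of the nesting this guards against; tokio's `block_on` panics when it
/// is called from inside a runtime context (illustrative only, not run as a doctest):
/// ```ignore
/// let outer = tokio::runtime::Runtime::new().unwrap();
/// outer.block_on(async {
///     // Blocking on (or dropping) a nested runtime inside a runtime context panics:
///     // "Cannot start a runtime from within a runtime".
///     let inner = tokio::runtime::Runtime::new().unwrap();
///     inner.block_on(async {});
/// });
/// ```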
#[test]
fn close_streams_in_block_on() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) = network_participant_stream(tcp());
    r.block_on(async {
        //make them local so that they are dropped later
        let s1_a = s1_a;
        let mut s1_b = s1_b;
        s1_a.send("ping").unwrap();
        assert_eq!(s1_b.recv().await, Ok("ping".to_string()));
        drop(s1_a);
    });
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}

#[test]
fn stream_simple_3msg_then_close() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());

    s1_a.send(1u8).unwrap();
    s1_a.send(42).unwrap();
    s1_a.send("3rdMessage").unwrap();
    assert_eq!(r.block_on(s1_b.recv()), Ok(1u8));
    assert_eq!(r.block_on(s1_b.recv()), Ok(42));
    assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    drop(s1_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    assert_eq!(s1_b.send("Hello World"), Err(StreamError::StreamClosed));
}

#[test]
fn stream_send_first_then_receive() {
    // recv should still be possible even if the stream got closed,
    // as long as the messages are already in the queue
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());

    s1_a.send(1u8).unwrap();
    s1_a.send(42).unwrap();
    s1_a.send("3rdMessage").unwrap();
    drop(s1_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    assert_eq!(r.block_on(s1_b.recv()), Ok(1u8));
    assert_eq!(r.block_on(s1_b.recv()), Ok(42));
    assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    assert_eq!(s1_b.send("Hello World"), Err(StreamError::StreamClosed));
}

#[test]
fn stream_send_1_then_close_stream() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send("this message must be received, even if stream is closed already!")
        .unwrap();
    drop(s1_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    let exp = Ok("this message must be received, even if stream is closed already!".to_string());
    assert_eq!(r.block_on(s1_b.recv()), exp);
    println!("all received and done");
}

#[test]
fn stream_send_100000_then_close_stream() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(s1_a);
    let exp = Ok("woop_PARTY_HARD_woop".to_string());
    println!("start receiving");
    r.block_on(async {
        for _ in 0..100000 {
            assert_eq!(s1_b.recv().await, exp);
        }
    });
    println!("all received and done");
}

#[test]
fn stream_send_100000_then_close_stream_remote() {
    let (_, _) = helper::setup(false, 0);
    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(s1_a);
    drop(_s1_b);
    //no receiving
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}

#[test]
fn stream_send_100000_then_close_stream_remote2() {
    let (_, _) = helper::setup(false, 0);
    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(_s1_b);
    std::thread::sleep(SLEEP_EXTERNAL);
    drop(s1_a);
    //no receiving
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}

#[test]
fn stream_send_100000_then_close_stream_remote3() {
    let (_, _) = helper::setup(false, 0);
    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(_s1_b);
    std::thread::sleep(SLEEP_EXTERNAL);
    drop(s1_a);
    //no receiving
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}

#[test]
fn close_part_then_network() {
    let (_, _) = helper::setup(false, 0);
    let (_r, n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..1000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(p_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    drop(n_a);
    std::thread::sleep(SLEEP_INTERNAL);
}

#[test]
fn close_network_then_part() {
    let (_, _) = helper::setup(false, 0);
    let (_r, n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..1000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(n_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    drop(p_a);
    std::thread::sleep(SLEEP_INTERNAL);
}

#[test]
fn close_network_then_disconnect_part() {
    let (_, _) = helper::setup(false, 0);
    let (r, n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..1000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(n_a);
    assert!(r.block_on(p_a.disconnect()).is_err());
    std::thread::sleep(SLEEP_EXTERNAL);
    drop((_n_b, _p_b)); //clean teardown
}

#[test]
fn close_runtime_then_network() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(r);
    drop(_n_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    drop(_p_b);
}

#[test]
fn close_runtime_then_part() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(r);
    drop(_p_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    drop(_p_b);
    drop(_n_a);
}

#[test]
fn close_network_from_async() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    r.block_on(async move {
        drop(_n_a);
    });
    drop(_p_b);
}

#[test]
fn close_part_from_async() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    r.block_on(async move {
        p_a.disconnect().await.unwrap();
        drop(_p_b);
    });
    drop(_n_a);
}

#[test]
fn opened_stream_before_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
    let s2_a = r.block_on(p_a.open(4, Promises::empty(), 0)).unwrap();
    s2_a.send("HelloWorld").unwrap();
    let mut s2_b = r.block_on(p_b.opened()).unwrap();
    drop(p_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    drop((_n_a, _n_b, p_b)); //clean teardown
}

#[test]
fn opened_stream_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
    let s2_a = r.block_on(p_a.open(3, Promises::empty(), 0)).unwrap();
    s2_a.send("HelloWorld").unwrap();
    drop(p_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    let mut s2_b = r.block_on(p_b.opened()).unwrap();
    assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(
        r.block_on(p_b.opened()).unwrap_err(),
        ParticipantError::ParticipantDisconnected
    );
    drop((_n_a, _n_b, p_b)); //clean teardown
}

#[test]
fn open_stream_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
    let s2_a = r.block_on(p_a.open(4, Promises::empty(), 0)).unwrap();
    s2_a.send("HelloWorld").unwrap();
    drop(p_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    let mut s2_b = r.block_on(p_b.opened()).unwrap();
    assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(
        r.block_on(p_b.open(5, Promises::empty(), 0)).unwrap_err(),
        ParticipantError::ParticipantDisconnected
    );
    drop((_n_a, _n_b, p_b)); //clean teardown
}

#[test]
fn failed_stream_open_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
    drop(p_a);
    assert_eq!(
        r.block_on(p_b.opened()).unwrap_err(),
        ParticipantError::ParticipantDisconnected
    );
    drop((_n_a, _n_b, p_b)); //clean teardown
}

#[test]
fn open_participant_before_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let r = Arc::new(Runtime::new().unwrap());
    let mut n_a = Network::new(Pid::fake(0), &r);
    let n_b = Network::new(Pid::fake(1), &r);
    let addr = tcp();
    r.block_on(n_a.listen(addr.0)).unwrap();
    let p_b = r.block_on(n_b.connect(addr.1)).unwrap();
    let s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
    s1_b.send("HelloWorld").unwrap();
    let mut p_a = r.block_on(n_a.connected()).unwrap();
    drop(s1_b);
    drop(p_b);
    drop(n_b);
    std::thread::sleep(SLEEP_EXTERNAL);
    let mut s1_a = r.block_on(p_a.opened()).unwrap();
    assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
}

#[test]
fn open_participant_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let r = Arc::new(Runtime::new().unwrap());
    let mut n_a = Network::new(Pid::fake(0), &r);
    let n_b = Network::new(Pid::fake(1), &r);
    let addr = tcp();
    r.block_on(n_a.listen(addr.0)).unwrap();
    let p_b = r.block_on(n_b.connect(addr.1)).unwrap();
    let s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
    s1_b.send("HelloWorld").unwrap();
    drop(s1_b);
    drop(p_b);
    drop(n_b);
    std::thread::sleep(SLEEP_EXTERNAL);
    let mut p_a = r.block_on(n_a.connected()).unwrap();
    let mut s1_a = r.block_on(p_a.opened()).unwrap();
    assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
}

#[test]
fn close_network_scheduler_completely() {
    let (_, _) = helper::setup(false, 0);
    let r = Arc::new(Runtime::new().unwrap());
    let mut n_a = Network::new(Pid::fake(0), &r);
    let n_b = Network::new(Pid::fake(1), &r);
    let addr = tcp();
    r.block_on(n_a.listen(addr.0)).unwrap();
    let mut p_b = r.block_on(n_b.connect(addr.1)).unwrap();
    assert_matches!(
        r.block_on(p_b.fetch_event()),
        Ok(ParticipantEvent::ChannelCreated(_))
    );
    let s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
    s1_b.send("HelloWorld").unwrap();

    let mut p_a = r.block_on(n_a.connected()).unwrap();
    assert_matches!(
        r.block_on(p_a.fetch_event()),
        Ok(ParticipantEvent::ChannelCreated(_))
    );
    assert_matches!(p_a.try_fetch_event(), Ok(None));
    assert_matches!(p_b.try_fetch_event(), Ok(None));
    let mut s1_a = r.block_on(p_a.opened()).unwrap();
    assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
    drop(n_a);
    drop(n_b);
    std::thread::sleep(SLEEP_EXTERNAL); //p_b is INTERNAL, but p_a is EXTERNAL
    assert_matches!(
        p_a.try_fetch_event(),
        Ok(Some(ParticipantEvent::ChannelDeleted(_)))
    );
    assert_matches!(
        r.block_on(p_b.fetch_event()),
        Ok(ParticipantEvent::ChannelDeleted(_))
    );
    assert_matches!(p_a.try_fetch_event(), Err(_));
    assert_matches!(p_b.try_fetch_event(), Err(_));

    drop(p_b);
    drop(p_a);
    let runtime = Arc::try_unwrap(r).expect("runtime is not alone, a reference to it still exists");
    runtime.shutdown_timeout(SLEEP_INTERNAL);
}

#[test]
fn dont_panic_on_multiply_recv_after_close() {
    let (_, _) = helper::setup(false, 0);
    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());

    s1_a.send(11u32).unwrap();
    drop(s1_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    assert_eq!(s1_b.try_recv::<u32>(), Ok(Some(11u32)));
    assert_eq!(s1_b.try_recv::<String>(), Err(StreamError::StreamClosed));
    // There was a "feature" in futures::channel that made it panic when you called
    // recv a second time after it had already signaled end of stream
    assert_eq!(s1_b.try_recv::<String>(), Err(StreamError::StreamClosed));
}

#[test]
fn dont_panic_on_recv_send_after_close() {
    let (_, _) = helper::setup(false, 0);
    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());

    s1_a.send(11u32).unwrap();
    drop(s1_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    assert_eq!(s1_b.try_recv::<u32>(), Ok(Some(11u32)));
    assert_eq!(s1_b.try_recv::<String>(), Err(StreamError::StreamClosed));
    assert_eq!(s1_b.send("foobar"), Err(StreamError::StreamClosed));
}

#[test]
fn dont_panic_on_multiple_send_after_close() {
    let (_, _) = helper::setup(false, 0);
    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());

    s1_a.send(11u32).unwrap();
    drop(s1_a);
    drop(_p_a);
    std::thread::sleep(SLEEP_EXTERNAL);
    assert_eq!(s1_b.try_recv::<u32>(), Ok(Some(11u32)));
    assert_eq!(s1_b.try_recv::<String>(), Err(StreamError::StreamClosed));
    assert_eq!(s1_b.send("foobar"), Err(StreamError::StreamClosed));
    assert_eq!(s1_b.send("foobar"), Err(StreamError::StreamClosed));
}