use async_trait::async_trait;
use bytes::BytesMut;
#[cfg(feature = "quic")]
use network_protocol::{QuicDataFormat, QuicDataFormatStream, QuicRecvProtocol, QuicSendProtocol};
use network_protocol::{
    Bandwidth, Cid, InitProtocolError, MpscMsg, MpscRecvProtocol, MpscSendProtocol, Pid,
    ProtocolError, ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, Sid, TcpRecvProtocol,
    TcpSendProtocol, UnreliableDrain, UnreliableSink,
};
#[cfg(feature = "quic")] use quinn::*;
use std::{sync::Arc, time::Duration};
use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    net::tcp::{OwnedReadHalf, OwnedWriteHalf},
    sync::mpsc,
};
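/// A bundled send/receive protocol pair over one of the supported transports
/// (TCP, in-process MPSC, or QUIC when the `quic` feature is enabled).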
#[derive(Debug)]
pub(crate) enum Protocols {
    Tcp((TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>)),
    Mpsc((MpscSendProtocol<MpscDrain>, MpscRecvProtocol<MpscSink>)),
    #[cfg(feature = "quic")]
    Quic((QuicSendProtocol<QuicDrain>, QuicRecvProtocol<QuicSink>)),
}
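
/// The send half of a [`Protocols`], obtained via [`Protocols::split`].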
#[derive(Debug)]
pub(crate) enum SendProtocols {
    Tcp(TcpSendProtocol<TcpDrain>),
    Mpsc(MpscSendProtocol<MpscDrain>),
    #[cfg(feature = "quic")]
    Quic(QuicSendProtocol<QuicDrain>),
}
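
/// The receive half of a [`Protocols`], obtained via [`Protocols::split`].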
#[derive(Debug)]
pub(crate) enum RecvProtocols {
    Tcp(TcpRecvProtocol<TcpSink>),
    Mpsc(MpscRecvProtocol<MpscSink>),
    #[cfg(feature = "quic")]
    Quic(QuicRecvProtocol<QuicSink>),
}
impl Protocols {
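    /// Wraps an established TCP stream into a send/receive protocol pair,
    /// registering its metrics under the connection id `cid`.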
    pub(crate) fn new_tcp(
        stream: tokio::net::TcpStream,
        cid: Cid,
        metrics: Arc<ProtocolMetrics>,
    ) -> Self {
        let (r, w) = stream.into_split();
        let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);

        let sp = TcpSendProtocol::new(TcpDrain { half: w }, metrics.clone());
        let rp = TcpRecvProtocol::new(
            TcpSink {
                half: r,
                buffer: BytesMut::new(),
            },
            metrics,
        );
        Protocols::Tcp((sp, rp))
    }
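
    /// Wraps a pair of in-process `mpsc` channels into a send/receive protocol
    /// pair, registering its metrics under the connection id `cid`.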
    pub(crate) fn new_mpsc(
        sender: mpsc::Sender<MpscMsg>,
        receiver: mpsc::Receiver<MpscMsg>,
        cid: Cid,
        metrics: Arc<ProtocolMetrics>,
    ) -> Self {
        let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);

        let sp = MpscSendProtocol::new(MpscDrain { sender }, metrics.clone());
        let rp = MpscRecvProtocol::new(MpscSink { receiver }, metrics);
        Protocols::Mpsc((sp, rp))
    }
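
    /// Opens a bidirectional stream on the given QUIC connection and wraps it
    /// into a send/receive protocol pair.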
    #[cfg(feature = "quic")]
    pub(crate) async fn new_quic(
        connection: quinn::NewConnection,
        cid: Cid,
        metrics: Arc<ProtocolMetrics>,
    ) -> Result<Self, quinn::ConnectionError> {
        let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);

        let (sendstream, recvstream) = connection.connection.open_bi().await?;

        let sp = QuicSendProtocol::new(
            QuicDrain {
                con: connection.connection.clone(),
                main: sendstream,
                reliables: vec![],
            },
            metrics.clone(),
        );
        let rp = QuicRecvProtocol::new(
            QuicSink {
                con: connection.connection,
                main: recvstream,
                reliables: vec![],
                buffer: BytesMut::new(),
            },
            metrics,
        );
        Ok(Protocols::Quic((sp, rp)))
    }
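
    /// Splits the bundled protocol into its send and receive halves so both
    /// directions can be driven independently.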
    pub(crate) fn split(self) -> (SendProtocols, RecvProtocols) {
        match self {
            Protocols::Tcp((s, r)) => (SendProtocols::Tcp(s), RecvProtocols::Tcp(r)),
            Protocols::Mpsc((s, r)) => (SendProtocols::Mpsc(s), RecvProtocols::Mpsc(r)),
            #[cfg(feature = "quic")]
            Protocols::Quic((s, r)) => (SendProtocols::Quic(s), RecvProtocols::Quic(r)),
        }
    }
}
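
// The handshake (`InitProtocol`) and the send/receive traits below simply
// dispatch to whichever transport variant is in use.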
#[async_trait]
impl network_protocol::InitProtocol for Protocols {
    async fn initialize(
        &mut self,
        initializer: bool,
        local_pid: Pid,
        secret: u128,
    ) -> Result<(Pid, Sid, u128), InitProtocolError> {
        match self {
            Protocols::Tcp(p) => p.initialize(initializer, local_pid, secret).await,
            Protocols::Mpsc(p) => p.initialize(initializer, local_pid, secret).await,
            #[cfg(feature = "quic")]
            Protocols::Quic(p) => p.initialize(initializer, local_pid, secret).await,
        }
    }
}
#[async_trait]
impl network_protocol::SendProtocol for SendProtocols {
    fn notify_from_recv(&mut self, event: ProtocolEvent) {
        match self {
            SendProtocols::Tcp(s) => s.notify_from_recv(event),
            SendProtocols::Mpsc(s) => s.notify_from_recv(event),
            #[cfg(feature = "quic")]
            SendProtocols::Quic(s) => s.notify_from_recv(event),
        }
    }

    async fn send(&mut self, event: ProtocolEvent) -> Result<(), ProtocolError> {
        match self {
            SendProtocols::Tcp(s) => s.send(event).await,
            SendProtocols::Mpsc(s) => s.send(event).await,
            #[cfg(feature = "quic")]
            SendProtocols::Quic(s) => s.send(event).await,
        }
    }
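
    // Flushing is what actually pushes buffered data onto the wire, limited by
    // the bandwidth budget granted for this `dt` window.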
    async fn flush(
        &mut self,
        bandwidth: Bandwidth,
        dt: Duration,
    ) -> Result<Bandwidth, ProtocolError> {
        match self {
            SendProtocols::Tcp(s) => s.flush(bandwidth, dt).await,
            SendProtocols::Mpsc(s) => s.flush(bandwidth, dt).await,
            #[cfg(feature = "quic")]
            SendProtocols::Quic(s) => s.flush(bandwidth, dt).await,
        }
    }
}

#[async_trait]
impl network_protocol::RecvProtocol for RecvProtocols {
    async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError> {
        match self {
            RecvProtocols::Tcp(r) => r.recv().await,
            RecvProtocols::Mpsc(r) => r.recv().await,
            #[cfg(feature = "quic")]
            RecvProtocols::Quic(r) => r.recv().await,
        }
    }
}

///////////////////////////////////////
//// TCP
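
/// Write half of a split TCP stream.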
#[derive(Debug)]
pub struct TcpDrain {
    half: OwnedWriteHalf,
}
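
/// Read half of a split TCP stream, together with its receive buffer.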
#[derive(Debug)]
pub struct TcpSink {
    half: OwnedReadHalf,
    buffer: BytesMut,
}
#[async_trait]
impl UnreliableDrain for TcpDrain {
    type DataFormat = BytesMut;

    async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
        match self.half.write_all(&data).await {
            Ok(()) => Ok(()),
            Err(_) => Err(ProtocolError::Closed),
        }
    }
}
#[async_trait]
impl UnreliableSink for TcpSink {
    type DataFormat = BytesMut;

    async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
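        // Read into a 1500 byte buffer (one Ethernet MTU worth of data) and
        // hand back exactly the bytes that were received.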
        self.buffer.resize(1500, 0u8);
        match self.half.read(&mut self.buffer).await {
            Ok(0) => Err(ProtocolError::Closed),
            Ok(n) => Ok(self.buffer.split_to(n)),
            Err(_) => Err(ProtocolError::Closed),
        }
    }
}

///////////////////////////////////////
//// MPSC
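
/// Sending half of an in-process mpsc channel.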
#[derive(Debug)]
pub struct MpscDrain {
    sender: tokio::sync::mpsc::Sender<MpscMsg>,
}
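
/// Receiving half of an in-process mpsc channel.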
#[derive(Debug)]
pub struct MpscSink {
    receiver: tokio::sync::mpsc::Receiver<MpscMsg>,
}
#[async_trait]
impl UnreliableDrain for MpscDrain {
    type DataFormat = MpscMsg;

    async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
        self.sender
            .send(data)
            .await
            .map_err(|_| ProtocolError::Closed)
    }
}
#[async_trait]
impl UnreliableSink for MpscSink {
    type DataFormat = MpscMsg;

    async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
        self.receiver.recv().await.ok_or(ProtocolError::Closed)
    }
}

///////////////////////////////////////
//// QUIC
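
/// Sending side of a QUIC connection: the main send stream plus any further
/// reliable streams that belong to it.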
#[cfg(feature = "quic")]
#[derive(Debug)]
pub struct QuicDrain {
    con: quinn::Connection,
    main: quinn::SendStream,
    reliables: Vec<quinn::SendStream>,
}
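
/// Receiving side of a QUIC connection, together with its receive buffer.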
#[cfg(feature = "quic")]
#[derive(Debug)]
pub struct QuicSink {
    con: quinn::Connection,
    main: quinn::RecvStream,
    reliables: Vec<quinn::RecvStream>,
    buffer: BytesMut,
}
#[cfg(feature = "quic")]
#[async_trait]
impl UnreliableDrain for QuicDrain {
    type DataFormat = QuicDataFormat;

    async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
        match match data.stream {
            QuicDataFormatStream::Main => self.main.write_all(&data.data),
            QuicDataFormatStream::Unreliable => unimplemented!(),
            QuicDataFormatStream::Reliable(id) => self
                .reliables
                .get_mut(id as usize)
                .ok_or(ProtocolError::Closed)?
                .write_all(&data.data),
        }
        .await
        {
            Ok(()) => Ok(()),
            Err(_) => Err(ProtocolError::Closed),
        }
    }
}
#[cfg(feature = "quic")]
#[async_trait]
impl UnreliableSink for QuicSink {
    type DataFormat = QuicDataFormat;

    async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
        self.buffer.resize(1500, 0u8);
        //TODO improve
        match self.main.read(&mut self.buffer).await {
            Ok(Some(0)) => Err(ProtocolError::Closed),
            Ok(Some(n)) => Ok(QuicDataFormat {
                stream: QuicDataFormatStream::Main,
                data: self.buffer.split_to(n),
            }),
            Ok(None) => Err(ProtocolError::Closed),
            Err(_) => Err(ProtocolError::Closed),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use bytes::Bytes;
    use network_protocol::{Promises, RecvProtocol, SendProtocol};
    use tokio::net::{TcpListener, TcpStream};
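
    // Send an OpenStream event plus one message over a real TCP socket pair and
    // check that both arrive, even though the sender is dropped before `recv`.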
    #[tokio::test]
    async fn tokio_sinks() {
        let listener = TcpListener::bind("127.0.0.1:5000").await.unwrap();
        let r1 = tokio::spawn(async move {
            let (server, _) = listener.accept().await.unwrap();
            (listener, server)
        });
        let client = TcpStream::connect("127.0.0.1:5000").await.unwrap();
        let (_listener, server) = r1.await.unwrap();
        let metrics = Arc::new(ProtocolMetrics::new().unwrap());
        let client = Protocols::new_tcp(client, 0, Arc::clone(&metrics));
        let server = Protocols::new_tcp(server, 0, Arc::clone(&metrics));
        let (mut s, _) = client.split();
        let (_, mut r) = server.split();
        let event = ProtocolEvent::OpenStream {
            sid: Sid::new(1),
            prio: 4u8,
            promises: Promises::GUARANTEED_DELIVERY,
            guaranteed_bandwidth: 1_000,
        };
        s.send(event.clone()).await.unwrap();
        s.send(ProtocolEvent::Message {
            sid: Sid::new(1),
            data: Bytes::from(&[8u8; 8][..]),
        })
        .await
        .unwrap();
        s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
        drop(s); // recv must work even after shutdown of send!
        tokio::time::sleep(Duration::from_secs(1)).await;
        let res = r.recv().await;
        match res {
            Ok(ProtocolEvent::OpenStream {
                sid,
                prio,
                promises,
                guaranteed_bandwidth: _,
            }) => {
                assert_eq!(sid, Sid::new(1));
                assert_eq!(prio, 4u8);
                assert_eq!(promises, Promises::GUARANTEED_DELIVERY);
            },
            _ => {
                panic!("wrong type {:?}", res);
            },
        }
        r.recv().await.unwrap();
    }
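
    // Dropping the send half must surface as `ProtocolError::Closed` on the
    // receive half.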
    #[tokio::test]
    async fn tokio_sink_stop_after_drop() {
        let listener = TcpListener::bind("127.0.0.1:5001").await.unwrap();
        let r1 = tokio::spawn(async move {
            let (server, _) = listener.accept().await.unwrap();
            (listener, server)
        });
        let client = TcpStream::connect("127.0.0.1:5001").await.unwrap();
        let (_listener, server) = r1.await.unwrap();
        let metrics = Arc::new(ProtocolMetrics::new().unwrap());
        let client = Protocols::new_tcp(client, 0, Arc::clone(&metrics));
        let server = Protocols::new_tcp(server, 0, Arc::clone(&metrics));
        let (s, _) = client.split();
        let (_, mut r) = server.split();
        let e = tokio::spawn(async move { r.recv().await });
        drop(s);
        let e = e.await.unwrap();
        assert!(e.is_err());
        assert_eq!(e.unwrap_err(), ProtocolError::Closed);
    }
}