Stabilize Network again:

- completely switch to Bytes, even in the API; speeds up TCP by a factor of 2
 - improve benchmarks
 - speed up mpsc metrics
 - gracefully handle shutdown by interpreting Ok(0) from a tokio TcpStream read as "connection closed" (see the sketch after this list)
 - fix hot loop in participants by adding `Some(n)`, fixing endless hanging
 - fix closing bug by closing streams after `recv_mgr` is shut down, even if no shutdown is triggered locally
 - fix prometheus
 - no longer error when a `Stream` is dropped while the participant still receives a msg for it
 - fix the bandwidth handling; TCP network send speed is up to 1.5 GiB/s while recv is 150 MiB/s
 - add documentation
 - temporarily require tokio's rt-multi-thread feature in the client, to not fail cargo check
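
A minimal sketch of the Ok(0) handling mentioned above (illustrative only, not the actual veloren-network code; `recv_loop` and `handle_bytes` are made-up names): tokio's `AsyncReadExt::read` returns `Ok(0)` once the remote side has closed the `TcpStream`, which is now treated as a graceful shutdown instead of spinning or erroring.

```rust
use tokio::io::AsyncReadExt;
use tokio::net::TcpStream;

async fn recv_loop(mut stream: TcpStream) -> std::io::Result<()> {
    let mut buf = vec![0u8; 1500];
    loop {
        match stream.read(&mut buf).await {
            // Ok(0) means the peer closed the connection: shut down gracefully.
            Ok(0) => break Ok(()),
            // Ok(n) delivers n bytes that are handed to the protocol layer.
            Ok(n) => handle_bytes(&buf[..n]),
            Err(e) => break Err(e),
        }
    }
}

fn handle_bytes(_data: &[u8]) { /* hand the chunk to the next layer */ }
```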

This is probably stable; I tested it for over an hour.
After that came some optimisations in the prio manager
and a proper bandwidth implementation (a sketch of the budgeting idea follows below).
Speed is up to 2 GB/s write and 150 MB/s recv on a single core.
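
A rough sketch of the bandwidth budgeting idea behind the new flush (simplified; `StreamState`, `budget`, and the exact loop here are illustrative, loosely following `PrioManager::grab` from this commit): every stream first gets its guaranteed bandwidth for the elapsed `dt`, then whatever is left of the overall budget is handed out by priority.

```rust
use std::time::Duration;

struct StreamState {
    guaranteed_bandwidth: u64, // bytes per second this stream always gets
    prio: u8,                  // lower value = served earlier with spare budget
    queued: u64,               // bytes currently waiting to be sent
}

/// How many bytes each stream may flush in this call.
fn budget(streams: &[StreamState], bandwidth: u64, dt: Duration) -> Vec<u64> {
    let total = (bandwidth as f64 * dt.as_secs_f64()) as u64;
    let mut used = 0u64;
    // 1) guaranteed bandwidth first
    let mut grants: Vec<u64> = streams
        .iter()
        .map(|s| {
            let g = ((s.guaranteed_bandwidth as f64 * dt.as_secs_f64()) as u64).min(s.queued);
            used += g;
            g
        })
        .collect();
    // 2) distribute the remaining budget by priority
    let mut order: Vec<usize> = (0..streams.len()).collect();
    order.sort_by_key(|&i| streams[i].prio);
    for i in order {
        if used >= total {
            break;
        }
        let extra = (total - used).min(streams[i].queued - grants[i]);
        grants[i] += extra;
        used += extra;
    }
    grants
}
```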

sync add documentation
Marcel Märtens 2021-02-14 18:45:12 +01:00
parent ea8ab1ce7a
commit 03af9937cf
33 changed files with 1444 additions and 955 deletions


@ -46,6 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Cave scatter now includes all 6 gems.
- Adjusted Stonework Defender loot table to remove mindflayer drops (bag, staff, glider).
- Changed default controller key bindings
- Improved network efficiency by a factor of ≈10 by using tokio.
### Removed

Cargo.lock generated

@ -5713,6 +5713,7 @@ dependencies = [
"bitflags",
"bytes 1.0.1",
"clap",
"criterion",
"crossbeam-channel 0.5.0",
"futures-core",
"futures-util",


@ -21,7 +21,7 @@ uvth = "3.1.1"
futures-util = "0.3.7"
futures-executor = "0.3"
futures-timer = "3.0"
tokio = { version = "1", default-features = false, features = ["rt"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread"] }
image = { version = "0.23.12", default-features = false, features = ["png"] }
num = "0.3.1"
num_cpus = "1.10.1"


@ -2066,7 +2066,8 @@ mod tests {
let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9000);
let view_distance: Option<u32> = None;
let veloren_client: Result<Client, Error> = Client::new(socket, view_distance);
let runtime = Arc::new(Runtime::new().unwrap());
let veloren_client: Result<Client, Error> = Client::new(socket, view_distance, runtime);
let _ = veloren_client.map(|mut client| {
//register


@ -48,6 +48,11 @@ clap = { version = "2.33", default-features = false }
shellexpand = "2.0.0"
serde = { version = "1.0", features = ["derive"] }
prometheus-hyper = "0.1.1"
criterion = { version = "0.3.4", features = ["default", "async_tokio"] }
[[bench]]
name = "speed"
harness = false
[[example]]
name = "fileshare"

network/benches/speed.rs Normal file

@ -0,0 +1,143 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::{net::SocketAddr, sync::Arc};
use tokio::{runtime::Runtime, sync::Mutex};
use veloren_network::{Message, Network, Participant, Pid, Promises, ProtocolAddr, Stream};
fn serialize(data: &[u8], stream: &Stream) { let _ = Message::serialize(data, &stream); }
async fn stream_msg(s1_a: Arc<Mutex<Stream>>, s1_b: Arc<Mutex<Stream>>, data: &[u8], cnt: usize) {
let mut s1_b = s1_b.lock().await;
let m = Message::serialize(&data, &s1_b);
std::thread::spawn(move || {
let mut s1_a = s1_a.try_lock().unwrap();
for _ in 0..cnt {
s1_a.send_raw(&m).unwrap();
}
});
for _ in 0..cnt {
s1_b.recv_raw().await.unwrap();
}
}
fn rt() -> Runtime {
tokio::runtime::Builder::new_current_thread()
.build()
.unwrap()
}
fn criterion_util(c: &mut Criterion) {
let mut c = c.benchmark_group("net_util");
c.significance_level(0.1).sample_size(100);
let (r, _n_a, p_a, s1_a, _n_b, _p_b, _s1_b) =
network_participant_stream(ProtocolAddr::Mpsc(5000));
let s2_a = r.block_on(p_a.open(4, Promises::COMPRESSED)).unwrap();
c.throughput(Throughput::Bytes(1000))
.bench_function("message_serialize", |b| {
let data = vec![0u8; 1000];
b.iter(|| serialize(&data, &s1_a))
});
c.throughput(Throughput::Bytes(1000))
.bench_function("message_serialize_compress", |b| {
let data = vec![0u8; 1000];
b.iter(|| serialize(&data, &s2_a))
});
}
fn criterion_mpsc(c: &mut Criterion) {
let mut c = c.benchmark_group("net_mpsc");
c.significance_level(0.1).sample_size(10);
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) =
network_participant_stream(ProtocolAddr::Mpsc(5000));
let s1_a = Arc::new(Mutex::new(s1_a));
let s1_b = Arc::new(Mutex::new(s1_b));
c.throughput(Throughput::Bytes(100000000)).bench_function(
BenchmarkId::new("100MB_in_10000_msg", ""),
|b| {
let data = vec![155u8; 100_000];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 1_000),
)
},
);
c.throughput(Throughput::Elements(100000)).bench_function(
BenchmarkId::new("100000_tiny_msg", ""),
|b| {
let data = vec![3u8; 5];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 100_000),
)
},
);
c.finish();
drop((_n_a, _p_a, _n_b, _p_b));
}
fn criterion_tcp(c: &mut Criterion) {
let mut c = c.benchmark_group("net_tcp");
c.significance_level(0.1).sample_size(10);
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) =
network_participant_stream(ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], 5000))));
let s1_a = Arc::new(Mutex::new(s1_a));
let s1_b = Arc::new(Mutex::new(s1_b));
c.throughput(Throughput::Bytes(100000000)).bench_function(
BenchmarkId::new("100MB_in_1000_msg", ""),
|b| {
let data = vec![155u8; 100_000];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 1_000),
)
},
);
c.throughput(Throughput::Elements(100000)).bench_function(
BenchmarkId::new("100000_tiny_msg", ""),
|b| {
let data = vec![3u8; 5];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 100_000),
)
},
);
c.finish();
drop((_n_a, _p_a, _n_b, _p_b));
}
criterion_group!(benches, criterion_util, criterion_mpsc, criterion_tcp);
criterion_main!(benches);
pub fn network_participant_stream(
addr: ProtocolAddr,
) -> (
Arc<Runtime>,
Network,
Participant,
Stream,
Network,
Participant,
Stream,
) {
let runtime = Arc::new(Runtime::new().unwrap());
let (n_a, p1_a, s1_a, n_b, p1_b, s1_b) = runtime.block_on(async {
let n_a = Network::new(Pid::fake(0), Arc::clone(&runtime));
let n_b = Network::new(Pid::fake(1), Arc::clone(&runtime));
n_a.listen(addr.clone()).await.unwrap();
let p1_b = n_b.connect(addr).await.unwrap();
let p1_a = n_a.connected().await.unwrap();
let s1_a = p1_a.open(4, Promises::empty()).await.unwrap();
let s1_b = p1_b.opened().await.unwrap();
(n_a, p1_a, s1_a, n_b, p1_b, s1_b)
});
(runtime, n_a, p1_a, s1_a, n_b, p1_b, s1_b)
}


@ -130,7 +130,7 @@ async fn client_connection(
Ok(msg) => {
println!("[{}]: {}", username, msg);
for p in participants.read().await.iter() {
match p.open(32, Promises::ORDERED | Promises::CONSISTENCY).await {
match p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await {
Err(_) => info!("error talking to client, //TODO drop it"),
Ok(mut s) => s.send((username.clone(), msg.clone())).unwrap(),
};
@ -148,7 +148,7 @@ fn client(address: ProtocolAddr) {
r.block_on(async {
let p1 = client.connect(address.clone()).await.unwrap(); //remote representation of p1
let mut s1 = p1
.open(16, Promises::ORDERED | Promises::CONSISTENCY)
.open(4, Promises::ORDERED | Promises::CONSISTENCY)
.await
.unwrap(); //remote representation of s1
let mut input_lines = io::BufReader::new(io::stdin());


@ -121,8 +121,8 @@ impl Server {
#[allow(clippy::eval_order_dependence)]
async fn loop_participant(&self, p: Participant) {
if let (Ok(cmd_out), Ok(file_out), Ok(cmd_in), Ok(file_in)) = (
p.open(15, Promises::ORDERED | Promises::CONSISTENCY).await,
p.open(40, Promises::CONSISTENCY).await,
p.open(3, Promises::ORDERED | Promises::CONSISTENCY).await,
p.open(6, Promises::CONSISTENCY).await,
p.opened().await,
p.opened().await,
) {
@ -175,7 +175,7 @@ impl Server {
let mut path = std::env::current_dir().unwrap();
path.push(fi.path().file_name().unwrap());
trace!("No path provided, saving down to {:?}", path);
PathBuf::from(path)
path
},
};
debug!("Received file, going to save it under {:?}", path);


@ -132,6 +132,7 @@ fn server(address: ProtocolAddr, runtime: Arc<Runtime>) {
runtime.block_on(server.listen(address)).unwrap();
loop {
info!("----");
info!("Waiting for participant to connect");
let p1 = runtime.block_on(server.connected()).unwrap(); //remote representation of p1
let mut s1 = runtime.block_on(p1.opened()).unwrap(); //remote representation of s1
@ -163,7 +164,7 @@ fn client(address: ProtocolAddr, runtime: Arc<Runtime>) {
let p1 = runtime.block_on(client.connect(address)).unwrap(); //remote representation of p1
let mut s1 = runtime
.block_on(p1.open(16, Promises::ORDERED | Promises::CONSISTENCY))
.block_on(p1.open(4, Promises::ORDERED | Promises::CONSISTENCY))
.unwrap(); //remote representation of s1
let mut last = Instant::now();
let mut id = 0u64;
@ -185,16 +186,16 @@ fn client(address: ProtocolAddr, runtime: Arc<Runtime>) {
}
if id > 2000000 {
println!("Stop");
std::thread::sleep(std::time::Duration::from_millis(5000));
std::thread::sleep(std::time::Duration::from_millis(2000));
break;
}
}
drop(s1);
std::thread::sleep(std::time::Duration::from_millis(5000));
std::thread::sleep(std::time::Duration::from_millis(2000));
info!("Closing participant");
runtime.block_on(p1.disconnect()).unwrap();
std::thread::sleep(std::time::Duration::from_millis(25000));
std::thread::sleep(std::time::Duration::from_millis(2000));
info!("DROPPING! client");
drop(client);
std::thread::sleep(std::time::Duration::from_millis(25000));
std::thread::sleep(std::time::Duration::from_millis(2000));
}


@ -1,53 +1,40 @@
use async_channel::*;
use async_trait::async_trait;
use bytes::BytesMut;
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::{Bytes, BytesMut};
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use std::{sync::Arc, time::Duration};
use tokio::runtime::Runtime;
use veloren_network_protocol::{
InitProtocol, MessageBuffer, MpscMsg, MpscRecvProtcol, MpscSendProtcol, Pid, Promises,
ProtocolError, ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, RecvProtocol, SendProtocol,
Sid, TcpRecvProtcol, TcpSendProtcol, UnreliableDrain, UnreliableSink, _internal::Frame,
InitProtocol, MpscMsg, MpscRecvProtocol, MpscSendProtocol, Pid, Promises, ProtocolError,
ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, RecvProtocol, SendProtocol, Sid,
TcpRecvProtocol, TcpSendProtocol, UnreliableDrain, UnreliableSink, _internal::OTFrame,
};
fn frame_serialize(frame: Frame, buffer: &mut BytesMut) { frame.to_bytes(buffer); }
fn frame_serialize(frame: OTFrame, buffer: &mut BytesMut) { frame.write_bytes(buffer); }
async fn mpsc_msg(buffer: Arc<MessageBuffer>) {
// Arrrg, need to include constructor here
let [p1, p2] = utils::ac_bound(10, None);
let (mut s, mut r) = (p1.0, p2.1);
s.send(ProtocolEvent::Message {
sid: Sid::new(12),
mid: 0,
buffer,
})
.await
.unwrap();
r.recv().await.unwrap();
}
async fn mpsc_handshake() {
let [mut p1, mut p2] = utils::ac_bound(10, None);
let r1 = tokio::spawn(async move {
async fn handshake<S, R>(p: [(S, R); 2])
where
S: SendProtocol,
R: RecvProtocol,
(S, R): InitProtocol,
{
let [mut p1, mut p2] = p;
tokio::join!(
async {
p1.initialize(true, Pid::fake(2), 1337).await.unwrap();
p1
});
let r2 = tokio::spawn(async move {
},
async {
p2.initialize(false, Pid::fake(3), 42).await.unwrap();
p2
});
let (r1, r2) = tokio::join!(r1, r2);
r1.unwrap();
r2.unwrap();
}
);
}
async fn tcp_msg(buffer: Arc<MessageBuffer>, cnt: usize) {
let [p1, p2] = utils::tcp_bound(10000, None); /*10kbit*/
let (mut s, mut r) = (p1.0, p2.1);
async fn send_msg<T: SendProtocol>(mut s: T, data: Bytes, cnt: usize) {
let bandwidth = data.len() as u64 + 100;
const SEC1: Duration = Duration::from_secs(1);
let buffer = Arc::clone(&buffer);
let bandwidth = buffer.data.len() as u64 + 1000;
let r1 = tokio::spawn(async move {
s.send(ProtocolEvent::OpenStream {
sid: Sid::new(12),
prio: 0,
@ -61,80 +48,106 @@ async fn tcp_msg(buffer: Arc<MessageBuffer>, cnt: usize) {
s.send(ProtocolEvent::Message {
sid: Sid::new(12),
mid: i as u64,
buffer: Arc::clone(&buffer),
data: data.clone(),
})
.await
.unwrap();
s.flush(bandwidth, Duration::from_secs(1)).await.unwrap();
if i.rem_euclid(50) == 0 {
s.flush(bandwidth * 50_u64, SEC1).await.unwrap();
}
});
let r2 = tokio::spawn(async move {
}
s.flush(bandwidth * 1000_u64, SEC1).await.unwrap();
}
async fn recv_msg<T: RecvProtocol>(mut r: T, cnt: usize) {
r.recv().await.unwrap();
for _ in 0..cnt {
r.recv().await.unwrap();
}
});
let (r1, r2) = tokio::join!(r1, r2);
r1.unwrap();
r2.unwrap();
}
fn criterion_benchmark(c: &mut Criterion) {
let rt = || {
async fn send_and_recv_msg<S: SendProtocol, R: RecvProtocol>(
p: [(S, R); 2],
data: Bytes,
cnt: usize,
) {
let [p1, p2] = p;
let (s, r) = (p1.0, p2.1);
tokio::join!(send_msg(s, data, cnt), recv_msg(r, cnt));
}
fn rt() -> Runtime {
tokio::runtime::Builder::new_current_thread()
.build()
.unwrap()
};
}
c.bench_function("mpsc_short_msg", |b| {
let buffer = Arc::new(MessageBuffer {
data: b"hello_world".to_vec(),
});
b.to_async(rt()).iter(|| mpsc_msg(Arc::clone(&buffer)))
});
c.bench_function("mpsc_long_msg", |b| {
let buffer = Arc::new(MessageBuffer {
data: vec![150u8; 500_000],
});
b.to_async(rt()).iter(|| mpsc_msg(Arc::clone(&buffer)))
});
fn criterion_util(c: &mut Criterion) {
c.bench_function("mpsc_handshake", |b| {
b.to_async(rt()).iter(|| mpsc_handshake())
b.to_async(rt())
.iter_with_setup(|| utils::ac_bound(10, None), handshake)
});
let mut buffer = BytesMut::with_capacity(1500);
c.bench_function("frame_serialize_short", |b| {
let frame = Frame::Data {
let mut buffer = BytesMut::with_capacity(1500);
let frame = OTFrame::Data {
mid: 65,
start: 89u64,
data: b"hello_world".to_vec(),
data: Bytes::from(&b"hello_world"[..]),
};
b.iter(|| frame_serialize(frame.clone(), &mut buffer))
});
c.bench_function("tcp_short_msg", |b| {
let buffer = Arc::new(MessageBuffer {
data: b"hello_world".to_vec(),
});
b.to_async(rt()).iter(|| tcp_msg(Arc::clone(&buffer), 1))
});
c.bench_function("tcp_1GB_in_10000_msg", |b| {
let buffer = Arc::new(MessageBuffer {
data: vec![155u8; 100_000],
});
b.to_async(rt())
.iter(|| tcp_msg(Arc::clone(&buffer), 10_000))
});
c.bench_function("tcp_1000000_tiny_msg", |b| {
let buffer = Arc::new(MessageBuffer { data: vec![3u8; 5] });
b.to_async(rt())
.iter(|| tcp_msg(Arc::clone(&buffer), 1_000_000))
b.iter_with_setup(
|| frame.clone(),
|frame| frame_serialize(frame, &mut buffer),
)
});
}
criterion_group!(benches, criterion_benchmark);
fn criterion_mpsc(c: &mut Criterion) {
let mut c = c.benchmark_group("mpsc");
c.significance_level(0.1).sample_size(10);
c.throughput(Throughput::Bytes(1000000000))
.bench_function("1GB_in_10000_msg", |b| {
let buffer = Bytes::from(&[155u8; 100_000][..]);
b.to_async(rt()).iter_with_setup(
|| (buffer.clone(), utils::ac_bound(10, None)),
|(b, p)| send_and_recv_msg(p, b, 10_000),
)
});
c.throughput(Throughput::Elements(1000000))
.bench_function("1000000_tiny_msg", |b| {
let buffer = Bytes::from(&[3u8; 5][..]);
b.to_async(rt()).iter_with_setup(
|| (buffer.clone(), utils::ac_bound(10, None)),
|(b, p)| send_and_recv_msg(p, b, 1_000_000),
)
});
c.finish();
}
fn criterion_tcp(c: &mut Criterion) {
let mut c = c.benchmark_group("tcp");
c.significance_level(0.1).sample_size(10);
c.throughput(Throughput::Bytes(1000000000))
.bench_function("1GB_in_10000_msg", |b| {
let buf = Bytes::from(&[155u8; 100_000][..]);
b.to_async(rt()).iter_with_setup(
|| (buf.clone(), utils::tcp_bound(10000, None)),
|(b, p)| send_and_recv_msg(p, b, 10_000),
)
});
c.throughput(Throughput::Elements(1000000))
.bench_function("1000000_tiny_msg", |b| {
let buf = Bytes::from(&[3u8; 5][..]);
b.to_async(rt()).iter_with_setup(
|| (buf.clone(), utils::tcp_bound(10000, None)),
|(b, p)| send_and_recv_msg(p, b, 1_000_000),
)
});
c.finish();
}
criterion_group!(benches, criterion_util, criterion_mpsc, criterion_tcp);
criterion_main!(benches);
mod utils {
@ -151,7 +164,7 @@ mod utils {
pub fn ac_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(MpscSendProtcol<ACDrain>, MpscRecvProtcol<ACSink>); 2] {
) -> [(MpscSendProtocol<ACDrain>, MpscRecvProtocol<ACSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
@ -159,12 +172,12 @@ mod utils {
});
[
(
MpscSendProtcol::new(ACDrain { sender: s1 }, m.clone()),
MpscRecvProtcol::new(ACSink { receiver: r2 }, m.clone()),
MpscSendProtocol::new(ACDrain { sender: s1 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r2 }, m.clone()),
),
(
MpscSendProtcol::new(ACDrain { sender: s2 }, m.clone()),
MpscRecvProtcol::new(ACSink { receiver: r1 }, m.clone()),
MpscSendProtocol::new(ACDrain { sender: s2 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r1 }, m),
),
]
}
@ -181,7 +194,7 @@ mod utils {
pub fn tcp_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(TcpSendProtcol<TcpDrain>, TcpRecvProtcol<TcpSink>); 2] {
) -> [(TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
@ -189,12 +202,12 @@ mod utils {
});
[
(
TcpSendProtcol::new(TcpDrain { sender: s1 }, m.clone()),
TcpRecvProtcol::new(TcpSink { receiver: r2 }, m.clone()),
TcpSendProtocol::new(TcpDrain { sender: s1 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r2 }, m.clone()),
),
(
TcpSendProtcol::new(TcpDrain { sender: s2 }, m.clone()),
TcpRecvProtcol::new(TcpSink { receiver: r1 }, m.clone()),
TcpSendProtocol::new(TcpDrain { sender: s2 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r1 }, m),
),
]
}


@ -1,11 +1,13 @@
use crate::{
frame::Frame,
message::MessageBuffer,
frame::OTFrame,
types::{Bandwidth, Mid, Prio, Promises, Sid},
};
use std::sync::Arc;
use bytes::Bytes;
/* used for communication with Protocols */
/// used for communication with [`SendProtocol`] and [`RecvProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
/// [`RecvProtocol`]: crate::RecvProtocol
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
pub enum ProtocolEvent {
@ -20,29 +22,29 @@ pub enum ProtocolEvent {
sid: Sid,
},
Message {
buffer: Arc<MessageBuffer>,
data: Bytes,
mid: Mid,
sid: Sid,
},
}
impl ProtocolEvent {
pub(crate) fn to_frame(&self) -> Frame {
pub(crate) fn to_frame(&self) -> OTFrame {
match self {
ProtocolEvent::Shutdown => Frame::Shutdown,
ProtocolEvent::Shutdown => OTFrame::Shutdown,
ProtocolEvent::OpenStream {
sid,
prio,
promises,
guaranteed_bandwidth: _,
} => Frame::OpenStream {
} => OTFrame::OpenStream {
sid: *sid,
prio: *prio,
promises: *promises,
},
ProtocolEvent::CloseStream { sid } => Frame::CloseStream { sid: *sid },
ProtocolEvent::CloseStream { sid } => OTFrame::CloseStream { sid: *sid },
ProtocolEvent::Message { .. } => {
unimplemented!("Event::Message to Frame IS NOT supported")
unimplemented!("Event::Message to OTFrame IS NOT supported")
},
}
}
@ -54,18 +56,18 @@ mod tests {
#[test]
fn test_to_frame() {
assert_eq!(ProtocolEvent::Shutdown.to_frame(), Frame::Shutdown);
assert_eq!(ProtocolEvent::Shutdown.to_frame(), OTFrame::Shutdown);
assert_eq!(
ProtocolEvent::CloseStream { sid: Sid::new(42) }.to_frame(),
Frame::CloseStream { sid: Sid::new(42) }
OTFrame::CloseStream { sid: Sid::new(42) }
);
}
#[test]
#[should_panic]
fn test_sixlet_to_str() {
fn test_msg_buffer_panic() {
let _ = ProtocolEvent::Message {
buffer: Arc::new(MessageBuffer { data: vec![] }),
data: Bytes::new(),
mid: 0,
sid: Sid::new(23),
}


@ -1,5 +1,5 @@
use crate::types::{Mid, Pid, Prio, Promises, Sid};
use bytes::{Buf, BufMut, BytesMut};
use bytes::{Buf, BufMut, Bytes, BytesMut};
// const FRAME_RESERVED_1: u8 = 0;
const FRAME_HANDSHAKE: u8 = 1;
@ -15,7 +15,7 @@ const FRAME_RAW: u8 = 8;
/// Used for Communication between Channel <----(TCP/UDP)----> Channel
#[derive(Debug, PartialEq, Clone)]
pub /* should be crate only */ enum InitFrame {
pub enum InitFrame {
Handshake {
magic_number: [u8; 7],
version: [u32; 3],
@ -24,14 +24,14 @@ pub /* should be crate only */ enum InitFrame {
pid: Pid,
secret: u128,
},
/* WARNING: Sending RAW is only used for debug purposes in case someone write a new API
* against veloren Server! */
/// WARNING: sending RAW is only for debug purposes and will drop the
/// connection
Raw(Vec<u8>),
}
/// Used for Communication between Channel <----(TCP/UDP)----> Channel
/// Used for OUT TCP Communication between Channel --(TCP)--> Channel
#[derive(Debug, PartialEq, Clone)]
pub enum Frame {
pub enum OTFrame {
Shutdown, /* Shutdown this channel gracefully, if all channels are shutdown (gracefully),
* Participant is deleted */
OpenStream {
@ -49,8 +49,33 @@ pub enum Frame {
},
Data {
mid: Mid,
start: u64,
data: Vec<u8>,
start: u64, /* remove */
data: Bytes,
},
}
/// Used for IN TCP Communication between Channel <--(TCP)-- Channel
#[derive(Debug, PartialEq, Clone)]
pub enum ITFrame {
Shutdown, /* Shutdown this channel gracefully, if all channels are shutdown (gracefully),
* Participant is deleted */
OpenStream {
sid: Sid,
prio: Prio,
promises: Promises,
},
CloseStream {
sid: Sid,
},
DataHeader {
mid: Mid,
sid: Sid,
length: u64,
},
Data {
mid: Mid,
start: u64, /* remove */
data: BytesMut,
},
}
@ -62,7 +87,7 @@ impl InitFrame {
pub(crate) const RAW_CNS: usize = 2;
//provide an appropriate buffer size. > 1500
pub(crate) fn to_bytes(self, bytes: &mut BytesMut) {
pub(crate) fn write_bytes(self, bytes: &mut BytesMut) {
match self {
InitFrame::Handshake {
magic_number,
@ -87,7 +112,7 @@ impl InitFrame {
}
}
pub(crate) fn to_frame(bytes: &mut BytesMut) -> Option<Self> {
pub(crate) fn read_frame(bytes: &mut BytesMut) -> Option<Self> {
let frame_no = match bytes.get(0) {
Some(&f) => f,
None => return None,
@ -124,8 +149,6 @@ impl InitFrame {
let length = bytes.get_u16_le() as usize;
// lower length is allowed
let max_length = length.min(bytes.len());
println!("dasdasd {:?}", length);
println!("aaaaa {:?}", max_length);
let mut data = vec![0; max_length];
data.copy_from_slice(&bytes[..max_length]);
InitFrame::Raw(data)
@ -136,71 +159,67 @@ impl InitFrame {
}
}
impl Frame {
pub(crate) const CLOSE_STREAM_CNS: usize = 8;
/// const part of the DATA frame, actual size is variable
pub(crate) const DATA_CNS: usize = 18;
pub(crate) const DATA_HEADER_CNS: usize = 24;
pub(crate) const OPEN_STREAM_CNS: usize = 10;
// Size WITHOUT the 1rst indicating byte
pub(crate) const SHUTDOWN_CNS: usize = 0;
pub(crate) const TCP_CLOSE_STREAM_CNS: usize = 8;
/// const part of the DATA frame, actual size is variable
pub(crate) const TCP_DATA_CNS: usize = 18;
pub(crate) const TCP_DATA_HEADER_CNS: usize = 24;
pub(crate) const TCP_OPEN_STREAM_CNS: usize = 10;
// Size WITHOUT the 1st indicating byte
pub(crate) const TCP_SHUTDOWN_CNS: usize = 0;
//provide an appropriate buffer size. > 1500
pub fn to_bytes(self, bytes: &mut BytesMut) -> u64 {
impl OTFrame {
pub fn write_bytes(self, bytes: &mut BytesMut) {
match self {
Frame::Shutdown => {
Self::Shutdown => {
bytes.put_u8(FRAME_SHUTDOWN);
0
},
Frame::OpenStream {
Self::OpenStream {
sid,
prio,
promises,
} => {
bytes.put_u8(FRAME_OPEN_STREAM);
bytes.put_slice(&sid.to_le_bytes());
sid.to_bytes(bytes);
bytes.put_u8(prio);
bytes.put_u8(promises.to_le_bytes()[0]);
0
},
Frame::CloseStream { sid } => {
Self::CloseStream { sid } => {
bytes.put_u8(FRAME_CLOSE_STREAM);
bytes.put_slice(&sid.to_le_bytes());
0
sid.to_bytes(bytes);
},
Frame::DataHeader { mid, sid, length } => {
Self::DataHeader { mid, sid, length } => {
bytes.put_u8(FRAME_DATA_HEADER);
bytes.put_u64_le(mid);
bytes.put_slice(&sid.to_le_bytes());
sid.to_bytes(bytes);
bytes.put_u64_le(length);
0
},
Frame::Data { mid, start, data } => {
Self::Data { mid, start, data } => {
bytes.put_u8(FRAME_DATA);
bytes.put_u64_le(mid);
bytes.put_u64_le(start);
bytes.put_u16_le(data.len() as u16);
bytes.put_slice(&data);
data.len() as u64
},
}
}
}
pub(crate) fn to_frame(bytes: &mut BytesMut) -> Option<Self> {
impl ITFrame {
pub(crate) fn read_frame(bytes: &mut BytesMut) -> Option<Self> {
let frame_no = match bytes.first() {
Some(&f) => f,
None => return None,
};
let size = match frame_no {
FRAME_SHUTDOWN => Self::SHUTDOWN_CNS,
FRAME_OPEN_STREAM => Self::OPEN_STREAM_CNS,
FRAME_CLOSE_STREAM => Self::CLOSE_STREAM_CNS,
FRAME_DATA_HEADER => Self::DATA_HEADER_CNS,
FRAME_SHUTDOWN => TCP_SHUTDOWN_CNS,
FRAME_OPEN_STREAM => TCP_OPEN_STREAM_CNS,
FRAME_CLOSE_STREAM => TCP_CLOSE_STREAM_CNS,
FRAME_DATA_HEADER => TCP_DATA_HEADER_CNS,
FRAME_DATA => {
if bytes.len() < 17 + 1 + 1 {
return None;
}
u16::from_le_bytes([bytes[16 + 1], bytes[17 + 1]]) as usize + Self::DATA_CNS
u16::from_le_bytes([bytes[16 + 1], bytes[17 + 1]]) as usize + TCP_DATA_CNS
},
_ => return None,
};
@ -212,13 +231,13 @@ impl Frame {
let frame = match frame_no {
FRAME_SHUTDOWN => {
let _ = bytes.split_to(size + 1);
Frame::Shutdown
Self::Shutdown
},
FRAME_OPEN_STREAM => {
let mut bytes = bytes.split_to(size + 1);
bytes.advance(1);
Frame::OpenStream {
sid: Sid::new(bytes.get_u64_le()),
Self::OpenStream {
sid: Sid::from_bytes(&mut bytes),
prio: bytes.get_u8(),
promises: Promises::from_bits_truncate(bytes.get_u8()),
}
@ -226,29 +245,27 @@ impl Frame {
FRAME_CLOSE_STREAM => {
let mut bytes = bytes.split_to(size + 1);
bytes.advance(1);
Frame::CloseStream {
sid: Sid::new(bytes.get_u64_le()),
Self::CloseStream {
sid: Sid::from_bytes(&mut bytes),
}
},
FRAME_DATA_HEADER => {
let mut bytes = bytes.split_to(size + 1);
bytes.advance(1);
Frame::DataHeader {
Self::DataHeader {
mid: bytes.get_u64_le(),
sid: Sid::new(bytes.get_u64_le()),
sid: Sid::from_bytes(&mut bytes),
length: bytes.get_u64_le(),
}
},
FRAME_DATA => {
let mut info = bytes.split_to(Self::DATA_CNS + 1);
info.advance(1);
let mid = info.get_u64_le();
let start = info.get_u64_le();
let length = info.get_u16_le();
debug_assert_eq!(length as usize, size - Self::DATA_CNS);
bytes.advance(1);
let mid = bytes.get_u64_le();
let start = bytes.get_u64_le();
let length = bytes.get_u16_le();
debug_assert_eq!(length as usize, size - TCP_DATA_CNS);
let data = bytes.split_to(length as usize);
let data = data.to_vec();
Frame::Data { mid, start, data }
Self::Data { mid, start, data }
},
_ => unreachable!("Frame::to_frame should be handled before!"),
};
@ -256,6 +273,29 @@ impl Frame {
}
}
#[allow(unused_variables)]
impl PartialEq<ITFrame> for OTFrame {
fn eq(&self, other: &ITFrame) -> bool {
match self {
Self::Shutdown => matches!(other, ITFrame::Shutdown),
Self::OpenStream {
sid,
prio,
promises,
} => matches!(other, ITFrame::OpenStream {
sid,
prio,
promises
}),
Self::CloseStream { sid } => matches!(other, ITFrame::CloseStream { sid }),
Self::DataHeader { mid, sid, length } => {
matches!(other, ITFrame::DataHeader { mid, sid, length })
},
Self::Data { mid, start, data } => matches!(other, ITFrame::Data { mid, start, data }),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -275,32 +315,32 @@ mod tests {
]
}
fn get_frames() -> Vec<Frame> {
fn get_otframes() -> Vec<OTFrame> {
vec![
Frame::OpenStream {
OTFrame::OpenStream {
sid: Sid::new(1337),
prio: 14,
promises: Promises::GUARANTEED_DELIVERY,
},
Frame::DataHeader {
OTFrame::DataHeader {
sid: Sid::new(1337),
mid: 0,
length: 36,
},
Frame::Data {
OTFrame::Data {
mid: 0,
start: 0,
data: vec![77u8; 20],
data: Bytes::from(&[77u8; 20][..]),
},
Frame::Data {
OTFrame::Data {
mid: 0,
start: 20,
data: vec![42u8; 16],
data: Bytes::from(&[42u8; 16][..]),
},
Frame::CloseStream {
OTFrame::CloseStream {
sid: Sid::new(1337),
},
Frame::Shutdown,
OTFrame::Shutdown,
]
}
@ -308,8 +348,8 @@ mod tests {
fn initframe_individual() {
let dupl = |frame: InitFrame| {
let mut buffer = BytesMut::with_capacity(1500);
InitFrame::to_bytes(frame.clone(), &mut buffer);
InitFrame::to_frame(&mut buffer)
InitFrame::write_bytes(frame, &mut buffer);
InitFrame::read_frame(&mut buffer)
};
for frame in get_initframes() {
@ -325,13 +365,13 @@ mod tests {
let mut frames = get_initframes();
// to string
for f in &frames {
InitFrame::to_bytes(f.clone(), &mut buffer);
InitFrame::write_bytes(f.clone(), &mut buffer);
}
// from string
let mut framesd = frames
.iter()
.map(|&_| InitFrame::to_frame(&mut buffer))
.map(|&_| InitFrame::read_frame(&mut buffer))
.collect::<Vec<_>>();
// compare
@ -343,15 +383,15 @@ mod tests {
#[test]
fn frame_individual() {
let dupl = |frame: Frame| {
let dupl = |frame: OTFrame| {
let mut buffer = BytesMut::with_capacity(1500);
Frame::to_bytes(frame.clone(), &mut buffer);
Frame::to_frame(&mut buffer)
OTFrame::write_bytes(frame, &mut buffer);
ITFrame::read_frame(&mut buffer)
};
for frame in get_frames() {
for frame in get_otframes() {
println!("frame: {:?}", &frame);
assert_eq!(Some(frame.clone()), dupl(frame));
assert_eq!(frame.clone(), dupl(frame).expect("NONE"));
}
}
@ -359,36 +399,36 @@ mod tests {
fn frame_multiple() {
let mut buffer = BytesMut::with_capacity(3000);
let mut frames = get_frames();
let mut frames = get_otframes();
// to string
for f in &frames {
Frame::to_bytes(f.clone(), &mut buffer);
OTFrame::write_bytes(f.clone(), &mut buffer);
}
// from string
let mut framesd = frames
.iter()
.map(|&_| Frame::to_frame(&mut buffer))
.map(|&_| ITFrame::read_frame(&mut buffer))
.collect::<Vec<_>>();
// compare
for (f, fd) in frames.drain(..).zip(framesd.drain(..)) {
println!("frame: {:?}", &f);
assert_eq!(Some(f), fd);
assert_eq!(f, fd.expect("NONE"));
}
}
#[test]
fn frame_exact_size() {
const SIZE: usize = Frame::CLOSE_STREAM_CNS+1/*first byte*/;
const SIZE: usize = TCP_CLOSE_STREAM_CNS+1/*first byte*/;
let mut buffer = BytesMut::with_capacity(SIZE);
let frame1 = Frame::CloseStream { sid: Sid::new(2) };
Frame::to_bytes(frame1.clone(), &mut buffer);
let frame1 = OTFrame::CloseStream { sid: Sid::new(2) };
OTFrame::write_bytes(frame1.clone(), &mut buffer);
assert_eq!(buffer.len(), SIZE);
let mut deque = buffer.iter().map(|b| *b).collect();
let frame2 = Frame::to_frame(&mut deque);
assert_eq!(Some(frame1), frame2);
let mut deque = buffer.iter().copied().collect();
let frame2 = ITFrame::read_frame(&mut deque);
assert_eq!(frame1, frame2.expect("NONE"));
}
#[test]
@ -399,7 +439,7 @@ mod tests {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
};
InitFrame::to_bytes(frame1.clone(), &mut buffer);
InitFrame::write_bytes(frame1, &mut buffer);
}
#[test]
@ -410,9 +450,9 @@ mod tests {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
};
let _ = InitFrame::to_bytes(frame1.clone(), &mut buffer);
let _ = InitFrame::write_bytes(frame1, &mut buffer);
buffer.truncate(6); // simulate partial retrieve
let frame1d = InitFrame::to_frame(&mut buffer);
let frame1d = InitFrame::read_frame(&mut buffer);
assert_eq!(frame1d, None);
}
@ -420,7 +460,7 @@ mod tests {
fn initframe_rubish() {
let mut buffer = BytesMut::from(&b"dtrgwcser"[..]);
assert_eq!(
InitFrame::to_frame(&mut buffer),
InitFrame::read_frame(&mut buffer),
Some(InitFrame::Raw(b"dtrgwcser".to_vec()))
);
}
@ -430,9 +470,9 @@ mod tests {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = InitFrame::Raw(b"foobar".to_vec());
let _ = InitFrame::to_bytes(frame1.clone(), &mut buffer);
let _ = InitFrame::write_bytes(frame1.clone(), &mut buffer);
buffer[1] = 255;
let framed = InitFrame::to_frame(&mut buffer);
let framed = InitFrame::read_frame(&mut buffer);
assert_eq!(framed, Some(frame1));
}
@ -441,9 +481,9 @@ mod tests {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = InitFrame::Raw(b"foobar".to_vec());
let _ = InitFrame::to_bytes(frame1.clone(), &mut buffer);
let _ = InitFrame::write_bytes(frame1, &mut buffer);
buffer[1] = 3;
let framed = InitFrame::to_frame(&mut buffer);
let framed = InitFrame::read_frame(&mut buffer);
// we accept a different frame here, as it's RAW and debug only!
assert_eq!(framed, Some(InitFrame::Raw(b"foo".to_vec())));
}
@ -452,48 +492,48 @@ mod tests {
fn frame_too_short_buffer() {
let mut buffer = BytesMut::with_capacity(10);
let frame1 = Frame::OpenStream {
let frame1 = OTFrame::OpenStream {
sid: Sid::new(88),
promises: Promises::ENCRYPTED,
prio: 88,
};
Frame::to_bytes(frame1.clone(), &mut buffer);
OTFrame::write_bytes(frame1, &mut buffer);
}
#[test]
fn frame_too_less_data() {
let mut buffer = BytesMut::with_capacity(20);
let frame1 = Frame::OpenStream {
let frame1 = OTFrame::OpenStream {
sid: Sid::new(88),
promises: Promises::ENCRYPTED,
prio: 88,
};
Frame::to_bytes(frame1.clone(), &mut buffer);
OTFrame::write_bytes(frame1, &mut buffer);
buffer.truncate(6); // simulate partial retrieve
let frame1d = Frame::to_frame(&mut buffer);
let frame1d = ITFrame::read_frame(&mut buffer);
assert_eq!(frame1d, None);
}
#[test]
fn frame_rubish() {
let mut buffer = BytesMut::from(&b"dtrgwcser"[..]);
assert_eq!(Frame::to_frame(&mut buffer), None);
assert_eq!(ITFrame::read_frame(&mut buffer), None);
}
#[test]
fn frame_attack_too_much_length() {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = Frame::Data {
let frame1 = OTFrame::Data {
mid: 7u64,
start: 1u64,
data: b"foobar".to_vec(),
data: Bytes::from(&b"foobar"[..]),
};
Frame::to_bytes(frame1.clone(), &mut buffer);
OTFrame::write_bytes(frame1, &mut buffer);
buffer[17] = 255;
let framed = Frame::to_frame(&mut buffer);
let framed = ITFrame::read_frame(&mut buffer);
assert_eq!(framed, None);
}
@ -501,25 +541,25 @@ mod tests {
fn frame_attack_too_low_length() {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = Frame::Data {
let frame1 = OTFrame::Data {
mid: 7u64,
start: 1u64,
data: b"foobar".to_vec(),
data: Bytes::from(&b"foobar"[..]),
};
Frame::to_bytes(frame1.clone(), &mut buffer);
OTFrame::write_bytes(frame1, &mut buffer);
buffer[17] = 3;
let framed = Frame::to_frame(&mut buffer);
let framed = ITFrame::read_frame(&mut buffer);
assert_eq!(
framed,
Some(Frame::Data {
Some(ITFrame::Data {
mid: 7u64,
start: 1u64,
data: b"foo".to_vec(),
data: BytesMut::from(&b"foo"[..]),
})
);
//next = Invalid => Empty
let framed = Frame::to_frame(&mut buffer);
let framed = ITFrame::read_frame(&mut buffer);
assert_eq!(framed, None);
}
}


@ -9,13 +9,24 @@ use crate::{
use async_trait::async_trait;
use tracing::{debug, error, info, trace};
// Protocols might define a Reliable Variant for auto Handshake discovery
// this doesn't need to be effective
/// Implement this for auto Handshake with [`ReliableSink`].
/// You must make sure that EVERY message sent this way is actually received on
/// the receiving side:
/// - exactly once
/// - in the correct order
/// - correctly
///
/// [`ReliableSink`]: crate::ReliableSink
/// [`RecvProtocol`]: crate::RecvProtocol
#[async_trait]
pub trait ReliableDrain {
async fn send(&mut self, frame: InitFrame) -> Result<(), ProtocolError>;
}
/// Implement this for auto Handshake with [`ReliableDrain`]. See
/// [`ReliableDrain`].
///
/// [`ReliableDrain`]: crate::ReliableDrain
#[async_trait]
pub trait ReliableSink {
async fn recv(&mut self) -> Result<InitFrame, ProtocolError>;
@ -34,14 +45,13 @@ where
local_secret: u128,
) -> Result<(Pid, Sid, u128), InitProtocolError> {
#[cfg(debug_assertions)]
const WRONG_NUMBER: &'static [u8] = "Handshake does not contain the magic number required \
by veloren server.\nWe are not sure if you are a \
valid veloren client.\nClosing the connection"
.as_bytes();
const WRONG_NUMBER: &str = "Handshake does not contain the magic number required by \
veloren server.\nWe are not sure if you are a valid veloren \
client.\nClosing the connection";
#[cfg(debug_assertions)]
const WRONG_VERSION: &'static str = "Handshake does contain a correct magic number, but \
invalid version.\nWe don't know how to communicate \
with you.\nClosing the connection";
const WRONG_VERSION: &str = "Handshake does contain a correct magic number, but invalid \
version.\nWe don't know how to communicate with \
you.\nClosing the connection";
const ERR_S: &str = "Got A Raw Message, these are usually Debug Messages indicating that \
something went wrong on network layer and connection will be closed";
@ -66,7 +76,9 @@ where
if magic_number != VELOREN_MAGIC_NUMBER {
error!(?magic_number, "Connection with invalid magic_number");
#[cfg(debug_assertions)]
drain.send(InitFrame::Raw(WRONG_NUMBER.to_vec())).await?;
drain
.send(InitFrame::Raw(WRONG_NUMBER.as_bytes().to_vec()))
.await?;
Err(InitProtocolError::WrongMagicNumber(magic_number))
} else if version != VELOREN_NETWORK_VERSION {
error!(?version, "Connection with wrong network version");


@ -1,63 +0,0 @@
use crate::ProtocolError;
use async_trait::async_trait;
use bytes::BytesMut;
use std::collections::VecDeque;
///! I/O-Free (Sans-I/O) protocol https://sans-io.readthedocs.io/how-to-sans-io.html
// Protocols should base on the Unrealiable variants to get something effective!
#[async_trait]
pub trait UnreliableDrain: Send {
type DataFormat;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError>;
}
#[async_trait]
pub trait UnreliableSink: Send {
type DataFormat;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError>;
}
pub struct BaseDrain {
data: VecDeque<BytesMut>,
}
pub struct BaseSink {
data: VecDeque<BytesMut>,
}
impl BaseDrain {
pub fn new() -> Self {
Self {
data: VecDeque::new(),
}
}
}
impl BaseSink {
pub fn new() -> Self {
Self {
data: VecDeque::new(),
}
}
}
//TODO: Test Sinks that drop 20% by random and log that
#[async_trait]
impl UnreliableDrain for BaseDrain {
type DataFormat = BytesMut;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
self.data.push_back(data);
Ok(())
}
}
#[async_trait]
impl UnreliableSink for BaseSink {
type DataFormat = BytesMut;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.data.pop_front().ok_or(ProtocolError::Closed)
}
}


@ -1,7 +1,57 @@
//! Network Protocol
//!
//! An I/O-free protocol for the veloren network crate.
//! This crate defines multiple different protocols over the [`UnreliableDrain`]
//! and [`UnreliableSink`] traits, which allows the behavior of a protocol to be
//! defined separately from the actual I/O.
//!
//! For example, we define the TCP protocol on top of Drains and Sinks that can
//! send chunks of bytes. You can now implement your own Drain and Sink that
//! sends the data via tokio's or std's implementation, or you can just use a
//! std::mpsc::channel for unit tests without needing an actual TCP socket.
//!
//! This crate currently defines:
//! - TCP
//! - MPSC
//!
//! A UDP implementation will follow shortly, and it's also possible to abstract
//! over QUIC.
//!
//! Warning: don't mix protocols. Using the TCP variant on an actual UDP socket
//! will result in dropped data, and using the UDP variant on a TCP socket is a
//! waste of resources.
//!
//! A *channel* in this crate is defined as a combination of a *read* and a
//! *write* protocol.
//!
//! # adding a protocol
//!
//! We start by defining our DataFormat. For most protocols this is probably
//! [`Vec<u8>`] or [`Bytes`]. MPSC can directly send a message without
//! serialisation.
//!
//! Create two structs, one for the sending and one for the receiving end, based
//! on a generic Drain/Sink with your required DataFormat.
//! Implement the [`SendProtocol`] and [`RecvProtocol`] traits respectively.
//!
//! Implement the handshake via [`InitProtocol`]; alternatively, you can also
//! implement `ReliableDrain` and `ReliableSink` to use the default handshake.
//!
//! This crate also contains consts and definitions for the network protocol.
//!
//! For an *example* see `TcpDrain` and `TcpSink` in [tcp.rs](tcp.rs).
//!
//! [`UnreliableDrain`]: crate::UnreliableDrain
//! [`UnreliableSink`]: crate::UnreliableSink
//! [`Vec<u8>`]: std::vec::Vec
//! [`Bytes`]: bytes::Bytes
//! [`SendProtocol`]: crate::SendProtocol
//! [`RecvProtocol`]: crate::RecvProtocol
//! [`InitProtocol`]: crate::InitProtocol
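
As a minimal sketch of the pattern described above (illustrative only, not part of this diff; it assumes the `UnreliableDrain`/`UnreliableSink` signatures shown further below and an `async_channel`-backed transport similar to the `ACDrain`/`ACSink` test helpers):

```rust
use async_trait::async_trait;
use bytes::BytesMut;
use veloren_network_protocol::{ProtocolError, UnreliableDrain, UnreliableSink};

// A Drain/Sink pair whose "wire" is just an in-process channel.
struct ChannelDrain { sender: async_channel::Sender<BytesMut> }
struct ChannelSink { receiver: async_channel::Receiver<BytesMut> }

#[async_trait]
impl UnreliableDrain for ChannelDrain {
    type DataFormat = BytesMut;
    async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
        // A closed channel maps onto ProtocolError::Closed.
        self.sender.send(data).await.map_err(|_| ProtocolError::Closed)
    }
}

#[async_trait]
impl UnreliableSink for ChannelSink {
    type DataFormat = BytesMut;
    async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
        self.receiver.recv().await.map_err(|_| ProtocolError::Closed)
    }
}
```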
mod event;
mod frame;
mod handshake;
mod io;
mod message;
mod metrics;
mod mpsc;
@ -10,22 +60,23 @@ mod tcp;
mod types;
pub use event::ProtocolEvent;
pub use io::{BaseDrain, BaseSink, UnreliableDrain, UnreliableSink};
pub use message::MessageBuffer;
pub use metrics::ProtocolMetricCache;
#[cfg(feature = "metrics")]
pub use metrics::ProtocolMetrics;
pub use mpsc::{MpscMsg, MpscRecvProtcol, MpscSendProtcol};
pub use tcp::{TcpRecvProtcol, TcpSendProtcol};
pub use types::{Bandwidth, Cid, Mid, Pid, Prio, Promises, Sid, VELOREN_NETWORK_VERSION};
pub use mpsc::{MpscMsg, MpscRecvProtocol, MpscSendProtocol};
pub use tcp::{TcpRecvProtocol, TcpSendProtocol};
pub use types::{
Bandwidth, Cid, Mid, Pid, Prio, Promises, Sid, HIGHEST_PRIO, VELOREN_NETWORK_VERSION,
};
/// Use at your own risk, might change at any time; for internal benchmarks only.
pub mod _internal {
pub use crate::frame::Frame;
pub use crate::frame::{ITFrame, OTFrame};
}
use async_trait::async_trait;
/// Handshake: Used to connect 2 Channels.
#[async_trait]
pub trait InitProtocol {
async fn initialize(
@ -36,14 +87,32 @@ pub trait InitProtocol {
) -> Result<(Pid, Sid, u128), InitProtocolError>;
}
/// Generic Network Send Protocol.
/// Implement this for your Protocol of choice (tcp, udp, mpsc, quic).
/// Allows the creation/deletion of `Streams` and sending messages via
/// [`ProtocolEvent`].
///
/// A `Stream` MUST be bound to a specific Channel. You MUST NOT switch the
/// channel to send a stream mid-air. We will provide takeover options for
/// Channel closure in the future to allow keeping a `Stream` over a broken
/// Channel.
///
/// [`ProtocolEvent`]: crate::ProtocolEvent
#[async_trait]
pub trait SendProtocol {
//a stream MUST be bound to a specific Protocol, there will be a failover
// feature comming for the case where a Protocol fails completly
/// use this to notify the sending side of streams that were created/removed
/// from remote
/// YOU MUST inform the `SendProtocol` of any Stream Open BEFORE using it in
/// `send` and of any Stream Close AFTER using it in `send`, via this fn.
fn notify_from_recv(&mut self, event: ProtocolEvent);
/// Send an Event via this Protocol. The `SendProtocol` MAY require `flush`
/// to be called before actual data is sent to the respective `Sink`.
async fn send(&mut self, event: ProtocolEvent) -> Result<(), ProtocolError>;
/// Flush all buffered messages according to their [`Prio`] and
/// [`Bandwidth`]. Provide the current bandwidth budget (per second) as
/// well as the `dt` since the last call. According to the budget, the
/// respective messages will be flushed.
///
/// [`Prio`]: crate::Prio
/// [`Bandwidth`]: crate::Bandwidth
async fn flush(
&mut self,
bandwidth: Bandwidth,
@ -51,11 +120,42 @@ pub trait SendProtocol {
) -> Result<(), ProtocolError>;
}
/// Generic Network Recv Protocol. See: [`SendProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
#[async_trait]
pub trait RecvProtocol {
/// Either recv an event or fail the Protocol; once the Recv side is closed,
/// it cannot recover from the error.
async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError>;
}
/// This crate makes use of UnreliableDrains; they are expected to provide the
/// same guarantees as their I/O counterpart, e.g. ordered messages for TCP and
/// nothing for UDP. The respective Protocol then needs to handle this.
/// This trait is an abstraction over multiple Drains, e.g. [`tokio`](https://tokio.rs), [`async-std`], [`std`] or even [`async-channel`].
///
/// [`async-std`]: async-std
/// [`std`]: std
/// [`async-channel`]: async-channel
#[async_trait]
pub trait UnreliableDrain: Send {
type DataFormat;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError>;
}
/// Sink counterpart of [`UnreliableDrain`]
///
/// [`UnreliableDrain`]: crate::UnreliableDrain
#[async_trait]
pub trait UnreliableSink: Send {
type DataFormat;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError>;
}
/// All possible Errors that can happen during Handshake [`InitProtocol`]
///
/// [`InitProtocol`]: crate::InitProtocol
#[derive(Debug, PartialEq)]
pub enum InitProtocolError {
Closed,
@ -63,8 +163,8 @@ pub enum InitProtocolError {
WrongVersion([u32; 3]),
}
#[derive(Debug, PartialEq)]
/// When you return closed you must stay closed!
#[derive(Debug, PartialEq)]
pub enum ProtocolError {
Closed,
}


@ -1,39 +1,100 @@
use crate::{
frame::Frame,
frame::OTFrame,
types::{Mid, Sid},
};
use std::{collections::VecDeque, sync::Arc};
use bytes::{Bytes, BytesMut};
//Todo: Evaluate switching to VecDeque for quickly adding and removing data
// from front, back.
// - It would prob require custom bincode code but thats possible.
#[cfg_attr(test, derive(PartialEq))]
pub struct MessageBuffer {
pub data: Vec<u8>,
pub(crate) const ALLOC_BLOCK: usize = 16_777_216;
/// Contains an outgoing message for the TCP protocol.
/// All chunks have the same size, except for the last chunk, which can end
/// earlier. E.g.
/// ```ignore
/// msg = OTMessage::new();
/// msg.next();
/// msg.next();
/// ```
#[derive(Debug)]
pub(crate) struct OTMessage {
data: Bytes,
original_length: u64,
send_header: bool,
mid: Mid,
sid: Sid,
start: u64, /* remove */
}
impl std::fmt::Debug for MessageBuffer {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
//TODO: small messages!
let len = self.data.len();
if len > 20 {
write!(
f,
"MessageBuffer(len: {}, {}, {}, {}, {:X?}..{:X?})",
len,
u32::from_le_bytes([self.data[0], self.data[1], self.data[2], self.data[3]]),
u32::from_le_bytes([self.data[4], self.data[5], self.data[6], self.data[7]]),
u32::from_le_bytes([self.data[8], self.data[9], self.data[10], self.data[11]]),
&self.data[13..16],
&self.data[len - 8..len]
)
#[derive(Debug)]
pub(crate) struct ITMessage {
pub data: BytesMut,
pub sid: Sid,
pub length: u64,
}
impl OTMessage {
pub(crate) const FRAME_DATA_SIZE: u64 = 1400;
pub(crate) fn new(data: Bytes, mid: Mid, sid: Sid) -> Self {
let original_length = data.len() as u64;
Self {
data,
original_length,
send_header: false,
mid,
sid,
start: 0,
}
}
fn get_header(&self) -> OTFrame {
OTFrame::DataHeader {
mid: self.mid,
sid: self.sid,
length: self.data.len() as u64,
}
}
fn get_next_data(&mut self) -> OTFrame {
let to_send = std::cmp::min(self.data.len(), Self::FRAME_DATA_SIZE as usize);
let data = self.data.split_to(to_send);
let start = self.start;
self.start += Self::FRAME_DATA_SIZE;
OTFrame::Data {
mid: self.mid,
start,
data,
}
}
/// returns the next frame to send, if any remains
pub(crate) fn next(&mut self) -> Option<OTFrame> {
if !self.send_header {
self.send_header = true;
Some(self.get_header())
} else if !self.data.is_empty() {
Some(self.get_next_data())
} else {
write!(f, "MessageBuffer(len: {}, {:?})", len, &self.data[..])
None
}
}
pub(crate) fn get_sid_len(&self) -> (Sid, u64) { (self.sid, self.original_length) }
}
impl ITMessage {
pub(crate) fn new(sid: Sid, length: u64, _allocator: &mut BytesMut) -> Self {
//allocator.reserve(ALLOC_BLOCK);
//TODO: grab mem from the allocator, but this is only possible with unsafe
Self {
sid,
length,
data: BytesMut::with_capacity((length as usize).min(ALLOC_BLOCK /* anti-ddos */)),
}
}
}
/*
/// Contains a outgoing message and store what was *send* and *confirmed*
/// All Chunks have the same size, except for the last chunk which can end
/// earlier. E.g.
@ -45,7 +106,8 @@ impl std::fmt::Debug for MessageBuffer {
/// msg.confirm(2);
/// ```
#[derive(Debug)]
pub(crate) struct OutgoingMessage {
#[allow(dead_code)]
pub(crate) struct OUMessage {
buffer: Arc<MessageBuffer>,
send_index: u64, // 3 => 4200 (3*FRAME_DATA_SIZE)
send_header: bool,
@ -56,7 +118,8 @@ pub(crate) struct OutgoingMessage {
missing_indices: VecDeque<u64>,
}
impl OutgoingMessage {
#[allow(dead_code)]
impl OUMessage {
pub(crate) const FRAME_DATA_SIZE: u64 = 1400;
pub(crate) fn new(buffer: Arc<MessageBuffer>, mid: Mid, sid: Sid) -> Self {
@ -125,3 +188,4 @@ impl OutgoingMessage {
pub(crate) fn get_sid_len(&self) -> (Sid, u64) { (self.sid, self.buffer.data.len() as u64) }
}
*/


@ -5,7 +5,8 @@ use prometheus::{
IntCounterVec, IntGaugeVec, Opts, Registry,
};
#[cfg(feature = "metrics")]
use std::{collections::HashMap, error::Error, sync::Arc};
use std::collections::HashMap;
use std::{error::Error, sync::Arc};
#[allow(dead_code)]
pub enum RemoveReason {
@ -13,6 +14,10 @@ pub enum RemoveReason {
Dropped,
}
/// Use 1 `ProtocolMetrics` per `Network`.
/// It will contain all protocol-related [`prometheus`] information
///
/// [`prometheus`]: prometheus
#[cfg(feature = "metrics")]
pub struct ProtocolMetrics {
// smsg=send_msg rdata=receive_data
@ -55,6 +60,10 @@ pub struct ProtocolMetrics {
ping: IntGaugeVec,
}
/// Cache for [`ProtocolMetrics`], more optimized and cleaned up after channel
/// disconnect.
///
/// [`ProtocolMetrics`]: crate::ProtocolMetrics
#[cfg(feature = "metrics")]
#[derive(Debug, Clone)]
pub struct ProtocolMetricCache {
@ -201,17 +210,20 @@ impl ProtocolMetrics {
}
}
#[cfg(not(feature = "metrics"))]
pub struct ProtocolMetrics {}
#[cfg(feature = "metrics")]
#[derive(Debug, Clone)]
pub(crate) struct CacheLine {
smsg_it: GenericCounter<AtomicU64>,
smsg_ib: GenericCounter<AtomicU64>,
smsg_ot: [GenericCounter<AtomicU64>; 2],
smsg_ob: [GenericCounter<AtomicU64>; 2],
rmsg_it: GenericCounter<AtomicU64>,
rmsg_ib: GenericCounter<AtomicU64>,
rmsg_ot: [GenericCounter<AtomicU64>; 2],
rmsg_ob: [GenericCounter<AtomicU64>; 2],
pub smsg_it: GenericCounter<AtomicU64>,
pub smsg_ib: GenericCounter<AtomicU64>,
pub smsg_ot: [GenericCounter<AtomicU64>; 2],
pub smsg_ob: [GenericCounter<AtomicU64>; 2],
pub rmsg_it: GenericCounter<AtomicU64>,
pub rmsg_ib: GenericCounter<AtomicU64>,
pub rmsg_ot: [GenericCounter<AtomicU64>; 2],
pub rmsg_ob: [GenericCounter<AtomicU64>; 2],
}
#[cfg(feature = "metrics")]
@ -279,8 +291,8 @@ impl ProtocolMetricCache {
line.smsg_ob[reason.i()].inc_by(bytes);
}
pub(crate) fn sdata_frames_b(&mut self, bytes: u64) {
self.sdata_frames_t.inc();
pub(crate) fn sdata_frames_b(&mut self, cnt: u64, bytes: u64) {
self.sdata_frames_t.inc_by(cnt);
self.sdata_frames_b.inc_by(bytes);
}
@ -332,6 +344,31 @@ impl ProtocolMetricCache {
}
}
#[cfg(feature = "metrics")]
impl Drop for ProtocolMetricCache {
fn drop(&mut self) {
let cid = &self.cid;
let m = &self.m;
let finished = RemoveReason::Finished.to_str();
let dropped = RemoveReason::Dropped.to_str();
for (sid, _) in self.cache.drain() {
let s = sid.to_string();
let _ = m.smsg_it.remove_label_values(&[&cid, &s]);
let _ = m.smsg_ib.remove_label_values(&[&cid, &s]);
let _ = m.smsg_ot.remove_label_values(&[&cid, &s, &finished]);
let _ = m.smsg_ot.remove_label_values(&[&cid, &s, &dropped]);
let _ = m.smsg_ob.remove_label_values(&[&cid, &s, &finished]);
let _ = m.smsg_ob.remove_label_values(&[&cid, &s, &dropped]);
let _ = m.rmsg_it.remove_label_values(&[&cid, &s]);
let _ = m.rmsg_ib.remove_label_values(&[&cid, &s]);
let _ = m.rmsg_ot.remove_label_values(&[&cid, &s, &finished]);
let _ = m.rmsg_ot.remove_label_values(&[&cid, &s, &dropped]);
let _ = m.rmsg_ob.remove_label_values(&[&cid, &s, &finished]);
let _ = m.rmsg_ob.remove_label_values(&[&cid, &s, &dropped]);
}
}
}
#[cfg(feature = "metrics")]
impl std::fmt::Debug for ProtocolMetrics {
#[inline]
@ -342,45 +379,40 @@ impl std::fmt::Debug for ProtocolMetrics {
#[cfg(not(feature = "metrics"))]
impl ProtocolMetricCache {
pub(crate) fn smsg_it(&mut self, _sid: Sid) {}
pub fn new(_channel_key: &str, _metrics: Arc<ProtocolMetrics>) -> Self { Self {} }
pub(crate) fn smsg_ib(&mut self, _sid: Sid, _b: u64) {}
pub(crate) fn smsg_ot(&mut self, _sid: Sid, _reason: RemoveReason) {}
pub(crate) fn smsg_ob(&mut self, _sid: Sid, _reason: RemoveReason, _b: u64) {}
pub(crate) fn sdata_frames_t(&mut self) {}
pub(crate) fn sdata_frames_b(&mut self, _b: u64) {}
pub(crate) fn rmsg_it(&mut self, _sid: Sid) {}
pub(crate) fn sdata_frames_b(&mut self, _cnt: u64, _b: u64) {}
pub(crate) fn rmsg_ib(&mut self, _sid: Sid, _b: u64) {}
pub(crate) fn rmsg_ot(&mut self, _sid: Sid, _reason: RemoveReason) {}
pub(crate) fn rmsg_ob(&mut self, _sid: Sid, _reason: RemoveReason, _b: u64) {}
pub(crate) fn rdata_frames_t(&mut self) {}
pub(crate) fn rdata_frames_b(&mut self, _b: u64) {}
}
#[cfg(not(feature = "metrics"))]
impl ProtocolMetrics {
pub fn new() -> Result<Self, Box<dyn Error>> { Ok(Self {}) }
}
impl RemoveReason {
#[cfg(feature = "metrics")]
fn to_str(&self) -> &str {
match self {
RemoveReason::Dropped => "Dropped",
RemoveReason::Finished => "Finished",
RemoveReason::Dropped => "Dropped",
}
}
#[cfg(feature = "metrics")]
fn i(&self) -> usize {
pub(crate) fn i(&self) -> usize {
match self {
RemoveReason::Dropped => 0,
RemoveReason::Finished => 1,
RemoveReason::Finished => 0,
RemoveReason::Dropped => 1,
}
}
}


@ -1,25 +1,30 @@
#[cfg(feature = "metrics")]
use crate::metrics::RemoveReason;
use crate::{
event::ProtocolEvent,
frame::InitFrame,
handshake::{ReliableDrain, ReliableSink},
io::{UnreliableDrain, UnreliableSink},
metrics::{ProtocolMetricCache, RemoveReason},
metrics::ProtocolMetricCache,
types::Bandwidth,
ProtocolError, RecvProtocol, SendProtocol,
ProtocolError, RecvProtocol, SendProtocol, UnreliableDrain, UnreliableSink,
};
use async_trait::async_trait;
use std::time::{Duration, Instant};
#[cfg(feature = "trace_pedantic")]
use tracing::trace;
/// used for implementing your own MPSC `Sink` and `Drain`
#[derive(Debug)]
pub /* should be private */ enum MpscMsg {
pub enum MpscMsg {
Event(ProtocolEvent),
InitFrame(InitFrame),
}
/// MPSC implementation of [`SendProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
#[derive(Debug)]
pub struct MpscSendProtcol<D>
pub struct MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
@ -28,8 +33,11 @@ where
metrics: ProtocolMetricCache,
}
/// MPSC implementation of [`RecvProtocol`]
///
/// [`RecvProtocol`]: crate::RecvProtocol
#[derive(Debug)]
pub struct MpscRecvProtcol<S>
pub struct MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
@ -37,7 +45,7 @@ where
metrics: ProtocolMetricCache,
}
impl<D> MpscSendProtcol<D>
impl<D> MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
@ -50,7 +58,7 @@ where
}
}
impl<S> MpscRecvProtcol<S>
impl<S> MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
@ -58,7 +66,7 @@ where
}
#[async_trait]
impl<D> SendProtocol for MpscSendProtcol<D>
impl<D> SendProtocol for MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
@ -69,15 +77,25 @@ where
trace!(?event, "send");
match &event {
ProtocolEvent::Message {
buffer,
data: _data,
mid: _,
sid,
sid: _sid,
} => {
let sid = *sid;
let bytes = buffer.data.len() as u64;
self.metrics.smsg_ib(sid, bytes);
#[cfg(feature = "metrics")]
let (bytes, line) = {
let sid = *_sid;
let bytes = _data.len() as u64;
let line = self.metrics.init_sid(sid);
line.smsg_it.inc();
line.smsg_ib.inc_by(bytes);
(bytes, line)
};
let r = self.drain.send(MpscMsg::Event(event)).await;
self.metrics.smsg_ob(sid, RemoveReason::Finished, bytes);
#[cfg(feature = "metrics")]
{
line.smsg_ot[RemoveReason::Finished.i()].inc();
line.smsg_ob[RemoveReason::Finished.i()].inc_by(bytes);
}
r
},
_ => self.drain.send(MpscMsg::Event(event)).await,
@ -88,7 +106,7 @@ where
}
#[async_trait]
impl<S> RecvProtocol for MpscRecvProtcol<S>
impl<S> RecvProtocol for MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
@ -98,16 +116,17 @@ where
trace!(?event, "recv");
match event {
MpscMsg::Event(e) => {
if let ProtocolEvent::Message {
buffer,
mid: _,
sid,
} = &e
#[cfg(feature = "metrics")]
{
if let ProtocolEvent::Message { data, mid: _, sid } = &e {
let sid = *sid;
let bytes = buffer.data.len() as u64;
self.metrics.rmsg_ib(sid, bytes);
self.metrics.rmsg_ob(sid, RemoveReason::Finished, bytes);
let bytes = data.len() as u64;
let line = self.metrics.init_sid(sid);
line.rmsg_it.inc();
line.rmsg_ib.inc_by(bytes);
line.rmsg_ot[RemoveReason::Finished.i()].inc();
line.rmsg_ob[RemoveReason::Finished.i()].inc_by(bytes);
}
}
Ok(e)
},
@ -117,7 +136,7 @@ where
}
#[async_trait]
impl<D> ReliableDrain for MpscSendProtcol<D>
impl<D> ReliableDrain for MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
@ -127,7 +146,7 @@ where
}
#[async_trait]
impl<S> ReliableSink for MpscRecvProtcol<S>
impl<S> ReliableSink for MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
@ -142,10 +161,7 @@ where
#[cfg(test)]
pub mod test_utils {
use super::*;
use crate::{
io::*,
metrics::{ProtocolMetricCache, ProtocolMetrics},
};
use crate::metrics::{ProtocolMetricCache, ProtocolMetrics};
use async_channel::*;
use std::sync::Arc;
@ -160,7 +176,7 @@ pub mod test_utils {
pub fn ac_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(MpscSendProtcol<ACDrain>, MpscRecvProtcol<ACSink>); 2] {
) -> [(MpscSendProtocol<ACDrain>, MpscRecvProtocol<ACSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
@ -168,12 +184,12 @@ pub mod test_utils {
});
[
(
MpscSendProtcol::new(ACDrain { sender: s1 }, m.clone()),
MpscRecvProtcol::new(ACSink { receiver: r2 }, m.clone()),
MpscSendProtocol::new(ACDrain { sender: s1 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r2 }, m.clone()),
),
(
MpscSendProtcol::new(ACDrain { sender: s2 }, m.clone()),
MpscRecvProtcol::new(ACSink { receiver: r1 }, m.clone()),
MpscSendProtocol::new(ACDrain { sender: s2 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r1 }, m),
),
]
}
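The renamed MpscSendProtocol/MpscRecvProtocol pair can be exercised directly through `ac_bound`. A minimal sketch, not part of this commit — it assumes the SendProtocol/RecvProtocol traits and the `Sid`, `Promises` and `ProtocolEvent` items used by the surrounding module are in scope:

// Sketch only: push an OpenStream and a Message event through the in-memory
// MPSC pair and receive them on the other side. MPSC forwards events as-is,
// so no flush() is needed.
#[tokio::test]
async fn mpsc_roundtrip_sketch() {
    use bytes::Bytes;
    let [p1, p2] = ac_bound(10, None);
    let (mut send, mut recv) = (p1.0, p2.1);
    send.send(ProtocolEvent::OpenStream {
        sid: Sid::new(1),
        prio: 0u8,
        promises: Promises::ORDERED,
        guaranteed_bandwidth: 1_000,
    })
    .await
    .unwrap();
    send.send(ProtocolEvent::Message {
        sid: Sid::new(1),
        mid: 0,
        data: Bytes::from_static(b"ping"),
    })
    .await
    .unwrap();
    assert!(matches!(recv.recv().await.unwrap(), ProtocolEvent::OpenStream { .. }));
    assert!(matches!(recv.recv().await.unwrap(), ProtocolEvent::Message { .. }));
}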

View File

@ -1,12 +1,12 @@
use crate::{
frame::Frame,
message::{MessageBuffer, OutgoingMessage},
frame::OTFrame,
message::OTMessage,
metrics::{ProtocolMetricCache, RemoveReason},
types::{Bandwidth, Mid, Prio, Promises, Sid},
types::{Bandwidth, Mid, Prio, Promises, Sid, HIGHEST_PRIO},
};
use bytes::Bytes;
use std::{
collections::{HashMap, VecDeque},
sync::Arc,
time::Duration,
};
@ -15,7 +15,7 @@ struct StreamInfo {
pub(crate) guaranteed_bandwidth: Bandwidth,
pub(crate) prio: Prio,
pub(crate) promises: Promises,
pub(crate) messages: VecDeque<OutgoingMessage>,
pub(crate) messages: VecDeque<OTMessage>,
}
/// Responsible for queueing messages.
@ -31,8 +31,6 @@ pub(crate) struct PrioManager {
// Send everything ONCE, then keep it till it's confirmed
impl PrioManager {
const HIGHEST_PRIO: u8 = 7;
pub fn new(metrics: ProtocolMetricCache) -> Self {
Self {
streams: HashMap::new(),
@ -67,34 +65,34 @@ impl PrioManager {
pub fn is_empty(&self) -> bool { self.streams.is_empty() }
pub fn add(&mut self, buffer: Arc<MessageBuffer>, mid: Mid, sid: Sid) {
pub fn add(&mut self, buffer: Bytes, mid: Mid, sid: Sid) {
self.streams
.get_mut(&sid)
.unwrap()
.messages
.push_back(OutgoingMessage::new(buffer, mid, sid));
.push_back(OTMessage::new(buffer, mid, sid));
}
/// The bandwidth budget might be exceeded: for technical reasons
/// guaranteed_bandwidth is always granted and frames are always 1400 bytes.
pub fn grab(&mut self, bandwidth: Bandwidth, dt: Duration) -> Vec<Frame> {
pub fn grab(&mut self, bandwidth: Bandwidth, dt: Duration) -> (Vec<OTFrame>, Bandwidth) {
let total_bytes = (bandwidth as f64 * dt.as_secs_f64()) as u64;
let mut cur_bytes = 0u64;
let mut frames = vec![];
let mut prios = [0u64; (Self::HIGHEST_PRIO + 1) as usize];
let mut prios = [0u64; (HIGHEST_PRIO + 1) as usize];
let metrics = &mut self.metrics;
let mut process_stream =
|stream: &mut StreamInfo, mut bandwidth: i64, cur_bytes: &mut u64| {
let mut finished = vec![];
let mut finished = None;
'outer: for (i, msg) in stream.messages.iter_mut().enumerate() {
while let Some(frame) = msg.next() {
let b = if matches!(frame, Frame::DataHeader { .. }) {
25
let b = if let OTFrame::Data { data, .. } = &frame {
crate::frame::TCP_DATA_CNS + 1 + data.len()
} else {
19 + OutgoingMessage::FRAME_DATA_SIZE
};
crate::frame::TCP_DATA_HEADER_CNS + 1
} as u64;
bandwidth -= b as i64;
*cur_bytes += b;
frames.push(frame);
@ -102,41 +100,38 @@ impl PrioManager {
break 'outer;
}
}
finished.push(i);
}
//cleanup
for i in finished.iter().rev() {
let msg = stream.messages.remove(*i).unwrap();
let (sid, bytes) = msg.get_sid_len();
metrics.smsg_ob(sid, RemoveReason::Finished, bytes);
finished = Some(i);
}
if let Some(i) = finished {
//cleanup
stream.messages.drain(..=i);
}
};
// Add guaranteed bandwidth
for (_, stream) in &mut self.streams {
prios[stream.prio.min(Self::HIGHEST_PRIO) as usize] += 1;
for stream in self.streams.values_mut() {
prios[stream.prio as usize] += 1;
let stream_byte_cnt = (stream.guaranteed_bandwidth as f64 * dt.as_secs_f64()) as u64;
process_stream(stream, stream_byte_cnt as i64, &mut cur_bytes);
}
if cur_bytes < total_bytes {
// Add optional bandwidth
for prio in 0..=Self::HIGHEST_PRIO {
for prio in 0..=HIGHEST_PRIO {
if prios[prio as usize] == 0 {
continue;
}
let per_stream_bytes = (total_bytes - cur_bytes) / prios[prio as usize];
for (_, stream) in &mut self.streams {
let per_stream_bytes = ((total_bytes - cur_bytes) / prios[prio as usize]) as i64;
for stream in self.streams.values_mut() {
if stream.prio != prio {
continue;
}
process_stream(stream, per_stream_bytes as i64, &mut cur_bytes);
process_stream(stream, per_stream_bytes, &mut cur_bytes);
}
}
}
frames
(frames, cur_bytes)
}
}
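For intuition, the per-tick arithmetic inside `grab` boils down to a few lines: the budget is `bandwidth * dt`, guaranteed bandwidth is served first, and whatever is left is split evenly among the streams of one priority level. A standalone illustration (not crate code, numbers chosen for the example):

// Leftover budget per stream after the guaranteed pass, mirroring
// `(total_bytes - cur_bytes) / prios[prio]` above.
fn per_stream_optional_bytes(total_bytes: u64, cur_bytes: u64, streams_at_prio: u64) -> u64 {
    if streams_at_prio == 0 || cur_bytes >= total_bytes {
        return 0;
    }
    (total_bytes - cur_bytes) / streams_at_prio
}

fn main() {
    // 1 MB/s over a 10 ms tick gives a 10_000 byte budget.
    let total_bytes = (1_000_000f64 * 0.01) as u64;
    // The guaranteed pass already wrote 4_000 bytes; 3 streams share this prio.
    assert_eq!(per_stream_optional_bytes(total_bytes, 4_000, 3), 2_000);
}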

View File

@ -1,26 +1,28 @@
use crate::{
event::ProtocolEvent,
frame::{Frame, InitFrame},
frame::{ITFrame, InitFrame, OTFrame},
handshake::{ReliableDrain, ReliableSink},
io::{UnreliableDrain, UnreliableSink},
message::{ITMessage, ALLOC_BLOCK},
metrics::{ProtocolMetricCache, RemoveReason},
prio::PrioManager,
types::Bandwidth,
ProtocolError, RecvProtocol, SendProtocol,
types::{Bandwidth, Mid, Sid},
ProtocolError, RecvProtocol, SendProtocol, UnreliableDrain, UnreliableSink,
};
use async_trait::async_trait;
use bytes::BytesMut;
use std::{
collections::HashMap,
sync::Arc,
time::{Duration, Instant},
};
use tracing::info;
#[cfg(feature = "trace_pedantic")]
use tracing::trace;
/// TCP implementation of [`SendProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
#[derive(Debug)]
pub struct TcpSendProtcol<D>
pub struct TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
@ -34,18 +36,22 @@ where
metrics: ProtocolMetricCache,
}
/// TCP implementation of [`RecvProtocol`]
///
/// [`RecvProtocol`]: crate::RecvProtocol
#[derive(Debug)]
pub struct TcpRecvProtcol<S>
pub struct TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
buffer: BytesMut,
incoming: HashMap<Mid, IncomingMsg>,
itmsg_allocator: BytesMut,
incoming: HashMap<Mid, ITMessage>,
sink: S,
metrics: ProtocolMetricCache,
}
impl<D> TcpSendProtcol<D>
impl<D> TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
@ -63,13 +69,14 @@ where
}
}
impl<S> TcpRecvProtcol<S>
impl<S> TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
pub fn new(sink: S, metrics: ProtocolMetricCache) -> Self {
Self {
buffer: BytesMut::new(),
itmsg_allocator: BytesMut::with_capacity(ALLOC_BLOCK),
incoming: HashMap::new(),
sink,
metrics,
@ -78,7 +85,7 @@ where
}
#[async_trait]
impl<D> SendProtocol for TcpSendProtcol<D>
impl<D> SendProtocol for TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
@ -116,12 +123,12 @@ where
} => {
self.store
.open_stream(sid, prio, promises, guaranteed_bandwidth);
event.to_frame().to_bytes(&mut self.buffer);
event.to_frame().write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
},
ProtocolEvent::CloseStream { sid } => {
if self.store.try_close_stream(sid) {
event.to_frame().to_bytes(&mut self.buffer);
event.to_frame().write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
} else {
#[cfg(feature = "trace_pedantic")]
@ -131,7 +138,7 @@ where
},
ProtocolEvent::Shutdown => {
if self.store.is_empty() {
event.to_frame().to_bytes(&mut self.buffer);
event.to_frame().write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
} else {
#[cfg(feature = "trace_pedantic")]
@ -139,35 +146,41 @@ where
self.pending_shutdown = true;
}
},
ProtocolEvent::Message { buffer, mid, sid } => {
self.metrics.smsg_ib(sid, buffer.data.len() as u64);
self.store.add(buffer, mid, sid);
ProtocolEvent::Message { data, mid, sid } => {
self.metrics.smsg_ib(sid, data.len() as u64);
self.store.add(data, mid, sid);
},
}
Ok(())
}
async fn flush(&mut self, bandwidth: Bandwidth, dt: Duration) -> Result<(), ProtocolError> {
let frames = self.store.grab(bandwidth, dt);
let (frames, total_bytes) = self.store.grab(bandwidth, dt);
self.buffer.reserve(total_bytes as usize);
let mut data_frames = 0;
let mut data_bandwidth = 0;
for frame in frames {
if let Frame::Data {
if let OTFrame::Data {
mid: _,
start: _,
data,
} = &frame
{
self.metrics.sdata_frames_b(data.len() as u64);
data_bandwidth += data.len();
data_frames += 1;
}
frame.write_bytes(&mut self.buffer);
}
frame.to_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
}
self.metrics
.sdata_frames_b(data_frames, data_bandwidth as u64);
let mut finished_streams = vec![];
for (i, &sid) in self.closing_streams.iter().enumerate() {
if self.store.try_close_stream(sid) {
#[cfg(feature = "trace_pedantic")]
trace!(?sid, "close stream, as it's now empty");
Frame::CloseStream { sid }.to_bytes(&mut self.buffer);
OTFrame::CloseStream { sid }.write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
finished_streams.push(i);
}
@ -191,7 +204,7 @@ where
if self.pending_shutdown && self.store.is_empty() {
#[cfg(feature = "trace_pedantic")]
trace!("shutdown, as it's now empty");
Frame::Shutdown {}.to_bytes(&mut self.buffer);
OTFrame::Shutdown {}.write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
self.pending_shutdown = false;
}
@ -199,58 +212,42 @@ where
}
}
use crate::{
message::MessageBuffer,
types::{Mid, Sid},
};
#[derive(Debug)]
struct IncomingMsg {
sid: Sid,
length: u64,
data: MessageBuffer,
}
#[async_trait]
impl<S> RecvProtocol for TcpRecvProtcol<S>
impl<S> RecvProtocol for TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError> {
'outer: loop {
while let Some(frame) = Frame::to_frame(&mut self.buffer) {
while let Some(frame) = ITFrame::read_frame(&mut self.buffer) {
#[cfg(feature = "trace_pedantic")]
trace!(?frame, "recv");
match frame {
Frame::Shutdown => break 'outer Ok(ProtocolEvent::Shutdown),
Frame::OpenStream {
ITFrame::Shutdown => break 'outer Ok(ProtocolEvent::Shutdown),
ITFrame::OpenStream {
sid,
prio,
promises,
} => {
break 'outer Ok(ProtocolEvent::OpenStream {
sid,
prio,
prio: prio.min(crate::types::HIGHEST_PRIO),
promises,
guaranteed_bandwidth: 1_000_000,
});
},
Frame::CloseStream { sid } => {
ITFrame::CloseStream { sid } => {
break 'outer Ok(ProtocolEvent::CloseStream { sid });
},
Frame::DataHeader { sid, mid, length } => {
let m = IncomingMsg {
sid,
length,
data: MessageBuffer { data: vec![] },
};
ITFrame::DataHeader { sid, mid, length } => {
let m = ITMessage::new(sid, length, &mut self.itmsg_allocator);
self.metrics.rmsg_ib(sid, length);
self.incoming.insert(mid, m);
},
Frame::Data {
ITFrame::Data {
mid,
start: _,
mut data,
data,
} => {
self.metrics.rdata_frames_b(data.len() as u64);
let m = match self.incoming.get_mut(&mid) {
@ -263,45 +260,48 @@ where
break 'outer Err(ProtocolError::Closed);
},
};
m.data.data.append(&mut data);
if m.data.data.len() == m.length as usize {
m.data.extend_from_slice(&data);
if m.data.len() == m.length as usize {
// finished, yay
drop(m);
let m = self.incoming.remove(&mid).unwrap();
self.metrics.rmsg_ob(
m.sid,
RemoveReason::Finished,
m.data.data.len() as u64,
m.data.len() as u64,
);
break 'outer Ok(ProtocolEvent::Message {
sid: m.sid,
mid,
buffer: Arc::new(m.data),
data: m.data.freeze(),
});
}
},
};
}
let chunk = self.sink.recv().await?;
if self.buffer.is_empty() {
self.buffer = chunk;
} else {
self.buffer.extend_from_slice(&chunk);
}
}
}
}
#[async_trait]
impl<D> ReliableDrain for TcpSendProtcol<D>
impl<D> ReliableDrain for TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
async fn send(&mut self, frame: InitFrame) -> Result<(), ProtocolError> {
let mut buffer = BytesMut::with_capacity(500);
frame.to_bytes(&mut buffer);
frame.write_bytes(&mut buffer);
self.drain.send(buffer).await
}
}
#[async_trait]
impl<S> ReliableSink for TcpRecvProtcol<S>
impl<S> ReliableSink for TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
@ -309,7 +309,7 @@ where
while self.buffer.len() < 100 {
let chunk = self.sink.recv().await?;
self.buffer.extend_from_slice(&chunk);
if let Some(frame) = InitFrame::to_frame(&mut self.buffer) {
if let Some(frame) = InitFrame::read_frame(&mut self.buffer) {
return Ok(frame);
}
}
@ -321,11 +321,9 @@ where
mod test_utils {
//TCP protocol based on Channel
use super::*;
use crate::{
io::*,
metrics::{ProtocolMetricCache, ProtocolMetrics},
};
use crate::metrics::{ProtocolMetricCache, ProtocolMetrics};
use async_channel::*;
use std::sync::Arc;
pub struct TcpDrain {
pub sender: Sender<BytesMut>,
@ -339,7 +337,7 @@ mod test_utils {
pub fn tcp_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(TcpSendProtcol<TcpDrain>, TcpRecvProtcol<TcpSink>); 2] {
) -> [(TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
@ -347,12 +345,12 @@ mod test_utils {
});
[
(
TcpSendProtcol::new(TcpDrain { sender: s1 }, m.clone()),
TcpRecvProtcol::new(TcpSink { receiver: r2 }, m.clone()),
TcpSendProtocol::new(TcpDrain { sender: s1 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r2 }, m.clone()),
),
(
TcpSendProtcol::new(TcpDrain { sender: s2 }, m.clone()),
TcpRecvProtcol::new(TcpSink { receiver: r1 }, m.clone()),
TcpSendProtocol::new(TcpDrain { sender: s2 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r1 }, m),
),
]
}
@ -385,12 +383,13 @@ mod test_utils {
#[cfg(test)]
mod tests {
use crate::{
frame::OTFrame,
metrics::{ProtocolMetricCache, ProtocolMetrics, RemoveReason},
tcp::test_utils::*,
types::{Pid, Promises, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2},
InitProtocol, MessageBuffer, ProtocolEvent, RecvProtocol, SendProtocol,
InitProtocol, ProtocolError, ProtocolEvent, RecvProtocol, SendProtocol,
};
use bytes::BytesMut;
use bytes::{Bytes, BytesMut};
use std::{sync::Arc, time::Duration};
#[tokio::test]
@ -409,7 +408,7 @@ mod tests {
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid: Sid::new(10),
prio: 9u8,
prio: 0u8,
promises: Promises::ORDERED,
guaranteed_bandwidth: 1_000_000,
};
@ -433,9 +432,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 0,
buffer: Arc::new(MessageBuffer {
data: vec![188u8; 600],
}),
data: Bytes::from(&[188u8; 600][..]),
};
s.send(event.clone()).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
@ -445,9 +442,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 1,
buffer: Arc::new(MessageBuffer {
data: vec![7u8; 30],
}),
data: Bytes::from(&[7u8; 30][..]),
};
s.send(event.clone()).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
@ -473,9 +468,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid,
mid: 77,
buffer: Arc::new(MessageBuffer {
data: vec![99u8; 500_000],
}),
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event.clone()).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
@ -503,9 +496,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid,
mid: 77,
buffer: Arc::new(MessageBuffer {
data: vec![99u8; 500_000],
}),
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event).await.unwrap();
let event = ProtocolEvent::CloseStream { sid };
@ -534,9 +525,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid,
mid: 77,
buffer: Arc::new(MessageBuffer {
data: vec![99u8; 500_000],
}),
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event).await.unwrap();
let event = ProtocolEvent::Shutdown {};
@ -553,46 +542,80 @@ mod tests {
assert!(matches!(e, ProtocolEvent::Shutdown { .. }));
}
#[tokio::test]
async fn msg_finishes_after_drop() {
let sid = Sid::new(1);
let [p1, p2] = tcp_bound(10000, None);
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
guaranteed_bandwidth: 0,
};
s.send(event).await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 77,
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 78,
data: Bytes::from(&[100u8; 500_000][..]),
};
s.send(event).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
drop(s);
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::OpenStream { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
}
#[tokio::test]
async fn header_and_data_in_seperate_msg() {
let sid = Sid::new(1);
let (s, r) = async_channel::bounded(10);
let m = ProtocolMetricCache::new("tcp", Arc::new(ProtocolMetrics::new().unwrap()));
let mut r =
super::TcpRecvProtcol::new(super::test_utils::TcpSink { receiver: r }, m.clone());
super::TcpRecvProtocol::new(super::test_utils::TcpSink { receiver: r }, m.clone());
const DATA1: &[u8; 69] =
b"We need to make sure that its okay to send OPEN_STREAM and DATA_HEAD ";
const DATA2: &[u8; 95] = b"in one chunk and (DATA and CLOSE_STREAM) in the second chunk. and then keep the connection open";
let mut bytes = BytesMut::with_capacity(1500);
use crate::frame::Frame;
Frame::OpenStream {
OTFrame::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
}
.to_bytes(&mut bytes);
Frame::DataHeader {
.write_bytes(&mut bytes);
OTFrame::DataHeader {
mid: 99,
sid,
length: (DATA1.len() + DATA2.len()) as u64,
}
.to_bytes(&mut bytes);
.write_bytes(&mut bytes);
s.send(bytes.split()).await.unwrap();
Frame::Data {
OTFrame::Data {
mid: 99,
start: 0,
data: DATA1.to_vec(),
data: Bytes::from(&DATA1[..]),
}
.to_bytes(&mut bytes);
Frame::Data {
.write_bytes(&mut bytes);
OTFrame::Data {
mid: 99,
start: DATA1.len() as u64,
data: DATA2.to_vec(),
data: Bytes::from(&DATA2[..]),
}
.to_bytes(&mut bytes);
Frame::CloseStream { sid }.to_bytes(&mut bytes);
.write_bytes(&mut bytes);
OTFrame::CloseStream { sid }.write_bytes(&mut bytes);
s.send(bytes.split()).await.unwrap();
let e = r.recv().await.unwrap();
@ -605,6 +628,32 @@ mod tests {
assert!(matches!(e, ProtocolEvent::CloseStream { .. }));
}
#[tokio::test]
async fn drop_sink_while_recv() {
let sid = Sid::new(1);
let (s, r) = async_channel::bounded(10);
let m = ProtocolMetricCache::new("tcp", Arc::new(ProtocolMetrics::new().unwrap()));
let mut r =
super::TcpRecvProtocol::new(super::test_utils::TcpSink { receiver: r }, m.clone());
let mut bytes = BytesMut::with_capacity(1500);
OTFrame::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
}
.write_bytes(&mut bytes);
s.send(bytes.split()).await.unwrap();
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::OpenStream { .. }));
let e = tokio::spawn(async move { r.recv().await });
drop(s);
let e = e.await.unwrap();
assert_eq!(e, Err(ProtocolError::Closed));
}
#[tokio::test]
#[should_panic]
async fn send_on_stream_from_remote_without_notify() {
@ -622,9 +671,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 0,
buffer: Arc::new(MessageBuffer {
data: vec![188u8; 600],
}),
data: Bytes::from(&[188u8; 600][..]),
};
p2.0.send(event.clone()).await.unwrap();
p2.0.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
@ -649,9 +696,7 @@ mod tests {
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 0,
buffer: Arc::new(MessageBuffer {
data: vec![188u8; 600],
}),
data: Bytes::from(&[188u8; 600][..]),
};
p2.0.send(event.clone()).await.unwrap();
p2.0.flush(1_000_000, Duration::from_secs(1)).await.unwrap();

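The receive loop above keeps appending chunks from the sink to `self.buffer` until `ITFrame::read_frame` can cut a complete frame off the front, so a frame split across two TCP reads is simply completed by the next chunk. A toy, length-prefixed restatement of that pattern (standalone illustration only, not the crate's actual wire format):

use bytes::{Buf, BytesMut};

// Toy frame: [u16 little-endian length][payload]. Returns None until a whole
// frame has arrived, exactly like the accumulate-then-parse loop above.
fn read_frame(buffer: &mut BytesMut) -> Option<BytesMut> {
    if buffer.len() < 2 {
        return None;
    }
    let len = u16::from_le_bytes([buffer[0], buffer[1]]) as usize;
    if buffer.len() < 2 + len {
        return None; // wait for more chunks
    }
    buffer.advance(2);
    Some(buffer.split_to(len))
}

fn main() {
    let mut buffer = BytesMut::new();
    // The frame arrives split over two chunks.
    buffer.extend_from_slice(&[5, 0, b'h', b'e']);
    assert!(read_frame(&mut buffer).is_none());
    buffer.extend_from_slice(b"llo");
    assert_eq!(&read_frame(&mut buffer).unwrap()[..], b"hello");
}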
View File

@ -2,9 +2,21 @@ use bitflags::bitflags;
use bytes::{Buf, BufMut, BytesMut};
use rand::Rng;
/// MessageID, unique ID per Message.
pub type Mid = u64;
/// ChannelID, unique ID per Channel (Protocol)
pub type Cid = u64;
/// Every Stream has a `Prio` and guaranteed [`Bandwidth`].
/// On every send, the guaranteed part is used first.
/// If there is still bandwidth left, it will be shared by all Streams with the
/// same priority. Prio 0 will be sent first, then 1, ... until the last prio 7
/// is sent. Prio must be < 8!
///
/// [`Bandwidth`]: crate::Bandwidth
pub type Prio = u8;
/// guaranteed `Bandwidth`. See [`Prio`]
///
/// [`Prio`]: crate::Prio
pub type Bandwidth = u64;
bitflags! {
@ -36,20 +48,23 @@ impl Promises {
}
pub(crate) const VELOREN_MAGIC_NUMBER: [u8; 7] = *b"VELOREN";
/// When this semver differs, 2 Networks can't communicate.
pub const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 5, 0];
pub(crate) const STREAM_ID_OFFSET1: Sid = Sid::new(0);
pub(crate) const STREAM_ID_OFFSET2: Sid = Sid::new(u64::MAX / 2);
/// Highest possible Prio to choose (limited for performance reasons)
pub const HIGHEST_PRIO: u8 = 7;
/// Support struct used for uniquely identifying [`Participant`] over the
/// [`Network`].
///
/// [`Participant`]: crate::api::Participant
/// [`Network`]: crate::api::Network
/// Support struct used for uniquely identifying `Participant` over the
/// `Network`.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct Pid {
internal: u128,
}
/// Unique ID per Stream, in one Channel.
/// One side will always start with 0, while the other starts with u64::MAX / 2.
/// The number increases for each created Stream.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct Sid {
internal: u64,
@ -89,19 +104,29 @@ impl Pid {
}
}
#[inline]
pub(crate) fn from_bytes(bytes: &mut BytesMut) -> Self {
Self {
internal: bytes.get_u128_le(),
}
}
#[inline]
pub(crate) fn to_bytes(&self, bytes: &mut BytesMut) { bytes.put_u128_le(self.internal) }
}
impl Sid {
pub const fn new(internal: u64) -> Self { Self { internal } }
pub(crate) fn to_le_bytes(&self) -> [u8; 8] { self.internal.to_le_bytes() }
#[inline]
pub(crate) fn from_bytes(bytes: &mut BytesMut) -> Self {
Self {
internal: bytes.get_u64_le(),
}
}
#[inline]
pub(crate) fn to_bytes(&self, bytes: &mut BytesMut) { bytes.put_u64_le(self.internal) }
}
impl std::fmt::Debug for Pid {

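The new `from_bytes`/`to_bytes` helpers on `Pid` and `Sid` are plain little-endian writes into a `BytesMut`. A quick sanity sketch, not part of this commit — the helpers are `pub(crate)`, so this only compiles inside the crate (e.g. as a unit test) and reuses the `bytes` imports at the top of this file:

#[test]
fn pid_sid_bytes_roundtrip_sketch() {
    let mut buf = BytesMut::new();
    let pid = Pid::new();
    let sid = Sid::new(42);
    pid.to_bytes(&mut buf);
    sid.to_bytes(&mut buf);
    // 16 bytes for the u128 Pid plus 8 bytes for the u64 Sid.
    assert_eq!(buf.len(), 24);
    // from_bytes consumes the buffer again, front to back.
    assert_eq!(Pid::from_bytes(&mut buf), pid);
    assert_eq!(Sid::from_bytes(&mut buf), sid);
    assert!(buf.is_empty());
}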
View File

@ -1,15 +1,12 @@
//!
//!
//!
//! (cd network/examples/async_recv && RUST_BACKTRACE=1 cargo run)
use crate::{
message::{partial_eq_bincode, Message},
participant::{A2bStreamOpen, S2bShutdownBparticipant},
scheduler::Scheduler,
};
use bytes::Bytes;
#[cfg(feature = "compression")]
use lz_fear::raw::DecodeError;
use network_protocol::{Bandwidth, MessageBuffer, Pid, Prio, Promises, Sid};
use network_protocol::{Bandwidth, Pid, Prio, Promises, Sid};
#[cfg(feature = "metrics")]
use prometheus::Registry;
use serde::{de::DeserializeOwned, Serialize};
@ -76,8 +73,8 @@ pub struct Stream {
promises: Promises,
guaranteed_bandwidth: Bandwidth,
send_closed: Arc<AtomicBool>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Arc<MessageBuffer>)>,
b2a_msg_recv_r: Option<async_channel::Receiver<MessageBuffer>>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Bytes)>,
b2a_msg_recv_r: Option<async_channel::Receiver<Bytes>>,
a2b_close_stream_s: Option<mpsc::UnboundedSender<Sid>>,
}
@ -125,17 +122,17 @@ pub enum StreamError {
///
/// # Examples
/// ```rust
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// use futures::executor::block_on;
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2999` to accept connections and connect to port `8080` to connect to a (pseudo) database Application
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// block_on(async{
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async{
/// # //setup pseudo database!
/// # let (database, fd) = Network::new(Pid::new());
/// # std::thread::spawn(fd);
/// # let database = Network::new(Pid::new(), Arc::clone(&runtime));
/// # database.listen(ProtocolAddr::Tcp("127.0.0.1:8080".parse().unwrap())).await?;
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2999".parse().unwrap())).await?;
/// let database = network.connect(ProtocolAddr::Tcp("127.0.0.1:8080".parse().unwrap())).await?;
@ -179,24 +176,20 @@ impl Network {
///
/// # Examples
/// ```rust
/// //Example with tokio
/// use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// let runtime = Runtime::new();
/// let runtime = Runtime::new().unwrap();
/// let network = Network::new(Pid::new(), Arc::new(runtime));
/// ```
///
///
/// Usually you only create a single `Network` for an application,
/// except when client and server run in the same application; then you
/// will want two. However, there are no technical limitations on
/// creating more.
///
/// [`Pid::new()`]: crate::types::Pid::new
/// [`ThreadPool`]: https://docs.rs/uvth/newest/uvth/struct.ThreadPool.html
/// [`uvth`]: https://docs.rs/uvth
/// [`Pid::new()`]: network_protocol::Pid::new
pub fn new(participant_id: Pid, runtime: Arc<Runtime>) -> Self {
Self::internal_new(
participant_id,
@ -215,12 +208,14 @@ impl Network {
///
/// # Examples
/// ```rust
/// # use std::sync::Arc;
/// use prometheus::Registry;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// let runtime = Runtime::new().unwrap();
/// let registry = Registry::new();
/// let (network, f) = Network::new_with_registry(Pid::new(), &registry);
/// std::thread::spawn(f);
/// let network = Network::new_with_registry(Pid::new(), Arc::new(runtime), &registry);
/// ```
/// [`new`]: crate::api::Network::new
#[cfg(feature = "metrics")]
@ -243,7 +238,6 @@ impl Network {
let (scheduler, listen_sender, connect_sender, connected_receiver, shutdown_sender) =
Scheduler::new(
participant_id,
Arc::clone(&runtime),
#[cfg(feature = "metrics")]
registry,
);
@ -274,15 +268,16 @@ impl Network {
/// support multiple Protocols or NICs.
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// ```ignore
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2000` TCP on all NICs and `2001` UDP locally
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network
/// .listen(ProtocolAddr::Tcp("127.0.0.1:2000".parse().unwrap()))
/// .await?;
@ -315,17 +310,17 @@ impl Network {
/// When the method returns the Network either returns a [`Participant`]
/// ready to open [`Streams`] on OR has returned a [`NetworkError`] (e.g.
/// can't connect, or invalid Handshake).
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// ```ignore
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, connect on port `2010` TCP and `2011` UDP like listening above
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// # remote.listen(ProtocolAddr::Tcp("127.0.0.1:2010".parse().unwrap())).await?;
/// # remote.listen(ProtocolAddr::Udp("127.0.0.1:2011".parse().unwrap())).await?;
/// let p1 = network
@ -379,16 +374,16 @@ impl Network {
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2020` TCP and return the Pid of whoever connects
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network
/// .listen(ProtocolAddr::Tcp("127.0.0.1:2020".parse().unwrap()))
/// .await?;
@ -437,10 +432,8 @@ impl Participant {
/// [`Promises`]
///
/// # Arguments
/// * `prio` - valid between 0-63. The priority rates the throughput for
/// messages of the [`Stream`] e.g. prio 5 messages will get 1/2 the speed
/// prio0 messages have. Prio10 messages only 1/4 and Prio 15 only 1/8,
/// etc...
/// * `prio` - defines which stream is processed first when limited on
/// bandwidth. See [`Prio`] for documentation.
/// * `promises` - use a combination of your preferred [`Promises`], see the
/// link for further documentation. You can combine them, e.g.
/// `Promises::ORDERED | Promises::CONSISTENCY`. The Stream will then
@ -452,36 +445,39 @@ impl Participant {
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, Promises, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, connect on port 2100 and open a stream
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// # remote.listen(ProtocolAddr::Tcp("127.0.0.1:2100".parse().unwrap())).await?;
/// let p1 = network
/// .connect(ProtocolAddr::Tcp("127.0.0.1:2100".parse().unwrap()))
/// .await?;
/// let _s1 = p1
/// .open(16, Promises::ORDERED | Promises::CONSISTENCY)
/// .open(4, Promises::ORDERED | Promises::CONSISTENCY)
/// .await?;
/// # Ok(())
/// })
/// # }
/// ```
///
/// [`Prio`]: network_protocol::Prio
/// [`Promises`]: network_protocol::Promises
/// [`Streams`]: crate::api::Stream
#[instrument(name="network", skip(self, prio, promises), fields(p = %self.local_pid))]
pub async fn open(&self, prio: u8, promises: Promises) -> Result<Stream, ParticipantError> {
debug_assert!(prio <= network_protocol::HIGHEST_PRIO, "invalid prio");
let (p2a_return_stream_s, p2a_return_stream_r) = oneshot::channel::<Stream>();
if let Err(e) = self.a2b_open_stream_s.lock().await.send((
prio,
promises,
100000u64,
1_000_000,
p2a_return_stream_s,
)) {
debug!(?e, "bParticipant is already closed, notifying");
@ -509,21 +505,21 @@ impl Participant {
///
/// # Examples
/// ```rust
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr, Promises};
/// use futures::executor::block_on;
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, connect on port 2110 and wait for the other side to open a stream
/// // Note: It's quite unusual to actively connect but then wait for a stream to be opened; usually the Application taking the initiative also wants to create the first Stream.
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// # remote.listen(ProtocolAddr::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
/// let p1 = network.connect(ProtocolAddr::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
/// # let p2 = remote.connected().await?;
/// # p2.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # p2.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// let _s1 = p1.opened().await?;
/// # Ok(())
/// })
@ -565,16 +561,16 @@ impl Participant {
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2030` TCP, return the Pid of whoever connects, and close the connection.
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network
/// .listen(ProtocolAddr::Tcp("127.0.0.1:2030".parse().unwrap()))
/// .await?;
@ -636,7 +632,7 @@ impl Participant {
}
}
/// Returns the remote [`Pid`]
/// Returns the remote [`Pid`](network_protocol::Pid)
pub fn remote_pid(&self) -> Pid { self.remote_pid }
}
@ -650,8 +646,8 @@ impl Stream {
promises: Promises,
guaranteed_bandwidth: Bandwidth,
send_closed: Arc<AtomicBool>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Arc<MessageBuffer>)>,
b2a_msg_recv_r: async_channel::Receiver<MessageBuffer>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Bytes)>,
b2a_msg_recv_r: async_channel::Receiver<Bytes>,
a2b_close_stream_s: mpsc::UnboundedSender<Sid>,
) -> Self {
Self {
@ -694,21 +690,21 @@ impl Stream {
///
/// # Example
/// ```
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::Promises;
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, ProtocolAddr, Pid};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on Port `2200` and wait for a Stream to be opened, then answer `Hello World`
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
/// # // keep it alive
/// # let _stream_p = remote_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # let _stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// let participant_a = network.connected().await?;
/// let mut stream_a = participant_a.opened().await?;
/// //Send Message
@ -734,26 +730,24 @@ impl Stream {
///
/// # Example
/// ```rust
/// use veloren_network::{Network, ProtocolAddr, Pid, Message};
/// # use veloren_network::Promises;
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use bincode;
/// use std::sync::Arc;
/// use veloren_network::{Network, ProtocolAddr, Pid, Message};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote1, fr1) = Network::new(Pid::new());
/// # std::thread::spawn(fr1);
/// # let (remote2, fr2) = Network::new(Pid::new());
/// # std::thread::spawn(fr2);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote1 = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote2 = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # let remote1_p = remote1.connect(ProtocolAddr::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # let remote2_p = remote2.connect(ProtocolAddr::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # assert_eq!(remote1_p.remote_pid(), remote2_p.remote_pid());
/// # remote1_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # remote2_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # remote1_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # remote2_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// let participant_a = network.connected().await?;
/// let participant_b = network.connected().await?;
/// let mut stream_a = participant_a.opened().await?;
@ -779,8 +773,7 @@ impl Stream {
}
#[cfg(debug_assertions)]
message.verify(&self);
self.a2b_msg_s
.send((self.sid, Arc::clone(&message.buffer)))?;
self.a2b_msg_s.send((self.sid, message.data.clone()))?;
Ok(())
}
@ -795,20 +788,20 @@ impl Stream {
///
/// # Example
/// ```
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::Promises;
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, ProtocolAddr, Pid};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on Port `2220` and wait for a Stream to be opened, then listen on it
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
/// # let mut stream_p = remote_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # stream_p.send("Hello World");
/// let participant_a = network.connected().await?;
/// let mut stream_a = participant_a.opened().await?;
@ -828,20 +821,20 @@ impl Stream {
///
/// # Example
/// ```
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::Promises;
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, ProtocolAddr, Pid};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on Port `2230` and wait for a Stream to be opened, then listen on it
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2230".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2230".parse().unwrap())).await?;
/// # let mut stream_p = remote_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # stream_p.send("Hello World");
/// let participant_a = network.connected().await?;
/// let mut stream_a = participant_a.opened().await?;
@ -861,8 +854,8 @@ impl Stream {
match &mut self.b2a_msg_recv_r {
Some(b2a_msg_recv_r) => {
match b2a_msg_recv_r.recv().await {
Ok(msg) => Ok(Message {
buffer: Arc::new(msg),
Ok(data) => Ok(Message {
data,
#[cfg(feature = "compression")]
compressed: self.promises.contains(Promises::COMPRESSED),
}),
@ -883,20 +876,20 @@ impl Stream {
///
/// # Example
/// ```
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::Promises;
/// use futures::executor::block_on;
/// # use std::sync::Arc;
/// use tokio::runtime::Runtime;
/// use veloren_network::{Network, ProtocolAddr, Pid};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on Port `2240` and wait for a Stream to be opened, then listen on it
/// let (network, f) = Network::new(Pid::new());
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// block_on(async {
/// let runtime = Arc::new(Runtime::new().unwrap());
/// let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// runtime.block_on(async {
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2240".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2240".parse().unwrap())).await?;
/// # let mut stream_p = remote_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # stream_p.send("Hello World");
/// # std::thread::sleep(std::time::Duration::from_secs(1));
/// let participant_a = network.connected().await?;
@ -913,9 +906,9 @@ impl Stream {
pub fn try_recv<M: DeserializeOwned>(&mut self) -> Result<Option<M>, StreamError> {
match &mut self.b2a_msg_recv_r {
Some(b2a_msg_recv_r) => match b2a_msg_recv_r.try_recv() {
Ok(msg) => Ok(Some(
Ok(data) => Ok(Some(
Message {
buffer: Arc::new(msg),
data,
#[cfg(feature = "compression")]
compressed: self.promises().contains(Promises::COMPRESSED),
}
@ -954,7 +947,6 @@ impl Drop for Network {
}
tokio::task::block_in_place(|| {
/* This context prevents a panic if dropped in an async fn */
self.runtime.block_on(async {
for (remote_pid, a2s_disconnect_s) in
self.participant_disconnect_sender.lock().await.drain()

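One practical consequence of the switch from `Arc<MessageBuffer>` to `Bytes` above: a `Message` serialized once can be re-sent over several Streams, and `send_raw` only clones a cheap reference-counted handle, never the payload. A sketch under that assumption (two already-opened streams; assumes `send_raw` returns `Result<(), StreamError>` like `send`; not taken from the crate's docs):

// Serialize once, send twice; `message.data.clone()` is an O(1) Bytes
// refcount bump, not a copy of the payload.
fn broadcast(s1: &mut Stream, s2: &mut Stream) -> Result<(), StreamError> {
    let msg = Message::serialize("Hello World", s1);
    s1.send_raw(&msg)?;
    s2.send_raw(&msg)?;
    Ok(())
}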
View File

@ -1,12 +1,11 @@
use async_trait::async_trait;
use bytes::BytesMut;
use network_protocol::{
InitProtocolError, MpscMsg, MpscRecvProtcol, MpscSendProtcol, Pid, ProtocolError,
ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, Sid, TcpRecvProtcol, TcpSendProtcol,
Cid, InitProtocolError, MpscMsg, MpscRecvProtocol, MpscSendProtocol, Pid, ProtocolError,
ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, Sid, TcpRecvProtocol, TcpSendProtocol,
UnreliableDrain, UnreliableSink,
};
#[cfg(feature = "metrics")] use std::sync::Arc;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::tcp::{OwnedReadHalf, OwnedWriteHalf},
@ -15,40 +14,38 @@ use tokio::{
#[derive(Debug)]
pub(crate) enum Protocols {
Tcp((TcpSendProtcol<TcpDrain>, TcpRecvProtcol<TcpSink>)),
Mpsc((MpscSendProtcol<MpscDrain>, MpscRecvProtcol<MpscSink>)),
Tcp((TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>)),
Mpsc((MpscSendProtocol<MpscDrain>, MpscRecvProtocol<MpscSink>)),
}
#[derive(Debug)]
pub(crate) enum SendProtocols {
Tcp(TcpSendProtcol<TcpDrain>),
Mpsc(MpscSendProtcol<MpscDrain>),
Tcp(TcpSendProtocol<TcpDrain>),
Mpsc(MpscSendProtocol<MpscDrain>),
}
#[derive(Debug)]
pub(crate) enum RecvProtocols {
Tcp(TcpRecvProtcol<TcpSink>),
Mpsc(MpscRecvProtcol<MpscSink>),
Tcp(TcpRecvProtocol<TcpSink>),
Mpsc(MpscRecvProtocol<MpscSink>),
}
impl Protocols {
pub(crate) fn new_tcp(stream: tokio::net::TcpStream) -> Self {
pub(crate) fn new_tcp(
stream: tokio::net::TcpStream,
cid: Cid,
metrics: Arc<ProtocolMetrics>,
) -> Self {
let (r, w) = stream.into_split();
#[cfg(feature = "metrics")]
let metrics = ProtocolMetricCache::new(
"foooobaaaarrrrrrrr",
Arc::new(ProtocolMetrics::new().unwrap()),
);
#[cfg(not(feature = "metrics"))]
let metrics = ProtocolMetricCache {};
let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);
let sp = TcpSendProtcol::new(TcpDrain { half: w }, metrics.clone());
let rp = TcpRecvProtcol::new(
let sp = TcpSendProtocol::new(TcpDrain { half: w }, metrics.clone());
let rp = TcpRecvProtocol::new(
TcpSink {
half: r,
buffer: BytesMut::new(),
},
metrics.clone(),
metrics,
);
Protocols::Tcp((sp, rp))
}
@ -56,15 +53,13 @@ impl Protocols {
pub(crate) fn new_mpsc(
sender: mpsc::Sender<MpscMsg>,
receiver: mpsc::Receiver<MpscMsg>,
cid: Cid,
metrics: Arc<ProtocolMetrics>,
) -> Self {
#[cfg(feature = "metrics")]
let metrics =
ProtocolMetricCache::new("mppppsssscccc", Arc::new(ProtocolMetrics::new().unwrap()));
#[cfg(not(feature = "metrics"))]
let metrics = ProtocolMetricCache {};
let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);
let sp = MpscSendProtcol::new(MpscDrain { sender }, metrics.clone());
let rp = MpscRecvProtcol::new(MpscSink { receiver }, metrics.clone());
let sp = MpscSendProtocol::new(MpscDrain { sender }, metrics.clone());
let rp = MpscRecvProtocol::new(MpscSink { receiver }, metrics);
Protocols::Mpsc((sp, rp))
}
@ -157,6 +152,7 @@ impl UnreliableSink for TcpSink {
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.buffer.resize(1500, 0u8);
match self.half.read(&mut self.buffer).await {
Ok(0) => Err(ProtocolError::Closed),
Ok(n) => Ok(self.buffer.split_to(n)),
Err(_) => Err(ProtocolError::Closed),
}
@ -199,6 +195,7 @@ impl UnreliableSink for MpscSink {
#[cfg(test)]
mod tests {
use super::*;
use bytes::Bytes;
use network_protocol::{Promises, RecvProtocol, SendProtocol};
use tokio::net::{TcpListener, TcpStream};
@ -211,8 +208,9 @@ mod tests {
});
let client = TcpStream::connect("127.0.0.1:5000").await.unwrap();
let (_listener, server) = r1.await.unwrap();
let client = Protocols::new_tcp(client);
let server = Protocols::new_tcp(server);
let metrics = Arc::new(ProtocolMetrics::new().unwrap());
let client = Protocols::new_tcp(client, 0, Arc::clone(&metrics));
let server = Protocols::new_tcp(server, 0, Arc::clone(&metrics));
let (mut s, _) = client.split();
let (_, mut r) = server.split();
let event = ProtocolEvent::OpenStream {
@ -222,8 +220,18 @@ mod tests {
guaranteed_bandwidth: 1_000,
};
s.send(event.clone()).await.unwrap();
let r = r.recv().await;
match r {
s.send(ProtocolEvent::Message {
sid: Sid::new(1),
mid: 0,
data: Bytes::from(&[8u8; 8][..]),
})
.await
.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
drop(s); // recv must work even after shutdown of send!
tokio::time::sleep(Duration::from_secs(1)).await;
let res = r.recv().await;
match res {
Ok(ProtocolEvent::OpenStream {
sid,
prio,
@ -235,8 +243,30 @@ mod tests {
assert_eq!(promises, Promises::GUARANTEED_DELIVERY);
},
_ => {
panic!("wrong type {:?}", r);
panic!("wrong type {:?}", res);
},
}
r.recv().await.unwrap();
}
#[tokio::test]
async fn tokio_sink_stop_after_drop() {
let listener = TcpListener::bind("127.0.0.1:5001").await.unwrap();
let r1 = tokio::spawn(async move {
let (server, _) = listener.accept().await.unwrap();
(listener, server)
});
let client = TcpStream::connect("127.0.0.1:5001").await.unwrap();
let (_listener, server) = r1.await.unwrap();
let metrics = Arc::new(ProtocolMetrics::new().unwrap());
let client = Protocols::new_tcp(client, 0, Arc::clone(&metrics));
let server = Protocols::new_tcp(server, 0, Arc::clone(&metrics));
let (s, _) = client.split();
let (_, mut r) = server.split();
let e = tokio::spawn(async move { r.recv().await });
drop(s);
let e = e.await.unwrap();
assert!(e.is_err());
assert_eq!(e.unwrap_err(), ProtocolError::Closed);
}
}
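The Ok(0) arm added to `TcpSink::recv` above is what turns a remote hang-up into `ProtocolError::Closed` instead of an endless stream of empty reads. Restated as a standalone pattern (illustration only, not the crate's code):

use tokio::io::AsyncReadExt;

// For tokio's TCP reads, Ok(0) means the peer closed the connection and must
// be mapped to an error; otherwise the caller keeps polling an empty stream.
async fn read_chunk(
    half: &mut tokio::net::tcp::OwnedReadHalf,
    buf: &mut [u8],
) -> Result<usize, &'static str> {
    match half.read(buf).await {
        Ok(0) => Err("remote closed"),
        Ok(n) => Ok(n),
        Err(_) => Err("remote closed"),
    }
}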

View File

@ -39,29 +39,27 @@
//!
//! # Examples
//! ```rust
//! use futures::{executor::block_on, join};
//! use tokio::task::sleep;
//! use std::sync::Arc;
//! use tokio::{join, runtime::Runtime, time::sleep};
//! use veloren_network::{Network, Pid, Promises, ProtocolAddr};
//!
//! // Client
//! async fn client() -> std::result::Result<(), Box<dyn std::error::Error>> {
//! async fn client(runtime: Arc<Runtime>) -> std::result::Result<(), Box<dyn std::error::Error>> {
//! sleep(std::time::Duration::from_secs(1)).await; // `connect` MUST be after `listen`
//! let (client_network, f) = Network::new(Pid::new());
//! std::thread::spawn(f);
//! let client_network = Network::new(Pid::new(), runtime);
//! let server = client_network
//! .connect(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .await?;
//! let mut stream = server
//! .open(10, Promises::ORDERED | Promises::CONSISTENCY)
//! .open(4, Promises::ORDERED | Promises::CONSISTENCY)
//! .await?;
//! stream.send("Hello World")?;
//! Ok(())
//! }
//!
//! // Server
//! async fn server() -> std::result::Result<(), Box<dyn std::error::Error>> {
//! let (server_network, f) = Network::new(Pid::new());
//! std::thread::spawn(f);
//! async fn server(runtime: Arc<Runtime>) -> std::result::Result<(), Box<dyn std::error::Error>> {
//! let server_network = Network::new(Pid::new(), runtime);
//! server_network
//! .listen(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .await?;
@ -74,8 +72,10 @@
//! }
//!
//! fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
//! block_on(async {
//! let (result_c, result_s) = join!(client(), server(),);
//! let runtime = Arc::new(Runtime::new().unwrap());
//! runtime.block_on(async {
//! let (result_c, result_s) =
//! join!(client(Arc::clone(&runtime)), server(Arc::clone(&runtime)),);
//! result_c?;
//! result_s?;
//! Ok(())
@ -95,14 +95,14 @@
//! [`Streams`]: crate::api::Stream
//! [`send`]: crate::api::Stream::send
//! [`recv`]: crate::api::Stream::recv
//! [`Pid`]: crate::types::Pid
//! [`Pid`]: network_protocol::Pid
//! [`ProtocolAddr`]: crate::api::ProtocolAddr
//! [`Promises`]: crate::types::Promises
//! [`Promises`]: network_protocol::Promises
mod api;
mod channel;
mod message;
#[cfg(feature = "metrics")] mod metrics;
mod metrics;
mod participant;
mod scheduler;

View File

@ -1,10 +1,9 @@
use serde::{de::DeserializeOwned, Serialize};
//use std::collections::VecDeque;
use crate::api::{Stream, StreamError};
use network_protocol::MessageBuffer;
use bytes::Bytes;
#[cfg(feature = "compression")]
use network_protocol::Promises;
use std::{io, sync::Arc};
use serde::{de::DeserializeOwned, Serialize};
use std::io;
#[cfg(all(feature = "compression", debug_assertions))]
use tracing::warn;
@ -16,7 +15,7 @@ use tracing::warn;
/// [`Stream`]: crate::api::Stream
/// [`send_raw`]: crate::api::Stream::send_raw
pub struct Message {
pub(crate) buffer: Arc<MessageBuffer>,
pub(crate) data: Bytes,
#[cfg(feature = "compression")]
pub(crate) compressed: bool,
}
@ -58,7 +57,7 @@ impl Message {
let _stream = stream;
Self {
buffer: Arc::new(MessageBuffer { data }),
data: Bytes::from(data),
#[cfg(feature = "compression")]
compressed,
}
@ -73,18 +72,18 @@ impl Message {
/// ```
/// # use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::Promises;
/// # use futures::executor::block_on;
/// # use tokio::runtime::Runtime;
/// # use std::sync::Arc;
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on Port `2300` and wait for a Stream to be opened, then listen on it
/// # let (network, f) = Network::new(Pid::new());
/// # std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new());
/// # std::thread::spawn(fr);
/// # block_on(async {
/// # let runtime = Arc::new(Runtime::new().unwrap());
/// # let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// # runtime.block_on(async {
/// # network.listen(ProtocolAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?;
/// # let mut stream_p = remote_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # stream_p.send("Hello World");
/// # let participant_a = network.connected().await?;
/// let mut stream_a = participant_a.opened().await?;
@ -99,33 +98,27 @@ impl Message {
/// [`recv_raw`]: crate::api::Stream::recv_raw
pub fn deserialize<M: DeserializeOwned>(self) -> Result<M, StreamError> {
#[cfg(not(feature = "compression"))]
let uncompressed_data = match Arc::try_unwrap(self.buffer) {
Ok(d) => d.data,
Err(b) => b.data.clone(),
};
let uncompressed_data = self.data;
#[cfg(feature = "compression")]
let uncompressed_data = if self.compressed {
{
let mut uncompressed_data = Vec::with_capacity(self.buffer.data.len() * 2);
let mut uncompressed_data = Vec::with_capacity(self.data.len() * 2);
if let Err(e) = lz_fear::raw::decompress_raw(
&self.buffer.data,
&self.data,
&[0; 0],
&mut uncompressed_data,
usize::MAX,
) {
return Err(StreamError::Compression(e));
}
uncompressed_data
Bytes::from(uncompressed_data)
}
} else {
match Arc::try_unwrap(self.buffer) {
Ok(d) => d.data,
Err(b) => b.data.clone(),
}
self.data
};
match bincode::deserialize(uncompressed_data.as_slice()) {
match bincode::deserialize(&uncompressed_data) {
Ok(m) => Ok(m),
Err(e) => Err(StreamError::Deserialize(e)),
}
@ -215,25 +208,25 @@ mod tests {
#[test]
fn serialize_test() {
let msg = Message::serialize("abc", &stub_stream(false));
assert_eq!(msg.buffer.data.len(), 11);
assert_eq!(msg.buffer.data[0], 3);
assert_eq!(msg.buffer.data[1..7], [0, 0, 0, 0, 0, 0]);
assert_eq!(msg.buffer.data[8], b'a');
assert_eq!(msg.buffer.data[9], b'b');
assert_eq!(msg.buffer.data[10], b'c');
assert_eq!(msg.data.len(), 11);
assert_eq!(msg.data[0], 3);
assert_eq!(msg.data[1..7], [0, 0, 0, 0, 0, 0]);
assert_eq!(msg.data[8], b'a');
assert_eq!(msg.data[9], b'b');
assert_eq!(msg.data[10], b'c');
}
#[cfg(feature = "compression")]
#[test]
fn serialize_compress_small() {
let msg = Message::serialize("abc", &stub_stream(true));
assert_eq!(msg.buffer.data.len(), 12);
assert_eq!(msg.buffer.data[0], 176);
assert_eq!(msg.buffer.data[1], 3);
assert_eq!(msg.buffer.data[2..8], [0, 0, 0, 0, 0, 0]);
assert_eq!(msg.buffer.data[9], b'a');
assert_eq!(msg.buffer.data[10], b'b');
assert_eq!(msg.buffer.data[11], b'c');
assert_eq!(msg.data.len(), 12);
assert_eq!(msg.data[0], 176);
assert_eq!(msg.data[1], 3);
assert_eq!(msg.data[2..8], [0, 0, 0, 0, 0, 0]);
assert_eq!(msg.data[9], b'a');
assert_eq!(msg.data[10], b'b');
assert_eq!(msg.data[11], b'c');
}
#[cfg(feature = "compression")]
@ -251,14 +244,14 @@ mod tests {
"assets/data/plants/flowers/greenrose.ron",
);
let msg = Message::serialize(&msg, &stub_stream(true));
assert_eq!(msg.buffer.data.len(), 79);
assert_eq!(msg.buffer.data[0], 34);
assert_eq!(msg.buffer.data[1], 5);
assert_eq!(msg.buffer.data[2], 0);
assert_eq!(msg.buffer.data[3], 1);
assert_eq!(msg.buffer.data[20], 20);
assert_eq!(msg.buffer.data[40], 115);
assert_eq!(msg.buffer.data[60], 111);
assert_eq!(msg.data.len(), 79);
assert_eq!(msg.data[0], 34);
assert_eq!(msg.data[1], 5);
assert_eq!(msg.data[2], 0);
assert_eq!(msg.data[3], 1);
assert_eq!(msg.data[20], 20);
assert_eq!(msg.data[40], 115);
assert_eq!(msg.data[60], 111);
}
#[cfg(feature = "compression")]
@ -281,6 +274,6 @@ mod tests {
}
}
let msg = Message::serialize(&msg, &stub_stream(true));
assert_eq!(msg.buffer.data.len(), 1331);
assert_eq!(msg.data.len(), 1331);
}
}
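For reference, the byte counts asserted in `serialize_test` follow directly from bincode's default encoding: a string is prefixed with its length as a little-endian u64, so "abc" becomes 8 + 3 = 11 bytes. A standalone check (independent of this crate):

fn main() {
    let encoded: Vec<u8> = bincode::serialize("abc").unwrap();
    assert_eq!(encoded.len(), 11);
    // 8-byte little-endian length prefix ...
    assert_eq!(&encoded[0..8], &3u64.to_le_bytes());
    // ... followed by the UTF-8 payload.
    assert_eq!(&encoded[8..], b"abc");
}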

View File

@ -1,12 +1,10 @@
use network_protocol::Pid;
use network_protocol::{Cid, Pid};
#[cfg(feature = "metrics")]
use prometheus::{IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Opts, Registry};
use std::error::Error;
/// 1:1 relation between NetworkMetrics and Network
/// use 2NF here and avoid redundant data like CHANNEL AND PARTICIPANT encoding.
/// as this will cause a matrix that is full of 0 but needs alot of bandwith and
/// storage
#[allow(dead_code)]
#[cfg(feature = "metrics")]
pub struct NetworkMetrics {
pub listen_requests_total: IntCounterVec,
pub connect_requests_total: IntCounterVec,
@ -23,8 +21,11 @@ pub struct NetworkMetrics {
pub network_info: IntGauge,
}
#[cfg(not(feature = "metrics"))]
pub struct NetworkMetrics {}
#[cfg(feature = "metrics")]
impl NetworkMetrics {
#[allow(dead_code)]
pub fn new(local_pid: &Pid) -> Result<Self, Box<dyn Error>> {
let listen_requests_total = IntCounterVec::new(
Opts::new(
@ -123,6 +124,46 @@ impl NetworkMetrics {
registry.register(Box::new(self.network_info.clone()))?;
Ok(())
}
pub(crate) fn channels_connected(&self, remote_p: &str, no: usize, cid: Cid) {
self.channels_connected_total
.with_label_values(&[remote_p])
.inc();
self.participants_channel_ids
.with_label_values(&[remote_p, &no.to_string()])
.set(cid as i64);
}
pub(crate) fn channels_disconnected(&self, remote_p: &str) {
self.channels_disconnected_total
.with_label_values(&[remote_p])
.inc();
}
pub(crate) fn streams_opened(&self, remote_p: &str) {
self.streams_opened_total
.with_label_values(&[remote_p])
.inc();
}
pub(crate) fn streams_closed(&self, remote_p: &str) {
self.streams_closed_total
.with_label_values(&[remote_p])
.inc();
}
}
#[cfg(not(feature = "metrics"))]
impl NetworkMetrics {
pub fn new(_local_pid: &Pid) -> Result<Self, Box<dyn Error>> { Ok(Self {}) }
pub(crate) fn channels_connected(&self, _remote_p: &str, _no: usize, _cid: Cid) {}
pub(crate) fn channels_disconnected(&self, _remote_p: &str) {}
pub(crate) fn streams_opened(&self, _remote_p: &str) {}
pub(crate) fn streams_closed(&self, _remote_p: &str) {}
}
impl std::fmt::Debug for NetworkMetrics {

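The four helpers added above give callers a single call site per event instead of repeating the label lookups. A crate-internal sketch (assumes the `metrics` feature; the helpers are `pub(crate)`):

// Create the per-Network metrics and bump the counters through the helpers.
fn record_sketch(local_pid: &Pid) -> Result<(), Box<dyn Error>> {
    let metrics = NetworkMetrics::new(local_pid)?;
    metrics.channels_connected("remote_pid", 0, 42);
    metrics.streams_opened("remote_pid");
    metrics.streams_closed("remote_pid");
    metrics.channels_disconnected("remote_pid");
    Ok(())
}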
View File

@ -1,13 +1,12 @@
#[cfg(feature = "metrics")]
use crate::metrics::NetworkMetrics;
use crate::{
api::{ParticipantError, Stream},
channel::{Protocols, RecvProtocols, SendProtocols},
metrics::NetworkMetrics,
};
use bytes::Bytes;
use futures_util::{FutureExt, StreamExt};
use network_protocol::{
Bandwidth, Cid, MessageBuffer, Pid, Prio, Promises, ProtocolEvent, RecvProtocol, SendProtocol,
Sid,
Bandwidth, Cid, Pid, Prio, Promises, ProtocolEvent, RecvProtocol, SendProtocol, Sid,
};
use std::{
collections::HashMap,
@ -41,7 +40,7 @@ struct StreamInfo {
prio: Prio,
promises: Promises,
send_closed: Arc<AtomicBool>,
b2a_msg_recv_s: Mutex<async_channel::Sender<MessageBuffer>>,
b2a_msg_recv_s: Mutex<async_channel::Sender<Bytes>>,
}
#[derive(Debug)]
@ -68,7 +67,6 @@ pub struct BParticipant {
streams: RwLock<HashMap<Sid, StreamInfo>>,
run_channels: Option<ControlChannels>,
shutdown_barrier: AtomicI32,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>,
no_channel_error_info: RwLock<(Instant, u64)>,
}
@ -86,7 +84,7 @@ impl BParticipant {
local_pid: Pid,
remote_pid: Pid,
offset_sid: Sid,
#[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
metrics: Arc<NetworkMetrics>,
) -> (
Self,
mpsc::UnboundedSender<A2bStreamOpen>,
@ -118,7 +116,6 @@ impl BParticipant {
Self::BARR_CHANNEL + Self::BARR_SEND + Self::BARR_RECV,
),
run_channels,
#[cfg(feature = "metrics")]
metrics,
no_channel_error_info: RwLock::new((Instant::now(), 0)),
},
@ -139,12 +136,11 @@ impl BParticipant {
let (b2b_force_close_recv_protocol_s, b2b_force_close_recv_protocol_r) =
async_channel::unbounded::<Cid>();
let (b2b_notify_send_of_recv_s, b2b_notify_send_of_recv_r) =
mpsc::unbounded_channel::<ProtocolEvent>();
crossbeam_channel::unbounded::<ProtocolEvent>();
let (a2b_close_stream_s, a2b_close_stream_r) = mpsc::unbounded_channel::<Sid>();
const STREAM_BOUND: usize = 10_000;
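// upper bound for the api -> send_mgr message channel created just below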
let (a2b_msg_s, a2b_msg_r) =
crossbeam_channel::bounded::<(Sid, Arc<MessageBuffer>)>(STREAM_BOUND);
let (a2b_msg_s, a2b_msg_r) = crossbeam_channel::bounded::<(Sid, Bytes)>(STREAM_BOUND);
let run_channels = self.run_channels.take().unwrap();
trace!("start all managers");
@ -185,20 +181,22 @@ impl BParticipant {
}
//TODO: local stream_cid: HashMap<Sid, Cid> to know the respective protocol
#[allow(clippy::too_many_arguments)]
async fn send_mgr(
&self,
mut a2b_open_stream_r: mpsc::UnboundedReceiver<A2bStreamOpen>,
mut a2b_close_stream_r: mpsc::UnboundedReceiver<Sid>,
a2b_msg_r: crossbeam_channel::Receiver<(Sid, Arc<MessageBuffer>)>,
a2b_msg_r: crossbeam_channel::Receiver<(Sid, Bytes)>,
mut b2b_add_protocol_r: mpsc::UnboundedReceiver<(Cid, SendProtocols)>,
b2b_close_send_protocol_r: async_channel::Receiver<Cid>,
mut b2b_notify_send_of_recv_r: mpsc::UnboundedReceiver<ProtocolEvent>,
b2b_notify_send_of_recv_r: crossbeam_channel::Receiver<ProtocolEvent>,
_b2s_prio_statistic_s: mpsc::UnboundedSender<B2sPrioStatistic>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Arc<MessageBuffer>)>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Bytes)>,
a2b_close_stream_s: mpsc::UnboundedSender<Sid>,
) {
let mut send_protocols: HashMap<Cid, SendProtocols> = HashMap::new();
let mut interval = tokio::time::interval(Self::TICK_TIME);
let mut last_instant = Instant::now();
let mut stream_ids = self.offset_sid;
let mut fake_mid = 0; //TODO: move MID to protocol, should be inc per stream ? or ?
trace!("workaround, actively wait for first protocol");
@ -207,41 +205,21 @@ impl BParticipant {
.await
.map(|(c, p)| send_protocols.insert(c, p));
loop {
let (open, close, r_event, _, addp, remp) = select!(
n = a2b_open_stream_r.recv().fuse() => (Some(n), None, None, None, None, None),
n = a2b_close_stream_r.recv().fuse() => (None, Some(n), None, None, None, None),
n = b2b_notify_send_of_recv_r.recv().fuse() => (None, None, Some(n), None, None, None),
_ = interval.tick() => (None, None, None, Some(()), None, None),
n = b2b_add_protocol_r.recv().fuse() => (None, None, None, None, Some(n), None),
n = b2b_close_send_protocol_r.recv().fuse() => (None, None, None, None, None, Some(n)),
let (open, close, _, addp, remp) = select!(
Some(n) = a2b_open_stream_r.recv().fuse() => (Some(n), None, None, None, None),
Some(n) = a2b_close_stream_r.recv().fuse() => (None, Some(n), None, None, None),
_ = interval.tick() => (None, None, Some(()), None, None),
Some(n) = b2b_add_protocol_r.recv().fuse() => (None, None, None, Some(n), None),
Ok(n) = b2b_close_send_protocol_r.recv().fuse() => (None, None, None, None, Some(n)),
);
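// matching `Some(n)` / `Ok(n)` disables a branch whose channel has closed, so a
// closed receiver no longer resolves with `None` on every iteration and spins
// this loop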
addp.flatten().map(|(cid, p)| {
addp.map(|(cid, p)| {
debug!(?cid, "add protocol");
send_protocols.insert(cid, p)
});
match remp {
Some(Ok(cid)) => {
debug!(?cid, "remove protocol");
match send_protocols.remove(&cid) {
Some(mut prot) => {
trace!("blocking flush");
let _ = prot.flush(u64::MAX, Duration::from_secs(1)).await;
trace!("shutdown prot");
let _ = prot.send(ProtocolEvent::Shutdown).await;
},
None => trace!("tried to remove protocol twice"),
};
if send_protocols.is_empty() {
break;
}
},
_ => (),
};
let cid = 0;
let active = match send_protocols.get_mut(&cid) {
Some(a) => a,
let (cid, active) = match send_protocols.iter_mut().next() {
Some((cid, a)) => (*cid, a),
None => {
warn!("no channel");
continue;
@ -249,11 +227,7 @@ impl BParticipant {
};
let active_err = async {
if let Some(Some(event)) = r_event {
active.notify_from_recv(event);
}
if let Some(Some((prio, promises, guaranteed_bandwidth, return_s))) = open {
if let Some((prio, promises, guaranteed_bandwidth, return_s)) = open {
let sid = stream_ids;
trace!(?sid, "open stream");
stream_ids += Sid::from(1);
@ -279,19 +253,39 @@ impl BParticipant {
active.send(event).await?;
}
// process recv content first
let mut closeevents = b2b_notify_send_of_recv_r
.try_iter()
.map(|e| {
if matches!(e, ProtocolEvent::OpenStream { .. }) {
active.notify_from_recv(e);
None
} else {
Some(e)
}
})
.collect::<Vec<_>>();
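// OpenStream events were forwarded to the send protocol right away; the
// remaining (close) events are held back until the queued messages below have
// been handed over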
// get all messages and assign them to a channel
for (sid, buffer) in a2b_msg_r.try_iter() {
fake_mid += 1;
active
.send(ProtocolEvent::Message {
buffer,
data: buffer,
mid: fake_mid,
sid,
})
.await?
}
if let Some(Some(sid)) = close {
// process recv content afterwards
for e in closeevents.drain(..) {
    if let Some(e) = e {
        active.notify_from_recv(e);
    }
}
if let Some(sid) = close {
trace!(?stream_ids, "delete stream");
self.delete_stream(sid).await;
// Fire&Forget; the protocol will take care to verify that this Frame is delayed
@ -299,9 +293,10 @@ impl BParticipant {
active.send(ProtocolEvent::CloseStream { sid }).await?;
}
active
.flush(1_000_000, Duration::from_secs(1) /* TODO */)
.await?; //this actually blocks, so we cant set streams whilte it.
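// measure the real time since the last send pass; the flush budget below is
// based on this elapsed time instead of a fixed one-second window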
let send_time = Instant::now();
let diff = send_time.duration_since(last_instant);
last_instant = send_time;
active.flush(1_000_000_000, diff).await?; //this actually blocks, so we can't set streams while it runs.
let r: Result<(), network_protocol::ProtocolError> = Ok(());
r
}
@ -311,6 +306,24 @@ impl BParticipant {
// remote recv will now fail, which will trigger remote send which will trigger
// recv
send_protocols.remove(&cid).unwrap();
self.metrics.channels_disconnected(&self.remote_pid_string);
}
if let Some(cid) = remp {
debug!(?cid, "remove protocol");
match send_protocols.remove(&cid) {
Some(mut prot) => {
self.metrics.channels_disconnected(&self.remote_pid_string);
trace!("blocking flush");
let _ = prot.flush(u64::MAX, Duration::from_secs(1)).await;
trace!("shutdown prot");
let _ = prot.send(ProtocolEvent::Shutdown).await;
},
None => trace!("tried to remove protocol twice"),
};
if send_protocols.is_empty() {
break;
}
}
}
trace!("Stop send_mgr");
@ -318,14 +331,15 @@ impl BParticipant {
.fetch_sub(Self::BARR_SEND, Ordering::Relaxed);
}
#[allow(clippy::too_many_arguments)]
async fn recv_mgr(
&self,
b2a_stream_opened_s: mpsc::UnboundedSender<Stream>,
mut b2b_add_protocol_r: mpsc::UnboundedReceiver<(Cid, RecvProtocols)>,
b2b_force_close_recv_protocol_r: async_channel::Receiver<Cid>,
b2b_close_send_protocol_s: async_channel::Sender<Cid>,
b2b_notify_send_of_recv_s: mpsc::UnboundedSender<ProtocolEvent>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Arc<MessageBuffer>)>,
b2b_notify_send_of_recv_s: crossbeam_channel::Sender<ProtocolEvent>,
a2b_msg_s: crossbeam_channel::Sender<(Sid, Bytes)>,
a2b_close_stream_s: mpsc::UnboundedSender<Sid>,
) {
let mut recv_protocols: HashMap<Cid, JoinHandle<()>> = HashMap::new();
@ -355,23 +369,27 @@ impl BParticipant {
loop {
let (event, addp, remp) = select!(
next = hacky_recv_r.recv().fuse() => (Some(next), None, None),
Some(next) = b2b_add_protocol_r.recv().fuse() => (None, Some(next), None),
next = b2b_force_close_recv_protocol_r.recv().fuse() => (None, None, Some(next)),
Some(n) = hacky_recv_r.recv().fuse() => (Some(n), None, None),
Some(n) = b2b_add_protocol_r.recv().fuse() => (None, Some(n), None),
Ok(n) = b2b_force_close_recv_protocol_r.recv().fuse() => (None, None, Some(n)),
else => {
error!("recv_mgr -> something is seriously wrong!, end recv_mgr");
break;
}
);
addp.map(|(cid, p)| {
if let Some((cid, p)) = addp {
debug!(?cid, "add protocol");
retrigger(cid, p, &mut recv_protocols);
});
if let Some(Ok(cid)) = remp {
};
if let Some(cid) = remp {
// no need to stop the send_mgr here as it has been canceled before
if remove_c(&mut recv_protocols, &cid) {
break;
}
};
if let Some(Some((cid, r, p))) = event {
if let Some((cid, r, p)) = event {
match r {
Ok(ProtocolEvent::OpenStream {
sid,
@ -381,6 +399,8 @@ impl BParticipant {
}) => {
trace!(?sid, "open stream");
let _ = b2b_notify_send_of_recv_s.send(r.unwrap());
// waiting for receiving is not necessary, because the send_mgr will first
// process this event before processing messages!
let stream = self
.create_stream(
sid,
@ -400,22 +420,11 @@ impl BParticipant {
self.delete_stream(sid).await;
retrigger(cid, p, &mut recv_protocols);
},
Ok(ProtocolEvent::Message {
buffer,
mid: _,
sid,
}) => {
let buffer = Arc::try_unwrap(buffer).unwrap();
Ok(ProtocolEvent::Message { data, mid: _, sid }) => {
let lock = self.streams.read().await;
match lock.get(&sid) {
Some(stream) => {
stream
.b2a_msg_recv_s
.lock()
.await
.send(buffer)
.await
.unwrap();
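// ignore the error: the receiving Stream may already have been dropped
// locally while this message was still in flight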
let _ = stream.b2a_msg_recv_s.lock().await.send(data).await;
},
None => warn!("recv a msg with orphan stream"),
};
@ -442,7 +451,11 @@ impl BParticipant {
}
}
}
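// once receiving has ended, mark every remaining stream as closed, even when
// no local shutdown was triggered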
trace!("receiving no longer possible, closing all streams");
for (_, si) in self.streams.write().await.drain() {
si.send_closed.store(true, Ordering::Relaxed);
self.metrics.streams_closed(&self.remote_pid_string);
}
trace!("Stop recv_mgr");
self.shutdown_barrier
.fetch_sub(Self::BARR_RECV, Ordering::Relaxed);
@ -459,13 +472,11 @@ impl BParticipant {
.for_each_concurrent(None, |(cid, _, protocol, b2s_create_channel_done_s)| {
// This channel is now configured, and we are running it in scope of the
// participant.
//let w2b_frames_s = w2b_frames_s.clone();
let channels = Arc::clone(&self.channels);
let b2b_add_send_protocol_s = b2b_add_send_protocol_s.clone();
let b2b_add_recv_protocol_s = b2b_add_recv_protocol_s.clone();
async move {
let mut lock = channels.write().await;
#[cfg(feature = "metrics")]
let mut channel_no = lock.len();
lock.insert(
cid,
@ -479,21 +490,12 @@ impl BParticipant {
b2b_add_send_protocol_s.send((cid, send)).unwrap();
b2b_add_recv_protocol_s.send((cid, recv)).unwrap();
b2s_create_channel_done_s.send(()).unwrap();
#[cfg(feature = "metrics")]
{
self.metrics
.channels_connected_total
.with_label_values(&[&self.remote_pid_string])
.inc();
if channel_no > 5 {
debug!(?channel_no, "metrics will overwrite channel #5");
channel_no = 5;
}
self.metrics
.participants_channel_ids
.with_label_values(&[&self.remote_pid_string, &channel_no.to_string()])
.set(cid as i64);
}
.channels_connected(&self.remote_pid_string, channel_no, cid);
}
})
.await;
@ -544,6 +546,7 @@ impl BParticipant {
}
}
};
let (timeout_time, sender) = s2b_shutdown_bparticipant_r.await.unwrap();
debug!("participant_shutdown_mgr triggered. Closing all streams for send");
{
@ -602,11 +605,7 @@ impl BParticipant {
/// Stopping API and participant usage
/// Protocol will take care of the order of the frame
async fn delete_stream(
&self,
sid: Sid,
/* #[cfg(feature = "metrics")] frames_out_total_cache: &mut MultiCidFrameCache, */
) {
async fn delete_stream(&self, sid: Sid) {
let stream = { self.streams.write().await.remove(&sid) };
match stream {
Some(si) => {
@ -617,12 +616,7 @@ impl BParticipant {
trace!("Couldn't find the stream, might be simultaneous close from local/remote")
},
}
/*
#[cfg(feature = "metrics")]
self.metrics
.streams_closed_total
.with_label_values(&[&self.remote_pid_string])
.inc();*/
self.metrics.streams_closed(&self.remote_pid_string);
}
async fn create_stream(
@ -631,10 +625,10 @@ impl BParticipant {
prio: Prio,
promises: Promises,
guaranteed_bandwidth: Bandwidth,
a2b_msg_s: &crossbeam_channel::Sender<(Sid, Arc<MessageBuffer>)>,
a2b_msg_s: &crossbeam_channel::Sender<(Sid, Bytes)>,
a2b_close_stream_s: &mpsc::UnboundedSender<Sid>,
) -> Stream {
let (b2a_msg_recv_s, b2a_msg_recv_r) = async_channel::unbounded::<MessageBuffer>();
let (b2a_msg_recv_s, b2a_msg_recv_r) = async_channel::unbounded::<Bytes>();
let send_closed = Arc::new(AtomicBool::new(false));
self.streams.write().await.insert(sid, StreamInfo {
prio,
@ -642,11 +636,7 @@ impl BParticipant {
send_closed: Arc::clone(&send_closed),
b2a_msg_recv_s: Mutex::new(b2a_msg_recv_s),
});
#[cfg(feature = "metrics")]
self.metrics
.streams_opened_total
.with_label_values(&[&self.remote_pid_string])
.inc();
self.metrics.streams_opened(&self.remote_pid_string);
Stream::new(
self.local_pid,
self.remote_pid,
@ -665,12 +655,14 @@ impl BParticipant {
#[cfg(test)]
mod tests {
use super::*;
use network_protocol::ProtocolMetrics;
use tokio::{
runtime::Runtime,
sync::{mpsc, oneshot},
task::JoinHandle,
};
#[allow(clippy::type_complexity)]
fn mock_bparticipant() -> (
Arc<Runtime>,
mpsc::UnboundedSender<A2bStreamOpen>,
@ -720,13 +712,14 @@ mod tests {
) -> Protocols {
let (s1, r1) = mpsc::channel(100);
let (s2, r2) = mpsc::channel(100);
let p1 = Protocols::new_mpsc(s1, r2);
let metrics = Arc::new(ProtocolMetrics::new().unwrap());
let p1 = Protocols::new_mpsc(s1, r2, cid, Arc::clone(&metrics));
let (complete_s, complete_r) = oneshot::channel();
create_channel
.send((cid, Sid::new(0), p1, complete_s))
.unwrap();
complete_r.await.unwrap();
Protocols::new_mpsc(s2, r1)
Protocols::new_mpsc(s2, r1, cid, Arc::clone(&metrics))
}
#[test]

View File

@ -1,12 +1,11 @@
#[cfg(feature = "metrics")]
use crate::metrics::NetworkMetrics;
use crate::{
api::{Participant, ProtocolAddr},
channel::Protocols,
metrics::NetworkMetrics,
participant::{B2sPrioStatistic, BParticipant, S2bCreateChannel, S2bShutdownBparticipant},
};
use futures_util::{FutureExt, StreamExt};
use network_protocol::{MpscMsg, Pid};
use network_protocol::{Cid, MpscMsg, Pid, ProtocolMetrics};
#[cfg(feature = "metrics")]
use prometheus::Registry;
use rand::Rng;
@ -19,9 +18,7 @@ use std::{
time::Duration,
};
use tokio::{
io, net,
runtime::Runtime,
select,
io, net, select,
sync::{mpsc, oneshot, Mutex},
};
use tokio_stream::wrappers::UnboundedReceiverStream;
@ -37,7 +34,7 @@ use tracing::*;
// - c: channel/handshake
lazy_static::lazy_static! {
static ref MPSC_POOL: Mutex<HashMap<u64, mpsc::UnboundedSender<(mpsc::Sender<MpscMsg>, oneshot::Sender<mpsc::Sender<MpscMsg>>)>>> = {
static ref MPSC_POOL: Mutex<HashMap<u64, mpsc::UnboundedSender<S2sMpscConnect>>> = {
Mutex::new(HashMap::new())
};
}
@ -52,6 +49,10 @@ struct ParticipantInfo {
type A2sListen = (ProtocolAddr, oneshot::Sender<io::Result<()>>);
type A2sConnect = (ProtocolAddr, oneshot::Sender<io::Result<Participant>>);
type A2sDisconnect = (Pid, S2bShutdownBparticipant);
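/// a sender for one direction of an mpsc connection plus a oneshot used to
/// hand back the sender for the opposite direction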
type S2sMpscConnect = (
mpsc::Sender<MpscMsg>,
oneshot::Sender<mpsc::Sender<MpscMsg>>,
);
#[derive(Debug)]
struct ControlChannels {
@ -72,7 +73,6 @@ struct ParticipantChannels {
#[derive(Debug)]
pub struct Scheduler {
local_pid: Pid,
runtime: Arc<Runtime>,
local_secret: u128,
closed: AtomicBool,
run_channels: Option<ControlChannels>,
@ -80,8 +80,8 @@ pub struct Scheduler {
participants: Arc<Mutex<HashMap<Pid, ParticipantInfo>>>,
channel_ids: Arc<AtomicU64>,
channel_listener: Mutex<HashMap<ProtocolAddr, oneshot::Sender<()>>>,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>,
protocol_metrics: Arc<ProtocolMetrics>,
}
impl Scheduler {
@ -89,7 +89,6 @@ impl Scheduler {
pub fn new(
local_pid: Pid,
runtime: Arc<Runtime>,
#[cfg(feature = "metrics")] registry: Option<&Registry>,
) -> (
Self,
@ -120,13 +119,14 @@ impl Scheduler {
b2s_prio_statistic_s,
};
#[cfg(feature = "metrics")]
let metrics = Arc::new(NetworkMetrics::new(&local_pid).unwrap());
let protocol_metrics = Arc::new(ProtocolMetrics::new().unwrap());
#[cfg(feature = "metrics")]
{
if let Some(registry) = registry {
metrics.register(registry).unwrap();
protocol_metrics.register(registry).unwrap();
}
}
@ -136,7 +136,6 @@ impl Scheduler {
(
Self {
local_pid,
runtime,
local_secret,
closed: AtomicBool::new(false),
run_channels,
@ -144,8 +143,8 @@ impl Scheduler {
participants: Arc::new(Mutex::new(HashMap::new())),
channel_ids: Arc::new(AtomicU64::new(0)),
channel_listener: Mutex::new(HashMap::new()),
#[cfg(feature = "metrics")]
metrics,
protocol_metrics,
},
a2s_listen_s,
a2s_connect_s,
@ -206,7 +205,7 @@ impl Scheduler {
) {
trace!("Start connect_mgr");
while let Some((addr, pid_sender)) = a2s_connect_r.recv().await {
let (protocol, handshake) = match addr {
let (protocol, cid, handshake) = match addr {
ProtocolAddr::Tcp(addr) => {
#[cfg(feature = "metrics")]
self.metrics
@ -220,8 +219,13 @@ impl Scheduler {
continue;
},
};
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
info!("Connecting Tcp to: {}", stream.peer_addr().unwrap());
(Protocols::new_tcp(stream), false)
(
Protocols::new_tcp(stream, cid, Arc::clone(&self.protocol_metrics)),
cid,
false,
)
},
ProtocolAddr::Mpsc(addr) => {
let mpsc_s = match MPSC_POOL.lock().await.get(&addr) {
@ -244,9 +248,16 @@ impl Scheduler {
.unwrap();
let local_to_remote_s = local_to_remote_oneshot_r.await.unwrap();
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
info!(?addr, "Connecting Mpsc");
(
Protocols::new_mpsc(local_to_remote_s, remote_to_local_r),
Protocols::new_mpsc(
local_to_remote_s,
remote_to_local_r,
cid,
Arc::clone(&self.protocol_metrics),
),
cid,
false,
)
},
@ -285,7 +296,7 @@ impl Scheduler {
//},
_ => unimplemented!(),
};
self.init_protocol(protocol, Some(pid_sender), handshake)
self.init_protocol(protocol, cid, Some(pid_sender), handshake)
.await;
}
trace!("Stop connect_mgr");
@ -422,7 +433,8 @@ impl Scheduler {
},
};
info!("Accepting Tcp from: {}", remote_addr);
self.init_protocol(Protocols::new_tcp(stream), None, true)
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
self.init_protocol(Protocols::new_tcp(stream, cid, Arc::clone(&self.protocol_metrics)), cid, None, true)
.await;
}
},
@ -440,7 +452,8 @@ impl Scheduler {
let (remote_to_local_s, remote_to_local_r) = mpsc::channel(Self::MPSC_CHANNEL_BOUND);
local_remote_to_local_s.send(remote_to_local_s).unwrap();
info!(?addr, "Accepting Mpsc from");
self.init_protocol(Protocols::new_mpsc(local_to_remote_s, remote_to_local_r), None, true)
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
self.init_protocol(Protocols::new_mpsc(local_to_remote_s, remote_to_local_r, cid, Arc::clone(&self.protocol_metrics)), cid, None, true)
.await;
}
warn!("MpscStream Failed, stopping");
@ -529,6 +542,7 @@ impl Scheduler {
async fn init_protocol(
&self,
mut protocol: Protocols,
cid: Cid,
s2a_return_pid_s: Option<oneshot::Sender<io::Result<Participant>>>,
send_handshake: bool,
) {
@ -543,15 +557,12 @@ impl Scheduler {
// participant can be in handshake phase ever! Someone could deadlock
// the whole server easily for new clients. UDP doesn't work at all, as
// the UDP listening is done in another place.
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
let participants = Arc::clone(&self.participants);
let runtime = Arc::clone(&self.runtime);
#[cfg(feature = "metrics")]
let metrics = Arc::clone(&self.metrics);
let local_pid = self.local_pid;
let local_secret = self.local_secret;
// this is necessary for UDP to work at all and to remove code duplication
self.runtime.spawn(
tokio::spawn(
async move {
trace!(?cid, "Open channel and be ready for Handshake");
use network_protocol::InitProtocol;
@ -575,13 +586,7 @@ impl Scheduler {
b2a_stream_opened_r,
s2b_create_channel_s,
s2b_shutdown_bparticipant_s,
) = BParticipant::new(
local_pid,
pid,
sid,
#[cfg(feature = "metrics")]
Arc::clone(&metrics),
);
) = BParticipant::new(local_pid, pid, sid, Arc::clone(&metrics));
let participant = Participant::new(
local_pid,
@ -601,7 +606,7 @@ impl Scheduler {
drop(participants);
trace!("dropped participants lock");
let p = pid;
runtime.spawn(
tokio::spawn(
bparticipant
.run(participant_channels.b2s_prio_statistic_s)
.instrument(tracing::info_span!("remote", ?p)),

View File

@ -230,7 +230,7 @@ fn close_network_then_disconnect_part() {
fn opened_stream_before_remote_part_is_closed() {
let (_, _) = helper::setup(false, 0);
let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
let mut s2_a = r.block_on(p_a.open(10, Promises::empty())).unwrap();
let mut s2_a = r.block_on(p_a.open(4, Promises::empty())).unwrap();
s2_a.send("HelloWorld").unwrap();
let mut s2_b = r.block_on(p_b.opened()).unwrap();
drop(p_a);
@ -243,7 +243,7 @@ fn opened_stream_before_remote_part_is_closed() {
fn opened_stream_after_remote_part_is_closed() {
let (_, _) = helper::setup(false, 0);
let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
let mut s2_a = r.block_on(p_a.open(10, Promises::empty())).unwrap();
let mut s2_a = r.block_on(p_a.open(3, Promises::empty())).unwrap();
s2_a.send("HelloWorld").unwrap();
drop(p_a);
std::thread::sleep(std::time::Duration::from_millis(1000));
@ -260,14 +260,14 @@ fn opened_stream_after_remote_part_is_closed() {
fn open_stream_after_remote_part_is_closed() {
let (_, _) = helper::setup(false, 0);
let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
let mut s2_a = r.block_on(p_a.open(10, Promises::empty())).unwrap();
let mut s2_a = r.block_on(p_a.open(4, Promises::empty())).unwrap();
s2_a.send("HelloWorld").unwrap();
drop(p_a);
std::thread::sleep(std::time::Duration::from_millis(1000));
let mut s2_b = r.block_on(p_b.opened()).unwrap();
assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
assert_eq!(
r.block_on(p_b.open(20, Promises::empty())).unwrap_err(),
r.block_on(p_b.open(5, Promises::empty())).unwrap_err(),
ParticipantError::ParticipantDisconnected
);
drop((_n_a, _n_b, p_b)); //clean teardown
@ -294,7 +294,7 @@ fn open_participant_before_remote_part_is_closed() {
let addr = tcp();
r.block_on(n_a.listen(addr.clone())).unwrap();
let p_b = r.block_on(n_b.connect(addr)).unwrap();
let mut s1_b = r.block_on(p_b.open(10, Promises::empty())).unwrap();
let mut s1_b = r.block_on(p_b.open(4, Promises::empty())).unwrap();
s1_b.send("HelloWorld").unwrap();
let p_a = r.block_on(n_a.connected()).unwrap();
drop(s1_b);
@ -314,7 +314,7 @@ fn open_participant_after_remote_part_is_closed() {
let addr = tcp();
r.block_on(n_a.listen(addr.clone())).unwrap();
let p_b = r.block_on(n_b.connect(addr)).unwrap();
let mut s1_b = r.block_on(p_b.open(10, Promises::empty())).unwrap();
let mut s1_b = r.block_on(p_b.open(4, Promises::empty())).unwrap();
s1_b.send("HelloWorld").unwrap();
drop(s1_b);
drop(p_b);
@ -334,7 +334,7 @@ fn close_network_scheduler_completely() {
let addr = tcp();
r.block_on(n_a.listen(addr.clone())).unwrap();
let p_b = r.block_on(n_b.connect(addr)).unwrap();
let mut s1_b = r.block_on(p_b.open(10, Promises::empty())).unwrap();
let mut s1_b = r.block_on(p_b.open(4, Promises::empty())).unwrap();
s1_b.send("HelloWorld").unwrap();
let p_a = r.block_on(n_a.connected()).unwrap();

View File

@ -67,7 +67,7 @@ pub fn network_participant_stream(
let p1_b = n_b.connect(addr).await.unwrap();
let p1_a = n_a.connected().await.unwrap();
let s1_a = p1_a.open(10, Promises::empty()).await.unwrap();
let s1_a = p1_a.open(4, Promises::empty()).await.unwrap();
let s1_b = p1_b.opened().await.unwrap();
(n_a, p1_a, s1_a, n_b, p1_b, s1_b)
@ -76,28 +76,28 @@ pub fn network_participant_stream(
}
#[allow(dead_code)]
pub fn tcp() -> veloren_network::ProtocolAddr {
pub fn tcp() -> ProtocolAddr {
lazy_static! {
static ref PORTS: AtomicU16 = AtomicU16::new(5000);
}
let port = PORTS.fetch_add(1, Ordering::Relaxed);
veloren_network::ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
}
#[allow(dead_code)]
pub fn udp() -> veloren_network::ProtocolAddr {
pub fn udp() -> ProtocolAddr {
lazy_static! {
static ref PORTS: AtomicU16 = AtomicU16::new(5000);
}
let port = PORTS.fetch_add(1, Ordering::Relaxed);
veloren_network::ProtocolAddr::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
ProtocolAddr::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
}
#[allow(dead_code)]
pub fn mpsc() -> veloren_network::ProtocolAddr {
pub fn mpsc() -> ProtocolAddr {
lazy_static! {
static ref PORTS: AtomicU64 = AtomicU64::new(5000);
}
let port = PORTS.fetch_add(1, Ordering::Relaxed);
veloren_network::ProtocolAddr::Mpsc(port)
ProtocolAddr::Mpsc(port)
}

View File

@ -177,7 +177,7 @@ fn api_stream_send_main() -> std::result::Result<(), Box<dyn std::error::Error>>
.await?;
// keep it alive
let _stream_p = remote_p
.open(16, Promises::ORDERED | Promises::CONSISTENCY)
.open(4, Promises::ORDERED | Promises::CONSISTENCY)
.await?;
let participant_a = network.connected().await?;
let mut stream_a = participant_a.opened().await?;
@ -205,7 +205,7 @@ fn api_stream_recv_main() -> std::result::Result<(), Box<dyn std::error::Error>>
.connect(ProtocolAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
.await?;
let mut stream_p = remote_p
.open(16, Promises::ORDERED | Promises::CONSISTENCY)
.open(4, Promises::ORDERED | Promises::CONSISTENCY)
.await?;
stream_p.send("Hello World")?;
let participant_a = network.connected().await?;

View File

@ -17,7 +17,9 @@ pub fn init(basic: bool) {
env.add_directive("veloren_world::sim=info".parse().unwrap())
.add_directive("veloren_world::civ=info".parse().unwrap())
.add_directive("uvth=warn".parse().unwrap())
.add_directive("tiny_http=warn".parse().unwrap())
.add_directive("hyper=info".parse().unwrap())
.add_directive("prometheus_hyper=info".parse().unwrap())
.add_directive("mio::pool=info".parse().unwrap())
.add_directive("mio::sys::windows=debug".parse().unwrap())
.add_directive("veloren_network_protocol=info".parse().unwrap())
.add_directive(

View File

@ -104,11 +104,11 @@ impl ConnectionHandler {
let reliable = Promises::ORDERED | Promises::CONSISTENCY;
let reliablec = reliable | Promises::COMPRESSED;
let general_stream = participant.open(10, reliablec).await?;
let ping_stream = participant.open(5, reliable).await?;
let mut register_stream = participant.open(10, reliablec).await?;
let character_screen_stream = participant.open(10, reliablec).await?;
let in_game_stream = participant.open(10, reliablec).await?;
let general_stream = participant.open(3, reliablec).await?;
let ping_stream = participant.open(2, reliable).await?;
let mut register_stream = participant.open(3, reliablec).await?;
let character_screen_stream = participant.open(3, reliablec).await?;
let in_game_stream = participant.open(3, reliablec).await?;
let server_data = receiver.recv()?;