Merge branch 'xMAC94x/net-improve' into 'master'

xMac94x/net improve

See merge request veloren/veloren!1162
Marcel 2020-07-14 14:09:43 +00:00
commit d5bfae886e
28 changed files with 1205 additions and 912 deletions


@ -35,14 +35,16 @@ use futures_timer::Delay;
use futures_util::{select, FutureExt};
use hashbrown::HashMap;
use image::DynamicImage;
use network::{Address, Network, Participant, Pid, Stream, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use network::{
Network, Participant, Pid, ProtocolAddr, Stream, PROMISES_CONSISTENCY, PROMISES_ORDERED,
};
use std::{
collections::VecDeque,
net::SocketAddr,
sync::Arc,
time::{Duration, Instant},
};
use tracing::{debug, error, warn};
use tracing::{debug, error, trace, warn};
use uvth::{ThreadPool, ThreadPoolBuilder};
use vek::*;
@ -75,7 +77,7 @@ pub struct Client {
pub active_character_id: Option<i32>,
_network: Network,
_participant: Arc<Participant>,
participant: Option<Participant>,
singleton_stream: Stream,
last_server_ping: f64,
@ -117,7 +119,7 @@ impl Client {
let (network, f) = Network::new(Pid::new(), None);
thread_pool.execute(f);
let participant = block_on(network.connect(Address::Tcp(addr.into())))?;
let participant = block_on(network.connect(ProtocolAddr::Tcp(addr.into())))?;
let mut stream = block_on(participant.open(10, PROMISES_ORDERED | PROMISES_CONSISTENCY))?;
// Wait for initial sync
@ -200,7 +202,7 @@ impl Client {
active_character_id: None,
_network: network,
_participant: participant,
participant: Some(participant),
singleton_stream: stream,
last_server_ping: 0.0,
@ -312,10 +314,11 @@ impl Client {
/// Send disconnect message to the server
pub fn request_logout(&mut self) {
debug!("Requesting logout from server");
if let Err(e) = self.singleton_stream.send(ClientMsg::Disconnect) {
error!(
?e,
"couldn't send disconnect package to server, did server close already?"
"Couldn't send disconnect package to server, did server close already?"
);
}
}
@ -610,8 +613,7 @@ impl Client {
);
}
self.singleton_stream
.send(ClientMsg::ControllerInputs(inputs))
.unwrap();
.send(ClientMsg::ControllerInputs(inputs))?;
}
// 2) Build up a list of events for this frame, to be passed to the frontend.
@ -1191,12 +1193,16 @@ impl Client {
impl Drop for Client {
fn drop(&mut self) {
trace!("Dropping client");
if let Err(e) = self.singleton_stream.send(ClientMsg::Disconnect) {
warn!(
"error during drop of client, couldn't send disconnect package, is the connection \
already closed? : {}",
e
?e,
"Error during drop of client, couldn't send disconnect package, is the connection \
already closed?",
);
}
if let Err(e) = block_on(self.participant.take().unwrap().disconnect()) {
warn!(?e, "error when disconnecting, couldn't send all data");
}
}
}
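
The change above is the heart of the client refactor: the `Participant` is now stored in an `Option` so that `Drop` can move it out and call the new consuming `Participant::disconnect`. A minimal sketch of that pattern, assuming the `veloren_network` API as changed in this commit (`MyClient` and its fields are hypothetical):

```rust
// Minimal sketch, assuming Participant::disconnect(self) consumes the
// participant as introduced in this commit; MyClient is hypothetical.
use futures::executor::block_on;
use veloren_network::{Network, Participant};

struct MyClient {
    _network: Network,
    // Option<_> lets Drop move the Participant out, since disconnect()
    // takes it by value.
    participant: Option<Participant>,
}

impl Drop for MyClient {
    fn drop(&mut self) {
        if let Some(participant) = self.participant.take() {
            // disconnect() flushes outstanding messages before closing.
            if let Err(e) = block_on(participant.disconnect()) {
                eprintln!("couldn't send all data on disconnect: {:?}", e);
            }
        }
    }
}
```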


@ -65,7 +65,7 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"
dependencies = [
"byteorder",
"byteorder 1.3.4",
"serde",
]
@ -75,6 +75,12 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "byteorder"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
[[package]]
name = "byteorder"
version = "1.3.4"
@ -345,6 +351,16 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "lz4-compress"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f966533a922a9bba9e95e594c1fdb3b9bf5fdcdb11e37e51ad84cd76e468b91"
dependencies = [
"byteorder 0.5.3",
"quick-error",
]
[[package]]
name = "matchers"
version = "0.0.1"
@ -538,15 +554,15 @@ dependencies = [
[[package]]
name = "prometheus"
version = "0.7.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1"
checksum = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd"
dependencies = [
"cfg-if",
"fnv",
"lazy_static",
"quick-error",
"spin",
"thiserror",
]
[[package]]
@ -629,7 +645,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
dependencies = [
"byteorder",
"byteorder 1.3.4",
"regex-syntax",
]
@ -712,6 +728,26 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "thiserror"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "thread_local"
version = "1.0.1"
@ -798,6 +834,7 @@ dependencies = [
"crossbeam-channel",
"futures",
"lazy_static",
"lz4-compress",
"prometheus",
"rand",
"serde",


@ -4,9 +4,10 @@
//! (cd network/examples/chat && RUST_BACKTRACE=1 cargo run --release -- --trace=info --port 15006 --mode=client)
//! ```
use async_std::io;
use async_std::sync::RwLock;
use clap::{App, Arg};
use futures::executor::{block_on, ThreadPool};
use network::{Address, Network, Participant, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use network::{ProtocolAddr, Network, Participant, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use std::{sync::Arc, thread, time::Duration};
use tracing::*;
use tracing_subscriber::EnvFilter;
@ -76,8 +77,8 @@ fn main() {
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
let ip: &str = matches.value_of("ip").unwrap();
let address = match matches.value_of("protocol") {
Some("tcp") => Address::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
Some("udp") => Address::Udp(format!("{}:{}", ip, port).parse().unwrap()),
Some("tcp") => ProtocolAddr::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
Some("udp") => ProtocolAddr::Udp(format!("{}:{}", ip, port).parse().unwrap()),
_ => panic!("invalid mode, run --help!"),
};
@ -98,22 +99,24 @@ fn main() {
}
}
fn server(address: Address) {
fn server(address: ProtocolAddr) {
let (server, f) = Network::new(Pid::new(), None);
let server = Arc::new(server);
std::thread::spawn(f);
let pool = ThreadPool::new().unwrap();
let participants = Arc::new(RwLock::new(Vec::new()));
block_on(async {
server.listen(address).await.unwrap();
loop {
let p1 = server.connected().await.unwrap();
let p1 = Arc::new(server.connected().await.unwrap());
let server1 = server.clone();
pool.spawn_ok(client_connection(server1, p1));
participants.write().await.push(p1.clone());
pool.spawn_ok(client_connection(server1, p1, participants.clone()));
}
});
}
async fn client_connection(network: Arc<Network>, participant: Arc<Participant>) {
async fn client_connection(
    _network: Arc<Network>,
    participant: Arc<Participant>,
    participants: Arc<RwLock<Vec<Arc<Participant>>>>,
) {
let mut s1 = participant.opened().await.unwrap();
let username = s1.recv::<String>().await.unwrap();
println!("[{}] connected", username);
@ -124,14 +127,12 @@ async fn client_connection(network: Arc<Network>, participant: Arc<Participant>)
},
Ok(msg) => {
println!("[{}]: {}", username, msg);
let mut parts = network.participants().await;
for (_, p) in parts.drain() {
for p in participants.read().await.iter() {
match p
.open(32, PROMISES_ORDERED | PROMISES_CONSISTENCY)
.await {
Err(_) => {
//probably disconnected, remove it
network.disconnect(p).await.unwrap();
info!("error talking to client, //TODO drop it")
},
Ok(mut s) => s.send((username.clone(), msg.clone())).unwrap(),
};
@ -142,7 +143,7 @@ async fn client_connection(network: Arc<Network>, participant: Arc<Participant>)
println!("[{}] disconnected", username);
}
fn client(address: Address) {
fn client(address: ProtocolAddr) {
let (client, f) = Network::new(Pid::new(), None);
std::thread::spawn(f);
let pool = ThreadPool::new().unwrap();
@ -180,7 +181,7 @@ fn client(address: Address) {
// for receiving, I open and close a stream per message. This could be done
// more simply, but it allows me to be quite lazy on the server side and just
// get a list of all participants and send to them...
async fn read_messages(participant: Arc<Participant>) {
async fn read_messages(participant: Participant) {
while let Ok(mut s) = participant.opened().await {
let (username, message) = s.recv::<(String, String)>().await.unwrap();
println!("[{}]: {}", username, message);
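
Because `Network::participants()` is removed in this commit, the chat server above now tracks connections itself and walks that list when broadcasting. A condensed sketch of the bookkeeping, under the same API assumptions (`broadcast` is an illustrative helper, not part of the crate):

```rust
// Sketch of the server-side bookkeeping: the application, not the Network,
// now owns the participant list. `broadcast` is illustrative only.
use async_std::sync::RwLock;
use std::sync::Arc;
use veloren_network::{Participant, PROMISES_CONSISTENCY, PROMISES_ORDERED};

type Participants = Arc<RwLock<Vec<Arc<Participant>>>>;

async fn broadcast(participants: &Participants, msg: (String, String)) {
    for p in participants.read().await.iter() {
        // One short-lived stream per message, mirroring the example above.
        match p.open(32, PROMISES_ORDERED | PROMISES_CONSISTENCY).await {
            Ok(mut s) => { let _ = s.send(msg.clone()); },
            // An Err here usually means the remote disconnected; a real
            // server would remove the entry from the list.
            Err(_) => {},
        }
    }
}
```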


@ -83,7 +83,7 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"
dependencies = [
"byteorder",
"byteorder 1.3.4",
"serde",
]
@ -104,6 +104,12 @@ dependencies = [
"constant_time_eq",
]
[[package]]
name = "byteorder"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
[[package]]
name = "byteorder"
version = "1.3.4"
@ -418,6 +424,16 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "lz4-compress"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f966533a922a9bba9e95e594c1fdb3b9bf5fdcdb11e37e51ad84cd76e468b91"
dependencies = [
"byteorder 0.5.3",
"quick-error",
]
[[package]]
name = "matchers"
version = "0.0.1"
@ -597,15 +613,15 @@ dependencies = [
[[package]]
name = "prometheus"
version = "0.7.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1"
checksum = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd"
dependencies = [
"cfg-if",
"fnv",
"lazy_static",
"quick-error",
"spin",
"thiserror",
]
[[package]]
@ -699,7 +715,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
dependencies = [
"byteorder",
"byteorder 1.3.4",
"regex-syntax",
]
@ -803,6 +819,26 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "thiserror"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "thread_local"
version = "1.0.1"
@ -889,6 +925,7 @@ dependencies = [
"crossbeam-channel",
"futures",
"lazy_static",
"lz4-compress",
"prometheus",
"rand",
"serde",


@ -2,17 +2,17 @@ use async_std::{
fs,
path::{Path, PathBuf},
};
use network::{Address, Participant, Stream};
use network::{ProtocolAddr, Participant, Stream};
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, sync::Arc};
use std::collections::HashMap;
#[derive(Debug)]
pub enum LocalCommand {
Shutdown,
Disconnect,
Connect(Address),
Connect(ProtocolAddr),
List,
Serve(FileInfo),
Get(u32, Option<String>),
@ -34,7 +34,7 @@ pub struct FileInfo {
pub struct RemoteInfo {
infos: HashMap<u32, FileInfo>,
_participant: Arc<Participant>,
_participant: Participant,
pub cmd_out: Stream,
pub file_out: Stream,
}
@ -44,7 +44,7 @@ impl FileInfo {
let mt = match fs::metadata(&path).await {
Err(e) => {
println!(
"cannot get metadata for file: {:?}, does it exist? Error: {:?}",
"Cannot get metadata for file: {:?}, does it exist? Error: {:?}",
&path, &e
);
return None;
@ -68,7 +68,7 @@ impl FileInfo {
}
impl RemoteInfo {
pub fn new(cmd_out: Stream, file_out: Stream, participant: Arc<Participant>) -> Self {
pub fn new(cmd_out: Stream, file_out: Stream, participant: Participant) -> Self {
Self {
infos: HashMap::new(),
_participant: participant,


@ -10,7 +10,7 @@ use futures::{
executor::{block_on, ThreadPool},
sink::SinkExt,
};
use network::Address;
use network::ProtocolAddr;
use std::{thread, time::Duration};
use tracing::*;
use tracing_subscriber::EnvFilter;
@ -54,7 +54,7 @@ fn main() {
.init();
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
let address = Address::Tcp(format!("{}:{}", "127.0.0.1", port).parse().unwrap());
let address = ProtocolAddr::Tcp(format!("{}:{}", "127.0.0.1", port).parse().unwrap());
let (server, cmd_sender) = Server::new();
let pool = ThreadPool::new().unwrap();
@ -70,7 +70,7 @@ fn file_exists(file: String) -> Result<(), String> {
if file.exists() {
Ok(())
} else {
Err(format!("file does not exist"))
Err(format!("File does not exist"))
}
}
@ -157,13 +157,13 @@ async fn client(mut cmd_sender: mpsc::UnboundedSender<LocalCommand>) {
("connect", Some(connect_matches)) => {
let socketaddr = connect_matches.value_of("ip:port").unwrap().parse().unwrap();
cmd_sender
.send(LocalCommand::Connect(Address::Tcp(socketaddr)))
.send(LocalCommand::Connect(ProtocolAddr::Tcp(socketaddr)))
.await
.unwrap();
},
("t", _) => {
cmd_sender
.send(LocalCommand::Connect(Address::Tcp(
.send(LocalCommand::Connect(ProtocolAddr::Tcp(
"127.0.0.1:1231".parse().unwrap(),
)))
.await


@ -5,7 +5,7 @@ use async_std::{
sync::{Mutex, RwLock},
};
use futures::{channel::mpsc, future::FutureExt, stream::StreamExt};
use network::{Address, Network, Participant, Pid, Stream, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use network::{ProtocolAddr, Network, Participant, Pid, Stream, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use std::{collections::HashMap, sync::Arc};
use tracing::*;
@ -42,7 +42,7 @@ impl Server {
)
}
pub async fn run(mut self, address: Address) {
pub async fn run(mut self, address: ProtocolAddr) {
let run_channels = self.run_channels.take().unwrap();
self.network.listen(address).await.unwrap();
@ -54,34 +54,31 @@ impl Server {
}
async fn command_manager(&self, command_receiver: mpsc::UnboundedReceiver<LocalCommand>) {
trace!("start command_manager");
trace!("Start command_manager");
command_receiver
.for_each_concurrent(None, async move |cmd| {
match cmd {
LocalCommand::Shutdown => {
println!("shutting down service");
println!("Shutting down service");
return;
},
LocalCommand::Disconnect => {
self.remotes.write().await.clear();
for (_, p) in self.network.participants().await.drain() {
self.network.disconnect(p).await.unwrap();
}
println!("disconnecting all connections");
println!("Disconnecting all connections");
return;
},
LocalCommand::Connect(addr) => {
println!("trying to connect to: {:?}", &addr);
println!("Trying to connect to: {:?}", &addr);
match self.network.connect(addr.clone()).await {
Ok(p) => self.loop_participant(p).await,
Err(e) => {
println!("failled to connect to {:?}, err: {:?}", &addr, e);
println!("Failled to connect to {:?}, err: {:?}", &addr, e);
},
}
},
LocalCommand::Serve(fileinfo) => {
self.served.write().await.push(fileinfo.clone());
println!("serving file: {:?}", fileinfo.path);
println!("Serving file: {:?}", fileinfo.path);
},
LocalCommand::List => {
let mut total_file_infos = vec![];
@ -110,11 +107,11 @@ impl Server {
}
})
.await;
trace!("stop command_manager");
trace!("Stop command_manager");
}
async fn connect_manager(&self) {
trace!("start connect_manager");
trace!("Start connect_manager");
let iter = futures::stream::unfold((), |_| {
self.network.connected().map(|r| r.ok().map(|v| (v, ())))
});
@ -123,17 +120,17 @@ impl Server {
self.loop_participant(participant).await;
})
.await;
trace!("stop connect_manager");
trace!("Stop connect_manager");
}
async fn loop_participant(&self, p: Arc<Participant>) {
async fn loop_participant(&self, p: Participant) {
if let (Ok(cmd_out), Ok(file_out), Ok(cmd_in), Ok(file_in)) = (
p.open(15, PROMISES_CONSISTENCY | PROMISES_ORDERED).await,
p.open(40, PROMISES_CONSISTENCY).await,
p.opened().await,
p.opened().await,
) {
debug!(?p, "connection successfully initiated");
debug!(?p, "Connection successfully initiated");
let id = p.remote_pid();
let ri = Arc::new(Mutex::new(RemoteInfo::new(cmd_out, file_out, p)));
self.remotes.write().await.insert(id, ri.clone());
@ -146,24 +143,24 @@ impl Server {
async fn handle_remote_cmd(&self, mut stream: Stream, remote_info: Arc<Mutex<RemoteInfo>>) {
while let Ok(msg) = stream.recv::<Command>().await {
println!("got message: {:?}", &msg);
println!("Got message: {:?}", &msg);
match msg {
Command::List => {
info!("request to send my list");
info!("Request to send my list");
let served = self.served.read().await.clone();
stream.send(served).unwrap();
},
Command::Get(id) => {
for file_info in self.served.read().await.iter() {
if file_info.id() == id {
info!("request to send file i got, sending it");
info!("Request to send file i got, sending it");
if let Ok(data) = file_info.load().await {
match remote_info.lock().await.file_out.send((file_info, data)) {
Ok(_) => debug!("send file"),
Err(e) => error!(?e, "sending file failed"),
}
} else {
warn!("cannot send file as loading failed, oes it still exist?");
warn!("Cannot send file as loading failed, oes it still exist?");
}
}
}
@ -174,18 +171,18 @@ impl Server {
async fn handle_files(&self, mut stream: Stream, _remote_info: Arc<Mutex<RemoteInfo>>) {
while let Ok((fi, data)) = stream.recv::<(FileInfo, Vec<u8>)>().await {
debug!(?fi, "got file");
debug!(?fi, "Got file");
let path = self.receiving_files.lock().await.remove(&fi.id()).flatten();
let path: PathBuf = match &path {
Some(path) => shellexpand::tilde(&path).parse().unwrap(),
None => {
let mut path = std::env::current_dir().unwrap();
path.push(fi.path().file_name().unwrap());
trace!("no path provided, saving down to {:?}", path);
trace!("No path provided, saving down to {:?}", path);
PathBuf::from(path)
},
};
debug!("received file, going to save it under {:?}", path);
debug!("Received file, going to save it under {:?}", path);
fs::write(path, data).await.unwrap();
}
}


@ -71,7 +71,7 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"
dependencies = [
"byteorder",
"byteorder 1.3.4",
"serde",
]
@ -81,6 +81,12 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "byteorder"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
[[package]]
name = "byteorder"
version = "1.3.4"
@ -372,6 +378,16 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "lz4-compress"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f966533a922a9bba9e95e594c1fdb3b9bf5fdcdb11e37e51ad84cd76e468b91"
dependencies = [
"byteorder 0.5.3",
"quick-error",
]
[[package]]
name = "matchers"
version = "0.0.1"
@ -578,16 +594,16 @@ dependencies = [
[[package]]
name = "prometheus"
version = "0.7.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1"
checksum = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd"
dependencies = [
"cfg-if",
"fnv",
"lazy_static",
"protobuf",
"quick-error",
"spin",
"thiserror",
]
[[package]]
@ -670,7 +686,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
dependencies = [
"byteorder",
"byteorder 1.3.4",
"regex-syntax",
]
@ -753,6 +769,26 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "thiserror"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "thread_local"
version = "1.0.1"
@ -880,6 +916,7 @@ dependencies = [
"crossbeam-channel",
"futures",
"lazy_static",
"lz4-compress",
"prometheus",
"rand",
"serde",


@ -7,7 +7,7 @@ mod metrics;
use clap::{App, Arg};
use futures::executor::block_on;
use network::{Address, MessageBuffer, Network, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use network::{ProtocolAddr, MessageBuffer, Network, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use serde::{Deserialize, Serialize};
use std::{
sync::Arc,
@ -96,9 +96,9 @@ fn main() {
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
let ip: &str = matches.value_of("ip").unwrap();
let address = match matches.value_of("protocol") {
Some("tcp") => Address::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
Some("udp") => Address::Udp(format!("{}:{}", ip, port).parse().unwrap()),
_ => panic!("invalid mode, run --help!"),
Some("tcp") => ProtocolAddr::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
Some("udp") => ProtocolAddr::Udp(format!("{}:{}", ip, port).parse().unwrap()),
_ => panic!("Invalid mode, run --help!"),
};
let mut background = None;
@ -111,14 +111,14 @@ fn main() {
thread::sleep(Duration::from_millis(200)); //start client after server
client(address);
},
_ => panic!("invalid mode, run --help!"),
_ => panic!("Invalid mode, run --help!"),
};
if let Some(background) = background {
background.join().unwrap();
}
}
fn server(address: Address) {
fn server(address: ProtocolAddr) {
let mut metrics = metrics::SimpleMetrics::new();
let (server, f) = Network::new(Pid::new(), Some(metrics.registry()));
std::thread::spawn(f);
@ -126,7 +126,7 @@ fn server(address: Address) {
block_on(server.listen(address)).unwrap();
loop {
info!("waiting for participant to connect");
info!("Waiting for participant to connect");
let p1 = block_on(server.connected()).unwrap(); //remote representation of p1
let mut s1 = block_on(p1.opened()).unwrap(); //remote representation of s1
block_on(async {
@ -138,15 +138,15 @@ fn server(address: Address) {
let new = Instant::now();
let diff = new.duration_since(last);
last = new;
println!("recv 1.000.000 took {}", diff.as_millis());
println!("Recv 1.000.000 took {}", diff.as_millis());
}
}
info!("other stream was closed");
info!("Other stream was closed");
});
}
}
fn client(address: Address) {
fn client(address: ProtocolAddr) {
let mut metrics = metrics::SimpleMetrics::new();
let (client, f) = Network::new(Pid::new(), Some(metrics.registry()));
std::thread::spawn(f);
@ -170,18 +170,18 @@ fn client(address: Address) {
let new = Instant::now();
let diff = new.duration_since(last);
last = new;
println!("send 1.000.000 took {}", diff.as_millis());
println!("Send 1.000.000 took {}", diff.as_millis());
}
if id > 2000000 {
println!("stop");
println!("Stop");
std::thread::sleep(std::time::Duration::from_millis(5000));
break;
}
}
drop(s1);
std::thread::sleep(std::time::Duration::from_millis(5000));
info!("closing participant");
block_on(client.disconnect(p1)).unwrap();
info!("Closing participant");
block_on(p1.disconnect()).unwrap();
std::thread::sleep(std::time::Duration::from_millis(25000));
info!("DROPPING! client");
drop(client);
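
Teardown now goes through the `Participant` itself instead of the removed `Network::disconnect`. A hedged sketch of the new shutdown sequence, assuming the API from this commit (address and port are arbitrary):

```rust
// Sketch of the new teardown flow; nothing here is prescriptive beyond
// the call order: disconnect the Participant, then drop the Network.
use futures::executor::block_on;
use veloren_network::{Network, Pid, ProtocolAddr};

fn shutdown_flow() -> Result<(), Box<dyn std::error::Error>> {
    let (client, f) = Network::new(Pid::new(), None);
    std::thread::spawn(f);
    let p1 = block_on(client.connect(ProtocolAddr::Tcp("127.0.0.1:7777".parse()?)))?;
    // ... open streams, send data ...
    block_on(p1.disconnect())?; // consumes p1 and flushes pending messages
    drop(client); // Network::drop now closes any remaining participants itself
    Ok(())
}
```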


@ -55,7 +55,7 @@ impl SimpleMetrics {
Ok(Some(rq)) => rq,
Ok(None) => continue,
Err(e) => {
println!("error: {}", e);
println!("Error: {}", e);
break;
},
};
@ -76,7 +76,7 @@ impl SimpleMetrics {
_ => (),
}
}
debug!("stopping tiny_http server to serve metrics");
debug!("Stopping tiny_http server to serve metrics");
}));
Ok(())
}


@ -7,7 +7,11 @@ use crate::{
scheduler::Scheduler,
types::{Mid, Pid, Prio, Promises, Sid},
};
use async_std::{io, sync::RwLock, task};
use async_std::{
io,
sync::{Mutex, RwLock},
task,
};
use futures::{
channel::{mpsc, oneshot},
sink::SinkExt,
@ -26,9 +30,12 @@ use std::{
use tracing::*;
use tracing_futures::Instrument;
type ParticipantCloseChannel =
mpsc::UnboundedSender<(Pid, oneshot::Sender<async_std::io::Result<()>>)>;
/// Represents a Tcp or Udp or Mpsc address
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub enum Address {
pub enum ProtocolAddr {
Tcp(SocketAddr),
Udp(SocketAddr),
Mpsc(u64),
@ -46,9 +53,8 @@ pub struct Participant {
remote_pid: Pid,
a2b_steam_open_s: RwLock<mpsc::UnboundedSender<(Prio, Promises, oneshot::Sender<Stream>)>>,
b2a_stream_opened_r: RwLock<mpsc::UnboundedReceiver<Stream>>,
closed: AtomicBool,
a2s_disconnect_s:
Option<mpsc::UnboundedSender<(Pid, oneshot::Sender<async_std::io::Result<()>>)>>,
closed: Arc<RwLock<Result<(), ParticipantError>>>,
a2s_disconnect_s: Arc<Mutex<Option<ParticipantCloseChannel>>>,
}
/// `Streams` represents a channel to send `n` messages with a certain priority
@ -83,13 +89,18 @@ pub enum NetworkError {
NetworkClosed,
ListenFailed(std::io::Error),
ConnectFailed(std::io::Error),
GracefulDisconnectFailed(std::io::Error),
}
/// Error type thrown by [`Participants`](Participant) methods
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum ParticipantError {
ParticipantClosed,
///Participant was closed by the remote side
ParticipantDisconnected,
///Underlying protocol failed and wasn't able to recover; expect some data
/// loss. Unfortunately there is no method to get the exact messages
/// that failed. This is also returned when the local side tries to do
/// something while the remote side gracefully disconnects
ProtocolFailedUnrecoverable,
}
/// Error type thrown by [`Streams`](Stream) methods
@ -105,14 +116,13 @@ pub enum StreamError {
/// Application. You can pass it around multiple threads in an
/// [`Arc`](std::sync::Arc) as all commands have internal mutability.
///
/// The `Network` has methods to [`connect`] and [`disconnect`] to other
/// [`Participants`] via their [`Address`]. All [`Participants`] will be stored
/// in the Network until explicitly disconnected, which is the only way to close
/// the sockets.
/// The `Network` has methods to [`connect`] to other [`Participants`] actively
/// via their [`ProtocolAddr`], or [`listen`] passively for [`connected`]
/// [`Participants`].
///
/// # Examples
/// ```rust
/// use veloren_network::{Network, Address, Pid};
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// use futures::executor::block_on;
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
@ -123,9 +133,9 @@ pub enum StreamError {
/// # //setup pseudo database!
/// # let (database, fd) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fd);
/// # database.listen(Address::Tcp("127.0.0.1:8080".parse().unwrap())).await?;
/// network.listen(Address::Tcp("127.0.0.1:2999".parse().unwrap())).await?;
/// let database = network.connect(Address::Tcp("127.0.0.1:8080".parse().unwrap())).await?;
/// # database.listen(ProtocolAddr::Tcp("127.0.0.1:8080".parse().unwrap())).await?;
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2999".parse().unwrap())).await?;
/// let database = network.connect(ProtocolAddr::Tcp("127.0.0.1:8080".parse().unwrap())).await?;
/// # Ok(())
/// })
/// # }
@ -133,14 +143,16 @@ pub enum StreamError {
///
/// [`Participants`]: crate::api::Participant
/// [`connect`]: Network::connect
/// [`disconnect`]: Network::disconnect
/// [`listen`]: Network::listen
/// [`connected`]: Network::connected
pub struct Network {
local_pid: Pid,
participants: RwLock<HashMap<Pid, Arc<Participant>>>,
participant_disconnect_sender:
RwLock<HashMap<Pid, Arc<Mutex<Option<ParticipantCloseChannel>>>>>,
listen_sender:
RwLock<mpsc::UnboundedSender<(Address, oneshot::Sender<async_std::io::Result<()>>)>>,
RwLock<mpsc::UnboundedSender<(ProtocolAddr, oneshot::Sender<async_std::io::Result<()>>)>>,
connect_sender:
RwLock<mpsc::UnboundedSender<(Address, oneshot::Sender<io::Result<Participant>>)>>,
RwLock<mpsc::UnboundedSender<(ProtocolAddr, oneshot::Sender<io::Result<Participant>>)>>,
connected_receiver: RwLock<mpsc::UnboundedReceiver<Participant>>,
shutdown_sender: Option<oneshot::Sender<()>>,
}
@ -170,7 +182,7 @@ impl Network {
/// ```rust
/// //Example with uvth
/// use uvth::ThreadPoolBuilder;
/// use veloren_network::{Address, Network, Pid};
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// let pool = ThreadPoolBuilder::new().build();
/// let (network, f) = Network::new(Pid::new(), None);
@ -179,7 +191,7 @@ impl Network {
///
/// ```rust
/// //Example with std::thread
/// use veloren_network::{Address, Network, Pid};
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// let (network, f) = Network::new(Pid::new(), None);
/// std::thread::spawn(f);
@ -198,31 +210,31 @@ impl Network {
registry: Option<&Registry>,
) -> (Self, impl std::ops::FnOnce()) {
let p = participant_id;
debug!(?p, "starting Network");
debug!(?p, "Starting Network");
let (scheduler, listen_sender, connect_sender, connected_receiver, shutdown_sender) =
Scheduler::new(participant_id, registry);
(
Self {
local_pid: participant_id,
participants: RwLock::new(HashMap::new()),
participant_disconnect_sender: RwLock::new(HashMap::new()),
listen_sender: RwLock::new(listen_sender),
connect_sender: RwLock::new(connect_sender),
connected_receiver: RwLock::new(connected_receiver),
shutdown_sender: Some(shutdown_sender),
},
move || {
trace!(?p, "starting sheduler in own thread");
trace!(?p, "Starting sheduler in own thread");
let _handle = task::block_on(
scheduler
.run()
.instrument(tracing::info_span!("scheduler", ?p)),
);
trace!(?p, "stopping sheduler and his own thread");
trace!(?p, "Stopping sheduler and his own thread");
},
)
}
/// starts listening on an [`Address`].
/// starts listening on a [`ProtocolAddr`].
/// When the method returns the `Network` is ready to listen for incoming
/// connections OR has returned a [`NetworkError`] (e.g. port already used).
/// You can call [`connected`] to asynchronously wait for a [`Participant`] to
@ -232,7 +244,7 @@ impl Network {
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// use veloren_network::{Address, Network, Pid};
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2000` TCP on all NICs and `2001` UDP locally
@ -240,10 +252,10 @@ impl Network {
/// std::thread::spawn(f);
/// block_on(async {
/// network
/// .listen(Address::Tcp("0.0.0.0:2000".parse().unwrap()))
/// .listen(ProtocolAddr::Tcp("0.0.0.0:2000".parse().unwrap()))
/// .await?;
/// network
/// .listen(Address::Udp("127.0.0.1:2001".parse().unwrap()))
/// .listen(ProtocolAddr::Udp("127.0.0.1:2001".parse().unwrap()))
/// .await?;
/// # Ok(())
/// })
@ -251,7 +263,7 @@ impl Network {
/// ```
///
/// [`connected`]: Network::connected
pub async fn listen(&self, address: Address) -> Result<(), NetworkError> {
pub async fn listen(&self, address: ProtocolAddr) -> Result<(), NetworkError> {
let (s2a_result_s, s2a_result_r) = oneshot::channel::<async_std::io::Result<()>>();
debug!(?address, "listening on address");
self.listen_sender
@ -267,13 +279,13 @@ impl Network {
}
}
/// starts connectiong to an [`Address`].
/// starts connecting to a [`ProtocolAddr`].
/// When the method returns the Network either returns a [`Participant`]
/// ready to open [`Streams`] on OR has returned a [`NetworkError`] (e.g.
/// can't connect, or invalid Handshake).
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// use veloren_network::{Address, Network, Pid};
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, connect on port `2010` TCP and `2011` UDP like listening above
@ -282,34 +294,34 @@ impl Network {
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// # remote.listen(Address::Tcp("0.0.0.0:2010".parse().unwrap())).await?;
/// # remote.listen(Address::Udp("0.0.0.0:2011".parse().unwrap())).await?;
/// # remote.listen(ProtocolAddr::Tcp("0.0.0.0:2010".parse().unwrap())).await?;
/// # remote.listen(ProtocolAddr::Udp("0.0.0.0:2011".parse().unwrap())).await?;
/// let p1 = network
/// .connect(Address::Tcp("127.0.0.1:2010".parse().unwrap()))
/// .connect(ProtocolAddr::Tcp("127.0.0.1:2010".parse().unwrap()))
/// .await?;
/// # //this doesn't work yet, so skip the test
/// # //TODO fixme!
/// # return Ok(());
/// let p2 = network
/// .connect(Address::Udp("127.0.0.1:2011".parse().unwrap()))
/// .connect(ProtocolAddr::Udp("127.0.0.1:2011".parse().unwrap()))
/// .await?;
/// assert!(std::sync::Arc::ptr_eq(&p1, &p2));
/// assert_eq!(&p1, &p2);
/// # Ok(())
/// })
/// # }
/// ```
/// Usually the `Network` guarantees that an operation on a [`Participant`]
/// succeeds, e.g. by automatic retrying unless it fails completely e.g. by
/// disconnecting from the remote. If 2 [`Addresses`] you `connect` to
/// disconnecting from the remote. If 2 [`ProtocolAddrs`] you `connect` to
/// belong to the same [`Participant`], you get the same [`Participant`] as
/// a result. This is useful e.g. when connecting to the same
/// [`Participant`] via multiple protocols.
///
/// [`Streams`]: crate::api::Stream
/// [`Addresses`]: crate::api::Address
pub async fn connect(&self, address: Address) -> Result<Arc<Participant>, NetworkError> {
/// [`ProtocolAddrs`]: crate::api::ProtocolAddr
pub async fn connect(&self, address: ProtocolAddr) -> Result<Participant, NetworkError> {
let (pid_sender, pid_receiver) = oneshot::channel::<io::Result<Participant>>();
debug!(?address, "connect to address");
debug!(?address, "Connect to address");
self.connect_sender
.write()
.await
@ -322,17 +334,16 @@ impl Network {
let pid = participant.remote_pid;
debug!(
?pid,
"received Participant id from remote and return to user"
"Received Participant id from remote and return to user"
);
let participant = Arc::new(participant);
self.participants
self.participant_disconnect_sender
.write()
.await
.insert(participant.remote_pid, participant.clone());
.insert(pid, participant.a2s_disconnect_s.clone());
Ok(participant)
}
/// returns a [`Participant`] created from a [`Address`] you called
/// returns a [`Participant`] created from a [`ProtocolAddr`] you called
/// [`listen`] on before. This function will either return a working
/// [`Participant`] ready to open [`Streams`] on OR has returned a
/// [`NetworkError`] (e.g. Network got closed)
@ -340,7 +351,7 @@ impl Network {
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// use veloren_network::{Address, Network, Pid};
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2020` TCP and print the Pid of connecting participants
@ -350,9 +361,9 @@ impl Network {
/// # std::thread::spawn(fr);
/// block_on(async {
/// network
/// .listen(Address::Tcp("0.0.0.0:2020".parse().unwrap()))
/// .listen(ProtocolAddr::Tcp("0.0.0.0:2020".parse().unwrap()))
/// .await?;
/// # remote.connect(Address::Tcp("0.0.0.0:2020".parse().unwrap())).await?;
/// # remote.connect(ProtocolAddr::Tcp("0.0.0.0:2020".parse().unwrap())).await?;
/// while let Ok(participant) = network.connected().await {
/// println!("Participant connected: {}", participant.remote_pid());
/// # //skip test here as it would be an endless loop
@ -365,129 +376,14 @@ impl Network {
///
/// [`Streams`]: crate::api::Stream
/// [`listen`]: crate::api::Network::listen
pub async fn connected(&self) -> Result<Arc<Participant>, NetworkError> {
pub async fn connected(&self) -> Result<Participant, NetworkError> {
let participant = self.connected_receiver.write().await.next().await?;
let participant = Arc::new(participant);
self.participants
self.participant_disconnect_sender
.write()
.await
.insert(participant.remote_pid, participant.clone());
.insert(participant.remote_pid, participant.a2s_disconnect_s.clone());
Ok(participant)
}
/// disconnecting a [`Participant`] where you move the last existing
/// [`Arc<Participant>`]. As the [`Network`] also holds [`Arc`] to the
/// [`Participant`], you need to provide the last [`Arc<Participant>`] and
/// are not allowed to keep others. If you do so the [`Participant`]
/// can't be disconnected properly. If you no longer have the respective
/// [`Participant`], try using the [`participants`] method to get it.
///
/// This function will wait for all [`Streams`] to properly close, including
/// all messages to be sent before closing; if an error occurs with one
/// of the messages it is reported.
/// Except if the remote side already dropped the [`Participant`]
/// simultaneously, then messages won't be sent
///
/// There is NO `disconnected` function in `Network`; if a [`Participant`]
/// is no longer reachable (e.g. because the network cable was unplugged) the
/// [`Participant`] will fail all actions, but needs to be manually
/// disconnected, using this function.
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// use veloren_network::{Address, Network, Pid};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2030` TCP, print the Pid of connecting participants and close the connection.
/// let (network, f) = Network::new(Pid::new(), None);
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// network
/// .listen(Address::Tcp("0.0.0.0:2030".parse().unwrap()))
/// .await?;
/// # remote.connect(Address::Tcp("0.0.0.0:2030".parse().unwrap())).await?;
/// while let Ok(participant) = network.connected().await {
/// println!("Participant connected: {}", participant.remote_pid());
/// network.disconnect(participant).await?;
/// # //skip test here as it would be an endless loop
/// # break;
/// }
/// # Ok(())
/// })
/// # }
/// ```
///
/// [`Arc<Participant>`]: crate::api::Participant
/// [`Streams`]: crate::api::Stream
/// [`participants`]: Network::participants
/// [`Arc`]: std::sync::Arc
pub async fn disconnect(&self, participant: Arc<Participant>) -> Result<(), NetworkError> {
// Remove, Close and try_unwrap error when unwrap fails!
let pid = participant.remote_pid;
debug!(?pid, "removing participant from network");
self.participants.write().await.remove(&pid)?;
participant.closed.store(true, Ordering::Relaxed);
match Arc::try_unwrap(participant) {
Err(_) => {
warn!(
"you are disconnecting and still keeping a reference to this participant, \
this is a bad idea. Participant will only be dropped when you drop your last \
reference"
);
Ok(())
},
Ok(mut participant) => {
trace!("waiting now for participant to close");
let (finished_sender, finished_receiver) = oneshot::channel();
// we are deleting here asynchronously before DROP is called. Because this is
// done natively async, while drop needs a BLOCK! Drop will recognize
// that it has been deleted here and won't try another double delete.
participant
.a2s_disconnect_s
.take()
.unwrap()
.send((pid, finished_sender))
.await
.expect("something is wrong in internal scheduler coding");
match finished_receiver.await {
Ok(Ok(())) => {
trace!(?pid, "Participant is now closed");
Ok(())
},
Ok(Err(e)) => {
trace!(
?e,
"Error occured during shutdown of participant and is propagated to \
User"
);
Err(NetworkError::GracefulDisconnectFailed(e))
},
Err(e) => {
error!(
?pid,
?e,
"Failed to get a message back from the scheduler, closing the network"
);
Err(NetworkError::NetworkClosed)
},
}
},
}
}
/// returns a copy of all currently connected [`Participants`],
/// including ones, which can't send data anymore as the underlying sockets
/// are closed already but haven't been [`disconnected`] yet.
///
/// [`Participants`]: crate::api::Participant
/// [`disconnected`]: Network::disconnect
pub async fn participants(&self) -> HashMap<Pid, Arc<Participant>> {
self.participants.read().await.clone()
}
}
impl Participant {
@ -497,14 +393,15 @@ impl Participant {
a2b_steam_open_s: mpsc::UnboundedSender<(Prio, Promises, oneshot::Sender<Stream>)>,
b2a_stream_opened_r: mpsc::UnboundedReceiver<Stream>,
a2s_disconnect_s: mpsc::UnboundedSender<(Pid, oneshot::Sender<async_std::io::Result<()>>)>,
closed: Arc<RwLock<Result<(), ParticipantError>>>,
) -> Self {
Self {
local_pid,
remote_pid,
a2b_steam_open_s: RwLock::new(a2b_steam_open_s),
b2a_stream_opened_r: RwLock::new(b2a_stream_opened_r),
closed: AtomicBool::new(false),
a2s_disconnect_s: Some(a2s_disconnect_s),
closed,
a2s_disconnect_s: Arc::new(Mutex::new(Some(a2s_disconnect_s))),
}
}
@ -528,7 +425,7 @@ impl Participant {
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// use veloren_network::{Address, Network, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
/// use veloren_network::{Network, Pid, ProtocolAddr, PROMISES_CONSISTENCY, PROMISES_ORDERED};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, connect on port 2100 and open a stream
@ -537,9 +434,9 @@ impl Participant {
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// # remote.listen(Address::Tcp("0.0.0.0:2100".parse().unwrap())).await?;
/// # remote.listen(ProtocolAddr::Tcp("0.0.0.0:2100".parse().unwrap())).await?;
/// let p1 = network
/// .connect(Address::Tcp("127.0.0.1:2100".parse().unwrap()))
/// .connect(ProtocolAddr::Tcp("127.0.0.1:2100".parse().unwrap()))
/// .await?;
/// let _s1 = p1.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await?;
/// # Ok(())
@ -552,20 +449,12 @@ impl Participant {
//use this lock for now to make sure that only one open at a time is made,
// TODO: not sure if we can parallelise that, check in future
let mut a2b_steam_open_s = self.a2b_steam_open_s.write().await;
if self.closed.load(Ordering::Relaxed) {
warn!(?self.remote_pid, "participant is closed but another open is tried on it");
return Err(ParticipantError::ParticipantClosed);
}
self.closed.read().await.clone()?;
let (p2a_return_stream_s, p2a_return_stream_r) = oneshot::channel();
if a2b_steam_open_s
a2b_steam_open_s
.send((prio, promises, p2a_return_stream_s))
.await
.is_err()
{
debug!(?self.remote_pid, "stream_open_sender failed, closing participant");
self.closed.store(true, Ordering::Relaxed);
return Err(ParticipantError::ParticipantClosed);
}
.unwrap();
match p2a_return_stream_r.await {
Ok(stream) => {
let sid = stream.sid;
@ -574,8 +463,8 @@ impl Participant {
},
Err(_) => {
debug!(?self.remote_pid, "p2a_return_stream_r failed, closing participant");
self.closed.store(true, Ordering::Relaxed);
Err(ParticipantError::ParticipantClosed)
*self.closed.write().await = Err(ParticipantError::ProtocolFailedUnrecoverable);
Err(ParticipantError::ProtocolFailedUnrecoverable)
},
}
}
@ -589,7 +478,7 @@ impl Participant {
///
/// # Examples
/// ```rust
/// use veloren_network::{Network, Pid, Address, PROMISES_ORDERED, PROMISES_CONSISTENCY};
/// use veloren_network::{Network, Pid, ProtocolAddr, PROMISES_ORDERED, PROMISES_CONSISTENCY};
/// use futures::executor::block_on;
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
@ -600,8 +489,8 @@ impl Participant {
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// # remote.listen(Address::Tcp("0.0.0.0:2110".parse().unwrap())).await?;
/// let p1 = network.connect(Address::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
/// # remote.listen(ProtocolAddr::Tcp("0.0.0.0:2110".parse().unwrap())).await?;
/// let p1 = network.connect(ProtocolAddr::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
/// # let p2 = remote.connected().await?;
/// # p2.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await?;
/// let _s1 = p1.opened().await?;
@ -617,20 +506,115 @@ impl Participant {
//use this lock for now to make sure that only one open at a time is made,
// TODO: not sure if we can parallelise that, check in future
let mut stream_opened_receiver = self.b2a_stream_opened_r.write().await;
if self.closed.load(Ordering::Relaxed) {
warn!(?self.remote_pid, "participant is closed but another open is tried on it");
return Err(ParticipantError::ParticipantClosed);
}
self.closed.read().await.clone()?;
match stream_opened_receiver.next().await {
Some(stream) => {
let sid = stream.sid;
debug!(?sid, ?self.remote_pid, "receive opened stream");
debug!(?sid, ?self.remote_pid, "Receive opened stream");
Ok(stream)
},
None => {
debug!(?self.remote_pid, "stream_opened_receiver failed, closing participant");
self.closed.store(true, Ordering::Relaxed);
Err(ParticipantError::ParticipantClosed)
*self.closed.write().await = Err(ParticipantError::ProtocolFailedUnrecoverable);
Err(ParticipantError::ProtocolFailedUnrecoverable)
},
}
}
/// disconnecting a `Participant` in an async way.
/// Use this rather than `Participant::Drop` if you want to close multiple
/// `Participants`.
///
/// This function will wait for all [`Streams`] to properly close, including
/// all messages to be sent before closing; if an error occurs with one
/// of the messages it is reported.
/// Except if the remote side already dropped the `Participant`
/// simultaneously, then messages won't be sent
///
/// There is NO `disconnected` function in `Participant`; if a `Participant`
/// is no longer reachable (e.g. because the network cable was unplugged) the
/// `Participant` will fail all actions, but needs to be manually
/// disconnected, using this function.
///
/// # Examples
/// ```rust
/// use futures::executor::block_on;
/// use veloren_network::{Network, Pid, ProtocolAddr};
///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on port `2030` TCP, print the Pid of connecting participants and close the connection.
/// let (network, f) = Network::new(Pid::new(), None);
/// std::thread::spawn(f);
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// network
/// .listen(ProtocolAddr::Tcp("0.0.0.0:2030".parse().unwrap()))
/// .await?;
/// # let keep_alive = remote.connect(ProtocolAddr::Tcp("0.0.0.0:2030".parse().unwrap())).await?;
/// while let Ok(participant) = network.connected().await {
/// println!("Participant connected: {}", participant.remote_pid());
/// participant.disconnect().await?;
/// # //skip test here as it would be an endless loop
/// # break;
/// }
/// # Ok(())
/// })
/// # }
/// ```
///
/// [`Streams`]: crate::api::Stream
pub async fn disconnect(self) -> Result<(), ParticipantError> {
// Mark the Participant as disconnected and ask the Scheduler to close it
let pid = self.remote_pid;
debug!(?pid, "Closing participant from network");
{
let mut lock = self.closed.write().await;
lock.clone()?;
*lock = Err(ParticipantError::ParticipantDisconnected);
}
//Streams will be closed by BParticipant
match self.a2s_disconnect_s.lock().await.take() {
Some(mut a2s_disconnect_s) => {
let (finished_sender, finished_receiver) = oneshot::channel();
// Participant is connecting to Scheduler here, not as usual
// Participant<->BParticipant
a2s_disconnect_s
.send((pid, finished_sender))
.await
.expect("Something is wrong in internal scheduler coding");
match finished_receiver.await {
Ok(Ok(())) => {
trace!(?pid, "Participant is now closed");
Ok(())
},
Ok(Err(e)) => {
trace!(
?e,
"Error occured during shutdown of participant and is propagated to \
User"
);
Err(ParticipantError::ProtocolFailedUnrecoverable)
},
Err(e) => {
//this is a bug, but as the Participant we can't destroy the network
error!(
?pid,
?e,
"Failed to get a message back from the scheduler, seems like the \
network is already closed"
);
Err(ParticipantError::ProtocolFailedUnrecoverable)
},
}
},
None => {
warn!(
"seems like you are trying to disconnecting a participant after the network \
was already dropped. It was already dropped with the network!"
);
Err(ParticipantError::ParticipantDisconnected)
},
}
}
@ -690,7 +674,7 @@ impl Stream {
///
/// # Example
/// ```
/// use veloren_network::{Network, Address, Pid};
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::{PROMISES_ORDERED, PROMISES_CONSISTENCY};
/// use futures::executor::block_on;
///
@ -701,8 +685,8 @@ impl Stream {
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// network.listen(Address::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
/// # let remote_p = remote.connect(Address::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
/// # // keep it alive
/// # let _stream_p = remote_p.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await?;
/// let participant_a = network.connected().await?;
@ -729,7 +713,7 @@ impl Stream {
///
/// # Example
/// ```rust
/// use veloren_network::{Network, Address, Pid, MessageBuffer};
/// use veloren_network::{Network, ProtocolAddr, Pid, MessageBuffer};
/// # use veloren_network::{PROMISES_ORDERED, PROMISES_CONSISTENCY};
/// use futures::executor::block_on;
/// use bincode;
@ -743,9 +727,9 @@ impl Stream {
/// # let (remote2, fr2) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr2);
/// block_on(async {
/// network.listen(Address::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # let remote1_p = remote1.connect(Address::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # let remote2_p = remote2.connect(Address::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # let remote1_p = remote1.connect(ProtocolAddr::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # let remote2_p = remote2.connect(ProtocolAddr::Tcp("127.0.0.1:2210".parse().unwrap())).await?;
/// # assert_eq!(remote1_p.remote_pid(), remote2_p.remote_pid());
/// # remote1_p.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await?;
/// # remote2_p.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await?;
@ -795,7 +779,7 @@ impl Stream {
///
/// # Example
/// ```
/// use veloren_network::{Network, Address, Pid};
/// use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::{PROMISES_ORDERED, PROMISES_CONSISTENCY};
/// use futures::executor::block_on;
///
@ -806,8 +790,8 @@ impl Stream {
/// # let (remote, fr) = Network::new(Pid::new(), None);
/// # std::thread::spawn(fr);
/// block_on(async {
/// network.listen(Address::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
/// # let remote_p = remote.connect(Address::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
/// network.listen(ProtocolAddr::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
/// # let mut stream_p = remote_p.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await?;
/// # stream_p.send("Hello World");
/// let participant_a = network.connected().await?;
@ -837,38 +821,67 @@ impl Stream {
}
}
///
impl core::cmp::PartialEq for Participant {
fn eq(&self, other: &Self) -> bool {
//don't check local_pid; 2 Participants from different networks should match
// if they are the "same"
self.remote_pid == other.remote_pid
}
}
impl Drop for Network {
fn drop(&mut self) {
let pid = self.local_pid;
debug!(?pid, "shutting down Network");
debug!(
debug!(?pid, "Shutting down Network");
trace!(
?pid,
"shutting down Participants of Network, while we still have metrics"
"Shutting down Participants of Network, while we still have metrics"
);
let mut finished_receiver_list = vec![];
task::block_on(async {
// we need to carefully shut down here! as otherwise we might call
// Participant::Drop with a2s_disconnect_s here which would open
// another task::block, which would panic! also i can't `.write` on
// `self.participants` as the `disconnect` fn needs it.
let mut participant_clone = self.participants().await;
for (_, p) in participant_clone.drain() {
if let Err(e) = self.disconnect(p).await {
error!(
?e,
"error while dropping network, the error occured when dropping a \
participant but can't be notified to the user any more"
);
// we MUST avoid nested block_on; it's good that Network::Drop no longer
// triggers Participant::Drop directly but just the BParticipant
for (remote_pid, a2s_disconnect_s) in
self.participant_disconnect_sender.write().await.drain()
{
match a2s_disconnect_s.lock().await.take() {
Some(mut a2s_disconnect_s) => {
trace!(?remote_pid, "Participants will be closed");
let (finished_sender, finished_receiver) = oneshot::channel();
finished_receiver_list.push((remote_pid, finished_receiver));
a2s_disconnect_s
.send((remote_pid, finished_sender))
.await
.expect(
"Scheduler is closed, but nobody other should be able to close it",
);
},
None => trace!(?remote_pid, "Participant already disconnected gracefully"),
}
}
//wait after close is requested for all
for (remote_pid, finished_receiver) in finished_receiver_list.drain(..) {
match finished_receiver.await {
Ok(Ok(())) => trace!(?remote_pid, "disconnect successful"),
Ok(Err(e)) => info!(?remote_pid, ?e, "unclean disconnect"),
Err(e) => warn!(
?remote_pid,
?e,
"Failed to get a message back from the scheduler, seems like the network \
is already closed"
),
}
}
self.participants.write().await.clear();
});
debug!(?pid, "shutting down Scheduler");
trace!(?pid, "Participants have shut down!");
trace!(?pid, "Shutting down Scheduler");
self.shutdown_sender
.take()
.unwrap()
.send(())
.expect("scheduler is closed, but nobody other should be able to close it");
debug!(?pid, "participants have shut down!");
.expect("Scheduler is closed, but nobody other should be able to close it");
debug!(?pid, "Network has shut down");
}
}
@ -877,42 +890,36 @@ impl Drop for Participant {
// ignore closed, as we need to send it even though we disconnected the
// participant from network
let pid = self.remote_pid;
debug!(?pid, "shutting down Participant");
match self.a2s_disconnect_s.take() {
None => debug!(
debug!(?pid, "Shutting down Participant");
match task::block_on(self.a2s_disconnect_s.lock()).take() {
None => trace!(
?pid,
"Participant has been shutdown cleanly, no further waiting is requiered!"
),
Some(mut a2s_disconnect_s) => {
debug!(
?pid,
"unclean shutdown detected, active waiting for client to be disconnected"
);
debug!(?pid, "Disconnect from Scheduler");
task::block_on(async {
let (finished_sender, finished_receiver) = oneshot::channel();
a2s_disconnect_s
.send((self.remote_pid, finished_sender))
.await
.expect("something is wrong in internal scheduler coding");
match finished_receiver.await {
Ok(Err(e)) => error!(
.expect("Something is wrong in internal scheduler coding");
if let Err(e) = finished_receiver
.await
.expect("Something is wrong in internal scheduler/participant coding")
{
error!(
?pid,
?e,
"Error while dropping the participant, couldn't send all outgoing \
messages, dropping remaining"
),
Err(e) => warn!(
?e,
"//TODO i dont know why the finish doesnt work, i normally would \
expect to have sended a return message from the participant... \
ignoring to not caue a panic for now, please fix me"
),
_ => (),
);
};
});
},
}
debug!(?pid, "network dropped");
debug!(?pid, "Participant dropped");
}
}
@ -922,7 +929,7 @@ impl Drop for Stream {
if !self.closed.load(Ordering::Relaxed) {
let sid = self.sid;
let pid = self.pid;
debug!(?pid, ?sid, "shutting down Stream");
debug!(?pid, ?sid, "Shutting down Stream");
if task::block_on(self.a2b_close_stream_s.take().unwrap().send(self.sid)).is_err() {
warn!(
"Other side got already dropped, probably due to timing, other side will \
@ -932,7 +939,7 @@ impl Drop for Stream {
} else {
let sid = self.sid;
let pid = self.pid;
debug!(?pid, ?sid, "not needed");
trace!(?pid, ?sid, "Stream Drop not needed");
}
}
}
@ -940,15 +947,10 @@ impl Drop for Stream {
impl std::fmt::Debug for Participant {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let status = if self.closed.load(Ordering::Relaxed) {
"[CLOSED]"
} else {
"[OPEN]"
};
write!(
f,
"Participant {{{} local_pid: {:?}, remote_pid: {:?} }}",
status, &self.local_pid, &self.remote_pid,
"Participant {{ local_pid: {:?}, remote_pid: {:?} }}",
&self.local_pid, &self.remote_pid,
)
}
}
@ -957,10 +959,6 @@ impl<T> From<crossbeam_channel::SendError<T>> for StreamError {
fn from(_err: crossbeam_channel::SendError<T>) -> Self { StreamError::StreamClosed }
}
impl<T> From<crossbeam_channel::SendError<T>> for ParticipantError {
fn from(_err: crossbeam_channel::SendError<T>) -> Self { ParticipantError::ParticipantClosed }
}
impl<T> From<crossbeam_channel::SendError<T>> for NetworkError {
fn from(_err: crossbeam_channel::SendError<T>) -> Self { NetworkError::NetworkClosed }
}
@ -969,26 +967,14 @@ impl From<std::option::NoneError> for StreamError {
fn from(_err: std::option::NoneError) -> Self { StreamError::StreamClosed }
}
impl From<std::option::NoneError> for ParticipantError {
fn from(_err: std::option::NoneError) -> Self { ParticipantError::ParticipantClosed }
}
impl From<std::option::NoneError> for NetworkError {
fn from(_err: std::option::NoneError) -> Self { NetworkError::NetworkClosed }
}
impl From<mpsc::SendError> for ParticipantError {
fn from(_err: mpsc::SendError) -> Self { ParticipantError::ParticipantClosed }
}
impl From<mpsc::SendError> for NetworkError {
fn from(_err: mpsc::SendError) -> Self { NetworkError::NetworkClosed }
}
impl From<oneshot::Canceled> for ParticipantError {
fn from(_err: oneshot::Canceled) -> Self { ParticipantError::ParticipantClosed }
}
impl From<oneshot::Canceled> for NetworkError {
fn from(_err: oneshot::Canceled) -> Self { NetworkError::NetworkClosed }
}
@ -1011,7 +997,10 @@ impl core::fmt::Display for StreamError {
impl core::fmt::Display for ParticipantError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
ParticipantError::ParticipantClosed => write!(f, "participant closed"),
            ParticipantError::ParticipantDisconnected => write!(f, "Participant disconnected"),
ParticipantError::ProtocolFailedUnrecoverable => {
write!(f, "underlying protocol failed unrecoverable")
},
}
}
}
@ -1019,10 +1008,9 @@ impl core::fmt::Display for ParticipantError {
impl core::fmt::Display for NetworkError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
NetworkError::NetworkClosed => write!(f, "network closed"),
NetworkError::ListenFailed(_) => write!(f, "listening failed"),
NetworkError::ConnectFailed(_) => write!(f, "connecting failed"),
NetworkError::GracefulDisconnectFailed(_) => write!(f, "graceful disconnect failed"),
NetworkError::NetworkClosed => write!(f, "Network closed"),
NetworkError::ListenFailed(_) => write!(f, "Listening failed"),
NetworkError::ConnectFailed(_) => write!(f, "Connecting failed"),
}
}
}

View File

@ -48,13 +48,13 @@ impl Channel {
//reapply leftovers from handshake
let cnt = leftover_cid_frame.len();
trace!(?self.cid, ?cnt, "reapplying leftovers");
trace!(?self.cid, ?cnt, "Reapplying leftovers");
for cid_frame in leftover_cid_frame.drain(..) {
w2c_cid_frame_s.send(cid_frame).await.unwrap();
}
trace!(?self.cid, ?cnt, "all leftovers reapplied");
trace!(?self.cid, ?cnt, "All leftovers reapplied");
trace!(?self.cid, "start up channel");
trace!(?self.cid, "Start up channel");
match protocol {
Protocols::Tcp(tcp) => {
futures::join!(
@ -70,7 +70,7 @@ impl Channel {
},
}
trace!(?self.cid, "shut down channel");
trace!(?self.cid, "Shut down channel");
}
}
@ -159,7 +159,7 @@ impl Handshake {
&self,
w2c_cid_frame_r: &mut mpsc::UnboundedReceiver<(Cid, Frame)>,
mut c2w_frame_s: mpsc::UnboundedSender<Frame>,
_read_stop_sender: oneshot::Sender<()>,
read_stop_sender: oneshot::Sender<()>,
) -> Result<(Pid, Sid, u128), ()> {
const ERR_S: &str = "Got A Raw Message, these are usually Debug Messages indicating that \
something went wrong on network layer and connection will be closed";
@ -170,7 +170,7 @@ impl Handshake {
self.send_handshake(&mut c2w_frame_s).await;
}
match w2c_cid_frame_r.next().await {
let r = match w2c_cid_frame_r.next().await {
Some((
_,
Frame::Handshake {
@ -178,33 +178,32 @@ impl Handshake {
version,
},
)) => {
trace!(?magic_number, ?version, "recv handshake");
trace!(?magic_number, ?version, "Recv handshake");
self.metrics
.frames_in_total
.with_label_values(&["", &cid_string, "Handshake"])
.inc();
if magic_number != VELOREN_MAGIC_NUMBER {
error!(?magic_number, "connection with invalid magic_number");
error!(?magic_number, "Connection with invalid magic_number");
#[cfg(debug_assertions)]
{
self.metrics
.frames_out_total
.with_label_values(&["", &cid_string, "Raw"])
.inc();
debug!("sending client instructions before killing");
debug!("Sending client instructions before killing");
c2w_frame_s
.send(Frame::Raw(Self::WRONG_NUMBER.to_vec()))
.await
.unwrap();
c2w_frame_s.send(Frame::Shutdown).await.unwrap();
}
return Err(());
}
if version != VELOREN_NETWORK_VERSION {
error!(?version, "connection with wrong network version");
Err(())
} else if version != VELOREN_NETWORK_VERSION {
error!(?version, "Connection with wrong network version");
#[cfg(debug_assertions)]
{
debug!("sending client instructions before killing");
debug!("Sending client instructions before killing");
self.metrics
.frames_out_total
.with_label_values(&["", &cid_string, "Raw"])
@ -225,63 +224,19 @@ impl Handshake {
.unwrap();
c2w_frame_s.send(Frame::Shutdown {}).await.unwrap();
}
return Err(());
}
debug!("handshake completed");
if self.init_handshake {
self.send_init(&mut c2w_frame_s, &pid_string).await;
Err(())
} else {
self.send_handshake(&mut c2w_frame_s).await;
debug!("Handshake completed");
if self.init_handshake {
self.send_init(&mut c2w_frame_s, &pid_string).await;
} else {
self.send_handshake(&mut c2w_frame_s).await;
}
Ok(())
}
},
Some((_, Frame::Shutdown)) => {
info!("shutdown signal received");
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "Shutdown"])
.inc();
return Err(());
},
Some((_, Frame::Raw(bytes))) => {
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "Raw"])
.inc();
match std::str::from_utf8(bytes.as_slice()) {
Ok(string) => error!(?string, ERR_S),
_ => error!(?bytes, ERR_S),
}
return Err(());
},
Some((_, frame)) => {
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, frame.get_string()])
.inc();
return Err(());
},
None => return Err(()),
};
match w2c_cid_frame_r.next().await {
Some((_, Frame::Init { pid, secret })) => {
debug!(?pid, "Participant send their ID");
pid_string = pid.to_string();
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "ParticipantId"])
.inc();
let stream_id_offset = if self.init_handshake {
STREAM_ID_OFFSET1
} else {
self.send_init(&mut c2w_frame_s, &pid_string).await;
STREAM_ID_OFFSET2
};
info!(?pid, "this Handshake is now configured!");
Ok((pid, stream_id_offset, secret))
},
Some((_, Frame::Shutdown)) => {
info!("shutdown signal received");
info!("Shutdown signal received");
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "Shutdown"])
@ -307,7 +262,73 @@ impl Handshake {
Err(())
},
None => Err(()),
};
if let Err(()) = r {
if let Err(e) = read_stop_sender.send(()) {
trace!(
?e,
"couldn't stop protocol, probably it encountered a Protocol Stop and closed \
itself already, which is fine"
);
}
return Err(());
}
let r = match w2c_cid_frame_r.next().await {
Some((_, Frame::Init { pid, secret })) => {
debug!(?pid, "Participant send their ID");
pid_string = pid.to_string();
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "ParticipantId"])
.inc();
let stream_id_offset = if self.init_handshake {
STREAM_ID_OFFSET1
} else {
self.send_init(&mut c2w_frame_s, &pid_string).await;
STREAM_ID_OFFSET2
};
info!(?pid, "This Handshake is now configured!");
Ok((pid, stream_id_offset, secret))
},
Some((_, Frame::Shutdown)) => {
info!("Shutdown signal received");
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "Shutdown"])
.inc();
Err(())
},
Some((_, Frame::Raw(bytes))) => {
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, "Raw"])
.inc();
match std::str::from_utf8(bytes.as_slice()) {
Ok(string) => error!(?string, ERR_S),
_ => error!(?bytes, ERR_S),
}
Err(())
},
Some((_, frame)) => {
self.metrics
.frames_in_total
.with_label_values(&[&pid_string, &cid_string, frame.get_string()])
.inc();
Err(())
},
None => Err(()),
};
if r.is_err() {
if let Err(e) = read_stop_sender.send(()) {
trace!(
?e,
"couldn't stop protocol, probably it encountered a Protocol Stop and closed \
itself already, which is fine"
);
}
}
r
}
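
For orientation, the exchange above is two round trips: first `Frame::Handshake { magic_number, version }` is validated, then `Frame::Init { pid, secret }` assigns the stream-id offset. A condensed sketch of the first validation step (the constant values are assumptions, not taken from this diff):

```rust
// Sketch only: mirrors the two checks performed on the first frame above.
const VELOREN_MAGIC_NUMBER: [u8; 7] = *b"VELOREN"; // assumed value
const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 2, 0]; // assumed value

fn validate_handshake(magic_number: [u8; 7], version: [u32; 3]) -> Result<(), ()> {
    if magic_number != VELOREN_MAGIC_NUMBER {
        return Err(()); // peer does not speak this protocol at all
    }
    if version != VELOREN_NETWORK_VERSION {
        return Err(()); // same protocol, incompatible version
    }
    Ok(())
}
```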
async fn send_handshake(&self, c2w_frame_s: &mut mpsc::UnboundedSender<Frame>) {

View File

@ -14,16 +14,16 @@
//! struct [`Network`] once with a new [`Pid`]. The Pid is necessary to identify
//! other [`Networks`] over the network protocols (e.g. TCP, UDP)
//!
//! To connect to another application, you must know it's [`Address`]. One side
//! will call [`connect`], the other [`connected`]. If successfull both
//! applications will now get a [`Arc<Participant>`].
//! To connect to another application, you must know its [`ProtocolAddr`]. One
//! side will call [`connect`], the other [`connected`]. If successful, both
//! applications will now get a [`Participant`].
//!
//! This [`Participant`] represents the connection between those two applications
//! over the respective [`Address`] and with it the choosen network protocol.
//! However messages can't be send directly via [`Participants`], instead you
//! must open a [`Stream`] on it. Like above, one side has to call [`open`], the
//! other [`opened`]. [`Streams`] can have a different priority and
//! [`Promises`].
//! over the respective [`ProtocolAddr`] and, with it, the chosen network
//! protocol. However, messages can't be sent directly via [`Participants`];
//! instead you must open a [`Stream`] on it. Like above, one side has to call
//! [`open`], the other [`opened`]. [`Streams`] can have a different priority
//! and [`Promises`].
//!
//! You can now use the [`Stream`] to [`send`] and [`recv`] in both directions.
//! You can send all kinds of messages that implement [`serde`].
@ -40,7 +40,7 @@
//! ```rust
//! use async_std::task::sleep;
//! use futures::{executor::block_on, join};
//! use veloren_network::{Address, Network, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
//! use veloren_network::{Network, Pid, ProtocolAddr, PROMISES_CONSISTENCY, PROMISES_ORDERED};
//!
//! // Client
//! async fn client() -> std::result::Result<(), Box<dyn std::error::Error>> {
@ -48,7 +48,7 @@
//! let (client_network, f) = Network::new(Pid::new(), None);
//! std::thread::spawn(f);
//! let server = client_network
//! .connect(Address::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .connect(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .await?;
//! let mut stream = server
//! .open(10, PROMISES_ORDERED | PROMISES_CONSISTENCY)
@ -62,12 +62,12 @@
//! let (server_network, f) = Network::new(Pid::new(), None);
//! std::thread::spawn(f);
//! server_network
//! .listen(Address::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .listen(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .await?;
//! let client = server_network.connected().await?;
//! let mut stream = client.opened().await?;
//! let msg: String = stream.recv().await?;
//! println!("got message: {}", msg);
//! println!("Got message: {}", msg);
//! assert_eq!(msg, "Hello World");
//! Ok(())
//! }
@ -86,7 +86,6 @@
//! [`Networks`]: crate::api::Network
//! [`connect`]: crate::api::Network::connect
//! [`connected`]: crate::api::Network::connected
//! [`Arc<Participant>`]: crate::api::Participant
//! [`Participant`]: crate::api::Participant
//! [`Participants`]: crate::api::Participant
//! [`open`]: crate::api::Participant::open
@ -96,7 +95,7 @@
//! [`send`]: crate::api::Stream::send
//! [`recv`]: crate::api::Stream::recv
//! [`Pid`]: crate::types::Pid
//! [`Address`]: crate::api::Address
//! [`ProtocolAddr`]: crate::api::ProtocolAddr
//! [`Promises`]: crate::types::Promises
mod api;
@ -110,7 +109,9 @@ mod scheduler;
#[macro_use]
mod types;
pub use api::{Address, Network, NetworkError, Participant, ParticipantError, Stream, StreamError};
pub use api::{
Network, NetworkError, Participant, ParticipantError, ProtocolAddr, Stream, StreamError,
};
pub use message::MessageBuffer;
pub use types::{
Pid, Promises, PROMISES_COMPRESSED, PROMISES_CONSISTENCY, PROMISES_ENCRYPTED,

View File

@ -1,6 +1,6 @@
use serde::{de::DeserializeOwned, Serialize};
//use std::collections::VecDeque;
use crate::types::{Mid, Sid};
use crate::types::{Frame, Mid, Sid};
use std::{io, sync::Arc};
//Todo: Evaluate switching to VecDeque for quickly adding and removing data
@ -36,19 +36,54 @@ pub(crate) struct IncomingMessage {
pub(crate) fn serialize<M: Serialize>(message: &M) -> MessageBuffer {
//this will never fail: https://docs.rs/bincode/0.8.0/bincode/fn.serialize.html
let writer = bincode::serialize(message).unwrap();
MessageBuffer { data: writer }
MessageBuffer {
data: lz4_compress::compress(&writer),
}
}
//pub(crate) fn deserialize<M: DeserializeOwned>(buffer: MessageBuffer) ->
// std::Result<M, std::Box<bincode::error::bincode::ErrorKind>> {
pub(crate) fn deserialize<M: DeserializeOwned>(buffer: MessageBuffer) -> bincode::Result<M> {
let span = buffer.data;
let span = lz4_compress::decompress(&buffer.data)
.expect("lz4 decompression failed, failed to deserialze");
    //this might fail if you choose the wrong type for M; in that case probably X
    // got transferred while you assumed Y. This usually means your application
    // logic is wrong, e.g. you expect a String but just get a u8.
bincode::deserialize(span.as_slice())
}
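
With this change every message is bincode-encoded and then lz4-compressed before it is framed, and the receive path reverses both steps. The full round trip, as a standalone sketch:

```rust
use serde::{de::DeserializeOwned, Serialize};

// Sketch of the message round trip introduced above:
// serialize -> compress on send, decompress -> deserialize on receive.
fn roundtrip<M: Serialize + DeserializeOwned>(message: &M) -> M {
    let raw = bincode::serialize(message).unwrap(); // never fails for serde types
    let wire = lz4_compress::compress(&raw); // this is what MessageBuffer carries
    let raw2 = lz4_compress::decompress(&wire).unwrap();
    bincode::deserialize(&raw2).unwrap()
}
```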
impl OutgoingMessage {
pub(crate) const FRAME_DATA_SIZE: u64 = 1400;
    /// Returns `true` once the whole message has been moved into frames
pub(crate) fn fill_next<E: Extend<(Sid, Frame)>>(
&mut self,
msg_sid: Sid,
frames: &mut E,
) -> bool {
let to_send = std::cmp::min(
self.buffer.data[self.cursor as usize..].len() as u64,
Self::FRAME_DATA_SIZE,
);
if to_send > 0 {
if self.cursor == 0 {
frames.extend(std::iter::once((msg_sid, Frame::DataHeader {
mid: self.mid,
sid: self.sid,
length: self.buffer.data.len() as u64,
})));
}
frames.extend(std::iter::once((msg_sid, Frame::Data {
mid: self.mid,
start: self.cursor,
data: self.buffer.data[self.cursor as usize..][..to_send as usize].to_vec(),
})));
};
self.cursor += to_send;
self.cursor >= self.buffer.data.len() as u64
}
}
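
`fill_next` emits a single `DataHeader` frame the first time it is called for a message and afterwards at most `FRAME_DATA_SIZE` payload bytes per `Data` frame, advancing `cursor` until the buffer is exhausted. The same chunking logic, written standalone for clarity (a sketch, not the actual API):

```rust
// Sketch: walk a cursor through the payload in at-most-1400-byte steps,
// producing the (start, chunk) pairs that become Frame::Data above.
const FRAME_DATA_SIZE: usize = 1400;

fn data_chunks(data: &[u8]) -> Vec<(u64, &[u8])> {
    let mut frames = Vec::new();
    let mut cursor = 0usize;
    while cursor < data.len() {
        let to_send = std::cmp::min(data.len() - cursor, FRAME_DATA_SIZE);
        frames.push((cursor as u64, &data[cursor..cursor + to_send]));
        cursor += to_send;
    }
    frames
}
```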
/// Wouldn't trust this much, but it's fine for tests
pub(crate) fn partial_eq_io_error(first: &io::Error, second: &io::Error) -> bool {
if let Some(f) = first.raw_os_error() {
@ -134,13 +169,11 @@ mod tests {
fn serialize_test() {
let msg = "abc";
let mb = serialize(&msg);
assert_eq!(mb.data.len(), 11);
assert_eq!(mb.data[0], 3);
assert_eq!(mb.data[1], 0);
assert_eq!(mb.data[7], 0);
assert_eq!(mb.data[8], b'a');
assert_eq!(mb.data[8], 97);
assert_eq!(mb.data[9], b'b');
assert_eq!(mb.data[10], b'c');
assert_eq!(mb.data.len(), 9);
assert_eq!(mb.data[0], 34);
assert_eq!(mb.data[1], 3);
assert_eq!(mb.data[6], b'a');
assert_eq!(mb.data[7], b'b');
assert_eq!(mb.data[8], b'c');
}
}

View File

@ -50,50 +50,50 @@ impl NetworkMetrics {
let listen_requests_total = IntCounterVec::new(
Opts::new(
"listen_requests_total",
"shows the number of listen requests to the scheduler",
"Shows the number of listen requests to the scheduler",
),
&["protocol"],
)?;
let connect_requests_total = IntCounterVec::new(
Opts::new(
"connect_requests_total",
"shows the number of connect requests to the scheduler",
"Shows the number of connect requests to the scheduler",
),
&["protocol"],
)?;
let participants_connected_total = IntCounter::with_opts(Opts::new(
"participants_connected_total",
"shows the number of participants connected to the network",
"Shows the number of participants connected to the network",
))?;
let participants_disconnected_total = IntCounter::with_opts(Opts::new(
"participants_disconnected_total",
"shows the number of participants disconnected to the network",
"Shows the number of participants disconnected to the network",
))?;
let channels_connected_total = IntCounterVec::new(
Opts::new(
"channels_connected_total",
"number of all channels currently connected on the network",
"Number of all channels currently connected on the network",
),
&["participant"],
)?;
let channels_disconnected_total = IntCounterVec::new(
Opts::new(
"channels_disconnected_total",
"number of all channels currently disconnected on the network",
"Number of all channels currently disconnected on the network",
),
&["participant"],
)?;
let streams_opened_total = IntCounterVec::new(
Opts::new(
"streams_opened_total",
"number of all streams currently open on the network",
"Number of all streams currently open on the network",
),
&["participant"],
)?;
let streams_closed_total = IntCounterVec::new(
Opts::new(
"streams_closed_total",
"number of all streams currently open on the network",
"Number of all streams currently open on the network",
),
&["participant"],
)?;
@ -112,42 +112,42 @@ impl NetworkMetrics {
let frames_out_total = IntCounterVec::new(
Opts::new(
"frames_out_total",
"number of all frames send per channel, at the channel level",
"Number of all frames send per channel, at the channel level",
),
&["participant", "channel", "frametype"],
)?;
let frames_in_total = IntCounterVec::new(
Opts::new(
"frames_in_total",
"number of all frames received per channel, at the channel level",
"Number of all frames received per channel, at the channel level",
),
&["participant", "channel", "frametype"],
)?;
let frames_wire_out_total = IntCounterVec::new(
Opts::new(
"frames_wire_out_total",
"number of all frames send per channel, at the protocol level",
"Number of all frames send per channel, at the protocol level",
),
&["channel", "frametype"],
)?;
let frames_wire_in_total = IntCounterVec::new(
Opts::new(
"frames_wire_in_total",
"number of all frames received per channel, at the protocol level",
"Number of all frames received per channel, at the protocol level",
),
&["channel", "frametype"],
)?;
let wire_out_throughput = IntCounterVec::new(
Opts::new(
"wire_out_throughput",
"throupgput of all data frames send per channel, at the protocol level",
"Throupgput of all data frames send per channel, at the protocol level",
),
&["channel"],
)?;
let wire_in_throughput = IntCounterVec::new(
Opts::new(
"wire_in_throughput",
"throupgput of all data frames send per channel, at the protocol level",
"Throupgput of all data frames send per channel, at the protocol level",
),
&["channel"],
)?;
@ -155,7 +155,7 @@ impl NetworkMetrics {
let message_out_total = IntCounterVec::new(
Opts::new(
"message_out_total",
"number of messages send by streams on the network",
"Number of messages send by streams on the network",
),
&["participant", "stream"],
)?;
@ -163,28 +163,28 @@ impl NetworkMetrics {
let message_out_throughput = IntCounterVec::new(
Opts::new(
"message_out_throughput",
"throughput of messages send by streams on the network",
"Throughput of messages send by streams on the network",
),
&["participant", "stream"],
)?;
let queued_count = IntGaugeVec::new(
Opts::new(
"queued_count",
"queued number of messages by participant on the network",
"Queued number of messages by participant on the network",
),
&["channel"],
)?;
let queued_bytes = IntGaugeVec::new(
Opts::new(
"queued_bytes",
"queued bytes of messages by participant on the network",
"Queued bytes of messages by participant on the network",
),
&["channel"],
)?;
let participants_ping = IntGaugeVec::new(
Opts::new(
"participants_ping",
"ping time to participants on the network",
"Ping time to participants on the network",
),
&["channel"],
)?;
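
All of these metrics follow the same prometheus pattern: build the collector from an `Opts` (name plus help text) and a label list, then register it. A minimal sketch of one such registration:

```rust
use prometheus::{IntCounterVec, Opts, Registry};

// Sketch: create and register one labelled counter, as done for each
// metric above.
fn register_example(registry: &Registry) -> Result<IntCounterVec, prometheus::Error> {
    let counter = IntCounterVec::new(
        Opts::new("example_total", "Number of example events on the network"),
        &["participant"],
    )?;
    registry.register(Box::new(counter.clone()))?;
    Ok(counter)
}
```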

View File

@ -1,5 +1,5 @@
use crate::{
api::Stream,
api::{ParticipantError, Stream},
channel::Channel,
message::{IncomingMessage, MessageBuffer, OutgoingMessage},
metrics::{NetworkMetrics, PidCidFrameCache},
@ -60,6 +60,7 @@ pub struct BParticipant {
offset_sid: Sid,
channels: Arc<RwLock<Vec<ChannelInfo>>>,
streams: RwLock<HashMap<Sid, StreamInfo>>,
api_participant_closed: Arc<RwLock<Result<(), ParticipantError>>>,
running_mgr: AtomicUsize,
run_channels: Option<ControlChannels>,
metrics: Arc<NetworkMetrics>,
@ -78,6 +79,7 @@ impl BParticipant {
mpsc::UnboundedReceiver<Stream>,
mpsc::UnboundedSender<(Cid, Sid, Protocols, Vec<(Cid, Frame)>, oneshot::Sender<()>)>,
oneshot::Sender<oneshot::Sender<async_std::io::Result<()>>>,
Arc<RwLock<Result<(), ParticipantError>>>,
) {
let (a2b_steam_open_s, a2b_steam_open_r) =
mpsc::unbounded::<(Prio, Promises, oneshot::Sender<Stream>)>();
@ -95,6 +97,8 @@ impl BParticipant {
s2b_shutdown_bparticipant_r,
});
let api_participant_closed = Arc::new(RwLock::new(Ok(())));
(
Self {
remote_pid,
@ -102,6 +106,7 @@ impl BParticipant {
offset_sid,
channels: Arc::new(RwLock::new(vec![])),
streams: RwLock::new(HashMap::new()),
api_participant_closed: api_participant_closed.clone(),
running_mgr: AtomicUsize::new(0),
run_channels,
metrics,
@ -111,6 +116,7 @@ impl BParticipant {
b2a_stream_opened_r,
s2b_create_channel_s,
s2b_shutdown_bparticipant_s,
api_participant_closed,
)
}
@ -155,11 +161,11 @@ impl BParticipant {
self.participant_shutdown_mgr(
run_channels.s2b_shutdown_bparticipant_r,
b2b_prios_flushed_r,
vec!(
vec![
shutdown_send_mgr_sender,
shutdown_open_mgr_sender,
shutdown_stream_close_mgr_sender
)
shutdown_stream_close_mgr_sender,
],
),
);
}
@ -178,16 +184,15 @@ impl BParticipant {
const FRAMES_PER_TICK: usize = 10005;
self.running_mgr.fetch_add(1, Ordering::Relaxed);
let mut closing_up = false;
trace!("start send_mgr");
trace!("Start send_mgr");
let mut send_cache =
PidCidFrameCache::new(self.metrics.frames_out_total.clone(), self.remote_pid);
//while !self.closed.load(Ordering::Relaxed) {
loop {
let mut frames = VecDeque::new();
prios.fill_frames(FRAMES_PER_TICK, &mut frames).await;
let len = frames.len();
if len > 0 {
trace!("tick {}", len);
trace!("Tick {}", len);
}
for (_, frame) in frames {
self.send_frame(frame, &mut send_cache).await;
@ -207,7 +212,7 @@ impl BParticipant {
closing_up = true;
}
}
trace!("stop send_mgr");
trace!("Stop send_mgr");
b2b_prios_flushed_s.send(()).unwrap();
self.running_mgr.fetch_sub(1, Ordering::Relaxed);
}
@ -233,21 +238,24 @@ impl BParticipant {
if let Err(e) = ci.b2w_frame_s.send(frame).await {
warn!(
?e,
"the channel got closed unexpectedly, cleaning it up now."
"The channel got closed unexpectedly, cleaning it up now."
);
let ci = lock.remove(0);
if let Err(e) = ci.b2r_read_shutdown.send(()) {
debug!(
?e,
"error shutdowning channel, which is prob fine as we detected it to no \
"Error shutdowning channel, which is prob fine as we detected it to no \
longer work in the first place"
);
};
//TODO
warn!(
//TODO FIXME tags: takeover channel multiple
info!(
"FIXME: the frame is actually drop. which is fine for now as the participant \
will be closed, but not if we do channel-takeover"
);
                    //TEMP FIX: as we don't have channel takeover yet, drop the whole BParticipant
self.close_api(ParticipantError::ProtocolFailedUnrecoverable)
.await;
false
} else {
true
@ -259,7 +267,7 @@ impl BParticipant {
guard.0 = now;
let occurrences = guard.1 + 1;
guard.1 = 0;
error!(?occurrences, "participant has no channel to communicate on");
error!(?occurrences, "Participant has no channel to communicate on");
} else {
guard.1 += 1;
}
@ -275,7 +283,7 @@ impl BParticipant {
a2p_msg_s: crossbeam_channel::Sender<(Prio, Sid, OutgoingMessage)>,
) {
self.running_mgr.fetch_add(1, Ordering::Relaxed);
trace!("start handle_frames_mgr");
trace!("Start handle_frames_mgr");
let mut messages = HashMap::new();
let mut dropped_instant = Instant::now();
let mut dropped_cnt = 0u64;
@ -299,7 +307,7 @@ impl BParticipant {
.create_stream(sid, prio, promises, a2p_msg_s, &a2b_close_stream_s)
.await;
b2a_stream_opened_s.send(stream).await.unwrap();
trace!("opened frame from remote");
trace!("Opened frame from remote");
},
Frame::CloseStream { sid } => {
                    // Closing is realized by setting an AtomicBool to true; however, we also have a
@ -309,7 +317,7 @@ impl BParticipant {
// be dropped... from remote, notify local
trace!(
?sid,
"got remote request to close a stream, without flushing it, local \
"Got remote request to close a stream, without flushing it, local \
messages are dropped"
);
// no wait for flush here, as the remote wouldn't care anyway.
@ -319,11 +327,11 @@ impl BParticipant {
.with_label_values(&[&self.remote_pid_string])
.inc();
si.closed.store(true, Ordering::Relaxed);
trace!(?sid, "closed stream from remote");
trace!(?sid, "Closed stream from remote");
} else {
warn!(
?sid,
"couldn't find stream to close, either this is a duplicate message, \
"Couldn't find stream to close, either this is a duplicate message, \
or the local copy of the Stream got closed simultaniously"
);
}
@ -356,7 +364,7 @@ impl BParticipant {
warn!(
?e,
?mid,
"dropping message, as streams seem to be in act of beeing \
"Dropping message, as streams seem to be in act of beeing \
dropped right now"
);
}
@ -368,7 +376,7 @@ impl BParticipant {
{
warn!(
?dropped_cnt,
"dropping multiple messages as stream no longer seems to \
"Dropping multiple messages as stream no longer seems to \
exist because it was dropped probably."
);
dropped_cnt = 0;
@ -380,21 +388,22 @@ impl BParticipant {
}
}
},
Frame::Shutdown => error!(
"Somehow this Shutdown signal got here, i should probably handle it. To not \
crash let me just put this message here"
),
f => unreachable!("never reaches frame!: {:?}", f),
Frame::Shutdown => {
debug!("Shutdown received from remote side");
self.close_api(ParticipantError::ParticipantDisconnected)
.await;
},
f => unreachable!("Frame should never reache participant!: {:?}", f),
}
}
if dropped_cnt > 0 {
warn!(
?dropped_cnt,
"dropping multiple messages as stream no longer seems to exist because it was \
"Dropping multiple messages as stream no longer seems to exist because it was \
dropped probably."
);
}
trace!("stop handle_frames_mgr");
trace!("Stop handle_frames_mgr");
self.running_mgr.fetch_sub(1, Ordering::Relaxed);
}
@ -411,7 +420,7 @@ impl BParticipant {
w2b_frames_s: mpsc::UnboundedSender<(Cid, Frame)>,
) {
self.running_mgr.fetch_add(1, Ordering::Relaxed);
trace!("start create_channel_mgr");
trace!("Start create_channel_mgr");
s2b_create_channel_r
.for_each_concurrent(
None,
@ -433,7 +442,7 @@ impl BParticipant {
.channels_connected_total
.with_label_values(&[&self.remote_pid_string])
.inc();
trace!(?cid, "running channel in participant");
trace!(?cid, "Running channel in participant");
channel
.run(protocol, w2b_frames_s, leftover_cid_frame)
.await;
@ -441,12 +450,12 @@ impl BParticipant {
.channels_disconnected_total
.with_label_values(&[&self.remote_pid_string])
.inc();
trace!(?cid, "channel got closed");
trace!(?cid, "Channel got closed");
}
},
)
.await;
trace!("stop create_channel_mgr");
trace!("Stop create_channel_mgr");
self.running_mgr.fetch_sub(1, Ordering::Relaxed);
}
@ -458,7 +467,7 @@ impl BParticipant {
shutdown_open_mgr_receiver: oneshot::Receiver<()>,
) {
self.running_mgr.fetch_add(1, Ordering::Relaxed);
trace!("start open_mgr");
trace!("Start open_mgr");
let mut stream_ids = self.offset_sid;
let mut send_cache =
PidCidFrameCache::new(self.metrics.frames_out_total.clone(), self.remote_pid);
@ -468,7 +477,7 @@ impl BParticipant {
next = a2b_steam_open_r.next().fuse() => next,
_ = shutdown_open_mgr_receiver => None,
} {
debug!(?prio, ?promises, "got request to open a new steam");
debug!(?prio, ?promises, "Got request to open a new steam");
let a2p_msg_s = a2p_msg_s.clone();
let sid = stream_ids;
let stream = self
@ -491,7 +500,7 @@ impl BParticipant {
stream_ids += Sid::from(1);
}
}
trace!("stop open_mgr");
trace!("Stop open_mgr");
self.running_mgr.fetch_sub(1, Ordering::Relaxed);
}
@ -499,52 +508,61 @@ impl BParticipant {
    /// Wait for everything to go right! Then return: 1. Shutting down
    /// Streams for API and end user! 2. Wait for all "prio queued" messages
    /// to be sent. 3. Send Stream
    /// If BParticipant kills itself, managers stay active till this function is
    /// called by the API to get the result status
async fn participant_shutdown_mgr(
&self,
s2b_shutdown_bparticipant_r: oneshot::Receiver<oneshot::Sender<async_std::io::Result<()>>>,
b2b_prios_flushed_r: oneshot::Receiver<()>,
mut to_shutdown: Vec<oneshot::Sender<()>>,
mut mgr_to_shutdown: Vec<oneshot::Sender<()>>,
) {
self.running_mgr.fetch_add(1, Ordering::Relaxed);
trace!("start participant_shutdown_mgr");
trace!("Start participant_shutdown_mgr");
let sender = s2b_shutdown_bparticipant_r.await.unwrap();
debug!("closing all managers");
for sender in to_shutdown.drain(..) {
        //Todo: isn't ParticipantDisconnected useless, as the API is currently waiting for a
        // callback?
self.close_api(ParticipantError::ParticipantDisconnected)
.await;
debug!("Closing all managers");
for sender in mgr_to_shutdown.drain(..) {
if let Err(e) = sender.send(()) {
warn!(?e, "manager seems to be closed already, weird, maybe a bug");
warn!(?e, "Manager seems to be closed already, weird, maybe a bug");
};
}
debug!("closing all streams");
for (sid, si) in self.streams.write().await.drain() {
trace!(?sid, "shutting down Stream");
si.closed.store(true, Ordering::Relaxed);
}
debug!("waiting for prios to be flushed");
b2b_prios_flushed_r.await.unwrap();
debug!("closing all channels");
debug!("Closing all channels, after flushed prios");
for ci in self.channels.write().await.drain(..) {
if let Err(e) = ci.b2r_read_shutdown.send(()) {
debug!(?e, ?ci.cid, "seems like this read protocol got already dropped by closing the Stream itself, just ignoring the fact");
debug!(?e, ?ci.cid, "Seems like this read protocol got already dropped by closing the Stream itself, just ignoring the fact");
};
}
        //Wait for the other BParticipant managers to close via AtomicUsize
const SLEEP_TIME: Duration = Duration::from_millis(5);
const ALLOWED_MANAGER: usize = 1;
async_std::task::sleep(SLEEP_TIME).await;
let mut i: u32 = 1;
while self.running_mgr.load(Ordering::Relaxed) > 1 {
while self.running_mgr.load(Ordering::Relaxed) > ALLOWED_MANAGER {
i += 1;
if i.rem_euclid(10) == 1 {
trace!(
"waiting for bparticipant mgr to shut down, remaining {}",
self.running_mgr.load(Ordering::Relaxed) - 1
?ALLOWED_MANAGER,
"Waiting for bparticipant mgr to shut down, remaining {}",
self.running_mgr.load(Ordering::Relaxed) - ALLOWED_MANAGER
);
}
async_std::task::sleep(SLEEP_TIME * i).await;
}
trace!("all bparticipant mgr (except me) are shut down now");
trace!("All BParticipant mgr (except me) are shut down now");
self.metrics.participants_disconnected_total.inc();
debug!("BParticipant close done");
sender.send(Ok(())).unwrap();
trace!("stop participant_shutdown_mgr");
trace!("Stop participant_shutdown_mgr");
self.running_mgr.fetch_sub(1, Ordering::Relaxed);
}
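
The wait at the end of `participant_shutdown_mgr` is a plain poll of an atomic manager counter with a growing sleep, rather than a channel per manager. Its core, reduced to a sketch:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;

// Sketch: block until every manager except the caller has decremented
// the shared counter, sleeping longer on each retry.
async fn wait_for_managers(running_mgr: &AtomicUsize) {
    const SLEEP_TIME: Duration = Duration::from_millis(5);
    const ALLOWED_MANAGER: usize = 1; // this manager itself is still running
    let mut i: u32 = 1;
    while running_mgr.load(Ordering::Relaxed) > ALLOWED_MANAGER {
        i += 1;
        async_std::task::sleep(SLEEP_TIME * i).await; // linear back-off
    }
}
```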
@ -555,7 +573,7 @@ impl BParticipant {
b2p_notify_empty_stream_s: crossbeam_channel::Sender<(Sid, oneshot::Sender<()>)>,
) {
self.running_mgr.fetch_add(1, Ordering::Relaxed);
trace!("start stream_close_mgr");
trace!("Start stream_close_mgr");
let mut send_cache =
PidCidFrameCache::new(self.metrics.frames_out_total.clone(), self.remote_pid);
let mut shutdown_stream_close_mgr_receiver = shutdown_stream_close_mgr_receiver.fuse();
@ -567,7 +585,7 @@ impl BParticipant {
} {
//TODO: make this concurrent!
//TODO: Performance, closing is slow!
trace!(?sid, "got request from api to close steam");
trace!(?sid, "Got request from api to close steam");
//This needs to first stop clients from sending any more.
            //Then it will wait for all pending messages (in prio) to be sent to the
            // protocol. After this happened the stream is closed
@ -575,24 +593,24 @@ impl BParticipant {
            // frame! If we sent it before, all follow-up messages couldn't
            // be handled at the remote side.
trace!(?sid, "stopping api to use this stream");
trace!(?sid, "Stopping api to use this stream");
match self.streams.read().await.get(&sid) {
Some(si) => {
si.closed.store(true, Ordering::Relaxed);
},
None => warn!("couldn't find the stream, might be simulanious close from remote"),
None => warn!("Couldn't find the stream, might be simulanious close from remote"),
}
//TODO: what happens if RIGHT NOW the remote sends a StreamClose and this
            // stream gets closed and removed? RACE CONDITION
trace!(?sid, "wait for stream to be flushed");
trace!(?sid, "Wait for stream to be flushed");
let (s2b_stream_finished_closed_s, s2b_stream_finished_closed_r) = oneshot::channel();
b2p_notify_empty_stream_s
.send((sid, s2b_stream_finished_closed_s))
.unwrap();
s2b_stream_finished_closed_r.await.unwrap();
trace!(?sid, "stream was successfully flushed");
trace!(?sid, "Stream was successfully flushed");
self.metrics
.streams_closed_total
.with_label_values(&[&self.remote_pid_string])
@ -602,7 +620,7 @@ impl BParticipant {
self.send_frame(Frame::CloseStream { sid }, &mut send_cache)
.await;
}
trace!("stop stream_close_mgr");
trace!("Stop stream_close_mgr");
self.running_mgr.fetch_sub(1, Ordering::Relaxed);
}
@ -638,9 +656,13 @@ impl BParticipant {
)
}
/*
async fn close_participant(&self) {
    /// Close all streams and record the error for the API side
async fn close_api(&self, err: ParticipantError) {
*self.api_participant_closed.write().await = Err(err);
debug!("Closing all streams");
for (sid, si) in self.streams.write().await.drain() {
trace!(?sid, "Shutting down Stream");
si.closed.store(true, Ordering::Relaxed);
}
}
*/
}
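
`api_participant_closed` is the new channel-free way for the background participant to report failure to the API half: it stores the error in a shared lock that the API can check before each operation. A hypothetical caller-side check (the lock type is an assumption based on the surrounding code):

```rust
use async_std::sync::RwLock; // assumed async RwLock, matching the usage above
use std::sync::Arc;

// Sketch: the API half reads the shared flag and propagates the stored
// error if the background participant has closed it via close_api.
async fn ensure_open<E: Clone>(closed: &Arc<RwLock<Result<(), E>>>) -> Result<(), E> {
    closed.read().await.clone()
}
```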

View File

@ -40,7 +40,6 @@ pub(crate) struct PrioManager {
}
impl PrioManager {
const FRAME_DATA_SIZE: u64 = 1400;
const PRIOS: [u32; PRIO_MAX] = [
100, 115, 132, 152, 174, 200, 230, 264, 303, 348, 400, 459, 528, 606, 696, 800, 919, 1056,
1213, 1393, 1600, 1838, 2111, 2425, 2786, 3200, 3676, 4222, 4850, 5572, 6400, 7352, 8445,
@ -201,34 +200,6 @@ impl PrioManager {
.min_by_key(|&n| self.points[*n as usize]).cloned()*/
}
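
The truncated `PRIOS` table above appears to follow a geometric progression in which every five priority levels double the weight, i.e. roughly `PRIOS[i] = 100 * 2^(i/5)`. A quick check of that reading (an observation, not code from this diff):

```rust
// Sketch: reproduce the visible table entries from the assumed formula.
fn prio_weight(i: u32) -> u32 {
    (100.0 * 2f64.powf(f64::from(i) / 5.0)).round() as u32
}
// prio_weight(0) == 100, prio_weight(5) == 200, prio_weight(17) == 1056, ...
```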
/// returns if msg is empty
fn tick_msg<E: Extend<(Sid, Frame)>>(
msg: &mut OutgoingMessage,
msg_sid: Sid,
frames: &mut E,
) -> bool {
let to_send = std::cmp::min(
msg.buffer.data[msg.cursor as usize..].len() as u64,
Self::FRAME_DATA_SIZE,
);
if to_send > 0 {
if msg.cursor == 0 {
frames.extend(std::iter::once((msg_sid, Frame::DataHeader {
mid: msg.mid,
sid: msg.sid,
length: msg.buffer.data.len() as u64,
})));
}
frames.extend(std::iter::once((msg_sid, Frame::Data {
mid: msg.mid,
start: msg.cursor,
data: msg.buffer.data[msg.cursor as usize..][..to_send as usize].to_vec(),
})));
};
msg.cursor += to_send;
msg.cursor >= msg.buffer.data.len() as u64
}
/// no_of_frames = frames.len()
/// Your goal is to try to find a realistic no_of_frames!
    /// no_of_frames should be chosen so that all Frames can be sent out till
@ -257,7 +228,7 @@ impl PrioManager {
// => messages with same prio get a fair chance :)
                //TODO: evaluate not popping every time
let (sid, mut msg) = self.messages[prio as usize].pop_front().unwrap();
if Self::tick_msg(&mut msg, sid, frames) {
if msg.fill_next(sid, frames) {
//trace!(?m.mid, "finish message");
//check if prio is empty
if self.messages[prio as usize].is_empty() {
@ -265,7 +236,7 @@ impl PrioManager {
}
//decrease pid_sid counter by 1 again
let cnt = self.sid_owned.get_mut(&sid).expect(
"the pid_sid_owned counter works wrong, more pid,sid removed than \
"The pid_sid_owned counter works wrong, more pid,sid removed than \
inserted",
);
cnt.len -= 1;
@ -276,7 +247,7 @@ impl PrioManager {
}
}
} else {
trace!(?msg.mid, "repush message");
trace!(?msg.mid, "Repush message");
self.messages[prio as usize].push_front((sid, msg));
}
},
@ -314,8 +285,8 @@ mod tests {
use futures::{channel::oneshot, executor::block_on};
use std::{collections::VecDeque, sync::Arc};
const SIZE: u64 = PrioManager::FRAME_DATA_SIZE;
const USIZE: usize = PrioManager::FRAME_DATA_SIZE as usize;
const SIZE: u64 = OutgoingMessage::FRAME_DATA_SIZE;
const USIZE: usize = OutgoingMessage::FRAME_DATA_SIZE as usize;
#[allow(clippy::type_complexity)]
fn mock_new() -> (
@ -358,28 +329,28 @@ mod tests {
fn assert_header(frames: &mut VecDeque<(Sid, Frame)>, f_sid: u64, f_length: u64) {
let frame = frames
.pop_front()
.expect("frames vecdeque doesn't contain enough frames!")
.expect("Frames vecdeque doesn't contain enough frames!")
.1;
if let Frame::DataHeader { mid, sid, length } = frame {
assert_eq!(mid, 1);
assert_eq!(sid, Sid::new(f_sid));
assert_eq!(length, f_length);
} else {
panic!("wrong frame type!, expected DataHeader");
panic!("Wrong frame type!, expected DataHeader");
}
}
fn assert_data(frames: &mut VecDeque<(Sid, Frame)>, f_start: u64, f_data: Vec<u8>) {
let frame = frames
.pop_front()
.expect("frames vecdeque doesn't contain enough frames!")
.expect("Frames vecdeque doesn't contain enough frames!")
.1;
if let Frame::Data { mid, start, data } = frame {
assert_eq!(mid, 1);
assert_eq!(start, f_start);
assert_eq!(data, f_data);
} else {
panic!("wrong frame type!, expected Data");
panic!("Wrong frame type!, expected Data");
}
}

View File

@ -8,13 +8,13 @@ use async_std::{
};
use futures::{
channel::{mpsc, oneshot},
future::FutureExt,
future::{Fuse, FutureExt},
lock::Mutex,
select,
sink::SinkExt,
stream::StreamExt,
};
use std::{net::SocketAddr, sync::Arc};
use std::{convert::TryFrom, net::SocketAddr, sync::Arc};
use tracing::*;
// Reserving bytes 0, 10, 13 as I have enough space and want to make it easy to
@ -59,22 +59,35 @@ impl TcpProtocol {
}
    /// read_exact and if it fails, close the protocol
async fn read_except_or_close(
async fn read_or_close(
cid: Cid,
mut stream: &TcpStream,
mut bytes: &mut [u8],
w2c_cid_frame_s: &mut mpsc::UnboundedSender<(Cid, Frame)>,
) {
if let Err(e) = stream.read_exact(&mut bytes).await {
warn!(
?e,
"closing tcp protocol due to read error, sending close frame to gracefully \
shutdown"
);
w2c_cid_frame_s
.send((cid, Frame::Shutdown))
.await
.expect("Channel or Participant seems no longer to exist to be Shutdown");
mut end_receiver: &mut Fuse<oneshot::Receiver<()>>,
) -> bool {
match select! {
r = stream.read_exact(&mut bytes).fuse() => Some(r),
_ = end_receiver => None,
} {
Some(Ok(_)) => false,
Some(Err(e)) => {
debug!(
?cid,
?e,
"Closing tcp protocol due to read error, sending close frame to gracefully \
shutdown"
);
w2c_cid_frame_s
.send((cid, Frame::Shutdown))
.await
.expect("Channel or Participant seems no longer to exist to be Shutdown");
true
},
None => {
trace!(?cid, "shutdown requested");
true
},
}
}
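
The `select!` between the pending `read_exact` and the fused `end_receiver` is what turns a blocking read into a cancellable one. The bare pattern, extracted into a sketch:

```rust
use futures::{
    channel::oneshot,
    future::{Fuse, FutureExt},
    select,
};

// Sketch: race a long-running future against a stop signal; whichever
// completes first wins, mirroring read_or_close above.
async fn run_or_stop<F: std::future::Future + Unpin>(
    work: F,
    end_r: &mut Fuse<oneshot::Receiver<()>>,
) -> Option<F::Output> {
    select! {
        r = work.fuse() => Some(r),
        _ = end_r => None,
    }
}
```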
@ -82,60 +95,59 @@ impl TcpProtocol {
&self,
cid: Cid,
w2c_cid_frame_s: &mut mpsc::UnboundedSender<(Cid, Frame)>,
end_receiver: oneshot::Receiver<()>,
end_r: oneshot::Receiver<()>,
) {
trace!("starting up tcp read()");
trace!("Starting up tcp read()");
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_in_total.clone(), cid);
let throughput_cache = self
.metrics
.wire_in_throughput
.with_label_values(&[&cid.to_string()]);
let mut stream = self.stream.clone();
let mut end_receiver = end_receiver.fuse();
let stream = self.stream.clone();
let mut end_r = end_r.fuse();
macro_rules! read_or_close {
($x:expr) => {
if TcpProtocol::read_or_close(cid, &stream, $x, w2c_cid_frame_s, &mut end_r).await {
info!("Tcp stream closed, shutting down read");
break;
}
};
}
loop {
let mut bytes = [0u8; 1];
let r = select! {
r = stream.read_exact(&mut bytes).fuse() => r,
_ = end_receiver => break,
let frame_no = {
let mut bytes = [0u8; 1];
read_or_close!(&mut bytes);
bytes[0]
};
if r.is_err() {
info!("tcp stream closed, shutting down read");
break;
}
let frame_no = bytes[0];
let frame = match frame_no {
FRAME_HANDSHAKE => {
let mut bytes = [0u8; 19];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
let magic_number = [
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
];
read_or_close!(&mut bytes);
let magic_number = *<&[u8; 7]>::try_from(&bytes[0..7]).unwrap();
Frame::Handshake {
magic_number,
version: [
u32::from_le_bytes([bytes[7], bytes[8], bytes[9], bytes[10]]),
u32::from_le_bytes([bytes[11], bytes[12], bytes[13], bytes[14]]),
u32::from_le_bytes([bytes[15], bytes[16], bytes[17], bytes[18]]),
u32::from_le_bytes(*<&[u8; 4]>::try_from(&bytes[7..11]).unwrap()),
u32::from_le_bytes(*<&[u8; 4]>::try_from(&bytes[11..15]).unwrap()),
u32::from_le_bytes(*<&[u8; 4]>::try_from(&bytes[15..19]).unwrap()),
],
}
},
FRAME_INIT => {
let mut bytes = [0u8; 16];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
read_or_close!(&mut bytes);
let pid = Pid::from_le_bytes(bytes);
stream.read_exact(&mut bytes).await.unwrap();
read_or_close!(&mut bytes);
let secret = u128::from_le_bytes(bytes);
Frame::Init { pid, secret }
},
FRAME_SHUTDOWN => Frame::Shutdown,
FRAME_OPEN_STREAM => {
let mut bytes = [0u8; 10];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
let sid = Sid::from_le_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
bytes[7],
]);
read_or_close!(&mut bytes);
let sid = Sid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
let prio = bytes[8];
let promises = bytes[9];
Frame::OpenStream {
@ -146,61 +158,44 @@ impl TcpProtocol {
},
FRAME_CLOSE_STREAM => {
let mut bytes = [0u8; 8];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
let sid = Sid::from_le_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
bytes[7],
]);
read_or_close!(&mut bytes);
let sid = Sid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
Frame::CloseStream { sid }
},
FRAME_DATA_HEADER => {
let mut bytes = [0u8; 24];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
let mid = Mid::from_le_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
bytes[7],
]);
let sid = Sid::from_le_bytes([
bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14],
bytes[15],
]);
let length = u64::from_le_bytes([
bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21],
bytes[22], bytes[23],
]);
read_or_close!(&mut bytes);
let mid = Mid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
let sid = Sid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[8..16]).unwrap());
let length = u64::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[16..24]).unwrap());
Frame::DataHeader { mid, sid, length }
},
FRAME_DATA => {
let mut bytes = [0u8; 18];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
let mid = Mid::from_le_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6],
bytes[7],
]);
let start = u64::from_le_bytes([
bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14],
bytes[15],
]);
let length = u16::from_le_bytes([bytes[16], bytes[17]]);
let mut cdata = vec![0; length as usize];
read_or_close!(&mut bytes);
let mid = Mid::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[0..8]).unwrap());
let start = u64::from_le_bytes(*<&[u8; 8]>::try_from(&bytes[8..16]).unwrap());
let length = u16::from_le_bytes(*<&[u8; 2]>::try_from(&bytes[16..18]).unwrap());
let mut data = vec![0; length as usize];
throughput_cache.inc_by(length as i64);
Self::read_except_or_close(cid, &stream, &mut cdata, w2c_cid_frame_s).await;
let data = lz4_compress::decompress(&cdata).unwrap();
read_or_close!(&mut data);
Frame::Data { mid, start, data }
},
FRAME_RAW => {
let mut bytes = [0u8; 2];
Self::read_except_or_close(cid, &stream, &mut bytes, w2c_cid_frame_s).await;
read_or_close!(&mut bytes);
let length = u16::from_le_bytes([bytes[0], bytes[1]]);
let mut data = vec![0; length as usize];
Self::read_except_or_close(cid, &stream, &mut data, w2c_cid_frame_s).await;
read_or_close!(&mut data);
Frame::Raw(data)
},
_ => {
other => {
// report a RAW frame, but cannot rely on the next 2 bytes to be a size.
// guessing 256 bytes, which might help to sort down issues
let mut data = vec![0; 256];
Self::read_except_or_close(cid, &stream, &mut data, w2c_cid_frame_s).await;
                    // guessing 32 bytes, which might help to narrow down issues
let mut data = vec![0; 32];
//keep the first byte!
read_or_close!(&mut data[1..]);
data[0] = other;
Frame::Raw(data)
},
};
@ -210,173 +205,100 @@ impl TcpProtocol {
.await
.expect("Channel or Participant seems no longer to exist");
}
trace!("shutting down tcp read()");
trace!("Shutting down tcp read()");
}
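
Every TCP frame on the wire is one frame-type byte followed by a fixed-size, little-endian header, which is why the reader above can use exact-size `try_from` slices. An illustrative encoder for the `DataHeader` layout it expects (the discriminant value is an assumption):

```rust
// Sketch: [frame type: u8][mid: u64 le][sid: u64 le][length: u64 le],
// 25 bytes total, matching the 24-byte header read above.
const FRAME_DATA_HEADER: u8 = 6; // assumed discriminant

fn encode_data_header(mid: u64, sid: u64, length: u64) -> Vec<u8> {
    let mut out = Vec::with_capacity(25);
    out.push(FRAME_DATA_HEADER);
    out.extend_from_slice(&mid.to_le_bytes());
    out.extend_from_slice(&sid.to_le_bytes());
    out.extend_from_slice(&length.to_le_bytes());
    out
}
```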
    /// write and if it fails, close the protocol
async fn write_or_close(
stream: &mut TcpStream,
bytes: &[u8],
to_wire_receiver: &mut mpsc::UnboundedReceiver<Frame>,
c2w_frame_r: &mut mpsc::UnboundedReceiver<Frame>,
) -> bool {
match stream.write_all(&bytes).await {
Err(e) => {
warn!(
debug!(
?e,
"got an error writing to tcp, going to close this channel"
"Got an error writing to tcp, going to close this channel"
);
to_wire_receiver.close();
c2w_frame_r.close();
true
},
_ => false,
}
}
    //deserialize here as this is executed in a separate thread PER channel.
    // Limits throughput per single receiver but stays in the same thread (maybe, as it's
    // in a threadpool) for TCP, UDP and MPSC
pub async fn write_to_wire(&self, cid: Cid, mut c2w_frame_r: mpsc::UnboundedReceiver<Frame>) {
trace!("starting up tcp write()");
trace!("Starting up tcp write()");
let mut stream = self.stream.clone();
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_out_total.clone(), cid);
let throughput_cache = self
.metrics
.wire_out_throughput
.with_label_values(&[&cid.to_string()]);
macro_rules! write_or_close {
($x:expr) => {
if TcpProtocol::write_or_close(&mut stream, $x, &mut c2w_frame_r).await {
info!("Tcp stream closed, shutting down write");
break;
}
};
}
while let Some(frame) = c2w_frame_r.next().await {
metrics_cache.with_label_values(&frame).inc();
if match frame {
match frame {
Frame::Handshake {
magic_number,
version,
} => {
Self::write_or_close(
&mut stream,
&FRAME_HANDSHAKE.to_be_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(&mut stream, &magic_number, &mut c2w_frame_r).await
|| Self::write_or_close(
&mut stream,
&version[0].to_le_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(
&mut stream,
&version[1].to_le_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(
&mut stream,
&version[2].to_le_bytes(),
&mut c2w_frame_r,
)
.await
write_or_close!(&FRAME_HANDSHAKE.to_be_bytes());
write_or_close!(&magic_number);
write_or_close!(&version[0].to_le_bytes());
write_or_close!(&version[1].to_le_bytes());
write_or_close!(&version[2].to_le_bytes());
},
Frame::Init { pid, secret } => {
Self::write_or_close(&mut stream, &FRAME_INIT.to_be_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(&mut stream, &pid.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(
&mut stream,
&secret.to_le_bytes(),
&mut c2w_frame_r,
)
.await
write_or_close!(&FRAME_INIT.to_be_bytes());
write_or_close!(&pid.to_le_bytes());
write_or_close!(&secret.to_le_bytes());
},
Frame::Shutdown => {
Self::write_or_close(
&mut stream,
&FRAME_SHUTDOWN.to_be_bytes(),
&mut c2w_frame_r,
)
.await
write_or_close!(&FRAME_SHUTDOWN.to_be_bytes());
},
Frame::OpenStream {
sid,
prio,
promises,
} => {
Self::write_or_close(
&mut stream,
&FRAME_OPEN_STREAM.to_be_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(&mut stream, &sid.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(&mut stream, &prio.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(
&mut stream,
&promises.to_le_bytes(),
&mut c2w_frame_r,
)
.await
write_or_close!(&FRAME_OPEN_STREAM.to_be_bytes());
write_or_close!(&sid.to_le_bytes());
write_or_close!(&prio.to_le_bytes());
write_or_close!(&promises.to_le_bytes());
},
Frame::CloseStream { sid } => {
Self::write_or_close(
&mut stream,
&FRAME_CLOSE_STREAM.to_be_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(&mut stream, &sid.to_le_bytes(), &mut c2w_frame_r)
.await
write_or_close!(&FRAME_CLOSE_STREAM.to_be_bytes());
write_or_close!(&sid.to_le_bytes());
},
Frame::DataHeader { mid, sid, length } => {
Self::write_or_close(
&mut stream,
&FRAME_DATA_HEADER.to_be_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(&mut stream, &mid.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(&mut stream, &sid.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(
&mut stream,
&length.to_le_bytes(),
&mut c2w_frame_r,
)
.await
write_or_close!(&FRAME_DATA_HEADER.to_be_bytes());
write_or_close!(&mid.to_le_bytes());
write_or_close!(&sid.to_le_bytes());
write_or_close!(&length.to_le_bytes());
},
Frame::Data { mid, start, data } => {
throughput_cache.inc_by(data.len() as i64);
let cdata = lz4_compress::compress(&data);
Self::write_or_close(&mut stream, &FRAME_DATA.to_be_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(&mut stream, &mid.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(&mut stream, &start.to_le_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(
&mut stream,
&(cdata.len() as u16).to_le_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(&mut stream, &cdata, &mut c2w_frame_r).await
write_or_close!(&FRAME_DATA.to_be_bytes());
write_or_close!(&mid.to_le_bytes());
write_or_close!(&start.to_le_bytes());
write_or_close!(&(data.len() as u16).to_le_bytes());
write_or_close!(&data);
},
Frame::Raw(data) => {
Self::write_or_close(&mut stream, &FRAME_RAW.to_be_bytes(), &mut c2w_frame_r)
.await
|| Self::write_or_close(
&mut stream,
&(data.len() as u16).to_le_bytes(),
&mut c2w_frame_r,
)
.await
|| Self::write_or_close(&mut stream, &data, &mut c2w_frame_r).await
write_or_close!(&FRAME_RAW.to_be_bytes());
write_or_close!(&(data.len() as u16).to_le_bytes());
write_or_close!(&data);
},
} {
//failure
return;
}
}
trace!("shutting down tcp write()");
@ -402,21 +324,21 @@ impl UdpProtocol {
&self,
cid: Cid,
w2c_cid_frame_s: &mut mpsc::UnboundedSender<(Cid, Frame)>,
end_receiver: oneshot::Receiver<()>,
end_r: oneshot::Receiver<()>,
) {
trace!("starting up udp read()");
trace!("Starting up udp read()");
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_in_total.clone(), cid);
let throughput_cache = self
.metrics
.wire_in_throughput
.with_label_values(&[&cid.to_string()]);
let mut data_in = self.data_in.lock().await;
let mut end_receiver = end_receiver.fuse();
let mut end_r = end_r.fuse();
while let Some(bytes) = select! {
r = data_in.next().fuse() => r,
_ = end_receiver => None,
_ = end_r => None,
} {
trace!("got raw UDP message with len: {}", bytes.len());
trace!("Got raw UDP message with len: {}", bytes.len());
let frame_no = bytes[0];
let frame = match frame_no {
FRAME_HANDSHAKE => {
@ -511,11 +433,11 @@ impl UdpProtocol {
metrics_cache.with_label_values(&frame).inc();
w2c_cid_frame_s.send((cid, frame)).await.unwrap();
}
trace!("shutting down udp read()");
trace!("Shutting down udp read()");
}
pub async fn write_to_wire(&self, cid: Cid, mut c2w_frame_r: mpsc::UnboundedReceiver<Frame>) {
trace!("starting up udp write()");
trace!("Starting up udp write()");
let mut buffer = [0u8; 2000];
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_out_total.clone(), cid);
let throughput_cache = self
@ -588,7 +510,7 @@ impl UdpProtocol {
};
let mut start = 0;
while start < len {
trace!(?start, ?len, "splitting up udp frame in multiple packages");
trace!(?start, ?len, "Splitting up udp frame in multiple packages");
match self
.socket
.send_to(&buffer[start..len], self.remote_addr)
@ -603,10 +525,115 @@ impl UdpProtocol {
);
}
},
Err(e) => error!(?e, "need to handle that error!"),
Err(e) => error!(?e, "Need to handle that error!"),
}
}
}
trace!("shutting down udp write()");
trace!("Shutting down udp write()");
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
metrics::NetworkMetrics,
types::{Cid, Pid},
};
use async_std::net;
use futures::{executor::block_on, stream::StreamExt};
use std::sync::Arc;
#[test]
fn tcp_read_handshake() {
let pid = Pid::new();
let cid = 80085;
let metrics = Arc::new(NetworkMetrics::new(&pid).unwrap());
let addr = std::net::SocketAddrV4::new(std::net::Ipv4Addr::new(127, 0, 0, 1), 50500);
block_on(async {
let server = net::TcpListener::bind(addr).await.unwrap();
let mut client = net::TcpStream::connect(addr).await.unwrap();
let s_stream = server.incoming().next().await.unwrap().unwrap();
let prot = TcpProtocol::new(s_stream, metrics);
//Send Handshake
client.write_all(&[FRAME_HANDSHAKE]).await.unwrap();
client.write_all(b"HELLOWO").await.unwrap();
client.write_all(&1337u32.to_le_bytes()).await.unwrap();
client.write_all(&0u32.to_le_bytes()).await.unwrap();
client.write_all(&42u32.to_le_bytes()).await.unwrap();
            client.flush().await.unwrap();
//handle data
let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<(Cid, Frame)>();
let (read_stop_sender, read_stop_receiver) = oneshot::channel();
let cid2 = cid;
let t = std::thread::spawn(move || {
block_on(async {
prot.read_from_wire(cid2, &mut w2c_cid_frame_s, read_stop_receiver)
.await;
})
});
            // Assert that we get some value back! It's a Handshake!
//async_std::task::sleep(std::time::Duration::from_millis(1000));
let (cid_r, frame) = w2c_cid_frame_r.next().await.unwrap();
assert_eq!(cid, cid_r);
if let Frame::Handshake {
magic_number,
version,
} = frame
{
assert_eq!(&magic_number, b"HELLOWO");
assert_eq!(version, [1337, 0, 42]);
} else {
panic!("wrong handshake");
}
read_stop_sender.send(()).unwrap();
t.join().unwrap();
});
}
#[test]
fn tcp_read_garbage() {
let pid = Pid::new();
let cid = 80085;
let metrics = Arc::new(NetworkMetrics::new(&pid).unwrap());
let addr = std::net::SocketAddrV4::new(std::net::Ipv4Addr::new(127, 0, 0, 1), 50501);
block_on(async {
let server = net::TcpListener::bind(addr).await.unwrap();
let mut client = net::TcpStream::connect(addr).await.unwrap();
let s_stream = server.incoming().next().await.unwrap().unwrap();
let prot = TcpProtocol::new(s_stream, metrics);
//Send Handshake
client
.write_all("x4hrtzsektfhxugzdtz5r78gzrtzfhxfdthfthuzhfzzufasgasdfg".as_bytes())
.await
.unwrap();
            client.flush().await.unwrap();
//handle data
let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<(Cid, Frame)>();
let (read_stop_sender, read_stop_receiver) = oneshot::channel();
let cid2 = cid;
let t = std::thread::spawn(move || {
block_on(async {
prot.read_from_wire(cid2, &mut w2c_cid_frame_s, read_stop_receiver)
.await;
})
});
            // Assert that we get some value back! It's a Raw!
let (cid_r, frame) = w2c_cid_frame_r.next().await.unwrap();
assert_eq!(cid, cid_r);
if let Frame::Raw(data) = frame {
assert_eq!(&data.as_slice(), b"x4hrtzsektfhxugzdtz5r78gzrtzfhxf");
} else {
panic!("wrong frame type");
}
read_stop_sender.send(()).unwrap();
t.join().unwrap();
});
}
}

View File

@ -1,5 +1,5 @@
use crate::{
api::{Address, Participant},
api::{Participant, ProtocolAddr},
channel::Handshake,
metrics::NetworkMetrics,
participant::BParticipant,
@ -50,8 +50,9 @@ struct ParticipantInfo {
/// - c: channel/handshake
#[derive(Debug)]
struct ControlChannels {
a2s_listen_r: mpsc::UnboundedReceiver<(Address, oneshot::Sender<io::Result<()>>)>,
a2s_connect_r: mpsc::UnboundedReceiver<(Address, oneshot::Sender<io::Result<Participant>>)>,
a2s_listen_r: mpsc::UnboundedReceiver<(ProtocolAddr, oneshot::Sender<io::Result<()>>)>,
a2s_connect_r:
mpsc::UnboundedReceiver<(ProtocolAddr, oneshot::Sender<io::Result<Participant>>)>,
a2s_scheduler_shutdown_r: oneshot::Receiver<()>,
a2s_disconnect_r: mpsc::UnboundedReceiver<(Pid, oneshot::Sender<async_std::io::Result<()>>)>,
b2s_prio_statistic_r: mpsc::UnboundedReceiver<(Pid, u64, u64)>,
@ -74,7 +75,7 @@ pub struct Scheduler {
participant_channels: Arc<Mutex<Option<ParticipantChannels>>>,
participants: Arc<RwLock<HashMap<Pid, ParticipantInfo>>>,
channel_ids: Arc<AtomicU64>,
channel_listener: RwLock<HashMap<Address, oneshot::Sender<()>>>,
channel_listener: RwLock<HashMap<ProtocolAddr, oneshot::Sender<()>>>,
metrics: Arc<NetworkMetrics>,
}
@ -85,15 +86,15 @@ impl Scheduler {
registry: Option<&Registry>,
) -> (
Self,
mpsc::UnboundedSender<(Address, oneshot::Sender<io::Result<()>>)>,
mpsc::UnboundedSender<(Address, oneshot::Sender<io::Result<Participant>>)>,
mpsc::UnboundedSender<(ProtocolAddr, oneshot::Sender<io::Result<()>>)>,
mpsc::UnboundedSender<(ProtocolAddr, oneshot::Sender<io::Result<Participant>>)>,
mpsc::UnboundedReceiver<Participant>,
oneshot::Sender<()>,
) {
let (a2s_listen_s, a2s_listen_r) =
mpsc::unbounded::<(Address, oneshot::Sender<io::Result<()>>)>();
mpsc::unbounded::<(ProtocolAddr, oneshot::Sender<io::Result<()>>)>();
let (a2s_connect_s, a2s_connect_r) =
mpsc::unbounded::<(Address, oneshot::Sender<io::Result<Participant>>)>();
mpsc::unbounded::<(ProtocolAddr, oneshot::Sender<io::Result<Participant>>)>();
let (s2a_connected_s, s2a_connected_r) = mpsc::unbounded::<Participant>();
let (a2s_scheduler_shutdown_s, a2s_scheduler_shutdown_r) = oneshot::channel::<()>();
let (a2s_disconnect_s, a2s_disconnect_r) =
@ -156,21 +157,21 @@ impl Scheduler {
async fn listen_mgr(
&self,
a2s_listen_r: mpsc::UnboundedReceiver<(Address, oneshot::Sender<io::Result<()>>)>,
a2s_listen_r: mpsc::UnboundedReceiver<(ProtocolAddr, oneshot::Sender<io::Result<()>>)>,
) {
trace!("start listen_mgr");
trace!("Start listen_mgr");
a2s_listen_r
.for_each_concurrent(None, |(address, s2a_listen_result_s)| {
let address = address;
async move {
debug!(?address, "got request to open a channel_creator");
debug!(?address, "Got request to open a channel_creator");
self.metrics
.listen_requests_total
.with_label_values(&[match address {
Address::Tcp(_) => "tcp",
Address::Udp(_) => "udp",
Address::Mpsc(_) => "mpsc",
ProtocolAddr::Tcp(_) => "tcp",
ProtocolAddr::Udp(_) => "udp",
ProtocolAddr::Mpsc(_) => "mpsc",
}])
.inc();
let (end_sender, end_receiver) = oneshot::channel::<()>();
@ -183,20 +184,20 @@ impl Scheduler {
}
})
.await;
trace!("stop listen_mgr");
trace!("Stop listen_mgr");
}
async fn connect_mgr(
&self,
mut a2s_connect_r: mpsc::UnboundedReceiver<(
Address,
ProtocolAddr,
oneshot::Sender<io::Result<Participant>>,
)>,
) {
trace!("start connect_mgr");
trace!("Start connect_mgr");
while let Some((addr, pid_sender)) = a2s_connect_r.next().await {
let (protocol, handshake) = match addr {
Address::Tcp(addr) => {
ProtocolAddr::Tcp(addr) => {
self.metrics
.connect_requests_total
.with_label_values(&["tcp"])
@ -214,7 +215,7 @@ impl Scheduler {
false,
)
},
Address::Udp(addr) => {
ProtocolAddr::Udp(addr) => {
self.metrics
.connect_requests_total
.with_label_values(&["udp"])
@ -249,7 +250,7 @@ impl Scheduler {
self.init_protocol(protocol, Some(pid_sender), handshake)
.await;
}
trace!("stop connect_mgr");
trace!("Stop connect_mgr");
}
async fn disconnect_mgr(
@ -259,14 +260,14 @@ impl Scheduler {
oneshot::Sender<async_std::io::Result<()>>,
)>,
) {
trace!("start disconnect_mgr");
trace!("Start disconnect_mgr");
while let Some((pid, return_once_successful_shutdown)) = a2s_disconnect_r.next().await {
//Closing Participants is done the following way:
// 1. We drop our senders and receivers
// 2. We need to close BParticipant, this will drop its senders and receivers
// 3. Participant will try to access the BParticipant senders and receivers with
// its next API action; it will fail and be closed then.
trace!(?pid, "got request to close participant");
trace!(?pid, "Got request to close participant");
if let Some(mut pi) = self.participants.write().await.remove(&pid) {
let (finished_sender, finished_receiver) = oneshot::channel();
pi.s2b_shutdown_bparticipant_s
@ -278,36 +279,36 @@ impl Scheduler {
let e = finished_receiver.await.unwrap();
return_once_successful_shutdown.send(e).unwrap();
} else {
debug!(?pid, "looks like participant is already dropped");
debug!(?pid, "Looks like participant is already dropped");
return_once_successful_shutdown.send(Ok(())).unwrap();
}
trace!(?pid, "closed participant");
trace!(?pid, "Closed participant");
}
trace!("stop disconnect_mgr");
trace!("Stop disconnect_mgr");
}
async fn prio_adj_mgr(
&self,
mut b2s_prio_statistic_r: mpsc::UnboundedReceiver<(Pid, u64, u64)>,
) {
trace!("start prio_adj_mgr");
trace!("Start prio_adj_mgr");
while let Some((_pid, _frame_cnt, _unused)) = b2s_prio_statistic_r.next().await {
//TODO adjust prios in participants here!
}
trace!("stop prio_adj_mgr");
trace!("Stop prio_adj_mgr");
}
async fn scheduler_shutdown_mgr(&self, a2s_scheduler_shutdown_r: oneshot::Receiver<()>) {
trace!("start scheduler_shutdown_mgr");
trace!("Start scheduler_shutdown_mgr");
a2s_scheduler_shutdown_r.await.unwrap();
self.closed.store(true, Ordering::Relaxed);
debug!("shutting down all BParticipants gracefully");
debug!("Shutting down all BParticipants gracefully");
let mut participants = self.participants.write().await;
let waitings = participants
.drain()
.map(|(pid, mut pi)| {
trace!(?pid, "shutting down BParticipants");
trace!(?pid, "Shutting down BParticipants");
let (finished_sender, finished_receiver) = oneshot::channel();
pi.s2b_shutdown_bparticipant_s
.take()
@ -317,33 +318,34 @@ impl Scheduler {
(pid, finished_receiver)
})
.collect::<Vec<_>>();
debug!("wait for partiticipants to be shut down");
debug!("Wait for partiticipants to be shut down");
for (pid, recv) in waitings {
if let Err(e) = recv.await {
error!(
?pid,
?e,
"failed to finish sending all remainding messages to participant when \
"Failed to finish sending all remainding messages to participant when \
shutting down"
);
};
}
debug!("Scheduler shut down gracefully");
//Removing the possibility to create new participants, needed to close down
// some mgrs:
self.participant_channels.lock().await.take();
trace!("stop scheduler_shutdown_mgr");
trace!("Stop scheduler_shutdown_mgr");
}
async fn channel_creator(
&self,
addr: Address,
addr: ProtocolAddr,
s2s_stop_listening_r: oneshot::Receiver<()>,
s2a_listen_result_s: oneshot::Sender<io::Result<()>>,
) {
trace!(?addr, "start up channel creator");
trace!(?addr, "Start up channel creator");
match addr {
Address::Tcp(addr) => {
ProtocolAddr::Tcp(addr) => {
let listener = match net::TcpListener::bind(addr).await {
Ok(listener) => {
s2a_listen_result_s.send(Ok(())).unwrap();
@ -353,27 +355,40 @@ impl Scheduler {
info!(
?addr,
?e,
"listener couldn't be started due to error on tcp bind"
"Listener couldn't be started due to error on tcp bind"
);
s2a_listen_result_s.send(Err(e)).unwrap();
return;
},
};
trace!(?addr, "listener bound");
trace!(?addr, "Listener bound");
let mut incoming = listener.incoming();
let mut end_receiver = s2s_stop_listening_r.fuse();
while let Some(stream) = select! {
next = incoming.next().fuse() => next,
_ = end_receiver => None,
} {
let stream = stream.unwrap();
info!("Accepting Tcp from: {}", stream.peer_addr().unwrap());
let stream = match stream {
Ok(s) => s,
Err(e) => {
warn!(?e, "TcpStream Error, ignoring connection attempt");
continue;
},
};
let peer_addr = match stream.peer_addr() {
Ok(s) => s,
Err(e) => {
warn!(?e, "TcpStream Error, ignoring connection attempt");
continue;
},
};
info!("Accepting Tcp from: {}", peer_addr);
let protocol = TcpProtocol::new(stream, self.metrics.clone());
self.init_protocol(Protocols::Tcp(protocol), None, true)
.await;
}
},
Address::Udp(addr) => {
ProtocolAddr::Udp(addr) => {
let socket = match net::UdpSocket::bind(addr).await {
Ok(socket) => {
s2a_listen_result_s.send(Ok(())).unwrap();
@ -383,13 +398,13 @@ impl Scheduler {
info!(
?addr,
?e,
"listener couldn't be started due to error on udp bind"
"Listener couldn't be started due to error on udp bind"
);
s2a_listen_result_s.send(Err(e)).unwrap();
return;
},
};
trace!(?addr, "listener bound");
trace!(?addr, "Listener bound");
// receiving is done from here and will be piped to protocol as UDP does not
// have any state
let mut listeners = HashMap::new();
@ -424,7 +439,7 @@ impl Scheduler {
},
_ => unimplemented!(),
}
trace!(?addr, "ending channel creator");
trace!(?addr, "Ending channel creator");
}
async fn udp_single_channel_connect(
@ -432,7 +447,7 @@ impl Scheduler {
mut w2p_udp_package_s: mpsc::UnboundedSender<Vec<u8>>,
) {
let addr = socket.local_addr();
trace!(?addr, "start udp_single_channel_connect");
trace!(?addr, "Start udp_single_channel_connect");
//TODO: implement real closing
let (_end_sender, end_receiver) = oneshot::channel::<()>();
@ -448,7 +463,7 @@ impl Scheduler {
datavec.extend_from_slice(&data[0..size]);
w2p_udp_package_s.send(datavec).await.unwrap();
}
trace!(?addr, "stop udp_single_channel_connect");
trace!(?addr, "Stop udp_single_channel_connect");
}
async fn init_protocol(
@ -477,7 +492,7 @@ impl Scheduler {
// this is necessary for UDP to work at all and to remove code duplication
self.pool.spawn_ok(
async move {
trace!(?cid, "open channel and be ready for Handshake");
trace!(?cid, "Open channel and be ready for Handshake");
let handshake = Handshake::new(
cid,
local_pid,
@ -490,17 +505,18 @@ impl Scheduler {
trace!(
?cid,
?pid,
"detected that my channel is ready!, activating it :)"
"Detected that my channel is ready!, activating it :)"
);
let mut participants = participants.write().await;
if !participants.contains_key(&pid) {
debug!(?cid, "new participant connected via a channel");
debug!(?cid, "New participant connected via a channel");
let (
bparticipant,
a2b_steam_open_s,
b2a_stream_opened_r,
mut s2b_create_channel_s,
s2b_shutdown_bparticipant_s,
api_participant_closed,
) = BParticipant::new(pid, sid, metrics.clone());
let participant = Participant::new(
@ -509,6 +525,7 @@ impl Scheduler {
a2b_steam_open_s,
b2a_stream_opened_r,
participant_channels.a2s_disconnect_s,
api_participant_closed,
);
metrics.participants_connected_total.inc();
@ -557,7 +574,7 @@ impl Scheduler {
?secret,
"Detected incompatible Secret!, this is probably an attack!"
);
error!("just dropping here, TODO handle this correctly!");
error!("Just dropping here, TODO handle this correctly!");
//TODO
if let Some(pid_oneshot) = s2a_return_pid_s {
// someone is waiting with `connect`, so give them their Error
@ -571,7 +588,7 @@ impl Scheduler {
return;
}
error!(
"ufff i cant answer the pid_oneshot. as i need to create the SAME \
"Ufff i cant answer the pid_oneshot. as i need to create the SAME \
participant. maybe switch to ARC"
);
}
@ -581,10 +598,11 @@ impl Scheduler {
Err(()) => {
if let Some(pid_oneshot) = s2a_return_pid_s {
// someone is waiting with `connect`, so give them their Error
trace!("returning the Err to api who requested the connect");
pid_oneshot
.send(Err(std::io::Error::new(
std::io::ErrorKind::PermissionDenied,
"handshake failed, denying connection",
"Handshake failed, denying connection",
)))
.unwrap();
}

View File

@ -34,7 +34,7 @@ pub const PROMISES_COMPRESSED: Promises = 8;
pub const PROMISES_ENCRYPTED: Promises = 16;
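// Usage sketch: streams request these promises when opened, e.g.
// `participant.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY).await`.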
pub(crate) const VELOREN_MAGIC_NUMBER: [u8; 7] = [86, 69, 76, 79, 82, 69, 78]; //VELOREN
pub const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 3, 0];
pub const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 4, 0];
pub(crate) const STREAM_ID_OFFSET1: Sid = Sid::new(0);
pub(crate) const STREAM_ID_OFFSET2: Sid = Sid::new(u64::MAX / 2);

View File

@ -1,3 +1,23 @@
//! How to read those tests:
//! - in the first line we call the helper; this is only debug code. In case
//! you want tracing for a specific test, set the bool to true and the
//! sleep to 10000, and your test will start 10 sec delayed with tracing.
//! You need the delay as otherwise the other tests pollute your trace
//! - the second line simulates a client and a server.
//! `network_participant_stream` will return
//! - 2 networks
//! - 2 participants
//! - 2 streams
//! each one `linked` to its counterpart.
//! You'll see a cryptic use of Rust's `_`; this is because we are testing
//! the `drop` behavior here.
//! - A `_` means this is directly dropped after the line executes, thus
//! immediately executing its `Drop` impl.
//! - A `_p1_a` e.g. means we don't use that Participant yet, but we must
//! not `drop` it yet as we might want to use the Streams.
//! - You sometimes see sleep(1000ms); this is used when we rely on the
//! underlying TCP functionality, as this simulates client and server
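//!
//! A minimal sketch of the pattern described above (it mirrors the calls
//! used by the tests in this file; the message content is arbitrary):
//! ```ignore
//! let (_, _) = helper::setup(false, 0);
//! let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) =
//!     block_on(network_participant_stream(tcp()));
//! s1_a.send("Hello World").unwrap();
//! assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
//! ```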
use async_std::task;
use task::block_on;
use veloren_network::StreamError;
@ -19,10 +39,17 @@ fn close_network() {
#[test]
fn close_participant() {
let (_, _) = helper::setup(false, 0);
let (n_a, p1_a, mut s1_a, n_b, p1_b, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, p1_a, mut s1_a, _n_b, p1_b, mut s1_b) = block_on(network_participant_stream(tcp()));
block_on(n_a.disconnect(p1_a)).unwrap();
block_on(n_b.disconnect(p1_b)).unwrap();
block_on(p1_a.disconnect()).unwrap();
// The following will `Err`, but we don't know the exact error message.
// Why? Because of the TCP layer we have no guarantee whether the TCP messages
// sent one line above have already reached `p1_b`. If they have, it fails
// with `ParticipantDisconnected`, as a clean disconnect was performed.
// If they haven't arrived yet but do so during execution, it reports that an
// unclean shutdown was detected. Nevertheless, if it returns Ok(()) then
// something is wrong!
assert!(block_on(p1_b.disconnect()).is_err());
assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
assert_eq!(
@ -66,7 +93,7 @@ fn close_streams_in_block_on() {
#[test]
fn stream_simple_3msg_then_close() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
s1_a.send(1u8).unwrap();
s1_a.send(42).unwrap();
@ -83,7 +110,7 @@ fn stream_simple_3msg_then_close() {
fn stream_send_first_then_receive() {
// recv should still be possible even if the stream got closed, as long as messages are queued
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
s1_a.send(1u8).unwrap();
s1_a.send(42).unwrap();
@ -99,7 +126,7 @@ fn stream_send_first_then_receive() {
#[test]
fn stream_send_1_then_close_stream() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
s1_a.send("this message must be received, even if stream is closed already!")
.unwrap();
drop(s1_a);
@ -112,7 +139,7 @@ fn stream_send_1_then_close_stream() {
#[test]
fn stream_send_100000_then_close_stream() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..100000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
@ -130,7 +157,7 @@ fn stream_send_100000_then_close_stream() {
#[test]
fn stream_send_100000_then_close_stream_remote() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, _s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..100000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
@ -142,7 +169,7 @@ fn stream_send_100000_then_close_stream_remote() {
#[test]
fn stream_send_100000_then_close_stream_remote2() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, _s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..100000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
@ -155,7 +182,7 @@ fn stream_send_100000_then_close_stream_remote2() {
#[test]
fn stream_send_100000_then_close_stream_remote3() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, _s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..100000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
@ -164,3 +191,41 @@ fn stream_send_100000_then_close_stream_remote3() {
drop(s1_a);
//no receiving
}
#[test]
fn close_part_then_network() {
let (_, _) = helper::setup(false, 0);
let (n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..1000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
drop(p_a);
std::thread::sleep(std::time::Duration::from_millis(1000));
drop(n_a);
std::thread::sleep(std::time::Duration::from_millis(1000));
}
#[test]
fn close_network_then_part() {
let (_, _) = helper::setup(false, 0);
let (n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..1000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
drop(n_a);
std::thread::sleep(std::time::Duration::from_millis(1000));
drop(p_a);
std::thread::sleep(std::time::Duration::from_millis(1000));
}
#[test]
fn close_network_then_disconnect_part() {
let (_, _) = helper::setup(false, 0);
let (n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
for _ in 0..1000 {
s1_a.send("woop_PARTY_HARD_woop").unwrap();
}
drop(n_a);
assert!(block_on(p_a.disconnect()).is_err());
std::thread::sleep(std::time::Duration::from_millis(1000));
}

View File

@ -1,16 +1,13 @@
use lazy_static::*;
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicU16, Ordering},
Arc,
},
sync::atomic::{AtomicU16, Ordering},
thread,
time::Duration,
};
use tracing::*;
use tracing_subscriber::EnvFilter;
use veloren_network::{Address, Network, Participant, Pid, Stream, PROMISES_NONE};
use veloren_network::{Network, Participant, Pid, ProtocolAddr, Stream, PROMISES_NONE};
#[allow(dead_code)]
pub fn setup(tracing: bool, mut sleep: u64) -> (u64, u64) {
@ -50,15 +47,8 @@ pub fn setup(tracing: bool, mut sleep: u64) -> (u64, u64) {
#[allow(dead_code)]
pub async fn network_participant_stream(
addr: Address,
) -> (
Network,
Arc<Participant>,
Stream,
Network,
Arc<Participant>,
Stream,
) {
addr: ProtocolAddr,
) -> (Network, Participant, Stream, Network, Participant, Stream) {
let (n_a, f_a) = Network::new(Pid::fake(1), None);
std::thread::spawn(f_a);
let (n_b, f_b) = Network::new(Pid::fake(2), None);
@ -75,19 +65,19 @@ pub async fn network_participant_stream(
}
#[allow(dead_code)]
pub fn tcp() -> veloren_network::Address {
pub fn tcp() -> veloren_network::ProtocolAddr {
lazy_static! {
static ref PORTS: AtomicU16 = AtomicU16::new(5000);
}
let port = PORTS.fetch_add(1, Ordering::Relaxed);
veloren_network::Address::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
veloren_network::ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
}
#[allow(dead_code)]
pub fn udp() -> veloren_network::Address {
pub fn udp() -> veloren_network::ProtocolAddr {
lazy_static! {
static ref PORTS: AtomicU16 = AtomicU16::new(5000);
}
let port = PORTS.fetch_add(1, Ordering::Relaxed);
veloren_network::Address::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
veloren_network::ProtocolAddr::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
}

View File

@ -4,7 +4,7 @@ use veloren_network::{NetworkError, StreamError};
mod helper;
use helper::{network_participant_stream, tcp, udp};
use std::io::ErrorKind;
use veloren_network::{Address, Network, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
use veloren_network::{Network, Pid, ProtocolAddr, PROMISES_CONSISTENCY, PROMISES_ORDERED};
#[test]
#[ignore]
@ -17,7 +17,7 @@ fn network_20s() {
#[test]
fn stream_simple() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
s1_a.send("Hello World").unwrap();
assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
@ -26,7 +26,7 @@ fn stream_simple() {
#[test]
fn stream_simple_3msg() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
s1_a.send("Hello World").unwrap();
s1_a.send(1337).unwrap();
@ -39,7 +39,7 @@ fn stream_simple_3msg() {
#[test]
fn stream_simple_udp() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(udp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(udp()));
s1_a.send("Hello World").unwrap();
assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
@ -48,7 +48,7 @@ fn stream_simple_udp() {
#[test]
fn stream_simple_udp_3msg() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(udp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(udp()));
s1_a.send("Hello World").unwrap();
s1_a.send(1337).unwrap();
@ -68,18 +68,18 @@ fn tcp_and_udp_2_connections() -> std::result::Result<(), Box<dyn std::error::Er
std::thread::spawn(fr);
block_on(async {
remote
.listen(Address::Tcp("0.0.0.0:2000".parse().unwrap()))
.listen(ProtocolAddr::Tcp("0.0.0.0:2000".parse().unwrap()))
.await?;
remote
.listen(Address::Udp("0.0.0.0:2001".parse().unwrap()))
.listen(ProtocolAddr::Udp("0.0.0.0:2001".parse().unwrap()))
.await?;
let p1 = network
.connect(Address::Tcp("127.0.0.1:2000".parse().unwrap()))
.connect(ProtocolAddr::Tcp("127.0.0.1:2000".parse().unwrap()))
.await?;
let p2 = network
.connect(Address::Udp("127.0.0.1:2001".parse().unwrap()))
.connect(ProtocolAddr::Udp("127.0.0.1:2001".parse().unwrap()))
.await?;
assert!(std::sync::Arc::ptr_eq(&p1, &p2));
assert_eq!(&p1, &p2);
Ok(())
})
}
@ -126,10 +126,10 @@ fn api_stream_send_main() -> std::result::Result<(), Box<dyn std::error::Error>>
std::thread::spawn(fr);
block_on(async {
network
.listen(Address::Tcp("127.0.0.1:1200".parse().unwrap()))
.listen(ProtocolAddr::Tcp("127.0.0.1:1200".parse().unwrap()))
.await?;
let remote_p = remote
.connect(Address::Tcp("127.0.0.1:1200".parse().unwrap()))
.connect(ProtocolAddr::Tcp("127.0.0.1:1200".parse().unwrap()))
.await?;
// keep it alive
let _stream_p = remote_p
@ -154,10 +154,10 @@ fn api_stream_recv_main() -> std::result::Result<(), Box<dyn std::error::Error>>
std::thread::spawn(fr);
block_on(async {
network
.listen(Address::Tcp("127.0.0.1:1220".parse().unwrap()))
.listen(ProtocolAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
.await?;
let remote_p = remote
.connect(Address::Tcp("127.0.0.1:1220".parse().unwrap()))
.connect(ProtocolAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
.await?;
let mut stream_p = remote_p
.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY)
@ -174,7 +174,7 @@ fn api_stream_recv_main() -> std::result::Result<(), Box<dyn std::error::Error>>
#[test]
fn wrong_parse() {
let (_, _) = helper::setup(false, 0);
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
s1_a.send(1337).unwrap();
match block_on(s1_b.recv::<String>()) {

View File

@ -1,13 +1,21 @@
use common::msg::{ClientState, RequestStateError, ServerMsg};
use crate::error::Error;
use common::msg::{ClientMsg, ClientState, RequestStateError, ServerMsg};
use hashbrown::HashSet;
use network::Stream;
use network::{Participant, Stream};
use specs::{Component, FlaggedStorage};
use specs_idvs::IdvStorage;
use std::sync::{
atomic::{AtomicBool, Ordering},
Mutex,
};
use tracing::debug;
use vek::*;
pub struct Client {
pub client_state: ClientState,
pub participant: Mutex<Option<Participant>>,
pub singleton_stream: Stream,
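/// Latched to true once a stream error is observed, so later sends and
/// recvs short-circuit instead of touching the broken stream.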
pub network_error: AtomicBool,
pub last_ping: f64,
pub login_msg_sent: bool,
}
@ -17,7 +25,29 @@ impl Component for Client {
}
impl Client {
pub fn notify(&mut self, msg: ServerMsg) { let _ = self.singleton_stream.send(msg); }
pub fn notify(&mut self, msg: ServerMsg) {
if !self.network_error.load(Ordering::Relaxed) {
if let Err(e) = self.singleton_stream.send(msg) {
debug!(?e, "got a network error with client");
self.network_error.store(true, Ordering::Relaxed);
}
}
}
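/// Receive the next message from this client's stream; any error latches
/// `network_error` so subsequent calls fail fast with `StreamClosed`.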
pub async fn recv(&mut self) -> Result<ClientMsg, Error> {
if !self.network_error.load(Ordering::Relaxed) {
match self.singleton_stream.recv().await {
Ok(r) => Ok(r),
Err(e) => {
debug!(?e, "got a network error with client while recv");
self.network_error.store(true, Ordering::Relaxed);
Err(Error::StreamErr(e))
},
}
} else {
Err(Error::StreamErr(network::StreamError::StreamClosed))
}
}
pub fn is_registered(&self) -> bool {
match self.client_state {
@ -41,9 +71,7 @@ impl Client {
}
pub fn error_state(&mut self, error: RequestStateError) {
let _ = self
.singleton_stream
.send(ServerMsg::StateAnswer(Err((error, self.client_state))));
let _ = self.notify(ServerMsg::StateAnswer(Err((error, self.client_state))));
}
}

View File

@ -8,8 +8,9 @@ use common::{
msg::{ClientState, PlayerListUpdate, ServerMsg},
sync::{Uid, UidAllocator},
};
use futures_executor::block_on;
use specs::{saveload::MarkerAllocator, Builder, Entity as EcsEntity, WorldExt};
use tracing::error;
use tracing::{debug, error, trace};
pub fn handle_exit_ingame(server: &mut Server, entity: EcsEntity) {
let state = server.state_mut();
@ -46,6 +47,17 @@ pub fn handle_exit_ingame(server: &mut Server, entity: EcsEntity) {
}
pub fn handle_client_disconnect(server: &mut Server, entity: EcsEntity) -> Event {
if let Some(client) = server.state().read_storage::<Client>().get(entity) {
trace!("Closing participant of client");
let participant = client.participant.lock().unwrap().take().unwrap();
if let Err(e) = block_on(participant.disconnect()) {
debug!(
?e,
"Error when disconnecting client, maybe the pipe already broke"
);
};
}
let state = server.state_mut();
// Tell other clients to remove from player list

View File

@ -41,7 +41,7 @@ use futures_executor::block_on;
use futures_timer::Delay;
use futures_util::{select, FutureExt};
use metrics::{ServerMetrics, TickMetrics};
use network::{Address, Network, Pid};
use network::{Network, Pid, ProtocolAddr};
use persistence::character::{CharacterLoader, CharacterLoaderResponseType, CharacterUpdater};
use specs::{join::Join, Builder, Entity as EcsEntity, RunNow, SystemData, WorldExt};
use std::{
@ -241,7 +241,7 @@ impl Server {
.build();
let (network, f) = Network::new(Pid::new(), None);
thread_pool.execute(f);
block_on(network.listen(Address::Tcp(settings.gameserver_address)))?;
block_on(network.listen(ProtocolAddr::Tcp(settings.gameserver_address)))?;
let this = Self {
state,
@ -343,9 +343,9 @@ impl Server {
// 3) Handle inputs from clients
block_on(async {
//TIMEOUT 0.01 ms for msg handling
//TIMEOUT 0.1 ms for msg handling
select!(
_ = Delay::new(std::time::Duration::from_micros(10)).fuse() => Ok(()),
_ = Delay::new(std::time::Duration::from_micros(100)).fuse() => Ok(()),
err = self.handle_new_connections(&mut frontend_events).fuse() => err,
)
})?;
@ -597,11 +597,14 @@ impl Server {
) -> Result<(), Error> {
loop {
let participant = self.network.connected().await?;
debug!("New Participant connected to the server");
let singleton_stream = participant.opened().await?;
let mut client = Client {
client_state: ClientState::Connected,
participant: std::sync::Mutex::new(Some(participant)),
singleton_stream,
network_error: std::sync::atomic::AtomicBool::new(false),
last_ping: self.state.get_time(),
login_msg_sent: false,
};
@ -634,9 +637,9 @@ impl Server {
time_of_day: *self.state.ecs().read_resource(),
world_map: (WORLD_SIZE.map(|e| e as u32), self.map.clone()),
});
debug!("Done initial sync with client.");
frontend_events.push(Event::ClientConnected { entity });
debug!("Done initial sync with client.");
}
}
}

View File

@ -27,8 +27,8 @@ use specs::{
};
impl Sys {
///We need to move this to a async fn, otherwise the compiler generates to
/// much recursive fn, and async closures dont work yet
///We needed to move this to an async fn; if we used async closures,
/// the compiler would generate too much recursion and fail to compile this
#[allow(clippy::too_many_arguments)]
async fn handle_client_msg(
server_emitter: &mut common::event::Emitter<'_, ServerEvent>,
@ -57,7 +57,7 @@ impl Sys {
settings: &Read<'_, ServerSettings>,
) -> Result<(), crate::error::Error> {
loop {
let msg = client.singleton_stream.recv().await?;
let msg = client.recv().await?;
*cnt += 1;
match msg {
// Go back to registered state (char selection screen)
@ -473,9 +473,9 @@ impl<'a> System<'a> for Sys {
let mut cnt = 0;
let network_err: Result<(), crate::error::Error> = block_on(async {
//TIMEOUT 0.01 ms for msg handling
//TIMEOUT 0.02 ms for msg handling
select!(
_ = Delay::new(std::time::Duration::from_micros(10)).fuse() => Ok(()),
_ = Delay::new(std::time::Duration::from_micros(20)).fuse() => Ok(()),
err = Self::handle_client_msg(
&mut server_emitter,
&mut new_chat_msgs,

View File

@ -12,7 +12,7 @@ use std::{
thread,
time::Duration,
};
use tracing::debug;
use tracing::{debug, trace, warn};
#[derive(Debug)]
pub enum Error {
@ -81,7 +81,7 @@ impl ClientInit {
{
match Client::new(socket_addr, view_distance) {
Ok(mut client) => {
if let Err(err) =
if let Err(e) =
client.register(username, password, |auth_server| {
let _ = tx
.send(Msg::IsAuthTrusted(auth_server.to_string()));
@ -93,29 +93,28 @@ impl ClientInit {
.unwrap_or(false)
})
{
last_err = Some(Error::ClientError(err));
last_err = Some(Error::ClientError(e));
break 'tries;
}
let _ = tx.send(Msg::Done(Ok(client)));
return;
},
Err(err) => {
match err {
ClientError::NetworkErr(NetworkError::ConnectFailed(
..,
)) => {
debug!(
"can't reach the server, going to retry in a few \
seconds"
);
},
// Non-connection error, stop attempts
err => {
last_err = Some(Error::ClientError(err));
break 'tries;
},
Err(ClientError::NetworkErr(NetworkError::ConnectFailed(e))) => {
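// The network crate's handshake denies incompatible peers with
// PermissionDenied, so treat that as fatal instead of retrying.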
if e.kind() == std::io::ErrorKind::PermissionDenied {
warn!(?e, "Cannot connect to server: Incompatible version");
last_err = Some(Error::ClientError(
ClientError::NetworkErr(NetworkError::ConnectFailed(e)),
));
break 'tries;
} else {
debug!("Cannot connect to server: Timeout (retrying...)");
}
},
Err(e) => {
trace!(?e, "Aborting server connection attempt");
last_err = Some(Error::ClientError(e));
break 'tries;
},
}
}
thread::sleep(Duration::from_secs(5));