Mirror of https://gitlab.com/veloren/veloren.git, synced 2024-08-30 18:12:32 +00:00
Examples, HUGE fixes, tests, make it a lot smoother
- switch `listen` to async in order to verify whether the bind was successful (sketched below)
- introduce the following examples:
  - network-speed
  - chat
  - fileshare
- add additional tests
- fix the "dropping a stream before its last messages can be handled" bug: when dropping a stream, BParticipant now waits for the prio queue to be empty before dropping the stream and sending the signal
- correct closing of stream and participant
- move TCP to protocols and create a UDP frontend and backend
- add tracing and fix a bug caused by not waiting for configuration after receiving a frame
- fix a bug in network-speed; there is still a bug with trace=warn where, after 2,000,000 messages, the server does not notice that the client has shut down and seems to lock somewhere (hard to reproduce)

Open tasks:
- [ ] verify UDP works correctly, especially the connect!
- [ ] implement UDP shutdown correctly, the one created in connect!
- [ ] unify logging
- [ ] fill metrics
- [ ] fix the "dropping a stream before its last messages can be handled" bug
- [ ] add documentation
- [ ] add benchmarks
- [ ] remove async_serde???
- [ ] add mpsc
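For the first item, a minimal sketch of how the now-async `listen` is meant to be used, assembled from the veloren_network calls that appear in the new async_recv example (`Network`, `Address`, `Pid`, `block_on`); treat it as an illustration under those assumptions, not as the authoritative API:

```rust
// Sketch only: the calls mirror network/examples/async_recv/src/main.rs.
use futures::executor::block_on;
use network::{Address, Network, Pid};
use uvth::ThreadPoolBuilder;

fn run_server() {
    let thread_pool = ThreadPoolBuilder::new().build();
    let server = Network::new(Pid::new(), &thread_pool);
    let address = Address::Tcp("127.0.0.1:52000".parse().unwrap());
    // `listen` is now async and returns a Result, so a failed bind
    // surfaces here instead of silently succeeding.
    block_on(server.listen(address)).expect("could not bind");
    // Accept a remote participant and the first stream it opens.
    let participant = block_on(server.connected()).unwrap();
    let _stream = block_on(participant.opened()).unwrap();
}
```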
This commit is contained in:
parent 595f1502b3
commit 2ee18b1fd8

Cargo.lock (generated): 63 changed lines
@ -156,22 +156,6 @@ version = "0.8.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "97be891acc47ca214468e09425d02cef3af2c94d0d82081cd02061f996802f14"
|
||||
|
||||
[[package]]
|
||||
name = "async-recv"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"chrono",
|
||||
"clap",
|
||||
"futures 0.3.5",
|
||||
"serde",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"uuid 0.8.1",
|
||||
"uvth",
|
||||
"veloren_network",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-std"
|
||||
version = "1.5.0"
|
||||
@ -589,13 +573,9 @@ version = "2.33.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
"bitflags",
|
||||
"strsim 0.8.0",
|
||||
"textwrap",
|
||||
"unicode-width",
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -1069,7 +1049,7 @@ dependencies = [
|
||||
"ident_case",
|
||||
"proc-macro2 1.0.17",
|
||||
"quote 1.0.6",
|
||||
"strsim 0.9.3",
|
||||
"strsim",
|
||||
"syn 1.0.27",
|
||||
]
|
||||
|
||||
@ -2830,21 +2810,6 @@ dependencies = [
|
||||
"winapi 0.3.8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "network-speed"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"clap",
|
||||
"futures 0.3.5",
|
||||
"serde",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"uuid 0.8.1",
|
||||
"uvth",
|
||||
"veloren_network",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nix"
|
||||
version = "0.14.1"
|
||||
@ -4487,12 +4452,6 @@ dependencies = [
|
||||
"bytes 0.4.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.9.3"
|
||||
@ -4556,13 +4515,6 @@ dependencies = [
|
||||
"unicode-xid 0.2.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tcp-loadtest"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"rand 0.7.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tempdir"
|
||||
version = "0.3.7"
|
||||
@ -4686,14 +4638,6 @@ dependencies = [
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tlid"
|
||||
version = "0.2.2"
|
||||
dependencies = [
|
||||
"num-traits 0.2.11",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "0.1.22"
|
||||
@ -5094,10 +5038,6 @@ name = "uuid"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"
|
||||
dependencies = [
|
||||
"rand 0.7.3",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uvth"
|
||||
@ -5358,7 +5298,6 @@ dependencies = [
|
||||
"prometheus",
|
||||
"rand 0.7.3",
|
||||
"serde",
|
||||
"tlid",
|
||||
"tracing",
|
||||
"tracing-futures",
|
||||
"tracing-subscriber",
|
||||
|
@ -10,9 +10,6 @@ members = [
    "voxygen",
    "world",
    "network",
-    "network/tools/tcp-loadtest",
-    "network/tools/network-speed",
-    "network/tools/async_recv",
]

# default profile for devs, fast to compile, okay enough to run, no debug information
network/Cargo.lock (generated, new file): 916 lines
@ -0,0 +1,916 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-std"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"async-task 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-timer 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kv-log-macro 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-task"
|
||||
version = "1.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "bincode"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.6.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon-sys"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "futures"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-executor 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-channel"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-core"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "futures-executor"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-io"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "futures-macro"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-sink"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "futures-task"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "futures-timer"
|
||||
version = "2.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "futures-util"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "iovec"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "kernel32-sys"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kv-log-macro"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.69"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matchers"
|
||||
version = "0.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "maybe-uninit"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "memoffset"
|
||||
version = "0.5.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.6.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio-uds"
|
||||
version = "0.6.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miow"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "net2"
|
||||
version = "0.2.33"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.42"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "pin-project"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-internal"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
version = "0.1.0-alpha.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-nested"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prometheus"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"protobuf 2.14.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "2.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "quick-error"
|
||||
version = "1.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_hc"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.1.56"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-automata"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.106"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.106"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.51"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sharded-slab"
|
||||
version = "0.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "spin"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thread_local"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.1.42"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-attributes 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-futures"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-log"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-serde"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-subscriber"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"matchers 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"sharded-slab 0.0.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-log 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "uvth"
|
||||
version = "3.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "veloren_network"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-std 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bincode 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-futures 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tracing-subscriber 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"uvth 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.9.0+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-build"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "ws2_32-sys"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[metadata]
|
||||
"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"
|
||||
"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
|
||||
"checksum async-std 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "538ecb01eb64eecd772087e5b6f7540cbc917f047727339a472dafed2185b267"
|
||||
"checksum async-task 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0ac2c016b079e771204030951c366db398864f5026f84a44dafb0ff20f02085d"
|
||||
"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
|
||||
"checksum bincode 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"
|
||||
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
|
||||
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
|
||||
"checksum chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
|
||||
"checksum crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c8ec7fcd21571dc78f96cc96243cab8d8f035247c3efd16c687be154c3fa9efa"
|
||||
"checksum crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061"
|
||||
"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
|
||||
"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
|
||||
"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
|
||||
"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
|
||||
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
|
||||
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
|
||||
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
|
||||
"checksum futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780"
|
||||
"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8"
|
||||
"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a"
|
||||
"checksum futures-executor 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba"
|
||||
"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6"
|
||||
"checksum futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7"
|
||||
"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6"
|
||||
"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27"
|
||||
"checksum futures-timer 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6"
|
||||
"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5"
|
||||
"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
|
||||
"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e"
|
||||
"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
|
||||
"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
|
||||
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
|
||||
"checksum kv-log-macro 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c54d9f465d530a752e6ebdc217e081a7a614b48cb200f6f0aee21ba6bc9aabb"
|
||||
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
"checksum libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)" = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005"
|
||||
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
|
||||
"checksum matchers 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1"
|
||||
"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
|
||||
"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
|
||||
"checksum memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8"
|
||||
"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f"
|
||||
"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125"
|
||||
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
|
||||
"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
|
||||
"checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
|
||||
"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
|
||||
"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
|
||||
"checksum once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b"
|
||||
"checksum pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c"
|
||||
"checksum pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f"
|
||||
"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae"
|
||||
"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587"
|
||||
"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
|
||||
"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63"
|
||||
"checksum proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694"
|
||||
"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3"
|
||||
"checksum prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1"
|
||||
"checksum protobuf 2.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485"
|
||||
"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
|
||||
"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
|
||||
"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
|
||||
"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
|
||||
"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
|
||||
"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
|
||||
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
|
||||
"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3"
|
||||
"checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
|
||||
"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
|
||||
"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76"
|
||||
"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
|
||||
"checksum serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399"
|
||||
"checksum serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c"
|
||||
"checksum serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)" = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9"
|
||||
"checksum sharded-slab 0.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ae75d0445b5d3778c9da3d1f840faa16d0627c8607f78a74daf69e5b988c39a1"
|
||||
"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
|
||||
"checksum smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05720e22615919e4734f6a99ceae50d00226c3c5aca406e102ebc33298214e0a"
|
||||
"checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
|
||||
"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03"
|
||||
"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
|
||||
"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f"
|
||||
"checksum tracing 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1721cc8cf7d770cc4257872507180f35a4797272f5962f24c806af9e7faf52ab"
|
||||
"checksum tracing-attributes 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b"
|
||||
"checksum tracing-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715"
|
||||
"checksum tracing-futures 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "58b0b7fd92dc7b71f29623cc6836dd7200f32161a2313dd78be233a8405694f6"
|
||||
"checksum tracing-log 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9"
|
||||
"checksum tracing-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6ccba2f8f16e0ed268fc765d9b7ff22e965e7185d32f8f1ec8294fe17d86e79"
|
||||
"checksum tracing-subscriber 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cfc50df245be6f0adf35c399cb16dea60e2c7d6cc83ff5dc22d727df06dd6f0c"
|
||||
"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
|
||||
"checksum uvth 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e59a167890d173eb0fcd7a1b99b84dc05c521ae8d76599130b8e19bef287abbf"
|
||||
"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
|
||||
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
|
||||
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
|
||||
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
|
@ -8,15 +8,14 @@ edition = "2018"

[dependencies]

tlid = { path = "../../tlid", features = ["serde"]}
#threadpool
uvth = "3.1"
#serialisation
bincode = "1.2"
-serde = "1.0"
+serde = { version = "1.0", features = ["derive"] }
byteorder = "1.3"
#sending
-async-std = { version = "1.5", features = ["std", "unstable"] }
+async-std = { version = "1.5", features = ["std"] }
#tracing and metrics
tracing = "0.1"
tracing-futures = "0.2"
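Enabling serde's `derive` feature matches how the new examples define their message types; as a small illustration, the `Msg` enum below is taken from the async_recv example further down, while the bincode round-trip is only an assumed sanity check (bincode is already a dependency of this crate):

```rust
use serde::{Deserialize, Serialize};

// Same message type the async_recv example sends over a Stream; the
// `derive` feature provides the Serialize/Deserialize impls.
#[derive(Serialize, Deserialize, Debug)]
enum Msg {
    Ping(u64),
    Pong(u64),
}

fn main() {
    // Round-trip through bincode as a quick check that the derived impls work.
    let bytes = bincode::serialize(&Msg::Ping(42)).unwrap();
    let back: Msg = bincode::deserialize(&bytes).unwrap();
    println!("{:?}", back);
}
```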
network/examples/.gitignore (vendored, new file): 2 lines
@ -0,0 +1,2 @@
# dont save cargo locks for examples
*/Cargo.lock
@ -1,3 +1,5 @@
+[workspace]
+
 [package]
 name = "async-recv"
 version = "0.1.0"
@ -10,7 +12,6 @@ edition = "2018"
uvth = "3.1"
network = { package = "veloren_network", path = "../../../network" }
clap = "2.33"
uuid = { version = "0.8", features = ["serde", "v4"] }
futures = "0.3"
tracing = "0.1"
chrono = "0.4"
network/examples/async_recv/src/main.rs (new file): 199 lines
@ -0,0 +1,199 @@
use chrono::prelude::*;
|
||||
use clap::{App, Arg};
|
||||
use futures::executor::block_on;
|
||||
use network::{Address, Network, Pid, Stream, PROMISES_NONE};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum Msg {
|
||||
Ping(u64),
|
||||
Pong(u64),
|
||||
}
|
||||
|
||||
/// This utility checks if async functionatily of veloren-network works
|
||||
/// correctly and outputs it at the end
|
||||
fn main() {
|
||||
let matches = App::new("Veloren Async Prove Utility")
|
||||
.version("0.1.0")
|
||||
.author("Marcel Märtens <marcel.cochem@googlemail.com>")
|
||||
.about("proves that veloren-network runs async")
|
||||
.arg(
|
||||
Arg::with_name("mode")
|
||||
.short("m")
|
||||
.long("mode")
|
||||
.takes_value(true)
|
||||
.possible_values(&["server", "client", "both"])
|
||||
.default_value("both")
|
||||
.help(
|
||||
"choose whether you want to start the server or client or both needed for \
|
||||
this program",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.default_value("52000")
|
||||
.help("port to listen on"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("ip")
|
||||
.long("ip")
|
||||
.takes_value(true)
|
||||
.default_value("127.0.0.1")
|
||||
.help("ip to listen and connect to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("protocol")
|
||||
.long("protocol")
|
||||
.takes_value(true)
|
||||
.default_value("tcp")
|
||||
.possible_values(&["tcp", "upd", "mpsc"])
|
||||
.help(
|
||||
"underlying protocol used for this test, mpsc can only combined with mode=both",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("trace")
|
||||
.short("t")
|
||||
.long("trace")
|
||||
.takes_value(true)
|
||||
.default_value("warn")
|
||||
.possible_values(&["trace", "debug", "info", "warn", "error"])
|
||||
.help("set trace level, not this has a performance impact!"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
if let Some(trace) = matches.value_of("trace") {
|
||||
let filter = EnvFilter::from_default_env().add_directive(trace.parse().unwrap());
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_max_level(Level::TRACE)
|
||||
.with_env_filter(filter)
|
||||
.init();
|
||||
};
|
||||
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
|
||||
let ip: &str = matches.value_of("ip").unwrap();
|
||||
let address = match matches.value_of("protocol") {
|
||||
Some("tcp") => Address::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
|
||||
Some("udp") => Address::Udp(format!("{}:{}", ip, port).parse().unwrap()),
|
||||
_ => panic!("invalid mode, run --help!"),
|
||||
};
|
||||
|
||||
let mut background = None;
|
||||
match matches.value_of("mode") {
|
||||
Some("server") => server(address),
|
||||
Some("client") => client(address),
|
||||
Some("both") => {
|
||||
let address1 = address.clone();
|
||||
background = Some(thread::spawn(|| server(address1)));
|
||||
thread::sleep(Duration::from_millis(200)); //start client after server
|
||||
client(address)
|
||||
},
|
||||
_ => panic!("invalid mode, run --help!"),
|
||||
};
|
||||
if let Some(background) = background {
|
||||
background.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn server(address: Address) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let server = Network::new(Pid::new(), &thread_pool);
|
||||
block_on(server.listen(address.clone())).unwrap(); //await
|
||||
println!("waiting for client");
|
||||
|
||||
let p1 = block_on(server.connected()).unwrap(); //remote representation of p1
|
||||
let mut s1 = block_on(p1.opened()).unwrap(); //remote representation of s1
|
||||
let mut s2 = block_on(p1.opened()).unwrap(); //remote representation of s2
|
||||
let t1 = thread::spawn(move || {
|
||||
if let Ok(Msg::Ping(id)) = block_on(s1.recv()) {
|
||||
thread::sleep(Duration::from_millis(3000));
|
||||
s1.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s1_1", Utc::now().time());
|
||||
}
|
||||
if let Ok(Msg::Ping(id)) = block_on(s1.recv()) {
|
||||
thread::sleep(Duration::from_millis(3000));
|
||||
s1.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s1_2", Utc::now().time());
|
||||
}
|
||||
thread::sleep(Duration::from_millis(10000));
|
||||
});
|
||||
let t2 = thread::spawn(move || {
|
||||
if let Ok(Msg::Ping(id)) = block_on(s2.recv()) {
|
||||
thread::sleep(Duration::from_millis(1000));
|
||||
s2.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s2_1", Utc::now().time());
|
||||
}
|
||||
if let Ok(Msg::Ping(id)) = block_on(s2.recv()) {
|
||||
thread::sleep(Duration::from_millis(1000));
|
||||
s2.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s2_2", Utc::now().time());
|
||||
}
|
||||
thread::sleep(Duration::from_millis(10000));
|
||||
});
|
||||
t1.join().unwrap();
|
||||
t2.join().unwrap();
|
||||
thread::sleep(Duration::from_millis(50));
|
||||
}
|
||||
|
||||
async fn async_task1(mut s: Stream) -> u64 {
|
||||
s.send(Msg::Ping(100)).unwrap();
|
||||
println!("[{}], s1_1...", Utc::now().time());
|
||||
let m1: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s1_1: {:?}", Utc::now().time(), m1);
|
||||
thread::sleep(Duration::from_millis(1000));
|
||||
s.send(Msg::Ping(101)).unwrap();
|
||||
println!("[{}], s1_2...", Utc::now().time());
|
||||
let m2: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s1_2: {:?}", Utc::now().time(), m2);
|
||||
match m2.unwrap() {
|
||||
Msg::Pong(id) => id,
|
||||
_ => panic!("wrong answer"),
|
||||
}
|
||||
}
|
||||
|
||||
async fn async_task2(mut s: Stream) -> u64 {
|
||||
s.send(Msg::Ping(200)).unwrap();
|
||||
println!("[{}], s2_1...", Utc::now().time());
|
||||
let m1: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s2_1: {:?}", Utc::now().time(), m1);
|
||||
thread::sleep(Duration::from_millis(5000));
|
||||
s.send(Msg::Ping(201)).unwrap();
|
||||
println!("[{}], s2_2...", Utc::now().time());
|
||||
let m2: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s2_2: {:?}", Utc::now().time(), m2);
|
||||
match m2.unwrap() {
|
||||
Msg::Pong(id) => id,
|
||||
_ => panic!("wrong answer"),
|
||||
}
|
||||
}
|
||||
|
||||
fn client(address: Address) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let client = Network::new(Pid::new(), &thread_pool);
|
||||
|
||||
let p1 = block_on(client.connect(address.clone())).unwrap(); //remote representation of p1
|
||||
let s1 = block_on(p1.open(16, PROMISES_NONE)).unwrap(); //remote representation of s1
|
||||
let s2 = block_on(p1.open(16, PROMISES_NONE)).unwrap(); //remote representation of s2
|
||||
let before = Instant::now();
|
||||
block_on(async {
|
||||
let f1 = async_task1(s1);
|
||||
let f2 = async_task2(s2);
|
||||
let _ = futures::join!(f1, f2);
|
||||
});
|
||||
if before.elapsed() < Duration::from_secs(13) {
|
||||
println!("IT WORKS!");
|
||||
} else {
|
||||
println!("doesn't seem to work :/")
|
||||
}
|
||||
thread::sleep(Duration::from_millis(50));
|
||||
}
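An editorial back-of-the-envelope for the 13 s threshold above (ignoring network latency and assuming the explicit sleeps dominate): the server sleeps 2 x 3 s on s1 and 2 x 1 s on s2, and the client additionally blocks for 1 s and 5 s between its pings. If the two streams were serviced strictly one after the other, that would add up to roughly (3 + 1 + 3) + (1 + 5 + 1) = about 14 s, which is over the threshold; with both streams handled concurrently the waits interleave and the run finishes in roughly 10 s. Finishing under 13 s therefore indicates that veloren-network really processed the two streams asynchronously.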
|
20
network/examples/chat/Cargo.toml
Normal file
@ -0,0 +1,20 @@
|
||||
[workspace]
|
||||
|
||||
[package]
|
||||
name = "network-speed"
|
||||
version = "0.1.0"
|
||||
authors = ["Marcel Märtens <marcel.cochem@googlemail.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
uvth = "3.1"
|
||||
network = { package = "veloren_network", path = "../../../network" }
|
||||
clap = "2.33"
|
||||
async-std = { version = "1.5", default-features = false }
|
||||
futures = "0.3"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = "0.2.3"
|
||||
bincode = "1.2"
|
||||
serde = "1.0"
|
180
network/examples/chat/src/main.rs
Normal file
@ -0,0 +1,180 @@
|
||||
use async_std::io;
|
||||
use clap::{App, Arg};
|
||||
use futures::executor::{block_on, ThreadPool};
|
||||
use network::{Address, Network, Participant, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
|
||||
use std::{sync::Arc, thread, time::Duration};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
/// This example contains a simple chat server that allows sending messages
|
||||
/// between participants
|
||||
fn main() {
|
||||
let matches = App::new("Chat example")
|
||||
.version("0.1.0")
|
||||
.author("Marcel Märtens <marcel.cochem@googlemail.com>")
|
||||
.about("example chat implemented with veloren-network")
|
||||
.arg(
|
||||
Arg::with_name("mode")
|
||||
.short("m")
|
||||
.long("mode")
|
||||
.takes_value(true)
|
||||
.possible_values(&["server", "client", "both"])
|
||||
.default_value("both")
|
||||
.help(
|
||||
"choose whether you want to start the server or client or both needed for \
|
||||
this program",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.default_value("52000")
|
||||
.help("port to listen on"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("ip")
|
||||
.long("ip")
|
||||
.takes_value(true)
|
||||
.default_value("127.0.0.1")
|
||||
.help("ip to listen and connect to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("protocol")
|
||||
.long("protocol")
|
||||
.takes_value(true)
|
||||
.default_value("tcp")
|
||||
.possible_values(&["tcp", "upd", "mpsc"])
|
||||
.help(
|
||||
"underlying protocol used for this test, mpsc can only combined with mode=both",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("trace")
|
||||
.short("t")
|
||||
.long("trace")
|
||||
.takes_value(true)
|
||||
.default_value("warn")
|
||||
.possible_values(&["trace", "debug", "info", "warn", "error"])
|
||||
.help("set trace level, not this has a performance impact!"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let trace = matches.value_of("trace").unwrap();
|
||||
let filter = EnvFilter::from_default_env().add_directive(trace.parse().unwrap());
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_max_level(Level::TRACE)
|
||||
.with_env_filter(filter)
|
||||
.init();
|
||||
|
||||
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
|
||||
let ip: &str = matches.value_of("ip").unwrap();
|
||||
let address = match matches.value_of("protocol") {
|
||||
Some("tcp") => Address::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
|
||||
Some("udp") => Address::Udp(format!("{}:{}", ip, port).parse().unwrap()),
|
||||
_ => panic!("invalid mode, run --help!"),
|
||||
};
|
||||
|
||||
let mut background = None;
|
||||
match matches.value_of("mode") {
|
||||
Some("server") => server(address),
|
||||
Some("client") => client(address),
|
||||
Some("both") => {
|
||||
let address1 = address.clone();
|
||||
background = Some(thread::spawn(|| server(address1)));
|
||||
thread::sleep(Duration::from_millis(200)); //start client after server
|
||||
client(address)
|
||||
},
|
||||
_ => panic!("invalid mode, run --help!"),
|
||||
};
|
||||
if let Some(background) = background {
|
||||
background.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn server(address: Address) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let server = Arc::new(Network::new(Pid::new(), &thread_pool));
|
||||
let pool = ThreadPool::new().unwrap();
|
||||
block_on(async {
|
||||
server.listen(address).await.unwrap();
|
||||
loop {
|
||||
let p1 = server.connected().await.unwrap();
|
||||
let server1 = server.clone();
|
||||
pool.spawn_ok(client_connection(server1, p1));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async fn client_connection(network: Arc<Network>, participant: Arc<Participant>) {
|
||||
let mut s1 = participant.opened().await.unwrap();
|
||||
let username = s1.recv::<String>().await.unwrap();
|
||||
println!("[{}] connected", username);
|
||||
loop {
|
||||
match s1.recv::<String>().await {
|
||||
Err(_) => {
|
||||
break;
|
||||
},
|
||||
Ok(msg) => {
|
||||
println!("[{}]: {}", username, msg);
|
||||
let parts = network.participants().await;
|
||||
for p in parts.values() {
|
||||
let mut s = p
|
||||
.open(32, PROMISES_ORDERED | PROMISES_CONSISTENCY)
|
||||
.await
|
||||
.unwrap();
|
||||
s.send((username.clone(), msg.clone())).unwrap();
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
println!("[{}] disconnected", username);
|
||||
}
|
||||
|
||||
fn client(address: Address) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let client = Network::new(Pid::new(), &thread_pool);
|
||||
let pool = ThreadPool::new().unwrap();
|
||||
|
||||
block_on(async {
|
||||
let p1 = client.connect(address.clone()).await.unwrap(); //remote representation of p1
|
||||
let mut s1 = p1
|
||||
.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY)
|
||||
.await
|
||||
.unwrap(); //remote representation of s1
|
||||
println!("Enter your username:");
|
||||
let mut username = String::new();
|
||||
io::stdin().read_line(&mut username).await.unwrap();
|
||||
username = username.split_whitespace().collect();
|
||||
println!("Your username is: {}", username);
|
||||
println!("write /quit to close");
|
||||
pool.spawn_ok(read_messages(p1));
|
||||
s1.send(username).unwrap();
|
||||
loop {
|
||||
let mut line = String::new();
|
||||
io::stdin().read_line(&mut line).await.unwrap();
|
||||
line = line.split_whitespace().collect();
|
||||
if line.as_str() == "/quit" {
|
||||
println!("goodbye");
|
||||
break;
|
||||
} else {
|
||||
s1.send(line).unwrap();
|
||||
}
|
||||
}
|
||||
});
|
||||
thread::sleep(Duration::from_millis(30)); // TODO: still needed for correct shutdown
|
||||
}
|
||||
|
||||
// I am quite lazy: the sending is done on a single stream above, but for
|
||||
// receiving I open and close a stream per message. This could be done more cleanly, but
|
||||
// it lets the server side stay simple and just get a list of
|
||||
// all participants and send to them... (see the sketch below for an alternative)
|
||||
async fn read_messages(participant: Arc<Participant>) {
|
||||
while let Ok(mut s) = participant.opened().await {
|
||||
let (username, message) = s.recv::<(String, String)>().await.unwrap();
|
||||
println!("[{}]: {}", username, message);
|
||||
}
|
||||
println!("gracefully shut down");
|
||||
}
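As the comment above admits, opening a fresh stream per broadcast message is the lazy variant. A minimal alternative sketch (not part of this commit; `broadcast` and `outgoing` are hypothetical names, and it assumes `network::{Pid, Stream}` are imported) would keep one long-lived outgoing stream per connected client and reuse it:

use std::collections::HashMap;

// hypothetical helper: reuse one persistent Stream per client instead of
// opening a new stream for every message
fn broadcast(outgoing: &mut HashMap<Pid, Stream>, from: &str, msg: &str) {
    for s in outgoing.values_mut() {
        // a failed send just means that this client already disconnected
        let _ = s.send((from.to_string(), msg.to_string()));
    }
}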
|
22
network/examples/fileshare/Cargo.toml
Normal file
@ -0,0 +1,22 @@
|
||||
[workspace]
|
||||
|
||||
[package]
|
||||
name = "fileshare"
|
||||
version = "0.1.0"
|
||||
authors = ["Marcel Märtens <marcel.cochem@googlemail.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
uvth = "3.1"
|
||||
network = { package = "veloren_network", path = "../../../network" }
|
||||
clap = "2.33"
|
||||
async-std = { version = "1.5", default-features = false }
|
||||
futures = "0.3"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = "0.2.3"
|
||||
bincode = "1.2"
|
||||
serde = "1.0"
|
||||
rand = "0.7.3"
|
||||
shellexpand = "2.0.0"
|
87
network/examples/fileshare/src/commands.rs
Normal file
@ -0,0 +1,87 @@
|
||||
use async_std::{
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use network::{Address, Participant, Stream};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum LocalCommand {
|
||||
Shutdown,
|
||||
Disconnect,
|
||||
Connect(Address),
|
||||
List,
|
||||
Serve(FileInfo),
|
||||
Get(u32, Option<String>),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub enum Command {
|
||||
List,
|
||||
Get(u32),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
||||
pub struct FileInfo {
|
||||
id: u32,
|
||||
pub path: String,
|
||||
pub size: u64,
|
||||
pub hash: String,
|
||||
}
|
||||
|
||||
pub struct RemoteInfo {
|
||||
infos: HashMap<u32, FileInfo>,
|
||||
_participant: Arc<Participant>,
|
||||
pub cmd_out: Stream,
|
||||
pub file_out: Stream,
|
||||
}
|
||||
|
||||
impl FileInfo {
|
||||
pub async fn new(path: &Path) -> Option<Self> {
|
||||
let mt = match fs::metadata(&path).await {
|
||||
Err(e) => {
|
||||
println!(
|
||||
"cannot get metadata for file: {:?}, does it exist? Error: {:?}",
|
||||
&path, &e
|
||||
);
|
||||
return None;
|
||||
},
|
||||
Ok(mt) => mt,
|
||||
};
|
||||
let size = mt.len();
|
||||
Some(Self {
|
||||
id: rand::thread_rng().gen(),
|
||||
path: path.as_os_str().to_os_string().into_string().unwrap(),
|
||||
size,
|
||||
hash: "<none>".to_owned(),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn load(&self) -> Result<Vec<u8>, std::io::Error> { fs::read(self.path()).await }
|
||||
|
||||
pub fn id(&self) -> u32 { self.id }
|
||||
|
||||
pub fn path(&self) -> PathBuf { self.path.parse().unwrap() }
|
||||
}
|
||||
|
||||
impl RemoteInfo {
|
||||
pub fn new(cmd_out: Stream, file_out: Stream, participant: Arc<Participant>) -> Self {
|
||||
Self {
|
||||
infos: HashMap::new(),
|
||||
_participant: participant,
|
||||
cmd_out,
|
||||
file_out,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_info(&self, id: u32) -> Option<FileInfo> { self.infos.get(&id).map(|fi| fi.clone()) }
|
||||
|
||||
pub fn insert_infos(&mut self, mut fi: Vec<FileInfo>) {
|
||||
for fi in fi.drain(..) {
|
||||
self.infos.insert(fi.id(), fi);
|
||||
}
|
||||
}
|
||||
}
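The `hash` field above is currently filled with a `"<none>"` placeholder. Purely as an illustration (not part of this commit), a cheap non-cryptographic fingerprint could be derived from the file contents with std's DefaultHasher; a real implementation would more likely use a proper content hash:

use std::hash::{Hash, Hasher};

// illustrative only: a fingerprint string that could populate FileInfo::hash
async fn fingerprint(path: &Path) -> Option<String> {
    let data = fs::read(path).await.ok()?;
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    data.hash(&mut hasher);
    Some(format!("{:016x}", hasher.finish()))
}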
|
201
network/examples/fileshare/src/main.rs
Normal file
@ -0,0 +1,201 @@
|
||||
#![feature(async_closure, exclusive_range_pattern)]
|
||||
|
||||
use async_std::{io, path::PathBuf};
|
||||
use clap::{App, Arg, SubCommand};
|
||||
use futures::{
|
||||
channel::mpsc,
|
||||
executor::{block_on, ThreadPool},
|
||||
sink::SinkExt,
|
||||
};
|
||||
use network::Address;
|
||||
use std::{thread, time::Duration};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
mod commands;
|
||||
mod server;
|
||||
use commands::{FileInfo, LocalCommand};
|
||||
use server::Server;
|
||||
|
||||
fn main() {
|
||||
let matches = App::new("File Server")
|
||||
.version("0.1.0")
|
||||
.author("Marcel Märtens <marcel.cochem@googlemail.com>")
|
||||
.about("example file server implemented with veloren-network")
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.default_value("15006")
|
||||
.help("port to listen on"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("trace")
|
||||
.short("t")
|
||||
.long("trace")
|
||||
.takes_value(true)
|
||||
.default_value("warn")
|
||||
.possible_values(&["trace", "debug", "info", "warn", "error"])
|
||||
.help("set trace level, not this has a performance impact!"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let trace = matches.value_of("trace").unwrap();
|
||||
let filter = EnvFilter::from_default_env()
|
||||
.add_directive(trace.parse().unwrap())
|
||||
.add_directive("fileshare::server=trace".parse().unwrap())
|
||||
.add_directive("fileshare::commands=trace".parse().unwrap());
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_max_level(Level::TRACE)
|
||||
.with_env_filter(filter)
|
||||
.init();
|
||||
|
||||
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
|
||||
let address = Address::Tcp(format!("{}:{}", "127.0.0.1", port).parse().unwrap());
|
||||
|
||||
let (server, cmd_sender) = Server::new();
|
||||
let pool = ThreadPool::new().unwrap();
|
||||
pool.spawn_ok(server.run(address));
|
||||
|
||||
thread::sleep(Duration::from_millis(50)); //just for trace
|
||||
|
||||
block_on(client(cmd_sender));
|
||||
}
|
||||
|
||||
fn file_exists(file: String) -> Result<(), String> {
|
||||
let file: std::path::PathBuf = shellexpand::tilde(&file).parse().unwrap();
|
||||
if file.exists() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!("file does not exist"))
|
||||
}
|
||||
}
|
||||
|
||||
fn get_options<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new("")
|
||||
.setting(clap::AppSettings::NoBinaryName)
|
||||
.setting(clap::AppSettings::SubcommandRequired)
|
||||
.setting(clap::AppSettings::VersionlessSubcommands)
|
||||
.setting(clap::AppSettings::SubcommandRequiredElseHelp)
|
||||
.setting(clap::AppSettings::ColorAuto)
|
||||
.subcommand(SubCommand::with_name("quit").about("closes program"))
|
||||
.subcommand(SubCommand::with_name("disconnect").about("stop connections to all servers"))
|
||||
.subcommand(SubCommand::with_name("t").about("quick test by connectiong to 127.0.0.1:1231"))
|
||||
.subcommand(
|
||||
SubCommand::with_name("connect")
|
||||
.about("opens a connection to another instance of this fileserver network")
|
||||
.setting(clap::AppSettings::NoBinaryName)
|
||||
.arg(
|
||||
Arg::with_name("ip:port")
|
||||
.help("ip and port to connect to, example '127.0.0.1:1231'")
|
||||
.required(true)
|
||||
.validator(|ipport| match ipport.parse::<std::net::SocketAddr>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => Err(format!("must be valid Ip:Port combination {:?}", e)),
|
||||
}),
|
||||
),
|
||||
)
|
||||
.subcommand(SubCommand::with_name("list").about("lists all available files on the network"))
|
||||
.subcommand(
|
||||
SubCommand::with_name("serve")
|
||||
.about("make file available on the network")
|
||||
.arg(
|
||||
Arg::with_name("file")
|
||||
.help("file to serve")
|
||||
.required(true)
|
||||
.validator(file_exists),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("get")
|
||||
.about(
|
||||
"downloads file with the id from the `list` command. Optionally provide a \
|
||||
storage path, if none is provided it will be saved in the current directory \
|
||||
with the remote filename",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("id")
|
||||
.help("id to download. get the id from the `list` command")
|
||||
.required(true)
|
||||
.validator(|id| match id.parse::<u32>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => Err(format!("must be a number {:?}", e)),
|
||||
}),
|
||||
)
|
||||
.arg(Arg::with_name("file").help("local path to store the file to")),
|
||||
)
|
||||
}
|
||||
|
||||
async fn client(mut cmd_sender: mpsc::UnboundedSender<LocalCommand>) {
|
||||
use std::io::Write;
|
||||
|
||||
loop {
|
||||
let mut line = String::new();
|
||||
print!("==> ");
|
||||
std::io::stdout().flush().unwrap();
|
||||
io::stdin().read_line(&mut line).await.unwrap();
|
||||
let matches = match get_options().get_matches_from_safe(line.split_whitespace()) {
|
||||
Err(e) => {
|
||||
println!("{}", e.message);
|
||||
continue;
|
||||
},
|
||||
Ok(matches) => matches,
|
||||
};
|
||||
|
||||
match matches.subcommand() {
|
||||
("quit", _) => {
|
||||
cmd_sender.send(LocalCommand::Shutdown).await.unwrap();
|
||||
println!("goodbye");
|
||||
break;
|
||||
},
|
||||
("disconnect", _) => {
|
||||
cmd_sender.send(LocalCommand::Disconnect).await.unwrap();
|
||||
},
|
||||
("connect", Some(connect_matches)) => {
|
||||
let socketaddr = connect_matches.value_of("ip:port").unwrap().parse().unwrap();
|
||||
cmd_sender
|
||||
.send(LocalCommand::Connect(Address::Tcp(socketaddr)))
|
||||
.await
|
||||
.unwrap();
|
||||
},
|
||||
("t", _) => {
|
||||
cmd_sender
|
||||
.send(LocalCommand::Connect(Address::Tcp(
|
||||
"127.0.0.1:1231".parse().unwrap(),
|
||||
)))
|
||||
.await
|
||||
.unwrap();
|
||||
},
|
||||
("serve", Some(serve_matches)) => {
|
||||
let path = shellexpand::tilde(serve_matches.value_of("file").unwrap());
|
||||
let path: PathBuf = path.parse().unwrap();
|
||||
if let Some(fileinfo) = FileInfo::new(&path).await {
|
||||
cmd_sender
|
||||
.send(LocalCommand::Serve(fileinfo))
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
},
|
||||
("list", _) => {
|
||||
cmd_sender.send(LocalCommand::List).await.unwrap();
|
||||
},
|
||||
("get", Some(get_matches)) => {
|
||||
let id: u32 = get_matches.value_of("id").unwrap().parse().unwrap();
|
||||
let file = get_matches.value_of("file");
|
||||
cmd_sender
|
||||
.send(LocalCommand::Get(id, file.map(|s| s.to_string())))
|
||||
.await
|
||||
.unwrap();
|
||||
},
|
||||
|
||||
(_, _) => {
|
||||
unreachable!("this subcommand isn't yet handled");
|
||||
},
|
||||
}
|
||||
// this 100 ms sleep is a lazy way to let the logs appear before
|
||||
// the next '==>' appears...
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
println!("");
|
||||
}
|
||||
thread::sleep(Duration::from_millis(30)); // TODO: still needed for correct shutdown
|
||||
}
|
214
network/examples/fileshare/src/server.rs
Normal file
@ -0,0 +1,214 @@
|
||||
use crate::commands::{Command, FileInfo, LocalCommand, RemoteInfo};
|
||||
use async_std::{
|
||||
fs,
|
||||
path::PathBuf,
|
||||
sync::{Mutex, RwLock},
|
||||
};
|
||||
use futures::{channel::mpsc, future::FutureExt, stream::StreamExt};
|
||||
use network::{Address, Network, Participant, Pid, Stream, PROMISES_CONSISTENCY, PROMISES_ORDERED};
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tracing::*;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ControlChannels {
|
||||
command_receiver: mpsc::UnboundedReceiver<LocalCommand>,
|
||||
}
|
||||
|
||||
pub struct Server {
|
||||
run_channels: Option<ControlChannels>,
|
||||
network: Network,
|
||||
served: RwLock<Vec<FileInfo>>,
|
||||
remotes: RwLock<HashMap<Pid, Arc<Mutex<RemoteInfo>>>>,
|
||||
receiving_files: Mutex<HashMap<u32, Option<String>>>,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub fn new() -> (Self, mpsc::UnboundedSender<LocalCommand>) {
|
||||
let (command_sender, command_receiver) = mpsc::unbounded();
|
||||
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let network = Network::new(Pid::new(), &thread_pool);
|
||||
|
||||
let run_channels = Some(ControlChannels { command_receiver });
|
||||
(
|
||||
Server {
|
||||
run_channels,
|
||||
network,
|
||||
served: RwLock::new(vec![]),
|
||||
remotes: RwLock::new(HashMap::new()),
|
||||
receiving_files: Mutex::new(HashMap::new()),
|
||||
},
|
||||
command_sender,
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn run(mut self, address: Address) {
|
||||
let run_channels = self.run_channels.take().unwrap();
|
||||
|
||||
self.network.listen(address).await.unwrap();
|
||||
|
||||
futures::join!(
|
||||
self.command_manager(run_channels.command_receiver,),
|
||||
self.connect_manager(),
|
||||
);
|
||||
}
|
||||
|
||||
async fn command_manager(&self, command_receiver: mpsc::UnboundedReceiver<LocalCommand>) {
|
||||
trace!("start command_manager");
|
||||
command_receiver
|
||||
.for_each_concurrent(None, async move |cmd| {
|
||||
match cmd {
|
||||
LocalCommand::Shutdown => {
|
||||
println!("shutting down service");
|
||||
return;
|
||||
},
|
||||
LocalCommand::Disconnect => {
|
||||
self.remotes.write().await.clear();
|
||||
for (_, p) in self.network.participants().await.drain() {
|
||||
self.network.disconnect(p).await.unwrap();
|
||||
}
|
||||
println!("disconnecting all connections");
|
||||
return;
|
||||
},
|
||||
LocalCommand::Connect(addr) => {
|
||||
println!("trying to connect to: {:?}", &addr);
|
||||
match self.network.connect(addr.clone()).await {
|
||||
Ok(p) => self.loop_participant(p).await,
|
||||
Err(e) => {
|
||||
println!("failled to connect to {:?}, err: {:?}", &addr, e);
|
||||
},
|
||||
}
|
||||
},
|
||||
LocalCommand::Serve(fileinfo) => {
|
||||
self.served.write().await.push(fileinfo.clone());
|
||||
println!("serving file: {:?}", fileinfo.path);
|
||||
},
|
||||
LocalCommand::List => {
|
||||
let mut total_file_infos = vec![];
|
||||
for ri in self.remotes.read().await.values() {
|
||||
let mut ri = ri.lock().await;
|
||||
ri.cmd_out.send(Command::List).unwrap();
|
||||
let mut file_infos = ri.cmd_out.recv::<Vec<FileInfo>>().await.unwrap();
|
||||
ri.insert_infos(file_infos.clone());
|
||||
total_file_infos.append(&mut file_infos);
|
||||
}
|
||||
print_fileinfos(&total_file_infos);
|
||||
},
|
||||
LocalCommand::Get(id, path) => {
|
||||
// we don't know which remote owns the file, so just broadcast the request (lazy but simple)
|
||||
for ri in self.remotes.read().await.values() {
|
||||
let mut ri = ri.lock().await;
|
||||
if ri.get_info(id).is_some() {
|
||||
//found provider, send request.
|
||||
self.receiving_files.lock().await.insert(id, path.clone());
|
||||
ri.cmd_out.send(Command::Get(id)).unwrap();
|
||||
// the answer is handled via the other stream!
|
||||
break;
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
})
|
||||
.await;
|
||||
trace!("stop command_manager");
|
||||
}
|
||||
|
||||
async fn connect_manager(&self) {
|
||||
trace!("start connect_manager");
|
||||
let iter = futures::stream::unfold((), |_| {
|
||||
self.network.connected().map(|r| r.ok().map(|v| (v, ())))
|
||||
});
|
||||
|
||||
iter.for_each_concurrent(/* limit */ None, async move |participant| {
|
||||
self.loop_participant(participant).await;
|
||||
})
|
||||
.await;
|
||||
trace!("stop connect_manager");
|
||||
}
|
||||
|
||||
async fn loop_participant(&self, p: Arc<Participant>) {
|
||||
if let (Ok(cmd_out), Ok(file_out), Ok(cmd_in), Ok(file_in)) = (
|
||||
p.open(15, PROMISES_CONSISTENCY | PROMISES_ORDERED).await,
|
||||
p.open(40, PROMISES_CONSISTENCY).await,
|
||||
p.opened().await,
|
||||
p.opened().await,
|
||||
) {
|
||||
debug!(?p, "connection successfully initiated");
|
||||
let id = p.remote_pid();
|
||||
let ri = Arc::new(Mutex::new(RemoteInfo::new(cmd_out, file_out, p)));
|
||||
self.remotes.write().await.insert(id, ri.clone());
|
||||
futures::join!(
|
||||
self.handle_remote_cmd(cmd_in, ri.clone()),
|
||||
self.handle_files(file_in, ri.clone()),
|
||||
);
|
||||
}
|
||||
}
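For orientation (an editorial note, not part of the diff): the pairing above relies on both fileshare peers running the same sequence, i.e. each side opens its command stream (prio 15) and its file stream (prio 40) and then accepts the two streams opened by the peer, in that order. A minimal sketch of that shared handshake, using only the API visible above and additionally assuming `network::ParticipantError` is imported:

// sketch only: the stream handshake both fileshare peers are assumed to perform
async fn pair_streams(p: &Participant) -> Result<(Stream, Stream, Stream, Stream), ParticipantError> {
    let cmd_out = p.open(15, PROMISES_CONSISTENCY | PROMISES_ORDERED).await?;
    let file_out = p.open(40, PROMISES_CONSISTENCY).await?;
    let cmd_in = p.opened().await?;   // peer's command stream
    let file_in = p.opened().await?;  // peer's file stream
    Ok((cmd_out, file_out, cmd_in, file_in))
}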
|
||||
|
||||
async fn handle_remote_cmd(&self, mut stream: Stream, remote_info: Arc<Mutex<RemoteInfo>>) {
|
||||
while let Ok(msg) = stream.recv::<Command>().await {
|
||||
println!("got message: {:?}", &msg);
|
||||
match msg {
|
||||
Command::List => {
|
||||
info!("request to send my list");
|
||||
let served = self.served.read().await.clone();
|
||||
stream.send(served).unwrap();
|
||||
},
|
||||
Command::Get(id) => {
|
||||
for file_info in self.served.read().await.iter() {
|
||||
if file_info.id() == id {
|
||||
info!("request to send file i got, sending it");
|
||||
if let Ok(data) = file_info.load().await {
|
||||
match remote_info.lock().await.file_out.send((file_info, data)) {
|
||||
Ok(_) => debug!("send file"),
|
||||
Err(e) => error!(?e, "sending file failed"),
|
||||
}
|
||||
} else {
|
||||
warn!("cannot send file as loading failed, oes it still exist?");
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_files(&self, mut stream: Stream, _remote_info: Arc<Mutex<RemoteInfo>>) {
|
||||
while let Ok((fi, data)) = stream.recv::<(FileInfo, Vec<u8>)>().await {
|
||||
debug!(?fi, "got file");
|
||||
let path = self.receiving_files.lock().await.remove(&fi.id()).flatten();
|
||||
let path: PathBuf = match &path {
|
||||
Some(path) => shellexpand::tilde(&path).parse().unwrap(),
|
||||
None => {
|
||||
let mut path = std::env::current_dir().unwrap();
|
||||
path.push(fi.path().file_name().unwrap());
|
||||
trace!("no path provided, saving down to {:?}", path);
|
||||
PathBuf::from(path)
|
||||
},
|
||||
};
|
||||
debug!("received file, going to save it under {:?}", path);
|
||||
fs::write(path, data).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn print_fileinfos(infos: &Vec<FileInfo>) {
|
||||
let mut i = 0;
|
||||
for info in infos {
|
||||
let bytes = info.size;
|
||||
match bytes {
|
||||
0..100_000 => println!("{}: {}bytes '{}'", info.id(), bytes, info.path),
|
||||
100_000..100_000_000 => {
|
||||
println!("{}: {}bytes '{}'", info.id(), bytes / 1024, info.path)
|
||||
},
|
||||
_ => println!(
|
||||
"{}: {}bytes '{}'",
|
||||
info.id(),
|
||||
bytes / 1024 / 1024,
|
||||
info.path
|
||||
),
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
println!("-- {} files available", i);
|
||||
}
|
@ -1,3 +1,5 @@
|
||||
[workspace]
|
||||
|
||||
[package]
|
||||
name = "network-speed"
|
||||
version = "0.1.0"
|
||||
@ -10,7 +12,6 @@ edition = "2018"
|
||||
uvth = "3.1"
|
||||
network = { package = "veloren_network", path = "../../../network" }
|
||||
clap = "2.33"
|
||||
uuid = { version = "0.8", features = ["serde", "v4"] }
|
||||
futures = "0.3"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = "0.2.3"
|
160
network/examples/network-speed/src/main.rs
Normal file
@ -0,0 +1,160 @@
|
||||
use clap::{App, Arg};
|
||||
use futures::executor::block_on;
|
||||
use network::{Address, Network, Pid, PROMISES_CONSISTENCY, PROMISES_ORDERED};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum Msg {
|
||||
Ping { id: u64, data: Vec<u8> },
|
||||
Pong { id: u64, data: Vec<u8> },
|
||||
}
|
||||
|
||||
/// This utility tests the speed of veloren network by creating a client that
|
||||
/// opens a stream and pipes as many messages through it as possible.
|
||||
fn main() {
|
||||
let matches = App::new("Veloren Speed Test Utility")
|
||||
.version("0.1.0")
|
||||
.author("Marcel Märtens <marcel.cochem@googlemail.com>")
|
||||
.about("Runs speedtests regarding different parameter to benchmark veloren-network")
|
||||
.arg(
|
||||
Arg::with_name("mode")
|
||||
.short("m")
|
||||
.long("mode")
|
||||
.takes_value(true)
|
||||
.possible_values(&["server", "client", "both"])
|
||||
.default_value("both")
|
||||
.help(
|
||||
"choose whether you want to start the server or client or both needed for \
|
||||
this program",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.default_value("52000")
|
||||
.help("port to listen on"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("ip")
|
||||
.long("ip")
|
||||
.takes_value(true)
|
||||
.default_value("127.0.0.1")
|
||||
.help("ip to listen and connect to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("protocol")
|
||||
.long("protocol")
|
||||
.takes_value(true)
|
||||
.default_value("tcp")
|
||||
.possible_values(&["tcp", "upd", "mpsc"])
|
||||
.help(
|
||||
"underlying protocol used for this test, mpsc can only combined with mode=both",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("trace")
|
||||
.short("t")
|
||||
.long("trace")
|
||||
.takes_value(true)
|
||||
.default_value("warn")
|
||||
.possible_values(&["trace", "debug", "info", "warn", "error"])
|
||||
.help("set trace level, not this has a performance impact!"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let trace = matches.value_of("trace").unwrap();
|
||||
let filter = EnvFilter::from_default_env()
    .add_directive(trace.parse().unwrap())
    .add_directive("veloren_network::participant=debug".parse().unwrap())
    .add_directive("veloren_network::api=debug".parse().unwrap());
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
.with_max_level(Level::TRACE)
|
||||
.with_env_filter(filter)
|
||||
.init();
|
||||
|
||||
let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
|
||||
let ip: &str = matches.value_of("ip").unwrap();
|
||||
let address = match matches.value_of("protocol") {
|
||||
Some("tcp") => Address::Tcp(format!("{}:{}", ip, port).parse().unwrap()),
|
||||
Some("udp") => Address::Udp(format!("{}:{}", ip, port).parse().unwrap()),
|
||||
_ => panic!("invalid mode, run --help!"),
|
||||
};
|
||||
|
||||
let mut background = None;
|
||||
match matches.value_of("mode") {
|
||||
Some("server") => server(address),
|
||||
Some("client") => client(address),
|
||||
Some("both") => {
|
||||
let address1 = address.clone();
|
||||
background = Some(thread::spawn(|| server(address1)));
|
||||
thread::sleep(Duration::from_millis(200)); //start client after server
|
||||
client(address)
|
||||
},
|
||||
_ => panic!("invalid mode, run --help!"),
|
||||
};
|
||||
if let Some(background) = background {
|
||||
background.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn server(address: Address) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let server = Network::new(Pid::new(), &thread_pool);
|
||||
block_on(server.listen(address)).unwrap();
|
||||
|
||||
loop {
|
||||
info!("waiting for participant to connect");
|
||||
let p1 = block_on(server.connected()).unwrap(); //remote representation of p1
|
||||
let mut s1 = block_on(p1.opened()).unwrap(); //remote representation of s1
|
||||
block_on(async {
|
||||
let mut last = Instant::now();
|
||||
let mut id = 0u64;
|
||||
while let Ok(_msg) = s1.recv::<Msg>().await {
|
||||
id += 1;
|
||||
if id.rem_euclid(1000000) == 0 {
|
||||
let new = Instant::now();
|
||||
let diff = new.duration_since(last);
|
||||
last = new;
|
||||
println!("recv 1.000.000 took {}", diff.as_millis());
|
||||
}
|
||||
}
|
||||
info!("other stream was closed");
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn client(address: Address) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build();
|
||||
let client = Network::new(Pid::new(), &thread_pool);
|
||||
|
||||
let p1 = block_on(client.connect(address.clone())).unwrap(); //remote representation of p1
|
||||
let mut s1 = block_on(p1.open(16, PROMISES_ORDERED | PROMISES_CONSISTENCY)).unwrap(); //remote representation of s1
|
||||
let mut last = Instant::now();
|
||||
let mut id = 0u64;
|
||||
loop {
|
||||
s1.send(Msg::Ping {
|
||||
id,
|
||||
data: vec![0; 1000],
|
||||
})
|
||||
.unwrap();
|
||||
id += 1;
|
||||
if id.rem_euclid(1000000) == 0 {
|
||||
let new = Instant::now();
|
||||
let diff = new.duration_since(last);
|
||||
last = new;
|
||||
println!("send 1.000.000 took {}", diff.as_millis());
|
||||
}
|
||||
if id > 2000000 {
|
||||
println!("stop");
|
||||
std::thread::sleep(std::time::Duration::from_millis(50));
|
||||
break;
|
||||
}
|
||||
}
|
||||
debug!("closing client");
|
||||
}
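For reference (editorial, not part of the commit): the printed batch times translate directly into payload throughput, since each batch is 1,000,000 messages of 1,000 bytes. A tiny helper for that conversion might look like this; a 2,000 ms batch then corresponds to roughly 500 MB/s of payload, not counting protocol overhead:

// sketch only: payload throughput implied by one printed batch duration
fn payload_mb_per_s(batch: std::time::Duration) -> f64 {
    let bytes = 1_000_000u64 * 1_000u64; // messages per batch * payload bytes per message
    bytes as f64 / batch.as_secs_f64() / 1_000_000.0
}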
|
@ -1,3 +1,5 @@
|
||||
[workspace]
|
||||
|
||||
[package]
|
||||
name = "tcp-loadtest"
|
||||
version = "0.1.0"
|
@ -19,9 +19,13 @@ fn setup() -> Result<SocketAddr, u32> {
|
||||
return Err(1);
|
||||
}
|
||||
let a: SocketAddr = format!("{}:{}", args[1], args[2]).parse().unwrap();
|
||||
println!("You provided address: {}", &a);
|
||||
return Ok(a);
|
||||
}
|
||||
|
||||
/// This example file is not running veloren-network at all,
|
||||
/// instead it's just trying to create 4 threads and pump as much bytes as
|
||||
/// possible through a specific listener, the listener needs to be created
|
||||
/// before this program is started.
|
||||
fn main() -> Result<(), u32> {
|
||||
let addr = Arc::new(setup()?);
|
||||
let data: Arc<String> = Arc::new(
|
@ -1,9 +1,9 @@
|
||||
use crate::{
|
||||
message::{self, InCommingMessage, OutGoingMessage},
|
||||
scheduler::Scheduler,
|
||||
types::{Mid, Pid, Prio, Promises, Sid},
|
||||
types::{Mid, Pid, Prio, Promises, Requestor::User, Sid},
|
||||
};
|
||||
use async_std::{sync::RwLock, task};
|
||||
use async_std::{io, sync::RwLock, task};
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
sink::SinkExt,
|
||||
@ -28,13 +28,11 @@ pub enum Address {
|
||||
Mpsc(u64),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Participant {
|
||||
local_pid: Pid,
|
||||
remote_pid: Pid,
|
||||
stream_open_sender: RwLock<mpsc::UnboundedSender<(Prio, Promises, oneshot::Sender<Stream>)>>,
|
||||
stream_opened_receiver: RwLock<mpsc::UnboundedReceiver<Stream>>,
|
||||
shutdown_receiver: RwLock<oneshot::Receiver<()>>,
|
||||
closed: AtomicBool,
|
||||
disconnect_sender: Option<mpsc::UnboundedSender<Pid>>,
|
||||
}
|
||||
@ -48,25 +46,33 @@ pub struct Stream {
|
||||
promises: Promises,
|
||||
msg_send_sender: std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>,
|
||||
msg_recv_receiver: mpsc::UnboundedReceiver<InCommingMessage>,
|
||||
shutdown_receiver: oneshot::Receiver<()>,
|
||||
closed: AtomicBool,
|
||||
closed: Arc<AtomicBool>,
|
||||
shutdown_sender: Option<mpsc::UnboundedSender<Sid>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct NetworkError {}
|
||||
#[derive(Debug)]
|
||||
pub enum NetworkError {
|
||||
NetworkClosed,
|
||||
ListenFailed(std::io::Error),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct ParticipantError {}
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum ParticipantError {
|
||||
ParticipantClosed,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct StreamError {}
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum StreamError {
|
||||
StreamClosed,
|
||||
}
|
||||
|
||||
pub struct Network {
|
||||
local_pid: Pid,
|
||||
participants: RwLock<HashMap<Pid, Arc<Participant>>>,
|
||||
listen_sender: RwLock<mpsc::UnboundedSender<Address>>,
|
||||
connect_sender: RwLock<mpsc::UnboundedSender<(Address, oneshot::Sender<Participant>)>>,
|
||||
listen_sender:
|
||||
RwLock<mpsc::UnboundedSender<(Address, oneshot::Sender<async_std::io::Result<()>>)>>,
|
||||
connect_sender:
|
||||
RwLock<mpsc::UnboundedSender<(Address, oneshot::Sender<io::Result<Participant>>)>>,
|
||||
connected_receiver: RwLock<mpsc::UnboundedReceiver<Participant>>,
|
||||
shutdown_sender: Option<oneshot::Sender<()>>,
|
||||
}
|
||||
@ -75,10 +81,11 @@ impl Network {
|
||||
pub fn new(participant_id: Pid, thread_pool: &ThreadPool) -> Self {
|
||||
//let participants = RwLock::new(vec![]);
|
||||
let p = participant_id;
|
||||
debug!(?p, "starting Network");
|
||||
debug!(?p, ?User, "starting Network");
|
||||
let (scheduler, listen_sender, connect_sender, connected_receiver, shutdown_sender) =
|
||||
Scheduler::new(participant_id);
|
||||
thread_pool.execute(move || {
|
||||
trace!(?p, ?User, "starting sheduler in own thread");
|
||||
let _handle = task::block_on(
|
||||
scheduler
|
||||
.run()
|
||||
@ -95,56 +102,60 @@ impl Network {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn listen(&self, address: Address) -> Result<(), NetworkError> {
|
||||
task::block_on(async { self.listen_sender.write().await.send(address).await }).unwrap();
|
||||
Ok(())
|
||||
pub async fn listen(&self, address: Address) -> Result<(), NetworkError> {
|
||||
let (result_sender, result_receiver) = oneshot::channel::<async_std::io::Result<()>>();
|
||||
debug!(?address, ?User, "listening on address");
|
||||
self.listen_sender
|
||||
.write()
|
||||
.await
|
||||
.send((address, result_sender))
|
||||
.await?;
|
||||
match result_receiver.await? {
|
||||
//waiting guarantees that we either listened successfully or get an error like port in
|
||||
// use
|
||||
Ok(()) => Ok(()),
|
||||
Err(e) => Err(NetworkError::ListenFailed(e)),
|
||||
}
|
||||
}
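A brief usage sketch of the now-async `listen` (editorial; the wrapper function, its name and the logging are illustrative, assuming a `Network` built as in the examples above):

// sketch only: the caller can now learn whether the bind actually succeeded
fn start_listening(network: &Network, address: Address) {
    match task::block_on(network.listen(address)) {
        Ok(()) => debug!("listening"),
        Err(NetworkError::ListenFailed(e)) => error!(?e, "bind failed, is the port already in use?"),
        Err(e) => error!(?e, "network already closed"),
    }
}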
|
||||
|
||||
pub async fn connect(&self, address: Address) -> Result<Arc<Participant>, NetworkError> {
|
||||
let (pid_sender, pid_receiver) = oneshot::channel::<Participant>();
|
||||
let (pid_sender, pid_receiver) = oneshot::channel::<io::Result<Participant>>();
|
||||
debug!(?address, ?User, "connect to address");
|
||||
self.connect_sender
|
||||
.write()
|
||||
.await
|
||||
.send((address, pid_sender))
|
||||
.await
|
||||
.unwrap();
|
||||
match pid_receiver.await {
|
||||
Ok(participant) => {
|
||||
.await?;
|
||||
let participant = pid_receiver.await??;
|
||||
let pid = participant.remote_pid;
|
||||
debug!(?pid, "received Participant from remote");
|
||||
debug!(
|
||||
?pid,
|
||||
?User,
|
||||
"received Participant id from remote and return to user"
|
||||
);
|
||||
let participant = Arc::new(participant);
|
||||
self.participants
|
||||
.write()
|
||||
.await
|
||||
.insert(participant.remote_pid, participant.clone());
|
||||
Ok(participant)
|
||||
},
|
||||
Err(_) => Err(NetworkError {}),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn connected(&self) -> Result<Arc<Participant>, NetworkError> {
|
||||
match self.connected_receiver.write().await.next().await {
|
||||
Some(participant) => {
|
||||
let participant = self.connected_receiver.write().await.next().await?;
|
||||
let participant = Arc::new(participant);
|
||||
self.participants
|
||||
.write()
|
||||
.await
|
||||
.insert(participant.remote_pid, participant.clone());
|
||||
Ok(participant)
|
||||
},
|
||||
None => Err(NetworkError {}),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn disconnect(&self, participant: Arc<Participant>) -> Result<(), NetworkError> {
|
||||
// Remove, Close and try_unwrap error when unwrap fails!
|
||||
let participant = self
|
||||
.participants
|
||||
.write()
|
||||
.await
|
||||
.remove(&participant.remote_pid)
|
||||
.unwrap();
|
||||
let pid = participant.remote_pid;
|
||||
debug!(?pid, "removing participant from network");
|
||||
self.participants.write().await.remove(&pid)?;
|
||||
participant.closed.store(true, Ordering::Relaxed);
|
||||
|
||||
if Arc::try_unwrap(participant).is_err() {
|
||||
@ -155,9 +166,11 @@ impl Network {
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
//TODO: HANDLE SHUTDOWN_RECEIVER
|
||||
pub async fn participants(&self) -> HashMap<Pid, Arc<Participant>> {
|
||||
self.participants.read().await.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl Participant {
|
||||
pub(crate) fn new(
|
||||
@ -165,7 +178,6 @@ impl Participant {
|
||||
remote_pid: Pid,
|
||||
stream_open_sender: mpsc::UnboundedSender<(Prio, Promises, oneshot::Sender<Stream>)>,
|
||||
stream_opened_receiver: mpsc::UnboundedReceiver<Stream>,
|
||||
shutdown_receiver: oneshot::Receiver<()>,
|
||||
disconnect_sender: mpsc::UnboundedSender<Pid>,
|
||||
) -> Self {
|
||||
Self {
|
||||
@ -173,36 +185,66 @@ impl Participant {
|
||||
remote_pid,
|
||||
stream_open_sender: RwLock::new(stream_open_sender),
|
||||
stream_opened_receiver: RwLock::new(stream_opened_receiver),
|
||||
shutdown_receiver: RwLock::new(shutdown_receiver),
|
||||
closed: AtomicBool::new(false),
|
||||
disconnect_sender: Some(disconnect_sender),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn open(&self, prio: u8, promises: Promises) -> Result<Stream, ParticipantError> {
|
||||
//use this lock for now to make sure that only one open at a time is made,
|
||||
// TODO: not sure if we can parallelise that, check in future
|
||||
let mut stream_open_sender = self.stream_open_sender.write().await;
|
||||
if self.closed.load(Ordering::Relaxed) {
|
||||
warn!(?self.remote_pid, "participant is closed but another open is tried on it");
|
||||
return Err(ParticipantError::ParticipantClosed);
|
||||
}
|
||||
let (sid_sender, sid_receiver) = oneshot::channel();
|
||||
self.stream_open_sender
|
||||
.write()
|
||||
.await
|
||||
if stream_open_sender
|
||||
.send((prio, promises, sid_sender))
|
||||
.await
|
||||
.unwrap();
|
||||
.is_err()
|
||||
{
|
||||
debug!(?self.remote_pid, ?User, "stream_open_sender failed, closing participant");
|
||||
self.closed.store(true, Ordering::Relaxed);
|
||||
return Err(ParticipantError::ParticipantClosed);
|
||||
}
|
||||
match sid_receiver.await {
|
||||
Ok(stream) => {
|
||||
let sid = stream.sid;
|
||||
debug!(?sid, "opened stream");
|
||||
debug!(?sid, ?self.remote_pid, ?User, "opened stream");
|
||||
Ok(stream)
|
||||
},
|
||||
Err(_) => Err(ParticipantError {}),
|
||||
Err(_) => {
|
||||
debug!(?self.remote_pid, ?User, "sid_receiver failed, closing participant");
|
||||
self.closed.store(true, Ordering::Relaxed);
|
||||
Err(ParticipantError::ParticipantClosed)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn opened(&self) -> Result<Stream, ParticipantError> {
|
||||
match self.stream_opened_receiver.write().await.next().await {
|
||||
Some(stream) => Ok(stream),
|
||||
None => Err(ParticipantError {}),
|
||||
//use this lock for now to make sure that only one open at a time is made,
|
||||
// TODO: not sure if we can parallelise that, check in future
|
||||
let mut stream_opened_receiver = self.stream_opened_receiver.write().await;
|
||||
if self.closed.load(Ordering::Relaxed) {
|
||||
warn!(?self.remote_pid, "participant is closed but another open is tried on it");
|
||||
return Err(ParticipantError::ParticipantClosed);
|
||||
}
|
||||
match stream_opened_receiver.next().await {
|
||||
Some(stream) => {
|
||||
let sid = stream.sid;
|
||||
debug!(?sid, ?self.remote_pid, "receive opened stream");
|
||||
Ok(stream)
|
||||
},
|
||||
None => {
|
||||
debug!(?self.remote_pid, "stream_opened_receiver failed, closing participant");
|
||||
self.closed.store(true, Ordering::Relaxed);
|
||||
Err(ParticipantError::ParticipantClosed)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remote_pid(&self) -> Pid { self.remote_pid }
|
||||
}
|
||||
|
||||
impl Stream {
|
||||
@ -213,7 +255,7 @@ impl Stream {
|
||||
promises: Promises,
|
||||
msg_send_sender: std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>,
|
||||
msg_recv_receiver: mpsc::UnboundedReceiver<InCommingMessage>,
|
||||
shutdown_receiver: oneshot::Receiver<()>,
|
||||
closed: Arc<AtomicBool>,
|
||||
shutdown_sender: mpsc::UnboundedSender<Sid>,
|
||||
) -> Self {
|
||||
Self {
|
||||
@ -224,79 +266,139 @@ impl Stream {
|
||||
promises,
|
||||
msg_send_sender,
|
||||
msg_recv_receiver,
|
||||
shutdown_receiver,
|
||||
closed: AtomicBool::new(false),
|
||||
closed,
|
||||
shutdown_sender: Some(shutdown_sender),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send<M: Serialize>(&mut self, msg: M) -> Result<(), StreamError> {
|
||||
pub fn send<M: Serialize>(&mut self, msg: M) -> Result<(), StreamError> {
|
||||
let messagebuffer = Arc::new(message::serialize(&msg));
|
||||
if self.closed.load(Ordering::Relaxed) {
|
||||
return Err(StreamError::StreamClosed);
|
||||
}
|
||||
self.msg_send_sender
|
||||
.send((self.prio, self.pid, self.sid, OutGoingMessage {
|
||||
buffer: messagebuffer,
|
||||
cursor: 0,
|
||||
mid: self.mid,
|
||||
sid: self.sid,
|
||||
}))
|
||||
.unwrap();
|
||||
}))?;
|
||||
self.mid += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn recv<M: DeserializeOwned>(&mut self) -> Result<M, StreamError> {
|
||||
match self.msg_recv_receiver.next().await {
|
||||
Some(msg) => {
|
||||
//no need to access self.closed here, as when this stream is closed the Channel
|
||||
// is closed which will trigger a None
|
||||
let msg = self.msg_recv_receiver.next().await?;
|
||||
info!(?msg, "delivering a message");
|
||||
Ok(message::deserialize(msg.buffer))
|
||||
},
|
||||
None => panic!(
|
||||
"Unexpected error, probably stream was destroyed... maybe i dont know yet, no \
|
||||
idea of async stuff"
|
||||
),
|
||||
}
|
||||
}
|
||||
//Todo: ERROR: TODO: implement me and the disconnecting!
|
||||
}
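A minimal usage sketch of the changed Stream API (editorial; `Msg` stands in for any Serialize + DeserializeOwned type such as the enum from the async_recv example above): `send` no longer needs `.await`, while `recv` stays asynchronous.

// sketch only: synchronous send, asynchronous receive
async fn ping_once(stream: &mut Stream) -> Result<Msg, StreamError> {
    stream.send(Msg::Ping(1))?; // fails fast with StreamError::StreamClosed
    stream.recv::<Msg>().await  // still awaited
}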
|
||||
|
||||
impl Drop for Network {
|
||||
fn drop(&mut self) {
|
||||
let p = self.local_pid;
|
||||
debug!(?p, "shutting down Network");
|
||||
self.shutdown_sender.take().unwrap().send(()).unwrap();
|
||||
let pid = self.local_pid;
|
||||
debug!(?pid, "shutting down Network");
|
||||
self.shutdown_sender
|
||||
.take()
|
||||
.unwrap()
|
||||
.send(())
|
||||
.expect("scheduler is closed, but nobody other should be able to close it");
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Participant {
|
||||
fn drop(&mut self) {
|
||||
if !self.closed.load(Ordering::Relaxed) {
|
||||
let p = self.remote_pid;
|
||||
debug!(?p, "shutting down Participant");
|
||||
// ignore closed, as we need to send it even though we disconnected the
|
||||
// participant from network
|
||||
let pid = self.remote_pid;
|
||||
debug!(?pid, "shutting down Participant");
|
||||
task::block_on(async {
|
||||
self.disconnect_sender
|
||||
.take()
|
||||
.unwrap()
|
||||
.send(self.remote_pid)
|
||||
.await
|
||||
.unwrap()
|
||||
.expect("something is wrong in internal scheduler coding")
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Stream {
|
||||
fn drop(&mut self) {
|
||||
// a send when already closed is unnecessary but doesn't hurt, we must not crash here
|
||||
if !self.closed.load(Ordering::Relaxed) {
|
||||
let s = self.sid;
|
||||
debug!(?s, "shutting down Stream");
|
||||
task::block_on(async {
|
||||
self.shutdown_sender
|
||||
.take()
|
||||
.unwrap()
|
||||
.send(self.sid)
|
||||
.await
|
||||
.unwrap()
|
||||
});
|
||||
let sid = self.sid;
|
||||
let pid = self.pid;
|
||||
debug!(?pid, ?sid, "shutting down Stream");
|
||||
if task::block_on(self.shutdown_sender.take().unwrap().send(self.sid)).is_err() {
|
||||
warn!(
|
||||
"Other side got already dropped, probably due to timing, other side will \
|
||||
handle this gracefully"
|
||||
);
|
||||
};
|
||||
}
|
||||
}
|
||||
}
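An editorial note on the Drop impl above: a stream is closed simply by dropping it, and the scheduler is then notified via `shutdown_sender` unless the stream was already marked closed. For illustration only (the function is hypothetical; `PROMISES_NONE` is the constant the examples use):

// sketch only: the stream opened here is closed automatically at end of scope
async fn send_once(p: &Participant) -> Result<(), ParticipantError> {
    let mut s = p.open(16, PROMISES_NONE).await?;
    let _ = s.send("bye"); // ignore a racy StreamClosed here
    Ok(())
} // `s` is dropped here and its Sid is handed to the scheduler for cleanup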
|
||||
|
||||
impl std::fmt::Debug for Participant {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let status = if self.closed.load(Ordering::Relaxed) {
|
||||
"[CLOSED]"
|
||||
} else {
|
||||
"[OPEN]"
|
||||
};
|
||||
write!(
|
||||
f,
|
||||
"Participant {{{} local_pid: {:?}, remote_pid: {:?} }}",
|
||||
status, &self.local_pid, &self.remote_pid,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<std::sync::mpsc::SendError<T>> for StreamError {
|
||||
fn from(_err: std::sync::mpsc::SendError<T>) -> Self { StreamError::StreamClosed }
|
||||
}
|
||||
|
||||
impl<T> From<std::sync::mpsc::SendError<T>> for ParticipantError {
|
||||
fn from(_err: std::sync::mpsc::SendError<T>) -> Self { ParticipantError::ParticipantClosed }
|
||||
}
|
||||
|
||||
impl<T> From<std::sync::mpsc::SendError<T>> for NetworkError {
|
||||
fn from(_err: std::sync::mpsc::SendError<T>) -> Self { NetworkError::NetworkClosed }
|
||||
}
|
||||
|
||||
impl From<async_std::io::Error> for NetworkError {
|
||||
fn from(err: async_std::io::Error) -> Self { NetworkError::ListenFailed(err) }
|
||||
}
|
||||
|
||||
impl From<std::option::NoneError> for StreamError {
|
||||
fn from(_err: std::option::NoneError) -> Self { StreamError::StreamClosed }
|
||||
}
|
||||
|
||||
impl From<std::option::NoneError> for ParticipantError {
|
||||
fn from(_err: std::option::NoneError) -> Self { ParticipantError::ParticipantClosed }
|
||||
}
|
||||
|
||||
impl From<std::option::NoneError> for NetworkError {
|
||||
fn from(_err: std::option::NoneError) -> Self { NetworkError::NetworkClosed }
|
||||
}
|
||||
|
||||
impl From<mpsc::SendError> for ParticipantError {
|
||||
fn from(_err: mpsc::SendError) -> Self { ParticipantError::ParticipantClosed }
|
||||
}
|
||||
|
||||
impl From<mpsc::SendError> for NetworkError {
|
||||
fn from(_err: mpsc::SendError) -> Self { NetworkError::NetworkClosed }
|
||||
}
|
||||
|
||||
impl From<oneshot::Canceled> for ParticipantError {
|
||||
fn from(_err: oneshot::Canceled) -> Self { ParticipantError::ParticipantClosed }
|
||||
}
|
||||
|
||||
impl From<oneshot::Canceled> for NetworkError {
|
||||
fn from(_err: oneshot::Canceled) -> Self { NetworkError::NetworkClosed }
|
||||
}
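These conversions exist so the `?` operator can map low-level channel failures straight onto the public error enums, e.g. (illustrative only):

// with the From impl above, `?` turns a std mpsc SendError into StreamError::StreamClosed
fn forward(tx: &std::sync::mpsc::Sender<u32>) -> Result<(), StreamError> {
    tx.send(42)?;
    Ok(())
}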
|
||||
|
@ -1,12 +1,16 @@
|
||||
use crate::{
|
||||
frames::Frame,
|
||||
protocols::Protocols,
|
||||
types::{
|
||||
Cid, NetworkBuffer, Pid, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2, VELOREN_MAGIC_NUMBER,
|
||||
Cid, Frame, Pid, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2, VELOREN_MAGIC_NUMBER,
|
||||
VELOREN_NETWORK_VERSION,
|
||||
},
|
||||
};
|
||||
use async_std::{net::TcpStream, prelude::*, sync::RwLock};
|
||||
use futures::{channel::mpsc, future::FutureExt, select, sink::SinkExt, stream::StreamExt};
|
||||
use async_std::sync::RwLock;
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
sink::SinkExt,
|
||||
stream::StreamExt,
|
||||
};
|
||||
use tracing::*;
|
||||
//use futures::prelude::*;
|
||||
|
||||
@ -27,10 +31,12 @@ enum ChannelState {
|
||||
}
|
||||
|
||||
impl Channel {
|
||||
#[cfg(debug_assertions)]
|
||||
const WRONG_NUMBER: &'static [u8] = "Handshake does not contain the magic number required by \
|
||||
veloren server.\nWe are not sure if you are a valid \
|
||||
veloren client.\nClosing the connection"
|
||||
.as_bytes();
|
||||
#[cfg(debug_assertions)]
|
||||
const WRONG_VERSION: &'static str = "Handshake does contain a correct magic number, but \
|
||||
invalid version.\nWe don't know how to communicate with \
|
||||
you.\nClosing the connection";
|
||||
@ -54,24 +60,36 @@ impl Channel {
|
||||
/// receiver: mpsc::Receiver
|
||||
pub async fn run(
|
||||
self,
|
||||
protocol: TcpStream,
|
||||
protocol: Protocols,
|
||||
part_in_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
part_out_sender: mpsc::UnboundedSender<(Cid, Frame)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
) {
|
||||
let (prot_in_sender, prot_in_receiver) = mpsc::unbounded::<Frame>();
|
||||
let (prot_out_sender, prot_out_receiver) = mpsc::unbounded::<Frame>();
|
||||
|
||||
futures::join!(
|
||||
self.read(protocol.clone(), prot_in_sender),
|
||||
self.write(protocol, prot_out_receiver, part_in_receiver),
|
||||
self.frame_handler(
|
||||
let handler_future = self.frame_handler(
|
||||
prot_in_receiver,
|
||||
prot_out_sender,
|
||||
part_out_sender,
|
||||
configured_sender
|
||||
)
|
||||
configured_sender,
|
||||
);
|
||||
match protocol {
|
||||
Protocols::Tcp(tcp) => {
|
||||
futures::join!(
|
||||
tcp.read(prot_in_sender),
|
||||
tcp.write(prot_out_receiver, part_in_receiver),
|
||||
handler_future,
|
||||
);
|
||||
},
|
||||
Protocols::Udp(udp) => {
|
||||
futures::join!(
|
||||
udp.read(prot_in_sender),
|
||||
udp.write(prot_out_receiver, part_in_receiver),
|
||||
handler_future,
|
||||
);
|
||||
},
|
||||
}
|
||||
|
||||
//return part_out_receiver;
|
||||
}
|
||||
@ -81,17 +99,17 @@ impl Channel {
|
||||
mut frames: mpsc::UnboundedReceiver<Frame>,
|
||||
mut frame_sender: mpsc::UnboundedSender<Frame>,
|
||||
mut external_frame_sender: mpsc::UnboundedSender<(Cid, Frame)>,
|
||||
mut configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid)>,
|
||||
mut configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
) {
|
||||
const ERR_S: &str = "Got A Raw Message, these are usually Debug Messages indicating that \
|
||||
something went wrong on the network layer and the connection will be closed";
|
||||
while let Some(frame) = frames.next().await {
|
||||
trace!(?frame, "recv frame");
|
||||
match frame {
|
||||
Frame::Handshake {
|
||||
magic_number,
|
||||
version,
|
||||
} => {
|
||||
trace!(?magic_number, ?version, "recv handshake");
|
||||
if self
|
||||
.verify_handshake(magic_number, version, &mut frame_sender)
|
||||
.await
|
||||
@ -121,10 +139,19 @@ impl Channel {
|
||||
STREAM_ID_OFFSET1
|
||||
};
|
||||
info!(?pid, "this channel is now configured!");
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
configured_sender
|
||||
.send((self.cid, pid, stream_id_offset))
|
||||
.send((self.cid, pid, stream_id_offset, sender))
|
||||
.await
|
||||
.unwrap();
|
||||
receiver.await.unwrap();
|
||||
//TODO: this is sync anyway, because we need to wait. So find a better way than
// these channels, like a direct method call... otherwise a
// frame might jump in before the channel is officially configured
debug!(
"STOP, if you read this, fix this error. make this a function instead of a \
channel here"
);
|
||||
},
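// Illustrative sketch (not part of this diff) of the send-then-wait-for-ack
// pattern used in the arm above: the channel sends its configuration together
// with a oneshot sender and blocks on the matching receiver until the other
// side has registered it, so no frame can overtake the configuration.
// All names below are invented for the example.
use futures::{
    channel::{mpsc, oneshot},
    executor::block_on,
    sink::SinkExt,
    stream::StreamExt,
};

fn main() {
    block_on(async {
        let (mut cfg_tx, mut cfg_rx) = mpsc::unbounded::<(u64, oneshot::Sender<()>)>();

        let channel_side = async move {
            let (ack_tx, ack_rx) = oneshot::channel();
            cfg_tx.send((42, ack_tx)).await.unwrap();
            // wait until the other side confirms that the config was applied
            ack_rx.await.unwrap();
            println!("configured, frames may flow now");
        };
        let scheduler_side = async move {
            let (cid, ack_tx) = cfg_rx.next().await.unwrap();
            println!("registering channel {}", cid);
            ack_tx.send(()).unwrap();
        };
        futures::join!(channel_side, scheduler_side);
    });
}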
|
||||
Frame::Shutdown => {
|
||||
info!("shutdown signal received");
|
||||
@ -144,81 +171,12 @@ impl Channel {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn read(
|
||||
&self,
|
||||
mut protocol: TcpStream,
|
||||
mut frame_handler: mpsc::UnboundedSender<Frame>,
|
||||
) {
|
||||
let mut buffer = NetworkBuffer::new();
|
||||
loop {
|
||||
match protocol.read(buffer.get_write_slice(2048)).await {
|
||||
Ok(0) => {
|
||||
debug!(?buffer, "shutdown of tcp channel detected");
|
||||
frame_handler.send(Frame::Shutdown).await.unwrap();
|
||||
break;
|
||||
},
|
||||
Ok(n) => {
|
||||
buffer.actually_written(n);
|
||||
trace!("incomming message with len: {}", n);
|
||||
let slice = buffer.get_read_slice();
|
||||
let mut cur = std::io::Cursor::new(slice);
|
||||
let mut read_ok = 0;
|
||||
while cur.position() < n as u64 {
|
||||
let round_start = cur.position() as usize;
|
||||
let r: Result<Frame, _> = bincode::deserialize_from(&mut cur);
|
||||
match r {
|
||||
Ok(frame) => {
|
||||
frame_handler.send(frame).await.unwrap();
|
||||
read_ok = cur.position() as usize;
|
||||
},
|
||||
Err(e) => {
|
||||
// Probably we have to wait for moare data!
|
||||
let first_bytes_of_msg =
|
||||
&slice[round_start..std::cmp::min(n, round_start + 16)];
|
||||
debug!(
|
||||
?buffer,
|
||||
?e,
|
||||
?n,
|
||||
?round_start,
|
||||
?first_bytes_of_msg,
|
||||
"message cant be parsed, probably because we need to wait for \
|
||||
more data"
|
||||
);
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
buffer.actually_read(read_ok);
|
||||
},
|
||||
Err(e) => panic!("{}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn write(
|
||||
&self,
|
||||
mut protocol: TcpStream,
|
||||
mut internal_frame_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
mut external_frame_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
) {
|
||||
while let Some(frame) = select! {
|
||||
next = internal_frame_receiver.next().fuse() => next,
|
||||
next = external_frame_receiver.next().fuse() => next,
|
||||
} {
|
||||
//dezerialize here as this is executed in a seperate thread PER channel.
|
||||
// Limites Throughput per single Receiver but stays in same thread (maybe as its
|
||||
// in a threadpool)
|
||||
trace!(?frame, "going to send frame via tcp");
|
||||
let data = bincode::serialize(&frame).unwrap();
|
||||
protocol.write_all(data.as_slice()).await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
async fn verify_handshake(
|
||||
&self,
|
||||
magic_number: String,
|
||||
version: [u32; 3],
|
||||
frame_sender: &mut mpsc::UnboundedSender<Frame>,
|
||||
#[cfg(debug_assertions)] frame_sender: &mut mpsc::UnboundedSender<Frame>,
|
||||
#[cfg(not(debug_assertions))] _: &mut mpsc::UnboundedSender<Frame>,
|
||||
) -> Result<(), ()> {
|
||||
if magic_number != VELOREN_MAGIC_NUMBER {
|
||||
error!(?magic_number, "connection with invalid magic_number");
|
||||
|
@ -1,37 +0,0 @@
|
||||
use crate::types::{Mid, Pid, Prio, Promises, Sid};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// Used for Communication between Channel <----(TCP/UDP)----> Channel
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub enum Frame {
|
||||
Handshake {
|
||||
magic_number: String,
|
||||
version: [u32; 3],
|
||||
},
|
||||
ParticipantId {
|
||||
pid: Pid,
|
||||
},
|
||||
Shutdown, /* Shutsdown this channel gracefully, if all channels are shut down, Participant
|
||||
* is deleted */
|
||||
OpenStream {
|
||||
sid: Sid,
|
||||
prio: Prio,
|
||||
promises: Promises,
|
||||
},
|
||||
CloseStream {
|
||||
sid: Sid,
|
||||
},
|
||||
DataHeader {
|
||||
mid: Mid,
|
||||
sid: Sid,
|
||||
length: u64,
|
||||
},
|
||||
Data {
|
||||
id: Mid,
|
||||
start: u64,
|
||||
data: Vec<u8>,
|
||||
},
|
||||
/* WARNING: Sending RAW is only used for debug purposes in case someone write a new API
|
||||
* against veloren Server! */
|
||||
Raw(Vec<u8>),
|
||||
}
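// Illustrative sketch (not part of this diff): how a frame travels over the
// wire with bincode, the same way Channel/Protocols serialize before writing
// to the socket. A simplified stand-in enum and magic number are used so the
// example stands alone; per this commit the real Frame now lives in types.rs.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum MiniFrame {
    Handshake { magic_number: String, version: [u32; 3] },
    Shutdown,
    Raw(Vec<u8>),
}

fn main() {
    let frame = MiniFrame::Handshake {
        magic_number: "VELOREN".to_string(), // placeholder for VELOREN_MAGIC_NUMBER
        version: [0, 2, 0],
    };
    // serialize on the sending side ...
    let bytes = bincode::serialize(&frame).unwrap();
    // ... and deserialize on the receiving side
    let back: MiniFrame = bincode::deserialize(&bytes).unwrap();
    assert_eq!(frame, back);
    println!("{} bytes on the wire: {:?}", bytes.len(), back);
}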
|
@ -1,27 +1,17 @@
|
||||
#![feature(trait_alias)]
|
||||
#![feature(trait_alias, try_trait)]
|
||||
mod api;
|
||||
mod async_serde;
|
||||
mod channel;
|
||||
mod frames;
|
||||
mod message;
|
||||
mod metrics;
|
||||
mod mpsc;
|
||||
mod participant;
|
||||
mod prios;
|
||||
mod protocols;
|
||||
mod scheduler;
|
||||
mod tcp;
|
||||
mod types;
|
||||
mod udp;
|
||||
|
||||
pub use api::{Address, Network};
|
||||
pub use scheduler::Scheduler;
|
||||
pub use api::{Address, Network, NetworkError, Participant, ParticipantError, Stream, StreamError};
|
||||
pub use types::{
|
||||
Pid, Promises, PROMISES_COMPRESSED, PROMISES_CONSISTENCY, PROMISES_ENCRYPTED,
|
||||
PROMISES_GUARANTEED_DELIVERY, PROMISES_NONE, PROMISES_ORDERED,
|
||||
};
|
||||
|
||||
/*
|
||||
pub use api::{
|
||||
Address, Network, NetworkError, Participant, ParticipantError, Promise, Stream, StreamError,
|
||||
};
|
||||
*/
|
||||
|
@ -4,7 +4,6 @@ use serde::{de::DeserializeOwned, Serialize};
|
||||
use crate::types::{Mid, Sid};
|
||||
use byteorder::{NetworkEndian, ReadBytesExt};
|
||||
use std::sync::Arc;
|
||||
use tracing::*;
|
||||
|
||||
pub(crate) struct MessageBuffer {
|
||||
// use VecDeque for msg storage, because it allows to quickly remove data from front.
|
||||
@ -29,13 +28,7 @@ pub(crate) struct InCommingMessage {
|
||||
}
|
||||
|
||||
pub(crate) fn serialize<M: Serialize>(message: &M) -> MessageBuffer {
|
||||
let mut writer = {
|
||||
let actual_size = bincode::serialized_size(message).unwrap();
|
||||
Vec::<u8>::with_capacity(actual_size as usize)
|
||||
};
|
||||
if let Err(e) = bincode::serialize_into(&mut writer, message) {
|
||||
error!("Oh nooo {}", e);
|
||||
};
|
||||
let writer = bincode::serialize(message).unwrap();
|
||||
MessageBuffer { data: writer }
|
||||
}
|
||||
|
||||
|
@ -1 +1,141 @@
|
||||
use prometheus::{IntGauge, IntGaugeVec, Opts, Registry};
|
||||
use std::{
|
||||
error::Error,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
//TODO: switch over to Counter for frames_count, message_count, bytes_send,
// frames_message_count
// 1 NetworkMetrics per Network
|
||||
#[allow(dead_code)]
|
||||
pub struct NetworkMetrics {
pub participants_connected: IntGauge,
// opened channels, separated by PARTICIPANT
pub channels_connected: IntGauge,
// opened streams, separated by PARTICIPANT
pub streams_open: IntGauge,
pub network_info: IntGauge,
// frames, separated by CHANNEL (and PARTICIPANT) AND FRAME TYPE
pub frames_count: IntGaugeVec,
// sent messages, separated by STREAM (and PARTICIPANT, CHANNEL)
pub message_count: IntGaugeVec,
// sent message bytes, separated by STREAM (and PARTICIPANT, CHANNEL)
pub bytes_send: IntGaugeVec,
// frames, separated by MESSAGE (and PARTICIPANT, CHANNEL, STREAM)
pub frames_message_count: IntGaugeVec,
// TODO: queued messages, separated by STREAM (add PART, CHANNEL)
// queued messages, separated by PARTICIPANT
pub queued_count: IntGaugeVec,
// TODO: queued message bytes, separated by STREAM (add PART, CHANNEL)
// queued message bytes, separated by PARTICIPANT
pub queued_bytes: IntGaugeVec,
// ping calculated based on the last msg, separated by PARTICIPANT
pub participants_ping: IntGaugeVec,
tick: Arc<AtomicU64>,
}
|
||||
|
||||
impl NetworkMetrics {
|
||||
#[allow(dead_code)]
|
||||
pub fn new(registry: &Registry, tick: Arc<AtomicU64>) -> Result<Self, Box<dyn Error>> {
|
||||
let participants_connected = IntGauge::with_opts(Opts::new(
|
||||
"participants_connected",
|
||||
"shows the number of participants connected to the network",
|
||||
))?;
|
||||
let channels_connected = IntGauge::with_opts(Opts::new(
|
||||
"channels_connected",
|
||||
"number of all channels currently connected on the network",
|
||||
))?;
|
||||
let streams_open = IntGauge::with_opts(Opts::new(
|
||||
"streams_open",
|
||||
"number of all streams currently open on the network",
|
||||
))?;
|
||||
let opts = Opts::new("network_info", "Static Network information").const_label(
|
||||
"version",
|
||||
&format!(
|
||||
"{}.{}.{}",
|
||||
&crate::types::VELOREN_NETWORK_VERSION[0],
|
||||
&crate::types::VELOREN_NETWORK_VERSION[1],
|
||||
&crate::types::VELOREN_NETWORK_VERSION[2]
|
||||
),
|
||||
);
|
||||
let network_info = IntGauge::with_opts(opts)?;
|
||||
|
||||
let frames_count = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new(
|
||||
"frames_count",
|
||||
"number of all frames send by streams on the network",
|
||||
),
|
||||
&["channel"],
|
||||
)?);
|
||||
let message_count = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new(
|
||||
"message_count",
|
||||
"number of messages send by streams on the network",
|
||||
),
|
||||
&["channel"],
|
||||
)?);
|
||||
let bytes_send = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new("bytes_send", "bytes send by streams on the network"),
|
||||
&["channel"],
|
||||
)?);
|
||||
let frames_message_count = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new(
|
||||
"frames_message_count",
|
||||
"bytes sends per message on the network",
|
||||
),
|
||||
&["channel"],
|
||||
)?);
|
||||
let queued_count = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new(
|
||||
"queued_count",
|
||||
"queued number of messages by participant on the network",
|
||||
),
|
||||
&["channel"],
|
||||
)?);
|
||||
let queued_bytes = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new(
|
||||
"queued_bytes",
|
||||
"queued bytes of messages by participant on the network",
|
||||
),
|
||||
&["channel"],
|
||||
)?);
|
||||
let participants_ping = IntGaugeVec::from(IntGaugeVec::new(
|
||||
Opts::new(
|
||||
"participants_ping",
|
||||
"ping time to participants on the network",
|
||||
),
|
||||
&["channel"],
|
||||
)?);
|
||||
|
||||
registry.register(Box::new(participants_connected.clone()))?;
|
||||
registry.register(Box::new(channels_connected.clone()))?;
|
||||
registry.register(Box::new(streams_open.clone()))?;
|
||||
registry.register(Box::new(network_info.clone()))?;
|
||||
registry.register(Box::new(frames_count.clone()))?;
|
||||
registry.register(Box::new(message_count.clone()))?;
|
||||
registry.register(Box::new(bytes_send.clone()))?;
|
||||
registry.register(Box::new(frames_message_count.clone()))?;
|
||||
registry.register(Box::new(queued_count.clone()))?;
|
||||
registry.register(Box::new(queued_bytes.clone()))?;
|
||||
registry.register(Box::new(participants_ping.clone()))?;
|
||||
|
||||
Ok(Self {
|
||||
participants_connected,
|
||||
channels_connected,
|
||||
streams_open,
|
||||
network_info,
|
||||
frames_count,
|
||||
message_count,
|
||||
bytes_send,
|
||||
frames_message_count,
|
||||
queued_count,
|
||||
queued_bytes,
|
||||
participants_ping,
|
||||
tick,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn _is_100th_tick(&self) -> bool { self.tick.load(Ordering::Relaxed).rem_euclid(100) == 0 }
|
||||
}
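// Illustrative sketch (not part of this diff): the same prometheus pattern
// NetworkMetrics::new uses, reduced to a single labelled gauge vec. Metric and
// label values here are examples only.
use prometheus::{Encoder, IntGaugeVec, Opts, Registry, TextEncoder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let registry = Registry::new();
    let frames_count = IntGaugeVec::new(
        Opts::new("frames_count", "number of all frames sent by streams"),
        &["channel"],
    )?;
    registry.register(Box::new(frames_count.clone()))?;

    // later, per channel:
    frames_count.with_label_values(&["tcp_0"]).inc();
    frames_count.with_label_values(&["tcp_0"]).add(41);

    // what a scrape of this registry would return
    let mut buf = Vec::new();
    TextEncoder::new().encode(&registry.gather(), &mut buf)?;
    println!("{}", String::from_utf8(buf)?);
    Ok(())
}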
|
||||
|
@ -1 +0,0 @@
|
||||
|
@ -1,18 +1,22 @@
|
||||
use crate::{
|
||||
api::Stream,
|
||||
frames::Frame,
|
||||
message::{InCommingMessage, MessageBuffer, OutGoingMessage},
|
||||
types::{Cid, Pid, Prio, Promises, Sid},
|
||||
types::{Cid, Frame, Pid, Prio, Promises, Sid},
|
||||
};
|
||||
use async_std::sync::RwLock;
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
future::FutureExt,
|
||||
select,
|
||||
sink::SinkExt,
|
||||
stream::StreamExt,
|
||||
};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{Arc, Mutex},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
};
|
||||
use tracing::*;
|
||||
|
||||
@ -26,6 +30,8 @@ struct ControlChannels {
|
||||
shutdown_api_sender: mpsc::UnboundedSender<Sid>,
|
||||
send_outgoing: Arc<Mutex<std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>>>, //api
|
||||
frame_send_receiver: mpsc::UnboundedReceiver<(Pid, Sid, Frame)>, //scheduler
|
||||
shutdown_receiver: oneshot::Receiver<()>, //own
|
||||
stream_finished_request_sender: mpsc::UnboundedSender<(Pid, Sid, oneshot::Sender<()>)>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -40,7 +46,7 @@ pub struct BParticipant {
|
||||
Prio,
|
||||
Promises,
|
||||
mpsc::UnboundedSender<InCommingMessage>,
|
||||
oneshot::Sender<()>,
|
||||
Arc<AtomicBool>,
|
||||
),
|
||||
>,
|
||||
>,
|
||||
@ -52,6 +58,7 @@ impl BParticipant {
|
||||
remote_pid: Pid,
|
||||
offset_sid: Sid,
|
||||
send_outgoing: std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>,
|
||||
stream_finished_request_sender: mpsc::UnboundedSender<(Pid, Sid, oneshot::Sender<()>)>,
|
||||
) -> (
|
||||
Self,
|
||||
mpsc::UnboundedSender<(Prio, Promises, oneshot::Sender<Stream>)>,
|
||||
@ -59,6 +66,7 @@ impl BParticipant {
|
||||
mpsc::UnboundedSender<(Cid, mpsc::UnboundedSender<Frame>)>,
|
||||
mpsc::UnboundedSender<Frame>,
|
||||
mpsc::UnboundedSender<(Pid, Sid, Frame)>,
|
||||
oneshot::Sender<()>,
|
||||
) {
|
||||
let (stream_open_sender, stream_open_receiver) =
|
||||
mpsc::unbounded::<(Prio, Promises, oneshot::Sender<Stream>)>();
|
||||
@ -66,20 +74,21 @@ impl BParticipant {
|
||||
let (transfer_channel_sender, transfer_channel_receiver) =
|
||||
mpsc::unbounded::<(Cid, mpsc::UnboundedSender<Frame>)>();
|
||||
let (frame_recv_sender, frame_recv_receiver) = mpsc::unbounded::<Frame>();
|
||||
//let (shutdown1_sender, shutdown1_receiver) = oneshot::channel();
|
||||
let (shutdown_api_sender, shutdown_api_receiver) = mpsc::unbounded();
|
||||
let (frame_send_sender, frame_send_receiver) = mpsc::unbounded::<(Pid, Sid, Frame)>();
|
||||
let (shutdown_sender, shutdown_receiver) = oneshot::channel();
|
||||
|
||||
let run_channels = Some(ControlChannels {
|
||||
stream_open_receiver,
|
||||
stream_opened_sender,
|
||||
transfer_channel_receiver,
|
||||
frame_recv_receiver,
|
||||
//shutdown_sender: shutdown1_sender,
|
||||
shutdown_api_receiver,
|
||||
shutdown_api_sender,
|
||||
send_outgoing: Arc::new(Mutex::new(send_outgoing)),
|
||||
frame_send_receiver,
|
||||
shutdown_receiver,
|
||||
stream_finished_request_sender,
|
||||
});
|
||||
|
||||
(
|
||||
@ -95,11 +104,17 @@ impl BParticipant {
|
||||
transfer_channel_sender,
|
||||
frame_recv_sender,
|
||||
frame_send_sender,
|
||||
//shutdown1_receiver,
|
||||
shutdown_sender,
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn run(mut self) {
|
||||
//those managers that listen on api::Participant need an additional oneshot for
// the shutdown scenario; those handled by the scheduler will be closed by it.
|
||||
let (shutdown_open_manager_sender, shutdown_open_manager_receiver) = oneshot::channel();
|
||||
let (shutdown_stream_close_manager_sender, shutdown_stream_close_manager_receiver) =
|
||||
oneshot::channel();
|
||||
|
||||
let run_channels = self.run_channels.take().unwrap();
|
||||
futures::join!(
|
||||
self.transfer_channel_manager(run_channels.transfer_channel_receiver),
|
||||
@ -107,6 +122,7 @@ impl BParticipant {
|
||||
run_channels.stream_open_receiver,
|
||||
run_channels.shutdown_api_sender.clone(),
|
||||
run_channels.send_outgoing.clone(),
|
||||
shutdown_open_manager_receiver,
|
||||
),
|
||||
self.handle_frames(
|
||||
run_channels.frame_recv_receiver,
|
||||
@ -115,12 +131,23 @@ impl BParticipant {
|
||||
run_channels.send_outgoing.clone(),
|
||||
),
|
||||
self.send_manager(run_channels.frame_send_receiver),
|
||||
self.shutdown_manager(run_channels.shutdown_api_receiver,),
|
||||
self.stream_close_manager(
|
||||
run_channels.shutdown_api_receiver,
|
||||
shutdown_stream_close_manager_receiver,
|
||||
run_channels.stream_finished_request_sender,
|
||||
),
|
||||
self.shutdown_manager(
|
||||
run_channels.shutdown_receiver,
|
||||
vec!(
|
||||
shutdown_open_manager_sender,
|
||||
shutdown_stream_close_manager_sender
|
||||
)
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
async fn send_frame(&self, frame: Frame) {
|
||||
// find out ideal channel
|
||||
// find out ideal channel here
|
||||
//TODO: just take first
|
||||
if let Some((_cid, channel)) = self.channels.write().await.get_mut(0) {
|
||||
channel.send(frame).await.unwrap();
|
||||
@ -155,10 +182,18 @@ impl BParticipant {
|
||||
trace!("opened frame from remote");
|
||||
},
|
||||
Frame::CloseStream { sid } => {
|
||||
if let Some((_, _, _, sender)) = self.streams.write().await.remove(&sid) {
|
||||
sender.send(()).unwrap();
|
||||
// Closing is realised by setting an AtomicBool to true; however, we also have a
// guarantee that send or recv fails if the other one is destroyed.
// However, Stream.send() is not async and its receiver isn't dropped if the Stream
// is dropped, so I need a way to notify the Stream that its sent messages will
// be dropped... from remote, notify local
|
||||
if let Some((_, _, _, closed)) = self.streams.write().await.remove(&sid) {
|
||||
closed.store(true, Ordering::Relaxed);
|
||||
} else {
|
||||
error!("unreachable, coudln't send close stream event!");
|
||||
error!(
"couldn't find stream to close, either this is a duplicate message, \
or the local copy of the Stream got closed simultaneously"
);
|
||||
}
|
||||
trace!("closed frame from remote");
|
||||
},
|
||||
@ -189,6 +224,8 @@ impl BParticipant {
|
||||
self.streams.write().await.get_mut(&imsg.sid)
|
||||
{
|
||||
sender.send(imsg).await.unwrap();
|
||||
} else {
|
||||
error!("dropping message as stream no longer seems to exist");
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -230,6 +267,7 @@ impl BParticipant {
|
||||
)>,
|
||||
shutdown_api_sender: mpsc::UnboundedSender<Sid>,
|
||||
send_outgoing: Arc<Mutex<std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>>>,
|
||||
shutdown_open_manager_receiver: oneshot::Receiver<()>,
|
||||
) {
|
||||
trace!("start open_manager");
|
||||
let send_outgoing = {
|
||||
@ -237,7 +275,12 @@ impl BParticipant {
|
||||
send_outgoing.lock().unwrap().clone()
|
||||
};
|
||||
let mut stream_ids = self.offset_sid;
|
||||
while let Some((prio, promises, sender)) = stream_open_receiver.next().await {
|
||||
let mut shutdown_open_manager_receiver = shutdown_open_manager_receiver.fuse();
|
||||
//from api or shutdown signal
|
||||
while let Some((prio, promises, sender)) = select! {
|
||||
next = stream_open_receiver.next().fuse() => next,
|
||||
_ = shutdown_open_manager_receiver => None,
|
||||
} {
|
||||
debug!(?prio, ?promises, "got request to open a new stream");
|
||||
let send_outgoing = send_outgoing.clone();
|
||||
let sid = stream_ids;
|
||||
@ -251,21 +294,74 @@ impl BParticipant {
|
||||
})
|
||||
.await;
|
||||
sender.send(stream).unwrap();
|
||||
stream_ids += 1;
|
||||
stream_ids += Sid::from(1);
|
||||
}
|
||||
trace!("stop open_manager");
|
||||
}
|
||||
|
||||
async fn shutdown_manager(&self, mut shutdown_api_receiver: mpsc::UnboundedReceiver<Sid>) {
|
||||
async fn shutdown_manager(
|
||||
&self,
|
||||
shutdown_receiver: oneshot::Receiver<()>,
|
||||
mut to_shutdown: Vec<oneshot::Sender<()>>,
|
||||
) {
|
||||
trace!("start shutdown_manager");
|
||||
while let Some(sid) = shutdown_api_receiver.next().await {
|
||||
trace!(?sid, "got request to close steam");
|
||||
self.streams.write().await.remove(&sid);
|
||||
self.send_frame(Frame::CloseStream { sid }).await;
|
||||
shutdown_receiver.await.unwrap();
|
||||
debug!("closing all managers");
|
||||
for sender in to_shutdown.drain(..) {
|
||||
if sender.send(()).is_err() {
|
||||
debug!("manager seems to be closed already, weird, maybe a bug");
|
||||
};
|
||||
}
|
||||
debug!("closing all streams");
|
||||
let mut streams = self.streams.write().await;
|
||||
for (sid, (_, _, _, closing)) in streams.drain() {
|
||||
trace!(?sid, "shutting down Stream");
|
||||
closing.store(true, Ordering::Relaxed);
|
||||
}
|
||||
trace!("stop shutdown_manager");
|
||||
}
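// Illustrative sketch (not part of this diff) of the Arc<AtomicBool> close
// handshake used above: BParticipant flips the flag, and the Stream side
// checks it before sending. `SketchStream` and `SendError` are made-up names.
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

#[derive(Debug)]
struct SendError;

struct SketchStream {
    closed: Arc<AtomicBool>,
}

impl SketchStream {
    fn send(&self, _msg: &str) -> Result<(), SendError> {
        // a send after the remote/participant closed the stream must fail
        if self.closed.load(Ordering::Relaxed) {
            return Err(SendError);
        }
        // ... queue the message into the prio manager here ...
        Ok(())
    }
}

fn main() {
    let closed = Arc::new(AtomicBool::new(false));
    let stream = SketchStream { closed: closed.clone() };
    assert!(stream.send("hello").is_ok());
    closed.store(true, Ordering::Relaxed); // what shutdown_manager does per stream
    assert!(stream.send("too late").is_err());
}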
|
||||
|
||||
async fn stream_close_manager(
|
||||
&self,
|
||||
mut shutdown_api_receiver: mpsc::UnboundedReceiver<Sid>,
|
||||
shutdown_stream_close_manager_receiver: oneshot::Receiver<()>,
|
||||
mut stream_finished_request_sender: mpsc::UnboundedSender<(Pid, Sid, oneshot::Sender<()>)>,
|
||||
) {
|
||||
trace!("start stream_close_manager");
|
||||
let mut shutdown_stream_close_manager_receiver =
|
||||
shutdown_stream_close_manager_receiver.fuse();
|
||||
//from api or shutdown signal
|
||||
while let Some(sid) = select! {
|
||||
next = shutdown_api_receiver.next().fuse() => next,
|
||||
_ = shutdown_stream_close_manager_receiver => None,
|
||||
} {
|
||||
trace!(?sid, "got request from api to close steam");
|
||||
//TODO: wait here till the last prio was sent!
//The problem is that the close msg, as a control message, is sent directly, while
// normal messages are only sent on the next prio tick. This means we
// close the stream first and then send the headers and data packages...
// ofc the other side then no longer finds the respective stream.
//However, we need to find out when the last message of a stream was sent. It
// would be useful to get a snapshot here, like: this stream has sent out up to
// msgid n, while the prio has only sent m; then sleep as long as n < m, maybe...
debug!("IF YOU SEE THIS, FIND A PROPER FIX FOR CLOSING STREAMS");
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
trace!(?sid, "wait for stream to be flushed");
|
||||
stream_finished_request_sender
|
||||
.send((self.remote_pid, sid, sender))
|
||||
.await
|
||||
.unwrap();
|
||||
receiver.await.unwrap();
|
||||
trace!(?sid, "stream was successfully flushed");
|
||||
|
||||
self.streams.write().await.remove(&sid);
|
||||
//from local, notify remote
|
||||
self.send_frame(Frame::CloseStream { sid }).await;
|
||||
}
|
||||
trace!("stop stream_close_manager");
|
||||
}
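// Illustrative sketch (not part of this diff) of the select!-with-shutdown
// pattern that open_manager and stream_close_manager use above: the manager
// loop ends either when its work queue closes or when the shutdown oneshot
// fires (shutdown may win over queued work, just like in the real managers).
use futures::{
    channel::{mpsc, oneshot},
    executor::block_on,
    future::FutureExt,
    select,
    sink::SinkExt,
    stream::StreamExt,
};

fn main() {
    block_on(async {
        let (mut work_tx, mut work_rx) = mpsc::unbounded::<u32>();
        let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>();

        let manager = async move {
            let mut shutdown_rx = shutdown_rx.fuse();
            while let Some(job) = select! {
                next = work_rx.next().fuse() => next,
                _ = shutdown_rx => None, // shutdown: leave the loop
            } {
                println!("handling job {}", job);
            }
            println!("manager stopped");
        };
        let driver = async move {
            work_tx.send(1).await.unwrap();
            work_tx.send(2).await.unwrap();
            shutdown_tx.send(()).unwrap();
        };
        futures::join!(manager, driver);
    });
}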
|
||||
|
||||
async fn create_stream(
|
||||
&self,
|
||||
sid: Sid,
|
||||
@ -275,11 +371,11 @@ impl BParticipant {
|
||||
shutdown_api_sender: &mpsc::UnboundedSender<Sid>,
|
||||
) -> Stream {
|
||||
let (msg_recv_sender, msg_recv_receiver) = mpsc::unbounded::<InCommingMessage>();
|
||||
let (shutdown1_sender, shutdown1_receiver) = oneshot::channel();
|
||||
let closed = Arc::new(AtomicBool::new(false));
|
||||
self.streams
|
||||
.write()
|
||||
.await
|
||||
.insert(sid, (prio, promises, msg_recv_sender, shutdown1_sender));
|
||||
.insert(sid, (prio, promises, msg_recv_sender, closed.clone()));
|
||||
Stream::new(
|
||||
self.remote_pid,
|
||||
sid,
|
||||
@ -287,7 +383,7 @@ impl BParticipant {
|
||||
promises,
|
||||
send_outgoing,
|
||||
msg_recv_receiver,
|
||||
shutdown1_receiver,
|
||||
closed.clone(),
|
||||
shutdown_api_sender.clone(),
|
||||
)
|
||||
}
|
||||
|
@ -7,12 +7,11 @@ Note: TODO: prio0 will be send immeadiatly when found!
|
||||
*/
|
||||
|
||||
use crate::{
|
||||
frames::Frame,
|
||||
message::OutGoingMessage,
|
||||
types::{Pid, Prio, Sid},
|
||||
types::{Frame, Pid, Prio, Sid},
|
||||
};
|
||||
use std::{
|
||||
collections::{HashSet, VecDeque},
|
||||
collections::{HashMap, HashSet, VecDeque},
|
||||
sync::mpsc::{channel, Receiver, Sender},
|
||||
};
|
||||
|
||||
@ -24,6 +23,7 @@ pub(crate) struct PrioManager {
|
||||
points: [u32; PRIO_MAX],
|
||||
messages: [VecDeque<(Pid, Sid, OutGoingMessage)>; PRIO_MAX],
|
||||
messages_rx: Receiver<(Prio, Pid, Sid, OutGoingMessage)>,
|
||||
pid_sid_owned: HashMap<(Pid, Sid), u64>,
|
||||
queued: HashSet<u8>,
|
||||
}
|
||||
|
||||
@ -110,6 +110,7 @@ impl PrioManager {
|
||||
],
|
||||
messages_rx,
|
||||
queued: HashSet::new(), //TODO: optimize with u64 and 64 bits
|
||||
pid_sid_owned: HashMap::new(),
|
||||
},
|
||||
messages_tx,
|
||||
)
|
||||
@ -117,11 +118,21 @@ impl PrioManager {
|
||||
|
||||
fn tick(&mut self) {
|
||||
// Check Range
|
||||
let mut times = 0;
|
||||
for (prio, pid, sid, msg) in self.messages_rx.try_iter() {
|
||||
debug_assert!(prio as usize <= PRIO_MAX);
|
||||
trace!(?prio, ?sid, ?pid, "tick");
|
||||
times += 1;
|
||||
//trace!(?prio, ?sid, ?pid, "tick");
|
||||
self.queued.insert(prio);
|
||||
self.messages[prio as usize].push_back((pid, sid, msg));
|
||||
if let Some(cnt) = self.pid_sid_owned.get_mut(&(pid, sid)) {
|
||||
*cnt += 1;
|
||||
} else {
|
||||
self.pid_sid_owned.insert((pid, sid), 1);
|
||||
}
|
||||
}
|
||||
if times > 0 {
|
||||
trace!(?times, "tick");
|
||||
}
|
||||
}
|
||||
|
||||
@ -191,7 +202,7 @@ impl PrioManager {
|
||||
for _ in 0..no_of_frames {
|
||||
match self.calc_next_prio() {
|
||||
Some(prio) => {
|
||||
trace!(?prio, "handle next prio");
|
||||
//trace!(?prio, "handle next prio");
|
||||
self.points[prio as usize] += Self::PRIOS[prio as usize];
|
||||
//pop message from front of VecDeque, handle it and push it back, so that all
|
||||
// => messages with same prio get a fair chance :)
|
||||
@ -204,6 +215,15 @@ impl PrioManager {
|
||||
if self.messages[prio as usize].is_empty() {
|
||||
self.queued.remove(&prio);
|
||||
}
|
||||
//decrease pid_sid counter by 1 again
|
||||
let cnt = self.pid_sid_owned.get_mut(&(pid, sid)).expect(
|
||||
"the pid_sid_owned counter works wrong, more pid,sid removed \
|
||||
than inserted",
|
||||
);
|
||||
*cnt -= 1;
|
||||
if *cnt == 0 {
|
||||
self.pid_sid_owned.remove(&(pid, sid));
|
||||
}
|
||||
} else {
|
||||
self.messages[prio as usize].push_back((pid, sid, msg));
|
||||
//trace!(?m.mid, "repush message");
|
||||
@ -221,6 +241,12 @@ impl PrioManager {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If you want to make sure that the prio queue of a single pid and sid is
/// empty, use this.
|
||||
pub(crate) fn contains_pid_sid(&self, pid: Pid, sid: Sid) -> bool {
|
||||
self.pid_sid_owned.contains_key(&(pid, sid))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for PrioManager {
|
||||
@ -237,17 +263,17 @@ impl std::fmt::Debug for PrioManager {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{
|
||||
frames::Frame,
|
||||
message::{MessageBuffer, OutGoingMessage},
|
||||
prios::*,
|
||||
types::{Pid, Prio, Sid},
|
||||
types::{Frame, Pid, Prio, Sid},
|
||||
};
|
||||
use std::{collections::VecDeque, sync::Arc};
|
||||
|
||||
const SIZE: u64 = PrioManager::FRAME_DATA_SIZE;
|
||||
const USIZE: usize = PrioManager::FRAME_DATA_SIZE as usize;
|
||||
|
||||
fn mock_out(prio: Prio, sid: Sid) -> (Prio, Pid, Sid, OutGoingMessage) {
|
||||
fn mock_out(prio: Prio, sid: u64) -> (Prio, Pid, Sid, OutGoingMessage) {
|
||||
let sid = Sid::new(sid);
|
||||
(prio, Pid::fake(0), sid, OutGoingMessage {
|
||||
buffer: Arc::new(MessageBuffer {
|
||||
data: vec![48, 49, 50],
|
||||
@ -258,7 +284,8 @@ mod tests {
|
||||
})
|
||||
}
|
||||
|
||||
fn mock_out_large(prio: Prio, sid: Sid) -> (Prio, Pid, Sid, OutGoingMessage) {
|
||||
fn mock_out_large(prio: Prio, sid: u64) -> (Prio, Pid, Sid, OutGoingMessage) {
|
||||
let sid = Sid::new(sid);
|
||||
let mut data = vec![48; USIZE];
|
||||
data.append(&mut vec![49; USIZE]);
|
||||
data.append(&mut vec![50; 20]);
|
||||
@ -270,14 +297,14 @@ mod tests {
|
||||
})
|
||||
}
|
||||
|
||||
fn assert_header(frames: &mut VecDeque<(Pid, Sid, Frame)>, f_sid: Sid, f_length: u64) {
|
||||
fn assert_header(frames: &mut VecDeque<(Pid, Sid, Frame)>, f_sid: u64, f_length: u64) {
|
||||
let frame = frames
|
||||
.pop_front()
|
||||
.expect("frames vecdeque doesn't contain enough frames!")
|
||||
.2;
|
||||
if let Frame::DataHeader { mid, sid, length } = frame {
|
||||
assert_eq!(mid, 1);
|
||||
assert_eq!(sid, f_sid);
|
||||
assert_eq!(sid, Sid::new(f_sid));
|
||||
assert_eq!(length, f_length);
|
||||
} else {
|
||||
panic!("wrong frame type!, expected DataHeader");
|
||||
@ -298,6 +325,14 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_contains(mgr: &PrioManager, sid: u64) {
|
||||
assert!(mgr.contains_pid_sid(Pid::fake(0), Sid::new(sid)));
|
||||
}
|
||||
|
||||
fn assert_no_contains(mgr: &PrioManager, sid: u64) {
|
||||
assert!(!mgr.contains_pid_sid(Pid::fake(0), Sid::new(sid)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_p16() {
|
||||
let (mut mgr, tx) = PrioManager::new();
|
||||
@ -316,8 +351,13 @@ mod tests {
|
||||
tx.send(mock_out(16, 1337)).unwrap();
|
||||
tx.send(mock_out(20, 42)).unwrap();
|
||||
let mut frames = VecDeque::new();
|
||||
|
||||
mgr.fill_frames(100, &mut frames);
|
||||
|
||||
assert_no_contains(&mgr, 1337);
|
||||
assert_no_contains(&mgr, 42);
|
||||
assert_no_contains(&mgr, 666);
|
||||
|
||||
assert_header(&mut frames, 1337, 3);
|
||||
assert_data(&mut frames, 0, vec![48, 49, 50]);
|
||||
assert_header(&mut frames, 42, 3);
|
||||
@ -382,8 +422,14 @@ mod tests {
|
||||
tx.send(mock_out(16, 9)).unwrap();
|
||||
tx.send(mock_out(16, 11)).unwrap();
|
||||
tx.send(mock_out(20, 13)).unwrap();
|
||||
|
||||
let mut frames = VecDeque::new();
|
||||
mgr.fill_frames(3, &mut frames);
|
||||
|
||||
assert_no_contains(&mgr, 1);
|
||||
assert_no_contains(&mgr, 3);
|
||||
assert_contains(&mgr, 13);
|
||||
|
||||
for i in 1..4 {
|
||||
assert_header(&mut frames, i, 3);
|
||||
assert_data(&mut frames, 0, vec![48, 49, 50]);
|
||||
|
269
network/src/protocols.rs
Normal file
@ -0,0 +1,269 @@
|
||||
use crate::types::Frame;
|
||||
use async_std::{
|
||||
net::{TcpStream, UdpSocket},
|
||||
prelude::*,
|
||||
sync::RwLock,
|
||||
};
|
||||
use futures::{channel::mpsc, future::FutureExt, select, sink::SinkExt, stream::StreamExt};
|
||||
use std::{net::SocketAddr, sync::Arc};
|
||||
use tracing::*;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum Protocols {
|
||||
Tcp(TcpProtocol),
|
||||
Udp(UdpProtocol),
|
||||
//Mpsc(MpscChannel),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct TcpProtocol {
|
||||
stream: TcpStream,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct UdpProtocol {
|
||||
socket: Arc<UdpSocket>,
|
||||
remote_addr: SocketAddr,
|
||||
data_in: RwLock<mpsc::UnboundedReceiver<Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl TcpProtocol {
|
||||
pub(crate) fn new(stream: TcpStream) -> Self { Self { stream } }
|
||||
|
||||
pub async fn read(&self, mut frame_handler: mpsc::UnboundedSender<Frame>) {
|
||||
let mut stream = self.stream.clone();
|
||||
let mut buffer = NetworkBuffer::new();
|
||||
loop {
|
||||
match stream.read(buffer.get_write_slice(2048)).await {
|
||||
Ok(0) => {
|
||||
debug!(?buffer, "shutdown of tcp channel detected");
|
||||
frame_handler.send(Frame::Shutdown).await.unwrap();
|
||||
break;
|
||||
},
|
||||
Ok(n) => {
|
||||
buffer.actually_written(n);
|
||||
trace!("incomming message with len: {}", n);
|
||||
let slice = buffer.get_read_slice();
|
||||
let mut cur = std::io::Cursor::new(slice);
|
||||
let mut read_ok = 0;
|
||||
while cur.position() < n as u64 {
|
||||
let round_start = cur.position() as usize;
|
||||
let r: Result<Frame, _> = bincode::deserialize_from(&mut cur);
|
||||
match r {
|
||||
Ok(frame) => {
|
||||
frame_handler.send(frame).await.unwrap();
|
||||
read_ok = cur.position() as usize;
|
||||
},
|
||||
Err(e) => {
|
||||
// Probably we have to wait for more data!
|
||||
let first_bytes_of_msg =
|
||||
&slice[round_start..std::cmp::min(n, round_start + 16)];
|
||||
trace!(
|
||||
?buffer,
|
||||
?e,
|
||||
?n,
|
||||
?round_start,
|
||||
?first_bytes_of_msg,
|
||||
"message cant be parsed, probably because we need to wait for \
|
||||
more data"
|
||||
);
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
buffer.actually_read(read_ok);
|
||||
},
|
||||
Err(e) => panic!("{}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//serialization happens here as this is executed in a separate thread PER channel.
// This limits throughput per single receiver but stays in the same thread (maybe,
// as it's in a threadpool) for TCP, UDP and MPSC
|
||||
pub async fn write(
|
||||
&self,
|
||||
mut internal_frame_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
mut external_frame_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
) {
|
||||
let mut stream = self.stream.clone();
|
||||
while let Some(frame) = select! {
|
||||
next = internal_frame_receiver.next().fuse() => next,
|
||||
next = external_frame_receiver.next().fuse() => next,
|
||||
} {
|
||||
let data = bincode::serialize(&frame).unwrap();
|
||||
let len = data.len();
|
||||
trace!(?len, "going to send frame via Tcp");
|
||||
stream.write_all(data.as_slice()).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UdpProtocol {
|
||||
pub(crate) fn new(
|
||||
socket: Arc<UdpSocket>,
|
||||
remote_addr: SocketAddr,
|
||||
data_in: mpsc::UnboundedReceiver<Vec<u8>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
socket,
|
||||
remote_addr,
|
||||
data_in: RwLock::new(data_in),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn read(&self, mut frame_handler: mpsc::UnboundedSender<Frame>) {
|
||||
let mut data_in = self.data_in.write().await;
|
||||
let mut buffer = NetworkBuffer::new();
|
||||
while let Some(data) = data_in.next().await {
|
||||
let n = data.len();
|
||||
let slice = &mut buffer.get_write_slice(n)[0..n]; //get_write_slice can return more than n!
|
||||
slice.clone_from_slice(data.as_slice());
|
||||
buffer.actually_written(n);
|
||||
trace!("incomming message with len: {}", n);
|
||||
let slice = buffer.get_read_slice();
|
||||
let mut cur = std::io::Cursor::new(slice);
|
||||
let mut read_ok = 0;
|
||||
while cur.position() < n as u64 {
|
||||
let round_start = cur.position() as usize;
|
||||
let r: Result<Frame, _> = bincode::deserialize_from(&mut cur);
|
||||
match r {
|
||||
Ok(frame) => {
|
||||
frame_handler.send(frame).await.unwrap();
|
||||
read_ok = cur.position() as usize;
|
||||
},
|
||||
Err(e) => {
|
||||
// Probably we have to wait for more data!
|
||||
let first_bytes_of_msg =
|
||||
&slice[round_start..std::cmp::min(n, round_start + 16)];
|
||||
debug!(
|
||||
?buffer,
|
||||
?e,
|
||||
?n,
|
||||
?round_start,
|
||||
?first_bytes_of_msg,
|
||||
"message cant be parsed, probably because we need to wait for more \
|
||||
data"
|
||||
);
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
buffer.actually_read(read_ok);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn write(
|
||||
&self,
|
||||
mut internal_frame_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
mut external_frame_receiver: mpsc::UnboundedReceiver<Frame>,
|
||||
) {
|
||||
let mut buffer = NetworkBuffer::new();
|
||||
while let Some(frame) = select! {
|
||||
next = internal_frame_receiver.next().fuse() => next,
|
||||
next = external_frame_receiver.next().fuse() => next,
|
||||
} {
|
||||
let len = bincode::serialized_size(&frame).unwrap() as usize;
|
||||
match bincode::serialize_into(buffer.get_write_slice(len), &frame) {
|
||||
Ok(_) => buffer.actually_written(len),
|
||||
Err(e) => error!("Oh nooo {}", e),
|
||||
};
|
||||
trace!(?len, "going to send frame via Udp");
|
||||
let mut to_send = buffer.get_read_slice();
|
||||
while !to_send.is_empty() {
|
||||
match self.socket.send_to(to_send, self.remote_addr).await {
|
||||
Ok(n) => buffer.actually_read(n),
|
||||
Err(e) => error!(?e, "need to handle that error!"),
|
||||
}
|
||||
to_send = buffer.get_read_slice();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// INTERNAL NetworkBuffer
|
||||
|
||||
struct NetworkBuffer {
|
||||
pub(crate) data: Vec<u8>,
|
||||
pub(crate) read_idx: usize,
|
||||
pub(crate) write_idx: usize,
|
||||
}
|
||||
|
||||
/// NetworkBuffer to use for streamed access
|
||||
/// valid data is between read_idx and write_idx!
|
||||
/// everything before read_idx is already processed and no longer important
|
||||
/// everything after write_idx is either 0 or random data buffered
|
||||
impl NetworkBuffer {
|
||||
fn new() -> Self {
|
||||
NetworkBuffer {
|
||||
data: vec![0; 2048],
|
||||
read_idx: 0,
|
||||
write_idx: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_write_slice(&mut self, min_size: usize) -> &mut [u8] {
|
||||
if self.data.len() < self.write_idx + min_size {
|
||||
trace!(
|
||||
?self,
|
||||
?min_size,
|
||||
"need to resize because buffer is to small"
|
||||
);
|
||||
self.data.resize(self.write_idx + min_size, 0);
|
||||
}
|
||||
&mut self.data[self.write_idx..]
|
||||
}
|
||||
|
||||
fn actually_written(&mut self, cnt: usize) { self.write_idx += cnt; }
|
||||
|
||||
fn get_read_slice(&self) -> &[u8] { &self.data[self.read_idx..self.write_idx] }
|
||||
|
||||
fn actually_read(&mut self, cnt: usize) {
|
||||
self.read_idx += cnt;
|
||||
if self.read_idx == self.write_idx {
|
||||
if self.read_idx > 10485760 {
|
||||
trace!(?self, "buffer empty, resetting indices");
|
||||
}
|
||||
self.read_idx = 0;
|
||||
self.write_idx = 0;
|
||||
}
|
||||
if self.write_idx > 10485760 {
|
||||
if self.write_idx - self.read_idx < 65536 {
|
||||
debug!(
|
||||
?self,
|
||||
"This buffer is filled over 10 MB, but the actual data diff is less then \
|
||||
65kB, which is a sign of stressing this connection much as always new data \
|
||||
comes in - nevertheless, in order to handle this we will remove some data \
|
||||
now so that this buffer doesn't grow endlessly"
|
||||
);
|
||||
let mut i2 = 0;
|
||||
for i in self.read_idx..self.write_idx {
|
||||
self.data[i2] = self.data[i];
|
||||
i2 += 1;
|
||||
}
|
||||
self.read_idx = 0;
|
||||
self.write_idx = i2;
|
||||
}
|
||||
if self.data.len() > 67108864 {
|
||||
warn!(
|
||||
?self,
|
||||
"over 64Mbyte used, something seems fishy, len: {}",
|
||||
self.data.len()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for NetworkBuffer {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"NetworkBuffer(len: {}, read: {}, write: {})",
|
||||
self.data.len(),
|
||||
self.read_idx,
|
||||
self.write_idx
|
||||
)
|
||||
}
|
||||
}
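// Illustrative test-style sketch (not part of this diff) of the NetworkBuffer
// contract documented above; since the type is private it would have to live
// inside protocols.rs. Valid bytes are always between read_idx and write_idx.
#[cfg(test)]
mod network_buffer_tests {
    use super::NetworkBuffer;

    #[test]
    fn write_then_read_moves_the_window() {
        let mut buffer = NetworkBuffer::new();

        // reserve space, copy data in, then commit how much was really written
        let slice = buffer.get_write_slice(4);
        slice[..4].copy_from_slice(&[1, 2, 3, 4]);
        buffer.actually_written(4);
        assert_eq!(buffer.get_read_slice(), &[1, 2, 3, 4][..]);

        // a partial read leaves the unread tail visible
        buffer.actually_read(2);
        assert_eq!(buffer.get_read_slice(), &[3, 4][..]);

        // once everything is consumed both indices reset to 0
        buffer.actually_read(2);
        assert!(buffer.get_read_slice().is_empty());
    }
}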
|
@ -1,13 +1,16 @@
|
||||
use crate::{
|
||||
api::{Address, Participant},
|
||||
channel::Channel,
|
||||
frames::Frame,
|
||||
message::OutGoingMessage,
|
||||
participant::BParticipant,
|
||||
prios::PrioManager,
|
||||
types::{Cid, Pid, Prio, Sid},
|
||||
protocols::{Protocols, TcpProtocol, UdpProtocol},
|
||||
types::{Cid, Frame, Pid, Prio, Sid},
|
||||
};
|
||||
use async_std::{
|
||||
io, net,
|
||||
sync::{Mutex, RwLock},
|
||||
};
|
||||
use async_std::sync::RwLock;
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
executor::ThreadPool,
|
||||
@ -27,13 +30,23 @@ use tracing::*;
|
||||
use tracing_futures::Instrument;
|
||||
//use futures::prelude::*;
|
||||
|
||||
type ParticipantInfo = (
|
||||
mpsc::UnboundedSender<(Cid, mpsc::UnboundedSender<Frame>)>,
|
||||
mpsc::UnboundedSender<Frame>,
|
||||
mpsc::UnboundedSender<(Pid, Sid, Frame)>,
|
||||
oneshot::Sender<()>,
|
||||
);
|
||||
type UnknownChannelInfo = (
|
||||
mpsc::UnboundedSender<Frame>,
|
||||
Option<oneshot::Sender<io::Result<Participant>>>,
|
||||
);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ControlChannels {
|
||||
listen_receiver: mpsc::UnboundedReceiver<Address>,
|
||||
connect_receiver: mpsc::UnboundedReceiver<(Address, oneshot::Sender<Participant>)>,
|
||||
listen_receiver: mpsc::UnboundedReceiver<(Address, oneshot::Sender<io::Result<()>>)>,
|
||||
connect_receiver: mpsc::UnboundedReceiver<(Address, oneshot::Sender<io::Result<Participant>>)>,
|
||||
connected_sender: mpsc::UnboundedSender<Participant>,
|
||||
shutdown_receiver: oneshot::Receiver<()>,
|
||||
prios: PrioManager,
|
||||
prios_sender: std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>,
|
||||
}
|
||||
|
||||
@ -43,32 +56,12 @@ pub struct Scheduler {
|
||||
closed: AtomicBool,
|
||||
pool: Arc<ThreadPool>,
|
||||
run_channels: Option<ControlChannels>,
|
||||
participants: Arc<
|
||||
RwLock<
|
||||
HashMap<
|
||||
Pid,
|
||||
(
|
||||
mpsc::UnboundedSender<(Cid, mpsc::UnboundedSender<Frame>)>,
|
||||
mpsc::UnboundedSender<Frame>,
|
||||
mpsc::UnboundedSender<(Pid, Sid, Frame)>,
|
||||
),
|
||||
>,
|
||||
>,
|
||||
>,
|
||||
participants: Arc<RwLock<HashMap<Pid, ParticipantInfo>>>,
|
||||
participant_from_channel: Arc<RwLock<HashMap<Cid, Pid>>>,
|
||||
channel_ids: Arc<AtomicU64>,
|
||||
channel_listener: RwLock<HashMap<Address, oneshot::Sender<()>>>,
|
||||
unknown_channels: Arc<
|
||||
RwLock<
|
||||
HashMap<
|
||||
Cid,
|
||||
(
|
||||
mpsc::UnboundedSender<Frame>,
|
||||
Option<oneshot::Sender<Participant>>,
|
||||
),
|
||||
>,
|
||||
>,
|
||||
>,
|
||||
unknown_channels: Arc<RwLock<HashMap<Cid, UnknownChannelInfo>>>,
|
||||
prios: Arc<Mutex<PrioManager>>,
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
@ -76,14 +69,15 @@ impl Scheduler {
|
||||
local_pid: Pid,
|
||||
) -> (
|
||||
Self,
|
||||
mpsc::UnboundedSender<Address>,
|
||||
mpsc::UnboundedSender<(Address, oneshot::Sender<Participant>)>,
|
||||
mpsc::UnboundedSender<(Address, oneshot::Sender<io::Result<()>>)>,
|
||||
mpsc::UnboundedSender<(Address, oneshot::Sender<io::Result<Participant>>)>,
|
||||
mpsc::UnboundedReceiver<Participant>,
|
||||
oneshot::Sender<()>,
|
||||
) {
|
||||
let (listen_sender, listen_receiver) = mpsc::unbounded::<Address>();
|
||||
let (listen_sender, listen_receiver) =
|
||||
mpsc::unbounded::<(Address, oneshot::Sender<io::Result<()>>)>();
|
||||
let (connect_sender, connect_receiver) =
|
||||
mpsc::unbounded::<(Address, oneshot::Sender<Participant>)>();
|
||||
mpsc::unbounded::<(Address, oneshot::Sender<io::Result<Participant>>)>();
|
||||
let (connected_sender, connected_receiver) = mpsc::unbounded::<Participant>();
|
||||
let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>();
|
||||
let (prios, prios_sender) = PrioManager::new();
|
||||
@ -93,7 +87,6 @@ impl Scheduler {
|
||||
connect_receiver,
|
||||
connected_sender,
|
||||
shutdown_receiver,
|
||||
prios,
|
||||
prios_sender,
|
||||
});
|
||||
|
||||
@ -108,6 +101,7 @@ impl Scheduler {
|
||||
channel_ids: Arc::new(AtomicU64::new(0)),
|
||||
channel_listener: RwLock::new(HashMap::new()),
|
||||
unknown_channels: Arc::new(RwLock::new(HashMap::new())),
|
||||
prios: Arc::new(Mutex::new(prios)),
|
||||
},
|
||||
listen_sender,
|
||||
connect_sender,
|
||||
@ -118,8 +112,10 @@ impl Scheduler {
|
||||
|
||||
pub async fn run(mut self) {
|
||||
let (part_out_sender, part_out_receiver) = mpsc::unbounded::<(Cid, Frame)>();
|
||||
let (configured_sender, configured_receiver) = mpsc::unbounded::<(Cid, Pid, Sid)>();
|
||||
let (configured_sender, configured_receiver) =
|
||||
mpsc::unbounded::<(Cid, Pid, Sid, oneshot::Sender<()>)>();
|
||||
let (disconnect_sender, disconnect_receiver) = mpsc::unbounded::<Pid>();
|
||||
let (stream_finished_request_sender, stream_finished_request_receiver) = mpsc::unbounded();
|
||||
let run_channels = self.run_channels.take().unwrap();
|
||||
|
||||
futures::join!(
|
||||
@ -134,7 +130,8 @@ impl Scheduler {
|
||||
configured_sender,
|
||||
),
|
||||
self.disconnect_manager(disconnect_receiver,),
|
||||
self.send_outgoing(run_channels.prios),
|
||||
self.send_outgoing(),
|
||||
self.stream_finished_manager(stream_finished_request_receiver),
|
||||
self.shutdown_manager(run_channels.shutdown_receiver),
|
||||
self.handle_frames(part_out_receiver),
|
||||
self.channel_configurer(
|
||||
@ -142,18 +139,19 @@ impl Scheduler {
|
||||
configured_receiver,
|
||||
disconnect_sender,
|
||||
run_channels.prios_sender.clone(),
|
||||
stream_finished_request_sender.clone(),
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
async fn listen_manager(
|
||||
&self,
|
||||
mut listen_receiver: mpsc::UnboundedReceiver<Address>,
|
||||
mut listen_receiver: mpsc::UnboundedReceiver<(Address, oneshot::Sender<io::Result<()>>)>,
|
||||
part_out_sender: mpsc::UnboundedSender<(Cid, Frame)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
) {
|
||||
trace!("start listen_manager");
|
||||
while let Some(address) = listen_receiver.next().await {
|
||||
while let Some((address, result_sender)) = listen_receiver.next().await {
|
||||
debug!(?address, "got request to open a channel_creator");
|
||||
let (end_sender, end_receiver) = oneshot::channel::<()>();
|
||||
self.channel_listener
|
||||
@ -169,6 +167,7 @@ impl Scheduler {
|
||||
part_out_sender.clone(),
|
||||
configured_sender.clone(),
|
||||
self.unknown_channels.clone(),
|
||||
result_sender,
|
||||
));
|
||||
}
|
||||
trace!("stop listen_manager");
|
||||
@ -176,33 +175,72 @@ impl Scheduler {
|
||||
|
||||
async fn connect_manager(
|
||||
&self,
|
||||
mut connect_receiver: mpsc::UnboundedReceiver<(Address, oneshot::Sender<Participant>)>,
|
||||
mut connect_receiver: mpsc::UnboundedReceiver<(
|
||||
Address,
|
||||
oneshot::Sender<io::Result<Participant>>,
|
||||
)>,
|
||||
part_out_sender: mpsc::UnboundedSender<(Cid, Frame)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
) {
|
||||
trace!("start connect_manager");
|
||||
while let Some((addr, pid_sender)) = connect_receiver.next().await {
|
||||
match addr {
|
||||
Address::Tcp(addr) => {
|
||||
let stream = async_std::net::TcpStream::connect(addr).await.unwrap();
|
||||
info!("Connectiong TCP to: {}", stream.peer_addr().unwrap());
|
||||
let (part_in_sender, part_in_receiver) = mpsc::unbounded::<Frame>();
|
||||
//channels are unknown till PID is known!
|
||||
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
|
||||
self.unknown_channels
|
||||
.write()
|
||||
.await
|
||||
.insert(cid, (part_in_sender, Some(pid_sender)));
|
||||
self.pool.spawn_ok(
|
||||
Channel::new(cid, self.local_pid)
|
||||
.run(
|
||||
stream,
|
||||
part_in_receiver,
|
||||
part_out_sender.clone(),
|
||||
configured_sender.clone(),
|
||||
let stream = match net::TcpStream::connect(addr).await {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
pid_sender.send(Err(e)).unwrap();
|
||||
continue;
|
||||
},
|
||||
};
|
||||
info!("Connecting Tcp to: {}", stream.peer_addr().unwrap());
|
||||
Self::init_protocol(
|
||||
&self.channel_ids,
|
||||
self.local_pid,
|
||||
addr,
|
||||
&self.pool,
|
||||
&part_out_sender,
|
||||
&configured_sender,
|
||||
&self.unknown_channels,
|
||||
Protocols::Tcp(TcpProtocol::new(stream)),
|
||||
Some(pid_sender),
|
||||
false,
|
||||
)
|
||||
.instrument(tracing::info_span!("channel", ?addr)),
|
||||
.await;
|
||||
},
|
||||
Address::Udp(addr) => {
|
||||
let socket = match net::UdpSocket::bind("0.0.0.0:0").await {
|
||||
Ok(socket) => Arc::new(socket),
|
||||
Err(e) => {
|
||||
pid_sender.send(Err(e)).unwrap();
|
||||
continue;
|
||||
},
|
||||
};
|
||||
if let Err(e) = socket.connect(addr).await {
|
||||
pid_sender.send(Err(e)).unwrap();
|
||||
continue;
|
||||
};
|
||||
info!("Connecting Udp to: {}", addr);
|
||||
let (udp_data_sender, udp_data_receiver) = mpsc::unbounded::<Vec<u8>>();
|
||||
let protocol =
|
||||
Protocols::Udp(UdpProtocol::new(socket.clone(), addr, udp_data_receiver));
|
||||
self.pool.spawn_ok(
|
||||
Self::udp_single_channel_connect(socket.clone(), udp_data_sender)
|
||||
.instrument(tracing::info_span!("udp", ?addr)),
|
||||
);
|
||||
Self::init_protocol(
|
||||
&self.channel_ids,
|
||||
self.local_pid,
|
||||
addr,
|
||||
&self.pool,
|
||||
&part_out_sender,
|
||||
&configured_sender,
|
||||
&self.unknown_channels,
|
||||
protocol,
|
||||
Some(pid_sender),
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
},
|
||||
_ => unimplemented!(),
|
||||
}
|
||||
@ -213,22 +251,33 @@ impl Scheduler {
|
||||
async fn disconnect_manager(&self, mut disconnect_receiver: mpsc::UnboundedReceiver<Pid>) {
|
||||
trace!("start disconnect_manager");
|
||||
while let Some(pid) = disconnect_receiver.next().await {
|
||||
error!(?pid, "I need to disconnect the pid");
|
||||
//Closing Participants is done the following way:
// 1. We drop our senders and receivers
// 2. We need to close BParticipant, this will drop its senders and receivers
// 3. Participant will try to access the BParticipant senders and receivers with
// its next api action; it will fail and be closed then.
if let Some((_, _, _, sender)) = self.participants.write().await.remove(&pid) {
|
||||
sender.send(()).unwrap();
|
||||
}
|
||||
}
|
||||
trace!("stop disconnect_manager");
|
||||
}
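// Illustrative sketch (not part of this diff) of step 3 above: once the
// scheduler side drops its half of the channels, the next api call on the
// other side simply gets an error instead of hanging.
use futures::{channel::mpsc, executor::block_on, sink::SinkExt};

fn main() {
    block_on(async {
        let (mut api_tx, api_rx) = mpsc::unbounded::<&'static str>();
        assert!(api_tx.send("works while connected").await.is_ok());

        drop(api_rx); // what the disconnect indirectly causes

        // the participant notices the disconnect on its next action
        assert!(api_tx.send("fails after disconnect").await.is_err());
    });
}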
|
||||
|
||||
async fn send_outgoing(&self, mut prios: PrioManager) {
|
||||
async fn send_outgoing(&self) {
|
||||
//This time equals the MINIMUM average latency, so keep it down. //TODO:
// make it configurable or switch to await, e.g. Prio 0 = await, prio 50 =
// wait for more messages
|
||||
const FRAMES_PER_TICK: usize = 1000000;
|
||||
trace!("start send_outgoing");
|
||||
while !self.closed.load(Ordering::Relaxed) {
|
||||
let mut frames = VecDeque::new();
|
||||
prios.fill_frames(3, &mut frames);
|
||||
self.prios
|
||||
.lock()
|
||||
.await
|
||||
.fill_frames(FRAMES_PER_TICK, &mut frames);
|
||||
for (pid, sid, frame) in frames {
|
||||
if let Some((_, _, sender)) = self.participants.write().await.get_mut(&pid) {
|
||||
if let Some((_, _, sender, _)) = self.participants.write().await.get_mut(&pid) {
|
||||
sender.send((pid, sid, frame)).await.unwrap();
|
||||
}
|
||||
}
|
||||
@ -242,7 +291,7 @@ impl Scheduler {
|
||||
while let Some((cid, frame)) = part_out_receiver.next().await {
|
||||
trace!("handling frame");
|
||||
if let Some(pid) = self.participant_from_channel.read().await.get(&cid) {
|
||||
if let Some((_, sender, _)) = self.participants.write().await.get_mut(&pid) {
|
||||
if let Some((_, sender, _, _)) = self.participants.write().await.get_mut(&pid) {
|
||||
sender.send(frame).await.unwrap();
|
||||
}
|
||||
} else {
|
||||
@ -256,12 +305,13 @@ impl Scheduler {
|
||||
async fn channel_configurer(
|
||||
&self,
|
||||
mut connected_sender: mpsc::UnboundedSender<Participant>,
|
||||
mut receiver: mpsc::UnboundedReceiver<(Cid, Pid, Sid)>,
|
||||
mut receiver: mpsc::UnboundedReceiver<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
disconnect_sender: mpsc::UnboundedSender<Pid>,
|
||||
prios_sender: std::sync::mpsc::Sender<(Prio, Pid, Sid, OutGoingMessage)>,
|
||||
stream_finished_request_sender: mpsc::UnboundedSender<(Pid, Sid, oneshot::Sender<()>)>,
|
||||
) {
|
||||
trace!("start channel_activator");
|
||||
while let Some((cid, pid, offset_sid)) = receiver.next().await {
|
||||
while let Some((cid, pid, offset_sid, sender)) = receiver.next().await {
|
||||
if let Some((frame_sender, pid_oneshot)) =
|
||||
self.unknown_channels.write().await.remove(&cid)
|
||||
{
|
||||
@ -273,8 +323,6 @@ impl Scheduler {
|
||||
let mut participants = self.participants.write().await;
|
||||
if !participants.contains_key(&pid) {
|
||||
debug!(?cid, "new participant connected via a channel");
|
||||
let (shutdown_sender, shutdown_receiver) = oneshot::channel();
|
||||
|
||||
let (
|
||||
bparticipant,
|
||||
stream_open_sender,
|
||||
@ -282,19 +330,24 @@ impl Scheduler {
|
||||
mut transfer_channel_receiver,
|
||||
frame_recv_sender,
|
||||
frame_send_sender,
|
||||
) = BParticipant::new(pid, offset_sid, prios_sender.clone());
|
||||
shutdown_sender,
|
||||
) = BParticipant::new(
|
||||
pid,
|
||||
offset_sid,
|
||||
prios_sender.clone(),
|
||||
stream_finished_request_sender.clone(),
|
||||
);
|
||||
|
||||
let participant = Participant::new(
|
||||
self.local_pid,
|
||||
pid,
|
||||
stream_open_sender,
|
||||
stream_opened_receiver,
|
||||
shutdown_receiver,
|
||||
disconnect_sender.clone(),
|
||||
);
|
||||
if let Some(pid_oneshot) = pid_oneshot {
|
||||
// someone is waiting with connect, so give them their PID
|
||||
pid_oneshot.send(participant).unwrap();
|
||||
pid_oneshot.send(Ok(participant)).unwrap();
|
||||
} else {
|
||||
// noone is waiting on this Participant, return in to Network
|
||||
connected_sender.send(participant).await.unwrap();
|
||||
@ -309,6 +362,7 @@ impl Scheduler {
|
||||
transfer_channel_receiver,
|
||||
frame_recv_sender,
|
||||
frame_send_sender,
|
||||
shutdown_sender,
|
||||
),
|
||||
);
|
||||
self.participant_from_channel.write().await.insert(cid, pid);
|
||||
@ -323,42 +377,112 @@ impl Scheduler {
|
||||
an attack to "
|
||||
)
|
||||
}
|
||||
sender.send(()).unwrap();
|
||||
}
|
||||
}
|
||||
trace!("stop channel_activator");
|
||||
}
|
||||
|
||||
pub async fn shutdown_manager(&self, receiver: oneshot::Receiver<()>) {
|
||||
// Requested by a participant when a stream wants to close from the api;
// checks that no more msgs for it are in prio and then returns.
||||
pub(crate) async fn stream_finished_manager(
|
||||
&self,
|
||||
mut stream_finished_request_receiver: mpsc::UnboundedReceiver<(
|
||||
Pid,
|
||||
Sid,
|
||||
oneshot::Sender<()>,
|
||||
)>,
|
||||
) {
|
||||
trace!("start stream_finished_manager");
|
||||
while let Some((pid, sid, sender)) = stream_finished_request_receiver.next().await {
|
||||
//TODO: THERE MUST BE A MORE CLEVER METHOD THAN SPIN LOCKING! LIKE REGISTERING
// DIRECTLY IN PRIO AS A FUTURE WHERE PRIO IS THE WAKER! TODO: also this
// has great potential for hanging the network: create a network, send
// gigabytes, then close it. Also I need a Mutex, which really adds
// to the cost if a lot of streams want to close
let prios = self.prios.clone();
|
||||
self.pool
|
||||
.spawn_ok(Self::stream_finished_waiter(pid, sid, sender, prios));
|
||||
}
|
||||
}
|
||||
|
||||
async fn stream_finished_waiter(
|
||||
pid: Pid,
|
||||
sid: Sid,
|
||||
sender: oneshot::Sender<()>,
|
||||
prios: Arc<Mutex<PrioManager>>,
|
||||
) {
|
||||
const TICK_TIME: std::time::Duration = std::time::Duration::from_millis(5);
|
||||
//TODO: ARRRG, I need to wait for AT LEAST 1 TICK. Because I am lazy I just
// wait 15ms while the scheduler tick is 10ms, because recv is only done with a
// tick and not async as soon as we send....
|
||||
async_std::task::sleep(TICK_TIME * 3).await;
|
||||
let mut n = 0u64;
|
||||
loop {
|
||||
if !prios.lock().await.contains_pid_sid(pid, sid) {
|
||||
trace!("prio is clear, go to close stream as requested from api");
|
||||
sender.send(()).unwrap();
|
||||
break;
|
||||
}
|
||||
n += 1;
|
||||
if n > 200 {
|
||||
warn!(
|
||||
?pid,
|
||||
?sid,
|
||||
?n,
|
||||
"cant close stream, as it still queued, even after 1000ms, this starts to \
|
||||
take long"
|
||||
);
|
||||
async_std::task::sleep(TICK_TIME * 50).await;
|
||||
} else {
|
||||
async_std::task::sleep(TICK_TIME).await;
|
||||
}
|
||||
}
|
||||
}
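// Illustrative sketch (not part of this diff) of the alternative the TODO above
// hints at: instead of spin locking, the prio side could own the waiters and
// fire them the moment a (pid, sid) pair runs empty. All names here are
// hypothetical, with plain integer stand-ins for Pid and Sid.
use futures::channel::oneshot;
use std::collections::HashMap;

type Pid = u64;
type Sid = u64;

#[derive(Default)]
struct FlushWatchers {
    pending: HashMap<(Pid, Sid), Vec<oneshot::Sender<()>>>,
}

impl FlushWatchers {
    // called by the scheduler instead of polling contains_pid_sid in a loop
    fn wait_for_flush(&mut self, pid: Pid, sid: Sid) -> oneshot::Receiver<()> {
        let (tx, rx) = oneshot::channel();
        self.pending.entry((pid, sid)).or_default().push(tx);
        rx
    }

    // called by the prio manager when the counter for (pid, sid) drops to zero
    fn notify_flushed(&mut self, pid: Pid, sid: Sid) {
        if let Some(waiters) = self.pending.remove(&(pid, sid)) {
            for tx in waiters {
                let _ = tx.send(());
            }
        }
    }
}

fn main() {
    let mut watchers = FlushWatchers::default();
    let rx = watchers.wait_for_flush(1, 2);
    watchers.notify_flushed(1, 2);
    assert!(futures::executor::block_on(rx).is_ok());
}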
|
||||
|
||||
pub(crate) async fn shutdown_manager(&self, receiver: oneshot::Receiver<()>) {
|
||||
trace!("start shutdown_manager");
|
||||
receiver.await.unwrap();
|
||||
self.closed.store(true, Ordering::Relaxed);
|
||||
debug!("shutting down all BParticipants gracefully");
|
||||
let mut participants = self.participants.write().await;
|
||||
for (pid, (_, _, _, sender)) in participants.drain() {
|
||||
trace!(?pid, "shutting down BParticipants");
|
||||
sender.send(()).unwrap();
|
||||
}
|
||||
trace!("stop shutdown_manager");
|
||||
}
|
||||
|
||||
pub async fn channel_creator(
|
||||
pub(crate) async fn channel_creator(
|
||||
channel_ids: Arc<AtomicU64>,
|
||||
local_pid: Pid,
|
||||
addr: Address,
|
||||
end_receiver: oneshot::Receiver<()>,
|
||||
pool: Arc<ThreadPool>,
|
||||
part_out_sender: mpsc::UnboundedSender<(Cid, Frame)>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid)>,
|
||||
unknown_channels: Arc<
|
||||
RwLock<
|
||||
HashMap<
|
||||
Cid,
|
||||
(
|
||||
mpsc::UnboundedSender<Frame>,
|
||||
Option<oneshot::Sender<Participant>>,
|
||||
),
|
||||
>,
|
||||
>,
|
||||
>,
|
||||
configured_sender: mpsc::UnboundedSender<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
unknown_channels: Arc<RwLock<HashMap<Cid, UnknownChannelInfo>>>,
|
||||
result_sender: oneshot::Sender<io::Result<()>>,
|
||||
) {
|
||||
info!(?addr, "start up channel creator");
|
||||
match addr {
|
||||
Address::Tcp(addr) => {
|
||||
let listener = async_std::net::TcpListener::bind(addr).await.unwrap();
|
||||
let listener = match net::TcpListener::bind(addr).await {
|
||||
Ok(listener) => {
|
||||
result_sender.send(Ok(())).unwrap();
|
||||
listener
|
||||
},
|
||||
Err(e) => {
|
||||
info!(
|
||||
?addr,
|
||||
?e,
|
||||
"listener couldn't be started due to error on tcp bind"
|
||||
);
|
||||
result_sender.send(Err(e)).unwrap();
|
||||
return;
|
||||
},
|
||||
};
|
||||
trace!(?addr, "listener bound");
|
||||
let mut incoming = listener.incoming();
|
||||
let mut end_receiver = end_receiver.fuse();
|
||||
while let Some(stream) = select! {
|
||||
@ -366,7 +490,118 @@ impl Scheduler {
|
||||
_ = end_receiver => None,
|
||||
} {
|
||||
let stream = stream.unwrap();
|
||||
info!("Accepting TCP from: {}", stream.peer_addr().unwrap());
|
||||
info!("Accepting Tcp from: {}", stream.peer_addr().unwrap());
|
||||
Self::init_protocol(
|
||||
&channel_ids,
|
||||
local_pid,
|
||||
addr,
|
||||
&pool,
|
||||
&part_out_sender,
|
||||
&configured_sender,
|
||||
&unknown_channels,
|
||||
Protocols::Tcp(TcpProtocol::new(stream)),
|
||||
None,
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
},
|
||||
Address::Udp(addr) => {
|
||||
let socket = match net::UdpSocket::bind(addr).await {
|
||||
Ok(socket) => {
|
||||
result_sender.send(Ok(())).unwrap();
|
||||
Arc::new(socket)
|
||||
},
|
||||
Err(e) => {
|
||||
info!(
|
||||
?addr,
|
||||
?e,
|
||||
"listener couldn't be started due to error on udp bind"
|
||||
);
|
||||
result_sender.send(Err(e)).unwrap();
|
||||
return;
|
||||
},
|
||||
};
|
||||
trace!(?addr, "listener bound");
|
||||
// receiving is done from here and will be piped to the protocol, as UDP does
// not have any state
|
||||
let mut listeners = HashMap::new();
|
||||
let mut end_receiver = end_receiver.fuse();
|
||||
let mut data = [0u8; 9216];
|
||||
while let Ok((size, remote_addr)) = select! {
|
||||
next = socket.recv_from(&mut data).fuse() => next,
|
||||
_ = end_receiver => Err(std::io::Error::new(std::io::ErrorKind::Other, "")),
|
||||
} {
|
||||
let mut datavec = Vec::with_capacity(size);
|
||||
datavec.extend_from_slice(&data[0..size]);
|
||||
if !listeners.contains_key(&remote_addr) {
|
||||
info!("Accepting Udp from: {}", &remote_addr);
|
||||
let (udp_data_sender, udp_data_receiver) = mpsc::unbounded::<Vec<u8>>();
|
||||
listeners.insert(remote_addr.clone(), udp_data_sender);
|
||||
let protocol = Protocols::Udp(UdpProtocol::new(
|
||||
socket.clone(),
|
||||
remote_addr,
|
||||
udp_data_receiver,
|
||||
));
|
||||
Self::init_protocol(
|
||||
&channel_ids,
|
||||
local_pid,
|
||||
addr,
|
||||
&pool,
|
||||
&part_out_sender,
|
||||
&configured_sender,
|
||||
&unknown_channels,
|
||||
protocol,
|
||||
None,
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
let udp_data_sender = listeners.get_mut(&remote_addr).unwrap();
|
||||
udp_data_sender.send(datavec).await.unwrap();
|
||||
}
|
||||
},
|
||||
_ => unimplemented!(),
|
||||
}
|
||||
info!(?addr, "ending channel creator");
|
||||
}
|
||||
|
||||
pub(crate) async fn udp_single_channel_connect(
|
||||
socket: Arc<net::UdpSocket>,
|
||||
mut udp_data_sender: mpsc::UnboundedSender<Vec<u8>>,
|
||||
) {
|
||||
let addr = socket.local_addr();
|
||||
info!(?addr, "start udp_single_channel_connect");
|
||||
//TODO: implement real closing
|
||||
let (_end_sender, end_receiver) = oneshot::channel::<()>();
|
||||
|
||||
// receiving is done from here and will be piped to the protocol, as UDP does
// not have any state
|
||||
let mut end_receiver = end_receiver.fuse();
|
||||
let mut data = [0u8; 9216];
|
||||
while let Ok(size) = select! {
|
||||
next = socket.recv(&mut data).fuse() => next,
|
||||
_ = end_receiver => Err(std::io::Error::new(std::io::ErrorKind::Other, "")),
|
||||
} {
|
||||
let mut datavec = Vec::with_capacity(size);
|
||||
datavec.extend_from_slice(&data[0..size]);
|
||||
udp_data_sender.send(datavec).await.unwrap();
|
||||
}
|
||||
info!(?addr, "stop udp_single_channel_connect");
|
||||
}
|
||||
|
||||
async fn init_protocol(
|
||||
channel_ids: &Arc<AtomicU64>,
|
||||
local_pid: Pid,
|
||||
addr: std::net::SocketAddr,
|
||||
pool: &Arc<ThreadPool>,
|
||||
part_out_sender: &mpsc::UnboundedSender<(Cid, Frame)>,
|
||||
configured_sender: &mpsc::UnboundedSender<(Cid, Pid, Sid, oneshot::Sender<()>)>,
|
||||
unknown_channels: &Arc<RwLock<HashMap<Cid, UnknownChannelInfo>>>,
|
||||
protocol: Protocols,
|
||||
pid_sender: Option<oneshot::Sender<io::Result<Participant>>>,
|
||||
send_handshake: bool,
|
||||
) {
|
||||
let (mut part_in_sender, part_in_receiver) = mpsc::unbounded::<Frame>();
|
||||
//channels are unknown till PID is known!
|
||||
/* When A connects to a NETWORK, we, the listener, answer with a Handshake.
|
||||
@@ -376,11 +611,13 @@ impl Scheduler {
|
||||
*/
|
||||
let cid = channel_ids.fetch_add(1, Ordering::Relaxed);
|
||||
let channel = Channel::new(cid, local_pid);
|
||||
if send_handshake {
|
||||
channel.send_handshake(&mut part_in_sender).await;
|
||||
}
|
||||
pool.spawn_ok(
|
||||
channel
|
||||
.run(
|
||||
stream,
|
||||
protocol,
|
||||
part_in_receiver,
|
||||
part_out_sender.clone(),
|
||||
configured_sender.clone(),
|
||||
@@ -390,260 +627,6 @@ impl Scheduler {
|
||||
unknown_channels
|
||||
.write()
|
||||
.await
|
||||
.insert(cid, (part_in_sender, None));
|
||||
}
|
||||
},
|
||||
_ => unimplemented!(),
|
||||
}
|
||||
info!(?addr, "ending channel creator");
|
||||
.insert(cid, (part_in_sender, pid_sender));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
use crate::{
|
||||
async_serde,
|
||||
channel::{Channel, ChannelProtocol, ChannelProtocols},
|
||||
controller::Controller,
|
||||
metrics::NetworkMetrics,
|
||||
prios::PrioManager,
|
||||
tcp::TcpChannel,
|
||||
types::{CtrlMsg, Pid, RtrnMsg, Sid, TokenObjects},
|
||||
};
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc,
|
||||
mpsc::TryRecvError,
|
||||
Arc,
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
use tlid;
|
||||
use tracing::*;
|
||||
use crate::types::Protocols;
|
||||
use crate::frames::{ChannelFrame, ParticipantFrame, StreamFrame, Frame};
|
||||
|
||||
/*
|
||||
The worker lives in its own thread and only communicates with the outside via a Channel.

Prios are done per participant, but their throughput is split equally.
That allows independent calculation of prios (no global hotspot) while no Participant is starved, as the total throughput is measured and approximated :)

streams are per participant, and channels are per participant; streams don't have a specific channel!
|
||||
*/
|
||||
|
||||
use async_std::sync::RwLock;
|
||||
use async_std::io::prelude::*;
|
||||
use crate::async_serde::{SerializeFuture, DeserializeFuture};
|
||||
use uvth::ThreadPoolBuilder;
|
||||
use async_std::stream::Stream;
|
||||
use async_std::sync::{self, Sender, Receiver};
|
||||
use crate::types::{VELOREN_MAGIC_NUMBER, VELOREN_NETWORK_VERSION,};
|
||||
use crate::message::InCommingMessage;
|
||||
|
||||
use futures::channel::mpsc;
|
||||
use futures::sink::SinkExt;
|
||||
use futures::{select, FutureExt};
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BStream {
|
||||
sid: Sid,
|
||||
prio: u8,
|
||||
promises: u8,
|
||||
}
|
||||
|
||||
struct BChannel {
|
||||
remote_pid: Option<Pid>,
|
||||
stream: RwLock<async_std::net::TcpStream>,
|
||||
send_stream: Sender<Frame>,
|
||||
recv_stream: Receiver<Frame>,
|
||||
send_participant: Sender<Frame>,
|
||||
recv_participant: Receiver<Frame>,
|
||||
|
||||
send_handshake: bool,
|
||||
send_pid: bool,
|
||||
send_shutdown: bool,
|
||||
recv_handshake: bool,
|
||||
recv_pid: bool,
|
||||
recv_shutdown: bool,
|
||||
}
|
||||
|
||||
struct BAcceptor {
|
||||
listener: RwLock<async_std::net::TcpListener>,
|
||||
}
|
||||
|
||||
struct BParticipant {
|
||||
remote_pid: Pid,
|
||||
channels: HashMap<Protocols, Vec<BChannel>>,
|
||||
streams: Vec<BStream>,
|
||||
sid_pool: tlid::Pool<tlid::Wrapping<Sid>>,
|
||||
prios: RwLock<PrioManager>,
|
||||
closed: AtomicBool,
|
||||
}
|
||||
|
||||
pub(crate) struct Scheduler {
|
||||
local_pid: Pid,
|
||||
metrics: Arc<Option<NetworkMetrics>>,
|
||||
participants: HashMap<Pid, BParticipant>,
|
||||
pending_channels: HashMap<Protocols, Vec<BChannel>>,
|
||||
/* ctrl_rx: Receiver<CtrlMsg>,
|
||||
* rtrn_tx: mpsc::Sender<RtrnMsg>, */
|
||||
}
|
||||
|
||||
impl BStream {
|
||||
|
||||
}
|
||||
|
||||
impl BChannel {
|
||||
/*
|
||||
/// Execute when ready to read
|
||||
pub async fn recv(&self) -> Vec<Frame> {
|
||||
let mut buffer: [u8; 2000] = [0; 2000];
|
||||
let read = self.stream.write().await.read(&mut buffer).await;
|
||||
match read {
|
||||
Ok(n) => {
|
||||
let x = DeserializeFuture::new(buffer[0..n].to_vec(), &ThreadPoolBuilder::new().build()).await;
|
||||
return vec!(x);
|
||||
},
|
||||
Err(e) => {
|
||||
panic!("woops {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Execute when ready to write
|
||||
pub async fn send<I: std::iter::Iterator<Item = Frame>>(&self, frames: &mut I) {
|
||||
for frame in frames {
|
||||
let x = SerializeFuture::new(frame, &ThreadPoolBuilder::new().build()).await;
|
||||
self.stream.write().await.write_all(&x).await;
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
pub fn get_tx(&self) -> &Sender<Frame> {
|
||||
&self.send_stream
|
||||
}
|
||||
|
||||
pub fn get_rx(&self) -> &Receiver<Frame> {
|
||||
&self.recv_stream
|
||||
}
|
||||
|
||||
pub fn get_participant_tx(&self) -> &Sender<Frame> {
|
||||
&self.send_participant
|
||||
}
|
||||
|
||||
pub fn get_participant_rx(&self) -> &Receiver<Frame> {
|
||||
&self.recv_participant
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
impl BParticipant {
|
||||
pub async fn read(&self) {
|
||||
while self.closed.load(Ordering::Relaxed) {
|
||||
for channels in self.channels.values() {
|
||||
for channel in channels.iter() {
|
||||
//let frames = channel.recv().await;
|
||||
let frame = channel.get_rx().recv().await.unwrap();
|
||||
match frame {
|
||||
Frame::Channel(cf) => channel.handle(cf).await,
|
||||
Frame::Participant(pf) => self.handle(pf).await,
|
||||
Frame::Stream(sf) => {},
|
||||
}
|
||||
}
|
||||
}
|
||||
async_std::task::sleep(std::time::Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn write(&self) {
|
||||
let mut frames = VecDeque::<(u8, StreamFrame)>::new();
|
||||
while self.closed.load(Ordering::Relaxed) {
|
||||
let todo_synced_amount_and_reasonable_choosen_throughput_based_on_feedback = 100;
|
||||
self.prios.write().await.fill_frames(
|
||||
todo_synced_amount_and_reasonable_choosen_throughput_based_on_feedback,
|
||||
&mut frames,
|
||||
);
|
||||
for (promises, frame) in frames.drain(..) {
|
||||
let channel = self.chose_channel(promises);
|
||||
channel.get_tx().send(Frame::Stream(frame)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle(&self, frame: ParticipantFrame) {
|
||||
info!("got a frame to handle");
|
||||
/*
|
||||
match frame {
|
||||
ParticipantFrame::OpenStream {
|
||||
sid,
|
||||
prio,
|
||||
promises,
|
||||
} => {
|
||||
if let Some(pid) = self.remote_pid {
|
||||
let (msg_tx, msg_rx) = futures::channel::mpsc::unbounded::<InCommingMessage>();
|
||||
let stream = IntStream::new(sid, prio, promises.clone(), msg_tx);
|
||||
|
||||
trace!(?self.streams, "-OPEN STREAM- going to modify streams");
|
||||
self.streams.push(stream);
|
||||
trace!(?self.streams, "-OPEN STREAM- did to modify streams");
|
||||
info!("opened a stream");
|
||||
if let Err(err) = rtrn_tx.send(RtrnMsg::OpendStream {
|
||||
pid,
|
||||
sid,
|
||||
prio,
|
||||
msg_rx,
|
||||
promises,
|
||||
}) {
|
||||
error!(?err, "couldn't notify of opened stream");
|
||||
}
|
||||
} else {
|
||||
error!("called OpenStream before PartcipantID!");
|
||||
}
|
||||
},
|
||||
ParticipantFrame::CloseStream { sid } => {
|
||||
if let Some(pid) = self.remote_pid {
|
||||
trace!(?self.streams, "-CLOSE STREAM- going to modify streams");
|
||||
self.streams.retain(|stream| stream.sid() != sid);
|
||||
trace!(?self.streams, "-CLOSE STREAM- did to modify streams");
|
||||
info!("closed a stream");
|
||||
if let Err(err) = rtrn_tx.send(RtrnMsg::ClosedStream { pid, sid }) {
|
||||
error!(?err, "couldn't notify of closed stream");
|
||||
}
|
||||
}
|
||||
},
|
||||
}*/
|
||||
}
|
||||
|
||||
/// Endless task that will cover sending for Participant
|
||||
pub async fn run(&mut self) {
|
||||
let (incomming_sender, incomming_receiver) = mpsc::unbounded();
|
||||
futures::join!(self.read(), self.write());
|
||||
}
|
||||
|
||||
pub fn chose_channel(&self,
|
||||
promises: u8, /* */
|
||||
) -> &BChannel {
|
||||
for v in self.channels.values() {
|
||||
for c in v {
|
||||
return c;
|
||||
}
|
||||
}
|
||||
panic!("No Channel!");
|
||||
}
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
pub fn new(
|
||||
pid: Pid,
|
||||
metrics: Arc<Option<NetworkMetrics>>,
|
||||
sid_backup_per_participant: Arc<RwLock<HashMap<Pid, tlid::Pool<tlid::Checked<Sid>>>>>,
|
||||
token_pool: tlid::Pool<tlid::Wrapping<usize>>,
|
||||
) -> Self {
|
||||
panic!("asd");
|
||||
}
|
||||
|
||||
pub fn run(&mut self) { loop {} }
|
||||
}
|
||||
*/
|
||||
|
@@ -1 +0,0 @@
|
||||
|
@@ -1,8 +1,6 @@
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::*;
|
||||
|
||||
pub type Sid = u64;
|
||||
pub type Mid = u64;
|
||||
pub type Cid = u64;
|
||||
pub type Prio = u8;
|
||||
@@ -17,20 +15,62 @@ pub const PROMISES_ENCRYPTED: Promises = 16;
|
||||
|
||||
pub(crate) const VELOREN_MAGIC_NUMBER: &str = "VELOREN";
|
||||
pub const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 2, 0];
|
||||
pub(crate) const STREAM_ID_OFFSET1: Sid = 0;
|
||||
pub(crate) const STREAM_ID_OFFSET2: Sid = u64::MAX / 2;
|
||||
|
||||
pub(crate) struct NetworkBuffer {
|
||||
pub(crate) data: Vec<u8>,
|
||||
pub(crate) read_idx: usize,
|
||||
pub(crate) write_idx: usize,
|
||||
}
|
||||
pub(crate) const STREAM_ID_OFFSET1: Sid = Sid::new(0);
|
||||
pub(crate) const STREAM_ID_OFFSET2: Sid = Sid::new(u64::MAX / 2);
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]
|
||||
pub struct Pid {
|
||||
internal: u128,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]
|
||||
pub(crate) struct Sid {
|
||||
internal: u64,
|
||||
}
|
||||
|
||||
// Used for Communication between Channel <----(TCP/UDP)----> Channel
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub(crate) enum Frame {
|
||||
Handshake {
|
||||
magic_number: String,
|
||||
version: [u32; 3],
|
||||
},
|
||||
ParticipantId {
|
||||
pid: Pid,
|
||||
},
|
||||
Shutdown, /* Shuts down this channel gracefully; if all channels are shut down, the Participant
* is deleted */
|
||||
OpenStream {
|
||||
sid: Sid,
|
||||
prio: Prio,
|
||||
promises: Promises,
|
||||
},
|
||||
CloseStream {
|
||||
sid: Sid,
|
||||
},
|
||||
DataHeader {
|
||||
mid: Mid,
|
||||
sid: Sid,
|
||||
length: u64,
|
||||
},
|
||||
Data {
|
||||
id: Mid,
|
||||
start: u64,
|
||||
data: Vec<u8>,
|
||||
},
|
||||
/* WARNING: Sending RAW is only used for debug purposes in case someone writes a new API
* against the veloren Server! */
|
||||
Raw(Vec<u8>),
|
||||
}
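// Illustration only, not part of this commit: since Frame derives Serialize and
// Deserialize, a wire encoding could look roughly like this bincode round trip.
// The helper name is hypothetical; the real channel/protocol code decides the
// actual framing.
fn frame_roundtrip_sketch(frame: &Frame) -> Frame {
    // encode the frame into bytes that could be written to a TCP stream or UDP datagram
    let bytes = bincode::serialize(frame).expect("Frame serialization should not fail");
    // decode it again on the receiving side
    bincode::deserialize(&bytes).expect("a complete, valid Frame buffer")
}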
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub(crate) enum Requestor {
|
||||
User,
|
||||
Api,
|
||||
Scheduler,
|
||||
Remote,
|
||||
}
|
||||
|
||||
impl Pid {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
@@ -49,88 +89,34 @@ impl Pid {
|
||||
}
|
||||
}
|
||||
|
||||
/// NetworkBuffer to use for streamed access
/// Valid data is between read_idx and write_idx!
/// Everything before read_idx is already processed and no longer important;
/// everything after write_idx is either 0 or random buffered data.
|
||||
impl NetworkBuffer {
|
||||
pub(crate) fn new() -> Self {
|
||||
NetworkBuffer {
|
||||
data: vec![0; 2048],
|
||||
read_idx: 0,
|
||||
write_idx: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_write_slice(&mut self, min_size: usize) -> &mut [u8] {
|
||||
if self.data.len() < self.write_idx + min_size {
|
||||
trace!(
|
||||
?self,
|
||||
?min_size,
|
||||
"need to resize because buffer is to small"
|
||||
);
|
||||
self.data.resize(self.write_idx + min_size, 0);
|
||||
}
|
||||
&mut self.data[self.write_idx..]
|
||||
}
|
||||
|
||||
pub(crate) fn actually_written(&mut self, cnt: usize) { self.write_idx += cnt; }
|
||||
|
||||
pub(crate) fn get_read_slice(&self) -> &[u8] { &self.data[self.read_idx..self.write_idx] }
|
||||
|
||||
pub(crate) fn actually_read(&mut self, cnt: usize) {
|
||||
self.read_idx += cnt;
|
||||
if self.read_idx == self.write_idx {
|
||||
if self.read_idx > 10485760 {
|
||||
trace!(?self, "buffer empty, resetting indices");
|
||||
}
|
||||
self.read_idx = 0;
|
||||
self.write_idx = 0;
|
||||
}
|
||||
if self.write_idx > 10485760 {
|
||||
if self.write_idx - self.read_idx < 65536 {
|
||||
debug!(
|
||||
?self,
|
||||
"This buffer is filled over 10 MB, but the actual data diff is less then \
|
||||
65kB, which is a sign of stressing this connection much as always new data \
|
||||
comes in - nevertheless, in order to handle this we will remove some data \
|
||||
now so that this buffer doesn't grow endlessly"
|
||||
);
|
||||
let mut i2 = 0;
|
||||
for i in self.read_idx..self.write_idx {
|
||||
self.data[i2] = self.data[i];
|
||||
i2 += 1;
|
||||
}
|
||||
self.read_idx = 0;
|
||||
self.write_idx = i2;
|
||||
}
|
||||
if self.data.len() > 67108864 {
|
||||
warn!(
|
||||
?self,
|
||||
"over 64Mbyte used, something seems fishy, len: {}",
|
||||
self.data.len()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
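// Illustration only, not part of this commit: a minimal sketch of the intended
// read/write cycle of NetworkBuffer, using only the methods shown above. The
// function name is hypothetical.
fn network_buffer_usage_sketch(incoming: &[u8]) -> Vec<u8> {
    let mut buf = NetworkBuffer::new();
    // reserve space, copy the received bytes in, then advance write_idx
    let slice = buf.get_write_slice(incoming.len());
    slice[..incoming.len()].copy_from_slice(incoming);
    buf.actually_written(incoming.len());
    // consume everything between read_idx and write_idx, then advance read_idx
    let consumed = buf.get_read_slice().to_vec();
    buf.actually_read(consumed.len());
    consumed
}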
|
||||
|
||||
impl std::fmt::Debug for NetworkBuffer {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"NetworkBuffer(len: {}, read: {}, write: {})",
|
||||
self.data.len(),
|
||||
self.read_idx,
|
||||
self.write_idx
|
||||
)
|
||||
}
|
||||
impl Sid {
|
||||
pub const fn new(internal: u64) -> Self { Self { internal } }
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for Pid {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.internal)
|
||||
//only print last 6 chars of number as full u128 logs are unreadable
|
||||
write!(f, "{}", self.internal.rem_euclid(100000))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::AddAssign for Sid {
|
||||
fn add_assign(&mut self, other: Self) {
|
||||
*self = Self {
|
||||
internal: self.internal + other.internal,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for Sid {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
//only print last 6 digits of the number as full u64 logs are unreadable
|
||||
write!(f, "{}", self.internal.rem_euclid(1000000))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for Sid {
|
||||
fn from(internal: u64) -> Self { Sid { internal } }
|
||||
}
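// Illustration only, not part of this commit: a small sketch of how the
// AddAssign and From<u64> impls above can be combined to hand out stream ids,
// where presumably one side starts its counter at STREAM_ID_OFFSET1 and the
// other at STREAM_ID_OFFSET2. The function name is hypothetical.
fn next_sid_sketch(counter: &mut Sid) -> Sid {
    let current = *counter; // Sid is Copy
    *counter += Sid::from(1); // uses the AddAssign and From<u64> impls above
    current
}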
|
||||
|
@@ -1 +0,0 @@
|
||||
|
@@ -1,12 +1,17 @@
|
||||
use lazy_static::*;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::atomic::{AtomicU16, Ordering},
|
||||
sync::{
|
||||
atomic::{AtomicU16, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
use veloren_network::{Address, Network, Participant, Pid, Stream, PROMISES_NONE};
|
||||
|
||||
pub fn setup(tracing: bool, mut sleep: u64) -> (u64, u64) {
|
||||
if tracing {
|
||||
@@ -18,18 +23,14 @@ pub fn setup(tracing: bool, mut sleep: u64) -> (u64, u64) {
|
||||
|
||||
let _subscriber = if tracing {
|
||||
let filter = EnvFilter::from_default_env()
|
||||
//.add_directive("[worker]=trace".parse().unwrap())
|
||||
.add_directive("trace".parse().unwrap())
|
||||
.add_directive("async_std::task::block_on=warn".parse().unwrap())
|
||||
.add_directive("veloren_network::tests=trace".parse().unwrap())
|
||||
.add_directive("veloren_network::worker=debug".parse().unwrap())
|
||||
.add_directive("veloren_network::controller=trace".parse().unwrap())
|
||||
.add_directive("veloren_network::channel=trace".parse().unwrap())
|
||||
.add_directive("veloren_network::message=trace".parse().unwrap())
|
||||
.add_directive("veloren_network::metrics=trace".parse().unwrap())
|
||||
.add_directive("veloren_network::types=trace".parse().unwrap())
|
||||
.add_directive("veloren_network::mpsc=debug".parse().unwrap())
|
||||
.add_directive("veloren_network::udp=debug".parse().unwrap())
|
||||
.add_directive("veloren_network::tcp=debug".parse().unwrap());
|
||||
.add_directive("veloren_network::types=trace".parse().unwrap());
|
||||
|
||||
Some(
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
@@ -47,6 +48,30 @@ pub fn setup(tracing: bool, mut sleep: u64) -> (u64, u64) {
|
||||
(0, 0)
|
||||
}
|
||||
|
||||
pub async fn network_participant_stream(
|
||||
addr: Address,
|
||||
) -> (
|
||||
Network,
|
||||
Arc<Participant>,
|
||||
Stream,
|
||||
Network,
|
||||
Arc<Participant>,
|
||||
Stream,
|
||||
) {
|
||||
let pool = ThreadPoolBuilder::new().num_threads(2).build();
|
||||
let n_a = Network::new(Pid::fake(1), &pool);
|
||||
let n_b = Network::new(Pid::fake(2), &pool);
|
||||
|
||||
n_a.listen(addr.clone()).await.unwrap();
|
||||
let p1_b = n_b.connect(addr).await.unwrap();
|
||||
let p1_a = n_a.connected().await.unwrap();
|
||||
|
||||
let s1_a = p1_a.open(10, PROMISES_NONE).await.unwrap();
|
||||
let s1_b = p1_b.opened().await.unwrap();
|
||||
|
||||
(n_a, p1_a, s1_a, n_b, p1_b, s1_b)
|
||||
}
|
||||
|
||||
pub fn tcp() -> veloren_network::Address {
|
||||
lazy_static! {
|
||||
static ref PORTS: AtomicU16 = AtomicU16::new(5000);
|
||||
@@ -54,3 +79,11 @@ pub fn tcp() -> veloren_network::Address {
|
||||
let port = PORTS.fetch_add(1, Ordering::Relaxed);
|
||||
veloren_network::Address::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
|
||||
}
|
||||
|
||||
pub fn udp() -> veloren_network::Address {
|
||||
lazy_static! {
|
||||
static ref PORTS: AtomicU16 = AtomicU16::new(5000);
|
||||
}
|
||||
let port = PORTS.fetch_add(1, Ordering::Relaxed);
|
||||
veloren_network::Address::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
|
||||
}
|
||||
|
@@ -1,77 +1,133 @@
|
||||
use async_std::{sync::RwLock, task};
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
executor::ThreadPool,
|
||||
sink::SinkExt,
|
||||
};
|
||||
use std::sync::{atomic::AtomicU64, Arc};
|
||||
use veloren_network::{Network, Pid, Scheduler};
|
||||
use async_std::task;
|
||||
use task::block_on;
|
||||
use veloren_network::StreamError;
|
||||
mod helper;
|
||||
use std::collections::HashMap;
|
||||
use tracing::*;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
#[test]
|
||||
fn network() {
|
||||
let (_, _) = helper::setup(true, 100);
|
||||
{
|
||||
let addr1 = helper::tcp();
|
||||
let pool = ThreadPoolBuilder::new().num_threads(2).build();
|
||||
let n1 = Network::new(Pid::fake(1), &pool);
|
||||
let n2 = Network::new(Pid::fake(2), &pool);
|
||||
|
||||
n1.listen(addr1.clone()).unwrap();
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
|
||||
let pid1 = task::block_on(n2.connect(addr1)).unwrap();
|
||||
warn!("yay connected");
|
||||
|
||||
let pid2 = task::block_on(n1.connected()).unwrap();
|
||||
warn!("yay connected");
|
||||
|
||||
let mut sid1_p1 = task::block_on(pid1.open(10, 0)).unwrap();
|
||||
let mut sid1_p2 = task::block_on(pid2.opened()).unwrap();
|
||||
|
||||
task::block_on(sid1_p1.send("Hello World")).unwrap();
|
||||
let m1: Result<String, _> = task::block_on(sid1_p2.recv());
|
||||
assert_eq!(m1, Ok("Hello World".to_string()));
|
||||
|
||||
//assert_eq!(pid, Pid::fake(1));
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_secs(10));
|
||||
}
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
}
|
||||
use helper::{network_participant_stream, tcp, udp};
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn scheduler() {
|
||||
let (_, _) = helper::setup(true, 100);
|
||||
let addr = helper::tcp();
|
||||
let (scheduler, mut listen_tx, _, _, _) = Scheduler::new(Pid::new());
|
||||
task::block_on(listen_tx.send(addr)).unwrap();
|
||||
task::block_on(scheduler.run());
|
||||
fn network_20s() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, _, _n_b, _, _) = block_on(network_participant_stream(tcp()));
|
||||
std::thread::sleep(std::time::Duration::from_secs(30));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn channel_creator_test() {
|
||||
let (_, _) = helper::setup(true, 100);
|
||||
let (_end_sender, end_receiver) = oneshot::channel::<()>();
|
||||
let (part_out_sender, _part_out_receiver) = mpsc::unbounded();
|
||||
let (configured_sender, _configured_receiver) = mpsc::unbounded::<(u64, Pid, u64)>();
|
||||
let addr = helper::tcp();
|
||||
task::block_on(async {
|
||||
Scheduler::channel_creator(
|
||||
Arc::new(AtomicU64::new(0)),
|
||||
Pid::new(),
|
||||
addr,
|
||||
end_receiver,
|
||||
Arc::new(ThreadPool::new().unwrap()),
|
||||
part_out_sender,
|
||||
configured_sender,
|
||||
Arc::new(RwLock::new(HashMap::new())),
|
||||
)
|
||||
.await;
|
||||
});
|
||||
fn close_network() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_, _p1_a, mut s1_a, _, _p1_b, mut s1_b) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(30));
|
||||
|
||||
assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
|
||||
let msg1: Result<String, _> = block_on(s1_b.recv());
|
||||
assert_eq!(msg1, Err(StreamError::StreamClosed));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn close_participant() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (n_a, p1_a, mut s1_a, n_b, p1_b, mut s1_b) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
block_on(n_a.disconnect(p1_a)).unwrap();
|
||||
block_on(n_b.disconnect(p1_b)).unwrap();
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(30));
|
||||
assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
|
||||
assert_eq!(
|
||||
block_on(s1_b.recv::<String>()),
|
||||
Err(StreamError::StreamClosed)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn close_stream() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, _) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
// s1_b is dropped directly while s1_a isn't
|
||||
std::thread::sleep(std::time::Duration::from_millis(30));
|
||||
|
||||
assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
|
||||
assert_eq!(
|
||||
block_on(s1_a.recv::<String>()),
|
||||
Err(StreamError::StreamClosed)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stream_simple() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
s1_a.send("Hello World").unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stream_simple_3msg() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
s1_a.send("Hello World").unwrap();
|
||||
s1_a.send(1337).unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok(1337));
|
||||
s1_a.send("3rdMessage").unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stream_simple_3msg_then_close() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
s1_a.send(1u8).unwrap();
|
||||
s1_a.send(42).unwrap();
|
||||
s1_a.send("3rdMessage").unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok(1u8));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok(42));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
|
||||
drop(s1_a);
|
||||
std::thread::sleep(std::time::Duration::from_millis(30));
|
||||
assert_eq!(s1_b.send("Hello World"), Err(StreamError::StreamClosed));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stream_send_first_then_receive() {
|
||||
// recv should still be possible even if the stream got closed, as long as messages are still in the queue
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(tcp()));
|
||||
|
||||
s1_a.send(1u8).unwrap();
|
||||
s1_a.send(42).unwrap();
|
||||
s1_a.send("3rdMessage").unwrap();
|
||||
drop(s1_a);
|
||||
std::thread::sleep(std::time::Duration::from_millis(2000));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok(1u8));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok(42));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
|
||||
assert_eq!(s1_b.send("Hello World"), Err(StreamError::StreamClosed));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stream_simple_udp() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(udp()));
|
||||
|
||||
s1_a.send("Hello World").unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stream_simple_udp_3msg() {
|
||||
let (_, _) = helper::setup(false, 0);
|
||||
let (_n_a, _, mut s1_a, _n_b, _, mut s1_b) = block_on(network_participant_stream(udp()));
|
||||
|
||||
s1_a.send("Hello World").unwrap();
|
||||
s1_a.send(1337).unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||
assert_eq!(block_on(s1_b.recv()), Ok(1337));
|
||||
s1_a.send("3rdMessage").unwrap();
|
||||
assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
|
||||
}
|
||||
|
@@ -1,178 +0,0 @@
|
||||
use chrono::prelude::*;
|
||||
use clap::{App, Arg, SubCommand};
|
||||
use futures::executor::block_on;
|
||||
use network::{Address, Network, Promise, Stream};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::Arc,
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use uuid::Uuid;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum Msg {
|
||||
Ping(u64),
|
||||
Pong(u64),
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let matches = App::new("Veloren Speed Test Utility")
|
||||
.version("0.1.0")
|
||||
.author("Marcel Märtens <marcel.cochem@googlemail.com>")
|
||||
.about("Runs speedtests regarding different parameter to benchmark veloren-network")
|
||||
.subcommand(
|
||||
SubCommand::with_name("listen")
|
||||
.about("Runs the counter part that pongs all requests")
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.help("port to listen on"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("run").arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.help("port to connect too"),
|
||||
),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let filter = EnvFilter::from_default_env().add_directive("trace".parse().unwrap());
|
||||
//.add_directive("veloren_network::tests=trace".parse().unwrap());
|
||||
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
// all spans/events with a level higher than TRACE (e.g., info, warn, etc.)
|
||||
// will be written to stdout.
|
||||
.with_max_level(Level::TRACE)
|
||||
.with_env_filter(filter)
|
||||
// sets this to be the default, global subscriber for this application.
|
||||
.init();
|
||||
|
||||
if let Some(matches) = matches.subcommand_matches("listen") {
|
||||
let port = matches
|
||||
.value_of("port")
|
||||
.map_or(52000, |v| v.parse::<u16>().unwrap_or(52000));
|
||||
server(port);
|
||||
};
|
||||
if let Some(matches) = matches.subcommand_matches("run") {
|
||||
let port = matches
|
||||
.value_of("port")
|
||||
.map_or(52000, |v| v.parse::<u16>().unwrap_or(52000));
|
||||
client(port);
|
||||
};
|
||||
}
|
||||
|
||||
fn server(port: u16) {
|
||||
let thread_pool = Arc::new(
|
||||
ThreadPoolBuilder::new()
|
||||
.name("veloren-network-server".into())
|
||||
.build(),
|
||||
);
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
let server = Network::new(Uuid::new_v4(), thread_pool.clone());
|
||||
let address = Address::Tcp(SocketAddr::from(([127, 0, 0, 1], port)));
|
||||
server.listen(&address).unwrap(); //await
|
||||
thread::sleep(Duration::from_millis(10)); //TODO: listening still doesn't block correctly!
|
||||
println!("waiting for client");
|
||||
|
||||
let p1 = block_on(server.connected()).unwrap(); //remote representation of p1
|
||||
let mut s1 = block_on(p1.opened()).unwrap(); //remote representation of s1
|
||||
let mut s2 = block_on(p1.opened()).unwrap(); //remote representation of s2
|
||||
let t1 = thread::spawn(move || {
|
||||
if let Ok(Msg::Ping(id)) = block_on(s1.recv()) {
|
||||
thread::sleep(Duration::from_millis(3000));
|
||||
s1.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s1_1", Utc::now().time());
|
||||
}
|
||||
if let Ok(Msg::Ping(id)) = block_on(s1.recv()) {
|
||||
thread::sleep(Duration::from_millis(3000));
|
||||
s1.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s1_2", Utc::now().time());
|
||||
}
|
||||
});
|
||||
let t2 = thread::spawn(move || {
|
||||
if let Ok(Msg::Ping(id)) = block_on(s2.recv()) {
|
||||
thread::sleep(Duration::from_millis(1000));
|
||||
s2.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s2_1", Utc::now().time());
|
||||
}
|
||||
if let Ok(Msg::Ping(id)) = block_on(s2.recv()) {
|
||||
thread::sleep(Duration::from_millis(1000));
|
||||
s2.send(Msg::Pong(id)).unwrap();
|
||||
println!("[{}], send s2_2", Utc::now().time());
|
||||
}
|
||||
});
|
||||
t1.join().unwrap();
|
||||
t2.join().unwrap();
|
||||
thread::sleep(Duration::from_millis(50));
|
||||
}
|
||||
|
||||
async fn async_task1(mut s: Stream) -> u64 {
|
||||
s.send(Msg::Ping(100)).unwrap();
|
||||
println!("[{}], s1_1...", Utc::now().time());
|
||||
let m1: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s1_1: {:?}", Utc::now().time(), m1);
|
||||
thread::sleep(Duration::from_millis(1000));
|
||||
s.send(Msg::Ping(101)).unwrap();
|
||||
println!("[{}], s1_2...", Utc::now().time());
|
||||
let m2: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s1_2: {:?}", Utc::now().time(), m2);
|
||||
match m2.unwrap() {
|
||||
Msg::Pong(id) => id,
|
||||
_ => panic!("wrong answer"),
|
||||
}
|
||||
}
|
||||
|
||||
async fn async_task2(mut s: Stream) -> u64 {
|
||||
s.send(Msg::Ping(200)).unwrap();
|
||||
println!("[{}], s2_1...", Utc::now().time());
|
||||
let m1: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s2_1: {:?}", Utc::now().time(), m1);
|
||||
thread::sleep(Duration::from_millis(5000));
|
||||
s.send(Msg::Ping(201)).unwrap();
|
||||
println!("[{}], s2_2...", Utc::now().time());
|
||||
let m2: Result<Msg, _> = s.recv().await;
|
||||
println!("[{}], s2_2: {:?}", Utc::now().time(), m2);
|
||||
match m2.unwrap() {
|
||||
Msg::Pong(id) => id,
|
||||
_ => panic!("wrong answer"),
|
||||
}
|
||||
}
|
||||
|
||||
fn client(port: u16) {
|
||||
let thread_pool = Arc::new(
|
||||
ThreadPoolBuilder::new()
|
||||
.name("veloren-network-server".into())
|
||||
.build(),
|
||||
);
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
let client = Network::new(Uuid::new_v4(), thread_pool.clone());
|
||||
let address = Address::Tcp(SocketAddr::from(([127, 0, 0, 1], port)));
|
||||
thread::sleep(Duration::from_millis(3)); //TODO: listening still doesn't block correctly!
|
||||
|
||||
let p1 = block_on(client.connect(&address)).unwrap(); //remote representation of p1
|
||||
let s1 = p1.open(16, Promise::InOrder | Promise::NoCorrupt).unwrap(); //remote representation of s1
|
||||
let s2 = p1.open(16, Promise::InOrder | Promise::NoCorrupt).unwrap(); //remote representation of s2
|
||||
let before = Instant::now();
|
||||
block_on(async {
|
||||
let f1 = async_task1(s1);
|
||||
let f2 = async_task2(s2);
|
||||
let _ = futures::join!(f1, f2);
|
||||
});
|
||||
if before.elapsed() < Duration::from_secs(13) {
|
||||
println!("IT WORKS!");
|
||||
} else {
|
||||
println!("doesn't seem to work :/")
|
||||
}
|
||||
thread::sleep(Duration::from_millis(50));
|
||||
}
|
@@ -1,150 +0,0 @@
|
||||
use clap::{App, Arg, SubCommand};
|
||||
use futures::executor::block_on;
|
||||
use network::{Address, Network, Participant, Promise, Stream};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::Arc,
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::*;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use uuid::Uuid;
|
||||
use uvth::ThreadPoolBuilder;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum Msg {
|
||||
Ping { id: u64, data: Vec<u8> },
|
||||
Pong { id: u64, data: Vec<u8> },
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let matches = App::new("Veloren Speed Test Utility")
|
||||
.version("0.1.0")
|
||||
.author("Marcel Märtens <marcel.cochem@googlemail.com>")
|
||||
.about("Runs speedtests regarding different parameter to benchmark veloren-network")
|
||||
.subcommand(
|
||||
SubCommand::with_name("listen")
|
||||
.about("Runs the counter part that pongs all requests")
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.help("port to listen on"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("run").arg(
|
||||
Arg::with_name("port")
|
||||
.short("p")
|
||||
.long("port")
|
||||
.takes_value(true)
|
||||
.help("port to connect too"),
|
||||
), /*
|
||||
.arg(Arg::with_name("participants")
|
||||
.long("participants")
|
||||
.takes_value(true)
|
||||
.help("number of participants to open"))
|
||||
.arg(Arg::with_name("streams")
|
||||
.long("streams")
|
||||
.takes_value(true)
|
||||
.help("number of streams to open per participant"))*/
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let filter = EnvFilter::from_default_env().add_directive("error".parse().unwrap());
|
||||
//.add_directive("veloren_network::tests=trace".parse().unwrap());
|
||||
|
||||
tracing_subscriber::FmtSubscriber::builder()
|
||||
// all spans/events with a level higher than TRACE (e.g., info, warn, etc.)
|
||||
// will be written to stdout.
|
||||
.with_max_level(Level::TRACE)
|
||||
.with_env_filter(filter)
|
||||
// sets this to be the default, global subscriber for this application.
|
||||
.init();
|
||||
/*
|
||||
if let Some(matches) = matches.subcommand_matches("listen") {
|
||||
let port = matches
|
||||
.value_of("port")
|
||||
.map_or(52000, |v| v.parse::<u16>().unwrap_or(52000));
|
||||
server(port);
|
||||
};
|
||||
if let Some(matches) = matches.subcommand_matches("run") {
|
||||
let port = matches
|
||||
.value_of("port")
|
||||
.map_or(52000, |v| v.parse::<u16>().unwrap_or(52000));
|
||||
client(port);
|
||||
};*/
|
||||
thread::spawn(|| {
|
||||
server(52000);
|
||||
});
|
||||
thread::sleep(Duration::from_millis(3));
|
||||
client(52000);
|
||||
}
|
||||
|
||||
fn server(port: u16) {
|
||||
let thread_pool = Arc::new(
|
||||
ThreadPoolBuilder::new()
|
||||
.name("veloren-network-server".into())
|
||||
.build(),
|
||||
);
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
let server = Network::new(Uuid::new_v4(), thread_pool.clone());
|
||||
let address = Address::Tcp(SocketAddr::from(([127, 0, 0, 1], port)));
|
||||
//let address = Address::Mpsc(port as u64);
|
||||
//let address = Address::Udp(SocketAddr::from(([127, 0, 0, 1], port)));
|
||||
server.listen(&address).unwrap(); //await
|
||||
thread::sleep(Duration::from_millis(3)); //TODO: listening still doesn't block correctly!
|
||||
|
||||
loop {
|
||||
let p1 = block_on(server.connected()).unwrap(); //remote representation of p1
|
||||
let mut s1 = block_on(p1.opened()).unwrap(); //remote representation of s1
|
||||
loop {
|
||||
let m: Result<Option<Msg>, _> = block_on(s1.recv());
|
||||
match m {
|
||||
Ok(Some(Msg::Ping { id, data })) => {
|
||||
//s1.send(Msg::Pong {id, data});
|
||||
},
|
||||
Err(e) => {},
|
||||
_ => {},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn client(port: u16) {
|
||||
let thread_pool = Arc::new(
|
||||
ThreadPoolBuilder::new()
|
||||
.name("veloren-network-server".into())
|
||||
.build(),
|
||||
);
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
let client = Network::new(Uuid::new_v4(), thread_pool.clone());
|
||||
let address = Address::Tcp(SocketAddr::from(([127, 0, 0, 1], port)));
|
||||
//let address = Address::Mpsc(port as u64);
|
||||
//let address = Address::Udp(SocketAddr::from(([127, 0, 0, 1], port)));
|
||||
thread::sleep(Duration::from_millis(3)); //TODO: listening still doesn't block correctly!
|
||||
|
||||
loop {
|
||||
let p1 = block_on(client.connect(&address)).unwrap(); //remote representation of p1
|
||||
let mut s1 = p1.open(16, Promise::InOrder | Promise::NoCorrupt).unwrap(); //remote representation of s1
|
||||
let mut last = Instant::now();
|
||||
let mut id = 0u64;
|
||||
loop {
|
||||
s1.send(Msg::Ping {
|
||||
id,
|
||||
data: vec![0; 1000],
|
||||
});
|
||||
id += 1;
|
||||
if id.rem_euclid(1000000) == 0 {
|
||||
let new = Instant::now();
|
||||
let diff = new.duration_since(last);
|
||||
last = new;
|
||||
println!("1.000.000 took {}", diff.as_millis());
|
||||
}
|
||||
//let _: Result<Option<Msg>, _> = block_on(s1.recv());
|
||||
}
|
||||
}
|
||||
}
|