Merge branch 'xMAC94x/network_tokio' into 'master'

xMAC94x/network_tokio switch from `async_std` to `tokio`

See merge request veloren/veloren!1789
Marcel committed 2021-02-17 22:47:11 +00:00
commit c6d69d1196
52 changed files with 5631 additions and 4121 deletions
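For readers skimming the diff, the sketch below distills the caller-facing API change (illustrative only, not part of the commit): `Network::new` no longer returns a scheduler future that callers had to spawn on a thread pool; it now takes a shared tokio `Runtime`, which also drives the connection futures.

```rust
// Minimal sketch of the migration pattern, condensed from the client and example
// changes in the diff below; error handling is simplified for illustration.
use std::{net::SocketAddr, sync::Arc};
use tokio::runtime::Runtime;
use veloren_network::{Network, Pid, ProtocolAddr};

fn connect_to(addr: SocketAddr) {
    // One tokio runtime is created up front and shared via Arc.
    let runtime = Arc::new(Runtime::new().unwrap());

    // Before: let (network, scheduler) = Network::new(Pid::new());
    //         thread_pool.execute(scheduler);
    // After: the runtime is passed in; no scheduler future needs to be spawned.
    let network = Network::new(Pid::new(), Arc::clone(&runtime));

    // Futures are driven by the tokio runtime instead of a futures executor.
    let _participant = runtime
        .block_on(network.connect(ProtocolAddr::Tcp(addr)))
        .unwrap();
}
```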


@@ -46,6 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Cave scatter now includes all 6 gems.
 - Adjusted Stonework Defender loot table to remove mindflayer drops (bag, staff, glider).
 - Changed default controller key bindings
+- Improved network efficiency by a factor of ~10 by switching to tokio.
 ### Removed

Cargo.lock (generated)

@ -225,12 +225,6 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e"
[[package]]
name = "ascii"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109"
[[package]] [[package]]
name = "assets_manager" name = "assets_manager"
version = "0.4.3" version = "0.4.3"
@ -249,38 +243,25 @@ dependencies = [
] ]
[[package]] [[package]]
name = "async-std" name = "async-channel"
version = "1.5.0" version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "538ecb01eb64eecd772087e5b6f7540cbc917f047727339a472dafed2185b267" checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9"
dependencies = [ dependencies = [
"async-task", "concurrent-queue",
"crossbeam-channel 0.4.4", "event-listener",
"crossbeam-deque 0.7.3",
"crossbeam-utils 0.7.2",
"futures-core", "futures-core",
"futures-io",
"futures-timer 2.0.2",
"kv-log-macro",
"log",
"memchr",
"mio 0.6.23",
"mio-uds",
"num_cpus",
"once_cell",
"pin-project-lite 0.1.11",
"pin-utils",
"slab",
] ]
[[package]] [[package]]
name = "async-task" name = "async-trait"
version = "1.3.1" version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ac2c016b079e771204030951c366db398864f5026f84a44dafb0ff20f02085d" checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d"
dependencies = [ dependencies = [
"libc", "proc-macro2 1.0.24",
"winapi 0.3.9", "quote 1.0.9",
"syn 1.0.60",
] ]
[[package]] [[package]]
@ -487,6 +468,12 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040"
[[package]]
name = "cache-padded"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba"
[[package]] [[package]]
name = "calloop" name = "calloop"
version = "0.6.5" version = "0.6.5"
@ -730,7 +717,7 @@ version = "3.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680"
dependencies = [ dependencies = [
"ascii 0.9.3", "ascii",
"byteorder", "byteorder",
"either", "either",
"memchr", "memchr",
@ -747,6 +734,15 @@ dependencies = [
"memchr", "memchr",
] ]
[[package]]
name = "concurrent-queue"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3"
dependencies = [
"cache-padded",
]
[[package]] [[package]]
name = "conrod_core" name = "conrod_core"
version = "0.63.0" version = "0.63.0"
@ -1066,6 +1062,7 @@ dependencies = [
"clap", "clap",
"criterion-plot", "criterion-plot",
"csv", "csv",
"futures",
"itertools 0.10.0", "itertools 0.10.0",
"lazy_static", "lazy_static",
"num-traits", "num-traits",
@ -1078,6 +1075,7 @@ dependencies = [
"serde_derive", "serde_derive",
"serde_json", "serde_json",
"tinytemplate", "tinytemplate",
"tokio 1.2.0",
"walkdir 2.3.1", "walkdir 2.3.1",
] ]
@ -1114,16 +1112,6 @@ dependencies = [
"crossbeam-utils 0.6.6", "crossbeam-utils 0.6.6",
] ]
[[package]]
name = "crossbeam-channel"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
dependencies = [
"crossbeam-utils 0.7.2",
"maybe-uninit",
]
[[package]] [[package]]
name = "crossbeam-channel" name = "crossbeam-channel"
version = "0.5.0" version = "0.5.0"
@ -1290,16 +1278,6 @@ dependencies = [
"memchr", "memchr",
] ]
[[package]]
name = "ctor"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19"
dependencies = [
"quote 1.0.9",
"syn 1.0.60",
]
[[package]] [[package]]
name = "daggy" name = "daggy"
version = "0.5.0" version = "0.5.0"
@ -1621,6 +1599,12 @@ dependencies = [
"num-traits", "num-traits",
] ]
[[package]]
name = "event-listener"
version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59"
[[package]] [[package]]
name = "fallible-iterator" name = "fallible-iterator"
version = "0.2.0" version = "0.2.0"
@ -1861,12 +1845,6 @@ dependencies = [
"once_cell", "once_cell",
] ]
[[package]]
name = "futures-timer"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6"
[[package]] [[package]]
name = "futures-timer" name = "futures-timer"
version = "3.0.2" version = "3.0.2"
@ -2242,7 +2220,7 @@ dependencies = [
"http", "http",
"indexmap", "indexmap",
"slab", "slab",
"tokio", "tokio 0.2.25",
"tokio-util", "tokio-util",
"tracing", "tracing",
"tracing-futures", "tracing-futures",
@ -2357,6 +2335,16 @@ dependencies = [
"http", "http",
] ]
[[package]]
name = "http-body"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994"
dependencies = [
"bytes 1.0.1",
"http",
]
[[package]] [[package]]
name = "httparse" name = "httparse"
version = "1.3.5" version = "1.3.5"
@ -2387,13 +2375,36 @@ dependencies = [
"futures-util", "futures-util",
"h2", "h2",
"http", "http",
"http-body", "http-body 0.3.1",
"httparse", "httparse",
"httpdate", "httpdate",
"itoa", "itoa",
"pin-project 1.0.5", "pin-project 1.0.5",
"socket2", "socket2",
"tokio", "tokio 0.2.25",
"tower-service",
"tracing",
"want",
]
[[package]]
name = "hyper"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7"
dependencies = [
"bytes 1.0.1",
"futures-channel",
"futures-core",
"futures-util",
"http",
"http-body 0.4.0",
"httparse",
"httpdate",
"itoa",
"pin-project 1.0.5",
"socket2",
"tokio 1.2.0",
"tower-service", "tower-service",
"tracing", "tracing",
"want", "want",
@ -2407,10 +2418,10 @@ checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6"
dependencies = [ dependencies = [
"bytes 0.5.6", "bytes 0.5.6",
"futures-util", "futures-util",
"hyper", "hyper 0.13.10",
"log", "log",
"rustls 0.18.1", "rustls 0.18.1",
"tokio", "tokio 0.2.25",
"tokio-rustls", "tokio-rustls",
"webpki", "webpki",
] ]
@ -2687,15 +2698,6 @@ version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2db585e1d738fc771bf08a151420d3ed193d9d895a36df7f6f8a9456b911ddc" checksum = "e2db585e1d738fc771bf08a151420d3ed193d9d895a36df7f6f8a9456b911ddc"
[[package]]
name = "kv-log-macro"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
dependencies = [
"log",
]
[[package]] [[package]]
name = "lazy-bytes-cast" name = "lazy-bytes-cast"
version = "5.0.1" version = "5.0.1"
@ -2863,7 +2865,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
"value-bag",
] ]
[[package]] [[package]]
@ -3088,17 +3089,6 @@ dependencies = [
"slab", "slab",
] ]
[[package]]
name = "mio-uds"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0"
dependencies = [
"iovec",
"libc",
"mio 0.6.23",
]
[[package]] [[package]]
name = "miow" name = "miow"
version = "0.2.2" version = "0.2.2"
@ -3989,6 +3979,18 @@ dependencies = [
"thiserror", "thiserror",
] ]
[[package]]
name = "prometheus-hyper"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc47fa532a12d544229015dd3fae32394949af098b8fe9a327b8c1e4c911d1c8"
dependencies = [
"hyper 0.14.4",
"prometheus",
"tokio 1.2.0",
"tracing",
]
[[package]] [[package]]
name = "publicsuffix" name = "publicsuffix"
version = "1.5.4" version = "1.5.4"
@ -4267,8 +4269,8 @@ dependencies = [
"futures-core", "futures-core",
"futures-util", "futures-util",
"http", "http",
"http-body", "http-body 0.3.1",
"hyper", "hyper 0.13.10",
"hyper-rustls", "hyper-rustls",
"ipnet", "ipnet",
"js-sys", "js-sys",
@ -4282,7 +4284,7 @@ dependencies = [
"serde", "serde",
"serde_json", "serde_json",
"serde_urlencoded", "serde_urlencoded",
"tokio", "tokio 0.2.25",
"tokio-rustls", "tokio-rustls",
"url", "url",
"wasm-bindgen", "wasm-bindgen",
@ -5106,19 +5108,6 @@ dependencies = [
"syn 1.0.60", "syn 1.0.60",
] ]
[[package]]
name = "tiny_http"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eded47106b8e52d8ed8119f0ea6e8c0f5881e69783e0297b5a8462958f334bc1"
dependencies = [
"ascii 1.0.0",
"chrono",
"chunked_transfer",
"log",
"url",
]
[[package]] [[package]]
name = "tinytemplate" name = "tinytemplate"
version = "1.2.0" version = "1.2.0"
@ -5162,6 +5151,36 @@ dependencies = [
"slab", "slab",
] ]
[[package]]
name = "tokio"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a"
dependencies = [
"autocfg",
"bytes 1.0.1",
"libc",
"memchr",
"mio 0.7.7",
"num_cpus",
"once_cell",
"pin-project-lite 0.2.4",
"signal-hook-registry",
"tokio-macros",
"winapi 0.3.9",
]
[[package]]
name = "tokio-macros"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57"
dependencies = [
"proc-macro2 1.0.24",
"quote 1.0.9",
"syn 1.0.60",
]
[[package]] [[package]]
name = "tokio-rustls" name = "tokio-rustls"
version = "0.14.1" version = "0.14.1"
@ -5170,10 +5189,21 @@ checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a"
dependencies = [ dependencies = [
"futures-core", "futures-core",
"rustls 0.18.1", "rustls 0.18.1",
"tokio", "tokio 0.2.25",
"webpki", "webpki",
] ]
[[package]]
name = "tokio-stream"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1981ad97df782ab506a1f43bf82c967326960d278acf3bf8279809648c3ff3ea"
dependencies = [
"futures-core",
"pin-project-lite 0.2.4",
"tokio 1.2.0",
]
[[package]] [[package]]
name = "tokio-util" name = "tokio-util"
version = "0.3.1" version = "0.3.1"
@ -5185,7 +5215,7 @@ dependencies = [
"futures-sink", "futures-sink",
"log", "log",
"pin-project-lite 0.1.11", "pin-project-lite 0.1.11",
"tokio", "tokio 0.2.25",
] ]
[[package]] [[package]]
@ -5517,26 +5547,6 @@ dependencies = [
"num_cpus", "num_cpus",
] ]
[[package]]
name = "uvth"
version = "4.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e5910f9106b96334c6cae1f1d77a764bda66ac4ca9f507f73259f184fe1bb6b"
dependencies = [
"crossbeam-channel 0.3.9",
"log",
"num_cpus",
]
[[package]]
name = "value-bag"
version = "1.0.0-alpha.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1"
dependencies = [
"ctor",
]
[[package]] [[package]]
name = "vcpkg" name = "vcpkg"
version = "0.2.11" version = "0.2.11"
@ -5596,7 +5606,7 @@ dependencies = [
"authc", "authc",
"byteorder", "byteorder",
"futures-executor", "futures-executor",
"futures-timer 3.0.2", "futures-timer",
"futures-util", "futures-util",
"hashbrown 0.9.1", "hashbrown 0.9.1",
"image", "image",
@ -5604,14 +5614,15 @@ dependencies = [
"num_cpus", "num_cpus",
"rayon", "rayon",
"specs", "specs",
"tokio 1.2.0",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"uvth 3.1.1", "uvth",
"vek 0.12.0", "vek 0.12.0",
"veloren-common", "veloren-common",
"veloren-common-net", "veloren-common-net",
"veloren-common-sys", "veloren-common-sys",
"veloren_network", "veloren-network",
] ]
[[package]] [[package]]
@ -5692,6 +5703,49 @@ dependencies = [
"wasmer", "wasmer",
] ]
[[package]]
name = "veloren-network"
version = "0.3.0"
dependencies = [
"async-channel",
"async-trait",
"bincode",
"bitflags",
"bytes 1.0.1",
"clap",
"criterion",
"crossbeam-channel 0.5.0",
"futures-core",
"futures-util",
"lazy_static",
"lz-fear",
"prometheus",
"prometheus-hyper",
"rand 0.8.3",
"serde",
"shellexpand",
"tokio 1.2.0",
"tokio-stream",
"tracing",
"tracing-subscriber",
"veloren-network-protocol",
]
[[package]]
name = "veloren-network-protocol"
version = "0.5.0"
dependencies = [
"async-channel",
"async-trait",
"bitflags",
"bytes 1.0.1",
"criterion",
"prometheus",
"rand 0.8.3",
"tokio 1.2.0",
"tracing",
]
[[package]] [[package]]
name = "veloren-plugin-api" name = "veloren-plugin-api"
version = "0.1.0" version = "0.1.0"
@ -5731,7 +5785,7 @@ dependencies = [
"dotenv", "dotenv",
"futures-channel", "futures-channel",
"futures-executor", "futures-executor",
"futures-timer 3.0.2", "futures-timer",
"futures-util", "futures-util",
"hashbrown 0.9.1", "hashbrown 0.9.1",
"itertools 0.9.0", "itertools 0.9.0",
@ -5739,6 +5793,7 @@ dependencies = [
"libsqlite3-sys", "libsqlite3-sys",
"portpicker", "portpicker",
"prometheus", "prometheus",
"prometheus-hyper",
"rand 0.8.3", "rand 0.8.3",
"rayon", "rayon",
"ron", "ron",
@ -5748,16 +5803,16 @@ dependencies = [
"slab", "slab",
"specs", "specs",
"specs-idvs", "specs-idvs",
"tiny_http", "tokio 1.2.0",
"tracing", "tracing",
"uvth 3.1.1", "uvth",
"vek 0.12.0", "vek 0.12.0",
"veloren-common", "veloren-common",
"veloren-common-net", "veloren-common-net",
"veloren-common-sys", "veloren-common-sys",
"veloren-network",
"veloren-plugin-api", "veloren-plugin-api",
"veloren-world", "veloren-world",
"veloren_network",
] ]
[[package]] [[package]]
@ -5772,6 +5827,7 @@ dependencies = [
"serde", "serde",
"signal-hook 0.2.3", "signal-hook 0.2.3",
"termcolor", "termcolor",
"tokio 1.2.0",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"tracing-tracy", "tracing-tracy",
@ -5818,6 +5874,7 @@ dependencies = [
"lazy_static", "lazy_static",
"native-dialog", "native-dialog",
"num 0.3.1", "num 0.3.1",
"num_cpus",
"old_school_gfx_glutin_ext", "old_school_gfx_glutin_ext",
"ordered-float 2.1.1", "ordered-float 2.1.1",
"rand 0.8.3", "rand 0.8.3",
@ -5827,13 +5884,14 @@ dependencies = [
"specs", "specs",
"specs-idvs", "specs-idvs",
"termcolor", "termcolor",
"tokio 1.2.0",
"tracing", "tracing",
"tracing-appender", "tracing-appender",
"tracing-log", "tracing-log",
"tracing-subscriber", "tracing-subscriber",
"tracing-tracy", "tracing-tracy",
"treeculler", "treeculler",
"uvth 3.1.1", "uvth",
"vek 0.12.0", "vek 0.12.0",
"veloren-client", "veloren-client",
"veloren-common", "veloren-common",
@ -5891,29 +5949,6 @@ dependencies = [
"veloren-common-net", "veloren-common-net",
] ]
[[package]]
name = "veloren_network"
version = "0.2.0"
dependencies = [
"async-std",
"bincode",
"bitflags",
"clap",
"crossbeam-channel 0.5.0",
"futures",
"lazy_static",
"lz-fear",
"prometheus",
"rand 0.8.3",
"serde",
"shellexpand",
"tiny_http",
"tracing",
"tracing-futures",
"tracing-subscriber",
"uvth 4.0.1",
]
[[package]] [[package]]
name = "version-compare" name = "version-compare"
version = "0.0.10" version = "0.0.10"


@@ -15,6 +15,7 @@ members = [
     "voxygen/anim",
     "world",
     "network",
+    "network/protocol"
 ]

 # default profile for devs, fast to compile, okay enough to run, no debug information
@@ -30,8 +31,10 @@ incremental = true
 # All dependencies (but not this crate itself)
 [profile.dev.package."*"]
 opt-level = 3
-[profile.dev.package."veloren_network"]
+[profile.dev.package."veloren-network"]
 opt-level = 2
+[profile.dev.package."veloren-network-protocol"]
+opt-level = 3
 [profile.dev.package."veloren-common"]
 opt-level = 2
 [profile.dev.package."veloren-client"]


@@ -14,13 +14,14 @@ default = ["simd"]
 common = { package = "veloren-common", path = "../common", features = ["no-assets"] }
 common-sys = { package = "veloren-common-sys", path = "../common/sys", default-features = false }
 common-net = { package = "veloren-common-net", path = "../common/net" }
-network = { package = "veloren_network", path = "../network", features = ["compression"], default-features = false }
+network = { package = "veloren-network", path = "../network", features = ["compression"], default-features = false }
 byteorder = "1.3.2"
 uvth = "3.1.1"
 futures-util = "0.3.7"
 futures-executor = "0.3"
 futures-timer = "3.0"
+tokio = { version = "1", default-features = false, features = ["rt-multi-thread"] }
 image = { version = "0.23.12", default-features = false, features = ["png"] }
 num = "0.3.1"
 num_cpus = "1.10.1"


@@ -3,7 +3,14 @@
 #![deny(clippy::clone_on_ref_ptr)]

 use common::{clock::Clock, comp};
-use std::{io, net::ToSocketAddrs, sync::mpsc, thread, time::Duration};
+use std::{
+    io,
+    net::ToSocketAddrs,
+    sync::{mpsc, Arc},
+    thread,
+    time::Duration,
+};
+use tokio::runtime::Runtime;
 use tracing::{error, info};
 use veloren_client::{Client, Event};
@@ -37,6 +44,8 @@ fn main() {
     println!("Enter your password");
     let password = read_input();

+    let runtime = Arc::new(Runtime::new().unwrap());
+
     // Create a client.
     let mut client = Client::new(
         server_addr
@@ -45,6 +54,7 @@ fn main() {
             .next()
             .unwrap(),
         None,
+        runtime,
     )
     .expect("Failed to create client instance");


@ -63,6 +63,7 @@ use std::{
sync::Arc, sync::Arc,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use tokio::runtime::Runtime;
use tracing::{debug, error, trace, warn}; use tracing::{debug, error, trace, warn};
use uvth::{ThreadPool, ThreadPoolBuilder}; use uvth::{ThreadPool, ThreadPoolBuilder};
use vek::*; use vek::*;
@ -129,6 +130,7 @@ impl WorldData {
pub struct Client { pub struct Client {
registered: bool, registered: bool,
presence: Option<PresenceKind>, presence: Option<PresenceKind>,
runtime: Arc<Runtime>,
thread_pool: ThreadPool, thread_pool: ThreadPool,
server_info: ServerInfo, server_info: ServerInfo,
world_data: WorldData, world_data: WorldData,
@ -185,15 +187,18 @@ pub struct CharacterList {
impl Client { impl Client {
/// Create a new `Client`. /// Create a new `Client`.
pub fn new<A: Into<SocketAddr>>(addr: A, view_distance: Option<u32>) -> Result<Self, Error> { pub fn new<A: Into<SocketAddr>>(
addr: A,
view_distance: Option<u32>,
runtime: Arc<Runtime>,
) -> Result<Self, Error> {
let mut thread_pool = ThreadPoolBuilder::new() let mut thread_pool = ThreadPoolBuilder::new()
.name("veloren-worker".into()) .name("veloren-worker".into())
.build(); .build();
// We reduce the thread count by 1 to keep rendering smooth // We reduce the thread count by 1 to keep rendering smooth
thread_pool.set_num_threads((num_cpus::get() - 1).max(1)); thread_pool.set_num_threads((num_cpus::get() - 1).max(1));
let (network, scheduler) = Network::new(Pid::new()); let network = Network::new(Pid::new(), Arc::clone(&runtime));
thread_pool.execute(scheduler);
let participant = block_on(network.connect(ProtocolAddr::Tcp(addr.into())))?; let participant = block_on(network.connect(ProtocolAddr::Tcp(addr.into())))?;
let stream = block_on(participant.opened())?; let stream = block_on(participant.opened())?;
@ -417,6 +422,7 @@ impl Client {
Ok(Self { Ok(Self {
registered: false, registered: false,
presence: None, presence: None,
runtime,
thread_pool, thread_pool,
server_info, server_info,
world_data: WorldData { world_data: WorldData {
@ -1733,6 +1739,8 @@ impl Client {
/// exempt). /// exempt).
pub fn thread_pool(&self) -> &ThreadPool { &self.thread_pool } pub fn thread_pool(&self) -> &ThreadPool { &self.thread_pool }
pub fn runtime(&self) -> &Arc<Runtime> { &self.runtime }
/// Get a reference to the client's game state. /// Get a reference to the client's game state.
pub fn state(&self) -> &State { &self.state } pub fn state(&self) -> &State { &self.state }
@ -2058,7 +2066,8 @@ mod tests {
let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9000); let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9000);
let view_distance: Option<u32> = None; let view_distance: Option<u32> = None;
let veloren_client: Result<Client, Error> = Client::new(socket, view_distance); let runtime = Arc::new(Runtime::new().unwrap());
let veloren_client: Result<Client, Error> = Client::new(socket, view_distance, runtime);
let _ = veloren_client.map(|mut client| { let _ = veloren_client.map(|mut client| {
//register //register


@@ -1,44 +1,67 @@
 [package]
-name = "veloren_network"
-version = "0.2.0"
+name = "veloren-network"
+version = "0.3.0"
 authors = ["Marcel Märtens <marcel.cochem@googlemail.com>"]
 edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [features]
-metrics = ["prometheus"]
+metrics = ["prometheus", "network-protocol/metrics"]
 compression = ["lz-fear"]
 default = ["metrics","compression"]
 [dependencies]
+network-protocol = { package = "veloren-network-protocol", path = "protocol" }
 #serialisation
 bincode = "1.3.1"
 serde = { version = "1.0" }
 #sending
 crossbeam-channel = "0.5"
-# NOTE: Upgrading async-std can trigger spontanious crashes for `network`ing. Consider elaborate tests before upgrading
-async-std = { version = "~1.5", default-features = false, features = ["std", "async-task", "default"] }
+tokio = { version = "1.2", default-features = false, features = ["io-util", "macros", "rt", "net", "time"] }
+tokio-stream = { version = "0.1.2", default-features = false }
 #tracing and metrics
-tracing = { version = "0.1", default-features = false }
-tracing-futures = "0.2"
+tracing = { version = "0.1", default-features = false, features = ["attributes"]}
 prometheus = { version = "0.11", default-features = false, optional = true }
 #async
-futures = { version = "0.3", features = ["thread-pool"] }
+futures-core = { version = "0.3", default-features = false }
+futures-util = { version = "0.3", default-features = false, features = ["std"] }
+async-channel = "1.5.1" #use for .close() channels
 #mpsc channel registry
 lazy_static = { version = "1.4", default-features = false }
 rand = { version = "0.8" }
 #stream flags
 bitflags = "1.2.1"
 lz-fear = { version = "0.1.1", optional = true }
+# async traits
+async-trait = "0.1.42"
+bytes = "^1"
 [dev-dependencies]
 tracing-subscriber = { version = "0.2.3", default-features = false, features = ["env-filter", "fmt", "chrono", "ansi", "smallvec"] }
-# `uvth` needed for doc tests
-uvth = { version = ">= 3.0, <= 4.0", default-features = false }
+tokio = { version = "1.2", default-features = false, features = ["io-std", "fs", "rt-multi-thread"] }
+futures-util = { version = "0.3", default-features = false, features = ["sink", "std"] }
 clap = { version = "2.33", default-features = false }
 shellexpand = "2.0.0"
-tiny_http = "0.8.0"
 serde = { version = "1.0", features = ["derive"] }
+prometheus-hyper = "0.1.1"
+criterion = { version = "0.3.4", features = ["default", "async_tokio"] }
+[[bench]]
+name = "speed"
+harness = false
+[[example]]
+name = "fileshare"
+[[example]]
+name = "network-speed"
+[[example]]
+name = "chat"
+[[example]]
+name = "tcp_loadtest"

network/benches/speed.rs (new file)

@ -0,0 +1,143 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::{net::SocketAddr, sync::Arc};
use tokio::{runtime::Runtime, sync::Mutex};
use veloren_network::{Message, Network, Participant, Pid, Promises, ProtocolAddr, Stream};
fn serialize(data: &[u8], stream: &Stream) { let _ = Message::serialize(data, &stream); }
async fn stream_msg(s1_a: Arc<Mutex<Stream>>, s1_b: Arc<Mutex<Stream>>, data: &[u8], cnt: usize) {
let mut s1_b = s1_b.lock().await;
let m = Message::serialize(&data, &s1_b);
std::thread::spawn(move || {
let mut s1_a = s1_a.try_lock().unwrap();
for _ in 0..cnt {
s1_a.send_raw(&m).unwrap();
}
});
for _ in 0..cnt {
s1_b.recv_raw().await.unwrap();
}
}
fn rt() -> Runtime {
tokio::runtime::Builder::new_current_thread()
.build()
.unwrap()
}
fn criterion_util(c: &mut Criterion) {
let mut c = c.benchmark_group("net_util");
c.significance_level(0.1).sample_size(100);
let (r, _n_a, p_a, s1_a, _n_b, _p_b, _s1_b) =
network_participant_stream(ProtocolAddr::Mpsc(5000));
let s2_a = r.block_on(p_a.open(4, Promises::COMPRESSED)).unwrap();
c.throughput(Throughput::Bytes(1000))
.bench_function("message_serialize", |b| {
let data = vec![0u8; 1000];
b.iter(|| serialize(&data, &s1_a))
});
c.throughput(Throughput::Bytes(1000))
.bench_function("message_serialize_compress", |b| {
let data = vec![0u8; 1000];
b.iter(|| serialize(&data, &s2_a))
});
}
fn criterion_mpsc(c: &mut Criterion) {
let mut c = c.benchmark_group("net_mpsc");
c.significance_level(0.1).sample_size(10);
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) =
network_participant_stream(ProtocolAddr::Mpsc(5000));
let s1_a = Arc::new(Mutex::new(s1_a));
let s1_b = Arc::new(Mutex::new(s1_b));
c.throughput(Throughput::Bytes(100000000)).bench_function(
BenchmarkId::new("100MB_in_10000_msg", ""),
|b| {
let data = vec![155u8; 100_000];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 1_000),
)
},
);
c.throughput(Throughput::Elements(100000)).bench_function(
BenchmarkId::new("100000_tiny_msg", ""),
|b| {
let data = vec![3u8; 5];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 100_000),
)
},
);
c.finish();
drop((_n_a, _p_a, _n_b, _p_b));
}
fn criterion_tcp(c: &mut Criterion) {
let mut c = c.benchmark_group("net_tcp");
c.significance_level(0.1).sample_size(10);
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) =
network_participant_stream(ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], 5000))));
let s1_a = Arc::new(Mutex::new(s1_a));
let s1_b = Arc::new(Mutex::new(s1_b));
c.throughput(Throughput::Bytes(100000000)).bench_function(
BenchmarkId::new("100MB_in_1000_msg", ""),
|b| {
let data = vec![155u8; 100_000];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 1_000),
)
},
);
c.throughput(Throughput::Elements(100000)).bench_function(
BenchmarkId::new("100000_tiny_msg", ""),
|b| {
let data = vec![3u8; 5];
b.to_async(rt()).iter_with_setup(
|| (Arc::clone(&s1_a), Arc::clone(&s1_b)),
|(s1_a, s1_b)| stream_msg(s1_a, s1_b, &data, 100_000),
)
},
);
c.finish();
drop((_n_a, _p_a, _n_b, _p_b));
}
criterion_group!(benches, criterion_util, criterion_mpsc, criterion_tcp);
criterion_main!(benches);
pub fn network_participant_stream(
addr: ProtocolAddr,
) -> (
Arc<Runtime>,
Network,
Participant,
Stream,
Network,
Participant,
Stream,
) {
let runtime = Arc::new(Runtime::new().unwrap());
let (n_a, p1_a, s1_a, n_b, p1_b, s1_b) = runtime.block_on(async {
let n_a = Network::new(Pid::fake(0), Arc::clone(&runtime));
let n_b = Network::new(Pid::fake(1), Arc::clone(&runtime));
n_a.listen(addr.clone()).await.unwrap();
let p1_b = n_b.connect(addr).await.unwrap();
let p1_a = n_a.connected().await.unwrap();
let s1_a = p1_a.open(4, Promises::empty()).await.unwrap();
let s1_b = p1_b.opened().await.unwrap();
(n_a, p1_a, s1_a, n_b, p1_b, s1_b)
});
(runtime, n_a, p1_a, s1_a, n_b, p1_b, s1_b)
}


@ -3,10 +3,9 @@
//! RUST_BACKTRACE=1 cargo run --example chat -- --trace=info --port 15006 //! RUST_BACKTRACE=1 cargo run --example chat -- --trace=info --port 15006
//! RUST_BACKTRACE=1 cargo run --example chat -- --trace=info --port 15006 --mode=client //! RUST_BACKTRACE=1 cargo run --example chat -- --trace=info --port 15006 --mode=client
//! ``` //! ```
use async_std::{io, sync::RwLock};
use clap::{App, Arg}; use clap::{App, Arg};
use futures::executor::{block_on, ThreadPool};
use std::{sync::Arc, thread, time::Duration}; use std::{sync::Arc, thread, time::Duration};
use tokio::{io, io::AsyncBufReadExt, runtime::Runtime, sync::RwLock};
use tracing::*; use tracing::*;
use tracing_subscriber::EnvFilter; use tracing_subscriber::EnvFilter;
use veloren_network::{Network, Participant, Pid, Promises, ProtocolAddr}; use veloren_network::{Network, Participant, Pid, Promises, ProtocolAddr};
@ -100,18 +99,17 @@ fn main() {
} }
fn server(address: ProtocolAddr) { fn server(address: ProtocolAddr) {
let (server, f) = Network::new(Pid::new()); let r = Arc::new(Runtime::new().unwrap());
let server = Network::new(Pid::new(), Arc::clone(&r));
let server = Arc::new(server); let server = Arc::new(server);
std::thread::spawn(f);
let pool = ThreadPool::new().unwrap();
let participants = Arc::new(RwLock::new(Vec::new())); let participants = Arc::new(RwLock::new(Vec::new()));
block_on(async { r.block_on(async {
server.listen(address).await.unwrap(); server.listen(address).await.unwrap();
loop { loop {
let p1 = Arc::new(server.connected().await.unwrap()); let p1 = Arc::new(server.connected().await.unwrap());
let server1 = server.clone(); let server1 = server.clone();
participants.write().await.push(p1.clone()); participants.write().await.push(p1.clone());
pool.spawn_ok(client_connection(server1, p1, participants.clone())); tokio::spawn(client_connection(server1, p1, participants.clone()));
} }
}); });
} }
@ -132,7 +130,7 @@ async fn client_connection(
Ok(msg) => { Ok(msg) => {
println!("[{}]: {}", username, msg); println!("[{}]: {}", username, msg);
for p in participants.read().await.iter() { for p in participants.read().await.iter() {
match p.open(32, Promises::ORDERED | Promises::CONSISTENCY).await { match p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await {
Err(_) => info!("error talking to client, //TODO drop it"), Err(_) => info!("error talking to client, //TODO drop it"),
Ok(mut s) => s.send((username.clone(), msg.clone())).unwrap(), Ok(mut s) => s.send((username.clone(), msg.clone())).unwrap(),
}; };
@ -144,27 +142,27 @@ async fn client_connection(
} }
fn client(address: ProtocolAddr) { fn client(address: ProtocolAddr) {
let (client, f) = Network::new(Pid::new()); let r = Arc::new(Runtime::new().unwrap());
std::thread::spawn(f); let client = Network::new(Pid::new(), Arc::clone(&r));
let pool = ThreadPool::new().unwrap();
block_on(async { r.block_on(async {
let p1 = client.connect(address.clone()).await.unwrap(); //remote representation of p1 let p1 = client.connect(address.clone()).await.unwrap(); //remote representation of p1
let mut s1 = p1 let mut s1 = p1
.open(16, Promises::ORDERED | Promises::CONSISTENCY) .open(4, Promises::ORDERED | Promises::CONSISTENCY)
.await .await
.unwrap(); //remote representation of s1 .unwrap(); //remote representation of s1
let mut input_lines = io::BufReader::new(io::stdin());
println!("Enter your username:"); println!("Enter your username:");
let mut username = String::new(); let mut username = String::new();
io::stdin().read_line(&mut username).await.unwrap(); input_lines.read_line(&mut username).await.unwrap();
username = username.split_whitespace().collect(); username = username.split_whitespace().collect();
println!("Your username is: {}", username); println!("Your username is: {}", username);
println!("write /quit to close"); println!("write /quit to close");
pool.spawn_ok(read_messages(p1)); tokio::spawn(read_messages(p1));
s1.send(username).unwrap(); s1.send(username).unwrap();
loop { loop {
let mut line = String::new(); let mut line = String::new();
io::stdin().read_line(&mut line).await.unwrap(); input_lines.read_line(&mut line).await.unwrap();
line = line.split_whitespace().collect(); line = line.split_whitespace().collect();
if line.as_str() == "/quit" { if line.as_str() == "/quit" {
println!("goodbye"); println!("goodbye");


@ -1,9 +1,7 @@
use async_std::{
fs,
path::{Path, PathBuf},
};
use rand::Rng; use rand::Rng;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use tokio::fs;
use veloren_network::{Participant, ProtocolAddr, Stream}; use veloren_network::{Participant, ProtocolAddr, Stream};
use std::collections::HashMap; use std::collections::HashMap;


@ -4,14 +4,9 @@
//! --profile=release -Z unstable-options -- --trace=info --port 15006) //! --profile=release -Z unstable-options -- --trace=info --port 15006)
//! (cd network/examples/fileshare && RUST_BACKTRACE=1 cargo run //! (cd network/examples/fileshare && RUST_BACKTRACE=1 cargo run
//! --profile=release -Z unstable-options -- --trace=info --port 15007) ``` //! --profile=release -Z unstable-options -- --trace=info --port 15007) ```
use async_std::{io, path::PathBuf};
use clap::{App, Arg, SubCommand}; use clap::{App, Arg, SubCommand};
use futures::{ use std::{path::PathBuf, sync::Arc, thread, time::Duration};
channel::mpsc, use tokio::{io, io::AsyncBufReadExt, runtime::Runtime, sync::mpsc};
executor::{block_on, ThreadPool},
sink::SinkExt,
};
use std::{thread, time::Duration};
use tracing::*; use tracing::*;
use tracing_subscriber::EnvFilter; use tracing_subscriber::EnvFilter;
use veloren_network::ProtocolAddr; use veloren_network::ProtocolAddr;
@ -56,14 +51,14 @@ fn main() {
let port: u16 = matches.value_of("port").unwrap().parse().unwrap(); let port: u16 = matches.value_of("port").unwrap().parse().unwrap();
let address = ProtocolAddr::Tcp(format!("{}:{}", "127.0.0.1", port).parse().unwrap()); let address = ProtocolAddr::Tcp(format!("{}:{}", "127.0.0.1", port).parse().unwrap());
let runtime = Arc::new(Runtime::new().unwrap());
let (server, cmd_sender) = Server::new(); let (server, cmd_sender) = Server::new(Arc::clone(&runtime));
let pool = ThreadPool::new().unwrap(); runtime.spawn(server.run(address));
pool.spawn_ok(server.run(address));
thread::sleep(Duration::from_millis(50)); //just for trace thread::sleep(Duration::from_millis(50)); //just for trace
block_on(client(cmd_sender)); runtime.block_on(client(cmd_sender));
} }
fn file_exists(file: String) -> Result<(), String> { fn file_exists(file: String) -> Result<(), String> {
@ -130,14 +125,15 @@ fn get_options<'a, 'b>() -> App<'a, 'b> {
) )
} }
async fn client(mut cmd_sender: mpsc::UnboundedSender<LocalCommand>) { async fn client(cmd_sender: mpsc::UnboundedSender<LocalCommand>) {
use std::io::Write; use std::io::Write;
loop { loop {
let mut line = String::new(); let mut line = String::new();
let mut input_lines = io::BufReader::new(io::stdin());
print!("==> "); print!("==> ");
std::io::stdout().flush().unwrap(); std::io::stdout().flush().unwrap();
io::stdin().read_line(&mut line).await.unwrap(); input_lines.read_line(&mut line).await.unwrap();
let matches = match get_options().get_matches_from_safe(line.split_whitespace()) { let matches = match get_options().get_matches_from_safe(line.split_whitespace()) {
Err(e) => { Err(e) => {
println!("{}", e.message); println!("{}", e.message);
@ -148,12 +144,12 @@ async fn client(mut cmd_sender: mpsc::UnboundedSender<LocalCommand>) {
match matches.subcommand() { match matches.subcommand() {
("quit", _) => { ("quit", _) => {
cmd_sender.send(LocalCommand::Shutdown).await.unwrap(); cmd_sender.send(LocalCommand::Shutdown).unwrap();
println!("goodbye"); println!("goodbye");
break; break;
}, },
("disconnect", _) => { ("disconnect", _) => {
cmd_sender.send(LocalCommand::Disconnect).await.unwrap(); cmd_sender.send(LocalCommand::Disconnect).unwrap();
}, },
("connect", Some(connect_matches)) => { ("connect", Some(connect_matches)) => {
let socketaddr = connect_matches let socketaddr = connect_matches
@ -163,7 +159,6 @@ async fn client(mut cmd_sender: mpsc::UnboundedSender<LocalCommand>) {
.unwrap(); .unwrap();
cmd_sender cmd_sender
.send(LocalCommand::Connect(ProtocolAddr::Tcp(socketaddr))) .send(LocalCommand::Connect(ProtocolAddr::Tcp(socketaddr)))
.await
.unwrap(); .unwrap();
}, },
("t", _) => { ("t", _) => {
@ -171,28 +166,23 @@ async fn client(mut cmd_sender: mpsc::UnboundedSender<LocalCommand>) {
.send(LocalCommand::Connect(ProtocolAddr::Tcp( .send(LocalCommand::Connect(ProtocolAddr::Tcp(
"127.0.0.1:1231".parse().unwrap(), "127.0.0.1:1231".parse().unwrap(),
))) )))
.await
.unwrap(); .unwrap();
}, },
("serve", Some(serve_matches)) => { ("serve", Some(serve_matches)) => {
let path = shellexpand::tilde(serve_matches.value_of("file").unwrap()); let path = shellexpand::tilde(serve_matches.value_of("file").unwrap());
let path: PathBuf = path.parse().unwrap(); let path: PathBuf = path.parse().unwrap();
if let Some(fileinfo) = FileInfo::new(&path).await { if let Some(fileinfo) = FileInfo::new(&path).await {
cmd_sender cmd_sender.send(LocalCommand::Serve(fileinfo)).unwrap();
.send(LocalCommand::Serve(fileinfo))
.await
.unwrap();
} }
}, },
("list", _) => { ("list", _) => {
cmd_sender.send(LocalCommand::List).await.unwrap(); cmd_sender.send(LocalCommand::List).unwrap();
}, },
("get", Some(get_matches)) => { ("get", Some(get_matches)) => {
let id: u32 = get_matches.value_of("id").unwrap().parse().unwrap(); let id: u32 = get_matches.value_of("id").unwrap().parse().unwrap();
let file = get_matches.value_of("file"); let file = get_matches.value_of("file");
cmd_sender cmd_sender
.send(LocalCommand::Get(id, file.map(|s| s.to_string()))) .send(LocalCommand::Get(id, file.map(|s| s.to_string())))
.await
.unwrap(); .unwrap();
}, },


@ -1,11 +1,12 @@
use crate::commands::{Command, FileInfo, LocalCommand, RemoteInfo}; use crate::commands::{Command, FileInfo, LocalCommand, RemoteInfo};
use async_std::{ use futures_util::{FutureExt, StreamExt};
fs, use std::{collections::HashMap, path::PathBuf, sync::Arc};
path::PathBuf, use tokio::{
sync::{Mutex, RwLock}, fs, join,
runtime::Runtime,
sync::{mpsc, Mutex, RwLock},
}; };
use futures::{channel::mpsc, future::FutureExt, stream::StreamExt}; use tokio_stream::wrappers::UnboundedReceiverStream;
use std::{collections::HashMap, sync::Arc};
use tracing::*; use tracing::*;
use veloren_network::{Network, Participant, Pid, Promises, ProtocolAddr, Stream}; use veloren_network::{Network, Participant, Pid, Promises, ProtocolAddr, Stream};
@ -23,11 +24,10 @@ pub struct Server {
} }
impl Server { impl Server {
pub fn new() -> (Self, mpsc::UnboundedSender<LocalCommand>) { pub fn new(runtime: Arc<Runtime>) -> (Self, mpsc::UnboundedSender<LocalCommand>) {
let (command_sender, command_receiver) = mpsc::unbounded(); let (command_sender, command_receiver) = mpsc::unbounded_channel();
let (network, f) = Network::new(Pid::new()); let network = Network::new(Pid::new(), runtime);
std::thread::spawn(f);
let run_channels = Some(ControlChannels { command_receiver }); let run_channels = Some(ControlChannels { command_receiver });
( (
@ -47,7 +47,7 @@ impl Server {
self.network.listen(address).await.unwrap(); self.network.listen(address).await.unwrap();
futures::join!( join!(
self.command_manager(run_channels.command_receiver,), self.command_manager(run_channels.command_receiver,),
self.connect_manager(), self.connect_manager(),
); );
@ -55,6 +55,7 @@ impl Server {
async fn command_manager(&self, command_receiver: mpsc::UnboundedReceiver<LocalCommand>) { async fn command_manager(&self, command_receiver: mpsc::UnboundedReceiver<LocalCommand>) {
trace!("Start command_manager"); trace!("Start command_manager");
let command_receiver = UnboundedReceiverStream::new(command_receiver);
command_receiver command_receiver
.for_each_concurrent(None, async move |cmd| { .for_each_concurrent(None, async move |cmd| {
match cmd { match cmd {
@ -106,7 +107,7 @@ impl Server {
async fn connect_manager(&self) { async fn connect_manager(&self) {
trace!("Start connect_manager"); trace!("Start connect_manager");
let iter = futures::stream::unfold((), |_| { let iter = futures_util::stream::unfold((), |_| {
self.network.connected().map(|r| r.ok().map(|v| (v, ()))) self.network.connected().map(|r| r.ok().map(|v| (v, ())))
}); });
@ -120,8 +121,8 @@ impl Server {
#[allow(clippy::eval_order_dependence)] #[allow(clippy::eval_order_dependence)]
async fn loop_participant(&self, p: Participant) { async fn loop_participant(&self, p: Participant) {
if let (Ok(cmd_out), Ok(file_out), Ok(cmd_in), Ok(file_in)) = ( if let (Ok(cmd_out), Ok(file_out), Ok(cmd_in), Ok(file_in)) = (
p.open(15, Promises::ORDERED | Promises::CONSISTENCY).await, p.open(3, Promises::ORDERED | Promises::CONSISTENCY).await,
p.open(40, Promises::CONSISTENCY).await, p.open(6, Promises::CONSISTENCY).await,
p.opened().await, p.opened().await,
p.opened().await, p.opened().await,
) { ) {
@ -129,7 +130,7 @@ impl Server {
let id = p.remote_pid(); let id = p.remote_pid();
let ri = Arc::new(Mutex::new(RemoteInfo::new(cmd_out, file_out, p))); let ri = Arc::new(Mutex::new(RemoteInfo::new(cmd_out, file_out, p)));
self.remotes.write().await.insert(id, ri.clone()); self.remotes.write().await.insert(id, ri.clone());
futures::join!( join!(
self.handle_remote_cmd(cmd_in, ri.clone()), self.handle_remote_cmd(cmd_in, ri.clone()),
self.handle_files(file_in, ri.clone()), self.handle_files(file_in, ri.clone()),
); );
@ -174,7 +175,7 @@ impl Server {
let mut path = std::env::current_dir().unwrap(); let mut path = std::env::current_dir().unwrap();
path.push(fi.path().file_name().unwrap()); path.push(fi.path().file_name().unwrap());
trace!("No path provided, saving down to {:?}", path); trace!("No path provided, saving down to {:?}", path);
PathBuf::from(path) path
}, },
}; };
debug!("Received file, going to save it under {:?}", path); debug!("Received file, going to save it under {:?}", path);


@ -3,15 +3,17 @@
/// (cd network/examples/network-speed && RUST_BACKTRACE=1 cargo run --profile=debuginfo -Z unstable-options -- --trace=error --protocol=tcp --mode=server) /// (cd network/examples/network-speed && RUST_BACKTRACE=1 cargo run --profile=debuginfo -Z unstable-options -- --trace=error --protocol=tcp --mode=server)
/// (cd network/examples/network-speed && RUST_BACKTRACE=1 cargo run --profile=debuginfo -Z unstable-options -- --trace=error --protocol=tcp --mode=client) /// (cd network/examples/network-speed && RUST_BACKTRACE=1 cargo run --profile=debuginfo -Z unstable-options -- --trace=error --protocol=tcp --mode=client)
/// ``` /// ```
mod metrics;
use clap::{App, Arg}; use clap::{App, Arg};
use futures::executor::block_on; use prometheus::Registry;
use prometheus_hyper::Server;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{ use std::{
net::SocketAddr,
sync::Arc,
thread, thread,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use tokio::runtime::Runtime;
use tracing::*; use tracing::*;
use tracing_subscriber::EnvFilter; use tracing_subscriber::EnvFilter;
use veloren_network::{Message, Network, Pid, Promises, ProtocolAddr}; use veloren_network::{Message, Network, Pid, Promises, ProtocolAddr};
@ -101,14 +103,16 @@ fn main() {
}; };
let mut background = None; let mut background = None;
let runtime = Arc::new(Runtime::new().unwrap());
match matches.value_of("mode") { match matches.value_of("mode") {
Some("server") => server(address), Some("server") => server(address, Arc::clone(&runtime)),
Some("client") => client(address), Some("client") => client(address, Arc::clone(&runtime)),
Some("both") => { Some("both") => {
let address1 = address.clone(); let address1 = address.clone();
background = Some(thread::spawn(|| server(address1))); let runtime2 = Arc::clone(&runtime);
background = Some(thread::spawn(|| server(address1, runtime2)));
thread::sleep(Duration::from_millis(200)); //start client after server thread::sleep(Duration::from_millis(200)); //start client after server
client(address); client(address, Arc::clone(&runtime));
}, },
_ => panic!("Invalid mode, run --help!"), _ => panic!("Invalid mode, run --help!"),
}; };
@ -117,18 +121,22 @@ fn main() {
} }
} }
fn server(address: ProtocolAddr) { fn server(address: ProtocolAddr, runtime: Arc<Runtime>) {
let mut metrics = metrics::SimpleMetrics::new(); let registry = Arc::new(Registry::new());
let (server, f) = Network::new_with_registry(Pid::new(), metrics.registry()); let server = Network::new_with_registry(Pid::new(), Arc::clone(&runtime), &registry);
std::thread::spawn(f); runtime.spawn(Server::run(
metrics.run("0.0.0.0:59112".parse().unwrap()).unwrap(); Arc::clone(&registry),
block_on(server.listen(address)).unwrap(); SocketAddr::from(([0; 4], 59112)),
futures_util::future::pending(),
));
runtime.block_on(server.listen(address)).unwrap();
loop { loop {
info!("----");
info!("Waiting for participant to connect"); info!("Waiting for participant to connect");
let p1 = block_on(server.connected()).unwrap(); //remote representation of p1 let p1 = runtime.block_on(server.connected()).unwrap(); //remote representation of p1
let mut s1 = block_on(p1.opened()).unwrap(); //remote representation of s1 let mut s1 = runtime.block_on(p1.opened()).unwrap(); //remote representation of s1
block_on(async { runtime.block_on(async {
let mut last = Instant::now(); let mut last = Instant::now();
let mut id = 0u64; let mut id = 0u64;
while let Ok(_msg) = s1.recv_raw().await { while let Ok(_msg) = s1.recv_raw().await {
@ -145,14 +153,19 @@ fn server(address: ProtocolAddr) {
} }
} }
fn client(address: ProtocolAddr) { fn client(address: ProtocolAddr, runtime: Arc<Runtime>) {
let mut metrics = metrics::SimpleMetrics::new(); let registry = Arc::new(Registry::new());
let (client, f) = Network::new_with_registry(Pid::new(), metrics.registry()); let client = Network::new_with_registry(Pid::new(), Arc::clone(&runtime), &registry);
std::thread::spawn(f); runtime.spawn(Server::run(
metrics.run("0.0.0.0:59111".parse().unwrap()).unwrap(); Arc::clone(&registry),
SocketAddr::from(([0; 4], 59111)),
futures_util::future::pending(),
));
let p1 = block_on(client.connect(address)).unwrap(); //remote representation of p1 let p1 = runtime.block_on(client.connect(address)).unwrap(); //remote representation of p1
let mut s1 = block_on(p1.open(16, Promises::ORDERED | Promises::CONSISTENCY)).unwrap(); //remote representation of s1 let mut s1 = runtime
.block_on(p1.open(4, Promises::ORDERED | Promises::CONSISTENCY))
.unwrap(); //remote representation of s1
let mut last = Instant::now(); let mut last = Instant::now();
let mut id = 0u64; let mut id = 0u64;
let raw_msg = Message::serialize( let raw_msg = Message::serialize(
@ -173,16 +186,16 @@ fn client(address: ProtocolAddr) {
} }
if id > 2000000 { if id > 2000000 {
println!("Stop"); println!("Stop");
std::thread::sleep(std::time::Duration::from_millis(5000)); std::thread::sleep(std::time::Duration::from_millis(2000));
break; break;
} }
} }
drop(s1); drop(s1);
std::thread::sleep(std::time::Duration::from_millis(5000)); std::thread::sleep(std::time::Duration::from_millis(2000));
info!("Closing participant"); info!("Closing participant");
block_on(p1.disconnect()).unwrap(); runtime.block_on(p1.disconnect()).unwrap();
std::thread::sleep(std::time::Duration::from_millis(25000)); std::thread::sleep(std::time::Duration::from_millis(2000));
info!("DROPPING! client"); info!("DROPPING! client");
drop(client); drop(client);
std::thread::sleep(std::time::Duration::from_millis(25000)); std::thread::sleep(std::time::Duration::from_millis(2000));
} }
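The hand-rolled `tiny_http` metrics exporter deleted just below is superseded by the `prometheus-hyper` crate running on the shared tokio runtime; a condensed, illustrative sketch of the replacement (taken from the example change above, not a complete program) looks like this:

```rust
// Condensed from the network-speed example above; assumes a prometheus Registry,
// a shared tokio Runtime, and futures-util as a dependency; error handling omitted.
use std::{net::SocketAddr, sync::Arc};

use prometheus::Registry;
use prometheus_hyper::Server;
use tokio::runtime::Runtime;

fn serve_metrics(runtime: &Runtime, registry: Arc<Registry>) {
    // prometheus-hyper serves the registry over HTTP on the tokio runtime;
    // the pending() future means the server is never asked to shut down.
    runtime.spawn(Server::run(
        registry,
        SocketAddr::from(([0; 4], 59112)),
        futures_util::future::pending(),
    ));
}
```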


@ -1,92 +0,0 @@
use prometheus::{Encoder, Registry, TextEncoder};
use std::{
error::Error,
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread,
};
use tracing::*;
pub struct SimpleMetrics {
running: Arc<AtomicBool>,
handle: Option<thread::JoinHandle<()>>,
registry: Option<Registry>,
}
impl SimpleMetrics {
pub fn new() -> Self {
let running = Arc::new(AtomicBool::new(false));
let registry = Some(Registry::new());
Self {
running,
handle: None,
registry,
}
}
pub fn registry(&self) -> &Registry {
match self.registry {
Some(ref r) => r,
None => panic!("You cannot longer register new metrics after the server has started!"),
}
}
pub fn run(&mut self, addr: SocketAddr) -> Result<(), Box<dyn Error>> {
self.running.store(true, Ordering::Relaxed);
let running2 = self.running.clone();
let registry = self
.registry
.take()
.expect("ServerMetrics must be already started");
//TODO: make this a job
self.handle = Some(thread::spawn(move || {
let server = tiny_http::Server::http(addr).unwrap();
const TIMEOUT: std::time::Duration = std::time::Duration::from_secs(1);
debug!("starting tiny_http server to serve metrics");
while running2.load(Ordering::Relaxed) {
let request = match server.recv_timeout(TIMEOUT) {
Ok(Some(rq)) => rq,
Ok(None) => continue,
Err(e) => {
println!("Error: {}", e);
break;
},
};
let mf = registry.gather();
let encoder = TextEncoder::new();
let mut buffer = vec![];
encoder
.encode(&mf, &mut buffer)
.expect("Failed to encoder metrics text.");
let response = tiny_http::Response::from_string(
String::from_utf8(buffer).expect("Failed to parse bytes as a string."),
);
if let Err(e) = request.respond(response) {
error!(
?e,
"The metrics HTTP server had encountered and error with answering"
)
}
}
debug!("Stopping tiny_http server to serve metrics");
}));
Ok(())
}
}
impl Drop for SimpleMetrics {
fn drop(&mut self) {
self.running.store(false, Ordering::Relaxed);
let handle = self.handle.take();
handle
.expect("ServerMetrics worker handle does not exist.")
.join()
.expect("Error shutting down prometheus metric exporter");
}
}


@ -0,0 +1,35 @@
[package]
name = "veloren-network-protocol"
description = "pure Protocol without any I/O itself"
version = "0.5.0"
authors = ["Marcel Märtens <marcel.cochem@googlemail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
metrics = ["prometheus"]
trace_pedantic = [] # use for debug only
default = ["metrics"]
[dependencies]
#tracing and metrics
tracing = { version = "0.1", default-features = false }
prometheus = { version = "0.11", default-features = false, optional = true }
#stream flags
bitflags = "1.2.1"
rand = { version = "0.8" }
# async traits
async-trait = "0.1.42"
bytes = "^1"
[dev-dependencies]
async-channel = "1.5.1"
tokio = { version = "1.2", default-features = false, features = ["rt", "macros"] }
criterion = { version = "0.3.4", features = ["default", "async_tokio"] }
[[bench]]
name = "protocols"
harness = false


@ -0,0 +1,262 @@
use async_channel::*;
use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use std::{sync::Arc, time::Duration};
use tokio::runtime::Runtime;
use veloren_network_protocol::{
InitProtocol, MpscMsg, MpscRecvProtocol, MpscSendProtocol, Pid, Promises, ProtocolError,
ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, RecvProtocol, SendProtocol, Sid,
TcpRecvProtocol, TcpSendProtocol, UnreliableDrain, UnreliableSink, _internal::OTFrame,
};
fn frame_serialize(frame: OTFrame, buffer: &mut BytesMut) { frame.write_bytes(buffer); }
async fn handshake<S, R>(p: [(S, R); 2])
where
S: SendProtocol,
R: RecvProtocol,
(S, R): InitProtocol,
{
let [mut p1, mut p2] = p;
tokio::join!(
async {
p1.initialize(true, Pid::fake(2), 1337).await.unwrap();
p1
},
async {
p2.initialize(false, Pid::fake(3), 42).await.unwrap();
p2
}
);
}
async fn send_msg<T: SendProtocol>(mut s: T, data: Bytes, cnt: usize) {
let bandwidth = data.len() as u64 + 100;
const SEC1: Duration = Duration::from_secs(1);
s.send(ProtocolEvent::OpenStream {
sid: Sid::new(12),
prio: 0,
promises: Promises::ORDERED,
guaranteed_bandwidth: 100_000,
})
.await
.unwrap();
for i in 0..cnt {
s.send(ProtocolEvent::Message {
sid: Sid::new(12),
mid: i as u64,
data: data.clone(),
})
.await
.unwrap();
if i.rem_euclid(50) == 0 {
s.flush(bandwidth * 50_u64, SEC1).await.unwrap();
}
}
s.flush(bandwidth * 1000_u64, SEC1).await.unwrap();
}
async fn recv_msg<T: RecvProtocol>(mut r: T, cnt: usize) {
r.recv().await.unwrap();
for _ in 0..cnt {
r.recv().await.unwrap();
}
}
async fn send_and_recv_msg<S: SendProtocol, R: RecvProtocol>(
p: [(S, R); 2],
data: Bytes,
cnt: usize,
) {
let [p1, p2] = p;
let (s, r) = (p1.0, p2.1);
tokio::join!(send_msg(s, data, cnt), recv_msg(r, cnt));
}
fn rt() -> Runtime {
tokio::runtime::Builder::new_current_thread()
.build()
.unwrap()
}
fn criterion_util(c: &mut Criterion) {
c.bench_function("mpsc_handshake", |b| {
b.to_async(rt())
.iter_with_setup(|| utils::ac_bound(10, None), handshake)
});
c.bench_function("frame_serialize_short", |b| {
let mut buffer = BytesMut::with_capacity(1500);
let frame = OTFrame::Data {
mid: 65,
start: 89u64,
data: Bytes::from(&b"hello_world"[..]),
};
b.iter_with_setup(
|| frame.clone(),
|frame| frame_serialize(frame, &mut buffer),
)
});
}
fn criterion_mpsc(c: &mut Criterion) {
let mut c = c.benchmark_group("mpsc");
c.significance_level(0.1).sample_size(10);
c.throughput(Throughput::Bytes(1000000000))
.bench_function("1GB_in_10000_msg", |b| {
let buffer = Bytes::from(&[155u8; 100_000][..]);
b.to_async(rt()).iter_with_setup(
|| (buffer.clone(), utils::ac_bound(10, None)),
|(b, p)| send_and_recv_msg(p, b, 10_000),
)
});
c.throughput(Throughput::Elements(1000000))
.bench_function("1000000_tiny_msg", |b| {
let buffer = Bytes::from(&[3u8; 5][..]);
b.to_async(rt()).iter_with_setup(
|| (buffer.clone(), utils::ac_bound(10, None)),
|(b, p)| send_and_recv_msg(p, b, 1_000_000),
)
});
c.finish();
}
fn criterion_tcp(c: &mut Criterion) {
let mut c = c.benchmark_group("tcp");
c.significance_level(0.1).sample_size(10);
c.throughput(Throughput::Bytes(1000000000))
.bench_function("1GB_in_10000_msg", |b| {
let buf = Bytes::from(&[155u8; 100_000][..]);
b.to_async(rt()).iter_with_setup(
|| (buf.clone(), utils::tcp_bound(10000, None)),
|(b, p)| send_and_recv_msg(p, b, 10_000),
)
});
c.throughput(Throughput::Elements(1000000))
.bench_function("1000000_tiny_msg", |b| {
let buf = Bytes::from(&[3u8; 5][..]);
b.to_async(rt()).iter_with_setup(
|| (buf.clone(), utils::tcp_bound(10000, None)),
|(b, p)| send_and_recv_msg(p, b, 1_000_000),
)
});
c.finish();
}
criterion_group!(benches, criterion_util, criterion_mpsc, criterion_tcp);
criterion_main!(benches);
mod utils {
use super::*;
pub struct ACDrain {
sender: Sender<MpscMsg>,
}
pub struct ACSink {
receiver: Receiver<MpscMsg>,
}
pub fn ac_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(MpscSendProtocol<ACDrain>, MpscRecvProtocol<ACSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
ProtocolMetricCache::new("mpsc", Arc::new(ProtocolMetrics::new().unwrap()))
});
[
(
MpscSendProtocol::new(ACDrain { sender: s1 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r2 }, m.clone()),
),
(
MpscSendProtocol::new(ACDrain { sender: s2 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r1 }, m),
),
]
}
pub struct TcpDrain {
sender: Sender<BytesMut>,
}
pub struct TcpSink {
receiver: Receiver<BytesMut>,
}
/// Emulate the TCP protocol on channels
pub fn tcp_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
ProtocolMetricCache::new("tcp", Arc::new(ProtocolMetrics::new().unwrap()))
});
[
(
TcpSendProtocol::new(TcpDrain { sender: s1 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r2 }, m.clone()),
),
(
TcpSendProtocol::new(TcpDrain { sender: s2 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r1 }, m),
),
]
}
#[async_trait]
impl UnreliableDrain for ACDrain {
type DataFormat = MpscMsg;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
self.sender
.send(data)
.await
.map_err(|_| ProtocolError::Closed)
}
}
#[async_trait]
impl UnreliableSink for ACSink {
type DataFormat = MpscMsg;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.receiver
.recv()
.await
.map_err(|_| ProtocolError::Closed)
}
}
#[async_trait]
impl UnreliableDrain for TcpDrain {
type DataFormat = BytesMut;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
self.sender
.send(data)
.await
.map_err(|_| ProtocolError::Closed)
}
}
#[async_trait]
impl UnreliableSink for TcpSink {
type DataFormat = BytesMut;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.receiver
.recv()
.await
.map_err(|_| ProtocolError::Closed)
}
}
}

View File

@ -0,0 +1,76 @@
use crate::{
frame::OTFrame,
types::{Bandwidth, Mid, Prio, Promises, Sid},
};
use bytes::Bytes;
/// used for communication with [`SendProtocol`] and [`RecvProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
/// [`RecvProtocol`]: crate::RecvProtocol
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
pub enum ProtocolEvent {
Shutdown,
OpenStream {
sid: Sid,
prio: Prio,
promises: Promises,
guaranteed_bandwidth: Bandwidth,
},
CloseStream {
sid: Sid,
},
Message {
data: Bytes,
mid: Mid,
sid: Sid,
},
}
impl ProtocolEvent {
pub(crate) fn to_frame(&self) -> OTFrame {
match self {
ProtocolEvent::Shutdown => OTFrame::Shutdown,
ProtocolEvent::OpenStream {
sid,
prio,
promises,
guaranteed_bandwidth: _,
} => OTFrame::OpenStream {
sid: *sid,
prio: *prio,
promises: *promises,
},
ProtocolEvent::CloseStream { sid } => OTFrame::CloseStream { sid: *sid },
ProtocolEvent::Message { .. } => {
unimplemented!("Event::Message to OTFrame IS NOT supported")
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_frame() {
assert_eq!(ProtocolEvent::Shutdown.to_frame(), OTFrame::Shutdown);
assert_eq!(
ProtocolEvent::CloseStream { sid: Sid::new(42) }.to_frame(),
OTFrame::CloseStream { sid: Sid::new(42) }
);
}
#[test]
#[should_panic]
fn test_msg_buffer_panic() {
let _ = ProtocolEvent::Message {
data: Bytes::new(),
mid: 0,
sid: Sid::new(23),
}
.to_frame();
}
}

View File

@ -0,0 +1,565 @@
use crate::types::{Mid, Pid, Prio, Promises, Sid};
use bytes::{Buf, BufMut, Bytes, BytesMut};
// const FRAME_RESERVED_1: u8 = 0;
const FRAME_HANDSHAKE: u8 = 1;
const FRAME_INIT: u8 = 2;
const FRAME_SHUTDOWN: u8 = 3;
const FRAME_OPEN_STREAM: u8 = 4;
const FRAME_CLOSE_STREAM: u8 = 5;
const FRAME_DATA_HEADER: u8 = 6;
const FRAME_DATA: u8 = 7;
const FRAME_RAW: u8 = 8;
//const FRAME_RESERVED_2: u8 = 10;
//const FRAME_RESERVED_3: u8 = 13;
/// Used for Communication between Channel <----(TCP/UDP)----> Channel
#[derive(Debug, PartialEq, Clone)]
pub enum InitFrame {
Handshake {
magic_number: [u8; 7],
version: [u32; 3],
},
Init {
pid: Pid,
secret: u128,
},
/// WARNING: sending RAW is only for debug purposes and will drop the
/// connection
Raw(Vec<u8>),
}
/// Used for OUT TCP Communication between Channel --(TCP)--> Channel
#[derive(Debug, PartialEq, Clone)]
pub enum OTFrame {
Shutdown, /* Shutdown this channel gracefully, if all channels are shutdown (gracefully),
* Participant is deleted */
OpenStream {
sid: Sid,
prio: Prio,
promises: Promises,
},
CloseStream {
sid: Sid,
},
DataHeader {
mid: Mid,
sid: Sid,
length: u64,
},
Data {
mid: Mid,
start: u64, /* remove */
data: Bytes,
},
}
/// Used for IN TCP Communication between Channel <--(TCP)-- Channel
#[derive(Debug, PartialEq, Clone)]
pub enum ITFrame {
Shutdown, /* Shutdown this channel gracefully, if all channels are shutdown (gracefully),
* Participant is deleted */
OpenStream {
sid: Sid,
prio: Prio,
promises: Promises,
},
CloseStream {
sid: Sid,
},
DataHeader {
mid: Mid,
sid: Sid,
length: u64,
},
Data {
mid: Mid,
start: u64, /* remove */
data: BytesMut,
},
}
impl InitFrame {
// Size WITHOUT the first indicating byte
pub(crate) const HANDSHAKE_CNS: usize = 19;
pub(crate) const INIT_CNS: usize = 32;
/// const part of the RAW frame, actual size is variable
pub(crate) const RAW_CNS: usize = 2;
// Provide an appropriately sized buffer (> 1500 bytes).
pub(crate) fn write_bytes(self, bytes: &mut BytesMut) {
match self {
InitFrame::Handshake {
magic_number,
version,
} => {
bytes.put_u8(FRAME_HANDSHAKE);
bytes.put_slice(&magic_number);
bytes.put_u32_le(version[0]);
bytes.put_u32_le(version[1]);
bytes.put_u32_le(version[2]);
},
InitFrame::Init { pid, secret } => {
bytes.put_u8(FRAME_INIT);
pid.to_bytes(bytes);
bytes.put_u128_le(secret);
},
InitFrame::Raw(data) => {
bytes.put_u8(FRAME_RAW);
bytes.put_u16_le(data.len() as u16);
bytes.put_slice(&data);
},
}
}
pub(crate) fn read_frame(bytes: &mut BytesMut) -> Option<Self> {
let frame_no = match bytes.get(0) {
Some(&f) => f,
None => return None,
};
let frame = match frame_no {
FRAME_HANDSHAKE => {
if bytes.len() < Self::HANDSHAKE_CNS + 1 {
return None;
}
bytes.advance(1);
let mut magic_number_bytes = bytes.copy_to_bytes(7);
let mut magic_number = [0u8; 7];
magic_number_bytes.copy_to_slice(&mut magic_number);
InitFrame::Handshake {
magic_number,
version: [bytes.get_u32_le(), bytes.get_u32_le(), bytes.get_u32_le()],
}
},
FRAME_INIT => {
if bytes.len() < Self::INIT_CNS + 1 {
return None;
}
bytes.advance(1);
InitFrame::Init {
pid: Pid::from_bytes(bytes),
secret: bytes.get_u128_le(),
}
},
FRAME_RAW => {
if bytes.len() < Self::RAW_CNS + 1 {
return None;
}
bytes.advance(1);
let length = bytes.get_u16_le() as usize;
// lower length is allowed
let max_length = length.min(bytes.len());
let mut data = vec![0; max_length];
data.copy_from_slice(&bytes[..max_length]);
InitFrame::Raw(data)
},
_ => InitFrame::Raw(bytes.to_vec()),
};
Some(frame)
}
}
pub(crate) const TCP_CLOSE_STREAM_CNS: usize = 8;
/// const part of the DATA frame, actual size is variable
pub(crate) const TCP_DATA_CNS: usize = 18;
pub(crate) const TCP_DATA_HEADER_CNS: usize = 24;
pub(crate) const TCP_OPEN_STREAM_CNS: usize = 10;
// Size WITHOUT the first indicating byte
pub(crate) const TCP_SHUTDOWN_CNS: usize = 0;
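// On-wire layout of a Data frame, as written by `OTFrame::write_bytes` below:
// 1 byte tag (FRAME_DATA) + 8 bytes mid (LE) + 8 bytes start (LE)
// + 2 bytes payload length (LE) + payload, i.e. TCP_DATA_CNS = 8 + 8 + 2.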
impl OTFrame {
pub fn write_bytes(self, bytes: &mut BytesMut) {
match self {
Self::Shutdown => {
bytes.put_u8(FRAME_SHUTDOWN);
},
Self::OpenStream {
sid,
prio,
promises,
} => {
bytes.put_u8(FRAME_OPEN_STREAM);
sid.to_bytes(bytes);
bytes.put_u8(prio);
bytes.put_u8(promises.to_le_bytes()[0]);
},
Self::CloseStream { sid } => {
bytes.put_u8(FRAME_CLOSE_STREAM);
sid.to_bytes(bytes);
},
Self::DataHeader { mid, sid, length } => {
bytes.put_u8(FRAME_DATA_HEADER);
bytes.put_u64_le(mid);
sid.to_bytes(bytes);
bytes.put_u64_le(length);
},
Self::Data { mid, start, data } => {
bytes.put_u8(FRAME_DATA);
bytes.put_u64_le(mid);
bytes.put_u64_le(start);
bytes.put_u16_le(data.len() as u16);
bytes.put_slice(&data);
},
}
}
}
impl ITFrame {
pub(crate) fn read_frame(bytes: &mut BytesMut) -> Option<Self> {
let frame_no = match bytes.first() {
Some(&f) => f,
None => return None,
};
let size = match frame_no {
FRAME_SHUTDOWN => TCP_SHUTDOWN_CNS,
FRAME_OPEN_STREAM => TCP_OPEN_STREAM_CNS,
FRAME_CLOSE_STREAM => TCP_CLOSE_STREAM_CNS,
FRAME_DATA_HEADER => TCP_DATA_HEADER_CNS,
FRAME_DATA => {
if bytes.len() < 17 + 1 + 1 {
return None;
}
u16::from_le_bytes([bytes[16 + 1], bytes[17 + 1]]) as usize + TCP_DATA_CNS
},
_ => return None,
};
if bytes.len() < size + 1 {
return None;
}
let frame = match frame_no {
FRAME_SHUTDOWN => {
let _ = bytes.split_to(size + 1);
Self::Shutdown
},
FRAME_OPEN_STREAM => {
let mut bytes = bytes.split_to(size + 1);
bytes.advance(1);
Self::OpenStream {
sid: Sid::from_bytes(&mut bytes),
prio: bytes.get_u8(),
promises: Promises::from_bits_truncate(bytes.get_u8()),
}
},
FRAME_CLOSE_STREAM => {
let mut bytes = bytes.split_to(size + 1);
bytes.advance(1);
Self::CloseStream {
sid: Sid::from_bytes(&mut bytes),
}
},
FRAME_DATA_HEADER => {
let mut bytes = bytes.split_to(size + 1);
bytes.advance(1);
Self::DataHeader {
mid: bytes.get_u64_le(),
sid: Sid::from_bytes(&mut bytes),
length: bytes.get_u64_le(),
}
},
FRAME_DATA => {
bytes.advance(1);
let mid = bytes.get_u64_le();
let start = bytes.get_u64_le();
let length = bytes.get_u16_le();
debug_assert_eq!(length as usize, size - TCP_DATA_CNS);
let data = bytes.split_to(length as usize);
Self::Data { mid, start, data }
},
_ => unreachable!("Frame::to_frame should be handled before!"),
};
Some(frame)
}
}
#[allow(unused_variables)]
impl PartialEq<ITFrame> for OTFrame {
fn eq(&self, other: &ITFrame) -> bool {
match self {
Self::Shutdown => matches!(other, ITFrame::Shutdown),
Self::OpenStream {
sid,
prio,
promises,
} => matches!(other, ITFrame::OpenStream {
sid,
prio,
promises
}),
Self::CloseStream { sid } => matches!(other, ITFrame::CloseStream { sid }),
Self::DataHeader { mid, sid, length } => {
matches!(other, ITFrame::DataHeader { mid, sid, length })
},
Self::Data { mid, start, data } => matches!(other, ITFrame::Data { mid, start, data }),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::{VELOREN_MAGIC_NUMBER, VELOREN_NETWORK_VERSION};
fn get_initframes() -> Vec<InitFrame> {
vec![
InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
},
InitFrame::Init {
pid: Pid::fake(0),
secret: 0u128,
},
InitFrame::Raw(vec![1, 2, 3]),
]
}
fn get_otframes() -> Vec<OTFrame> {
vec![
OTFrame::OpenStream {
sid: Sid::new(1337),
prio: 14,
promises: Promises::GUARANTEED_DELIVERY,
},
OTFrame::DataHeader {
sid: Sid::new(1337),
mid: 0,
length: 36,
},
OTFrame::Data {
mid: 0,
start: 0,
data: Bytes::from(&[77u8; 20][..]),
},
OTFrame::Data {
mid: 0,
start: 20,
data: Bytes::from(&[42u8; 16][..]),
},
OTFrame::CloseStream {
sid: Sid::new(1337),
},
OTFrame::Shutdown,
]
}
#[test]
fn initframe_individual() {
let dupl = |frame: InitFrame| {
let mut buffer = BytesMut::with_capacity(1500);
InitFrame::write_bytes(frame, &mut buffer);
InitFrame::read_frame(&mut buffer)
};
for frame in get_initframes() {
println!("initframe: {:?}", &frame);
assert_eq!(Some(frame.clone()), dupl(frame));
}
}
#[test]
fn initframe_multiple() {
let mut buffer = BytesMut::with_capacity(3000);
let mut frames = get_initframes();
// serialize
for f in &frames {
InitFrame::write_bytes(f.clone(), &mut buffer);
}
// deserialize
let mut framesd = frames
.iter()
.map(|&_| InitFrame::read_frame(&mut buffer))
.collect::<Vec<_>>();
// compare
for (f, fd) in frames.drain(..).zip(framesd.drain(..)) {
println!("initframe: {:?}", &f);
assert_eq!(Some(f), fd);
}
}
#[test]
fn frame_individual() {
let dupl = |frame: OTFrame| {
let mut buffer = BytesMut::with_capacity(1500);
OTFrame::write_bytes(frame, &mut buffer);
ITFrame::read_frame(&mut buffer)
};
for frame in get_otframes() {
println!("frame: {:?}", &frame);
assert_eq!(frame.clone(), dupl(frame).expect("NONE"));
}
}
#[test]
fn frame_multiple() {
let mut buffer = BytesMut::with_capacity(3000);
let mut frames = get_otframes();
// serialize
for f in &frames {
OTFrame::write_bytes(f.clone(), &mut buffer);
}
// deserialize
let mut framesd = frames
.iter()
.map(|&_| ITFrame::read_frame(&mut buffer))
.collect::<Vec<_>>();
// compare
for (f, fd) in frames.drain(..).zip(framesd.drain(..)) {
println!("frame: {:?}", &f);
assert_eq!(f, fd.expect("NONE"));
}
}
#[test]
fn frame_exact_size() {
const SIZE: usize = TCP_CLOSE_STREAM_CNS+1/*first byte*/;
let mut buffer = BytesMut::with_capacity(SIZE);
let frame1 = OTFrame::CloseStream { sid: Sid::new(2) };
OTFrame::write_bytes(frame1.clone(), &mut buffer);
assert_eq!(buffer.len(), SIZE);
let mut deque = buffer.iter().copied().collect();
let frame2 = ITFrame::read_frame(&mut deque);
assert_eq!(frame1, frame2.expect("NONE"));
}
#[test]
fn initframe_too_short_buffer() {
let mut buffer = BytesMut::with_capacity(10);
let frame1 = InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
};
InitFrame::write_bytes(frame1, &mut buffer);
}
#[test]
fn initframe_too_less_data() {
let mut buffer = BytesMut::with_capacity(20);
let frame1 = InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
};
let _ = InitFrame::write_bytes(frame1, &mut buffer);
buffer.truncate(6); // simulate partial retrieve
let frame1d = InitFrame::read_frame(&mut buffer);
assert_eq!(frame1d, None);
}
#[test]
fn initframe_rubish() {
let mut buffer = BytesMut::from(&b"dtrgwcser"[..]);
assert_eq!(
InitFrame::read_frame(&mut buffer),
Some(InitFrame::Raw(b"dtrgwcser".to_vec()))
);
}
#[test]
fn initframe_attack_too_much_length() {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = InitFrame::Raw(b"foobar".to_vec());
let _ = InitFrame::write_bytes(frame1.clone(), &mut buffer);
buffer[1] = 255;
let framed = InitFrame::read_frame(&mut buffer);
assert_eq!(framed, Some(frame1));
}
#[test]
fn initframe_attack_too_low_length() {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = InitFrame::Raw(b"foobar".to_vec());
let _ = InitFrame::write_bytes(frame1, &mut buffer);
buffer[1] = 3;
let framed = InitFrame::read_frame(&mut buffer);
// we accept a different frame here, as it's RAW and debug only!
assert_eq!(framed, Some(InitFrame::Raw(b"foo".to_vec())));
}
#[test]
fn frame_too_short_buffer() {
let mut buffer = BytesMut::with_capacity(10);
let frame1 = OTFrame::OpenStream {
sid: Sid::new(88),
promises: Promises::ENCRYPTED,
prio: 88,
};
OTFrame::write_bytes(frame1, &mut buffer);
}
#[test]
fn frame_too_less_data() {
let mut buffer = BytesMut::with_capacity(20);
let frame1 = OTFrame::OpenStream {
sid: Sid::new(88),
promises: Promises::ENCRYPTED,
prio: 88,
};
OTFrame::write_bytes(frame1, &mut buffer);
buffer.truncate(6); // simulate partial retrieve
let frame1d = ITFrame::read_frame(&mut buffer);
assert_eq!(frame1d, None);
}
#[test]
fn frame_rubish() {
let mut buffer = BytesMut::from(&b"dtrgwcser"[..]);
assert_eq!(ITFrame::read_frame(&mut buffer), None);
}
#[test]
fn frame_attack_too_much_length() {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = OTFrame::Data {
mid: 7u64,
start: 1u64,
data: Bytes::from(&b"foobar"[..]),
};
OTFrame::write_bytes(frame1, &mut buffer);
buffer[17] = 255;
let framed = ITFrame::read_frame(&mut buffer);
assert_eq!(framed, None);
}
#[test]
fn frame_attack_too_low_length() {
let mut buffer = BytesMut::with_capacity(50);
let frame1 = OTFrame::Data {
mid: 7u64,
start: 1u64,
data: Bytes::from(&b"foobar"[..]),
};
OTFrame::write_bytes(frame1, &mut buffer);
buffer[17] = 3;
let framed = ITFrame::read_frame(&mut buffer);
assert_eq!(
framed,
Some(ITFrame::Data {
mid: 7u64,
start: 1u64,
data: BytesMut::from(&b"foo"[..]),
})
);
//next = Invalid => Empty
let framed = ITFrame::read_frame(&mut buffer);
assert_eq!(framed, None);
}
}

View File

@ -0,0 +1,239 @@
use crate::{
frame::InitFrame,
types::{
Pid, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2, VELOREN_MAGIC_NUMBER,
VELOREN_NETWORK_VERSION,
},
InitProtocol, InitProtocolError, ProtocolError,
};
use async_trait::async_trait;
use tracing::{debug, error, info, trace};
/// Implement this for auto Handshake with [`ReliableSink`].
/// You must make sure that EVERY message sent this way is actually received on
/// the receiving side:
/// - exactly once
/// - in the correct order
/// - correctly
///
/// [`ReliableSink`]: crate::ReliableSink
/// [`RecvProtocol`]: crate::RecvProtocol
#[async_trait]
pub trait ReliableDrain {
async fn send(&mut self, frame: InitFrame) -> Result<(), ProtocolError>;
}
/// Implement this for auto Handshake with [`ReliableDrain`]. See
/// [`ReliableDrain`].
///
/// [`ReliableDrain`]: crate::ReliableDrain
#[async_trait]
pub trait ReliableSink {
async fn recv(&mut self) -> Result<InitFrame, ProtocolError>;
}
#[async_trait]
impl<D, S> InitProtocol for (D, S)
where
D: ReliableDrain + Send,
S: ReliableSink + Send,
{
async fn initialize(
&mut self,
initializer: bool,
local_pid: Pid,
local_secret: u128,
) -> Result<(Pid, Sid, u128), InitProtocolError> {
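// Exchange, as implemented below: both sides send a `Handshake` frame
// (magic number + version); after validating it, the initializer sends its
// `Init` (pid + secret) and the other side answers with its own `Init`.
// On success each side returns the remote pid, a stream id offset and the
// remote secret.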
#[cfg(debug_assertions)]
const WRONG_NUMBER: &str = "Handshake does not contain the magic number required by \
veloren server.\nWe are not sure if you are a valid veloren \
client.\nClosing the connection";
#[cfg(debug_assertions)]
const WRONG_VERSION: &str = "Handshake does contain a correct magic number, but invalid \
version.\nWe don't know how to communicate with \
you.\nClosing the connection";
const ERR_S: &str = "Got A Raw Message, these are usually Debug Messages indicating that \
something went wrong on network layer and connection will be closed";
let drain = &mut self.0;
let sink = &mut self.1;
if initializer {
drain
.send(InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
})
.await?;
}
match sink.recv().await? {
InitFrame::Handshake {
magic_number,
version,
} => {
trace!(?magic_number, ?version, "Recv handshake");
if magic_number != VELOREN_MAGIC_NUMBER {
error!(?magic_number, "Connection with invalid magic_number");
#[cfg(debug_assertions)]
drain
.send(InitFrame::Raw(WRONG_NUMBER.as_bytes().to_vec()))
.await?;
Err(InitProtocolError::WrongMagicNumber(magic_number))
} else if version != VELOREN_NETWORK_VERSION {
error!(?version, "Connection with wrong network version");
#[cfg(debug_assertions)]
drain
.send(InitFrame::Raw(
format!(
"{} Our Version: {:?}\nYour Version: {:?}\nClosing the connection",
WRONG_VERSION, VELOREN_NETWORK_VERSION, version,
)
.as_bytes()
.to_vec(),
))
.await?;
Err(InitProtocolError::WrongVersion(version))
} else {
trace!("Handshake Frame completed");
if initializer {
drain
.send(InitFrame::Init {
pid: local_pid,
secret: local_secret,
})
.await?;
} else {
drain
.send(InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
})
.await?;
}
Ok(())
}
},
InitFrame::Raw(bytes) => {
match std::str::from_utf8(bytes.as_slice()) {
Ok(string) => error!(?string, ERR_S),
_ => error!(?bytes, ERR_S),
}
Err(InitProtocolError::Closed)
},
_ => {
info!("Handshake failed");
Err(InitProtocolError::Closed)
},
}?;
match sink.recv().await? {
InitFrame::Init { pid, secret } => {
debug!(?pid, "Participant sent their ID");
let stream_id_offset = if initializer {
STREAM_ID_OFFSET1
} else {
drain
.send(InitFrame::Init {
pid: local_pid,
secret: local_secret,
})
.await?;
STREAM_ID_OFFSET2
};
info!(?pid, "This Handshake is now configured!");
Ok((pid, stream_id_offset, secret))
},
InitFrame::Raw(bytes) => {
match std::str::from_utf8(bytes.as_slice()) {
Ok(string) => error!(?string, ERR_S),
_ => error!(?bytes, ERR_S),
}
Err(InitProtocolError::Closed)
},
_ => {
info!("Handshake failed");
Err(InitProtocolError::Closed)
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{mpsc::test_utils::*, InitProtocolError};
#[tokio::test]
async fn handshake_drop_start() {
let [mut p1, p2] = ac_bound(10, None);
let r1 = tokio::spawn(async move { p1.initialize(true, Pid::fake(2), 1337).await });
let r2 = tokio::spawn(async move {
let _ = p2;
});
let (r1, _) = tokio::join!(r1, r2);
assert_eq!(r1.unwrap(), Err(InitProtocolError::Closed));
}
#[tokio::test]
async fn handshake_wrong_magic_number() {
let [mut p1, mut p2] = ac_bound(10, None);
let r1 = tokio::spawn(async move { p1.initialize(true, Pid::fake(2), 1337).await });
let r2 = tokio::spawn(async move {
let _ = p2.1.recv().await?;
p2.0.send(InitFrame::Handshake {
magic_number: *b"woopsie",
version: VELOREN_NETWORK_VERSION,
})
.await?;
let _ = p2.1.recv().await?;
Result::<(), InitProtocolError>::Ok(())
});
let (r1, r2) = tokio::join!(r1, r2);
assert_eq!(
r1.unwrap(),
Err(InitProtocolError::WrongMagicNumber(*b"woopsie"))
);
assert_eq!(r2.unwrap(), Ok(()));
}
#[tokio::test]
async fn handshake_wrong_version() {
let [mut p1, mut p2] = ac_bound(10, None);
let r1 = tokio::spawn(async move { p1.initialize(true, Pid::fake(2), 1337).await });
let r2 = tokio::spawn(async move {
let _ = p2.1.recv().await?;
p2.0.send(InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: [0, 1, 2],
})
.await?;
let _ = p2.1.recv().await?;
let _ = p2.1.recv().await?; //this should be closed now
Ok(())
});
let (r1, r2) = tokio::join!(r1, r2);
assert_eq!(r1.unwrap(), Err(InitProtocolError::WrongVersion([0, 1, 2])));
assert_eq!(r2.unwrap(), Err(InitProtocolError::Closed));
}
#[tokio::test]
async fn handshake_unexpected_raw() {
let [mut p1, mut p2] = ac_bound(10, None);
let r1 = tokio::spawn(async move { p1.initialize(true, Pid::fake(2), 1337).await });
let r2 = tokio::spawn(async move {
let _ = p2.1.recv().await?;
p2.0.send(InitFrame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
})
.await?;
let _ = p2.1.recv().await?;
p2.0.send(InitFrame::Raw(b"Hello World".to_vec())).await?;
Result::<(), InitProtocolError>::Ok(())
});
let (r1, r2) = tokio::join!(r1, r2);
assert_eq!(r1.unwrap(), Err(InitProtocolError::Closed));
assert_eq!(r2.unwrap(), Ok(()));
}
}

network/protocol/src/lib.rs Normal file
View File

@ -0,0 +1,178 @@
//! Network Protocol
//!
//! An I/O-free protocol for the veloren network crate.
//! This crate defines multiple different protocols over the [`UnreliableDrain`] and
//! [`UnreliableSink`] traits, which allows the behaviour of a protocol to be
//! defined separately from the actual I/O.
//!
//! For example, we define the TCP protocol on top of Drains and Sinks that can
//! send chunks of bytes. You can then implement your own Drain and Sink that
//! sends the data via tokio's or std's implementation, or just use a
//! std::mpsc::channel for unit tests without needing an actual TCP socket.
//!
//! This crate currently defines:
//! - TCP
//! - MPSC
//!
//! A UDP implementation will follow soon, and it's also possible to abstract
//! over QUIC.
//!
//! Warning: don't mix protocols; using the TCP variant on an actual UDP socket
//! will result in dropped data, and using the UDP variant on a TCP socket is a
//! waste of resources.
//!
//! A *channel* in this crate is defined as a combination of *read* and *write*
//! protocol.
//!
//! # Adding a protocol
//!
//! We start by defining our DataFormat. For most protocols this is probably
//! [`Vec<u8>`] or [`Bytes`]. MPSC can send a message directly without serialisation.
//!
//! Create 2 structs, one for the sending and one for the receiving end, based on
//! a generic Drain/Sink with your required DataFormat.
//! Implement the [`SendProtocol`] and [`RecvProtocol`] traits respectively.
//!
//! Implement the handshake via [`InitProtocol`]; alternatively you can implement
//! `ReliableDrain` and `ReliableSink` to use the default handshake.
//!
//! This crate also contains consts and definitions for the network protocol.
//!
//! For an *example* see `TcpDrain` and `TcpSink` in [tcp.rs](tcp.rs), and the sketch below.
//!
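//! A minimal sketch of a custom Drain/Sink pair, assuming a tokio mpsc channel
//! as the transport (the `My*` names are illustrative, not part of this crate):
//!
//! ```ignore
//! use bytes::BytesMut;
//!
//! struct MyDrain(tokio::sync::mpsc::Sender<BytesMut>);
//! struct MySink(tokio::sync::mpsc::Receiver<BytesMut>);
//!
//! #[async_trait::async_trait]
//! impl UnreliableDrain for MyDrain {
//!     type DataFormat = BytesMut;
//!     async fn send(&mut self, data: BytesMut) -> Result<(), ProtocolError> {
//!         self.0.send(data).await.map_err(|_| ProtocolError::Closed)
//!     }
//! }
//!
//! #[async_trait::async_trait]
//! impl UnreliableSink for MySink {
//!     type DataFormat = BytesMut;
//!     async fn recv(&mut self) -> Result<BytesMut, ProtocolError> {
//!         self.0.recv().await.ok_or(ProtocolError::Closed)
//!     }
//! }
//!
//! // Such a pair can then back the existing TCP protocol:
//! // let send = TcpSendProtocol::new(MyDrain(tx), metrics.clone());
//! // let recv = TcpRecvProtocol::new(MySink(rx), metrics);
//! ```
//!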
//! [`UnreliableDrain`]: crate::UnreliableDrain
//! [`UnreliableSink`]: crate::UnreliableSink
//! [`Vec<u8>`]: std::vec::Vec
//! [`Bytes`]: bytes::Bytes
//! [`SendProtocol`]: crate::SendProtocol
//! [`RecvProtocol`]: crate::RecvProtocol
//! [`InitProtocol`]: crate::InitProtocol
mod event;
mod frame;
mod handshake;
mod message;
mod metrics;
mod mpsc;
mod prio;
mod tcp;
mod types;
pub use event::ProtocolEvent;
pub use metrics::ProtocolMetricCache;
#[cfg(feature = "metrics")]
pub use metrics::ProtocolMetrics;
pub use mpsc::{MpscMsg, MpscRecvProtocol, MpscSendProtocol};
pub use tcp::{TcpRecvProtocol, TcpSendProtocol};
pub use types::{
Bandwidth, Cid, Mid, Pid, Prio, Promises, Sid, HIGHEST_PRIO, VELOREN_NETWORK_VERSION,
};
/// Use at own risk, this might change at any time; exposed for internal benchmarks
pub mod _internal {
pub use crate::frame::{ITFrame, OTFrame};
}
use async_trait::async_trait;
/// Handshake: Used to connect 2 Channels.
#[async_trait]
pub trait InitProtocol {
async fn initialize(
&mut self,
initializer: bool,
local_pid: Pid,
secret: u128,
) -> Result<(Pid, Sid, u128), InitProtocolError>;
}
/// Generic Network Send Protocol.
/// Implement this for your Protocol of choice (tcp, udp, mpsc, quic).
/// Allows the creation/deletion of `Streams` and sending messages via
/// [`ProtocolEvent`].
///
/// A `Stream` MUST be bound to a specific Channel. You MUST NOT switch the
/// Channel used to send a Stream mid-air. We will provide takeover options for
/// Channel closure in the future to allow keeping a `Stream` over a broken
/// Channel.
///
/// [`ProtocolEvent`]: crate::ProtocolEvent
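///
/// A typical send sequence, sketched after the protocol benchmarks (with `proto`
/// being any `SendProtocol` implementation and `data` a `Bytes` payload):
/// ```ignore
/// proto.send(ProtocolEvent::OpenStream {
///     sid: Sid::new(12),
///     prio: 0,
///     promises: Promises::ORDERED,
///     guaranteed_bandwidth: 100_000,
/// }).await?;
/// proto.send(ProtocolEvent::Message { sid: Sid::new(12), mid: 0, data }).await?;
/// proto.flush(1_000_000, std::time::Duration::from_secs(1)).await?;
/// ```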
#[async_trait]
pub trait SendProtocol {
/// You MUST inform the `SendProtocol` of any Stream Open BEFORE using that
/// Stream in `send`, and of any Stream Close AFTER its last use in `send`, via
/// this fn.
fn notify_from_recv(&mut self, event: ProtocolEvent);
/// Send an Event via this Protocol. The `SendProtocol` MAY require `flush`
/// to be called before actual data is sent to the respective `Sink`.
async fn send(&mut self, event: ProtocolEvent) -> Result<(), ProtocolError>;
/// Flush all buffered messages according to their [`Prio`] and
/// [`Bandwidth`]. Provide the current bandwidth budget (per second) as
/// well as the `dt` since the last call. According to that budget the
/// respective messages will be flushed.
///
/// [`Prio`]: crate::Prio
/// [`Bandwidth`]: crate::Bandwidth
async fn flush(
&mut self,
bandwidth: Bandwidth,
dt: std::time::Duration,
) -> Result<(), ProtocolError>;
}
/// Generic Network Recv Protocol. See: [`SendProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
#[async_trait]
pub trait RecvProtocol {
/// Either recv an event or fail the Protocol; once the Recv side is closed
/// it cannot recover from the error.
async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError>;
}
/// This crate makes use of UnreliableDrains, which are expected to provide the
/// same guarantees as their I/O counterpart, e.g. ordered messages for TCP and
/// nothing for UDP. The respective Protocol then needs to handle this.
/// This trait is an abstraction over multiple Drains, e.g. [`tokio`](https://tokio.rs),
/// `async-std`, `std`, or even `async-channel`.
#[async_trait]
pub trait UnreliableDrain: Send {
type DataFormat;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError>;
}
/// Sink counterpart of [`UnreliableDrain`]
///
/// [`UnreliableDrain`]: crate::UnreliableDrain
#[async_trait]
pub trait UnreliableSink: Send {
type DataFormat;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError>;
}
/// All possible Errors that can happen during Handshake [`InitProtocol`]
///
/// [`InitProtocol`]: crate::InitProtocol
#[derive(Debug, PartialEq)]
pub enum InitProtocolError {
Closed,
WrongMagicNumber([u8; 7]),
WrongVersion([u32; 3]),
}
/// When you return closed you must stay closed!
#[derive(Debug, PartialEq)]
pub enum ProtocolError {
Closed,
}
impl From<ProtocolError> for InitProtocolError {
fn from(err: ProtocolError) -> Self {
match err {
ProtocolError::Closed => InitProtocolError::Closed,
}
}
}

View File

@ -0,0 +1,191 @@
use crate::{
frame::OTFrame,
types::{Mid, Sid},
};
use bytes::{Bytes, BytesMut};
pub(crate) const ALLOC_BLOCK: usize = 16_777_216;
/// Contains an outgoing message for the TCP protocol.
/// All chunks have the same size, except for the last chunk which can end
/// earlier. E.g.
/// ```ignore
/// let mut msg = OTMessage::new(data, mid, sid);
/// msg.next(); // yields the DataHeader frame
/// msg.next(); // yields the first Data frame
/// ```
#[derive(Debug)]
pub(crate) struct OTMessage {
data: Bytes,
original_length: u64,
send_header: bool,
mid: Mid,
sid: Sid,
start: u64, /* remove */
}
#[derive(Debug)]
pub(crate) struct ITMessage {
pub data: BytesMut,
pub sid: Sid,
pub length: u64,
}
impl OTMessage {
pub(crate) const FRAME_DATA_SIZE: u64 = 1400;
pub(crate) fn new(data: Bytes, mid: Mid, sid: Sid) -> Self {
let original_length = data.len() as u64;
Self {
data,
original_length,
send_header: false,
mid,
sid,
start: 0,
}
}
fn get_header(&self) -> OTFrame {
OTFrame::DataHeader {
mid: self.mid,
sid: self.sid,
length: self.data.len() as u64,
}
}
fn get_next_data(&mut self) -> OTFrame {
let to_send = std::cmp::min(self.data.len(), Self::FRAME_DATA_SIZE as usize);
let data = self.data.split_to(to_send);
let start = self.start;
self.start += Self::FRAME_DATA_SIZE;
OTFrame::Data {
mid: self.mid,
start,
data,
}
}
/// Returns the next frame to write, or `None` once the message is fully chunked.
pub(crate) fn next(&mut self) -> Option<OTFrame> {
if !self.send_header {
self.send_header = true;
Some(self.get_header())
} else if !self.data.is_empty() {
Some(self.get_next_data())
} else {
None
}
}
pub(crate) fn get_sid_len(&self) -> (Sid, u64) { (self.sid, self.original_length) }
}
impl ITMessage {
pub(crate) fn new(sid: Sid, length: u64, _allocator: &mut BytesMut) -> Self {
//allocator.reserve(ALLOC_BLOCK);
//TODO: grab mem from the allocator, but this is only possible with unsafe
Self {
sid,
length,
data: BytesMut::with_capacity((length as usize).min(ALLOC_BLOCK /* anti-ddos */)),
}
}
}
/*
/// Contains a outgoing message and store what was *send* and *confirmed*
/// All Chunks have the same size, except for the last chunk which can end
/// earlier. E.g.
/// ```ignore
/// msg = OutgoingMessage::new();
/// msg.next();
/// msg.next();
/// msg.confirm(1);
/// msg.confirm(2);
/// ```
#[derive(Debug)]
#[allow(dead_code)]
pub(crate) struct OUMessage {
buffer: Arc<MessageBuffer>,
send_index: u64, // 3 => 4200 (3*FRAME_DATA_SIZE)
send_header: bool,
mid: Mid,
sid: Sid,
max_index: u64, //speedup
missing_header: bool,
missing_indices: VecDeque<u64>,
}
#[allow(dead_code)]
impl OUMessage {
pub(crate) const FRAME_DATA_SIZE: u64 = 1400;
pub(crate) fn new(buffer: Arc<MessageBuffer>, mid: Mid, sid: Sid) -> Self {
let max_index =
(buffer.data.len() as u64 + Self::FRAME_DATA_SIZE - 1) / Self::FRAME_DATA_SIZE;
Self {
buffer,
send_index: 0,
send_header: false,
mid,
sid,
max_index,
missing_header: false,
missing_indices: VecDeque::new(),
}
}
/// all has been send once, but might been resend due to failures.
#[allow(dead_code)]
pub(crate) fn initial_sent(&self) -> bool { self.send_index == self.max_index }
pub fn get_header(&self) -> Frame {
Frame::DataHeader {
mid: self.mid,
sid: self.sid,
length: self.buffer.data.len() as u64,
}
}
pub fn get_data(&self, index: u64) -> Frame {
let start = index * Self::FRAME_DATA_SIZE;
let to_send = std::cmp::min(
self.buffer.data[start as usize..].len() as u64,
Self::FRAME_DATA_SIZE,
);
Frame::Data {
mid: self.mid,
start,
data: self.buffer.data[start as usize..][..to_send as usize].to_vec(),
}
}
#[allow(dead_code)]
pub(crate) fn set_missing(&mut self, missing_header: bool, missing_indicies: VecDeque<u64>) {
self.missing_header = missing_header;
self.missing_indices = missing_indicies;
}
/// returns if something was added
pub(crate) fn next(&mut self) -> Option<Frame> {
if !self.send_header {
self.send_header = true;
Some(self.get_header())
} else if self.send_index < self.max_index {
self.send_index += 1;
Some(self.get_data(self.send_index - 1))
} else if self.missing_header {
self.missing_header = false;
Some(self.get_header())
} else if let Some(index) = self.missing_indices.pop_front() {
Some(self.get_data(index))
} else {
None
}
}
pub(crate) fn get_sid_len(&self) -> (Sid, u64) { (self.sid, self.buffer.data.len() as u64) }
}
*/

View File

@ -0,0 +1,418 @@
use crate::types::Sid;
#[cfg(feature = "metrics")]
use prometheus::{
core::{AtomicI64, AtomicU64, GenericCounter, GenericGauge},
IntCounterVec, IntGaugeVec, Opts, Registry,
};
#[cfg(feature = "metrics")]
use std::collections::HashMap;
use std::{error::Error, sync::Arc};
#[allow(dead_code)]
pub enum RemoveReason {
Finished,
Dropped,
}
/// Use 1 `ProtocolMetrics` per `Network`.
/// It will contain all protocol-related [`prometheus`] information
///
/// [`prometheus`]: prometheus
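///
/// A minimal setup sketch, assuming the `metrics` feature and an existing
/// prometheus `registry`:
/// ```ignore
/// let metrics = std::sync::Arc::new(ProtocolMetrics::new()?);
/// metrics.register(&registry)?;
/// let cache = ProtocolMetricCache::new("channel_1", metrics.clone());
/// ```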
#[cfg(feature = "metrics")]
pub struct ProtocolMetrics {
// smsg=send_msg rdata=receive_data
// i=in o=out
// t=total b=byte throughput
// e.g. smsg_it = sending messages, in (responsibility of protocol), total
// based on CHANNEL/STREAM
/// messages added to be sent, total, by STREAM,
smsg_it: IntCounterVec,
/// message bytes added to be sent, throughput, by STREAM,
smsg_ib: IntCounterVec,
/// messages removed from to be sent, because they were finished, total, by
/// STREAM AND REASON(finished/canceled),
smsg_ot: IntCounterVec,
/// message bytes removed from to be sent, because they were finished,
/// throughput, by STREAM AND REASON(finished/dropped),
smsg_ob: IntCounterVec,
/// data frames sent, total, by CHANNEL,
sdata_frames_t: IntCounterVec,
/// data frame bytes sent, throughput, by CHANNEL,
sdata_frames_b: IntCounterVec,
// based on CHANNEL/STREAM
/// messages added to be received, total, by STREAM,
rmsg_it: IntCounterVec,
/// message bytes added to be received, throughput, by STREAM,
rmsg_ib: IntCounterVec,
/// messages removed from to be received, because they were finished, total,
/// by STREAM AND REASON(finished/canceled),
rmsg_ot: IntCounterVec,
/// message bytes removed from to be received, because they were finished,
/// throughput, by STREAM AND REASON(finished/dropped),
rmsg_ob: IntCounterVec,
/// data frames received, total, by CHANNEL,
rdata_frames_t: IntCounterVec,
/// data frame bytes received, throughput, by CHANNEL,
rdata_frames_b: IntCounterVec,
/// ping per CHANNEL //TODO: implement
ping: IntGaugeVec,
}
/// Cache for [`ProtocolMetrics`]; more optimized, and cleaned up after channel
/// disconnect.
///
/// [`ProtocolMetrics`]: crate::ProtocolMetrics
#[cfg(feature = "metrics")]
#[derive(Debug, Clone)]
pub struct ProtocolMetricCache {
cid: String,
m: Arc<ProtocolMetrics>,
cache: HashMap<Sid, CacheLine>,
sdata_frames_t: GenericCounter<AtomicU64>,
sdata_frames_b: GenericCounter<AtomicU64>,
rdata_frames_t: GenericCounter<AtomicU64>,
rdata_frames_b: GenericCounter<AtomicU64>,
ping: GenericGauge<AtomicI64>,
}
#[cfg(not(feature = "metrics"))]
#[derive(Debug, Clone)]
pub struct ProtocolMetricCache {}
#[cfg(feature = "metrics")]
impl ProtocolMetrics {
pub fn new() -> Result<Self, Box<dyn Error>> {
let smsg_it = IntCounterVec::new(
Opts::new(
"send_messages_in_total",
"All Messages that are added to this Protocol to be send at stream level",
),
&["channel", "stream"],
)?;
let smsg_ib = IntCounterVec::new(
Opts::new(
"send_messages_in_throughput",
"All Message bytes that are added to this Protocol to be send at stream level",
),
&["channel", "stream"],
)?;
let smsg_ot = IntCounterVec::new(
Opts::new(
"send_messages_out_total",
"All Messages that are removed from this Protocol to be send at stream and \
reason(finished/canceled) level",
),
&["channel", "stream", "reason"],
)?;
let smsg_ob = IntCounterVec::new(
Opts::new(
"send_messages_out_throughput",
"All Message bytes that are removed from this Protocol to be send at stream and \
reason(finished/canceled) level",
),
&["channel", "stream", "reason"],
)?;
let sdata_frames_t = IntCounterVec::new(
Opts::new(
"send_data_frames_total",
"Number of data frames send per channel",
),
&["channel"],
)?;
let sdata_frames_b = IntCounterVec::new(
Opts::new(
"send_data_frames_throughput",
"Number of data frames bytes send per channel",
),
&["channel"],
)?;
let rmsg_it = IntCounterVec::new(
Opts::new(
"recv_messages_in_total",
"All Messages that are added to this Protocol to be received at stream level",
),
&["channel", "stream"],
)?;
let rmsg_ib = IntCounterVec::new(
Opts::new(
"recv_messages_in_throughput",
"All Message bytes that are added to this Protocol to be received at stream level",
),
&["channel", "stream"],
)?;
let rmsg_ot = IntCounterVec::new(
Opts::new(
"recv_messages_out_total",
"All Messages that are removed from this Protocol to be received at stream and \
reason(finished/canceled) level",
),
&["channel", "stream", "reason"],
)?;
let rmsg_ob = IntCounterVec::new(
Opts::new(
"recv_messages_out_throughput",
"All Message bytes that are removed from this Protocol to be received at stream \
and reason(finished/canceled) level",
),
&["channel", "stream", "reason"],
)?;
let rdata_frames_t = IntCounterVec::new(
Opts::new(
"recv_data_frames_total",
"Number of data frames received per channel",
),
&["channel"],
)?;
let rdata_frames_b = IntCounterVec::new(
Opts::new(
"recv_data_frames_throughput",
"Number of data frames bytes received per channel",
),
&["channel"],
)?;
let ping = IntGaugeVec::new(Opts::new("ping", "Ping per channel"), &["channel"])?;
Ok(Self {
smsg_it,
smsg_ib,
smsg_ot,
smsg_ob,
sdata_frames_t,
sdata_frames_b,
rmsg_it,
rmsg_ib,
rmsg_ot,
rmsg_ob,
rdata_frames_t,
rdata_frames_b,
ping,
})
}
pub fn register(&self, registry: &Registry) -> Result<(), Box<dyn Error>> {
registry.register(Box::new(self.smsg_it.clone()))?;
registry.register(Box::new(self.smsg_ib.clone()))?;
registry.register(Box::new(self.smsg_ot.clone()))?;
registry.register(Box::new(self.smsg_ob.clone()))?;
registry.register(Box::new(self.sdata_frames_t.clone()))?;
registry.register(Box::new(self.sdata_frames_b.clone()))?;
registry.register(Box::new(self.rmsg_it.clone()))?;
registry.register(Box::new(self.rmsg_ib.clone()))?;
registry.register(Box::new(self.rmsg_ot.clone()))?;
registry.register(Box::new(self.rmsg_ob.clone()))?;
registry.register(Box::new(self.rdata_frames_t.clone()))?;
registry.register(Box::new(self.rdata_frames_b.clone()))?;
registry.register(Box::new(self.ping.clone()))?;
Ok(())
}
}
#[cfg(not(feature = "metrics"))]
pub struct ProtocolMetrics {}
#[cfg(feature = "metrics")]
#[derive(Debug, Clone)]
pub(crate) struct CacheLine {
pub smsg_it: GenericCounter<AtomicU64>,
pub smsg_ib: GenericCounter<AtomicU64>,
pub smsg_ot: [GenericCounter<AtomicU64>; 2],
pub smsg_ob: [GenericCounter<AtomicU64>; 2],
pub rmsg_it: GenericCounter<AtomicU64>,
pub rmsg_ib: GenericCounter<AtomicU64>,
pub rmsg_ot: [GenericCounter<AtomicU64>; 2],
pub rmsg_ob: [GenericCounter<AtomicU64>; 2],
}
#[cfg(feature = "metrics")]
impl ProtocolMetricCache {
pub fn new(channel_key: &str, metrics: Arc<ProtocolMetrics>) -> Self {
let cid = channel_key.to_string();
let sdata_frames_t = metrics.sdata_frames_t.with_label_values(&[&cid]);
let sdata_frames_b = metrics.sdata_frames_b.with_label_values(&[&cid]);
let rdata_frames_t = metrics.rdata_frames_t.with_label_values(&[&cid]);
let rdata_frames_b = metrics.rdata_frames_b.with_label_values(&[&cid]);
let ping = metrics.ping.with_label_values(&[&cid]);
Self {
cid,
m: metrics,
cache: HashMap::new(),
sdata_frames_t,
sdata_frames_b,
rdata_frames_t,
rdata_frames_b,
ping,
}
}
pub(crate) fn init_sid(&mut self, sid: Sid) -> &CacheLine {
let cid = &self.cid;
let m = &self.m;
self.cache.entry(sid).or_insert_with_key(|sid| {
let s = sid.to_string();
let finished = RemoveReason::Finished.to_str();
let dropped = RemoveReason::Dropped.to_str();
CacheLine {
smsg_it: m.smsg_it.with_label_values(&[&cid, &s]),
smsg_ib: m.smsg_ib.with_label_values(&[&cid, &s]),
smsg_ot: [
m.smsg_ot.with_label_values(&[&cid, &s, &finished]),
m.smsg_ot.with_label_values(&[&cid, &s, &dropped]),
],
smsg_ob: [
m.smsg_ob.with_label_values(&[&cid, &s, &finished]),
m.smsg_ob.with_label_values(&[&cid, &s, &dropped]),
],
rmsg_it: m.rmsg_it.with_label_values(&[&cid, &s]),
rmsg_ib: m.rmsg_ib.with_label_values(&[&cid, &s]),
rmsg_ot: [
m.rmsg_ot.with_label_values(&[&cid, &s, &finished]),
m.rmsg_ot.with_label_values(&[&cid, &s, &dropped]),
],
rmsg_ob: [
m.rmsg_ob.with_label_values(&[&cid, &s, &finished]),
m.rmsg_ob.with_label_values(&[&cid, &s, &dropped]),
],
}
})
}
pub(crate) fn smsg_ib(&mut self, sid: Sid, bytes: u64) {
let line = self.init_sid(sid);
line.smsg_it.inc();
line.smsg_ib.inc_by(bytes);
}
pub(crate) fn smsg_ob(&mut self, sid: Sid, reason: RemoveReason, bytes: u64) {
let line = self.init_sid(sid);
line.smsg_ot[reason.i()].inc();
line.smsg_ob[reason.i()].inc_by(bytes);
}
pub(crate) fn sdata_frames_b(&mut self, cnt: u64, bytes: u64) {
self.sdata_frames_t.inc_by(cnt);
self.sdata_frames_b.inc_by(bytes);
}
pub(crate) fn rmsg_ib(&mut self, sid: Sid, bytes: u64) {
let line = self.init_sid(sid);
line.rmsg_it.inc();
line.rmsg_ib.inc_by(bytes);
}
pub(crate) fn rmsg_ob(&mut self, sid: Sid, reason: RemoveReason, bytes: u64) {
let line = self.init_sid(sid);
line.rmsg_ot[reason.i()].inc();
line.rmsg_ob[reason.i()].inc_by(bytes);
}
pub(crate) fn rdata_frames_b(&mut self, bytes: u64) {
self.rdata_frames_t.inc();
self.rdata_frames_b.inc_by(bytes);
}
#[cfg(test)]
pub(crate) fn assert_msg(&mut self, sid: Sid, cnt: u64, reason: RemoveReason) {
let line = self.init_sid(sid);
assert_eq!(line.smsg_it.get(), cnt);
assert_eq!(line.smsg_ot[reason.i()].get(), cnt);
assert_eq!(line.rmsg_it.get(), cnt);
assert_eq!(line.rmsg_ot[reason.i()].get(), cnt);
}
#[cfg(test)]
pub(crate) fn assert_msg_bytes(&mut self, sid: Sid, bytes: u64, reason: RemoveReason) {
let line = self.init_sid(sid);
assert_eq!(line.smsg_ib.get(), bytes);
assert_eq!(line.smsg_ob[reason.i()].get(), bytes);
assert_eq!(line.rmsg_ib.get(), bytes);
assert_eq!(line.rmsg_ob[reason.i()].get(), bytes);
}
#[cfg(test)]
pub(crate) fn assert_data_frames(&mut self, cnt: u64) {
assert_eq!(self.sdata_frames_t.get(), cnt);
assert_eq!(self.rdata_frames_t.get(), cnt);
}
#[cfg(test)]
pub(crate) fn assert_data_frames_bytes(&mut self, bytes: u64) {
assert_eq!(self.sdata_frames_b.get(), bytes);
assert_eq!(self.rdata_frames_b.get(), bytes);
}
}
#[cfg(feature = "metrics")]
impl Drop for ProtocolMetricCache {
fn drop(&mut self) {
let cid = &self.cid;
let m = &self.m;
let finished = RemoveReason::Finished.to_str();
let dropped = RemoveReason::Dropped.to_str();
for (sid, _) in self.cache.drain() {
let s = sid.to_string();
let _ = m.smsg_it.remove_label_values(&[&cid, &s]);
let _ = m.smsg_ib.remove_label_values(&[&cid, &s]);
let _ = m.smsg_ot.remove_label_values(&[&cid, &s, &finished]);
let _ = m.smsg_ot.remove_label_values(&[&cid, &s, &dropped]);
let _ = m.smsg_ob.remove_label_values(&[&cid, &s, &finished]);
let _ = m.smsg_ob.remove_label_values(&[&cid, &s, &dropped]);
let _ = m.rmsg_it.remove_label_values(&[&cid, &s]);
let _ = m.rmsg_ib.remove_label_values(&[&cid, &s]);
let _ = m.rmsg_ot.remove_label_values(&[&cid, &s, &finished]);
let _ = m.rmsg_ot.remove_label_values(&[&cid, &s, &dropped]);
let _ = m.rmsg_ob.remove_label_values(&[&cid, &s, &finished]);
let _ = m.rmsg_ob.remove_label_values(&[&cid, &s, &dropped]);
}
}
}
#[cfg(feature = "metrics")]
impl std::fmt::Debug for ProtocolMetrics {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "ProtocolMetrics()")
}
}
#[cfg(not(feature = "metrics"))]
impl ProtocolMetricCache {
pub fn new(_channel_key: &str, _metrics: Arc<ProtocolMetrics>) -> Self { Self {} }
pub(crate) fn smsg_ib(&mut self, _sid: Sid, _b: u64) {}
pub(crate) fn smsg_ob(&mut self, _sid: Sid, _reason: RemoveReason, _b: u64) {}
pub(crate) fn sdata_frames_b(&mut self, _cnt: u64, _b: u64) {}
pub(crate) fn rmsg_ib(&mut self, _sid: Sid, _b: u64) {}
pub(crate) fn rmsg_ob(&mut self, _sid: Sid, _reason: RemoveReason, _b: u64) {}
pub(crate) fn rdata_frames_b(&mut self, _b: u64) {}
}
#[cfg(not(feature = "metrics"))]
impl ProtocolMetrics {
pub fn new() -> Result<Self, Box<dyn Error>> { Ok(Self {}) }
}
impl RemoveReason {
#[cfg(feature = "metrics")]
fn to_str(&self) -> &str {
match self {
RemoveReason::Finished => "Finished",
RemoveReason::Dropped => "Dropped",
}
}
#[cfg(feature = "metrics")]
pub(crate) fn i(&self) -> usize {
match self {
RemoveReason::Finished => 0,
RemoveReason::Dropped => 1,
}
}
}

View File

@ -0,0 +1,239 @@
#[cfg(feature = "metrics")]
use crate::metrics::RemoveReason;
use crate::{
event::ProtocolEvent,
frame::InitFrame,
handshake::{ReliableDrain, ReliableSink},
metrics::ProtocolMetricCache,
types::Bandwidth,
ProtocolError, RecvProtocol, SendProtocol, UnreliableDrain, UnreliableSink,
};
use async_trait::async_trait;
use std::time::{Duration, Instant};
#[cfg(feature = "trace_pedantic")]
use tracing::trace;
/// Used for implementing your own MPSC `Sink` and `Drain`
#[derive(Debug)]
pub enum MpscMsg {
Event(ProtocolEvent),
InitFrame(InitFrame),
}
/// MPSC implementation of [`SendProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
#[derive(Debug)]
pub struct MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
drain: D,
last: Instant,
metrics: ProtocolMetricCache,
}
/// MPSC implementation of [`RecvProtocol`]
///
/// [`RecvProtocol`]: crate::RecvProtocol
#[derive(Debug)]
pub struct MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
sink: S,
metrics: ProtocolMetricCache,
}
impl<D> MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
pub fn new(drain: D, metrics: ProtocolMetricCache) -> Self {
Self {
drain,
last: Instant::now(),
metrics,
}
}
}
impl<S> MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
pub fn new(sink: S, metrics: ProtocolMetricCache) -> Self { Self { sink, metrics } }
}
#[async_trait]
impl<D> SendProtocol for MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
fn notify_from_recv(&mut self, _event: ProtocolEvent) {}
async fn send(&mut self, event: ProtocolEvent) -> Result<(), ProtocolError> {
#[cfg(feature = "trace_pedantic")]
trace!(?event, "send");
match &event {
ProtocolEvent::Message {
data: _data,
mid: _,
sid: _sid,
} => {
#[cfg(feature = "metrics")]
let (bytes, line) = {
let sid = *_sid;
let bytes = _data.len() as u64;
let line = self.metrics.init_sid(sid);
line.smsg_it.inc();
line.smsg_ib.inc_by(bytes);
(bytes, line)
};
let r = self.drain.send(MpscMsg::Event(event)).await;
#[cfg(feature = "metrics")]
{
line.smsg_ot[RemoveReason::Finished.i()].inc();
line.smsg_ob[RemoveReason::Finished.i()].inc_by(bytes);
}
r
},
_ => self.drain.send(MpscMsg::Event(event)).await,
}
}
async fn flush(&mut self, _: Bandwidth, _: Duration) -> Result<(), ProtocolError> { Ok(()) }
}
#[async_trait]
impl<S> RecvProtocol for MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError> {
let event = self.sink.recv().await?;
#[cfg(feature = "trace_pedantic")]
trace!(?event, "recv");
match event {
MpscMsg::Event(e) => {
#[cfg(feature = "metrics")]
{
if let ProtocolEvent::Message { data, mid: _, sid } = &e {
let sid = *sid;
let bytes = data.len() as u64;
let line = self.metrics.init_sid(sid);
line.rmsg_it.inc();
line.rmsg_ib.inc_by(bytes);
line.rmsg_ot[RemoveReason::Finished.i()].inc();
line.rmsg_ob[RemoveReason::Finished.i()].inc_by(bytes);
}
}
Ok(e)
},
MpscMsg::InitFrame(_) => Err(ProtocolError::Closed),
}
}
}
#[async_trait]
impl<D> ReliableDrain for MpscSendProtocol<D>
where
D: UnreliableDrain<DataFormat = MpscMsg>,
{
async fn send(&mut self, frame: InitFrame) -> Result<(), ProtocolError> {
self.drain.send(MpscMsg::InitFrame(frame)).await
}
}
#[async_trait]
impl<S> ReliableSink for MpscRecvProtocol<S>
where
S: UnreliableSink<DataFormat = MpscMsg>,
{
async fn recv(&mut self) -> Result<InitFrame, ProtocolError> {
match self.sink.recv().await? {
MpscMsg::Event(_) => Err(ProtocolError::Closed),
MpscMsg::InitFrame(f) => Ok(f),
}
}
}
#[cfg(test)]
pub mod test_utils {
use super::*;
use crate::metrics::{ProtocolMetricCache, ProtocolMetrics};
use async_channel::*;
use std::sync::Arc;
pub struct ACDrain {
sender: Sender<MpscMsg>,
}
pub struct ACSink {
receiver: Receiver<MpscMsg>,
}
pub fn ac_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(MpscSendProtocol<ACDrain>, MpscRecvProtocol<ACSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
ProtocolMetricCache::new("mpsc", Arc::new(ProtocolMetrics::new().unwrap()))
});
[
(
MpscSendProtocol::new(ACDrain { sender: s1 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r2 }, m.clone()),
),
(
MpscSendProtocol::new(ACDrain { sender: s2 }, m.clone()),
MpscRecvProtocol::new(ACSink { receiver: r1 }, m),
),
]
}
#[async_trait]
impl UnreliableDrain for ACDrain {
type DataFormat = MpscMsg;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
self.sender
.send(data)
.await
.map_err(|_| ProtocolError::Closed)
}
}
#[async_trait]
impl UnreliableSink for ACSink {
type DataFormat = MpscMsg;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.receiver
.recv()
.await
.map_err(|_| ProtocolError::Closed)
}
}
}
#[cfg(test)]
mod tests {
use crate::{
mpsc::test_utils::*,
types::{Pid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2},
InitProtocol,
};
#[tokio::test]
async fn handshake_all_good() {
let [mut p1, mut p2] = ac_bound(10, None);
let r1 = tokio::spawn(async move { p1.initialize(true, Pid::fake(2), 1337).await });
let r2 = tokio::spawn(async move { p2.initialize(false, Pid::fake(3), 42).await });
let (r1, r2) = tokio::join!(r1, r2);
assert_eq!(r1.unwrap(), Ok((Pid::fake(3), STREAM_ID_OFFSET1, 42)));
assert_eq!(r2.unwrap(), Ok((Pid::fake(2), STREAM_ID_OFFSET2, 1337)));
}
}

View File

@ -0,0 +1,137 @@
use crate::{
frame::OTFrame,
message::OTMessage,
metrics::{ProtocolMetricCache, RemoveReason},
types::{Bandwidth, Mid, Prio, Promises, Sid, HIGHEST_PRIO},
};
use bytes::Bytes;
use std::{
collections::{HashMap, VecDeque},
time::Duration,
};
#[derive(Debug)]
struct StreamInfo {
pub(crate) guaranteed_bandwidth: Bandwidth,
pub(crate) prio: Prio,
pub(crate) promises: Promises,
pub(crate) messages: VecDeque<OTMessage>,
}
/// Responsible for queueing messages.
/// Every stream has a guaranteed bandwidth and a prio 0-7.
/// When `n` bytes of bandwidth are available, the guaranteed bandwidth is used
/// first; the remaining bandwidth is then used to fill up the prios.
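///
/// A rough usage sketch, with names as used in this module:
/// ```ignore
/// let mut mgr = PrioManager::new(metrics);
/// mgr.open_stream(Sid::new(12), 0, Promises::ORDERED, 100_000);
/// mgr.add(data, 0, Sid::new(12));
/// // grab frames worth ~1 MB/s of bandwidth over a 100 ms tick
/// let (frames, bytes) = mgr.grab(1_000_000, Duration::from_millis(100));
/// ```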
#[derive(Debug)]
pub(crate) struct PrioManager {
streams: HashMap<Sid, StreamInfo>,
metrics: ProtocolMetricCache,
}
// Send everything ONCE, then keep it till it's confirmed
impl PrioManager {
pub fn new(metrics: ProtocolMetricCache) -> Self {
Self {
streams: HashMap::new(),
metrics,
}
}
pub fn open_stream(
&mut self,
sid: Sid,
prio: Prio,
promises: Promises,
guaranteed_bandwidth: Bandwidth,
) {
self.streams.insert(sid, StreamInfo {
guaranteed_bandwidth,
prio,
promises,
messages: VecDeque::new(),
});
}
pub fn try_close_stream(&mut self, sid: Sid) -> bool {
if let Some(si) = self.streams.get(&sid) {
if si.messages.is_empty() {
self.streams.remove(&sid);
return true;
}
}
false
}
pub fn is_empty(&self) -> bool { self.streams.is_empty() }
pub fn add(&mut self, buffer: Bytes, mid: Mid, sid: Sid) {
self.streams
.get_mut(&sid)
.unwrap()
.messages
.push_back(OTMessage::new(buffer, mid, sid));
}
/// The bandwidth budget might be exceeded, as for technical reasons the
/// guaranteed_bandwidth is always granted and frames are always 1400 bytes.
pub fn grab(&mut self, bandwidth: Bandwidth, dt: Duration) -> (Vec<OTFrame>, Bandwidth) {
let total_bytes = (bandwidth as f64 * dt.as_secs_f64()) as u64;
let mut cur_bytes = 0u64;
let mut frames = vec![];
let mut prios = [0u64; (HIGHEST_PRIO + 1) as usize];
let metrics = &mut self.metrics;
let mut process_stream =
|stream: &mut StreamInfo, mut bandwidth: i64, cur_bytes: &mut u64| {
let mut finished = None;
'outer: for (i, msg) in stream.messages.iter_mut().enumerate() {
while let Some(frame) = msg.next() {
let b = if let OTFrame::Data { data, .. } = &frame {
crate::frame::TCP_DATA_CNS + 1 + data.len()
} else {
crate::frame::TCP_DATA_HEADER_CNS + 1
} as u64;
bandwidth -= b as i64;
*cur_bytes += b;
frames.push(frame);
if bandwidth <= 0 {
break 'outer;
}
}
let (sid, bytes) = msg.get_sid_len();
metrics.smsg_ob(sid, RemoveReason::Finished, bytes);
finished = Some(i);
}
if let Some(i) = finished {
//cleanup
stream.messages.drain(..=i);
}
};
// Add guaranteed bandwidth
for stream in self.streams.values_mut() {
prios[stream.prio as usize] += 1;
let stream_byte_cnt = (stream.guaranteed_bandwidth as f64 * dt.as_secs_f64()) as u64;
process_stream(stream, stream_byte_cnt as i64, &mut cur_bytes);
}
if cur_bytes < total_bytes {
// Add optional bandwidth
for prio in 0..=HIGHEST_PRIO {
if prios[prio as usize] == 0 {
continue;
}
let per_stream_bytes = ((total_bytes - cur_bytes) / prios[prio as usize]) as i64;
for stream in self.streams.values_mut() {
if stream.prio != prio {
continue;
}
process_stream(stream, per_stream_bytes, &mut cur_bytes);
}
}
}
(frames, cur_bytes)
}
}

network/protocol/src/tcp.rs Normal file
View File

@ -0,0 +1,706 @@
use crate::{
event::ProtocolEvent,
frame::{ITFrame, InitFrame, OTFrame},
handshake::{ReliableDrain, ReliableSink},
message::{ITMessage, ALLOC_BLOCK},
metrics::{ProtocolMetricCache, RemoveReason},
prio::PrioManager,
types::{Bandwidth, Mid, Sid},
ProtocolError, RecvProtocol, SendProtocol, UnreliableDrain, UnreliableSink,
};
use async_trait::async_trait;
use bytes::BytesMut;
use std::{
collections::HashMap,
time::{Duration, Instant},
};
use tracing::info;
#[cfg(feature = "trace_pedantic")]
use tracing::trace;
/// TCP implementation of [`SendProtocol`]
///
/// [`SendProtocol`]: crate::SendProtocol
#[derive(Debug)]
pub struct TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
buffer: BytesMut,
store: PrioManager,
closing_streams: Vec<Sid>,
notify_closing_streams: Vec<Sid>,
pending_shutdown: bool,
drain: D,
last: Instant,
metrics: ProtocolMetricCache,
}
/// TCP implementation of [`RecvProtocol`]
///
/// [`RecvProtocol`]: crate::RecvProtocol
#[derive(Debug)]
pub struct TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
buffer: BytesMut,
itmsg_allocator: BytesMut,
incoming: HashMap<Mid, ITMessage>,
sink: S,
metrics: ProtocolMetricCache,
}
impl<D> TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
pub fn new(drain: D, metrics: ProtocolMetricCache) -> Self {
Self {
buffer: BytesMut::new(),
store: PrioManager::new(metrics.clone()),
closing_streams: vec![],
notify_closing_streams: vec![],
pending_shutdown: false,
drain,
last: Instant::now(),
metrics,
}
}
}
impl<S> TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
pub fn new(sink: S, metrics: ProtocolMetricCache) -> Self {
Self {
buffer: BytesMut::new(),
itmsg_allocator: BytesMut::with_capacity(ALLOC_BLOCK),
incoming: HashMap::new(),
sink,
metrics,
}
}
}
#[async_trait]
impl<D> SendProtocol for TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
fn notify_from_recv(&mut self, event: ProtocolEvent) {
match event {
ProtocolEvent::OpenStream {
sid,
prio,
promises,
guaranteed_bandwidth,
} => {
self.store
.open_stream(sid, prio, promises, guaranteed_bandwidth);
},
ProtocolEvent::CloseStream { sid } => {
if !self.store.try_close_stream(sid) {
#[cfg(feature = "trace_pedantic")]
trace!(?sid, "hold back notify close stream");
self.notify_closing_streams.push(sid);
}
},
_ => {},
}
}
async fn send(&mut self, event: ProtocolEvent) -> Result<(), ProtocolError> {
#[cfg(feature = "trace_pedantic")]
trace!(?event, "send");
match event {
ProtocolEvent::OpenStream {
sid,
prio,
promises,
guaranteed_bandwidth,
} => {
self.store
.open_stream(sid, prio, promises, guaranteed_bandwidth);
event.to_frame().write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
},
ProtocolEvent::CloseStream { sid } => {
if self.store.try_close_stream(sid) {
event.to_frame().write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
} else {
#[cfg(feature = "trace_pedantic")]
trace!(?sid, "hold back close stream");
self.closing_streams.push(sid);
}
},
ProtocolEvent::Shutdown => {
if self.store.is_empty() {
event.to_frame().write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
} else {
#[cfg(feature = "trace_pedantic")]
trace!("hold back shutdown");
self.pending_shutdown = true;
}
},
ProtocolEvent::Message { data, mid, sid } => {
self.metrics.smsg_ib(sid, data.len() as u64);
self.store.add(data, mid, sid);
},
}
Ok(())
}
async fn flush(&mut self, bandwidth: Bandwidth, dt: Duration) -> Result<(), ProtocolError> {
let (frames, total_bytes) = self.store.grab(bandwidth, dt);
self.buffer.reserve(total_bytes as usize);
let mut data_frames = 0;
let mut data_bandwidth = 0;
for frame in frames {
if let OTFrame::Data {
mid: _,
start: _,
data,
} = &frame
{
data_bandwidth += data.len();
data_frames += 1;
}
frame.write_bytes(&mut self.buffer);
}
self.drain.send(self.buffer.split()).await?;
self.metrics
.sdata_frames_b(data_frames, data_bandwidth as u64);
let mut finished_streams = vec![];
for (i, &sid) in self.closing_streams.iter().enumerate() {
if self.store.try_close_stream(sid) {
#[cfg(feature = "trace_pedantic")]
trace!(?sid, "close stream, as it's now empty");
OTFrame::CloseStream { sid }.write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
finished_streams.push(i);
}
}
for i in finished_streams.iter().rev() {
self.closing_streams.remove(*i);
}
let mut finished_streams = vec![];
for (i, sid) in self.notify_closing_streams.iter().enumerate() {
if self.store.try_close_stream(*sid) {
#[cfg(feature = "trace_pedantic")]
trace!(?sid, "close stream, as it's now empty");
finished_streams.push(i);
}
}
for i in finished_streams.iter().rev() {
self.notify_closing_streams.remove(*i);
}
if self.pending_shutdown && self.store.is_empty() {
#[cfg(feature = "trace_pedantic")]
trace!("shutdown, as it's now empty");
OTFrame::Shutdown {}.write_bytes(&mut self.buffer);
self.drain.send(self.buffer.split()).await?;
self.pending_shutdown = false;
}
Ok(())
}
}
#[async_trait]
impl<S> RecvProtocol for TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError> {
'outer: loop {
while let Some(frame) = ITFrame::read_frame(&mut self.buffer) {
#[cfg(feature = "trace_pedantic")]
trace!(?frame, "recv");
match frame {
ITFrame::Shutdown => break 'outer Ok(ProtocolEvent::Shutdown),
ITFrame::OpenStream {
sid,
prio,
promises,
} => {
break 'outer Ok(ProtocolEvent::OpenStream {
sid,
prio: prio.min(crate::types::HIGHEST_PRIO),
promises,
guaranteed_bandwidth: 1_000_000,
});
},
ITFrame::CloseStream { sid } => {
break 'outer Ok(ProtocolEvent::CloseStream { sid });
},
ITFrame::DataHeader { sid, mid, length } => {
let m = ITMessage::new(sid, length, &mut self.itmsg_allocator);
self.metrics.rmsg_ib(sid, length);
self.incoming.insert(mid, m);
},
ITFrame::Data {
mid,
start: _,
data,
} => {
self.metrics.rdata_frames_b(data.len() as u64);
let m = match self.incoming.get_mut(&mid) {
Some(m) => m,
None => {
info!(
?mid,
"protocol violation by remote side: send Data before Header"
);
break 'outer Err(ProtocolError::Closed);
},
};
m.data.extend_from_slice(&data);
if m.data.len() == m.length as usize {
// finished, yay
let m = self.incoming.remove(&mid).unwrap();
self.metrics.rmsg_ob(
m.sid,
RemoveReason::Finished,
m.data.len() as u64,
);
break 'outer Ok(ProtocolEvent::Message {
sid: m.sid,
mid,
data: m.data.freeze(),
});
}
},
};
}
let chunk = self.sink.recv().await?;
if self.buffer.is_empty() {
self.buffer = chunk;
} else {
self.buffer.extend_from_slice(&chunk);
}
}
}
}
#[async_trait]
impl<D> ReliableDrain for TcpSendProtocol<D>
where
D: UnreliableDrain<DataFormat = BytesMut>,
{
async fn send(&mut self, frame: InitFrame) -> Result<(), ProtocolError> {
let mut buffer = BytesMut::with_capacity(500);
frame.write_bytes(&mut buffer);
self.drain.send(buffer).await
}
}
#[async_trait]
impl<S> ReliableSink for TcpRecvProtocol<S>
where
S: UnreliableSink<DataFormat = BytesMut>,
{
async fn recv(&mut self) -> Result<InitFrame, ProtocolError> {
while self.buffer.len() < 100 {
let chunk = self.sink.recv().await?;
self.buffer.extend_from_slice(&chunk);
if let Some(frame) = InitFrame::read_frame(&mut self.buffer) {
return Ok(frame);
}
}
Err(ProtocolError::Closed)
}
}
#[cfg(test)]
mod test_utils {
//TCP protocol based on Channel
use super::*;
use crate::metrics::{ProtocolMetricCache, ProtocolMetrics};
use async_channel::*;
use std::sync::Arc;
pub struct TcpDrain {
pub sender: Sender<BytesMut>,
}
pub struct TcpSink {
pub receiver: Receiver<BytesMut>,
}
/// emulate Tcp protocol on Channels
pub fn tcp_bound(
cap: usize,
metrics: Option<ProtocolMetricCache>,
) -> [(TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>); 2] {
let (s1, r1) = async_channel::bounded(cap);
let (s2, r2) = async_channel::bounded(cap);
let m = metrics.unwrap_or_else(|| {
ProtocolMetricCache::new("tcp", Arc::new(ProtocolMetrics::new().unwrap()))
});
[
(
TcpSendProtocol::new(TcpDrain { sender: s1 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r2 }, m.clone()),
),
(
TcpSendProtocol::new(TcpDrain { sender: s2 }, m.clone()),
TcpRecvProtocol::new(TcpSink { receiver: r1 }, m),
),
]
}
#[async_trait]
impl UnreliableDrain for TcpDrain {
type DataFormat = BytesMut;
async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
self.sender
.send(data)
.await
.map_err(|_| ProtocolError::Closed)
}
}
#[async_trait]
impl UnreliableSink for TcpSink {
type DataFormat = BytesMut;
async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.receiver
.recv()
.await
.map_err(|_| ProtocolError::Closed)
}
}
}
#[cfg(test)]
mod tests {
use crate::{
frame::OTFrame,
metrics::{ProtocolMetricCache, ProtocolMetrics, RemoveReason},
tcp::test_utils::*,
types::{Pid, Promises, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2},
InitProtocol, ProtocolError, ProtocolEvent, RecvProtocol, SendProtocol,
};
use bytes::{Bytes, BytesMut};
use std::{sync::Arc, time::Duration};
#[tokio::test]
async fn handshake_all_good() {
let [mut p1, mut p2] = tcp_bound(10, None);
let r1 = tokio::spawn(async move { p1.initialize(true, Pid::fake(2), 1337).await });
let r2 = tokio::spawn(async move { p2.initialize(false, Pid::fake(3), 42).await });
let (r1, r2) = tokio::join!(r1, r2);
assert_eq!(r1.unwrap(), Ok((Pid::fake(3), STREAM_ID_OFFSET1, 42)));
assert_eq!(r2.unwrap(), Ok((Pid::fake(2), STREAM_ID_OFFSET2, 1337)));
}
#[tokio::test]
async fn open_stream() {
let [p1, p2] = tcp_bound(10, None);
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid: Sid::new(10),
prio: 0u8,
promises: Promises::ORDERED,
guaranteed_bandwidth: 1_000_000,
};
s.send(event.clone()).await.unwrap();
let e = r.recv().await.unwrap();
assert_eq!(event, e);
}
#[tokio::test]
async fn send_short_msg() {
let [p1, p2] = tcp_bound(10, None);
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid: Sid::new(10),
prio: 3u8,
promises: Promises::ORDERED,
guaranteed_bandwidth: 1_000_000,
};
s.send(event).await.unwrap();
let _ = r.recv().await.unwrap();
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 0,
data: Bytes::from(&[188u8; 600][..]),
};
s.send(event.clone()).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = r.recv().await.unwrap();
assert_eq!(event, e);
// 2nd short message
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 1,
data: Bytes::from(&[7u8; 30][..]),
};
s.send(event.clone()).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = r.recv().await.unwrap();
assert_eq!(event, e)
}
#[tokio::test]
async fn send_long_msg() {
let mut metrics =
ProtocolMetricCache::new("long_tcp", Arc::new(ProtocolMetrics::new().unwrap()));
let sid = Sid::new(1);
let [p1, p2] = tcp_bound(10000, Some(metrics.clone()));
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
guaranteed_bandwidth: 1_000_000,
};
s.send(event).await.unwrap();
let _ = r.recv().await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 77,
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event.clone()).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = r.recv().await.unwrap();
assert_eq!(event, e);
metrics.assert_msg(sid, 1, RemoveReason::Finished);
metrics.assert_msg_bytes(sid, 500_000, RemoveReason::Finished);
metrics.assert_data_frames(358);
metrics.assert_data_frames_bytes(500_000);
}
#[tokio::test]
async fn msg_finishes_after_close() {
let sid = Sid::new(1);
let [p1, p2] = tcp_bound(10000, None);
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
guaranteed_bandwidth: 0,
};
s.send(event).await.unwrap();
let _ = r.recv().await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 77,
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event).await.unwrap();
let event = ProtocolEvent::CloseStream { sid };
s.send(event).await.unwrap();
//send
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::CloseStream { .. }));
}
#[tokio::test]
async fn msg_finishes_after_shutdown() {
let sid = Sid::new(1);
let [p1, p2] = tcp_bound(10000, None);
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
guaranteed_bandwidth: 0,
};
s.send(event).await.unwrap();
let _ = r.recv().await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 77,
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event).await.unwrap();
let event = ProtocolEvent::Shutdown {};
s.send(event).await.unwrap();
let event = ProtocolEvent::CloseStream { sid };
s.send(event).await.unwrap();
//send
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::CloseStream { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Shutdown { .. }));
}
#[tokio::test]
async fn msg_finishes_after_drop() {
let sid = Sid::new(1);
let [p1, p2] = tcp_bound(10000, None);
let (mut s, mut r) = (p1.0, p2.1);
let event = ProtocolEvent::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
guaranteed_bandwidth: 0,
};
s.send(event).await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 77,
data: Bytes::from(&[99u8; 500_000][..]),
};
s.send(event).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let event = ProtocolEvent::Message {
sid,
mid: 78,
data: Bytes::from(&[100u8; 500_000][..]),
};
s.send(event).await.unwrap();
s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
drop(s);
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::OpenStream { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
}
#[tokio::test]
async fn header_and_data_in_seperate_msg() {
let sid = Sid::new(1);
let (s, r) = async_channel::bounded(10);
let m = ProtocolMetricCache::new("tcp", Arc::new(ProtocolMetrics::new().unwrap()));
let mut r =
super::TcpRecvProtocol::new(super::test_utils::TcpSink { receiver: r }, m.clone());
const DATA1: &[u8; 69] =
b"We need to make sure that its okay to send OPEN_STREAM and DATA_HEAD ";
const DATA2: &[u8; 95] = b"in one chunk and (DATA and CLOSE_STREAM) in the second chunk. and then keep the connection open";
let mut bytes = BytesMut::with_capacity(1500);
OTFrame::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
}
.write_bytes(&mut bytes);
OTFrame::DataHeader {
mid: 99,
sid,
length: (DATA1.len() + DATA2.len()) as u64,
}
.write_bytes(&mut bytes);
s.send(bytes.split()).await.unwrap();
OTFrame::Data {
mid: 99,
start: 0,
data: Bytes::from(&DATA1[..]),
}
.write_bytes(&mut bytes);
OTFrame::Data {
mid: 99,
start: DATA1.len() as u64,
data: Bytes::from(&DATA2[..]),
}
.write_bytes(&mut bytes);
OTFrame::CloseStream { sid }.write_bytes(&mut bytes);
s.send(bytes.split()).await.unwrap();
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::OpenStream { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::Message { .. }));
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::CloseStream { .. }));
}
#[tokio::test]
async fn drop_sink_while_recv() {
let sid = Sid::new(1);
let (s, r) = async_channel::bounded(10);
let m = ProtocolMetricCache::new("tcp", Arc::new(ProtocolMetrics::new().unwrap()));
let mut r =
super::TcpRecvProtocol::new(super::test_utils::TcpSink { receiver: r }, m.clone());
let mut bytes = BytesMut::with_capacity(1500);
OTFrame::OpenStream {
sid,
prio: 5u8,
promises: Promises::COMPRESSED,
}
.write_bytes(&mut bytes);
s.send(bytes.split()).await.unwrap();
let e = r.recv().await.unwrap();
assert!(matches!(e, ProtocolEvent::OpenStream { .. }));
let e = tokio::spawn(async move { r.recv().await });
drop(s);
let e = e.await.unwrap();
assert_eq!(e, Err(ProtocolError::Closed));
}
#[tokio::test]
#[should_panic]
async fn send_on_stream_from_remote_without_notify() {
//remote opens stream
//we send on it
let [mut p1, mut p2] = tcp_bound(10, None);
let event = ProtocolEvent::OpenStream {
sid: Sid::new(10),
prio: 3u8,
promises: Promises::ORDERED,
guaranteed_bandwidth: 1_000_000,
};
p1.0.send(event).await.unwrap();
let _ = p2.1.recv().await.unwrap();
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 0,
data: Bytes::from(&[188u8; 600][..]),
};
p2.0.send(event.clone()).await.unwrap();
p2.0.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = p1.1.recv().await.unwrap();
assert_eq!(event, e);
}
#[tokio::test]
async fn send_on_stream_from_remote() {
//remote opens stream
//we send on it
let [mut p1, mut p2] = tcp_bound(10, None);
let event = ProtocolEvent::OpenStream {
sid: Sid::new(10),
prio: 3u8,
promises: Promises::ORDERED,
guaranteed_bandwidth: 1_000_000,
};
p1.0.send(event).await.unwrap();
let e = p2.1.recv().await.unwrap();
p2.0.notify_from_recv(e);
let event = ProtocolEvent::Message {
sid: Sid::new(10),
mid: 0,
data: Bytes::from(&[188u8; 600][..]),
};
p2.0.send(event.clone()).await.unwrap();
p2.0.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
let e = p1.1.recv().await.unwrap();
assert_eq!(event, e);
}
}
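
The `test_utils` above emulate the wire with in-memory channels. Over a real socket, the same `UnreliableDrain`/`UnreliableSink` traits can be backed by the two halves of a `tokio::net::TcpStream`; the sketch below mirrors the adapters added elsewhere in this commit (the struct names here are illustrative):

use async_trait::async_trait;
use bytes::BytesMut;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
use crate::{ProtocolError, UnreliableDrain, UnreliableSink};

struct SocketDrain {
    half: OwnedWriteHalf,
}

struct SocketSink {
    half: OwnedReadHalf,
    buffer: BytesMut,
}

#[async_trait]
impl UnreliableDrain for SocketDrain {
    type DataFormat = BytesMut;

    async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
        // a failed write means the connection is gone
        self.half.write_all(&data).await.map_err(|_| ProtocolError::Closed)
    }
}

#[async_trait]
impl UnreliableSink for SocketSink {
    type DataFormat = BytesMut;

    async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
        // read whatever the socket has, up to one MTU-ish chunk
        self.buffer.resize(1500, 0u8);
        match self.half.read(&mut self.buffer).await {
            Ok(0) | Err(_) => Err(ProtocolError::Closed),
            Ok(n) => Ok(self.buffer.split_to(n)),
        }
    }
}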

View File

@ -0,0 +1,207 @@
use bitflags::bitflags;
use bytes::{Buf, BufMut, BytesMut};
use rand::Rng;
/// MessageID, unique ID per Message.
pub type Mid = u64;
/// ChannelID, unique ID per Channel (Protocol)
pub type Cid = u64;
/// Every Stream has a `Prio` and guaranteed [`Bandwidth`].
/// On every send, the guaranteed part is used first.
/// If there is still bandwidth left, it will be shared by all Streams with the
/// same priority. Prio 0 will be sent first, then 1, ... until the last prio 7
/// is sent. Prio must be < 8!
///
/// [`Bandwidth`]: crate::Bandwidth
pub type Prio = u8;
/// guaranteed `Bandwidth`. See [`Prio`]
///
/// [`Prio`]: crate::Prio
pub type Bandwidth = u64;
bitflags! {
/// Use `Promises` to modify the behavior of [`Streams`].
/// See the consts in this `struct` for the available options.
///
/// [`Streams`]: crate::api::Stream
pub struct Promises: u8 {
/// This will guarantee that the order of messages which are sent on one side
/// is the same when received on the other.
const ORDERED = 0b00000001;
/// This will guarantee that received messages haven't been altered by errors,
/// like bit flips; this is done with a checksum.
const CONSISTENCY = 0b00000010;
/// This will guarantee that the other side will receive every message exactly
/// once; no messages are dropped.
const GUARANTEED_DELIVERY = 0b00000100;
/// This will enable the internal compression on this
/// [`Stream`](crate::api::Stream), only usable with #[cfg(feature = "compression")].
const COMPRESSED = 0b00001000;
/// This will enable the internal encryption on this
/// [`Stream`](crate::api::Stream).
const ENCRYPTED = 0b00010000;
}
}
impl Promises {
pub const fn to_le_bytes(self) -> [u8; 1] { self.bits.to_le_bytes() }
}
pub(crate) const VELOREN_MAGIC_NUMBER: [u8; 7] = *b"VELOREN";
/// When this semver differs, 2 Networks can't communicate.
pub const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 5, 0];
pub(crate) const STREAM_ID_OFFSET1: Sid = Sid::new(0);
pub(crate) const STREAM_ID_OFFSET2: Sid = Sid::new(u64::MAX / 2);
/// Maximal possible Prio to choose (for performance reasons)
pub const HIGHEST_PRIO: u8 = 7;
/// Support struct used for uniquely identifying `Participant` over the
/// `Network`.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct Pid {
internal: u128,
}
/// Unique ID per Stream, in one Channel.
/// One side will always start with 0, while the other starts with u64::MAX / 2.
/// The number increases for each created Stream.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct Sid {
internal: u64,
}
impl Pid {
/// Create a new Pid with a random internal value
///
/// # Example
/// ```rust
/// use veloren_network_protocol::Pid;
///
/// let pid = Pid::new();
/// ```
pub fn new() -> Self {
Self {
internal: rand::thread_rng().gen(),
}
}
/// don't use fake! just for testing!
/// This will panic if pid_offset is greater than 7, as I do not want you to use
/// this in production!
#[doc(hidden)]
pub fn fake(pid_offset: u8) -> Self {
assert!(pid_offset < 8);
let o = pid_offset as u128;
const OFF: [u128; 5] = [
0x40,
0x40 * 0x40,
0x40 * 0x40 * 0x40,
0x40 * 0x40 * 0x40 * 0x40,
0x40 * 0x40 * 0x40 * 0x40 * 0x40,
];
Self {
internal: o + o * OFF[0] + o * OFF[1] + o * OFF[2] + o * OFF[3] + o * OFF[4],
}
}
#[inline]
pub(crate) fn from_bytes(bytes: &mut BytesMut) -> Self {
Self {
internal: bytes.get_u128_le(),
}
}
#[inline]
pub(crate) fn to_bytes(&self, bytes: &mut BytesMut) { bytes.put_u128_le(self.internal) }
}
impl Sid {
pub const fn new(internal: u64) -> Self { Self { internal } }
#[inline]
pub(crate) fn from_bytes(bytes: &mut BytesMut) -> Self {
Self {
internal: bytes.get_u64_le(),
}
}
#[inline]
pub(crate) fn to_bytes(&self, bytes: &mut BytesMut) { bytes.put_u64_le(self.internal) }
}
impl std::fmt::Debug for Pid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
const BITS_PER_SIXLET: usize = 6;
//only print last 6 chars of number as full u128 logs are unreadable
const CHAR_COUNT: usize = 6;
for i in 0..CHAR_COUNT {
write!(
f,
"{}",
sixlet_to_str((self.internal >> (i * BITS_PER_SIXLET)) & 0x3F)
)?;
}
Ok(())
}
}
impl Default for Pid {
fn default() -> Self { Pid::new() }
}
impl std::fmt::Display for Pid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) }
}
impl std::ops::AddAssign for Sid {
fn add_assign(&mut self, other: Self) {
*self = Self {
internal: self.internal + other.internal,
};
}
}
impl std::fmt::Debug for Sid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
//only print the last 6 digits of the number as full u64 logs are unreadable
write!(f, "{}", self.internal.rem_euclid(1000000))
}
}
impl From<u64> for Sid {
fn from(internal: u64) -> Self { Sid { internal } }
}
impl std::fmt::Display for Sid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.internal)
}
}
fn sixlet_to_str(sixlet: u128) -> char {
b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[sixlet as usize] as char
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn frame_creation() {
Pid::new();
assert_eq!(format!("{}", Pid::fake(0)), "AAAAAA");
assert_eq!(format!("{}", Pid::fake(1)), "BBBBBB");
assert_eq!(format!("{}", Pid::fake(2)), "CCCCCC");
}
#[test]
fn test_sixlet_to_str() {
assert_eq!(sixlet_to_str(0), 'A');
assert_eq!(sixlet_to_str(29), 'd');
assert_eq!(sixlet_to_str(63), '/');
}
}
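
A small illustrative snippet for the public pieces above: `Promises` combine as ordinary bitflags, and `Pid::new` draws a random id that `Display` shortens to six characters (the crate name follows the doc example in `Pid::new`):

use veloren_network_protocol::{Pid, Promises};

fn main() {
    let p = Promises::ORDERED | Promises::CONSISTENCY;
    assert!(p.contains(Promises::ORDERED));
    assert!(!p.contains(Promises::COMPRESSED));

    // random participant id; printed as a short base64-like string
    let pid = Pid::new();
    println!("local pid: {}", pid);
}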

View File

@ -0,0 +1,37 @@
// TODO: quick and dirty; actively waits for an ack!
/*
UDP protocol
All Good Case:
S --HEADER--> R
S --DATA--> R
S --DATA--> R
S <--FINISHED-- R
Delayed HEADER:
S --HEADER-->
S --DATA--> R // STORE IT
--HEADER--> R // apply left data and continue
S --DATA--> R
S <--FINISHED-- R
NO HEADER:
S --HEADER--> !
S --DATA--> R // STORE IT
S --DATA--> R // STORE IT
S <--MISSING_HEADER-- R // SEND AFTER 10 ms after DATA1
S --HEADER--> R
S <--FINISHED-- R
NO DATA:
S --HEADER--> R
S --DATA--> R
S --DATA--> !
S --STATUS--> R
S <--MISSING_DATA -- R
S --DATA--> R
S <--FINISHED-- R
*/
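
None of this is implemented yet; the comment only records the intended acknowledgement flow. As a rough sketch of the receiver-side buffering it implies (all names hypothetical), DATA that arrives before its HEADER would be stored and checked for completeness once the HEADER shows up:

use std::collections::HashMap;

// Hypothetical receiver state for the flow sketched above.
struct PendingMsg {
    length: Option<u64>,         // known once the HEADER arrived
    chunks: Vec<(u64, Vec<u8>)>, // (start offset, data), stored even without HEADER
}

fn on_header(pending: &mut HashMap<u64, PendingMsg>, mid: u64, length: u64) {
    pending
        .entry(mid)
        .or_insert_with(|| PendingMsg { length: None, chunks: vec![] })
        .length = Some(length);
}

fn on_data(pending: &mut HashMap<u64, PendingMsg>, mid: u64, start: u64, data: Vec<u8>) {
    pending
        .entry(mid)
        .or_insert_with(|| PendingMsg { length: None, chunks: vec![] })
        .chunks
        .push((start, data));
}

// Complete once all bytes are present; a missing HEADER would eventually
// trigger the MISSING_HEADER reply described above.
fn is_complete(msg: &PendingMsg) -> bool {
    match msg.length {
        None => false,
        Some(len) => msg.chunks.iter().map(|(_, d)| d.len() as u64).sum::<u64>() == len,
    }
}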

File diff suppressed because it is too large Load Diff

View File

@ -1,359 +1,272 @@
#[cfg(feature = "metrics")] use async_trait::async_trait;
use crate::metrics::NetworkMetrics; use bytes::BytesMut;
use crate::{ use network_protocol::{
participant::C2pFrame, Cid, InitProtocolError, MpscMsg, MpscRecvProtocol, MpscSendProtocol, Pid, ProtocolError,
protocols::Protocols, ProtocolEvent, ProtocolMetricCache, ProtocolMetrics, Sid, TcpRecvProtocol, TcpSendProtocol,
types::{ UnreliableDrain, UnreliableSink,
Cid, Frame, Pid, Sid, STREAM_ID_OFFSET1, STREAM_ID_OFFSET2, VELOREN_MAGIC_NUMBER,
VELOREN_NETWORK_VERSION,
},
}; };
use futures::{ use std::{sync::Arc, time::Duration};
channel::{mpsc, oneshot}, use tokio::{
join, io::{AsyncReadExt, AsyncWriteExt},
sink::SinkExt, net::tcp::{OwnedReadHalf, OwnedWriteHalf},
stream::StreamExt, sync::mpsc,
FutureExt,
}; };
#[cfg(feature = "metrics")] use std::sync::Arc;
use tracing::*;
pub(crate) struct Channel { #[derive(Debug)]
cid: Cid, pub(crate) enum Protocols {
c2w_frame_r: Option<mpsc::UnboundedReceiver<Frame>>, Tcp((TcpSendProtocol<TcpDrain>, TcpRecvProtocol<TcpSink>)),
read_stop_receiver: Option<oneshot::Receiver<()>>, Mpsc((MpscSendProtocol<MpscDrain>, MpscRecvProtocol<MpscSink>)),
}
impl Channel {
pub fn new(cid: u64) -> (Self, mpsc::UnboundedSender<Frame>, oneshot::Sender<()>) {
let (c2w_frame_s, c2w_frame_r) = mpsc::unbounded::<Frame>();
let (read_stop_sender, read_stop_receiver) = oneshot::channel();
(
Self {
cid,
c2w_frame_r: Some(c2w_frame_r),
read_stop_receiver: Some(read_stop_receiver),
},
c2w_frame_s,
read_stop_sender,
)
}
pub async fn run(
mut self,
protocol: Protocols,
mut w2c_cid_frame_s: mpsc::UnboundedSender<C2pFrame>,
mut leftover_cid_frame: Vec<C2pFrame>,
) {
let c2w_frame_r = self.c2w_frame_r.take().unwrap();
let read_stop_receiver = self.read_stop_receiver.take().unwrap();
//reapply leftovers from handshake
let cnt = leftover_cid_frame.len();
trace!(?cnt, "Reapplying leftovers");
for cid_frame in leftover_cid_frame.drain(..) {
w2c_cid_frame_s.send(cid_frame).await.unwrap();
}
trace!(?cnt, "All leftovers reapplied");
trace!("Start up channel");
match protocol {
Protocols::Tcp(tcp) => {
join!(
tcp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver),
tcp.write_to_wire(self.cid, c2w_frame_r),
);
},
Protocols::Udp(udp) => {
join!(
udp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver),
udp.write_to_wire(self.cid, c2w_frame_r),
);
},
}
trace!("Shut down channel");
}
} }
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct Handshake { pub(crate) enum SendProtocols {
cid: Cid, Tcp(TcpSendProtocol<TcpDrain>),
local_pid: Pid, Mpsc(MpscSendProtocol<MpscDrain>),
secret: u128,
init_handshake: bool,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>,
} }
impl Handshake { #[derive(Debug)]
#[cfg(debug_assertions)] pub(crate) enum RecvProtocols {
const WRONG_NUMBER: &'static [u8] = "Handshake does not contain the magic number required by \ Tcp(TcpRecvProtocol<TcpSink>),
veloren server.\nWe are not sure if you are a valid \ Mpsc(MpscRecvProtocol<MpscSink>),
veloren client.\nClosing the connection" }
.as_bytes();
#[cfg(debug_assertions)]
const WRONG_VERSION: &'static str = "Handshake does contain a correct magic number, but \
invalid version.\nWe don't know how to communicate with \
you.\nClosing the connection";
pub fn new( impl Protocols {
cid: u64, pub(crate) fn new_tcp(
stream: tokio::net::TcpStream,
cid: Cid,
metrics: Arc<ProtocolMetrics>,
) -> Self {
let (r, w) = stream.into_split();
let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);
let sp = TcpSendProtocol::new(TcpDrain { half: w }, metrics.clone());
let rp = TcpRecvProtocol::new(
TcpSink {
half: r,
buffer: BytesMut::new(),
},
metrics,
);
Protocols::Tcp((sp, rp))
}
pub(crate) fn new_mpsc(
sender: mpsc::Sender<MpscMsg>,
receiver: mpsc::Receiver<MpscMsg>,
cid: Cid,
metrics: Arc<ProtocolMetrics>,
) -> Self {
let metrics = ProtocolMetricCache::new(&cid.to_string(), metrics);
let sp = MpscSendProtocol::new(MpscDrain { sender }, metrics.clone());
let rp = MpscRecvProtocol::new(MpscSink { receiver }, metrics);
Protocols::Mpsc((sp, rp))
}
pub(crate) fn split(self) -> (SendProtocols, RecvProtocols) {
match self {
Protocols::Tcp((s, r)) => (SendProtocols::Tcp(s), RecvProtocols::Tcp(r)),
Protocols::Mpsc((s, r)) => (SendProtocols::Mpsc(s), RecvProtocols::Mpsc(r)),
}
}
}
#[async_trait]
impl network_protocol::InitProtocol for Protocols {
async fn initialize(
&mut self,
initializer: bool,
local_pid: Pid, local_pid: Pid,
secret: u128, secret: u128,
#[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>, ) -> Result<(Pid, Sid, u128), InitProtocolError> {
init_handshake: bool, match self {
) -> Self { Protocols::Tcp(p) => p.initialize(initializer, local_pid, secret).await,
Self { Protocols::Mpsc(p) => p.initialize(initializer, local_pid, secret).await,
cid,
local_pid,
secret,
#[cfg(feature = "metrics")]
metrics,
init_handshake,
} }
} }
}
pub async fn setup(self, protocol: &Protocols) -> Result<(Pid, Sid, u128, Vec<C2pFrame>), ()> {
let (c2w_frame_s, c2w_frame_r) = mpsc::unbounded::<Frame>(); #[async_trait]
let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<C2pFrame>(); impl network_protocol::SendProtocol for SendProtocols {
fn notify_from_recv(&mut self, event: ProtocolEvent) {
let (read_stop_sender, read_stop_receiver) = oneshot::channel(); match self {
let handler_future = SendProtocols::Tcp(s) => s.notify_from_recv(event),
self.frame_handler(&mut w2c_cid_frame_r, c2w_frame_s, read_stop_sender); SendProtocols::Mpsc(s) => s.notify_from_recv(event),
let res = match protocol { }
Protocols::Tcp(tcp) => { }
(join! {
tcp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver), async fn send(&mut self, event: ProtocolEvent) -> Result<(), ProtocolError> {
tcp.write_to_wire(self.cid, c2w_frame_r).fuse(), match self {
handler_future, SendProtocols::Tcp(s) => s.send(event).await,
}) SendProtocols::Mpsc(s) => s.send(event).await,
.2 }
}, }
Protocols::Udp(udp) => {
(join! { async fn flush(&mut self, bandwidth: u64, dt: Duration) -> Result<(), ProtocolError> {
udp.read_from_wire(self.cid, &mut w2c_cid_frame_s, read_stop_receiver), match self {
udp.write_to_wire(self.cid, c2w_frame_r), SendProtocols::Tcp(s) => s.flush(bandwidth, dt).await,
handler_future, SendProtocols::Mpsc(s) => s.flush(bandwidth, dt).await,
}) }
.2 }
}, }
};
#[async_trait]
match res { impl network_protocol::RecvProtocol for RecvProtocols {
Ok(res) => { async fn recv(&mut self) -> Result<ProtocolEvent, ProtocolError> {
let mut leftover_frames = vec![]; match self {
while let Ok(Some(cid_frame)) = w2c_cid_frame_r.try_next() { RecvProtocols::Tcp(r) => r.recv().await,
leftover_frames.push(cid_frame); RecvProtocols::Mpsc(r) => r.recv().await,
} }
let cnt = leftover_frames.len(); }
if cnt > 0 { }
debug!(
?cnt, ///////////////////////////////////////
"Some additional frames got already transferred, piping them to the \ //// TCP
bparticipant as leftover_frames" #[derive(Debug)]
); pub struct TcpDrain {
} half: OwnedWriteHalf,
Ok((res.0, res.1, res.2, leftover_frames)) }
},
Err(()) => Err(()), #[derive(Debug)]
} pub struct TcpSink {
} half: OwnedReadHalf,
buffer: BytesMut,
async fn frame_handler( }
&self,
w2c_cid_frame_r: &mut mpsc::UnboundedReceiver<C2pFrame>, #[async_trait]
mut c2w_frame_s: mpsc::UnboundedSender<Frame>, impl UnreliableDrain for TcpDrain {
read_stop_sender: oneshot::Sender<()>, type DataFormat = BytesMut;
) -> Result<(Pid, Sid, u128), ()> {
const ERR_S: &str = "Got A Raw Message, these are usually Debug Messages indicating that \ async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
something went wrong on network layer and connection will be closed"; match self.half.write_all(&data).await {
#[cfg(feature = "metrics")] Ok(()) => Ok(()),
let cid_string = self.cid.to_string(); Err(_) => Err(ProtocolError::Closed),
}
if self.init_handshake { }
self.send_handshake(&mut c2w_frame_s).await; }
}
#[async_trait]
let frame = w2c_cid_frame_r.next().await.map(|(_cid, frame)| frame); impl UnreliableSink for TcpSink {
#[cfg(feature = "metrics")] type DataFormat = BytesMut;
{
if let Some(Ok(ref frame)) = frame { async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
self.metrics self.buffer.resize(1500, 0u8);
.frames_in_total match self.half.read(&mut self.buffer).await {
.with_label_values(&[&cid_string, &frame.get_string()]) Ok(0) => Err(ProtocolError::Closed),
.inc(); Ok(n) => Ok(self.buffer.split_to(n)),
} Err(_) => Err(ProtocolError::Closed),
} }
let r = match frame { }
Some(Ok(Frame::Handshake { }
magic_number,
version, ///////////////////////////////////////
})) => { //// MPSC
trace!(?magic_number, ?version, "Recv handshake"); #[derive(Debug)]
if magic_number != VELOREN_MAGIC_NUMBER { pub struct MpscDrain {
error!(?magic_number, "Connection with invalid magic_number"); sender: tokio::sync::mpsc::Sender<MpscMsg>,
#[cfg(debug_assertions)] }
self.send_raw_and_shutdown(&mut c2w_frame_s, Self::WRONG_NUMBER.to_vec())
.await; #[derive(Debug)]
Err(()) pub struct MpscSink {
} else if version != VELOREN_NETWORK_VERSION { receiver: tokio::sync::mpsc::Receiver<MpscMsg>,
error!(?version, "Connection with wrong network version"); }
#[cfg(debug_assertions)]
self.send_raw_and_shutdown( #[async_trait]
&mut c2w_frame_s, impl UnreliableDrain for MpscDrain {
format!( type DataFormat = MpscMsg;
"{} Our Version: {:?}\nYour Version: {:?}\nClosing the connection",
Self::WRONG_VERSION, async fn send(&mut self, data: Self::DataFormat) -> Result<(), ProtocolError> {
VELOREN_NETWORK_VERSION, self.sender
version, .send(data)
) .await
.as_bytes() .map_err(|_| ProtocolError::Closed)
.to_vec(), }
) }
.await;
Err(()) #[async_trait]
} else { impl UnreliableSink for MpscSink {
debug!("Handshake completed"); type DataFormat = MpscMsg;
if self.init_handshake {
self.send_init(&mut c2w_frame_s).await; async fn recv(&mut self) -> Result<Self::DataFormat, ProtocolError> {
} else { self.receiver.recv().await.ok_or(ProtocolError::Closed)
self.send_handshake(&mut c2w_frame_s).await; }
} }
Ok(())
} #[cfg(test)]
}, mod tests {
Some(Ok(frame)) => { use super::*;
#[cfg(feature = "metrics")] use bytes::Bytes;
self.metrics use network_protocol::{Promises, RecvProtocol, SendProtocol};
.frames_in_total use tokio::net::{TcpListener, TcpStream};
.with_label_values(&[&cid_string, frame.get_string()])
.inc(); #[tokio::test]
if let Frame::Raw(bytes) = frame { async fn tokio_sinks() {
match std::str::from_utf8(bytes.as_slice()) { let listener = TcpListener::bind("127.0.0.1:5000").await.unwrap();
Ok(string) => error!(?string, ERR_S), let r1 = tokio::spawn(async move {
_ => error!(?bytes, ERR_S), let (server, _) = listener.accept().await.unwrap();
} (listener, server)
} });
Err(()) let client = TcpStream::connect("127.0.0.1:5000").await.unwrap();
}, let (_listener, server) = r1.await.unwrap();
Some(Err(())) => { let metrics = Arc::new(ProtocolMetrics::new().unwrap());
info!("Protocol got interrupted"); let client = Protocols::new_tcp(client, 0, Arc::clone(&metrics));
Err(()) let server = Protocols::new_tcp(server, 0, Arc::clone(&metrics));
}, let (mut s, _) = client.split();
None => Err(()), let (_, mut r) = server.split();
}; let event = ProtocolEvent::OpenStream {
if let Err(()) = r { sid: Sid::new(1),
if let Err(e) = read_stop_sender.send(()) { prio: 4u8,
trace!( promises: Promises::GUARANTEED_DELIVERY,
?e, guaranteed_bandwidth: 1_000,
"couldn't stop protocol, probably it encountered a Protocol Stop and closed \ };
itself already, which is fine" s.send(event.clone()).await.unwrap();
); s.send(ProtocolEvent::Message {
} sid: Sid::new(1),
return Err(()); mid: 0,
} data: Bytes::from(&[8u8; 8][..]),
})
let frame = w2c_cid_frame_r.next().await.map(|(_cid, frame)| frame); .await
let r = match frame { .unwrap();
Some(Ok(Frame::Init { pid, secret })) => { s.flush(1_000_000, Duration::from_secs(1)).await.unwrap();
debug!(?pid, "Participant send their ID"); drop(s); // recv must work even after shutdown of send!
#[cfg(feature = "metrics")] tokio::time::sleep(Duration::from_secs(1)).await;
self.metrics let res = r.recv().await;
.frames_in_total match res {
.with_label_values(&[&cid_string, "ParticipantId"]) Ok(ProtocolEvent::OpenStream {
.inc(); sid,
let stream_id_offset = if self.init_handshake { prio,
STREAM_ID_OFFSET1 promises,
} else { guaranteed_bandwidth: _,
self.send_init(&mut c2w_frame_s).await; }) => {
STREAM_ID_OFFSET2 assert_eq!(sid, Sid::new(1));
}; assert_eq!(prio, 4u8);
info!(?pid, "This Handshake is now configured!"); assert_eq!(promises, Promises::GUARANTEED_DELIVERY);
Ok((pid, stream_id_offset, secret)) },
}, _ => {
Some(Ok(frame)) => { panic!("wrong type {:?}", res);
#[cfg(feature = "metrics")] },
self.metrics }
.frames_in_total r.recv().await.unwrap();
.with_label_values(&[&cid_string, frame.get_string()]) }
.inc();
if let Frame::Raw(bytes) = frame { #[tokio::test]
match std::str::from_utf8(bytes.as_slice()) { async fn tokio_sink_stop_after_drop() {
Ok(string) => error!(?string, ERR_S), let listener = TcpListener::bind("127.0.0.1:5001").await.unwrap();
_ => error!(?bytes, ERR_S), let r1 = tokio::spawn(async move {
} let (server, _) = listener.accept().await.unwrap();
} (listener, server)
Err(()) });
}, let client = TcpStream::connect("127.0.0.1:5001").await.unwrap();
Some(Err(())) => { let (_listener, server) = r1.await.unwrap();
info!("Protocol got interrupted"); let metrics = Arc::new(ProtocolMetrics::new().unwrap());
Err(()) let client = Protocols::new_tcp(client, 0, Arc::clone(&metrics));
}, let server = Protocols::new_tcp(server, 0, Arc::clone(&metrics));
None => Err(()), let (s, _) = client.split();
}; let (_, mut r) = server.split();
if r.is_err() { let e = tokio::spawn(async move { r.recv().await });
if let Err(e) = read_stop_sender.send(()) { drop(s);
trace!( let e = e.await.unwrap();
?e, assert!(e.is_err());
"couldn't stop protocol, probably it encountered a Protocol Stop and closed \ assert_eq!(e.unwrap_err(), ProtocolError::Closed);
itself already, which is fine" }
);
}
}
r
}
async fn send_handshake(&self, c2w_frame_s: &mut mpsc::UnboundedSender<Frame>) {
#[cfg(feature = "metrics")]
self.metrics
.frames_out_total
.with_label_values(&[&self.cid.to_string(), "Handshake"])
.inc();
c2w_frame_s
.send(Frame::Handshake {
magic_number: VELOREN_MAGIC_NUMBER,
version: VELOREN_NETWORK_VERSION,
})
.await
.unwrap();
}
async fn send_init(&self, c2w_frame_s: &mut mpsc::UnboundedSender<Frame>) {
#[cfg(feature = "metrics")]
self.metrics
.frames_out_total
.with_label_values(&[&self.cid.to_string(), "ParticipantId"])
.inc();
c2w_frame_s
.send(Frame::Init {
pid: self.local_pid,
secret: self.secret,
})
.await
.unwrap();
}
#[cfg(debug_assertions)]
async fn send_raw_and_shutdown(
&self,
c2w_frame_s: &mut mpsc::UnboundedSender<Frame>,
data: Vec<u8>,
) {
debug!("Sending client instructions before killing");
#[cfg(feature = "metrics")]
{
let cid_string = self.cid.to_string();
self.metrics
.frames_out_total
.with_label_values(&[&cid_string, "Raw"])
.inc();
self.metrics
.frames_out_total
.with_label_values(&[&cid_string, "Shutdown"])
.inc();
}
c2w_frame_s.send(Frame::Raw(data)).await.unwrap();
c2w_frame_s.send(Frame::Shutdown).await.unwrap();
}
} }
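
For in-process connections the same enum is backed by tokio mpsc channels instead of a socket. A rough in-crate sketch of wiring a loopback pair of `Protocols::new_mpsc` endpoints (channel capacity and cids are arbitrary here):

use std::sync::Arc;
use network_protocol::ProtocolMetrics;
use tokio::sync::mpsc;

// Sketch: two crossed channels give two Protocols that talk to each other.
fn mpsc_pair(metrics: Arc<ProtocolMetrics>) -> (Protocols, Protocols) {
    let (s1, r1) = mpsc::channel(1000);
    let (s2, r2) = mpsc::channel(1000);
    (
        Protocols::new_mpsc(s1, r2, 0, Arc::clone(&metrics)),
        Protocols::new_mpsc(s2, r1, 1, metrics),
    )
}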

View File

@ -39,29 +39,27 @@
//! //!
//! # Examples //! # Examples
//! ```rust //! ```rust
//! use async_std::task::sleep; //! use std::sync::Arc;
//! use futures::{executor::block_on, join}; //! use tokio::{join, runtime::Runtime, time::sleep};
//! use veloren_network::{Network, Pid, Promises, ProtocolAddr}; //! use veloren_network::{Network, Pid, Promises, ProtocolAddr};
//! //!
//! // Client //! // Client
//! async fn client() -> std::result::Result<(), Box<dyn std::error::Error>> { //! async fn client(runtime: Arc<Runtime>) -> std::result::Result<(), Box<dyn std::error::Error>> {
//! sleep(std::time::Duration::from_secs(1)).await; // `connect` MUST be after `listen` //! sleep(std::time::Duration::from_secs(1)).await; // `connect` MUST be after `listen`
//! let (client_network, f) = Network::new(Pid::new()); //! let client_network = Network::new(Pid::new(), runtime);
//! std::thread::spawn(f);
//! let server = client_network //! let server = client_network
//! .connect(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap())) //! .connect(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .await?; //! .await?;
//! let mut stream = server //! let mut stream = server
//! .open(10, Promises::ORDERED | Promises::CONSISTENCY) //! .open(4, Promises::ORDERED | Promises::CONSISTENCY)
//! .await?; //! .await?;
//! stream.send("Hello World")?; //! stream.send("Hello World")?;
//! Ok(()) //! Ok(())
//! } //! }
//! //!
//! // Server //! // Server
//! async fn server() -> std::result::Result<(), Box<dyn std::error::Error>> { //! async fn server(runtime: Arc<Runtime>) -> std::result::Result<(), Box<dyn std::error::Error>> {
//! let (server_network, f) = Network::new(Pid::new()); //! let server_network = Network::new(Pid::new(), runtime);
//! std::thread::spawn(f);
//! server_network //! server_network
//! .listen(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap())) //! .listen(ProtocolAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
//! .await?; //! .await?;
@ -74,8 +72,10 @@
//! } //! }
//! //!
//! fn main() -> std::result::Result<(), Box<dyn std::error::Error>> { //! fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
//! block_on(async { //! let runtime = Arc::new(Runtime::new().unwrap());
//! let (result_c, result_s) = join!(client(), server(),); //! runtime.block_on(async {
//! let (result_c, result_s) =
//! join!(client(Arc::clone(&runtime)), server(Arc::clone(&runtime)),);
//! result_c?; //! result_c?;
//! result_s?; //! result_s?;
//! Ok(()) //! Ok(())
@ -95,23 +95,19 @@
//! [`Streams`]: crate::api::Stream //! [`Streams`]: crate::api::Stream
//! [`send`]: crate::api::Stream::send //! [`send`]: crate::api::Stream::send
//! [`recv`]: crate::api::Stream::recv //! [`recv`]: crate::api::Stream::recv
//! [`Pid`]: crate::types::Pid //! [`Pid`]: network_protocol::Pid
//! [`ProtocolAddr`]: crate::api::ProtocolAddr //! [`ProtocolAddr`]: crate::api::ProtocolAddr
//! [`Promises`]: crate::types::Promises //! [`Promises`]: network_protocol::Promises
mod api; mod api;
mod channel; mod channel;
mod message; mod message;
#[cfg(feature = "metrics")] mod metrics; mod metrics;
mod participant; mod participant;
mod prios;
mod protocols;
mod scheduler; mod scheduler;
#[macro_use]
mod types;
pub use api::{ pub use api::{
Network, NetworkError, Participant, ParticipantError, ProtocolAddr, Stream, StreamError, Network, NetworkError, Participant, ParticipantError, ProtocolAddr, Stream, StreamError,
}; };
pub use message::Message; pub use message::Message;
pub use types::{Pid, Promises}; pub use network_protocol::{Pid, Promises};

View File

@ -1,12 +1,9 @@
use serde::{de::DeserializeOwned, Serialize}; use crate::api::{Stream, StreamError};
//use std::collections::VecDeque; use bytes::Bytes;
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
use crate::types::Promises; use network_protocol::Promises;
use crate::{ use serde::{de::DeserializeOwned, Serialize};
api::{Stream, StreamError}, use std::io;
types::{Frame, Mid, Sid},
};
use std::{io, sync::Arc};
#[cfg(all(feature = "compression", debug_assertions))] #[cfg(all(feature = "compression", debug_assertions))]
use tracing::warn; use tracing::warn;
@ -18,34 +15,11 @@ use tracing::warn;
/// [`Stream`]: crate::api::Stream /// [`Stream`]: crate::api::Stream
/// [`send_raw`]: crate::api::Stream::send_raw /// [`send_raw`]: crate::api::Stream::send_raw
pub struct Message { pub struct Message {
pub(crate) buffer: Arc<MessageBuffer>, pub(crate) data: Bytes,
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
pub(crate) compressed: bool, pub(crate) compressed: bool,
} }
//Todo: Evaluate switching to VecDeque for quickly adding and removing data
// from front, back.
// - It would prob require custom bincode code but thats possible.
pub(crate) struct MessageBuffer {
pub data: Vec<u8>,
}
#[derive(Debug)]
pub(crate) struct OutgoingMessage {
pub buffer: Arc<MessageBuffer>,
pub cursor: u64,
pub mid: Mid,
pub sid: Sid,
}
#[derive(Debug)]
pub(crate) struct IncomingMessage {
pub buffer: MessageBuffer,
pub length: u64,
pub mid: Mid,
pub sid: Sid,
}
impl Message { impl Message {
/// This serializes any message, according to the [`Streams`] [`Promises`]. /// This serializes any message, according to the [`Streams`] [`Promises`].
/// You can reuse this `Message` and send it via other [`Streams`], if the /// You can reuse this `Message` and send it via other [`Streams`], if the
@ -83,7 +57,7 @@ impl Message {
let _stream = stream; let _stream = stream;
Self { Self {
buffer: Arc::new(MessageBuffer { data }), data: Bytes::from(data),
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
compressed, compressed,
} }
@ -98,18 +72,18 @@ impl Message {
/// ``` /// ```
/// # use veloren_network::{Network, ProtocolAddr, Pid}; /// # use veloren_network::{Network, ProtocolAddr, Pid};
/// # use veloren_network::Promises; /// # use veloren_network::Promises;
/// # use futures::executor::block_on; /// # use tokio::runtime::Runtime;
/// # use std::sync::Arc;
/// ///
/// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> { /// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
/// // Create a Network, listen on Port `2300` and wait for a Stream to be opened, then listen on it /// // Create a Network, listen on Port `2300` and wait for a Stream to be opened, then listen on it
/// # let (network, f) = Network::new(Pid::new()); /// # let runtime = Arc::new(Runtime::new().unwrap());
/// # std::thread::spawn(f); /// # let network = Network::new(Pid::new(), Arc::clone(&runtime));
/// # let (remote, fr) = Network::new(Pid::new()); /// # let remote = Network::new(Pid::new(), Arc::clone(&runtime));
/// # std::thread::spawn(fr); /// # runtime.block_on(async {
/// # block_on(async {
/// # network.listen(ProtocolAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?; /// # network.listen(ProtocolAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?;
/// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?; /// # let remote_p = remote.connect(ProtocolAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?;
/// # let mut stream_p = remote_p.open(16, Promises::ORDERED | Promises::CONSISTENCY).await?; /// # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY).await?;
/// # stream_p.send("Hello World"); /// # stream_p.send("Hello World");
/// # let participant_a = network.connected().await?; /// # let participant_a = network.connected().await?;
/// let mut stream_a = participant_a.opened().await?; /// let mut stream_a = participant_a.opened().await?;
@ -124,33 +98,27 @@ impl Message {
/// [`recv_raw`]: crate::api::Stream::recv_raw /// [`recv_raw`]: crate::api::Stream::recv_raw
pub fn deserialize<M: DeserializeOwned>(self) -> Result<M, StreamError> { pub fn deserialize<M: DeserializeOwned>(self) -> Result<M, StreamError> {
#[cfg(not(feature = "compression"))] #[cfg(not(feature = "compression"))]
let uncompressed_data = match Arc::try_unwrap(self.buffer) { let uncompressed_data = self.data;
Ok(d) => d.data,
Err(b) => b.data.clone(),
};
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
let uncompressed_data = if self.compressed { let uncompressed_data = if self.compressed {
{ {
let mut uncompressed_data = Vec::with_capacity(self.buffer.data.len() * 2); let mut uncompressed_data = Vec::with_capacity(self.data.len() * 2);
if let Err(e) = lz_fear::raw::decompress_raw( if let Err(e) = lz_fear::raw::decompress_raw(
&self.buffer.data, &self.data,
&[0; 0], &[0; 0],
&mut uncompressed_data, &mut uncompressed_data,
usize::MAX, usize::MAX,
) { ) {
return Err(StreamError::Compression(e)); return Err(StreamError::Compression(e));
} }
uncompressed_data Bytes::from(uncompressed_data)
} }
} else { } else {
match Arc::try_unwrap(self.buffer) { self.data
Ok(d) => d.data,
Err(b) => b.data.clone(),
}
}; };
match bincode::deserialize(uncompressed_data.as_slice()) { match bincode::deserialize(&uncompressed_data) {
Ok(m) => Ok(m), Ok(m) => Ok(m),
Err(e) => Err(StreamError::Deserialize(e)), Err(e) => Err(StreamError::Deserialize(e)),
} }
@ -170,38 +138,6 @@ impl Message {
} }
} }
impl OutgoingMessage {
pub(crate) const FRAME_DATA_SIZE: u64 = 1400;
/// returns if msg is empty
pub(crate) fn fill_next<E: Extend<(Sid, Frame)>>(
&mut self,
msg_sid: Sid,
frames: &mut E,
) -> bool {
let to_send = std::cmp::min(
self.buffer.data[self.cursor as usize..].len() as u64,
Self::FRAME_DATA_SIZE,
);
if to_send > 0 {
if self.cursor == 0 {
frames.extend(std::iter::once((msg_sid, Frame::DataHeader {
mid: self.mid,
sid: self.sid,
length: self.buffer.data.len() as u64,
})));
}
frames.extend(std::iter::once((msg_sid, Frame::Data {
mid: self.mid,
start: self.cursor,
data: self.buffer.data[self.cursor as usize..][..to_send as usize].to_vec(),
})));
};
self.cursor += to_send;
self.cursor >= self.buffer.data.len() as u64
}
}
///wouldn't trust this aaaassss much, fine for tests ///wouldn't trust this aaaassss much, fine for tests
pub(crate) fn partial_eq_io_error(first: &io::Error, second: &io::Error) -> bool { pub(crate) fn partial_eq_io_error(first: &io::Error, second: &io::Error) -> bool {
if let Some(f) = first.raw_os_error() { if let Some(f) = first.raw_os_error() {
@ -231,36 +167,15 @@ pub(crate) fn partial_eq_bincode(first: &bincode::ErrorKind, second: &bincode::E
} }
} }
impl std::fmt::Debug for MessageBuffer {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
//TODO: small messages!
let len = self.data.len();
if len > 20 {
write!(
f,
"MessageBuffer(len: {}, {}, {}, {}, {:X?}..{:X?})",
len,
u32::from_le_bytes([self.data[0], self.data[1], self.data[2], self.data[3]]),
u32::from_le_bytes([self.data[4], self.data[5], self.data[6], self.data[7]]),
u32::from_le_bytes([self.data[8], self.data[9], self.data[10], self.data[11]]),
&self.data[13..16],
&self.data[len - 8..len]
)
} else {
write!(f, "MessageBuffer(len: {}, {:?})", len, &self.data[..])
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::{api::Stream, message::*}; use crate::{api::Stream, message::*};
use futures::channel::mpsc;
use std::sync::{atomic::AtomicBool, Arc}; use std::sync::{atomic::AtomicBool, Arc};
use tokio::sync::mpsc;
fn stub_stream(compressed: bool) -> Stream { fn stub_stream(compressed: bool) -> Stream {
use crate::{api::*, types::*}; use crate::api::*;
use network_protocol::*;
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
let promises = if compressed { let promises = if compressed {
@ -273,14 +188,16 @@ mod tests {
let promises = Promises::empty(); let promises = Promises::empty();
let (a2b_msg_s, _a2b_msg_r) = crossbeam_channel::unbounded(); let (a2b_msg_s, _a2b_msg_r) = crossbeam_channel::unbounded();
let (_b2a_msg_recv_s, b2a_msg_recv_r) = mpsc::unbounded(); let (_b2a_msg_recv_s, b2a_msg_recv_r) = async_channel::unbounded();
let (a2b_close_stream_s, _a2b_close_stream_r) = mpsc::unbounded(); let (a2b_close_stream_s, _a2b_close_stream_r) = mpsc::unbounded_channel();
Stream::new( Stream::new(
Pid::fake(0), Pid::fake(0),
Pid::fake(1),
Sid::new(0), Sid::new(0),
0u8, 0u8,
promises, promises,
1_000_000,
Arc::new(AtomicBool::new(true)), Arc::new(AtomicBool::new(true)),
a2b_msg_s, a2b_msg_s,
b2a_msg_recv_r, b2a_msg_recv_r,
@ -291,25 +208,25 @@ mod tests {
#[test] #[test]
fn serialize_test() { fn serialize_test() {
let msg = Message::serialize("abc", &stub_stream(false)); let msg = Message::serialize("abc", &stub_stream(false));
assert_eq!(msg.buffer.data.len(), 11); assert_eq!(msg.data.len(), 11);
assert_eq!(msg.buffer.data[0], 3); assert_eq!(msg.data[0], 3);
assert_eq!(msg.buffer.data[1..7], [0, 0, 0, 0, 0, 0]); assert_eq!(msg.data[1..7], [0, 0, 0, 0, 0, 0]);
assert_eq!(msg.buffer.data[8], b'a'); assert_eq!(msg.data[8], b'a');
assert_eq!(msg.buffer.data[9], b'b'); assert_eq!(msg.data[9], b'b');
assert_eq!(msg.buffer.data[10], b'c'); assert_eq!(msg.data[10], b'c');
} }
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
#[test] #[test]
fn serialize_compress_small() { fn serialize_compress_small() {
let msg = Message::serialize("abc", &stub_stream(true)); let msg = Message::serialize("abc", &stub_stream(true));
assert_eq!(msg.buffer.data.len(), 12); assert_eq!(msg.data.len(), 12);
assert_eq!(msg.buffer.data[0], 176); assert_eq!(msg.data[0], 176);
assert_eq!(msg.buffer.data[1], 3); assert_eq!(msg.data[1], 3);
assert_eq!(msg.buffer.data[2..8], [0, 0, 0, 0, 0, 0]); assert_eq!(msg.data[2..8], [0, 0, 0, 0, 0, 0]);
assert_eq!(msg.buffer.data[9], b'a'); assert_eq!(msg.data[9], b'a');
assert_eq!(msg.buffer.data[10], b'b'); assert_eq!(msg.data[10], b'b');
assert_eq!(msg.buffer.data[11], b'c'); assert_eq!(msg.data[11], b'c');
} }
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
@ -327,14 +244,14 @@ mod tests {
"assets/data/plants/flowers/greenrose.ron", "assets/data/plants/flowers/greenrose.ron",
); );
let msg = Message::serialize(&msg, &stub_stream(true)); let msg = Message::serialize(&msg, &stub_stream(true));
assert_eq!(msg.buffer.data.len(), 79); assert_eq!(msg.data.len(), 79);
assert_eq!(msg.buffer.data[0], 34); assert_eq!(msg.data[0], 34);
assert_eq!(msg.buffer.data[1], 5); assert_eq!(msg.data[1], 5);
assert_eq!(msg.buffer.data[2], 0); assert_eq!(msg.data[2], 0);
assert_eq!(msg.buffer.data[3], 1); assert_eq!(msg.data[3], 1);
assert_eq!(msg.buffer.data[20], 20); assert_eq!(msg.data[20], 20);
assert_eq!(msg.buffer.data[40], 115); assert_eq!(msg.data[40], 115);
assert_eq!(msg.buffer.data[60], 111); assert_eq!(msg.data[60], 111);
} }
#[cfg(feature = "compression")] #[cfg(feature = "compression")]
@ -357,6 +274,6 @@ mod tests {
} }
} }
let msg = Message::serialize(&msg, &stub_stream(true)); let msg = Message::serialize(&msg, &stub_stream(true));
assert_eq!(msg.buffer.data.len(), 1331); assert_eq!(msg.data.len(), 1331);
} }
} }
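
For reference, the `compression` feature delegates to lz_fear's raw LZ4 mode; the decompression branch above boils down to roughly this standalone form (a sketch using only the call visible in the diff):

// Sketch of the decompression step used by Message::deserialize when the
// stream was opened with Promises::COMPRESSED.
fn decompress(data: &[u8]) -> Option<Vec<u8>> {
    let mut out = Vec::with_capacity(data.len() * 2);
    lz_fear::raw::decompress_raw(data, &[0; 0], &mut out, usize::MAX).ok()?;
    Some(out)
}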

View File

@ -1,16 +1,10 @@
use crate::types::{Cid, Frame, Pid}; use network_protocol::{Cid, Pid};
use prometheus::{ #[cfg(feature = "metrics")]
core::{AtomicU64, GenericCounter}, use prometheus::{IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Opts, Registry};
IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Opts, Registry,
};
use std::error::Error; use std::error::Error;
use tracing::*;
/// 1:1 relation between NetworkMetrics and Network /// 1:1 relation between NetworkMetrics and Network
/// use 2NF here and avoid redundant data like CHANNEL AND PARTICIPANT encoding. #[cfg(feature = "metrics")]
/// as this will cause a matrix that is full of 0 but needs alot of bandwith and
/// storage
#[allow(dead_code)]
pub struct NetworkMetrics { pub struct NetworkMetrics {
pub listen_requests_total: IntCounterVec, pub listen_requests_total: IntCounterVec,
pub connect_requests_total: IntCounterVec, pub connect_requests_total: IntCounterVec,
@ -25,33 +19,13 @@ pub struct NetworkMetrics {
pub streams_opened_total: IntCounterVec, pub streams_opened_total: IntCounterVec,
pub streams_closed_total: IntCounterVec, pub streams_closed_total: IntCounterVec,
pub network_info: IntGauge, pub network_info: IntGauge,
// Frames counted a channel level, seperated by CHANNEL (and PARTICIPANT) AND FRAME TYPE,
pub frames_out_total: IntCounterVec,
pub frames_in_total: IntCounterVec,
// Frames counted at protocol level, seperated by CHANNEL (and PARTICIPANT) AND FRAME TYPE,
pub frames_wire_out_total: IntCounterVec,
pub frames_wire_in_total: IntCounterVec,
// throughput at protocol level, seperated by CHANNEL (and PARTICIPANT),
pub wire_out_throughput: IntCounterVec,
pub wire_in_throughput: IntCounterVec,
// send(prio) Messages count, seperated by STREAM AND PARTICIPANT,
pub message_out_total: IntCounterVec,
// send(prio) Messages throughput, seperated by STREAM AND PARTICIPANT,
pub message_out_throughput: IntCounterVec,
// flushed(prio) stream count, seperated by PARTICIPANT,
pub streams_flushed: IntCounterVec,
// TODO: queued Messages, seperated by STREAM (add PART, CHANNEL),
// queued Messages, seperated by PARTICIPANT
pub queued_count: IntGaugeVec,
// TODO: queued Messages bytes, seperated by STREAM (add PART, CHANNEL),
// queued Messages bytes, seperated by PARTICIPANT
pub queued_bytes: IntGaugeVec,
// ping calculated based on last msg seperated by PARTICIPANT
pub participants_ping: IntGaugeVec,
} }
#[cfg(not(feature = "metrics"))]
pub struct NetworkMetrics {}
#[cfg(feature = "metrics")]
impl NetworkMetrics { impl NetworkMetrics {
#[allow(dead_code)]
pub fn new(local_pid: &Pid) -> Result<Self, Box<dyn Error>> { pub fn new(local_pid: &Pid) -> Result<Self, Box<dyn Error>> {
let listen_requests_total = IntCounterVec::new( let listen_requests_total = IntCounterVec::new(
Opts::new( Opts::new(
@ -115,99 +89,13 @@ impl NetworkMetrics {
"version", "version",
&format!( &format!(
"{}.{}.{}", "{}.{}.{}",
&crate::types::VELOREN_NETWORK_VERSION[0], &network_protocol::VELOREN_NETWORK_VERSION[0],
&crate::types::VELOREN_NETWORK_VERSION[1], &network_protocol::VELOREN_NETWORK_VERSION[1],
&crate::types::VELOREN_NETWORK_VERSION[2] &network_protocol::VELOREN_NETWORK_VERSION[2]
), ),
) )
.const_label("local_pid", &format!("{}", &local_pid)); .const_label("local_pid", &format!("{}", &local_pid));
let network_info = IntGauge::with_opts(opts)?; let network_info = IntGauge::with_opts(opts)?;
let frames_out_total = IntCounterVec::new(
Opts::new(
"frames_out_total",
"Number of all frames send per channel, at the channel level",
),
&["channel", "frametype"],
)?;
let frames_in_total = IntCounterVec::new(
Opts::new(
"frames_in_total",
"Number of all frames received per channel, at the channel level",
),
&["channel", "frametype"],
)?;
let frames_wire_out_total = IntCounterVec::new(
Opts::new(
"frames_wire_out_total",
"Number of all frames send per channel, at the protocol level",
),
&["channel", "frametype"],
)?;
let frames_wire_in_total = IntCounterVec::new(
Opts::new(
"frames_wire_in_total",
"Number of all frames received per channel, at the protocol level",
),
&["channel", "frametype"],
)?;
let wire_out_throughput = IntCounterVec::new(
Opts::new(
"wire_out_throughput",
"Throupgput of all data frames send per channel, at the protocol level",
),
&["channel"],
)?;
let wire_in_throughput = IntCounterVec::new(
Opts::new(
"wire_in_throughput",
"Throupgput of all data frames send per channel, at the protocol level",
),
&["channel"],
)?;
//TODO IN
let message_out_total = IntCounterVec::new(
Opts::new(
"message_out_total",
"Number of messages send by streams on the network",
),
&["participant", "stream"],
)?;
//TODO IN
let message_out_throughput = IntCounterVec::new(
Opts::new(
"message_out_throughput",
"Throughput of messages send by streams on the network",
),
&["participant", "stream"],
)?;
let streams_flushed = IntCounterVec::new(
Opts::new(
"stream_flushed",
"Number of flushed streams requested to PrioManager at participant level",
),
&["participant"],
)?;
let queued_count = IntGaugeVec::new(
Opts::new(
"queued_count",
"Queued number of messages by participant on the network",
),
&["channel"],
)?;
let queued_bytes = IntGaugeVec::new(
Opts::new(
"queued_bytes",
"Queued bytes of messages by participant on the network",
),
&["channel"],
)?;
let participants_ping = IntGaugeVec::new(
Opts::new(
"participants_ping",
"Ping time to participants on the network",
),
&["channel"],
)?;
Ok(Self { Ok(Self {
listen_requests_total, listen_requests_total,
@ -220,18 +108,6 @@ impl NetworkMetrics {
streams_opened_total, streams_opened_total,
streams_closed_total, streams_closed_total,
network_info, network_info,
frames_out_total,
frames_in_total,
frames_wire_out_total,
frames_wire_in_total,
wire_out_throughput,
wire_in_throughput,
message_out_total,
message_out_throughput,
streams_flushed,
queued_count,
queued_bytes,
participants_ping,
}) })
} }
@ -246,22 +122,48 @@ impl NetworkMetrics {
registry.register(Box::new(self.streams_opened_total.clone()))?; registry.register(Box::new(self.streams_opened_total.clone()))?;
registry.register(Box::new(self.streams_closed_total.clone()))?; registry.register(Box::new(self.streams_closed_total.clone()))?;
registry.register(Box::new(self.network_info.clone()))?; registry.register(Box::new(self.network_info.clone()))?;
registry.register(Box::new(self.frames_out_total.clone()))?;
registry.register(Box::new(self.frames_in_total.clone()))?;
registry.register(Box::new(self.frames_wire_out_total.clone()))?;
registry.register(Box::new(self.frames_wire_in_total.clone()))?;
registry.register(Box::new(self.wire_out_throughput.clone()))?;
registry.register(Box::new(self.wire_in_throughput.clone()))?;
registry.register(Box::new(self.message_out_total.clone()))?;
registry.register(Box::new(self.message_out_throughput.clone()))?;
registry.register(Box::new(self.queued_count.clone()))?;
registry.register(Box::new(self.queued_bytes.clone()))?;
registry.register(Box::new(self.participants_ping.clone()))?;
Ok(()) Ok(())
} }
//pub fn _is_100th_tick(&self) -> bool { pub(crate) fn channels_connected(&self, remote_p: &str, no: usize, cid: Cid) {
// self.tick.load(Ordering::Relaxed).rem_euclid(100) == 0 } self.channels_connected_total
.with_label_values(&[remote_p])
.inc();
self.participants_channel_ids
.with_label_values(&[remote_p, &no.to_string()])
.set(cid as i64);
}
pub(crate) fn channels_disconnected(&self, remote_p: &str) {
self.channels_disconnected_total
.with_label_values(&[remote_p])
.inc();
}
pub(crate) fn streams_opened(&self, remote_p: &str) {
self.streams_opened_total
.with_label_values(&[remote_p])
.inc();
}
pub(crate) fn streams_closed(&self, remote_p: &str) {
self.streams_closed_total
.with_label_values(&[remote_p])
.inc();
}
}
#[cfg(not(feature = "metrics"))]
impl NetworkMetrics {
pub fn new(_local_pid: &Pid) -> Result<Self, Box<dyn Error>> { Ok(Self {}) }
pub(crate) fn channels_connected(&self, _remote_p: &str, _no: usize, _cid: Cid) {}
pub(crate) fn channels_disconnected(&self, _remote_p: &str) {}
pub(crate) fn streams_opened(&self, _remote_p: &str) {}
pub(crate) fn streams_closed(&self, _remote_p: &str) {}
} }
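// Editor's note (sketch): both impl blocks above expose the same signatures, so
// call sites need no feature gating of their own, e.g. an illustrative call like
//     metrics.streams_opened(&remote_pid.to_string());
// increments the Prometheus counter when the `metrics` feature is enabled and
// compiles down to a no-op otherwise.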
impl std::fmt::Debug for NetworkMetrics { impl std::fmt::Debug for NetworkMetrics {
@ -270,138 +172,3 @@ impl std::fmt::Debug for NetworkMetrics {
write!(f, "NetworkMetrics()") write!(f, "NetworkMetrics()")
} }
} }
/*
pub(crate) struct PidCidFrameCache<T: MetricVecBuilder> {
metric: MetricVec<T>,
pid: String,
cache: Vec<[T::M; 8]>,
}
*/
pub(crate) struct MultiCidFrameCache {
metric: IntCounterVec,
cache: Vec<[Option<GenericCounter<AtomicU64>>; Frame::FRAMES_LEN as usize]>,
}
impl MultiCidFrameCache {
const CACHE_SIZE: usize = 2048;
pub fn new(metric: IntCounterVec) -> Self {
Self {
metric,
cache: vec![],
}
}
fn populate(&mut self, cid: Cid) {
let start_cid = self.cache.len();
if cid >= start_cid as u64 && cid > (Self::CACHE_SIZE as Cid) {
warn!(
?cid,
"cid, getting quite high, is this a attack on the cache?"
);
}
self.cache.resize((cid + 1) as usize, [
None, None, None, None, None, None, None, None,
]);
}
pub fn with_label_values(&mut self, cid: Cid, frame: &Frame) -> &GenericCounter<AtomicU64> {
self.populate(cid);
let frame_int = frame.get_int() as usize;
let r = &mut self.cache[cid as usize][frame_int];
if r.is_none() {
*r = Some(
self.metric
.with_label_values(&[&cid.to_string(), &frame_int.to_string()]),
);
}
r.as_ref().unwrap()
}
}
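// Editor's note on the design choice (sketch): `IntCounterVec::with_label_values`
// re-resolves its labels on every call, so this cache keeps the already-resolved
// `GenericCounter` per (cid, frame type) and only touches the underlying metric
// vec the first time a combination is seen; the tests below show the intended use.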
pub(crate) struct CidFrameCache {
cache: [GenericCounter<AtomicU64>; Frame::FRAMES_LEN as usize],
}
impl CidFrameCache {
pub fn new(metric: IntCounterVec, cid: Cid) -> Self {
let cid = cid.to_string();
let cache = [
metric.with_label_values(&[&cid, Frame::int_to_string(0)]),
metric.with_label_values(&[&cid, Frame::int_to_string(1)]),
metric.with_label_values(&[&cid, Frame::int_to_string(2)]),
metric.with_label_values(&[&cid, Frame::int_to_string(3)]),
metric.with_label_values(&[&cid, Frame::int_to_string(4)]),
metric.with_label_values(&[&cid, Frame::int_to_string(5)]),
metric.with_label_values(&[&cid, Frame::int_to_string(6)]),
metric.with_label_values(&[&cid, Frame::int_to_string(7)]),
];
Self { cache }
}
pub fn with_label_values(&mut self, frame: &Frame) -> &GenericCounter<AtomicU64> {
&self.cache[frame.get_int() as usize]
}
}
#[cfg(test)]
mod tests {
use crate::{
metrics::*,
types::{Frame, Pid},
};
#[test]
fn register_metrics() {
let registry = Registry::new();
let metrics = NetworkMetrics::new(&Pid::fake(1)).unwrap();
metrics.register(&registry).unwrap();
}
#[test]
fn multi_cid_frame_cache() {
let pid = Pid::fake(1);
let frame1 = Frame::Raw(b"Foo".to_vec());
let frame2 = Frame::Raw(b"Bar".to_vec());
let metrics = NetworkMetrics::new(&pid).unwrap();
let mut cache = MultiCidFrameCache::new(metrics.frames_in_total);
let v1 = cache.with_label_values(1, &frame1);
v1.inc();
assert_eq!(v1.get(), 1);
let v2 = cache.with_label_values(1, &frame1);
v2.inc();
assert_eq!(v2.get(), 2);
let v3 = cache.with_label_values(1, &frame2);
v3.inc();
assert_eq!(v3.get(), 3);
let v4 = cache.with_label_values(3, &frame1);
v4.inc();
assert_eq!(v4.get(), 1);
let v5 = cache.with_label_values(3, &Frame::Shutdown);
v5.inc();
assert_eq!(v5.get(), 1);
}
#[test]
fn cid_frame_cache() {
let pid = Pid::fake(1);
let frame1 = Frame::Raw(b"Foo".to_vec());
let frame2 = Frame::Raw(b"Bar".to_vec());
let metrics = NetworkMetrics::new(&pid).unwrap();
let mut cache = CidFrameCache::new(metrics.frames_wire_out_total, 1);
let v1 = cache.with_label_values(&frame1);
v1.inc();
assert_eq!(v1.get(), 1);
let v2 = cache.with_label_values(&frame1);
v2.inc();
assert_eq!(v2.get(), 2);
let v3 = cache.with_label_values(&frame2);
v3.inc();
assert_eq!(v3.get(), 3);
let v4 = cache.with_label_values(&Frame::Shutdown);
v4.inc();
assert_eq!(v4.get(), 1);
}
}

File diff suppressed because it is too large


@ -1,665 +0,0 @@
//!Priorities are handled the following way.
//!Prios from 0-63 are allowed.
//!Every 5 priority levels the throughput is halved.
//!E.g. in the same time 100 prio0 messages are sent, only 50 prio5, 25 prio10,
//! 12 prio15 or 6 prio20 messages are sent. Note: TODO: prio0 will be sent
//! immediately when found!
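// -- Editor's sketch (not part of the original file) --------------------------
// The PRIOS table below follows, up to rounding, the geometric series
// PRIOS[n] ≈ 100 * 2^(n/5): every 5 priority levels the per-message point cost
// doubles, which is what halves the throughput. A hypothetical helper
// (`prio_weight` is not a name used by this crate) that reproduces the table:
#[allow(dead_code)]
fn prio_weight(n: u32) -> u32 {
    // points charged for sending one frame at priority `n`
    (100.0 * 2f64.powf(f64::from(n) / 5.0)).round() as u32
}
// e.g. prio_weight(0) == 100, prio_weight(5) == 200, prio_weight(10) == 400
// -----------------------------------------------------------------------------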
#[cfg(feature = "metrics")]
use crate::metrics::NetworkMetrics;
use crate::{
message::OutgoingMessage,
types::{Frame, Prio, Sid},
};
use crossbeam_channel::{unbounded, Receiver, Sender};
use futures::channel::oneshot;
use std::collections::{HashMap, HashSet, VecDeque};
#[cfg(feature = "metrics")] use std::sync::Arc;
use tracing::trace;
const PRIO_MAX: usize = 64;
#[derive(Default)]
struct PidSidInfo {
len: u64,
empty_notify: Option<oneshot::Sender<()>>,
}
pub(crate) struct PrioManager {
points: [u32; PRIO_MAX],
messages: [VecDeque<(Sid, OutgoingMessage)>; PRIO_MAX],
messages_rx: Receiver<(Prio, Sid, OutgoingMessage)>,
sid_owned: HashMap<Sid, PidSidInfo>,
//you can register to be notified if a pid_sid combination is flushed completely here
sid_flushed_rx: Receiver<(Sid, oneshot::Sender<()>)>,
queued: HashSet<u8>,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>,
#[cfg(feature = "metrics")]
pid: String,
}
impl PrioManager {
const PRIOS: [u32; PRIO_MAX] = [
100, 115, 132, 152, 174, 200, 230, 264, 303, 348, 400, 459, 528, 606, 696, 800, 919, 1056,
1213, 1393, 1600, 1838, 2111, 2425, 2786, 3200, 3676, 4222, 4850, 5572, 6400, 7352, 8445,
9701, 11143, 12800, 14703, 16890, 19401, 22286, 25600, 29407, 33779, 38802, 44572, 51200,
58813, 67559, 77605, 89144, 102400, 117627, 135118, 155209, 178289, 204800, 235253, 270235,
310419, 356578, 409600, 470507, 540470, 620838,
];
#[allow(clippy::type_complexity)]
pub fn new(
#[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
pid: String,
) -> (
Self,
Sender<(Prio, Sid, OutgoingMessage)>,
Sender<(Sid, oneshot::Sender<()>)>,
) {
#[cfg(not(feature = "metrics"))]
let _pid = pid;
// (a2p_msg_s, a2p_msg_r)
let (messages_tx, messages_rx) = unbounded();
let (sid_flushed_tx, sid_flushed_rx) = unbounded();
(
Self {
points: [0; PRIO_MAX],
messages: [
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
VecDeque::new(),
],
messages_rx,
queued: HashSet::new(), //TODO: optimize with u64 and 64 bits
sid_flushed_rx,
sid_owned: HashMap::new(),
#[cfg(feature = "metrics")]
metrics,
#[cfg(feature = "metrics")]
pid,
},
messages_tx,
sid_flushed_tx,
)
}
async fn tick(&mut self) {
// Check Range
for (prio, sid, msg) in self.messages_rx.try_iter() {
debug_assert!((prio as usize) < PRIO_MAX);
#[cfg(feature = "metrics")]
{
let sid_string = sid.to_string();
self.metrics
.message_out_total
.with_label_values(&[&self.pid, &sid_string])
.inc();
self.metrics
.message_out_throughput
.with_label_values(&[&self.pid, &sid_string])
.inc_by(msg.buffer.data.len() as u64);
}
//trace!(?prio, ?sid_string, "tick");
self.queued.insert(prio);
self.messages[prio as usize].push_back((sid, msg));
self.sid_owned.entry(sid).or_default().len += 1;
}
//this must be AFTER messages
for (sid, return_sender) in self.sid_flushed_rx.try_iter() {
#[cfg(feature = "metrics")]
self.metrics
.streams_flushed
.with_label_values(&[&self.pid])
.inc();
if let Some(cnt) = self.sid_owned.get_mut(&sid) {
// register sender
cnt.empty_notify = Some(return_sender);
trace!(?sid, "register empty notify");
} else {
// return immediately
return_sender.send(()).unwrap();
trace!(?sid, "return immediately that stream is empty");
}
}
}
//if None returned, we are empty!
fn calc_next_prio(&self) -> Option<u8> {
// compare all queued prios, max 64 operations
let mut lowest = std::u32::MAX;
let mut lowest_id = None;
for &n in &self.queued {
let n_points = self.points[n as usize];
if n_points < lowest {
lowest = n_points;
lowest_id = Some(n)
} else if n_points == lowest && lowest_id.is_some() && n < lowest_id.unwrap() {
//on equal points lowest first!
lowest_id = Some(n)
}
}
lowest_id
/*
self.queued
.iter()
.min_by_key(|&n| self.points[*n as usize]).cloned()*/
}
/// no_of_frames = frames.len()
/// Your goal is to find a realistic no_of_frames!
/// no_of_frames should be chosen such that all frames can be sent out before
/// the next tick!
///  - if no_of_frames is too high you will fill either the socket buffer,
///    or your internal buffer. In that case you will increase latency for
///    high prio messages!
///  - if no_of_frames is too low you won't saturate your socket fully, thus
///    getting lower bandwidth than possible
/// (A usage sketch follows this impl block.)
pub async fn fill_frames<E: Extend<(Sid, Frame)>>(
&mut self,
no_of_frames: usize,
frames: &mut E,
) {
for v in self.messages.iter_mut() {
v.reserve_exact(no_of_frames)
}
self.tick().await;
for _ in 0..no_of_frames {
match self.calc_next_prio() {
Some(prio) => {
//let prio2 = self.calc_next_prio().unwrap();
//trace!(?prio, "handle next prio");
self.points[prio as usize] += Self::PRIOS[prio as usize];
//pop message from front of VecDeque, handle it and push it back, so that all
// => messages with same prio get a fair chance :)
//TODO: evaluate not popping every time
let (sid, mut msg) = self.messages[prio as usize].pop_front().unwrap();
if msg.fill_next(sid, frames) {
//trace!(?m.mid, "finish message");
//check if prio is empty
if self.messages[prio as usize].is_empty() {
self.queued.remove(&prio);
}
//decrease pid_sid counter by 1 again
let cnt = self.sid_owned.get_mut(&sid).expect(
"The pid_sid_owned counter works wrong, more pid,sid removed than \
inserted",
);
cnt.len -= 1;
if cnt.len == 0 {
let cnt = self.sid_owned.remove(&sid).unwrap();
if let Some(empty_notify) = cnt.empty_notify {
empty_notify.send(()).unwrap();
trace!(?sid, "returned that stream is empty");
}
}
} else {
self.messages[prio as usize].push_front((sid, msg));
}
},
None => {
//QUEUE is empty, we clear the POINTS so we don't build up a huge backlog
// of POINTS on a prio from the past
self.points = [0; PRIO_MAX];
break;
},
}
}
}
}
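// -- Editor's usage sketch for `fill_frames` (mirrors the unit tests below) ---
// Not part of the original file; `metrics`, `pid`, `sid` and `msg` stand in for
// values the caller already has.
//
//     let (mut mgr, msg_tx, _flush_tx) = PrioManager::new(metrics, pid.to_string());
//     msg_tx.send((16, sid, msg)).unwrap();
//     let mut frames = std::collections::VecDeque::new();
//     futures::executor::block_on(mgr.fill_frames(100, &mut frames));
//     // `frames` now holds (Sid, Frame) pairs: a DataHeader followed by Data.
//
// Because PRIOS doubles every 5 levels, a saturated prio-16 stream is served
// roughly twice as often as a saturated prio-21 stream.
// -----------------------------------------------------------------------------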
impl std::fmt::Debug for PrioManager {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut cnt = 0;
for m in self.messages.iter() {
cnt += m.len();
}
write!(f, "PrioManager(len: {}, queued: {:?})", cnt, &self.queued,)
}
}
#[cfg(test)]
mod tests {
use crate::{
message::{MessageBuffer, OutgoingMessage},
metrics::NetworkMetrics,
prios::*,
types::{Frame, Pid, Prio, Sid},
};
use crossbeam_channel::Sender;
use futures::{channel::oneshot, executor::block_on};
use std::{collections::VecDeque, sync::Arc};
const SIZE: u64 = OutgoingMessage::FRAME_DATA_SIZE;
const USIZE: usize = OutgoingMessage::FRAME_DATA_SIZE as usize;
#[allow(clippy::type_complexity)]
fn mock_new() -> (
PrioManager,
Sender<(Prio, Sid, OutgoingMessage)>,
Sender<(Sid, oneshot::Sender<()>)>,
) {
let pid = Pid::fake(1);
PrioManager::new(
Arc::new(NetworkMetrics::new(&pid).unwrap()),
pid.to_string(),
)
}
fn mock_out(prio: Prio, sid: u64) -> (Prio, Sid, OutgoingMessage) {
let sid = Sid::new(sid);
(prio, sid, OutgoingMessage {
buffer: Arc::new(MessageBuffer {
data: vec![48, 49, 50],
}),
cursor: 0,
mid: 1,
sid,
})
}
fn mock_out_large(prio: Prio, sid: u64) -> (Prio, Sid, OutgoingMessage) {
let sid = Sid::new(sid);
let mut data = vec![48; USIZE];
data.append(&mut vec![49; USIZE]);
data.append(&mut vec![50; 20]);
(prio, sid, OutgoingMessage {
buffer: Arc::new(MessageBuffer { data }),
cursor: 0,
mid: 1,
sid,
})
}
fn assert_header(frames: &mut VecDeque<(Sid, Frame)>, f_sid: u64, f_length: u64) {
let frame = frames
.pop_front()
.expect("Frames vecdeque doesn't contain enough frames!")
.1;
if let Frame::DataHeader { mid, sid, length } = frame {
assert_eq!(mid, 1);
assert_eq!(sid, Sid::new(f_sid));
assert_eq!(length, f_length);
} else {
panic!("Wrong frame type!, expected DataHeader");
}
}
fn assert_data(frames: &mut VecDeque<(Sid, Frame)>, f_start: u64, f_data: Vec<u8>) {
let frame = frames
.pop_front()
.expect("Frames vecdeque doesn't contain enough frames!")
.1;
if let Frame::Data { mid, start, data } = frame {
assert_eq!(mid, 1);
assert_eq!(start, f_start);
assert_eq!(data, f_data);
} else {
panic!("Wrong frame type!, expected Data");
}
}
#[test]
fn single_p16() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out(16, 1337)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(100, &mut frames));
assert_header(&mut frames, 1337, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert!(frames.is_empty());
}
#[test]
fn single_p16_p20() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out(16, 1337)).unwrap();
msg_tx.send(mock_out(20, 42)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(100, &mut frames));
assert_header(&mut frames, 1337, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 42, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert!(frames.is_empty());
}
#[test]
fn single_p20_p16() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out(20, 42)).unwrap();
msg_tx.send(mock_out(16, 1337)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(100, &mut frames));
assert_header(&mut frames, 1337, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 42, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert!(frames.is_empty());
}
#[test]
fn multiple_p16_p20() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out(20, 2)).unwrap();
msg_tx.send(mock_out(16, 1)).unwrap();
msg_tx.send(mock_out(16, 3)).unwrap();
msg_tx.send(mock_out(16, 5)).unwrap();
msg_tx.send(mock_out(20, 4)).unwrap();
msg_tx.send(mock_out(20, 7)).unwrap();
msg_tx.send(mock_out(16, 6)).unwrap();
msg_tx.send(mock_out(20, 10)).unwrap();
msg_tx.send(mock_out(16, 8)).unwrap();
msg_tx.send(mock_out(20, 12)).unwrap();
msg_tx.send(mock_out(16, 9)).unwrap();
msg_tx.send(mock_out(16, 11)).unwrap();
msg_tx.send(mock_out(20, 13)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(100, &mut frames));
for i in 1..14 {
assert_header(&mut frames, i, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
}
assert!(frames.is_empty());
}
#[test]
fn multiple_fill_frames_p16_p20() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out(20, 2)).unwrap();
msg_tx.send(mock_out(16, 1)).unwrap();
msg_tx.send(mock_out(16, 3)).unwrap();
msg_tx.send(mock_out(16, 5)).unwrap();
msg_tx.send(mock_out(20, 4)).unwrap();
msg_tx.send(mock_out(20, 7)).unwrap();
msg_tx.send(mock_out(16, 6)).unwrap();
msg_tx.send(mock_out(20, 10)).unwrap();
msg_tx.send(mock_out(16, 8)).unwrap();
msg_tx.send(mock_out(20, 12)).unwrap();
msg_tx.send(mock_out(16, 9)).unwrap();
msg_tx.send(mock_out(16, 11)).unwrap();
msg_tx.send(mock_out(20, 13)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(3, &mut frames));
for i in 1..4 {
assert_header(&mut frames, i, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
}
assert!(frames.is_empty());
block_on(mgr.fill_frames(11, &mut frames));
for i in 4..14 {
assert_header(&mut frames, i, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
}
assert!(frames.is_empty());
}
#[test]
fn single_large_p16() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out_large(16, 1)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(100, &mut frames));
assert_header(&mut frames, 1, SIZE * 2 + 20);
assert_data(&mut frames, 0, vec![48; USIZE]);
assert_data(&mut frames, SIZE, vec![49; USIZE]);
assert_data(&mut frames, SIZE * 2, vec![50; 20]);
assert!(frames.is_empty());
}
#[test]
fn multiple_large_p16() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out_large(16, 1)).unwrap();
msg_tx.send(mock_out_large(16, 2)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(100, &mut frames));
assert_header(&mut frames, 1, SIZE * 2 + 20);
assert_data(&mut frames, 0, vec![48; USIZE]);
assert_data(&mut frames, SIZE, vec![49; USIZE]);
assert_data(&mut frames, SIZE * 2, vec![50; 20]);
assert_header(&mut frames, 2, SIZE * 2 + 20);
assert_data(&mut frames, 0, vec![48; USIZE]);
assert_data(&mut frames, SIZE, vec![49; USIZE]);
assert_data(&mut frames, SIZE * 2, vec![50; 20]);
assert!(frames.is_empty());
}
#[test]
fn multiple_large_p16_sudden_p0() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
msg_tx.send(mock_out_large(16, 1)).unwrap();
msg_tx.send(mock_out_large(16, 2)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2, &mut frames));
assert_header(&mut frames, 1, SIZE * 2 + 20);
assert_data(&mut frames, 0, vec![48; USIZE]);
assert_data(&mut frames, SIZE, vec![49; USIZE]);
msg_tx.send(mock_out(0, 3)).unwrap();
block_on(mgr.fill_frames(100, &mut frames));
assert_header(&mut frames, 3, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_data(&mut frames, SIZE * 2, vec![50; 20]);
assert_header(&mut frames, 2, SIZE * 2 + 20);
assert_data(&mut frames, 0, vec![48; USIZE]);
assert_data(&mut frames, SIZE, vec![49; USIZE]);
assert_data(&mut frames, SIZE * 2, vec![50; 20]);
assert!(frames.is_empty());
}
#[test]
fn single_p20_thousand_p16_at_once() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
for _ in 0..998 {
msg_tx.send(mock_out(16, 2)).unwrap();
}
msg_tx.send(mock_out(20, 1)).unwrap();
msg_tx.send(mock_out(16, 2)).unwrap();
msg_tx.send(mock_out(16, 2)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2000, &mut frames));
assert_header(&mut frames, 2, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 1, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 2, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 2, 3);
//unimportant
}
#[test]
fn single_p20_thousand_p16_later() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
for _ in 0..998 {
msg_tx.send(mock_out(16, 2)).unwrap();
}
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2000, &mut frames));
//^unimportant frames, gonna be dropped
msg_tx.send(mock_out(20, 1)).unwrap();
msg_tx.send(mock_out(16, 2)).unwrap();
msg_tx.send(mock_out(16, 2)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2000, &mut frames));
//important in this test is that the Points are reset after the first frames
// got cleared: even though 998 prio16 messages and 0 prio20 messages have
// been sent at this point, the next message is a prio16 message again,
// and only then prio20! We don't want to build up debt over an idling
// connection
assert_header(&mut frames, 2, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 1, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_header(&mut frames, 2, 3);
//unimportant
}
#[test]
fn gigantic_message() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
let mut data = vec![1; USIZE];
data.extend_from_slice(&[2; USIZE]);
data.extend_from_slice(&[3; USIZE]);
data.extend_from_slice(&[4; USIZE]);
data.extend_from_slice(&[5; USIZE]);
let sid = Sid::new(2);
msg_tx
.send((16, sid, OutgoingMessage {
buffer: Arc::new(MessageBuffer { data }),
cursor: 0,
mid: 1,
sid,
}))
.unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2000, &mut frames));
assert_header(&mut frames, 2, 7000);
assert_data(&mut frames, 0, vec![1; USIZE]);
assert_data(&mut frames, 1400, vec![2; USIZE]);
assert_data(&mut frames, 2800, vec![3; USIZE]);
assert_data(&mut frames, 4200, vec![4; USIZE]);
assert_data(&mut frames, 5600, vec![5; USIZE]);
}
#[test]
fn gigantic_message_order() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
let mut data = vec![1; USIZE];
data.extend_from_slice(&[2; USIZE]);
data.extend_from_slice(&[3; USIZE]);
data.extend_from_slice(&[4; USIZE]);
data.extend_from_slice(&[5; USIZE]);
let sid = Sid::new(2);
msg_tx
.send((16, sid, OutgoingMessage {
buffer: Arc::new(MessageBuffer { data }),
cursor: 0,
mid: 1,
sid,
}))
.unwrap();
msg_tx.send(mock_out(16, 8)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2000, &mut frames));
assert_header(&mut frames, 2, 7000);
assert_data(&mut frames, 0, vec![1; USIZE]);
assert_data(&mut frames, 1400, vec![2; USIZE]);
assert_data(&mut frames, 2800, vec![3; USIZE]);
assert_data(&mut frames, 4200, vec![4; USIZE]);
assert_data(&mut frames, 5600, vec![5; USIZE]);
assert_header(&mut frames, 8, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
}
#[test]
fn gigantic_message_order_other_prio() {
let (mut mgr, msg_tx, _flush_tx) = mock_new();
let mut data = vec![1; USIZE];
data.extend_from_slice(&[2; USIZE]);
data.extend_from_slice(&[3; USIZE]);
data.extend_from_slice(&[4; USIZE]);
data.extend_from_slice(&[5; USIZE]);
let sid = Sid::new(2);
msg_tx
.send((16, sid, OutgoingMessage {
buffer: Arc::new(MessageBuffer { data }),
cursor: 0,
mid: 1,
sid,
}))
.unwrap();
msg_tx.send(mock_out(20, 8)).unwrap();
let mut frames = VecDeque::new();
block_on(mgr.fill_frames(2000, &mut frames));
assert_header(&mut frames, 2, 7000);
assert_data(&mut frames, 0, vec![1; USIZE]);
assert_header(&mut frames, 8, 3);
assert_data(&mut frames, 0, vec![48, 49, 50]);
assert_data(&mut frames, 1400, vec![2; USIZE]);
assert_data(&mut frames, 2800, vec![3; USIZE]);
assert_data(&mut frames, 4200, vec![4; USIZE]);
assert_data(&mut frames, 5600, vec![5; USIZE]);
}
}


@ -1,596 +0,0 @@
#[cfg(feature = "metrics")]
use crate::metrics::{CidFrameCache, NetworkMetrics};
use crate::{
participant::C2pFrame,
types::{Cid, Frame},
};
use async_std::{
io::prelude::*,
net::{TcpStream, UdpSocket},
};
use futures::{
channel::{mpsc, oneshot},
future::{Fuse, FutureExt},
lock::Mutex,
select,
sink::SinkExt,
stream::StreamExt,
};
use std::{convert::TryFrom, net::SocketAddr, sync::Arc};
use tracing::*;
// Reserving bytes 0, 10, 13 as there is enough space and it makes it easy to
// detect an invalid client, e.g. sending an empty line would make 10 the first char
// const FRAME_RESERVED_1: u8 = 0;
const FRAME_HANDSHAKE: u8 = 1;
const FRAME_INIT: u8 = 2;
const FRAME_SHUTDOWN: u8 = 3;
const FRAME_OPEN_STREAM: u8 = 4;
const FRAME_CLOSE_STREAM: u8 = 5;
const FRAME_DATA_HEADER: u8 = 6;
const FRAME_DATA: u8 = 7;
const FRAME_RAW: u8 = 8;
//const FRAME_RESERVED_2: u8 = 10;
//const FRAME_RESERVED_3: u8 = 13;
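// Editor's sketch of the wire layout implied by `write_frame`/`write_to_wire`
// below (1-byte tag from the constants above, then little-endian fields); an
// illustration, not an authoritative spec:
//   FRAME_DATA_HEADER: [6][mid: u64][sid: u64][length: u64]            (25 bytes)
//   FRAME_DATA:        [7][mid: u64][start: u64][len: u16][data: len bytes]
//   FRAME_RAW:         [8][len: u16][data: len bytes]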
#[derive(Debug)]
pub(crate) enum Protocols {
Tcp(TcpProtocol),
Udp(UdpProtocol),
//Mpsc(MpscChannel),
}
#[derive(Debug)]
pub(crate) struct TcpProtocol {
stream: TcpStream,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>,
}
#[derive(Debug)]
pub(crate) struct UdpProtocol {
socket: Arc<UdpSocket>,
remote_addr: SocketAddr,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>,
data_in: Mutex<mpsc::UnboundedReceiver<Vec<u8>>>,
}
//TODO: PERFORMANCE: Use BufWriter and BufReader from std::io!
impl TcpProtocol {
pub(crate) fn new(
stream: TcpStream,
#[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
) -> Self {
Self {
stream,
#[cfg(feature = "metrics")]
metrics,
}
}
async fn read_frame<R: ReadExt + std::marker::Unpin>(
r: &mut R,
mut end_receiver: &mut Fuse<oneshot::Receiver<()>>,
) -> Result<Frame, Option<std::io::Error>> {
let handle = |read_result| match read_result {
Ok(_) => Ok(()),
Err(e) => Err(Some(e)),
};
let mut frame_no = [0u8; 1];
match select! {
r = r.read_exact(&mut frame_no).fuse() => Some(r),
_ = end_receiver => None,
} {
Some(read_result) => handle(read_result)?,
None => {
trace!("shutdown requested");
return Err(None);
},
};
match frame_no[0] {
FRAME_HANDSHAKE => {
let mut bytes = [0u8; 19];
handle(r.read_exact(&mut bytes).await)?;
Ok(Frame::gen_handshake(bytes))
},
FRAME_INIT => {
let mut bytes = [0u8; 32];
handle(r.read_exact(&mut bytes).await)?;
Ok(Frame::gen_init(bytes))
},
FRAME_SHUTDOWN => Ok(Frame::Shutdown),
FRAME_OPEN_STREAM => {
let mut bytes = [0u8; 10];
handle(r.read_exact(&mut bytes).await)?;
Ok(Frame::gen_open_stream(bytes))
},
FRAME_CLOSE_STREAM => {
let mut bytes = [0u8; 8];
handle(r.read_exact(&mut bytes).await)?;
Ok(Frame::gen_close_stream(bytes))
},
FRAME_DATA_HEADER => {
let mut bytes = [0u8; 24];
handle(r.read_exact(&mut bytes).await)?;
Ok(Frame::gen_data_header(bytes))
},
FRAME_DATA => {
let mut bytes = [0u8; 18];
handle(r.read_exact(&mut bytes).await)?;
let (mid, start, length) = Frame::gen_data(bytes);
let mut data = vec![0; length as usize];
handle(r.read_exact(&mut data).await)?;
Ok(Frame::Data { mid, start, data })
},
FRAME_RAW => {
let mut bytes = [0u8; 2];
handle(r.read_exact(&mut bytes).await)?;
let length = Frame::gen_raw(bytes);
let mut data = vec![0; length as usize];
handle(r.read_exact(&mut data).await)?;
Ok(Frame::Raw(data))
},
other => {
// report a RAW frame, but we cannot rely on the next 2 bytes being a size.
// guess 32 bytes, which might help to narrow down issues
let mut data = vec![0; 32];
//keep the first byte!
match r.read(&mut data[1..]).await {
Ok(n) => {
data.truncate(n + 1);
Ok(())
},
Err(e) => Err(Some(e)),
}?;
data[0] = other;
warn!(?data, "got an unexpected RAW msg");
Ok(Frame::Raw(data))
},
}
}
pub async fn read_from_wire(
&self,
cid: Cid,
w2c_cid_frame_s: &mut mpsc::UnboundedSender<C2pFrame>,
end_r: oneshot::Receiver<()>,
) {
trace!("Starting up tcp read()");
#[cfg(feature = "metrics")]
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_in_total.clone(), cid);
#[cfg(feature = "metrics")]
let throughput_cache = self
.metrics
.wire_in_throughput
.with_label_values(&[&cid.to_string()]);
let mut stream = self.stream.clone();
let mut end_r = end_r.fuse();
loop {
match Self::read_frame(&mut stream, &mut end_r).await {
Ok(frame) => {
#[cfg(feature = "metrics")]
{
metrics_cache.with_label_values(&frame).inc();
if let Frame::Data {
mid: _,
start: _,
ref data,
} = frame
{
throughput_cache.inc_by(data.len() as u64);
}
}
w2c_cid_frame_s
.send((cid, Ok(frame)))
.await
.expect("Channel or Participant seems no longer to exist");
},
Err(e_option) => {
if let Some(e) = e_option {
info!(?e, "Closing tcp protocol due to read error");
//w2c_cid_frame_s is shared, dropping it wouldn't notify the receiver as
// every channel is holding a sender! That's why we
// need an explicit STOP here
w2c_cid_frame_s
.send((cid, Err(())))
.await
.expect("Channel or Participant seems no longer to exist");
}
//None is clean shutdown
break;
},
}
}
trace!("Shutting down tcp read()");
}
pub async fn write_frame<W: WriteExt + std::marker::Unpin>(
w: &mut W,
frame: Frame,
) -> Result<(), std::io::Error> {
match frame {
Frame::Handshake {
magic_number,
version,
} => {
w.write_all(&FRAME_HANDSHAKE.to_be_bytes()).await?;
w.write_all(&magic_number).await?;
w.write_all(&version[0].to_le_bytes()).await?;
w.write_all(&version[1].to_le_bytes()).await?;
w.write_all(&version[2].to_le_bytes()).await?;
},
Frame::Init { pid, secret } => {
w.write_all(&FRAME_INIT.to_be_bytes()).await?;
w.write_all(&pid.to_le_bytes()).await?;
w.write_all(&secret.to_le_bytes()).await?;
},
Frame::Shutdown => {
w.write_all(&FRAME_SHUTDOWN.to_be_bytes()).await?;
},
Frame::OpenStream {
sid,
prio,
promises,
} => {
w.write_all(&FRAME_OPEN_STREAM.to_be_bytes()).await?;
w.write_all(&sid.to_le_bytes()).await?;
w.write_all(&prio.to_le_bytes()).await?;
w.write_all(&promises.to_le_bytes()).await?;
},
Frame::CloseStream { sid } => {
w.write_all(&FRAME_CLOSE_STREAM.to_be_bytes()).await?;
w.write_all(&sid.to_le_bytes()).await?;
},
Frame::DataHeader { mid, sid, length } => {
w.write_all(&FRAME_DATA_HEADER.to_be_bytes()).await?;
w.write_all(&mid.to_le_bytes()).await?;
w.write_all(&sid.to_le_bytes()).await?;
w.write_all(&length.to_le_bytes()).await?;
},
Frame::Data { mid, start, data } => {
w.write_all(&FRAME_DATA.to_be_bytes()).await?;
w.write_all(&mid.to_le_bytes()).await?;
w.write_all(&start.to_le_bytes()).await?;
w.write_all(&(data.len() as u16).to_le_bytes()).await?;
w.write_all(&data).await?;
},
Frame::Raw(data) => {
w.write_all(&FRAME_RAW.to_be_bytes()).await?;
w.write_all(&(data.len() as u16).to_le_bytes()).await?;
w.write_all(&data).await?;
},
};
Ok(())
}
pub async fn write_to_wire(&self, cid: Cid, mut c2w_frame_r: mpsc::UnboundedReceiver<Frame>) {
trace!("Starting up tcp write()");
let mut stream = self.stream.clone();
#[cfg(feature = "metrics")]
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_out_total.clone(), cid);
#[cfg(feature = "metrics")]
let throughput_cache = self
.metrics
.wire_out_throughput
.with_label_values(&[&cid.to_string()]);
#[cfg(not(feature = "metrics"))]
let _cid = cid;
while let Some(frame) = c2w_frame_r.next().await {
#[cfg(feature = "metrics")]
{
metrics_cache.with_label_values(&frame).inc();
if let Frame::Data {
mid: _,
start: _,
ref data,
} = frame
{
throughput_cache.inc_by(data.len() as u64);
}
}
if let Err(e) = Self::write_frame(&mut stream, frame).await {
info!(
?e,
"Got an error writing to tcp, going to close this channel"
);
c2w_frame_r.close();
break;
};
}
trace!("shutting down tcp write()");
}
}
impl UdpProtocol {
pub(crate) fn new(
socket: Arc<UdpSocket>,
remote_addr: SocketAddr,
#[cfg(feature = "metrics")] metrics: Arc<NetworkMetrics>,
data_in: mpsc::UnboundedReceiver<Vec<u8>>,
) -> Self {
Self {
socket,
remote_addr,
#[cfg(feature = "metrics")]
metrics,
data_in: Mutex::new(data_in),
}
}
pub async fn read_from_wire(
&self,
cid: Cid,
w2c_cid_frame_s: &mut mpsc::UnboundedSender<C2pFrame>,
end_r: oneshot::Receiver<()>,
) {
trace!("Starting up udp read()");
#[cfg(feature = "metrics")]
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_in_total.clone(), cid);
#[cfg(feature = "metrics")]
let throughput_cache = self
.metrics
.wire_in_throughput
.with_label_values(&[&cid.to_string()]);
let mut data_in = self.data_in.lock().await;
let mut end_r = end_r.fuse();
while let Some(bytes) = select! {
r = data_in.next().fuse() => match r {
Some(r) => Some(r),
None => {
info!("Udp read ended");
w2c_cid_frame_s.send((cid, Err(()))).await.expect("Channel or Participant seems no longer to exist");
None
}
},
_ = end_r => None,
} {
trace!("Got raw UDP message with len: {}", bytes.len());
let frame_no = bytes[0];
let frame = match frame_no {
FRAME_HANDSHAKE => {
Frame::gen_handshake(*<&[u8; 19]>::try_from(&bytes[1..20]).unwrap())
},
FRAME_INIT => Frame::gen_init(*<&[u8; 32]>::try_from(&bytes[1..33]).unwrap()),
FRAME_SHUTDOWN => Frame::Shutdown,
FRAME_OPEN_STREAM => {
Frame::gen_open_stream(*<&[u8; 10]>::try_from(&bytes[1..11]).unwrap())
},
FRAME_CLOSE_STREAM => {
Frame::gen_close_stream(*<&[u8; 8]>::try_from(&bytes[1..9]).unwrap())
},
FRAME_DATA_HEADER => {
Frame::gen_data_header(*<&[u8; 24]>::try_from(&bytes[1..25]).unwrap())
},
FRAME_DATA => {
let (mid, start, length) =
Frame::gen_data(*<&[u8; 18]>::try_from(&bytes[1..19]).unwrap());
let mut data = vec![0; length as usize];
#[cfg(feature = "metrics")]
throughput_cache.inc_by(length as u64);
data.copy_from_slice(&bytes[19..]);
Frame::Data { mid, start, data }
},
FRAME_RAW => {
let length = Frame::gen_raw(*<&[u8; 2]>::try_from(&bytes[1..3]).unwrap());
let mut data = vec![0; length as usize];
data.copy_from_slice(&bytes[3..]);
Frame::Raw(data)
},
_ => Frame::Raw(bytes),
};
#[cfg(feature = "metrics")]
metrics_cache.with_label_values(&frame).inc();
w2c_cid_frame_s.send((cid, Ok(frame))).await.unwrap();
}
trace!("Shutting down udp read()");
}
pub async fn write_to_wire(&self, cid: Cid, mut c2w_frame_r: mpsc::UnboundedReceiver<Frame>) {
trace!("Starting up udp write()");
let mut buffer = [0u8; 2000];
#[cfg(feature = "metrics")]
let mut metrics_cache = CidFrameCache::new(self.metrics.frames_wire_out_total.clone(), cid);
#[cfg(feature = "metrics")]
let throughput_cache = self
.metrics
.wire_out_throughput
.with_label_values(&[&cid.to_string()]);
#[cfg(not(feature = "metrics"))]
let _cid = cid;
while let Some(frame) = c2w_frame_r.next().await {
#[cfg(feature = "metrics")]
metrics_cache.with_label_values(&frame).inc();
let len = match frame {
Frame::Handshake {
magic_number,
version,
} => {
let x = FRAME_HANDSHAKE.to_be_bytes();
buffer[0] = x[0];
buffer[1..8].copy_from_slice(&magic_number);
buffer[8..12].copy_from_slice(&version[0].to_le_bytes());
buffer[12..16].copy_from_slice(&version[1].to_le_bytes());
buffer[16..20].copy_from_slice(&version[2].to_le_bytes());
20
},
Frame::Init { pid, secret } => {
buffer[0] = FRAME_INIT.to_be_bytes()[0];
buffer[1..17].copy_from_slice(&pid.to_le_bytes());
buffer[17..33].copy_from_slice(&secret.to_le_bytes());
33
},
Frame::Shutdown => {
buffer[0] = FRAME_SHUTDOWN.to_be_bytes()[0];
1
},
Frame::OpenStream {
sid,
prio,
promises,
} => {
buffer[0] = FRAME_OPEN_STREAM.to_be_bytes()[0];
buffer[1..9].copy_from_slice(&sid.to_le_bytes());
buffer[9] = prio.to_le_bytes()[0];
buffer[10] = promises.to_le_bytes()[0];
11
},
Frame::CloseStream { sid } => {
buffer[0] = FRAME_CLOSE_STREAM.to_be_bytes()[0];
buffer[1..9].copy_from_slice(&sid.to_le_bytes());
9
},
Frame::DataHeader { mid, sid, length } => {
buffer[0] = FRAME_DATA_HEADER.to_be_bytes()[0];
buffer[1..9].copy_from_slice(&mid.to_le_bytes());
buffer[9..17].copy_from_slice(&sid.to_le_bytes());
buffer[17..25].copy_from_slice(&length.to_le_bytes());
25
},
Frame::Data { mid, start, data } => {
buffer[0] = FRAME_DATA.to_be_bytes()[0];
buffer[1..9].copy_from_slice(&mid.to_le_bytes());
buffer[9..17].copy_from_slice(&start.to_le_bytes());
buffer[17..19].copy_from_slice(&(data.len() as u16).to_le_bytes());
buffer[19..(data.len() + 19)].clone_from_slice(&data[..]);
#[cfg(feature = "metrics")]
throughput_cache.inc_by(data.len() as u64);
19 + data.len()
},
Frame::Raw(data) => {
buffer[0] = FRAME_RAW.to_be_bytes()[0];
buffer[1..3].copy_from_slice(&(data.len() as u16).to_le_bytes());
buffer[3..(data.len() + 3)].clone_from_slice(&data[..]);
3 + data.len()
},
};
let mut start = 0;
while start < len {
trace!(?start, ?len, "Splitting up udp frame in multiple packages");
match self
.socket
.send_to(&buffer[start..len], self.remote_addr)
.await
{
Ok(n) => {
start += n;
if n != len {
error!(
"THIS DOESN'T WORK, as RECEIVER CURRENTLY ONLY HANDLES 1 FRAME \
per UDP message. splitting up will fail!"
);
}
},
Err(e) => error!(?e, "Need to handle that error!"),
}
}
}
trace!("Shutting down udp write()");
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{metrics::NetworkMetrics, types::Pid};
use async_std::net;
use futures::{executor::block_on, stream::StreamExt};
use std::sync::Arc;
#[test]
fn tcp_read_handshake() {
let pid = Pid::new();
let cid = 80085;
let metrics = Arc::new(NetworkMetrics::new(&pid).unwrap());
let addr = std::net::SocketAddrV4::new(std::net::Ipv4Addr::new(127, 0, 0, 1), 50500);
block_on(async {
let server = net::TcpListener::bind(addr).await.unwrap();
let mut client = net::TcpStream::connect(addr).await.unwrap();
let s_stream = server.incoming().next().await.unwrap().unwrap();
let prot = TcpProtocol::new(s_stream, metrics);
//Send Handshake
client.write_all(&[FRAME_HANDSHAKE]).await.unwrap();
client.write_all(b"HELLOWO").await.unwrap();
client.write_all(&1337u32.to_le_bytes()).await.unwrap();
client.write_all(&0u32.to_le_bytes()).await.unwrap();
client.write_all(&42u32.to_le_bytes()).await.unwrap();
client.flush().await.unwrap();
//handle data
let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<C2pFrame>();
let (read_stop_sender, read_stop_receiver) = oneshot::channel();
let cid2 = cid;
let t = std::thread::spawn(move || {
block_on(async {
prot.read_from_wire(cid2, &mut w2c_cid_frame_s, read_stop_receiver)
.await;
})
});
// Assert that we get some value back! It's a Handshake!
//async_std::task::sleep(std::time::Duration::from_millis(1000));
let (cid_r, frame) = w2c_cid_frame_r.next().await.unwrap();
assert_eq!(cid, cid_r);
if let Ok(Frame::Handshake {
magic_number,
version,
}) = frame
{
assert_eq!(&magic_number, b"HELLOWO");
assert_eq!(version, [1337, 0, 42]);
} else {
panic!("wrong handshake");
}
read_stop_sender.send(()).unwrap();
t.join().unwrap();
});
}
#[test]
fn tcp_read_garbage() {
let pid = Pid::new();
let cid = 80085;
let metrics = Arc::new(NetworkMetrics::new(&pid).unwrap());
let addr = std::net::SocketAddrV4::new(std::net::Ipv4Addr::new(127, 0, 0, 1), 50501);
block_on(async {
let server = net::TcpListener::bind(addr).await.unwrap();
let mut client = net::TcpStream::connect(addr).await.unwrap();
let s_stream = server.incoming().next().await.unwrap().unwrap();
let prot = TcpProtocol::new(s_stream, metrics);
//Send Handshake
client
.write_all("x4hrtzsektfhxugzdtz5r78gzrtzfhxfdthfthuzhfzzufasgasdfg".as_bytes())
.await
.unwrap();
client.flush().await.unwrap();
//handle data
let (mut w2c_cid_frame_s, mut w2c_cid_frame_r) = mpsc::unbounded::<C2pFrame>();
let (read_stop_sender, read_stop_receiver) = oneshot::channel();
let cid2 = cid;
let t = std::thread::spawn(move || {
block_on(async {
prot.read_from_wire(cid2, &mut w2c_cid_frame_s, read_stop_receiver)
.await;
})
});
// Assert that we get some value back! It's a Raw!
let (cid_r, frame) = w2c_cid_frame_r.next().await.unwrap();
assert_eq!(cid, cid_r);
if let Ok(Frame::Raw(data)) = frame {
assert_eq!(&data.as_slice(), b"x4hrtzsektfhxugzdtz5r78gzrtzfhxf");
} else {
panic!("wrong frame type");
}
read_stop_sender.send(()).unwrap();
t.join().unwrap();
});
}
}


@ -1,21 +1,11 @@
#[cfg(feature = "metrics")]
use crate::metrics::NetworkMetrics;
use crate::{ use crate::{
api::{Participant, ProtocolAddr}, api::{Participant, ProtocolAddr},
channel::Handshake, channel::Protocols,
metrics::NetworkMetrics,
participant::{B2sPrioStatistic, BParticipant, S2bCreateChannel, S2bShutdownBparticipant}, participant::{B2sPrioStatistic, BParticipant, S2bCreateChannel, S2bShutdownBparticipant},
protocols::{Protocols, TcpProtocol, UdpProtocol},
types::Pid,
};
use async_std::{io, net, sync::Mutex};
use futures::{
channel::{mpsc, oneshot},
executor::ThreadPool,
future::FutureExt,
select,
sink::SinkExt,
stream::StreamExt,
}; };
use futures_util::{FutureExt, StreamExt};
use network_protocol::{Cid, MpscMsg, Pid, ProtocolMetrics};
#[cfg(feature = "metrics")] #[cfg(feature = "metrics")]
use prometheus::Registry; use prometheus::Registry;
use rand::Rng; use rand::Rng;
@ -25,18 +15,29 @@ use std::{
atomic::{AtomicBool, AtomicU64, Ordering}, atomic::{AtomicBool, AtomicU64, Ordering},
Arc, Arc,
}, },
time::Duration,
}; };
use tokio::{
io, net, select,
sync::{mpsc, oneshot, Mutex},
};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::*; use tracing::*;
use tracing_futures::Instrument;
/// Naming of Channels `x2x` // Naming of Channels `x2x`
/// - a: api // - a: api
/// - s: scheduler // - s: scheduler
/// - b: bparticipant // - b: bparticipant
/// - p: prios // - p: prios
/// - r: protocol // - r: protocol
/// - w: wire // - w: wire
/// - c: channel/handshake // - c: channel/handshake
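// Editor's example of reading the convention above (not part of the original
// diff): `a2b_open_stream_s` is the api-to-bparticipant open-stream sender,
// `b2a_stream_opened_r` the bparticipant-to-api stream-opened receiver, and
// `s2b_shutdown_bparticipant_s` the scheduler-to-bparticipant shutdown sender.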
lazy_static::lazy_static! {
static ref MPSC_POOL: Mutex<HashMap<u64, mpsc::UnboundedSender<S2sMpscConnect>>> = {
Mutex::new(HashMap::new())
};
}
#[derive(Debug)] #[derive(Debug)]
struct ParticipantInfo { struct ParticipantInfo {
@ -48,6 +49,10 @@ struct ParticipantInfo {
type A2sListen = (ProtocolAddr, oneshot::Sender<io::Result<()>>); type A2sListen = (ProtocolAddr, oneshot::Sender<io::Result<()>>);
type A2sConnect = (ProtocolAddr, oneshot::Sender<io::Result<Participant>>); type A2sConnect = (ProtocolAddr, oneshot::Sender<io::Result<Participant>>);
type A2sDisconnect = (Pid, S2bShutdownBparticipant); type A2sDisconnect = (Pid, S2bShutdownBparticipant);
type S2sMpscConnect = (
mpsc::Sender<MpscMsg>,
oneshot::Sender<mpsc::Sender<MpscMsg>>,
);
#[derive(Debug)] #[derive(Debug)]
struct ControlChannels { struct ControlChannels {
@ -70,17 +75,18 @@ pub struct Scheduler {
local_pid: Pid, local_pid: Pid,
local_secret: u128, local_secret: u128,
closed: AtomicBool, closed: AtomicBool,
pool: Arc<ThreadPool>,
run_channels: Option<ControlChannels>, run_channels: Option<ControlChannels>,
participant_channels: Arc<Mutex<Option<ParticipantChannels>>>, participant_channels: Arc<Mutex<Option<ParticipantChannels>>>,
participants: Arc<Mutex<HashMap<Pid, ParticipantInfo>>>, participants: Arc<Mutex<HashMap<Pid, ParticipantInfo>>>,
channel_ids: Arc<AtomicU64>, channel_ids: Arc<AtomicU64>,
channel_listener: Mutex<HashMap<ProtocolAddr, oneshot::Sender<()>>>, channel_listener: Mutex<HashMap<ProtocolAddr, oneshot::Sender<()>>>,
#[cfg(feature = "metrics")]
metrics: Arc<NetworkMetrics>, metrics: Arc<NetworkMetrics>,
protocol_metrics: Arc<ProtocolMetrics>,
} }
impl Scheduler { impl Scheduler {
const MPSC_CHANNEL_BOUND: usize = 1000;
pub fn new( pub fn new(
local_pid: Pid, local_pid: Pid,
#[cfg(feature = "metrics")] registry: Option<&Registry>, #[cfg(feature = "metrics")] registry: Option<&Registry>,
@ -91,12 +97,13 @@ impl Scheduler {
mpsc::UnboundedReceiver<Participant>, mpsc::UnboundedReceiver<Participant>,
oneshot::Sender<()>, oneshot::Sender<()>,
) { ) {
let (a2s_listen_s, a2s_listen_r) = mpsc::unbounded::<A2sListen>(); let (a2s_listen_s, a2s_listen_r) = mpsc::unbounded_channel::<A2sListen>();
let (a2s_connect_s, a2s_connect_r) = mpsc::unbounded::<A2sConnect>(); let (a2s_connect_s, a2s_connect_r) = mpsc::unbounded_channel::<A2sConnect>();
let (s2a_connected_s, s2a_connected_r) = mpsc::unbounded::<Participant>(); let (s2a_connected_s, s2a_connected_r) = mpsc::unbounded_channel::<Participant>();
let (a2s_scheduler_shutdown_s, a2s_scheduler_shutdown_r) = oneshot::channel::<()>(); let (a2s_scheduler_shutdown_s, a2s_scheduler_shutdown_r) = oneshot::channel::<()>();
let (a2s_disconnect_s, a2s_disconnect_r) = mpsc::unbounded::<A2sDisconnect>(); let (a2s_disconnect_s, a2s_disconnect_r) = mpsc::unbounded_channel::<A2sDisconnect>();
let (b2s_prio_statistic_s, b2s_prio_statistic_r) = mpsc::unbounded::<B2sPrioStatistic>(); let (b2s_prio_statistic_s, b2s_prio_statistic_r) =
mpsc::unbounded_channel::<B2sPrioStatistic>();
let run_channels = Some(ControlChannels { let run_channels = Some(ControlChannels {
a2s_listen_r, a2s_listen_r,
@ -112,13 +119,14 @@ impl Scheduler {
b2s_prio_statistic_s, b2s_prio_statistic_s,
}; };
#[cfg(feature = "metrics")]
let metrics = Arc::new(NetworkMetrics::new(&local_pid).unwrap()); let metrics = Arc::new(NetworkMetrics::new(&local_pid).unwrap());
let protocol_metrics = Arc::new(ProtocolMetrics::new().unwrap());
#[cfg(feature = "metrics")] #[cfg(feature = "metrics")]
{ {
if let Some(registry) = registry { if let Some(registry) = registry {
metrics.register(registry).unwrap(); metrics.register(registry).unwrap();
protocol_metrics.register(registry).unwrap();
} }
} }
@ -130,14 +138,13 @@ impl Scheduler {
local_pid, local_pid,
local_secret, local_secret,
closed: AtomicBool::new(false), closed: AtomicBool::new(false),
pool: Arc::new(ThreadPool::new().unwrap()),
run_channels, run_channels,
participant_channels: Arc::new(Mutex::new(Some(participant_channels))), participant_channels: Arc::new(Mutex::new(Some(participant_channels))),
participants: Arc::new(Mutex::new(HashMap::new())), participants: Arc::new(Mutex::new(HashMap::new())),
channel_ids: Arc::new(AtomicU64::new(0)), channel_ids: Arc::new(AtomicU64::new(0)),
channel_listener: Mutex::new(HashMap::new()), channel_listener: Mutex::new(HashMap::new()),
#[cfg(feature = "metrics")]
metrics, metrics,
protocol_metrics,
}, },
a2s_listen_s, a2s_listen_s,
a2s_connect_s, a2s_connect_s,
@ -149,7 +156,7 @@ impl Scheduler {
pub async fn run(mut self) { pub async fn run(mut self) {
let run_channels = self.run_channels.take().unwrap(); let run_channels = self.run_channels.take().unwrap();
futures::join!( tokio::join!(
self.listen_mgr(run_channels.a2s_listen_r), self.listen_mgr(run_channels.a2s_listen_r),
self.connect_mgr(run_channels.a2s_connect_r), self.connect_mgr(run_channels.a2s_connect_r),
self.disconnect_mgr(run_channels.a2s_disconnect_r), self.disconnect_mgr(run_channels.a2s_disconnect_r),
@ -160,6 +167,7 @@ impl Scheduler {
async fn listen_mgr(&self, a2s_listen_r: mpsc::UnboundedReceiver<A2sListen>) { async fn listen_mgr(&self, a2s_listen_r: mpsc::UnboundedReceiver<A2sListen>) {
trace!("Start listen_mgr"); trace!("Start listen_mgr");
let a2s_listen_r = UnboundedReceiverStream::new(a2s_listen_r);
a2s_listen_r a2s_listen_r
.for_each_concurrent(None, |(address, s2a_listen_result_s)| { .for_each_concurrent(None, |(address, s2a_listen_result_s)| {
let address = address; let address = address;
@ -196,8 +204,8 @@ impl Scheduler {
)>, )>,
) { ) {
trace!("Start connect_mgr"); trace!("Start connect_mgr");
while let Some((addr, pid_sender)) = a2s_connect_r.next().await { while let Some((addr, pid_sender)) = a2s_connect_r.recv().await {
let (protocol, handshake) = match addr { let (protocol, cid, handshake) = match addr {
ProtocolAddr::Tcp(addr) => { ProtocolAddr::Tcp(addr) => {
#[cfg(feature = "metrics")] #[cfg(feature = "metrics")]
self.metrics self.metrics
@ -211,51 +219,84 @@ impl Scheduler {
continue; continue;
}, },
}; };
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
info!("Connecting Tcp to: {}", stream.peer_addr().unwrap()); info!("Connecting Tcp to: {}", stream.peer_addr().unwrap());
( (
Protocols::Tcp(TcpProtocol::new( Protocols::new_tcp(stream, cid, Arc::clone(&self.protocol_metrics)),
stream, cid,
#[cfg(feature = "metrics")]
Arc::clone(&self.metrics),
)),
false, false,
) )
}, },
ProtocolAddr::Udp(addr) => { ProtocolAddr::Mpsc(addr) => {
#[cfg(feature = "metrics")] let mpsc_s = match MPSC_POOL.lock().await.get(&addr) {
self.metrics Some(s) => s.clone(),
.connect_requests_total None => {
.with_label_values(&["udp"]) pid_sender
.inc(); .send(Err(std::io::Error::new(
let socket = match net::UdpSocket::bind("0.0.0.0:0").await { std::io::ErrorKind::NotConnected,
Ok(socket) => Arc::new(socket), "no mpsc listen on this addr",
Err(e) => { )))
pid_sender.send(Err(e)).unwrap(); .unwrap();
continue; continue;
}, },
}; };
if let Err(e) = socket.connect(addr).await { let (remote_to_local_s, remote_to_local_r) =
pid_sender.send(Err(e)).unwrap(); mpsc::channel(Self::MPSC_CHANNEL_BOUND);
continue; let (local_to_remote_oneshot_s, local_to_remote_oneshot_r) = oneshot::channel();
}; mpsc_s
info!("Connecting Udp to: {}", addr); .send((remote_to_local_s, local_to_remote_oneshot_s))
let (udp_data_sender, udp_data_receiver) = mpsc::unbounded::<Vec<u8>>(); .unwrap();
let protocol = UdpProtocol::new( let local_to_remote_s = local_to_remote_oneshot_r.await.unwrap();
Arc::clone(&socket),
addr, let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
#[cfg(feature = "metrics")] info!(?addr, "Connecting Mpsc");
Arc::clone(&self.metrics), (
udp_data_receiver, Protocols::new_mpsc(
); local_to_remote_s,
self.pool.spawn_ok( remote_to_local_r,
Self::udp_single_channel_connect(Arc::clone(&socket), udp_data_sender) cid,
.instrument(tracing::info_span!("udp", ?addr)), Arc::clone(&self.protocol_metrics),
); ),
(Protocols::Udp(protocol), true) cid,
false,
)
}, },
/* */
//ProtocolAddr::Udp(addr) => {
//#[cfg(feature = "metrics")]
//self.metrics
//.connect_requests_total
//.with_label_values(&["udp"])
//.inc();
//let socket = match net::UdpSocket::bind("0.0.0.0:0").await {
//Ok(socket) => Arc::new(socket),
//Err(e) => {
//pid_sender.send(Err(e)).unwrap();
//continue;
//},
//};
//if let Err(e) = socket.connect(addr).await {
//pid_sender.send(Err(e)).unwrap();
//continue;
//};
//info!("Connecting Udp to: {}", addr);
//let (udp_data_sender, udp_data_receiver) = mpsc::unbounded_channel::<Vec<u8>>();
//let protocol = UdpProtocol::new(
//Arc::clone(&socket),
//addr,
//#[cfg(feature = "metrics")]
//Arc::clone(&self.metrics),
//udp_data_receiver,
//);
//self.runtime.spawn(
//Self::udp_single_channel_connect(Arc::clone(&socket), udp_data_sender)
//.instrument(tracing::info_span!("udp", ?addr)),
//);
//(Protocols::Udp(protocol), true)
//},
_ => unimplemented!(), _ => unimplemented!(),
}; };
self.init_protocol(protocol, Some(pid_sender), handshake) self.init_protocol(protocol, cid, Some(pid_sender), handshake)
.await; .await;
} }
trace!("Stop connect_mgr"); trace!("Stop connect_mgr");
@ -263,7 +304,9 @@ impl Scheduler {
async fn disconnect_mgr(&self, mut a2s_disconnect_r: mpsc::UnboundedReceiver<A2sDisconnect>) { async fn disconnect_mgr(&self, mut a2s_disconnect_r: mpsc::UnboundedReceiver<A2sDisconnect>) {
trace!("Start disconnect_mgr"); trace!("Start disconnect_mgr");
while let Some((pid, return_once_successful_shutdown)) = a2s_disconnect_r.next().await { while let Some((pid, (timeout_time, return_once_successful_shutdown))) =
a2s_disconnect_r.recv().await
{
//Closing Participants is done the following way: //Closing Participants is done the following way:
// 1. We drop our senders and receivers // 1. We drop our senders and receivers
// 2. we need to close BParticipant, this will drop its senders and receivers // 2. we need to close BParticipant, this will drop its senders and receivers
@ -277,7 +320,7 @@ impl Scheduler {
pi.s2b_shutdown_bparticipant_s pi.s2b_shutdown_bparticipant_s
.take() .take()
.unwrap() .unwrap()
.send(finished_sender) .send((timeout_time, finished_sender))
.unwrap(); .unwrap();
drop(pi); drop(pi);
trace!(?pid, "dropped bparticipant, waiting for finish"); trace!(?pid, "dropped bparticipant, waiting for finish");
@ -298,7 +341,7 @@ impl Scheduler {
mut b2s_prio_statistic_r: mpsc::UnboundedReceiver<B2sPrioStatistic>, mut b2s_prio_statistic_r: mpsc::UnboundedReceiver<B2sPrioStatistic>,
) { ) {
trace!("Start prio_adj_mgr"); trace!("Start prio_adj_mgr");
while let Some((_pid, _frame_cnt, _unused)) = b2s_prio_statistic_r.next().await { while let Some((_pid, _frame_cnt, _unused)) = b2s_prio_statistic_r.recv().await {
//TODO adjust prios in participants here! //TODO adjust prios in participants here!
} }
@ -320,7 +363,7 @@ impl Scheduler {
pi.s2b_shutdown_bparticipant_s pi.s2b_shutdown_bparticipant_s
.take() .take()
.unwrap() .unwrap()
.send(finished_sender) .send((Duration::from_secs(120), finished_sender))
.unwrap(); .unwrap();
(pid, finished_receiver) (pid, finished_receiver)
}) })
@ -370,43 +413,51 @@ impl Scheduler {
info!( info!(
?addr, ?addr,
?e, ?e,
"Listener couldn't be started due to error on tcp bind" "Tcp bind error durin listener startup"
); );
s2a_listen_result_s.send(Err(e)).unwrap(); s2a_listen_result_s.send(Err(e)).unwrap();
return; return;
}, },
}; };
trace!(?addr, "Listener bound"); trace!(?addr, "Listener bound");
let mut incoming = listener.incoming();
let mut end_receiver = s2s_stop_listening_r.fuse(); let mut end_receiver = s2s_stop_listening_r.fuse();
while let Some(stream) = select! { while let Some(data) = select! {
next = incoming.next().fuse() => next, next = listener.accept().fuse() => Some(next),
_ = end_receiver => None, _ = &mut end_receiver => None,
} { } {
let stream = match stream { let (stream, remote_addr) = match data {
Ok(s) => s, Ok((s, p)) => (s, p),
Err(e) => { Err(e) => {
warn!(?e, "TcpStream Error, ignoring connection attempt"); warn!(?e, "TcpStream Error, ignoring connection attempt");
continue; continue;
}, },
}; };
let peer_addr = match stream.peer_addr() { info!("Accepting Tcp from: {}", remote_addr);
Ok(s) => s, let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
Err(e) => { self.init_protocol(Protocols::new_tcp(stream, cid, Arc::clone(&self.protocol_metrics)), cid, None, true)
warn!(?e, "TcpStream Error, ignoring connection attempt");
continue;
},
};
info!("Accepting Tcp from: {}", peer_addr);
let protocol = TcpProtocol::new(
stream,
#[cfg(feature = "metrics")]
Arc::clone(&self.metrics),
);
self.init_protocol(Protocols::Tcp(protocol), None, true)
.await; .await;
} }
}, },
ProtocolAddr::Mpsc(addr) => {
let (mpsc_s, mut mpsc_r) = mpsc::unbounded_channel();
MPSC_POOL.lock().await.insert(addr, mpsc_s);
s2a_listen_result_s.send(Ok(())).unwrap();
trace!(?addr, "Listener bound");
let mut end_receiver = s2s_stop_listening_r.fuse();
while let Some((local_to_remote_s, local_remote_to_local_s)) = select! {
next = mpsc_r.recv().fuse() => next,
_ = &mut end_receiver => None,
} {
let (remote_to_local_s, remote_to_local_r) = mpsc::channel(Self::MPSC_CHANNEL_BOUND);
local_remote_to_local_s.send(remote_to_local_s).unwrap();
info!(?addr, "Accepting Mpsc from");
let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
self.init_protocol(Protocols::new_mpsc(local_to_remote_s, remote_to_local_r, cid, Arc::clone(&self.protocol_metrics)), cid, None, true)
.await;
}
warn!("MpscStream Failed, stopping");
},/*
ProtocolAddr::Udp(addr) => { ProtocolAddr::Udp(addr) => {
let socket = match net::UdpSocket::bind(addr).await { let socket = match net::UdpSocket::bind(addr).await {
Ok(socket) => { Ok(socket) => {
@ -432,7 +483,7 @@ impl Scheduler {
let mut data = [0u8; UDP_MAXIMUM_SINGLE_PACKET_SIZE_EVER]; let mut data = [0u8; UDP_MAXIMUM_SINGLE_PACKET_SIZE_EVER];
while let Ok((size, remote_addr)) = select! { while let Ok((size, remote_addr)) = select! {
next = socket.recv_from(&mut data).fuse() => next, next = socket.recv_from(&mut data).fuse() => next,
_ = end_receiver => Err(std::io::Error::new(std::io::ErrorKind::Other, "")), _ = &mut end_receiver => Err(std::io::Error::new(std::io::ErrorKind::Other, "")),
} { } {
let mut datavec = Vec::with_capacity(size); let mut datavec = Vec::with_capacity(size);
datavec.extend_from_slice(&data[0..size]); datavec.extend_from_slice(&data[0..size]);
@ -441,7 +492,8 @@ impl Scheduler {
#[allow(clippy::map_entry)] #[allow(clippy::map_entry)]
if !listeners.contains_key(&remote_addr) { if !listeners.contains_key(&remote_addr) {
info!("Accepting Udp from: {}", &remote_addr); info!("Accepting Udp from: {}", &remote_addr);
let (udp_data_sender, udp_data_receiver) = mpsc::unbounded::<Vec<u8>>(); let (udp_data_sender, udp_data_receiver) =
mpsc::unbounded_channel::<Vec<u8>>();
listeners.insert(remote_addr, udp_data_sender); listeners.insert(remote_addr, udp_data_sender);
let protocol = UdpProtocol::new( let protocol = UdpProtocol::new(
Arc::clone(&socket), Arc::clone(&socket),
@ -454,17 +506,18 @@ impl Scheduler {
.await; .await;
} }
let udp_data_sender = listeners.get_mut(&remote_addr).unwrap(); let udp_data_sender = listeners.get_mut(&remote_addr).unwrap();
udp_data_sender.send(datavec).await.unwrap(); udp_data_sender.send(datavec).unwrap();
} }
}, },*/
_ => unimplemented!(), _ => unimplemented!(),
} }
trace!(?addr, "Ending channel creator"); trace!(?addr, "Ending channel creator");
} }
#[allow(dead_code)]
    async fn udp_single_channel_connect(
        socket: Arc<net::UdpSocket>,
        mut w2p_udp_package_s: mpsc::UnboundedSender<Vec<u8>>,
        w2p_udp_package_s: mpsc::UnboundedSender<Vec<u8>>,
    ) {
        let addr = socket.local_addr();
        trace!(?addr, "Start udp_single_channel_connect");
@ -477,18 +530,19 @@ impl Scheduler {
        let mut data = [0u8; 9216];
        while let Ok(size) = select! {
            next = socket.recv(&mut data).fuse() => next,
            _ = end_receiver => Err(std::io::Error::new(std::io::ErrorKind::Other, "")),
            _ = &mut end_receiver => Err(std::io::Error::new(std::io::ErrorKind::Other, "")),
        } {
            let mut datavec = Vec::with_capacity(size);
            datavec.extend_from_slice(&data[0..size]);
            w2p_udp_package_s.send(datavec).await.unwrap();
            w2p_udp_package_s.send(datavec).unwrap();
        }
        trace!(?addr, "Stop udp_single_channel_connect");
    }
    async fn init_protocol(
        &self,
        protocol: Protocols,
        mut protocol: Protocols,
        cid: Cid,
        s2a_return_pid_s: Option<oneshot::Sender<io::Result<Participant>>>,
        send_handshake: bool,
    ) {
@ -498,36 +552,26 @@ impl Scheduler {
        Contra: - DOS possibility because we answer first
                - Speed, because otherwise the message can be send with the creation
        */
        let mut participant_channels = self.participant_channels.lock().await.clone().unwrap();
        let participant_channels = self.participant_channels.lock().await.clone().unwrap();
        // spawn is needed here, e.g. for TCP connect it would mean that only 1
        // participant can be in handshake phase ever! Someone could deadlock
        // the whole server easily for new clients UDP doesnt work at all, as
        // the UDP listening is done in another place.
        let cid = self.channel_ids.fetch_add(1, Ordering::Relaxed);
        let participants = Arc::clone(&self.participants);
        #[cfg(feature = "metrics")]
        let metrics = Arc::clone(&self.metrics);
        let pool = Arc::clone(&self.pool);
        let local_pid = self.local_pid;
        let local_secret = self.local_secret;
        // this is necessary for UDP to work at all and to remove code duplication
        self.pool.spawn_ok(
        tokio::spawn(
            async move {
                trace!(?cid, "Open channel and be ready for Handshake");
                let handshake = Handshake::new(
                use network_protocol::InitProtocol;
                    cid,
                let init_result = protocol
                    local_pid,
                    .initialize(send_handshake, local_pid, local_secret)
                    local_secret,
                    #[cfg(feature = "metrics")]
                    Arc::clone(&metrics),
                    send_handshake,
                );
                match handshake
                    .setup(&protocol)
                    .instrument(tracing::info_span!("handshake", ?cid))
                    .await
                    .await;
                {
                match init_result {
                    Ok((pid, sid, secret, leftover_cid_frame)) => {
                    Ok((pid, sid, secret)) => {
                        trace!(
                            ?cid,
                            ?pid,
@ -538,21 +582,16 @@ impl Scheduler {
                        debug!(?cid, "New participant connected via a channel");
                        let (
                            bparticipant,
                            a2b_stream_open_s,
                            a2b_open_stream_s,
                            b2a_stream_opened_r,
                            mut s2b_create_channel_s,
                            s2b_create_channel_s,
                            s2b_shutdown_bparticipant_s,
                        ) = BParticipant::new(
                        ) = BParticipant::new(local_pid, pid, sid, Arc::clone(&metrics));
                            pid,
                            sid,
                            #[cfg(feature = "metrics")]
                            Arc::clone(&metrics),
                        );
                        let participant = Participant::new(
                            local_pid,
                            pid,
                            a2b_stream_open_s,
                            a2b_open_stream_s,
                            b2a_stream_opened_r,
                            participant_channels.a2s_disconnect_s,
                        );
@ -566,24 +605,18 @@ impl Scheduler {
                        });
                        drop(participants);
                        trace!("dropped participants lock");
                        pool.spawn_ok(
                        let p = pid;
                        tokio::spawn(
                            bparticipant
                                .run(participant_channels.b2s_prio_statistic_s)
                                .instrument(tracing::info_span!("participant", ?pid)),
                                .instrument(tracing::info_span!("remote", ?p)),
                        );
                        //create a new channel within BParticipant and wait for it to run
                        let (b2s_create_channel_done_s, b2s_create_channel_done_r) =
                            oneshot::channel();
                        //From now on wire connects directly with bparticipant!
                        s2b_create_channel_s
                            .send((
                            .send((cid, sid, protocol, b2s_create_channel_done_s))
                                cid,
                                sid,
                                protocol,
                                leftover_cid_frame,
                                b2s_create_channel_done_s,
                            ))
                            .await
                            .unwrap();
                        b2s_create_channel_done_r.await.unwrap();
                        if let Some(pid_oneshot) = s2a_return_pid_s {
@ -594,7 +627,6 @@ impl Scheduler {
                            participant_channels
                                .s2a_connected_s
                                .send(participant)
                                .await
                                .unwrap();
                        }
                    } else {
@ -632,8 +664,8 @@ impl Scheduler {
                        //From now on this CHANNEL can receiver other frames!
                        // move directly to participant!
                    },
                    Err(()) => {
                    Err(e) => {
                        debug!(?cid, "Handshake from a new connection failed");
                        debug!(?cid, ?e, "Handshake from a new connection failed");
                        if let Some(pid_oneshot) = s2a_return_pid_s {
                            // someone is waiting with `connect`, so give them their Error
                            trace!(?cid, "returning the Err to api who requested the connect");

View File

@ -1,327 +0,0 @@
use bitflags::bitflags;
use rand::Rng;
use std::convert::TryFrom;
pub type Mid = u64;
pub type Cid = u64;
pub type Prio = u8;
bitflags! {
    /// Use promises to modify the behavior of [`Streams`];
    /// see the consts in this `struct` for the available options.
///
/// [`Streams`]: crate::api::Stream
pub struct Promises: u8 {
        /// this will guarantee that the order of messages which are sent on one side,
        /// is the same when received on the other.
const ORDERED = 0b00000001;
/// this will guarantee that messages received haven't been altered by errors,
/// like bit flips, this is done with a checksum.
const CONSISTENCY = 0b00000010;
        /// this will guarantee that the other side will receive every message
        /// exactly once; no messages are dropped
const GUARANTEED_DELIVERY = 0b00000100;
/// this will enable the internal compression on this
/// [`Stream`](crate::api::Stream)
#[cfg(feature = "compression")]
const COMPRESSED = 0b00001000;
/// this will enable the internal encryption on this
/// [`Stream`](crate::api::Stream)
const ENCRYPTED = 0b00010000;
}
}
impl Promises {
pub const fn to_le_bytes(self) -> [u8; 1] { self.bits.to_le_bytes() }
}
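Promises are plain bitflags that get OR-ed together when a stream is opened, so the contract of a stream is the union of the requested flags. A small test-style illustration using the `Promises` type defined above (it only relies on what `bitflags!` generates plus `to_le_bytes`):

#[test]
fn promises_compose() {
    // A reliable, checksummed stream, as most game channels request it.
    let reliable = Promises::ORDERED | Promises::CONSISTENCY;
    assert!(reliable.contains(Promises::ORDERED));
    assert!(!reliable.contains(Promises::ENCRYPTED));
    // On the wire the whole set is a single byte.
    assert_eq!(reliable.to_le_bytes(), [0b0000_0011]);
}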
pub(crate) const VELOREN_MAGIC_NUMBER: [u8; 7] = [86, 69, 76, 79, 82, 69, 78]; //VELOREN
pub const VELOREN_NETWORK_VERSION: [u32; 3] = [0, 5, 0];
pub(crate) const STREAM_ID_OFFSET1: Sid = Sid::new(0);
pub(crate) const STREAM_ID_OFFSET2: Sid = Sid::new(u64::MAX / 2);
/// Support struct used for uniquely identifying [`Participant`] over the
/// [`Network`].
///
/// [`Participant`]: crate::api::Participant
/// [`Network`]: crate::api::Network
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct Pid {
internal: u128,
}
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub(crate) struct Sid {
internal: u64,
}
// Used for Communication between Channel <----(TCP/UDP)----> Channel
#[derive(Debug)]
pub(crate) enum Frame {
Handshake {
magic_number: [u8; 7],
version: [u32; 3],
},
Init {
pid: Pid,
secret: u128,
},
Shutdown, /* Shutdown this channel gracefully, if all channels are shutdown, Participant
* is deleted */
OpenStream {
sid: Sid,
prio: Prio,
promises: Promises,
},
CloseStream {
sid: Sid,
},
DataHeader {
mid: Mid,
sid: Sid,
length: u64,
},
Data {
mid: Mid,
start: u64,
data: Vec<u8>,
},
/* WARNING: Sending RAW is only used for debug purposes in case someone write a new API
* against veloren Server! */
Raw(Vec<u8>),
}
impl Frame {
#[cfg(feature = "metrics")]
pub const FRAMES_LEN: u8 = 8;
#[cfg(feature = "metrics")]
pub const fn int_to_string(i: u8) -> &'static str {
match i {
0 => "Handshake",
1 => "Init",
2 => "Shutdown",
3 => "OpenStream",
4 => "CloseStream",
5 => "DataHeader",
6 => "Data",
7 => "Raw",
_ => "",
}
}
#[cfg(feature = "metrics")]
pub fn get_int(&self) -> u8 {
match self {
Frame::Handshake { .. } => 0,
Frame::Init { .. } => 1,
Frame::Shutdown => 2,
Frame::OpenStream { .. } => 3,
Frame::CloseStream { .. } => 4,
Frame::DataHeader { .. } => 5,
Frame::Data { .. } => 6,
Frame::Raw(_) => 7,
}
}
#[cfg(feature = "metrics")]
pub fn get_string(&self) -> &str { Self::int_to_string(self.get_int()) }
pub fn gen_handshake(buf: [u8; 19]) -> Self {
let magic_number = *<&[u8; 7]>::try_from(&buf[0..7]).unwrap();
Frame::Handshake {
magic_number,
version: [
u32::from_le_bytes(*<&[u8; 4]>::try_from(&buf[7..11]).unwrap()),
u32::from_le_bytes(*<&[u8; 4]>::try_from(&buf[11..15]).unwrap()),
u32::from_le_bytes(*<&[u8; 4]>::try_from(&buf[15..19]).unwrap()),
],
}
}
pub fn gen_init(buf: [u8; 32]) -> Self {
Frame::Init {
pid: Pid::from_le_bytes(*<&[u8; 16]>::try_from(&buf[0..16]).unwrap()),
secret: u128::from_le_bytes(*<&[u8; 16]>::try_from(&buf[16..32]).unwrap()),
}
}
pub fn gen_open_stream(buf: [u8; 10]) -> Self {
Frame::OpenStream {
sid: Sid::from_le_bytes(*<&[u8; 8]>::try_from(&buf[0..8]).unwrap()),
prio: buf[8],
promises: Promises::from_bits_truncate(buf[9]),
}
}
pub fn gen_close_stream(buf: [u8; 8]) -> Self {
Frame::CloseStream {
sid: Sid::from_le_bytes(*<&[u8; 8]>::try_from(&buf[0..8]).unwrap()),
}
}
pub fn gen_data_header(buf: [u8; 24]) -> Self {
Frame::DataHeader {
mid: Mid::from_le_bytes(*<&[u8; 8]>::try_from(&buf[0..8]).unwrap()),
sid: Sid::from_le_bytes(*<&[u8; 8]>::try_from(&buf[8..16]).unwrap()),
length: u64::from_le_bytes(*<&[u8; 8]>::try_from(&buf[16..24]).unwrap()),
}
}
pub fn gen_data(buf: [u8; 18]) -> (Mid, u64, u16) {
let mid = Mid::from_le_bytes(*<&[u8; 8]>::try_from(&buf[0..8]).unwrap());
let start = u64::from_le_bytes(*<&[u8; 8]>::try_from(&buf[8..16]).unwrap());
let length = u16::from_le_bytes(*<&[u8; 2]>::try_from(&buf[16..18]).unwrap());
(mid, start, length)
}
pub fn gen_raw(buf: [u8; 2]) -> u16 {
u16::from_le_bytes(*<&[u8; 2]>::try_from(&buf[0..2]).unwrap())
}
}
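All of the `gen_*` constructors follow the same shape: a fixed-size little-endian buffer is sliced into its fields with `try_from` plus `from_le_bytes`. A worked example for the 24-byte data header, written as a crate-internal test since `Sid` and its byte helpers are `pub(crate)`:

#[test]
fn gen_data_header_example() {
    // mid = 7, sid = 1337, length = 42, each encoded as a little-endian u64.
    let mut buf = [0u8; 24];
    buf[0..8].copy_from_slice(&7u64.to_le_bytes());
    buf[8..16].copy_from_slice(&1337u64.to_le_bytes());
    buf[16..24].copy_from_slice(&42u64.to_le_bytes());
    match Frame::gen_data_header(buf) {
        Frame::DataHeader { mid, sid, length } => {
            assert_eq!(mid, 7);
            assert_eq!(sid, Sid::new(1337));
            assert_eq!(length, 42);
        },
        _ => unreachable!(),
    }
}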
impl Pid {
/// create a new Pid with a random interior value
///
/// # Example
/// ```rust
/// use veloren_network::{Network, Pid};
///
/// let pid = Pid::new();
/// let _ = Network::new(pid);
/// ```
pub fn new() -> Self {
Self {
internal: rand::thread_rng().gen(),
}
}
/// don't use fake! just for testing!
    /// This will panic if pid is greater than 7, as I do not want you to use
/// this in production!
#[doc(hidden)]
pub fn fake(pid_offset: u8) -> Self {
assert!(pid_offset < 8);
let o = pid_offset as u128;
const OFF: [u128; 5] = [
0x40,
0x40 * 0x40,
0x40 * 0x40 * 0x40,
0x40 * 0x40 * 0x40 * 0x40,
0x40 * 0x40 * 0x40 * 0x40 * 0x40,
];
Self {
internal: o + o * OFF[0] + o * OFF[1] + o * OFF[2] + o * OFF[3] + o * OFF[4],
}
}
pub(crate) fn to_le_bytes(&self) -> [u8; 16] { self.internal.to_le_bytes() }
pub(crate) fn from_le_bytes(bytes: [u8; 16]) -> Self {
Self {
internal: u128::from_le_bytes(bytes),
}
}
}
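Two properties of `Pid` that the tests below rely on, spelled out as a crate-internal check: `fake` repeats the same 6-bit value in every sixlet (so its short Debug/Display form is one repeated letter), and the 16-byte little-endian wire form round-trips losslessly:

#[test]
fn pid_fake_and_roundtrip() {
    let pid = Pid::fake(2);
    // Every sixlet is 2, so the printed form is six 'C's.
    assert_eq!(format!("{}", pid), "CCCCCC");
    // The wire form is the raw little-endian u128.
    assert_eq!(Pid::from_le_bytes(pid.to_le_bytes()), pid);
}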
impl Sid {
pub const fn new(internal: u64) -> Self { Self { internal } }
pub(crate) fn to_le_bytes(&self) -> [u8; 8] { self.internal.to_le_bytes() }
pub(crate) fn from_le_bytes(bytes: [u8; 8]) -> Self {
Self {
internal: u64::from_le_bytes(bytes),
}
}
}
impl std::fmt::Debug for Pid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
const BITS_PER_SIXLET: usize = 6;
//only print last 6 chars of number as full u128 logs are unreadable
const CHAR_COUNT: usize = 6;
for i in 0..CHAR_COUNT {
write!(
f,
"{}",
sixlet_to_str((self.internal >> (i * BITS_PER_SIXLET)) & 0x3F)
)?;
}
Ok(())
}
}
impl Default for Pid {
fn default() -> Self { Pid::new() }
}
impl std::fmt::Display for Pid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) }
}
impl std::ops::AddAssign for Sid {
fn add_assign(&mut self, other: Self) {
*self = Self {
internal: self.internal + other.internal,
};
}
}
impl std::fmt::Debug for Sid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
//only print last 6 chars of number as full u128 logs are unreadable
write!(f, "{}", self.internal.rem_euclid(1000000))
}
}
impl From<u64> for Sid {
fn from(internal: u64) -> Self { Sid { internal } }
}
impl std::fmt::Display for Sid {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.internal)
}
}
fn sixlet_to_str(sixlet: u128) -> char {
b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[sixlet as usize] as char
}
#[cfg(test)]
mod tests {
use crate::types::*;
#[test]
fn frame_int2str() {
assert_eq!(Frame::int_to_string(3), "OpenStream");
assert_eq!(Frame::int_to_string(7), "Raw");
assert_eq!(Frame::int_to_string(8), "");
}
#[test]
fn frame_get_int() {
assert_eq!(Frame::get_int(&Frame::Raw(b"Foo".to_vec())), 7);
assert_eq!(Frame::get_int(&Frame::Shutdown), 2);
}
#[test]
fn frame_creation() {
Pid::new();
assert_eq!(format!("{}", Pid::fake(0)), "AAAAAA");
assert_eq!(format!("{}", Pid::fake(1)), "BBBBBB");
assert_eq!(format!("{}", Pid::fake(2)), "CCCCCC");
}
#[test]
fn test_sixlet_to_str() {
assert_eq!(sixlet_to_str(0), 'A');
assert_eq!(sixlet_to_str(29), 'd');
assert_eq!(sixlet_to_str(63), '/');
}
}

View File

@ -18,8 +18,8 @@
//! - You sometimes see sleep(1000ms) this is used when we rely on the
//! underlying TCP functionality, as this simulates client and server
use async_std::task;
use std::sync::Arc;
use task::block_on;
use tokio::runtime::Runtime;
use veloren_network::{Network, ParticipantError, Pid, Promises, StreamError};
mod helper;
use helper::{network_participant_stream, tcp};
@ -27,26 +27,26 @@ use helper::{network_participant_stream, tcp};
#[test]
fn close_network() {
    let (_, _) = helper::setup(false, 0);
    let (_, _p1_a, mut s1_a, _, _p1_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _, _p1_a, mut s1_a, _, _p1_b, mut s1_b) = network_participant_stream(tcp());
    std::thread::sleep(std::time::Duration::from_millis(1000));
    assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
    let msg1: Result<String, _> = block_on(s1_b.recv());
    let msg1: Result<String, _> = r.block_on(s1_b.recv());
    assert_eq!(msg1, Err(StreamError::StreamClosed));
}
#[test]
fn close_participant() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, p1_a, mut s1_a, _n_b, p1_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, p1_a, mut s1_a, _n_b, p1_b, mut s1_b) = network_participant_stream(tcp());
    block_on(p1_a.disconnect()).unwrap();
    r.block_on(p1_a.disconnect()).unwrap();
    block_on(p1_b.disconnect()).unwrap();
    r.block_on(p1_b.disconnect()).unwrap();
    assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
    assert_eq!(
        block_on(s1_b.recv::<String>()),
        r.block_on(s1_b.recv::<String>()),
        Err(StreamError::StreamClosed)
    );
}
@ -54,26 +54,25 @@ fn close_participant() {
#[test]
fn close_stream() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _, mut s1_a, _n_b, _, _) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _, mut s1_a, _n_b, _, _) = network_participant_stream(tcp());
    // s1_b is dropped directly while s1_a isn't
    std::thread::sleep(std::time::Duration::from_millis(1000));
    assert_eq!(s1_a.send("Hello World"), Err(StreamError::StreamClosed));
    assert_eq!(
        block_on(s1_a.recv::<String>()),
        r.block_on(s1_a.recv::<String>()),
        Err(StreamError::StreamClosed)
    );
}
///THIS is actually a bug which currently luckily doesn't trigger, but with new
///WE must NOT create runtimes inside a Runtime, this check needs to verify
/// async-std WE must make sure, if a stream is `drop`ed inside a `block_on`,
/// that we dont panic there
/// that no panic is thrown.
#[test]
fn close_streams_in_block_on() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, s1_a, _n_b, _p_b, s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) = network_participant_stream(tcp());
    block_on(async {
    r.block_on(async {
        //make it locally so that they are dropped later
        let mut s1_a = s1_a;
        let mut s1_b = s1_b;
@ -81,19 +80,20 @@ fn close_streams_in_block_on() {
        assert_eq!(s1_b.recv().await, Ok("ping".to_string()));
        drop(s1_a);
    });
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}
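The rewritten doc comment above states the new invariant: with tokio there is exactly one `Runtime`, owned outside of any async context, and nothing inside a `block_on` may create or block on another runtime. A tiny sketch of the allowed pattern (plain tokio, not the test helpers):

use std::sync::Arc;
use tokio::runtime::Runtime;

fn main() {
    // One shared runtime drives every future; the tests receive it as `r`.
    let r = Arc::new(Runtime::new().expect("failed to build runtime"));
    let answer = r.block_on(async { 40 + 2 });
    assert_eq!(answer, 42);
    // Anti-pattern (panics at runtime): nesting another `block_on`, or dropping
    // a second `Runtime`, inside the async block passed to `r.block_on`.
}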
#[test]
fn stream_simple_3msg_then_close() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(1u8).unwrap();
    s1_a.send(42).unwrap();
    s1_a.send("3rdMessage").unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok(1u8));
    assert_eq!(r.block_on(s1_b.recv()), Ok(1u8));
    assert_eq!(block_on(s1_b.recv()), Ok(42));
    assert_eq!(r.block_on(s1_b.recv()), Ok(42));
    assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    drop(s1_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    assert_eq!(s1_b.send("Hello World"), Err(StreamError::StreamClosed));
@ -103,43 +103,43 @@ fn stream_simple_3msg_then_close() {
fn stream_send_first_then_receive() {
    // recv should still be possible even if stream got closed if they are in queue
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(1u8).unwrap();
    s1_a.send(42).unwrap();
    s1_a.send("3rdMessage").unwrap();
    drop(s1_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    assert_eq!(block_on(s1_b.recv()), Ok(1u8));
    assert_eq!(r.block_on(s1_b.recv()), Ok(1u8));
    assert_eq!(block_on(s1_b.recv()), Ok(42));
    assert_eq!(r.block_on(s1_b.recv()), Ok(42));
    assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    assert_eq!(s1_b.send("Hello World"), Err(StreamError::StreamClosed));
}
#[test]
fn stream_send_1_then_close_stream() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send("this message must be received, even if stream is closed already!")
        .unwrap();
    drop(s1_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    let exp = Ok("this message must be received, even if stream is closed already!".to_string());
    assert_eq!(block_on(s1_b.recv()), exp);
    assert_eq!(r.block_on(s1_b.recv()), exp);
    println!("all received and done");
}
#[test]
fn stream_send_100000_then_close_stream() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(s1_a);
    let exp = Ok("woop_PARTY_HARD_woop".to_string());
    println!("start receiving");
    block_on(async {
    r.block_on(async {
        for _ in 0..100000 {
            assert_eq!(s1_b.recv().await, exp);
        }
@ -150,19 +150,20 @@ fn stream_send_100000_then_close_stream() {
#[test]
fn stream_send_100000_then_close_stream_remote() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(s1_a);
    drop(_s1_b);
    //no receiving
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}
#[test]
fn stream_send_100000_then_close_stream_remote2() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
@ -170,12 +171,13 @@ fn stream_send_100000_then_close_stream_remote2() {
    std::thread::sleep(std::time::Duration::from_millis(1000));
    drop(s1_a);
    //no receiving
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}
#[test]
fn stream_send_100000_then_close_stream_remote3() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..100000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
@ -183,12 +185,13 @@ fn stream_send_100000_then_close_stream_remote3() {
    std::thread::sleep(std::time::Duration::from_millis(1000));
    drop(s1_a);
    //no receiving
    drop((_n_a, _p_a, _n_b, _p_b)); //clean teardown
}
#[test]
fn close_part_then_network() {
    let (_, _) = helper::setup(false, 0);
    let (n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
    let (_, n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..1000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
@ -201,7 +204,7 @@ fn close_part_then_network() {
#[test]
fn close_network_then_part() {
    let (_, _) = helper::setup(false, 0);
    let (n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
    let (_, n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..1000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
@ -214,140 +217,143 @@ fn close_network_then_part() {
#[test]
fn close_network_then_disconnect_part() {
    let (_, _) = helper::setup(false, 0);
    let (n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = block_on(network_participant_stream(tcp()));
    let (r, n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
    for _ in 0..1000 {
        s1_a.send("woop_PARTY_HARD_woop").unwrap();
    }
    drop(n_a);
    assert!(block_on(p_a.disconnect()).is_err());
    assert!(r.block_on(p_a.disconnect()).is_err());
    std::thread::sleep(std::time::Duration::from_millis(1000));
}
#[test]
fn opened_stream_before_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, p_a, _, _n_b, p_b, _) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
    let mut s2_a = block_on(p_a.open(10, Promises::empty())).unwrap();
    let mut s2_a = r.block_on(p_a.open(4, Promises::empty())).unwrap();
    s2_a.send("HelloWorld").unwrap();
    let mut s2_b = block_on(p_b.opened()).unwrap();
    let mut s2_b = r.block_on(p_b.opened()).unwrap();
    drop(p_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    assert_eq!(block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    drop((_n_a, _n_b, p_b)); //clean teardown
}
#[test]
fn opened_stream_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, p_a, _, _n_b, p_b, _) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
    let mut s2_a = block_on(p_a.open(10, Promises::empty())).unwrap();
    let mut s2_a = r.block_on(p_a.open(3, Promises::empty())).unwrap();
    s2_a.send("HelloWorld").unwrap();
    drop(p_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    let mut s2_b = block_on(p_b.opened()).unwrap();
    let mut s2_b = r.block_on(p_b.opened()).unwrap();
    assert_eq!(block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(
        block_on(p_b.opened()).unwrap_err(),
        r.block_on(p_b.opened()).unwrap_err(),
        ParticipantError::ParticipantDisconnected
    );
    drop((_n_a, _n_b, p_b)); //clean teardown
}
#[test]
fn open_stream_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, p_a, _, _n_b, p_b, _) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
    let mut s2_a = block_on(p_a.open(10, Promises::empty())).unwrap();
    let mut s2_a = r.block_on(p_a.open(4, Promises::empty())).unwrap();
    s2_a.send("HelloWorld").unwrap();
    drop(p_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    let mut s2_b = block_on(p_b.opened()).unwrap();
    let mut s2_b = r.block_on(p_b.opened()).unwrap();
    assert_eq!(block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(r.block_on(s2_b.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(
        block_on(p_b.open(20, Promises::empty())).unwrap_err(),
        r.block_on(p_b.open(5, Promises::empty())).unwrap_err(),
        ParticipantError::ParticipantDisconnected
    );
    drop((_n_a, _n_b, p_b)); //clean teardown
}
#[test]
fn failed_stream_open_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, p_a, _, _n_b, p_b, _) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
    drop(p_a);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    assert_eq!(
        block_on(p_b.opened()).unwrap_err(),
        r.block_on(p_b.opened()).unwrap_err(),
        ParticipantError::ParticipantDisconnected
    );
    drop((_n_a, _n_b, p_b)); //clean teardown
}
#[test]
fn open_participant_before_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (n_a, f) = Network::new(Pid::fake(0));
    let r = Arc::new(Runtime::new().unwrap());
    std::thread::spawn(f);
    let n_a = Network::new(Pid::fake(0), Arc::clone(&r));
    let (n_b, f) = Network::new(Pid::fake(1));
    let n_b = Network::new(Pid::fake(1), Arc::clone(&r));
    std::thread::spawn(f);
    let addr = tcp();
    block_on(n_a.listen(addr.clone())).unwrap();
    r.block_on(n_a.listen(addr.clone())).unwrap();
    let p_b = block_on(n_b.connect(addr)).unwrap();
    let p_b = r.block_on(n_b.connect(addr)).unwrap();
    let mut s1_b = block_on(p_b.open(10, Promises::empty())).unwrap();
    let mut s1_b = r.block_on(p_b.open(4, Promises::empty())).unwrap();
    s1_b.send("HelloWorld").unwrap();
    let p_a = block_on(n_a.connected()).unwrap();
    let p_a = r.block_on(n_a.connected()).unwrap();
    drop(s1_b);
    drop(p_b);
    drop(n_b);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    let mut s1_a = block_on(p_a.opened()).unwrap();
    let mut s1_a = r.block_on(p_a.opened()).unwrap();
    assert_eq!(block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
}
#[test]
fn open_participant_after_remote_part_is_closed() {
    let (_, _) = helper::setup(false, 0);
    let (n_a, f) = Network::new(Pid::fake(0));
    let r = Arc::new(Runtime::new().unwrap());
    std::thread::spawn(f);
    let n_a = Network::new(Pid::fake(0), Arc::clone(&r));
    let (n_b, f) = Network::new(Pid::fake(1));
    let n_b = Network::new(Pid::fake(1), Arc::clone(&r));
    std::thread::spawn(f);
    let addr = tcp();
    block_on(n_a.listen(addr.clone())).unwrap();
    r.block_on(n_a.listen(addr.clone())).unwrap();
    let p_b = block_on(n_b.connect(addr)).unwrap();
    let p_b = r.block_on(n_b.connect(addr)).unwrap();
    let mut s1_b = block_on(p_b.open(10, Promises::empty())).unwrap();
    let mut s1_b = r.block_on(p_b.open(4, Promises::empty())).unwrap();
    s1_b.send("HelloWorld").unwrap();
    drop(s1_b);
    drop(p_b);
    drop(n_b);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    let p_a = block_on(n_a.connected()).unwrap();
    let p_a = r.block_on(n_a.connected()).unwrap();
    let mut s1_a = block_on(p_a.opened()).unwrap();
    let mut s1_a = r.block_on(p_a.opened()).unwrap();
    assert_eq!(block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
}
#[test]
fn close_network_scheduler_completely() {
    let (_, _) = helper::setup(false, 0);
    let (n_a, f) = Network::new(Pid::fake(0));
    let r = Arc::new(Runtime::new().unwrap());
    let ha = std::thread::spawn(f);
    let n_a = Network::new(Pid::fake(0), Arc::clone(&r));
    let (n_b, f) = Network::new(Pid::fake(1));
    let n_b = Network::new(Pid::fake(1), Arc::clone(&r));
    let hb = std::thread::spawn(f);
    let addr = tcp();
    block_on(n_a.listen(addr.clone())).unwrap();
    r.block_on(n_a.listen(addr.clone())).unwrap();
    let p_b = block_on(n_b.connect(addr)).unwrap();
    let p_b = r.block_on(n_b.connect(addr)).unwrap();
    let mut s1_b = block_on(p_b.open(10, Promises::empty())).unwrap();
    let mut s1_b = r.block_on(p_b.open(4, Promises::empty())).unwrap();
    s1_b.send("HelloWorld").unwrap();
    let p_a = block_on(n_a.connected()).unwrap();
    let p_a = r.block_on(n_a.connected()).unwrap();
    let mut s1_a = block_on(p_a.opened()).unwrap();
    let mut s1_a = r.block_on(p_a.opened()).unwrap();
    assert_eq!(block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
    assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
    drop(n_a);
    drop(n_b);
    std::thread::sleep(std::time::Duration::from_millis(1000));
    ha.join().unwrap();
    hb.join().unwrap();
    drop(p_b);
    drop(p_a);
    let runtime = Arc::try_unwrap(r).expect("runtime is not alone, there still exist a reference");
    runtime.shutdown_timeout(std::time::Duration::from_secs(300));
}
#[test]
fn dont_panic_on_multiply_recv_after_close() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(11u32).unwrap();
    drop(s1_a);
@ -362,7 +368,7 @@ fn dont_panic_on_multiply_recv_after_close() {
#[test]
fn dont_panic_on_recv_send_after_close() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(11u32).unwrap();
    drop(s1_a);
@ -375,7 +381,7 @@ fn dont_panic_on_recv_send_after_close() {
#[test]
fn dont_panic_on_multiple_send_after_close() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(11u32).unwrap();
    drop(s1_a);

View File

@ -1,10 +1,14 @@
use lazy_static::*;
use std::{
    net::SocketAddr,
    sync::atomic::{AtomicU16, Ordering},
    sync::{
        atomic::{AtomicU16, AtomicU64, Ordering},
        Arc,
    },
    thread,
    time::Duration,
};
use tokio::runtime::Runtime;
use tracing::*;
use tracing_subscriber::EnvFilter;
use veloren_network::{Network, Participant, Pid, Promises, ProtocolAddr, Stream};
@ -43,38 +47,57 @@ pub fn setup(tracing: bool, sleep: u64) -> (u64, u64) {
}
#[allow(dead_code)]
pub async fn network_participant_stream(
pub fn network_participant_stream(
    addr: ProtocolAddr,
) -> (Network, Participant, Stream, Network, Participant, Stream) {
) -> (
    let (n_a, f_a) = Network::new(Pid::fake(0));
    Arc<Runtime>,
    std::thread::spawn(f_a);
    Network,
    let (n_b, f_b) = Network::new(Pid::fake(1));
    Participant,
    std::thread::spawn(f_b);
    Stream,
    Network,
    Participant,
    Stream,
) {
    let runtime = Arc::new(Runtime::new().unwrap());
    let (n_a, p1_a, s1_a, n_b, p1_b, s1_b) = runtime.block_on(async {
        let n_a = Network::new(Pid::fake(0), Arc::clone(&runtime));
        let n_b = Network::new(Pid::fake(1), Arc::clone(&runtime));
        n_a.listen(addr.clone()).await.unwrap();
        let p1_b = n_b.connect(addr).await.unwrap();
        let p1_a = n_a.connected().await.unwrap();
        let s1_a = p1_a.open(10, Promises::empty()).await.unwrap();
        let s1_a = p1_a.open(4, Promises::empty()).await.unwrap();
        let s1_b = p1_b.opened().await.unwrap();
        (n_a, p1_a, s1_a, n_b, p1_b, s1_b)
    });
    (runtime, n_a, p1_a, s1_a, n_b, p1_b, s1_b)
}
#[allow(dead_code)]
pub fn tcp() -> veloren_network::ProtocolAddr {
pub fn tcp() -> ProtocolAddr {
    lazy_static! {
        static ref PORTS: AtomicU16 = AtomicU16::new(5000);
    }
    let port = PORTS.fetch_add(1, Ordering::Relaxed);
    veloren_network::ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
    ProtocolAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], port)))
}
#[allow(dead_code)]
pub fn udp() -> veloren_network::ProtocolAddr {
pub fn udp() -> ProtocolAddr {
    lazy_static! {
        static ref PORTS: AtomicU16 = AtomicU16::new(5000);
    }
    let port = PORTS.fetch_add(1, Ordering::Relaxed);
    veloren_network::ProtocolAddr::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
    ProtocolAddr::Udp(SocketAddr::from(([127, 0, 0, 1], port)))
}
#[allow(dead_code)]
pub fn mpsc() -> ProtocolAddr {
    lazy_static! {
        static ref PORTS: AtomicU64 = AtomicU64::new(5000);
    }
    let port = PORTS.fetch_add(1, Ordering::Relaxed);
    ProtocolAddr::Mpsc(port)
}
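The `tcp`, `udp` and new `mpsc` helpers all use the same trick: a process-wide atomic counter hands out a fresh port (or mpsc channel id) per call, so tests running in parallel never collide on an address. A stand-alone equivalent with a hypothetical function name, using a const-initialized static instead of `lazy_static`:

use std::net::SocketAddr;
use std::sync::atomic::{AtomicU16, Ordering};

static PORTS: AtomicU16 = AtomicU16::new(5000);

fn unique_tcp_addr() -> SocketAddr {
    // fetch_add returns the previous value, so every caller gets a distinct port.
    let port = PORTS.fetch_add(1, Ordering::Relaxed);
    SocketAddr::from(([127, 0, 0, 1], port))
}

fn main() {
    assert_ne!(unique_tcp_addr(), unique_tcp_addr());
}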

View File

@ -1,8 +1,8 @@
use async_std::task;
use std::sync::Arc;
use task::block_on;
use tokio::runtime::Runtime;
use veloren_network::{NetworkError, StreamError};
mod helper;
use helper::{network_participant_stream, tcp, udp};
use helper::{mpsc, network_participant_stream, tcp, udp};
use std::io::ErrorKind;
use veloren_network::{Network, Pid, Promises, ProtocolAddr};
@ -10,73 +10,105 @@ use veloren_network::{Network, Pid, Promises, ProtocolAddr};
#[ignore]
fn network_20s() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _, _, _n_b, _, _) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _, _, _n_b, _, _) = network_participant_stream(tcp());
    std::thread::sleep(std::time::Duration::from_secs(30));
}
#[test]
fn stream_simple() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send("Hello World").unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
fn stream_try_recv() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(4242u32).unwrap();
    std::thread::sleep(std::time::Duration::from_secs(1));
    assert_eq!(s1_b.try_recv(), Ok(Some(4242u32)));
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
fn stream_simple_3msg() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send("Hello World").unwrap();
    s1_a.send(1337).unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
    assert_eq!(block_on(s1_b.recv()), Ok(1337));
    assert_eq!(r.block_on(s1_b.recv()), Ok(1337));
    s1_a.send("3rdMessage").unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
fn stream_simple_mpsc() {
let (_, _) = helper::setup(false, 0);
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(mpsc());
s1_a.send("Hello World").unwrap();
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
fn stream_simple_mpsc_3msg() {
let (_, _) = helper::setup(false, 0);
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(mpsc());
s1_a.send("Hello World").unwrap();
s1_a.send(1337).unwrap();
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
assert_eq!(r.block_on(s1_b.recv()), Ok(1337));
s1_a.send("3rdMessage").unwrap();
assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
#[ignore]
fn stream_simple_udp() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(udp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(udp());
    s1_a.send("Hello World").unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
#[ignore]
fn stream_simple_udp_3msg() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(udp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(udp());
    s1_a.send("Hello World").unwrap();
    s1_a.send(1337).unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok("Hello World".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
    assert_eq!(block_on(s1_b.recv()), Ok(1337));
    assert_eq!(r.block_on(s1_b.recv()), Ok(1337));
    s1_a.send("3rdMessage").unwrap();
    assert_eq!(block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    assert_eq!(r.block_on(s1_b.recv()), Ok("3rdMessage".to_string()));
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
#[ignore]
fn tcp_and_udp_2_connections() -> std::result::Result<(), Box<dyn std::error::Error>> {
    let (_, _) = helper::setup(false, 0);
    let (network, f) = Network::new(Pid::new());
    let r = Arc::new(Runtime::new().unwrap());
    let (remote, fr) = Network::new(Pid::new());
    let network = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(f);
    let remote = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(fr);
    r.block_on(async {
    block_on(async {
        let network = network;
        let remote = remote;
        remote
            .listen(ProtocolAddr::Tcp("127.0.0.1:2000".parse().unwrap()))
            .await?;
@ -95,20 +127,20 @@ fn tcp_and_udp_2_connections() -> std::result::Result<(), Box<dyn std::error::Er
}
#[test]
#[ignore]
fn failed_listen_on_used_ports() -> std::result::Result<(), Box<dyn std::error::Error>> {
    let (_, _) = helper::setup(false, 0);
    let (network, f) = Network::new(Pid::new());
    let r = Arc::new(Runtime::new().unwrap());
    std::thread::spawn(f);
    let network = Network::new(Pid::new(), Arc::clone(&r));
    let udp1 = udp();
    let tcp1 = tcp();
    block_on(network.listen(udp1.clone()))?;
    r.block_on(network.listen(udp1.clone()))?;
    block_on(network.listen(tcp1.clone()))?;
    r.block_on(network.listen(tcp1.clone()))?;
    std::thread::sleep(std::time::Duration::from_millis(200));
    let (network2, f2) = Network::new(Pid::new());
    let network2 = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(f2);
    let e1 = r.block_on(network2.listen(udp1));
    let e1 = block_on(network2.listen(udp1));
    let e2 = r.block_on(network2.listen(tcp1));
    let e2 = block_on(network2.listen(tcp1));
    match e1 {
        Err(NetworkError::ListenFailed(e)) if e.kind() == ErrorKind::AddrInUse => (),
        _ => panic!(),
@ -117,6 +149,7 @@ fn failed_listen_on_used_ports() -> std::result::Result<(), Box<dyn std::error::
        Err(NetworkError::ListenFailed(e)) if e.kind() == ErrorKind::AddrInUse => (),
        _ => panic!(),
    };
    drop((network, network2)); //clean teardown
    Ok(())
}
@ -130,11 +163,12 @@ fn api_stream_send_main() -> std::result::Result<(), Box<dyn std::error::Error>>
    let (_, _) = helper::setup(false, 0);
    // Create a Network, listen on Port `1200` and wait for a Stream to be opened,
    // then answer `Hello World`
    let (network, f) = Network::new(Pid::new());
    let r = Arc::new(Runtime::new().unwrap());
    let (remote, fr) = Network::new(Pid::new());
    let network = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(f);
    let remote = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(fr);
    r.block_on(async {
    block_on(async {
        let network = network;
        let remote = remote;
        network
            .listen(ProtocolAddr::Tcp("127.0.0.1:1200".parse().unwrap()))
            .await?;
@ -143,7 +177,7 @@ fn api_stream_send_main() -> std::result::Result<(), Box<dyn std::error::Error>>
            .await?;
        // keep it alive
        let _stream_p = remote_p
            .open(16, Promises::ORDERED | Promises::CONSISTENCY)
            .open(4, Promises::ORDERED | Promises::CONSISTENCY)
            .await?;
        let participant_a = network.connected().await?;
        let mut stream_a = participant_a.opened().await?;
@ -158,11 +192,12 @@ fn api_stream_recv_main() -> std::result::Result<(), Box<dyn std::error::Error>>
    let (_, _) = helper::setup(false, 0);
    // Create a Network, listen on Port `1220` and wait for a Stream to be opened,
    // then listen on it
    let (network, f) = Network::new(Pid::new());
    let r = Arc::new(Runtime::new().unwrap());
    let (remote, fr) = Network::new(Pid::new());
    let network = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(f);
    let remote = Network::new(Pid::new(), Arc::clone(&r));
    std::thread::spawn(fr);
    r.block_on(async {
    block_on(async {
        let network = network;
        let remote = remote;
        network
            .listen(ProtocolAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
            .await?;
@ -170,7 +205,7 @@ fn api_stream_recv_main() -> std::result::Result<(), Box<dyn std::error::Error>>
            .connect(ProtocolAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
            .await?;
        let mut stream_p = remote_p
            .open(16, Promises::ORDERED | Promises::CONSISTENCY)
            .open(4, Promises::ORDERED | Promises::CONSISTENCY)
            .await?;
        stream_p.send("Hello World")?;
        let participant_a = network.connected().await?;
@ -184,19 +219,20 @@ fn api_stream_recv_main() -> std::result::Result<(), Box<dyn std::error::Error>>
#[test]
fn wrong_parse() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send(1337).unwrap();
    match block_on(s1_b.recv::<String>()) {
    match r.block_on(s1_b.recv::<String>()) {
        Err(StreamError::Deserialize(_)) => (),
        _ => panic!("this should fail, but it doesnt!"),
    }
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}
#[test]
fn multiple_try_recv() {
    let (_, _) = helper::setup(false, 0);
    let (_n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = block_on(network_participant_stream(tcp()));
    let (_, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
    s1_a.send("asd").unwrap();
    s1_a.send(11u32).unwrap();
@ -208,4 +244,5 @@ fn multiple_try_recv() {
    drop(s1_a);
    std::thread::sleep(std::time::Duration::from_secs(1));
    assert_eq!(s1_b.try_recv::<String>(), Err(StreamError::StreamClosed));
    drop((_n_a, _n_b, _p_a, _p_b)); //clean teardown
}

View File

@ -15,6 +15,7 @@ server = { package = "veloren-server", path = "../server", default-features = fa
common = { package = "veloren-common", path = "../common" }
common-net = { package = "veloren-common-net", path = "../common/net" }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread"] }
ansi-parser = "0.7"
clap = "2.33"
crossterm = "0.18"

View File

@ -17,8 +17,11 @@ pub fn init(basic: bool) {
        env.add_directive("veloren_world::sim=info".parse().unwrap())
            .add_directive("veloren_world::civ=info".parse().unwrap())
            .add_directive("uvth=warn".parse().unwrap())
            .add_directive("tiny_http=warn".parse().unwrap())
            .add_directive("hyper=info".parse().unwrap())
            .add_directive("prometheus_hyper=info".parse().unwrap())
            .add_directive("mio::pool=info".parse().unwrap())
            .add_directive("mio::sys::windows=debug".parse().unwrap())
            .add_directive("veloren_network_protocol=info".parse().unwrap())
            .add_directive(
                "veloren_server::persistence::character=info"
                    .parse()

View File

@ -129,8 +129,19 @@ fn main() -> io::Result<()> {
    let server_port = &server_settings.gameserver_address.port();
    let metrics_port = &server_settings.metrics_address.port();
    // Create server
    let mut server = Server::new(server_settings, editable_settings, &server_data_dir)
    let runtime = Arc::new(
        .expect("Failed to create server instance!");
        tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .unwrap(),
    );
    let mut server = Server::new(
        server_settings,
        editable_settings,
        &server_data_dir,
        runtime,
    )
    .expect("Failed to create server instance!");
    info!(
        ?server_port,

View File

@ -16,7 +16,7 @@ common = { package = "veloren-common", path = "../common" }
common-sys = { package = "veloren-common-sys", path = "../common/sys" }
common-net = { package = "veloren-common-net", path = "../common/net" }
world = { package = "veloren-world", path = "../world" }
network = { package = "veloren_network", path = "../network", features = ["metrics", "compression"], default-features = false }
network = { package = "veloren-network", path = "../network", features = ["metrics", "compression"], default-features = false }
specs = { git = "https://github.com/amethyst/specs.git", features = ["shred-derive"], rev = "d4435bdf496cf322c74886ca09dd8795984919b4" }
specs-idvs = { git = "https://gitlab.com/veloren/specs-idvs.git", rev = "9fab7b396acd6454585486e50ae4bfe2069858a9" }
@ -28,6 +28,8 @@ futures-util = "0.3.7"
futures-executor = "0.3"
futures-timer = "3.0"
futures-channel = "0.3"
tokio = { version = "1", default-features = false, features = ["rt"] }
prometheus-hyper = "0.1.1"
itertools = "0.9"
lazy_static = "1.4.0"
scan_fmt = { git = "https://github.com/Imberflur/scan_fmt" }
@ -40,7 +42,6 @@ hashbrown = { version = "0.9", features = ["rayon", "serde", "nightly"] }
rayon = "1.5"
crossbeam-channel = "0.5"
prometheus = { version = "0.11", default-features = false}
tiny_http = "0.8.0"
portpicker = { git = "https://github.com/xMAC94x/portpicker-rs", rev = "df6b37872f3586ac3b21d08b56c8ec7cd92fb172" }
authc = { git = "https://gitlab.com/veloren/auth.git", rev = "bffb5181a35c19ddfd33ee0b4aedba741aafb68d" }
libsqlite3-sys = { version = "0.18", features = ["bundled"] }

View File

@ -104,11 +104,11 @@ impl ConnectionHandler {
    let reliable = Promises::ORDERED | Promises::CONSISTENCY;
    let reliablec = reliable | Promises::COMPRESSED;
    let general_stream = participant.open(10, reliablec).await?;
    let general_stream = participant.open(3, reliablec).await?;
    let ping_stream = participant.open(5, reliable).await?;
    let ping_stream = participant.open(2, reliable).await?;
    let mut register_stream = participant.open(10, reliablec).await?;
    let mut register_stream = participant.open(3, reliablec).await?;
    let character_screen_stream = participant.open(10, reliablec).await?;
    let character_screen_stream = participant.open(3, reliablec).await?;
    let in_game_stream = participant.open(10, reliablec).await?;
    let in_game_stream = participant.open(3, reliablec).await?;
    let server_data = receiver.recv()?;

View File

@ -76,12 +76,14 @@ use common_net::{
use common_sys::plugin::PluginMgr; use common_sys::plugin::PluginMgr;
use common_sys::state::State; use common_sys::state::State;
use futures_executor::block_on; use futures_executor::block_on;
use metrics::{PhysicsMetrics, ServerMetrics, StateTickMetrics, TickMetrics}; use metrics::{PhysicsMetrics, StateTickMetrics, TickMetrics};
use network::{Network, Pid, ProtocolAddr}; use network::{Network, Pid, ProtocolAddr};
use persistence::{ use persistence::{
character_loader::{CharacterLoader, CharacterLoaderResponseKind}, character_loader::{CharacterLoader, CharacterLoaderResponseKind},
character_updater::CharacterUpdater, character_updater::CharacterUpdater,
}; };
use prometheus::Registry;
use prometheus_hyper::Server as PrometheusServer;
use specs::{join::Join, Builder, Entity as EcsEntity, RunNow, SystemData, WorldExt}; use specs::{join::Join, Builder, Entity as EcsEntity, RunNow, SystemData, WorldExt};
use std::{ use std::{
i32, i32,
@ -91,6 +93,7 @@ use std::{
}; };
#[cfg(not(feature = "worldgen"))] #[cfg(not(feature = "worldgen"))]
use test_world::{IndexOwned, World}; use test_world::{IndexOwned, World};
use tokio::{runtime::Runtime, sync::Notify};
use tracing::{debug, error, info, trace}; use tracing::{debug, error, info, trace};
use uvth::{ThreadPool, ThreadPoolBuilder}; use uvth::{ThreadPool, ThreadPoolBuilder};
use vek::*; use vek::*;
@ -120,9 +123,10 @@ pub struct Server {
connection_handler: ConnectionHandler, connection_handler: ConnectionHandler,
_runtime: Arc<Runtime>,
thread_pool: ThreadPool, thread_pool: ThreadPool,
metrics: ServerMetrics, metrics_shutdown: Arc<Notify>,
tick_metrics: TickMetrics, tick_metrics: TickMetrics,
state_tick_metrics: StateTickMetrics, state_tick_metrics: StateTickMetrics,
physics_metrics: PhysicsMetrics, physics_metrics: PhysicsMetrics,
@ -136,6 +140,7 @@ impl Server {
settings: Settings, settings: Settings,
editable_settings: EditableSettings, editable_settings: EditableSettings,
data_dir: &std::path::Path, data_dir: &std::path::Path,
runtime: Arc<Runtime>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
info!("Server is data dir is: {}", data_dir.display()); info!("Server is data dir is: {}", data_dir.display());
if settings.auth_server_address.is_none() { if settings.auth_server_address.is_none() {
@ -347,28 +352,35 @@ impl Server {
state.ecs_mut().insert(DeletedEntities::default()); state.ecs_mut().insert(DeletedEntities::default());
let mut metrics = ServerMetrics::new();
// register all metrics submodules here // register all metrics submodules here
let (tick_metrics, registry_tick) = TickMetrics::new(metrics.tick_clone()) let (tick_metrics, registry_tick) =
.expect("Failed to initialize server tick metrics submodule."); TickMetrics::new().expect("Failed to initialize server tick metrics submodule.");
let (state_tick_metrics, registry_state) = StateTickMetrics::new().unwrap(); let (state_tick_metrics, registry_state) = StateTickMetrics::new().unwrap();
let (physics_metrics, registry_physics) = PhysicsMetrics::new().unwrap(); let (physics_metrics, registry_physics) = PhysicsMetrics::new().unwrap();
registry_chunk(&metrics.registry()).expect("failed to register chunk gen metrics"); let registry = Arc::new(Registry::new());
registry_network(&metrics.registry()).expect("failed to register network request metrics"); registry_chunk(&registry).expect("failed to register chunk gen metrics");
registry_player(&metrics.registry()).expect("failed to register player metrics"); registry_network(&registry).expect("failed to register network request metrics");
registry_tick(&metrics.registry()).expect("failed to register tick metrics"); registry_player(&registry).expect("failed to register player metrics");
registry_state(&metrics.registry()).expect("failed to register state metrics"); registry_tick(&registry).expect("failed to register tick metrics");
registry_physics(&metrics.registry()).expect("failed to register state metrics"); registry_state(&registry).expect("failed to register state metrics");
registry_physics(&registry).expect("failed to register physics metrics");
let thread_pool = ThreadPoolBuilder::new() let thread_pool = ThreadPoolBuilder::new()
.name("veloren-worker".to_string()) .name("veloren-worker".to_string())
.build(); .build();
let (network, f) = Network::new_with_registry(Pid::new(), &metrics.registry()); let network = Network::new_with_registry(Pid::new(), Arc::clone(&runtime), &registry);
metrics let metrics_shutdown = Arc::new(Notify::new());
.run(settings.metrics_address) let metrics_shutdown_clone = Arc::clone(&metrics_shutdown);
.expect("Failed to initialize server metrics submodule."); let addr = settings.metrics_address;
thread_pool.execute(f); runtime.spawn(async move {
PrometheusServer::run(
Arc::clone(&registry),
addr,
metrics_shutdown_clone.notified(),
)
.await
});
block_on(network.listen(ProtocolAddr::Tcp(settings.gameserver_address)))?; block_on(network.listen(ProtocolAddr::Tcp(settings.gameserver_address)))?;
let connection_handler = ConnectionHandler::new(network); let connection_handler = ConnectionHandler::new(network);
@ -386,9 +398,10 @@ impl Server {
connection_handler, connection_handler,
_runtime: runtime,
thread_pool, thread_pool,
metrics, metrics_shutdown,
tick_metrics, tick_metrics,
state_tick_metrics, state_tick_metrics,
physics_metrics, physics_metrics,
@ -900,7 +913,7 @@ impl Server {
.tick_time .tick_time
.with_label_values(&["metrics"]) .with_label_values(&["metrics"])
.set(end_of_server_tick.elapsed().as_nanos() as i64); .set(end_of_server_tick.elapsed().as_nanos() as i64);
self.metrics.tick(); self.tick_metrics.tick();
// 9) Finish the tick, pass control back to the frontend. // 9) Finish the tick, pass control back to the frontend.
@ -1146,6 +1159,7 @@ impl Server {
impl Drop for Server { impl Drop for Server {
fn drop(&mut self) { fn drop(&mut self) {
self.metrics_shutdown.notify_one();
self.state self.state
.notify_players(ServerGeneral::Disconnect(DisconnectReason::Shutdown)); .notify_players(ServerGeneral::Disconnect(DisconnectReason::Shutdown));
} }
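
The `tiny_http` exporter thread is gone: `prometheus-hyper` serves the shared `Registry` as a task on the runtime, and a `tokio::sync::Notify` triggered from `Drop` shuts it down. A minimal sketch of that wiring, assuming the `Server::run(registry, addr, shutdown_future)` call shape of prometheus-hyper 0.1 used above (address and port are placeholders):

```rust
use std::{net::SocketAddr, sync::Arc};

use prometheus::Registry;
use prometheus_hyper::Server as PrometheusServer;
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let registry = Arc::new(Registry::new());
    let shutdown = Arc::new(Notify::new());

    // Serve the registry over HTTP until the shutdown future resolves.
    let addr: SocketAddr = "127.0.0.1:14005".parse().unwrap();
    let exporter = tokio::spawn(PrometheusServer::run(Arc::clone(&registry), addr, {
        let shutdown = Arc::clone(&shutdown);
        async move { shutdown.notified().await }
    }));

    // ... the game loop would run here; on shutdown, wake the exporter task ...
    shutdown.notify_one();
    let _ = exporter.await;
}
```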

View File

@ -1,19 +1,16 @@
use prometheus::{ use prometheus::{
Encoder, Gauge, HistogramOpts, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Gauge, HistogramOpts, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Opts,
Opts, Registry, TextEncoder, Registry,
}; };
use std::{ use std::{
convert::TryInto, convert::TryInto,
error::Error, error::Error,
net::SocketAddr,
sync::{ sync::{
atomic::{AtomicBool, AtomicU64, Ordering}, atomic::{AtomicU64, Ordering},
Arc, Arc,
}, },
thread,
time::{Duration, SystemTime, UNIX_EPOCH}, time::{Duration, SystemTime, UNIX_EPOCH},
}; };
use tracing::{debug, error};
type RegistryFn = Box<dyn FnOnce(&Registry) -> Result<(), prometheus::Error>>; type RegistryFn = Box<dyn FnOnce(&Registry) -> Result<(), prometheus::Error>>;
@ -60,13 +57,6 @@ pub struct TickMetrics {
tick: Arc<AtomicU64>, tick: Arc<AtomicU64>,
} }
pub struct ServerMetrics {
running: Arc<AtomicBool>,
handle: Option<thread::JoinHandle<()>>,
registry: Option<Registry>,
tick: Arc<AtomicU64>,
}
impl PhysicsMetrics { impl PhysicsMetrics {
pub fn new() -> Result<(Self, RegistryFn), prometheus::Error> { pub fn new() -> Result<(Self, RegistryFn), prometheus::Error> {
let entity_entity_collision_checks_count = IntCounter::with_opts(Opts::new( let entity_entity_collision_checks_count = IntCounter::with_opts(Opts::new(
@ -265,7 +255,7 @@ impl ChunkGenMetrics {
} }
impl TickMetrics { impl TickMetrics {
pub fn new(tick: Arc<AtomicU64>) -> Result<(Self, RegistryFn), Box<dyn Error>> { pub fn new() -> Result<(Self, RegistryFn), Box<dyn Error>> {
let chonks_count = IntGauge::with_opts(Opts::new( let chonks_count = IntGauge::with_opts(Opts::new(
"chonks_count", "chonks_count",
"number of all chonks currently active on the server", "number of all chonks currently active on the server",
@ -315,6 +305,7 @@ impl TickMetrics {
let time_of_day_clone = time_of_day.clone(); let time_of_day_clone = time_of_day.clone();
let light_count_clone = light_count.clone(); let light_count_clone = light_count.clone();
let tick_time_clone = tick_time.clone(); let tick_time_clone = tick_time.clone();
let tick = Arc::new(AtomicU64::new(0));
let f = |registry: &Registry| { let f = |registry: &Registry| {
registry.register(Box::new(chonks_count_clone))?; registry.register(Box::new(chonks_count_clone))?;
@ -346,87 +337,7 @@ impl TickMetrics {
)) ))
} }
pub fn tick(&self) { self.tick.fetch_add(1, Ordering::Relaxed); }
pub fn is_100th_tick(&self) -> bool { self.tick.load(Ordering::Relaxed).rem_euclid(100) == 0 } pub fn is_100th_tick(&self) -> bool { self.tick.load(Ordering::Relaxed).rem_euclid(100) == 0 }
} }
impl ServerMetrics {
#[allow(clippy::new_without_default)] // TODO: Pending review in #587
pub fn new() -> Self {
let running = Arc::new(AtomicBool::new(false));
let tick = Arc::new(AtomicU64::new(0));
let registry = Some(Registry::new());
Self {
running,
handle: None,
registry,
tick,
}
}
pub fn registry(&self) -> &Registry {
match self.registry {
Some(ref r) => r,
None => panic!("You cannot longer register new metrics after the server has started!"),
}
}
pub fn run(&mut self, addr: SocketAddr) -> Result<(), Box<dyn Error>> {
self.running.store(true, Ordering::Relaxed);
let running2 = Arc::clone(&self.running);
let registry = self
.registry
.take()
.expect("ServerMetrics must be already started");
//TODO: make this a job
self.handle = Some(thread::spawn(move || {
let server = tiny_http::Server::http(addr).unwrap();
const TIMEOUT: Duration = Duration::from_secs(1);
debug!("starting tiny_http server to serve metrics");
while running2.load(Ordering::Relaxed) {
let request = match server.recv_timeout(TIMEOUT) {
Ok(Some(rq)) => rq,
Ok(None) => continue,
Err(e) => {
error!(?e, "metrics http server error");
break;
},
};
let mf = registry.gather();
let encoder = TextEncoder::new();
let mut buffer = vec![];
encoder
.encode(&mf, &mut buffer)
.expect("Failed to encoder metrics text.");
let response = tiny_http::Response::from_string(
String::from_utf8(buffer).expect("Failed to parse bytes as a string."),
);
if let Err(e) = request.respond(response) {
error!(
?e,
"The metrics HTTP server had encountered and error with answering",
);
}
}
debug!("stopping tiny_http server to serve metrics");
}));
Ok(())
}
pub fn tick(&self) -> u64 { self.tick.fetch_add(1, Ordering::Relaxed) + 1 }
pub fn tick_clone(&self) -> Arc<AtomicU64> { Arc::clone(&self.tick) }
}
impl Drop for ServerMetrics {
fn drop(&mut self) {
self.running.store(false, Ordering::Relaxed);
let handle = self.handle.take();
handle
.expect("ServerMetrics worker handle does not exist.")
.join()
.expect("Error shutting down prometheus metric exporter");
}
}
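
With `ServerMetrics` removed, every metrics submodule follows the same shape: its constructor returns the struct plus a `RegistryFn`, and the caller decides which `Registry` to register on. A self-contained sketch of that pattern with a hypothetical `ExampleMetrics` (the real submodules above only differ in having more collectors):

```rust
use prometheus::{IntCounter, Opts, Registry};

// Same alias as in the metrics module: a one-shot registration closure.
type RegistryFn = Box<dyn FnOnce(&Registry) -> Result<(), prometheus::Error>>;

struct ExampleMetrics {
    ticks: IntCounter,
}

impl ExampleMetrics {
    fn new() -> Result<(Self, RegistryFn), prometheus::Error> {
        let ticks = IntCounter::with_opts(Opts::new("example_ticks", "number of ticks run"))?;
        let ticks_clone = ticks.clone();
        let f: RegistryFn =
            Box::new(move |registry: &Registry| registry.register(Box::new(ticks_clone)));
        Ok((Self { ticks }, f))
    }
}

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();
    let (metrics, register) = ExampleMetrics::new()?;
    register(&registry)?; // the server registers all submodules on one shared registry
    metrics.ticks.inc();
    assert_eq!(registry.gather().len(), 1);
    Ok(())
}
```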

View File

@ -82,6 +82,8 @@ ron = {version = "0.6", default-features = false}
serde = {version = "1.0", features = [ "rc", "derive" ]} serde = {version = "1.0", features = [ "rc", "derive" ]}
treeculler = "0.1.0" treeculler = "0.1.0"
uvth = "3.1.1" uvth = "3.1.1"
tokio = { version = "1", default-features = false, features = ["rt-multi-thread"] }
num_cpus = "1.0"
# vec_map = { version = "0.8.2" } # vec_map = { version = "0.8.2" }
inline_tweak = "1.0.2" inline_tweak = "1.0.2"
itertools = "0.10.0" itertools = "0.10.0"

View File

@ -373,7 +373,7 @@ impl<'a> Widget for Chat<'a> {
let ChatMsg { chat_type, .. } = &message; let ChatMsg { chat_type, .. } = &message;
// For each ChatType needing localization get/set matching pre-formatted // For each ChatType needing localization get/set matching pre-formatted
// localized string. This string will be formatted with the data // localized string. This string will be formatted with the data
// provided in ChatType in the client/src/lib.rs // provided in ChatType in the client/src/mod.rs
// fn format_message called below // fn format_message called below
message.message = match chat_type { message.message = match chat_type {
ChatType::Online(_) => self ChatType::Online(_) => self

View File

@ -45,6 +45,7 @@ pub fn init(settings: &Settings) -> Vec<impl Drop> {
.add_directive("uvth=warn".parse().unwrap()) .add_directive("uvth=warn".parse().unwrap())
.add_directive("tiny_http=warn".parse().unwrap()) .add_directive("tiny_http=warn".parse().unwrap())
.add_directive("mio::sys::windows=debug".parse().unwrap()) .add_directive("mio::sys::windows=debug".parse().unwrap())
.add_directive("veloren_network_protocol=info".parse().unwrap())
.add_directive( .add_directive(
"veloren_server::persistence::character=info" "veloren_server::persistence::character=info"
.parse() .parse()
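
The new `veloren_network_protocol=info` directive slots into the existing filter chain. A minimal sketch of how per-crate directives compose with `tracing_subscriber`'s `EnvFilter` (assuming the `env-filter` feature; the layer setup is simplified compared to the real `init`):

```rust
use tracing_subscriber::{filter::EnvFilter, fmt, prelude::*};

fn main() {
    // Default to `info`, quieten noisy dependencies, keep protocol-level info visible.
    let filter = EnvFilter::new("info")
        .add_directive("uvth=warn".parse().unwrap())
        .add_directive("veloren_network_protocol=info".parse().unwrap());

    tracing_subscriber::registry()
        .with(fmt::layer())
        .with(filter)
        .init();

    tracing::info!("logging initialised");
}
```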

View File

@ -71,6 +71,15 @@ impl ClientInit {
let mut last_err = None; let mut last_err = None;
let cores = num_cpus::get();
let runtime = Arc::new(
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.worker_threads(if cores > 4 { cores - 1 } else { cores })
.build()
.unwrap(),
);
const FOUR_MINUTES_RETRIES: u64 = 48; const FOUR_MINUTES_RETRIES: u64 = 48;
'tries: for _ in 0..FOUR_MINUTES_RETRIES { 'tries: for _ in 0..FOUR_MINUTES_RETRIES {
if cancel2.load(Ordering::Relaxed) { if cancel2.load(Ordering::Relaxed) {
@ -79,7 +88,7 @@ impl ClientInit {
for socket_addr in for socket_addr in
first_addrs.clone().into_iter().chain(second_addrs.clone()) first_addrs.clone().into_iter().chain(second_addrs.clone())
{ {
match Client::new(socket_addr, view_distance) { match Client::new(socket_addr, view_distance, Arc::clone(&runtime)) {
Ok(mut client) => { Ok(mut client) => {
if let Err(e) = if let Err(e) =
client.register(username, password, |auth_server| { client.register(username, password, |auth_server| {
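
On the client the runtime is sized explicitly: with more than four cores, one core is left free for the main/render thread, otherwise all cores are used. A small sketch of just that heuristic (the surrounding `ClientInit` plumbing is omitted):

```rust
use std::sync::Arc;

/// Worker count for the client runtime: spare one core for the main thread
/// once there are enough cores to go around.
fn worker_threads(cores: usize) -> usize {
    if cores > 4 { cores - 1 } else { cores }
}

fn main() {
    let cores = num_cpus::get();
    let runtime = Arc::new(
        tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .worker_threads(worker_threads(cores))
            .build()
            .expect("failed to build tokio runtime"),
    );
    // This Arc is what gets cloned into `Client::new(addr, view_distance, runtime)`.
    runtime.block_on(async { println!("client runtime with {} workers", worker_threads(cores)) });
}
```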

View File

@ -82,6 +82,14 @@ impl Singleplayer {
let editable_settings = server::EditableSettings::singleplayer(&server_data_dir); let editable_settings = server::EditableSettings::singleplayer(&server_data_dir);
let thread_pool = client.map(|c| c.thread_pool().clone()); let thread_pool = client.map(|c| c.thread_pool().clone());
let cores = num_cpus::get();
let runtime = Arc::new(
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.worker_threads(if cores > 4 { cores - 1 } else { cores })
.build()
.unwrap(),
);
let settings2 = settings.clone(); let settings2 = settings.clone();
let paused = Arc::new(AtomicBool::new(false)); let paused = Arc::new(AtomicBool::new(false));
@ -92,7 +100,7 @@ impl Singleplayer {
let thread = thread::spawn(move || { let thread = thread::spawn(move || {
let mut server = None; let mut server = None;
if let Err(e) = result_sender.send( if let Err(e) = result_sender.send(
match Server::new(settings2, editable_settings, &server_data_dir) { match Server::new(settings2, editable_settings, &server_data_dir, runtime) {
Ok(s) => { Ok(s) => {
server = Some(s); server = Some(s);
Ok(()) Ok(())
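
For singleplayer the server is still built on its own OS thread, and the outcome is sent back over a channel so the UI can surface errors. A stripped-down sketch of that handshake (the `Ok(())` stands in for the real `Server::new(...)` call):

```rust
use std::{sync::mpsc, thread};

fn main() {
    let (result_sender, result_receiver) = mpsc::channel::<Result<(), String>>();

    let server_thread = thread::spawn(move || {
        // In the real code this is `Server::new(settings, editable_settings, dir, runtime)`.
        let outcome: Result<(), String> = Ok(());
        // If the receiver is gone there is nobody left to report to.
        let _ = result_sender.send(outcome);
        // ... the server tick loop would run here ...
    });

    match result_receiver.recv().expect("server thread dropped the sender") {
        Ok(()) => println!("singleplayer server created"),
        Err(e) => eprintln!("failed to create singleplayer server: {}", e),
    }
    server_thread.join().unwrap();
}
```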