mirror of
https://gitlab.com/veloren/veloren.git
synced 2024-08-30 18:12:32 +00:00
Reduce overhead of messaging systems.
This commit is contained in:
parent
aea4aca057
commit
2c15d0af56
29
Cargo.lock
generated
29
Cargo.lock
generated
@ -1752,6 +1752,12 @@ version = "1.2.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650"
|
checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "drop_guard"
|
||||||
|
version = "0.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2c4a817d8b683f6e649aed359aab0c47a875377516bb5791d0f7e46d9066d209"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "egui"
|
name = "egui"
|
||||||
version = "0.12.0"
|
version = "0.12.0"
|
||||||
@ -4621,7 +4627,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "2f61dcf0b917cd75d4521d7343d1ffff3d1583054133c9b5cbea3375c703c40d"
|
checksum = "2f61dcf0b917cd75d4521d7343d1ffff3d1583054133c9b5cbea3375c703c40d"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"profiling-procmacros",
|
"profiling-procmacros",
|
||||||
"tracy-client 0.13.2",
|
"tracy-client",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -6387,13 +6393,13 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing-tracy"
|
name = "tracing-tracy"
|
||||||
version = "0.10.0"
|
version = "0.9.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ed3ebef1f9f0d00aaa29239537effef65b82c56040c680f540fc6cedfac7b230"
|
checksum = "23a42311a35ed976d72f359de43e9fe028ec9d9f1051c4c52bd05a4f66ff3cbf"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"tracing-core",
|
"tracing-core",
|
||||||
"tracing-subscriber",
|
"tracing-subscriber",
|
||||||
"tracy-client 0.14.0",
|
"tracy-client",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -6407,17 +6413,6 @@ dependencies = [
|
|||||||
"tracy-client-sys",
|
"tracy-client-sys",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "tracy-client"
|
|
||||||
version = "0.14.0"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "f901ea566c34f5fdc987962495ebfea20c18d781e271967edcc0f9897e339815"
|
|
||||||
dependencies = [
|
|
||||||
"loom",
|
|
||||||
"once_cell",
|
|
||||||
"tracy-client-sys",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracy-client-sys"
|
name = "tracy-client-sys"
|
||||||
version = "0.17.1"
|
version = "0.17.1"
|
||||||
@ -6763,7 +6758,7 @@ version = "0.10.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"directories-next",
|
"directories-next",
|
||||||
"tracing",
|
"tracing",
|
||||||
"tracy-client 0.13.2",
|
"tracy-client",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -6937,6 +6932,7 @@ dependencies = [
|
|||||||
"chrono",
|
"chrono",
|
||||||
"chrono-tz",
|
"chrono-tz",
|
||||||
"crossbeam-channel",
|
"crossbeam-channel",
|
||||||
|
"drop_guard",
|
||||||
"enumset",
|
"enumset",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"hashbrown 0.12.3",
|
"hashbrown 0.12.3",
|
||||||
@ -6945,6 +6941,7 @@ dependencies = [
|
|||||||
"lazy_static",
|
"lazy_static",
|
||||||
"noise",
|
"noise",
|
||||||
"num_cpus",
|
"num_cpus",
|
||||||
|
"parking_lot 0.12.1",
|
||||||
"portpicker",
|
"portpicker",
|
||||||
"prometheus",
|
"prometheus",
|
||||||
"prometheus-hyper",
|
"prometheus-hyper",
|
||||||
|
@ -30,7 +30,7 @@ struct Opt {
|
|||||||
fn main() {
|
fn main() {
|
||||||
let opt = Opt::from_args();
|
let opt = Opt::from_args();
|
||||||
// Start logging
|
// Start logging
|
||||||
common_frontend::init_stdout(None);
|
let _guards = common_frontend::init_stdout(None);
|
||||||
// Run clients and stuff
|
// Run clients and stuff
|
||||||
//
|
//
|
||||||
// NOTE: "swarm0" is assumed to be an admin already
|
// NOTE: "swarm0" is assumed to be an admin already
|
||||||
@ -72,9 +72,7 @@ fn main() {
|
|||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
loop {
|
std::thread::park();
|
||||||
thread::sleep(Duration::from_secs_f32(1.0));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn run_client_new_thread(
|
fn run_client_new_thread(
|
||||||
@ -102,23 +100,26 @@ fn run_client(
|
|||||||
opt: Opt,
|
opt: Opt,
|
||||||
finished_init: Arc<AtomicU32>,
|
finished_init: Arc<AtomicU32>,
|
||||||
) -> Result<(), veloren_client::Error> {
|
) -> Result<(), veloren_client::Error> {
|
||||||
// Connect to localhost
|
let mut client = loop {
|
||||||
let addr = ConnectionArgs::Tcp {
|
// Connect to localhost
|
||||||
prefer_ipv6: false,
|
let addr = ConnectionArgs::Tcp {
|
||||||
hostname: "localhost".into(),
|
prefer_ipv6: false,
|
||||||
};
|
hostname: "localhost".into(),
|
||||||
let runtime_clone = Arc::clone(&runtime);
|
};
|
||||||
// NOTE: use a no-auth server
|
let runtime_clone = Arc::clone(&runtime);
|
||||||
let mut client = runtime
|
// NOTE: use a no-auth server
|
||||||
.block_on(Client::new(
|
match runtime.block_on(Client::new(
|
||||||
addr,
|
addr,
|
||||||
runtime_clone,
|
runtime_clone,
|
||||||
&mut None,
|
&mut None,
|
||||||
&username,
|
&username,
|
||||||
"",
|
"",
|
||||||
|_| false,
|
|_| false,
|
||||||
))
|
)) {
|
||||||
.expect("Failed to connect to the server");
|
Err(e) => tracing::warn!(?e, "Client {} disconnected", index),
|
||||||
|
Ok(client) => break client,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let mut clock = common::clock::Clock::new(Duration::from_secs_f32(1.0 / 30.0));
|
let mut clock = common::clock::Clock::new(Duration::from_secs_f32(1.0 / 30.0));
|
||||||
|
|
||||||
|
@ -39,11 +39,11 @@ use common::{
|
|||||||
mounting::Rider,
|
mounting::Rider,
|
||||||
outcome::Outcome,
|
outcome::Outcome,
|
||||||
recipe::{ComponentRecipeBook, RecipeBook},
|
recipe::{ComponentRecipeBook, RecipeBook},
|
||||||
resources::{PlayerEntity, TimeOfDay},
|
resources::{GameMode, PlayerEntity, TimeOfDay},
|
||||||
spiral::Spiral2d,
|
spiral::Spiral2d,
|
||||||
terrain::{
|
terrain::{
|
||||||
block::Block, map::MapConfig, neighbors, site::DungeonKindMeta, BiomeKind, SiteKindMeta,
|
block::Block, map::MapConfig, neighbors, site::DungeonKindMeta, BiomeKind, SiteKindMeta,
|
||||||
SpriteKind, TerrainChunk, TerrainChunkSize,
|
SpriteKind, TerrainChunk, TerrainChunkSize, TerrainGrid,
|
||||||
},
|
},
|
||||||
trade::{PendingTrade, SitePrices, TradeAction, TradeId, TradeResult},
|
trade::{PendingTrade, SitePrices, TradeAction, TradeId, TradeResult},
|
||||||
uid::{Uid, UidAllocator},
|
uid::{Uid, UidAllocator},
|
||||||
@ -281,7 +281,7 @@ impl Client {
|
|||||||
) -> Result<Self, Error> {
|
) -> Result<Self, Error> {
|
||||||
let network = Network::new(Pid::new(), &runtime);
|
let network = Network::new(Pid::new(), &runtime);
|
||||||
|
|
||||||
let participant = match addr {
|
let mut participant = match addr {
|
||||||
ConnectionArgs::Tcp {
|
ConnectionArgs::Tcp {
|
||||||
hostname,
|
hostname,
|
||||||
prefer_ipv6,
|
prefer_ipv6,
|
||||||
@ -304,7 +304,7 @@ impl Client {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let stream = participant.opened().await?;
|
let stream = participant.opened().await?;
|
||||||
let mut ping_stream = participant.opened().await?;
|
let ping_stream = participant.opened().await?;
|
||||||
let mut register_stream = participant.opened().await?;
|
let mut register_stream = participant.opened().await?;
|
||||||
let character_screen_stream = participant.opened().await?;
|
let character_screen_stream = participant.opened().await?;
|
||||||
let in_game_stream = participant.opened().await?;
|
let in_game_stream = participant.opened().await?;
|
||||||
@ -340,6 +340,314 @@ impl Client {
|
|||||||
|
|
||||||
// Wait for initial sync
|
// Wait for initial sync
|
||||||
let mut ping_interval = tokio::time::interval(Duration::from_secs(1));
|
let mut ping_interval = tokio::time::interval(Duration::from_secs(1));
|
||||||
|
let ServerInit::GameSync {
|
||||||
|
entity_package,
|
||||||
|
time_of_day,
|
||||||
|
max_group_size,
|
||||||
|
client_timeout,
|
||||||
|
world_map,
|
||||||
|
recipe_book,
|
||||||
|
component_recipe_book,
|
||||||
|
material_stats,
|
||||||
|
ability_map,
|
||||||
|
} = loop {
|
||||||
|
tokio::select! {
|
||||||
|
// Spawn in a blocking thread (leaving the network thread free). This is mostly
|
||||||
|
// useful for bots.
|
||||||
|
res = register_stream.recv() => break res?,
|
||||||
|
_ = ping_interval.tick() => ping_stream.send(PingMsg::Ping)?,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Spawn in a blocking thread (leaving the network thread free). This is mostly
|
||||||
|
// useful for bots.
|
||||||
|
let mut task = tokio::task::spawn_blocking(move || {
|
||||||
|
let map_size_lg =
|
||||||
|
common::terrain::MapSizeLg::new(world_map.dimensions_lg).map_err(|_| {
|
||||||
|
Error::Other(format!(
|
||||||
|
"Server sent bad world map dimensions: {:?}",
|
||||||
|
world_map.dimensions_lg,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let sea_level = world_map.default_chunk.get_min_z() as f32;
|
||||||
|
|
||||||
|
// Initialize `State`
|
||||||
|
let pools = State::pools(GameMode::Client);
|
||||||
|
let mut state = State::client(pools, map_size_lg, world_map.default_chunk);
|
||||||
|
// Client-only components
|
||||||
|
state.ecs_mut().register::<comp::Last<CharacterState>>();
|
||||||
|
let entity = state.ecs_mut().apply_entity_package(entity_package);
|
||||||
|
*state.ecs_mut().write_resource() = time_of_day;
|
||||||
|
*state.ecs_mut().write_resource() = PlayerEntity(Some(entity));
|
||||||
|
state.ecs_mut().insert(material_stats);
|
||||||
|
state.ecs_mut().insert(ability_map);
|
||||||
|
|
||||||
|
let map_size = map_size_lg.chunks();
|
||||||
|
let max_height = world_map.max_height;
|
||||||
|
let rgba = world_map.rgba;
|
||||||
|
let alt = world_map.alt;
|
||||||
|
if rgba.size() != map_size.map(|e| e as i32) {
|
||||||
|
return Err(Error::Other("Server sent a bad world map image".into()));
|
||||||
|
}
|
||||||
|
if alt.size() != map_size.map(|e| e as i32) {
|
||||||
|
return Err(Error::Other("Server sent a bad altitude map.".into()));
|
||||||
|
}
|
||||||
|
let [west, east] = world_map.horizons;
|
||||||
|
let scale_angle = |a: u8| (a as f32 / 255.0 * <f32 as FloatConst>::FRAC_PI_2()).tan();
|
||||||
|
let scale_height = |h: u8| h as f32 / 255.0 * max_height;
|
||||||
|
let scale_height_big = |h: u32| (h >> 3) as f32 / 8191.0 * max_height;
|
||||||
|
|
||||||
|
debug!("Preparing image...");
|
||||||
|
let unzip_horizons = |(angles, heights): &(Vec<_>, Vec<_>)| {
|
||||||
|
(
|
||||||
|
angles.iter().copied().map(scale_angle).collect::<Vec<_>>(),
|
||||||
|
heights
|
||||||
|
.iter()
|
||||||
|
.copied()
|
||||||
|
.map(scale_height)
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
let horizons = [unzip_horizons(&west), unzip_horizons(&east)];
|
||||||
|
|
||||||
|
// Redraw map (with shadows this time).
|
||||||
|
let mut world_map_rgba = vec![0u32; rgba.size().product() as usize];
|
||||||
|
let mut world_map_topo = vec![0u32; rgba.size().product() as usize];
|
||||||
|
let mut map_config = common::terrain::map::MapConfig::orthographic(
|
||||||
|
map_size_lg,
|
||||||
|
core::ops::RangeInclusive::new(0.0, max_height),
|
||||||
|
);
|
||||||
|
map_config.horizons = Some(&horizons);
|
||||||
|
let rescale_height = |h: f32| h / max_height;
|
||||||
|
let bounds_check = |pos: Vec2<i32>| {
|
||||||
|
pos.reduce_partial_min() >= 0
|
||||||
|
&& pos.x < map_size.x as i32
|
||||||
|
&& pos.y < map_size.y as i32
|
||||||
|
};
|
||||||
|
fn sample_pos(
|
||||||
|
map_config: &MapConfig,
|
||||||
|
pos: Vec2<i32>,
|
||||||
|
alt: &Grid<u32>,
|
||||||
|
rgba: &Grid<u32>,
|
||||||
|
map_size: &Vec2<u16>,
|
||||||
|
map_size_lg: &common::terrain::MapSizeLg,
|
||||||
|
max_height: f32,
|
||||||
|
) -> common::terrain::map::MapSample {
|
||||||
|
let rescale_height = |h: f32| h / max_height;
|
||||||
|
let scale_height_big = |h: u32| (h >> 3) as f32 / 8191.0 * max_height;
|
||||||
|
let bounds_check = |pos: Vec2<i32>| {
|
||||||
|
pos.reduce_partial_min() >= 0
|
||||||
|
&& pos.x < map_size.x as i32
|
||||||
|
&& pos.y < map_size.y as i32
|
||||||
|
};
|
||||||
|
let MapConfig {
|
||||||
|
gain,
|
||||||
|
is_contours,
|
||||||
|
is_height_map,
|
||||||
|
is_stylized_topo,
|
||||||
|
..
|
||||||
|
} = *map_config;
|
||||||
|
let mut is_contour_line = false;
|
||||||
|
let mut is_border = false;
|
||||||
|
let (rgb, alt, downhill_wpos) = if bounds_check(pos) {
|
||||||
|
let posi = pos.y as usize * map_size.x as usize + pos.x as usize;
|
||||||
|
let [r, g, b, _a] = rgba[pos].to_le_bytes();
|
||||||
|
let is_water = r == 0 && b > 102 && g < 77;
|
||||||
|
let alti = alt[pos];
|
||||||
|
// Compute contours (chunks are assigned in the river code below)
|
||||||
|
let altj = rescale_height(scale_height_big(alti));
|
||||||
|
let contour_interval = 150.0;
|
||||||
|
let chunk_contour = (altj * gain / contour_interval) as u32;
|
||||||
|
|
||||||
|
// Compute downhill.
|
||||||
|
let downhill = {
|
||||||
|
let mut best = -1;
|
||||||
|
let mut besth = alti;
|
||||||
|
for nposi in neighbors(*map_size_lg, posi) {
|
||||||
|
let nbh = alt.raw()[nposi];
|
||||||
|
let nalt = rescale_height(scale_height_big(nbh));
|
||||||
|
let nchunk_contour = (nalt * gain / contour_interval) as u32;
|
||||||
|
if !is_contour_line && chunk_contour > nchunk_contour {
|
||||||
|
is_contour_line = true;
|
||||||
|
}
|
||||||
|
let [nr, ng, nb, _na] = rgba.raw()[nposi].to_le_bytes();
|
||||||
|
let n_is_water = nr == 0 && nb > 102 && ng < 77;
|
||||||
|
|
||||||
|
if !is_border && is_water && !n_is_water {
|
||||||
|
is_border = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if nbh < besth {
|
||||||
|
besth = nbh;
|
||||||
|
best = nposi as isize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
best
|
||||||
|
};
|
||||||
|
let downhill_wpos = if downhill < 0 {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(
|
||||||
|
Vec2::new(
|
||||||
|
(downhill as usize % map_size.x as usize) as i32,
|
||||||
|
(downhill as usize / map_size.x as usize) as i32,
|
||||||
|
) * TerrainChunkSize::RECT_SIZE.map(|e| e as i32),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
(Rgb::new(r, g, b), alti, downhill_wpos)
|
||||||
|
} else {
|
||||||
|
(Rgb::zero(), 0, None)
|
||||||
|
};
|
||||||
|
let alt = f64::from(rescale_height(scale_height_big(alt)));
|
||||||
|
let wpos = pos * TerrainChunkSize::RECT_SIZE.map(|e| e as i32);
|
||||||
|
let downhill_wpos =
|
||||||
|
downhill_wpos.unwrap_or(wpos + TerrainChunkSize::RECT_SIZE.map(|e| e as i32));
|
||||||
|
let is_path = rgb.r == 0x37 && rgb.g == 0x29 && rgb.b == 0x23;
|
||||||
|
let rgb = rgb.map(|e: u8| e as f64 / 255.0);
|
||||||
|
let is_water = rgb.r == 0.0 && rgb.b > 0.4 && rgb.g < 0.3;
|
||||||
|
|
||||||
|
let rgb = if is_height_map {
|
||||||
|
if is_path {
|
||||||
|
// Path color is Rgb::new(0x37, 0x29, 0x23)
|
||||||
|
Rgb::new(0.9, 0.9, 0.63)
|
||||||
|
} else if is_water {
|
||||||
|
Rgb::new(0.23, 0.47, 0.53)
|
||||||
|
} else if is_contours && is_contour_line {
|
||||||
|
// Color contour lines
|
||||||
|
Rgb::new(0.15, 0.15, 0.15)
|
||||||
|
} else {
|
||||||
|
// Color hill shading
|
||||||
|
let lightness = (alt + 0.2).min(1.0) as f64;
|
||||||
|
Rgb::new(lightness, 0.9 * lightness, 0.5 * lightness)
|
||||||
|
}
|
||||||
|
} else if is_stylized_topo {
|
||||||
|
if is_path {
|
||||||
|
Rgb::new(0.9, 0.9, 0.63)
|
||||||
|
} else if is_water {
|
||||||
|
if is_border {
|
||||||
|
Rgb::new(0.10, 0.34, 0.50)
|
||||||
|
} else {
|
||||||
|
Rgb::new(0.23, 0.47, 0.63)
|
||||||
|
}
|
||||||
|
} else if is_contour_line {
|
||||||
|
Rgb::new(0.25, 0.25, 0.25)
|
||||||
|
} else {
|
||||||
|
// Stylized colors
|
||||||
|
Rgb::new(
|
||||||
|
(rgb.r + 0.25).min(1.0),
|
||||||
|
(rgb.g + 0.23).min(1.0),
|
||||||
|
(rgb.b + 0.10).min(1.0),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Rgb::new(rgb.r, rgb.g, rgb.b)
|
||||||
|
}
|
||||||
|
.map(|e| (e * 255.0) as u8);
|
||||||
|
common::terrain::map::MapSample {
|
||||||
|
rgb,
|
||||||
|
alt,
|
||||||
|
downhill_wpos,
|
||||||
|
connections: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Generate standard shaded map
|
||||||
|
map_config.is_shaded = true;
|
||||||
|
map_config.generate(
|
||||||
|
|pos| {
|
||||||
|
sample_pos(
|
||||||
|
&map_config,
|
||||||
|
pos,
|
||||||
|
&alt,
|
||||||
|
&rgba,
|
||||||
|
&map_size,
|
||||||
|
&map_size_lg,
|
||||||
|
max_height,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
|wpos| {
|
||||||
|
let pos = wpos.map2(TerrainChunkSize::RECT_SIZE, |e, f| e / f as i32);
|
||||||
|
rescale_height(if bounds_check(pos) {
|
||||||
|
scale_height_big(alt[pos])
|
||||||
|
} else {
|
||||||
|
0.0
|
||||||
|
})
|
||||||
|
},
|
||||||
|
|pos, (r, g, b, a)| {
|
||||||
|
world_map_rgba[pos.y * map_size.x as usize + pos.x] =
|
||||||
|
u32::from_le_bytes([r, g, b, a]);
|
||||||
|
},
|
||||||
|
);
|
||||||
|
// Generate map with topographical lines and stylized colors
|
||||||
|
map_config.is_contours = true;
|
||||||
|
map_config.is_stylized_topo = true;
|
||||||
|
map_config.generate(
|
||||||
|
|pos| {
|
||||||
|
sample_pos(
|
||||||
|
&map_config,
|
||||||
|
pos,
|
||||||
|
&alt,
|
||||||
|
&rgba,
|
||||||
|
&map_size,
|
||||||
|
&map_size_lg,
|
||||||
|
max_height,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
|wpos| {
|
||||||
|
let pos = wpos.map2(TerrainChunkSize::RECT_SIZE, |e, f| e / f as i32);
|
||||||
|
rescale_height(if bounds_check(pos) {
|
||||||
|
scale_height_big(alt[pos])
|
||||||
|
} else {
|
||||||
|
0.0
|
||||||
|
})
|
||||||
|
},
|
||||||
|
|pos, (r, g, b, a)| {
|
||||||
|
world_map_topo[pos.y * map_size.x as usize + pos.x] =
|
||||||
|
u32::from_le_bytes([r, g, b, a]);
|
||||||
|
},
|
||||||
|
);
|
||||||
|
let make_raw = |rgb| -> Result<_, Error> {
|
||||||
|
let mut raw = vec![0u8; 4 * world_map_rgba.len()];
|
||||||
|
LittleEndian::write_u32_into(rgb, &mut raw);
|
||||||
|
Ok(Arc::new(
|
||||||
|
DynamicImage::ImageRgba8({
|
||||||
|
// Should not fail if the dimensions are correct.
|
||||||
|
let map =
|
||||||
|
image::ImageBuffer::from_raw(u32::from(map_size.x), u32::from(map_size.y), raw);
|
||||||
|
map.ok_or_else(|| Error::Other("Server sent a bad world map image".into()))?
|
||||||
|
})
|
||||||
|
// Flip the image, since Voxygen uses an orientation where rotation from
|
||||||
|
// positive x axis to positive y axis is counterclockwise around the z axis.
|
||||||
|
.flipv(),
|
||||||
|
))
|
||||||
|
};
|
||||||
|
let lod_base = rgba;
|
||||||
|
let lod_alt = alt;
|
||||||
|
let world_map_rgb_img = make_raw(&world_map_rgba)?;
|
||||||
|
let world_map_topo_img = make_raw(&world_map_topo)?;
|
||||||
|
let world_map_layers = vec![world_map_rgb_img, world_map_topo_img];
|
||||||
|
let horizons = (west.0, west.1, east.0, east.1)
|
||||||
|
.into_par_iter()
|
||||||
|
.map(|(wa, wh, ea, eh)| u32::from_le_bytes([wa, wh, ea, eh]))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
let lod_horizon = horizons;
|
||||||
|
let map_bounds = Vec2::new(sea_level, max_height);
|
||||||
|
debug!("Done preparing image...");
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
state,
|
||||||
|
lod_base,
|
||||||
|
lod_alt,
|
||||||
|
Grid::from_raw(map_size.map(|e| e as i32), lod_horizon),
|
||||||
|
(world_map_layers, map_size, map_bounds),
|
||||||
|
world_map.sites,
|
||||||
|
world_map.pois,
|
||||||
|
recipe_book,
|
||||||
|
component_recipe_book,
|
||||||
|
max_group_size,
|
||||||
|
client_timeout,
|
||||||
|
))
|
||||||
|
});
|
||||||
|
|
||||||
let (
|
let (
|
||||||
state,
|
state,
|
||||||
lod_base,
|
lod_base,
|
||||||
@ -352,312 +660,11 @@ impl Client {
|
|||||||
component_recipe_book,
|
component_recipe_book,
|
||||||
max_group_size,
|
max_group_size,
|
||||||
client_timeout,
|
client_timeout,
|
||||||
) = match loop {
|
) = loop {
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
res = register_stream.recv() => break res?,
|
res = &mut task => break res.expect("Client thread should not panic")?,
|
||||||
_ = ping_interval.tick() => ping_stream.send(PingMsg::Ping)?,
|
_ = ping_interval.tick() => ping_stream.send(PingMsg::Ping)?,
|
||||||
}
|
}
|
||||||
} {
|
|
||||||
ServerInit::GameSync {
|
|
||||||
entity_package,
|
|
||||||
time_of_day,
|
|
||||||
max_group_size,
|
|
||||||
client_timeout,
|
|
||||||
world_map,
|
|
||||||
recipe_book,
|
|
||||||
component_recipe_book,
|
|
||||||
material_stats,
|
|
||||||
ability_map,
|
|
||||||
} => {
|
|
||||||
// Initialize `State`
|
|
||||||
let mut state = State::client();
|
|
||||||
// Client-only components
|
|
||||||
state.ecs_mut().register::<comp::Last<CharacterState>>();
|
|
||||||
|
|
||||||
let entity = state.ecs_mut().apply_entity_package(entity_package);
|
|
||||||
*state.ecs_mut().write_resource() = time_of_day;
|
|
||||||
*state.ecs_mut().write_resource() = PlayerEntity(Some(entity));
|
|
||||||
state.ecs_mut().insert(material_stats);
|
|
||||||
state.ecs_mut().insert(ability_map);
|
|
||||||
|
|
||||||
let map_size_lg = common::terrain::MapSizeLg::new(world_map.dimensions_lg)
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::Other(format!(
|
|
||||||
"Server sent bad world map dimensions: {:?}",
|
|
||||||
world_map.dimensions_lg,
|
|
||||||
))
|
|
||||||
})?;
|
|
||||||
let map_size = map_size_lg.chunks();
|
|
||||||
let max_height = world_map.max_height;
|
|
||||||
let sea_level = world_map.sea_level;
|
|
||||||
let rgba = world_map.rgba;
|
|
||||||
let alt = world_map.alt;
|
|
||||||
if rgba.size() != map_size.map(|e| e as i32) {
|
|
||||||
return Err(Error::Other("Server sent a bad world map image".into()));
|
|
||||||
}
|
|
||||||
if alt.size() != map_size.map(|e| e as i32) {
|
|
||||||
return Err(Error::Other("Server sent a bad altitude map.".into()));
|
|
||||||
}
|
|
||||||
let [west, east] = world_map.horizons;
|
|
||||||
let scale_angle =
|
|
||||||
|a: u8| (a as f32 / 255.0 * <f32 as FloatConst>::FRAC_PI_2()).tan();
|
|
||||||
let scale_height = |h: u8| h as f32 / 255.0 * max_height;
|
|
||||||
let scale_height_big = |h: u32| (h >> 3) as f32 / 8191.0 * max_height;
|
|
||||||
ping_stream.send(PingMsg::Ping)?;
|
|
||||||
|
|
||||||
debug!("Preparing image...");
|
|
||||||
let unzip_horizons = |(angles, heights): &(Vec<_>, Vec<_>)| {
|
|
||||||
(
|
|
||||||
angles.iter().copied().map(scale_angle).collect::<Vec<_>>(),
|
|
||||||
heights
|
|
||||||
.iter()
|
|
||||||
.copied()
|
|
||||||
.map(scale_height)
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
)
|
|
||||||
};
|
|
||||||
let horizons = [unzip_horizons(&west), unzip_horizons(&east)];
|
|
||||||
|
|
||||||
// Redraw map (with shadows this time).
|
|
||||||
let mut world_map_rgba = vec![0u32; rgba.size().product() as usize];
|
|
||||||
let mut world_map_topo = vec![0u32; rgba.size().product() as usize];
|
|
||||||
let mut map_config = common::terrain::map::MapConfig::orthographic(
|
|
||||||
map_size_lg,
|
|
||||||
core::ops::RangeInclusive::new(0.0, max_height),
|
|
||||||
);
|
|
||||||
map_config.horizons = Some(&horizons);
|
|
||||||
let rescale_height = |h: f32| h / max_height;
|
|
||||||
let bounds_check = |pos: Vec2<i32>| {
|
|
||||||
pos.reduce_partial_min() >= 0
|
|
||||||
&& pos.x < map_size.x as i32
|
|
||||||
&& pos.y < map_size.y as i32
|
|
||||||
};
|
|
||||||
ping_stream.send(PingMsg::Ping)?;
|
|
||||||
fn sample_pos(
|
|
||||||
map_config: &MapConfig,
|
|
||||||
pos: Vec2<i32>,
|
|
||||||
alt: &Grid<u32>,
|
|
||||||
rgba: &Grid<u32>,
|
|
||||||
map_size: &Vec2<u16>,
|
|
||||||
map_size_lg: &common::terrain::MapSizeLg,
|
|
||||||
max_height: f32,
|
|
||||||
) -> common::terrain::map::MapSample {
|
|
||||||
let rescale_height = |h: f32| h / max_height;
|
|
||||||
let scale_height_big = |h: u32| (h >> 3) as f32 / 8191.0 * max_height;
|
|
||||||
let bounds_check = |pos: Vec2<i32>| {
|
|
||||||
pos.reduce_partial_min() >= 0
|
|
||||||
&& pos.x < map_size.x as i32
|
|
||||||
&& pos.y < map_size.y as i32
|
|
||||||
};
|
|
||||||
let MapConfig {
|
|
||||||
gain,
|
|
||||||
is_contours,
|
|
||||||
is_height_map,
|
|
||||||
is_stylized_topo,
|
|
||||||
..
|
|
||||||
} = *map_config;
|
|
||||||
let mut is_contour_line = false;
|
|
||||||
let mut is_border = false;
|
|
||||||
let (rgb, alt, downhill_wpos) = if bounds_check(pos) {
|
|
||||||
let posi = pos.y as usize * map_size.x as usize + pos.x as usize;
|
|
||||||
let [r, g, b, _a] = rgba[pos].to_le_bytes();
|
|
||||||
let is_water = r == 0 && b > 102 && g < 77;
|
|
||||||
let alti = alt[pos];
|
|
||||||
// Compute contours (chunks are assigned in the river code below)
|
|
||||||
let altj = rescale_height(scale_height_big(alti));
|
|
||||||
let contour_interval = 150.0;
|
|
||||||
let chunk_contour = (altj * gain / contour_interval) as u32;
|
|
||||||
|
|
||||||
// Compute downhill.
|
|
||||||
let downhill = {
|
|
||||||
let mut best = -1;
|
|
||||||
let mut besth = alti;
|
|
||||||
for nposi in neighbors(*map_size_lg, posi) {
|
|
||||||
let nbh = alt.raw()[nposi];
|
|
||||||
let nalt = rescale_height(scale_height_big(nbh));
|
|
||||||
let nchunk_contour = (nalt * gain / contour_interval) as u32;
|
|
||||||
if !is_contour_line && chunk_contour > nchunk_contour {
|
|
||||||
is_contour_line = true;
|
|
||||||
}
|
|
||||||
let [nr, ng, nb, _na] = rgba.raw()[nposi].to_le_bytes();
|
|
||||||
let n_is_water = nr == 0 && nb > 102 && ng < 77;
|
|
||||||
|
|
||||||
if !is_border && is_water && !n_is_water {
|
|
||||||
is_border = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if nbh < besth {
|
|
||||||
besth = nbh;
|
|
||||||
best = nposi as isize;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
best
|
|
||||||
};
|
|
||||||
let downhill_wpos = if downhill < 0 {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(
|
|
||||||
Vec2::new(
|
|
||||||
(downhill as usize % map_size.x as usize) as i32,
|
|
||||||
(downhill as usize / map_size.x as usize) as i32,
|
|
||||||
) * TerrainChunkSize::RECT_SIZE.map(|e| e as i32),
|
|
||||||
)
|
|
||||||
};
|
|
||||||
(Rgb::new(r, g, b), alti, downhill_wpos)
|
|
||||||
} else {
|
|
||||||
(Rgb::zero(), 0, None)
|
|
||||||
};
|
|
||||||
let alt = f64::from(rescale_height(scale_height_big(alt)));
|
|
||||||
let wpos = pos * TerrainChunkSize::RECT_SIZE.map(|e| e as i32);
|
|
||||||
let downhill_wpos = downhill_wpos
|
|
||||||
.unwrap_or(wpos + TerrainChunkSize::RECT_SIZE.map(|e| e as i32));
|
|
||||||
let is_path = rgb.r == 0x37 && rgb.g == 0x29 && rgb.b == 0x23;
|
|
||||||
let rgb = rgb.map(|e: u8| e as f64 / 255.0);
|
|
||||||
let is_water = rgb.r == 0.0 && rgb.b > 0.4 && rgb.g < 0.3;
|
|
||||||
|
|
||||||
let rgb = if is_height_map {
|
|
||||||
if is_path {
|
|
||||||
// Path color is Rgb::new(0x37, 0x29, 0x23)
|
|
||||||
Rgb::new(0.9, 0.9, 0.63)
|
|
||||||
} else if is_water {
|
|
||||||
Rgb::new(0.23, 0.47, 0.53)
|
|
||||||
} else if is_contours && is_contour_line {
|
|
||||||
// Color contour lines
|
|
||||||
Rgb::new(0.15, 0.15, 0.15)
|
|
||||||
} else {
|
|
||||||
// Color hill shading
|
|
||||||
let lightness = (alt + 0.2).min(1.0) as f64;
|
|
||||||
Rgb::new(lightness, 0.9 * lightness, 0.5 * lightness)
|
|
||||||
}
|
|
||||||
} else if is_stylized_topo {
|
|
||||||
if is_path {
|
|
||||||
Rgb::new(0.9, 0.9, 0.63)
|
|
||||||
} else if is_water {
|
|
||||||
if is_border {
|
|
||||||
Rgb::new(0.10, 0.34, 0.50)
|
|
||||||
} else {
|
|
||||||
Rgb::new(0.23, 0.47, 0.63)
|
|
||||||
}
|
|
||||||
} else if is_contour_line {
|
|
||||||
Rgb::new(0.25, 0.25, 0.25)
|
|
||||||
} else {
|
|
||||||
// Stylized colors
|
|
||||||
Rgb::new(
|
|
||||||
(rgb.r + 0.25).min(1.0),
|
|
||||||
(rgb.g + 0.23).min(1.0),
|
|
||||||
(rgb.b + 0.10).min(1.0),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Rgb::new(rgb.r, rgb.g, rgb.b)
|
|
||||||
}
|
|
||||||
.map(|e| (e * 255.0) as u8);
|
|
||||||
common::terrain::map::MapSample {
|
|
||||||
rgb,
|
|
||||||
alt,
|
|
||||||
downhill_wpos,
|
|
||||||
connections: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Generate standard shaded map
|
|
||||||
map_config.is_shaded = true;
|
|
||||||
map_config.generate(
|
|
||||||
|pos| {
|
|
||||||
sample_pos(
|
|
||||||
&map_config,
|
|
||||||
pos,
|
|
||||||
&alt,
|
|
||||||
&rgba,
|
|
||||||
&map_size,
|
|
||||||
&map_size_lg,
|
|
||||||
max_height,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
|wpos| {
|
|
||||||
let pos = wpos.map2(TerrainChunkSize::RECT_SIZE, |e, f| e / f as i32);
|
|
||||||
rescale_height(if bounds_check(pos) {
|
|
||||||
scale_height_big(alt[pos])
|
|
||||||
} else {
|
|
||||||
0.0
|
|
||||||
})
|
|
||||||
},
|
|
||||||
|pos, (r, g, b, a)| {
|
|
||||||
world_map_rgba[pos.y * map_size.x as usize + pos.x] =
|
|
||||||
u32::from_le_bytes([r, g, b, a]);
|
|
||||||
},
|
|
||||||
);
|
|
||||||
// Generate map with topographical lines and stylized colors
|
|
||||||
map_config.is_contours = true;
|
|
||||||
map_config.is_stylized_topo = true;
|
|
||||||
map_config.generate(
|
|
||||||
|pos| {
|
|
||||||
sample_pos(
|
|
||||||
&map_config,
|
|
||||||
pos,
|
|
||||||
&alt,
|
|
||||||
&rgba,
|
|
||||||
&map_size,
|
|
||||||
&map_size_lg,
|
|
||||||
max_height,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
|wpos| {
|
|
||||||
let pos = wpos.map2(TerrainChunkSize::RECT_SIZE, |e, f| e / f as i32);
|
|
||||||
rescale_height(if bounds_check(pos) {
|
|
||||||
scale_height_big(alt[pos])
|
|
||||||
} else {
|
|
||||||
0.0
|
|
||||||
})
|
|
||||||
},
|
|
||||||
|pos, (r, g, b, a)| {
|
|
||||||
world_map_topo[pos.y * map_size.x as usize + pos.x] =
|
|
||||||
u32::from_le_bytes([r, g, b, a]);
|
|
||||||
},
|
|
||||||
);
|
|
||||||
ping_stream.send(PingMsg::Ping)?;
|
|
||||||
let make_raw = |rgb| -> Result<_, Error> {
|
|
||||||
let mut raw = vec![0u8; 4 * world_map_rgba.len()];
|
|
||||||
LittleEndian::write_u32_into(rgb, &mut raw);
|
|
||||||
Ok(Arc::new(
|
|
||||||
DynamicImage::ImageRgba8({
|
|
||||||
// Should not fail if the dimensions are correct.
|
|
||||||
let map =
|
|
||||||
image::ImageBuffer::from_raw(u32::from(map_size.x), u32::from(map_size.y), raw);
|
|
||||||
map.ok_or_else(|| Error::Other("Server sent a bad world map image".into()))?
|
|
||||||
})
|
|
||||||
// Flip the image, since Voxygen uses an orientation where rotation from
|
|
||||||
// positive x axis to positive y axis is counterclockwise around the z axis.
|
|
||||||
.flipv(),
|
|
||||||
))
|
|
||||||
};
|
|
||||||
ping_stream.send(PingMsg::Ping)?;
|
|
||||||
let lod_base = rgba;
|
|
||||||
let lod_alt = alt;
|
|
||||||
let world_map_rgb_img = make_raw(&world_map_rgba)?;
|
|
||||||
let world_map_topo_img = make_raw(&world_map_topo)?;
|
|
||||||
let world_map_layers = vec![world_map_rgb_img, world_map_topo_img];
|
|
||||||
let horizons = (west.0, west.1, east.0, east.1)
|
|
||||||
.into_par_iter()
|
|
||||||
.map(|(wa, wh, ea, eh)| u32::from_le_bytes([wa, wh, ea, eh]))
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
let lod_horizon = horizons;
|
|
||||||
let map_bounds = Vec2::new(sea_level, max_height);
|
|
||||||
debug!("Done preparing image...");
|
|
||||||
|
|
||||||
(
|
|
||||||
state,
|
|
||||||
lod_base,
|
|
||||||
lod_alt,
|
|
||||||
Grid::from_raw(map_size.map(|e| e as i32), lod_horizon),
|
|
||||||
(world_map_layers, map_size, map_bounds),
|
|
||||||
world_map.sites,
|
|
||||||
world_map.pois,
|
|
||||||
recipe_book,
|
|
||||||
component_recipe_book,
|
|
||||||
max_group_size,
|
|
||||||
client_timeout,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
ping_stream.send(PingMsg::Ping)?;
|
ping_stream.send(PingMsg::Ping)?;
|
||||||
|
|
||||||
@ -1899,7 +1906,19 @@ impl Client {
|
|||||||
];
|
];
|
||||||
|
|
||||||
for key in keys.iter() {
|
for key in keys.iter() {
|
||||||
if self.state.terrain().get_key(*key).is_none() {
|
let dist_to_player = (TerrainGrid::key_chunk(*key).map(|x| x as f32)
|
||||||
|
+ TerrainChunkSize::RECT_SIZE.map(|x| x as f32) / 2.0)
|
||||||
|
.distance_squared(pos.0.into());
|
||||||
|
|
||||||
|
let terrain = self.state.terrain();
|
||||||
|
if let Some(chunk) = terrain.get_key_arc(*key) {
|
||||||
|
if !skip_mode && !terrain.contains_key_real(*key) {
|
||||||
|
let chunk = Arc::clone(chunk);
|
||||||
|
drop(terrain);
|
||||||
|
self.state.insert_chunk(*key, chunk);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
drop(terrain);
|
||||||
if !skip_mode && !self.pending_chunks.contains_key(key) {
|
if !skip_mode && !self.pending_chunks.contains_key(key) {
|
||||||
const TOTAL_PENDING_CHUNKS_LIMIT: usize = 12;
|
const TOTAL_PENDING_CHUNKS_LIMIT: usize = 12;
|
||||||
const CURRENT_TICK_PENDING_CHUNKS_LIMIT: usize = 2;
|
const CURRENT_TICK_PENDING_CHUNKS_LIMIT: usize = 2;
|
||||||
@ -1917,11 +1936,6 @@ impl Client {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let dist_to_player =
|
|
||||||
(self.state.terrain().key_pos(*key).map(|x| x as f32)
|
|
||||||
+ TerrainChunkSize::RECT_SIZE.map(|x| x as f32) / 2.0)
|
|
||||||
.distance_squared(pos.0.into());
|
|
||||||
|
|
||||||
if dist_to_player < self.loaded_distance {
|
if dist_to_player < self.loaded_distance {
|
||||||
self.loaded_distance = dist_to_player;
|
self.loaded_distance = dist_to_player;
|
||||||
}
|
}
|
||||||
@ -2510,7 +2524,12 @@ impl Client {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ignore network events
|
// ignore network events
|
||||||
while let Some(Ok(Some(event))) = self.participant.as_ref().map(|p| p.try_fetch_event()) {
|
while let Some(res) = self
|
||||||
|
.participant
|
||||||
|
.as_mut()
|
||||||
|
.and_then(|p| p.try_fetch_event().transpose())
|
||||||
|
{
|
||||||
|
let event = res?;
|
||||||
trace!(?event, "received network event");
|
trace!(?event, "received network event");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2875,8 +2894,12 @@ impl Client {
|
|||||||
self.state.read_storage().get(self.entity()).cloned(),
|
self.state.read_storage().get(self.entity()).cloned(),
|
||||||
self.state.read_storage().get(self.entity()).cloned(),
|
self.state.read_storage().get(self.entity()).cloned(),
|
||||||
) {
|
) {
|
||||||
self.in_game_stream
|
self.in_game_stream.send(ClientGeneral::PlayerPhysics {
|
||||||
.send(ClientGeneral::PlayerPhysics { pos, vel, ori })?;
|
pos,
|
||||||
|
vel,
|
||||||
|
ori,
|
||||||
|
force_counter: self.force_update_counter,
|
||||||
|
})?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -19,4 +19,4 @@ tracing-log = "0.1.1"
|
|||||||
tracing-subscriber = { version = "0.3.7", default-features = false, features = ["env-filter", "fmt", "time", "ansi", "smallvec", "tracing-log"]}
|
tracing-subscriber = { version = "0.3.7", default-features = false, features = ["env-filter", "fmt", "time", "ansi", "smallvec", "tracing-log"]}
|
||||||
|
|
||||||
# Tracy
|
# Tracy
|
||||||
tracing-tracy = { version = "0.10.0", optional = true }
|
tracing-tracy = { version = "0.9.0", optional = true }
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use common::{grid::Grid, trade::Good};
|
use common::{grid::Grid, terrain::TerrainChunk, trade::Good};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::collections::HashMap;
|
use std::{collections::HashMap, sync::Arc};
|
||||||
use vek::*;
|
use vek::*;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@ -26,8 +26,6 @@ pub struct WorldMapMsg {
|
|||||||
///
|
///
|
||||||
/// NOTE: Invariant: chunk count fits in a u16.
|
/// NOTE: Invariant: chunk count fits in a u16.
|
||||||
pub dimensions_lg: Vec2<u32>,
|
pub dimensions_lg: Vec2<u32>,
|
||||||
/// Sea level (used to provide a base altitude).
|
|
||||||
pub sea_level: f32,
|
|
||||||
/// Max height (used to scale altitudes).
|
/// Max height (used to scale altitudes).
|
||||||
pub max_height: f32,
|
pub max_height: f32,
|
||||||
/// RGB+A; the alpha channel is currently unused, but will be used in the
|
/// RGB+A; the alpha channel is currently unused, but will be used in the
|
||||||
@ -124,6 +122,10 @@ pub struct WorldMapMsg {
|
|||||||
pub horizons: [(Vec<u8>, Vec<u8>); 2],
|
pub horizons: [(Vec<u8>, Vec<u8>); 2],
|
||||||
pub sites: Vec<SiteInfo>,
|
pub sites: Vec<SiteInfo>,
|
||||||
pub pois: Vec<PoiInfo>,
|
pub pois: Vec<PoiInfo>,
|
||||||
|
/// Default chunk (representing the ocean outside the map bounds). Sea
|
||||||
|
/// level (used to provide a base altitude) is the lower bound of this
|
||||||
|
/// chunk.
|
||||||
|
pub default_chunk: Arc<TerrainChunk>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type SiteId = common::trade::SiteId;
|
pub type SiteId = common::trade::SiteId;
|
||||||
|
@ -132,7 +132,7 @@ pub const MAX_WORLD_BLOCKS_LG: Vec2<u32> = Vec2 { x: 19, y: 19 };
|
|||||||
/// [TERRAIN_CHUNK_BLOCKS_LG]))` fits in an i32 (derived from the invariant
|
/// [TERRAIN_CHUNK_BLOCKS_LG]))` fits in an i32 (derived from the invariant
|
||||||
/// on [MAX_WORLD_BLOCKS_LG]).
|
/// on [MAX_WORLD_BLOCKS_LG]).
|
||||||
///
|
///
|
||||||
/// NOTE: As an invariant, each dimension (in chunks) must fit in a u16.
|
/// NOTE: As an invariant, each dimension (in chunks) must fit in a i16.
|
||||||
///
|
///
|
||||||
/// NOTE: As an invariant, the product of dimensions (in chunks) must fit in a
|
/// NOTE: As an invariant, the product of dimensions (in chunks) must fit in a
|
||||||
/// usize.
|
/// usize.
|
||||||
@ -160,12 +160,12 @@ impl MapSizeLg {
|
|||||||
// 0 and ([MAX_WORLD_BLOCKS_LG] - [TERRAIN_CHUNK_BLOCKS_LG])
|
// 0 and ([MAX_WORLD_BLOCKS_LG] - [TERRAIN_CHUNK_BLOCKS_LG])
|
||||||
let is_le_max = map_size_lg.x <= MAX_WORLD_BLOCKS_LG.x - TERRAIN_CHUNK_BLOCKS_LG
|
let is_le_max = map_size_lg.x <= MAX_WORLD_BLOCKS_LG.x - TERRAIN_CHUNK_BLOCKS_LG
|
||||||
&& map_size_lg.y <= MAX_WORLD_BLOCKS_LG.y - TERRAIN_CHUNK_BLOCKS_LG;
|
&& map_size_lg.y <= MAX_WORLD_BLOCKS_LG.y - TERRAIN_CHUNK_BLOCKS_LG;
|
||||||
// Assertion on dimensions: chunks must fit in a u16.
|
// Assertion on dimensions: chunks must fit in a i16.
|
||||||
let chunks_in_range =
|
let chunks_in_range =
|
||||||
/* 1u16.checked_shl(map_size_lg.x).is_some() &&
|
/* 1u15.checked_shl(map_size_lg.x).is_some() &&
|
||||||
1u16.checked_shl(map_size_lg.y).is_some(); */
|
1u15.checked_shl(map_size_lg.y).is_some(); */
|
||||||
map_size_lg.x <= 16 &&
|
map_size_lg.x <= 15 &&
|
||||||
map_size_lg.y <= 16;
|
map_size_lg.y <= 15;
|
||||||
if is_le_max && chunks_in_range {
|
if is_le_max && chunks_in_range {
|
||||||
// Assertion on dimensions: blocks must fit in a i32.
|
// Assertion on dimensions: blocks must fit in a i32.
|
||||||
let blocks_in_range =
|
let blocks_in_range =
|
||||||
@ -197,6 +197,15 @@ impl MapSizeLg {
|
|||||||
|
|
||||||
/// Get the size of an array of the correct size to hold all chunks.
|
/// Get the size of an array of the correct size to hold all chunks.
|
||||||
pub const fn chunks_len(self) -> usize { 1 << (self.0.x + self.0.y) }
|
pub const fn chunks_len(self) -> usize { 1 << (self.0.x + self.0.y) }
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
/// Determine whether a chunk position is in bounds.
|
||||||
|
pub const fn contains_chunk(&self, chunk_key: Vec2<i32>) -> bool {
|
||||||
|
let map_size = self.chunks();
|
||||||
|
chunk_key.x >= 0 && chunk_key.y >= 0 &&
|
||||||
|
chunk_key.x == chunk_key.x & ((map_size.x as i32) - 1) &&
|
||||||
|
chunk_key.y == chunk_key.y & ((map_size.y as i32) - 1)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<MapSizeLg> for Vec2<u32> {
|
impl From<MapSizeLg> for Vec2<u32> {
|
||||||
|
@ -188,6 +188,16 @@ impl TerrainGrid {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl TerrainChunk {
|
impl TerrainChunk {
|
||||||
|
/// Generate an all-water chunk at a specific sea level.
|
||||||
|
pub fn water(sea_level: i32) -> TerrainChunk {
|
||||||
|
TerrainChunk::new(
|
||||||
|
sea_level,
|
||||||
|
Block::new(BlockKind::Water, Rgb::zero()),
|
||||||
|
Block::air(SpriteKind::Empty),
|
||||||
|
TerrainChunkMeta::void(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
/// Find the highest or lowest accessible position within the chunk
|
/// Find the highest or lowest accessible position within the chunk
|
||||||
pub fn find_accessible_pos(&self, spawn_wpos: Vec2<i32>, ascending: bool) -> Vec3<f32> {
|
pub fn find_accessible_pos(&self, spawn_wpos: Vec2<i32>, ascending: bool) -> Vec3<f32> {
|
||||||
let min_z = self.get_min_z();
|
let min_z = self.get_min_z();
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
use crate::{
|
use crate::{
|
||||||
|
terrain::MapSizeLg,
|
||||||
vol::{BaseVol, ReadVol, RectRasterableVol, SampleVol, WriteVol},
|
vol::{BaseVol, ReadVol, RectRasterableVol, SampleVol, WriteVol},
|
||||||
volumes::dyna::DynaError,
|
volumes::dyna::DynaError,
|
||||||
};
|
};
|
||||||
@ -19,6 +20,10 @@ pub enum VolGrid2dError<V: RectRasterableVol> {
|
|||||||
// M = Chunk metadata
|
// M = Chunk metadata
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct VolGrid2d<V: RectRasterableVol> {
|
pub struct VolGrid2d<V: RectRasterableVol> {
|
||||||
|
/// Size of the entire (not just loaded) map.
|
||||||
|
map_size_lg: MapSizeLg,
|
||||||
|
/// Default voxel for use outside of max map bounds.
|
||||||
|
default: Arc<V>,
|
||||||
chunks: HashMap<Vec2<i32>, Arc<V>>,
|
chunks: HashMap<Vec2<i32>, Arc<V>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -29,6 +34,18 @@ impl<V: RectRasterableVol> VolGrid2d<V> {
|
|||||||
.map2(V::RECT_SIZE, |e, sz: u32| e.div_euclid(sz as i32))
|
.map2(V::RECT_SIZE, |e, sz: u32| e.div_euclid(sz as i32))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn key_chunk<K: Into<Vec2<i32>>>(key: K) -> Vec2<i32> {
|
||||||
|
key.into() * V::RECT_SIZE.map(|e| e as i32)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn par_keys(&self) -> hashbrown::hash_map::rayon::ParKeys<Vec2<i32>, Arc<V>>
|
||||||
|
where V: Send + Sync,
|
||||||
|
{
|
||||||
|
self.chunks.par_keys()
|
||||||
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn chunk_offs(pos: Vec3<i32>) -> Vec3<i32> {
|
pub fn chunk_offs(pos: Vec3<i32>) -> Vec3<i32> {
|
||||||
let offs = Vec2::<i32>::from(pos).map2(V::RECT_SIZE, |e, sz| e & (sz - 1) as i32);
|
let offs = Vec2::<i32>::from(pos).map2(V::RECT_SIZE, |e, sz| e & (sz - 1) as i32);
|
||||||
@ -45,8 +62,7 @@ impl<V: RectRasterableVol + ReadVol + Debug> ReadVol for VolGrid2d<V> {
|
|||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn get(&self, pos: Vec3<i32>) -> Result<&V::Vox, VolGrid2dError<V>> {
|
fn get(&self, pos: Vec3<i32>) -> Result<&V::Vox, VolGrid2dError<V>> {
|
||||||
let ck = Self::chunk_key(pos);
|
let ck = Self::chunk_key(pos);
|
||||||
self.chunks
|
self.get_key(ck)
|
||||||
.get(&ck)
|
|
||||||
.ok_or(VolGrid2dError::NoSuchChunk)
|
.ok_or(VolGrid2dError::NoSuchChunk)
|
||||||
.and_then(|chunk| {
|
.and_then(|chunk| {
|
||||||
let co = Self::chunk_offs(pos);
|
let co = Self::chunk_offs(pos);
|
||||||
@ -102,14 +118,14 @@ impl<I: Into<Aabr<i32>>, V: RectRasterableVol + ReadVol + Debug> SampleVol<I> fo
|
|||||||
fn sample(&self, range: I) -> Result<Self::Sample, VolGrid2dError<V>> {
|
fn sample(&self, range: I) -> Result<Self::Sample, VolGrid2dError<V>> {
|
||||||
let range = range.into();
|
let range = range.into();
|
||||||
|
|
||||||
let mut sample = VolGrid2d::new()?;
|
let mut sample = VolGrid2d::new(self.map_size_lg, Arc::clone(&self.default))?;
|
||||||
let chunk_min = Self::chunk_key(range.min);
|
let chunk_min = Self::chunk_key(range.min);
|
||||||
let chunk_max = Self::chunk_key(range.max);
|
let chunk_max = Self::chunk_key(range.max);
|
||||||
for x in chunk_min.x..chunk_max.x + 1 {
|
for x in chunk_min.x..chunk_max.x + 1 {
|
||||||
for y in chunk_min.y..chunk_max.y + 1 {
|
for y in chunk_min.y..chunk_max.y + 1 {
|
||||||
let chunk_key = Vec2::new(x, y);
|
let chunk_key = Vec2::new(x, y);
|
||||||
|
|
||||||
let chunk = self.get_key_arc(chunk_key).cloned();
|
let chunk = self.get_key_arc_real(chunk_key).cloned();
|
||||||
|
|
||||||
if let Some(chunk) = chunk {
|
if let Some(chunk) = chunk {
|
||||||
sample.insert(chunk_key, chunk);
|
sample.insert(chunk_key, chunk);
|
||||||
@ -138,12 +154,14 @@ impl<V: RectRasterableVol + WriteVol + Clone + Debug> WriteVol for VolGrid2d<V>
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<V: RectRasterableVol> VolGrid2d<V> {
|
impl<V: RectRasterableVol> VolGrid2d<V> {
|
||||||
pub fn new() -> Result<Self, VolGrid2dError<V>> {
|
pub fn new(map_size_lg: MapSizeLg, default: Arc<V>) -> Result<Self, VolGrid2dError<V>> {
|
||||||
if Self::chunk_size()
|
if Self::chunk_size()
|
||||||
.map(|e| e.is_power_of_two() && e > 0)
|
.map(|e| e.is_power_of_two() && e > 0)
|
||||||
.reduce_and()
|
.reduce_and()
|
||||||
{
|
{
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
|
map_size_lg,
|
||||||
|
default,
|
||||||
chunks: HashMap::default(),
|
chunks: HashMap::default(),
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
@ -160,10 +178,37 @@ impl<V: RectRasterableVol> VolGrid2d<V> {
|
|||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn get_key(&self, key: Vec2<i32>) -> Option<&V> {
|
pub fn get_key(&self, key: Vec2<i32>) -> Option<&V> {
|
||||||
self.chunks.get(&key).map(|arc_chunk| arc_chunk.as_ref())
|
self.get_key_arc(key).map(|arc_chunk| arc_chunk.as_ref())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_key_arc(&self, key: Vec2<i32>) -> Option<&Arc<V>> { self.chunks.get(&key) }
|
#[inline(always)]
|
||||||
|
pub fn get_key_real(&self, key: Vec2<i32>) -> Option<&V> {
|
||||||
|
self.get_key_arc_real(key).map(|arc_chunk| arc_chunk.as_ref())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn contains_key(&self, key: Vec2<i32>) -> bool {
|
||||||
|
self.contains_key_real(key) ||
|
||||||
|
// Counterintuitively, areas outside the map are *always* considered to be in it, since
|
||||||
|
// they're assigned the default chunk.
|
||||||
|
!self.map_size_lg.contains_chunk(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn contains_key_real(&self, key: Vec2<i32>) -> bool {
|
||||||
|
self.chunks.contains_key(&key)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn get_key_arc(&self, key: Vec2<i32>) -> Option<&Arc<V>> {
|
||||||
|
self.get_key_arc_real(key)
|
||||||
|
.or_else(|| if !self.map_size_lg.contains_chunk(key) { Some(&self.default) } else { None })
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn get_key_arc_real(&self, key: Vec2<i32>) -> Option<&Arc<V>> {
|
||||||
|
self.chunks.get(&key)
|
||||||
|
}
|
||||||
|
|
||||||
pub fn clear(&mut self) { self.chunks.clear(); }
|
pub fn clear(&mut self) { self.chunks.clear(); }
|
||||||
|
|
||||||
@ -172,7 +217,7 @@ impl<V: RectRasterableVol> VolGrid2d<V> {
|
|||||||
pub fn remove(&mut self, key: Vec2<i32>) -> Option<Arc<V>> { self.chunks.remove(&key) }
|
pub fn remove(&mut self, key: Vec2<i32>) -> Option<Arc<V>> { self.chunks.remove(&key) }
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn key_pos(&self, key: Vec2<i32>) -> Vec2<i32> { key * V::RECT_SIZE.map(|e| e as i32) }
|
pub fn key_pos(&self, key: Vec2<i32>) -> Vec2<i32> { Self::key_chunk(key) }
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn pos_key(&self, pos: Vec3<i32>) -> Vec2<i32> { Self::chunk_key(pos) }
|
pub fn pos_key(&self, pos: Vec3<i32>) -> Vec2<i32> { Self::chunk_key(pos) }
|
||||||
@ -219,8 +264,7 @@ impl<'a, V: RectRasterableVol + ReadVol> CachedVolGrid2d<'a, V> {
|
|||||||
// Otherwise retrieve from the hashmap
|
// Otherwise retrieve from the hashmap
|
||||||
let chunk = self
|
let chunk = self
|
||||||
.vol_grid_2d
|
.vol_grid_2d
|
||||||
.chunks
|
.get_key_arc(ck)
|
||||||
.get(&ck)
|
|
||||||
.ok_or(VolGrid2dError::NoSuchChunk)?;
|
.ok_or(VolGrid2dError::NoSuchChunk)?;
|
||||||
// Store most recently looked up chunk in the cache
|
// Store most recently looked up chunk in the cache
|
||||||
self.cache = Some((ck, Arc::clone(chunk)));
|
self.cache = Some((ck, Arc::clone(chunk)));
|
||||||
|
@ -17,7 +17,7 @@ use common::{
|
|||||||
TimeOfDay,
|
TimeOfDay,
|
||||||
},
|
},
|
||||||
slowjob::SlowJobPool,
|
slowjob::SlowJobPool,
|
||||||
terrain::{Block, TerrainChunk, TerrainGrid},
|
terrain::{Block, MapSizeLg, TerrainChunk, TerrainGrid},
|
||||||
time::DayPeriod,
|
time::DayPeriod,
|
||||||
trade::Trades,
|
trade::Trades,
|
||||||
vol::{ReadVol, WriteVol},
|
vol::{ReadVol, WriteVol},
|
||||||
@ -94,37 +94,56 @@ pub struct State {
|
|||||||
thread_pool: Arc<ThreadPool>,
|
thread_pool: Arc<ThreadPool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub type Pools = Arc<ThreadPool>;
|
||||||
|
|
||||||
impl State {
|
impl State {
|
||||||
/// Create a new `State` in client mode.
|
pub fn pools(game_mode: GameMode) -> Pools {
|
||||||
pub fn client() -> Self { Self::new(GameMode::Client) }
|
|
||||||
|
|
||||||
/// Create a new `State` in server mode.
|
|
||||||
pub fn server() -> Self { Self::new(GameMode::Server) }
|
|
||||||
|
|
||||||
pub fn new(game_mode: GameMode) -> Self {
|
|
||||||
let thread_name_infix = match game_mode {
|
let thread_name_infix = match game_mode {
|
||||||
GameMode::Server => "s",
|
GameMode::Server => "s",
|
||||||
GameMode::Client => "c",
|
GameMode::Client => "c",
|
||||||
GameMode::Singleplayer => "sp",
|
GameMode::Singleplayer => "sp",
|
||||||
};
|
};
|
||||||
|
|
||||||
let thread_pool = Arc::new(
|
Arc::new(
|
||||||
ThreadPoolBuilder::new()
|
ThreadPoolBuilder::new()
|
||||||
.num_threads(num_cpus::get().max(common::consts::MIN_RECOMMENDED_RAYON_THREADS))
|
.num_threads(num_cpus::get().max(common::consts::MIN_RECOMMENDED_RAYON_THREADS))
|
||||||
.thread_name(move |i| format!("rayon-{}-{}", thread_name_infix, i))
|
.thread_name(move |i| format!("rayon-{}-{}", thread_name_infix, i))
|
||||||
.build()
|
.build()
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
);
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new `State` in client mode.
|
||||||
|
pub fn client(pools: Pools, map_size_lg: MapSizeLg, default_chunk: Arc<TerrainChunk>) -> Self {
|
||||||
|
Self::new(GameMode::Client, pools, map_size_lg, default_chunk)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new `State` in server mode.
|
||||||
|
pub fn server(pools: Pools, map_size_lg: MapSizeLg, default_chunk: Arc<TerrainChunk>) -> Self {
|
||||||
|
Self::new(GameMode::Server, pools, map_size_lg, default_chunk)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new(
|
||||||
|
game_mode: GameMode,
|
||||||
|
pools: Pools,
|
||||||
|
map_size_lg: MapSizeLg,
|
||||||
|
default_chunk: Arc<TerrainChunk>,
|
||||||
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
ecs: Self::setup_ecs_world(game_mode, &thread_pool),
|
ecs: Self::setup_ecs_world(game_mode, Arc::clone(&pools), map_size_lg, default_chunk),
|
||||||
thread_pool,
|
thread_pool: pools,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates ecs world and registers all the common components and resources
|
/// Creates ecs world and registers all the common components and resources
|
||||||
// TODO: Split up registering into server and client (e.g. move
|
// TODO: Split up registering into server and client (e.g. move
|
||||||
// EventBus<ServerEvent> to the server)
|
// EventBus<ServerEvent> to the server)
|
||||||
fn setup_ecs_world(game_mode: GameMode, thread_pool: &Arc<ThreadPool>) -> specs::World {
|
fn setup_ecs_world(
|
||||||
|
game_mode: GameMode,
|
||||||
|
thread_pool: Arc<ThreadPool>,
|
||||||
|
map_size_lg: MapSizeLg,
|
||||||
|
default_chunk: Arc<TerrainChunk>,
|
||||||
|
) -> specs::World {
|
||||||
let mut ecs = specs::World::new();
|
let mut ecs = specs::World::new();
|
||||||
// Uids for sync
|
// Uids for sync
|
||||||
ecs.register_sync_marker();
|
ecs.register_sync_marker();
|
||||||
@ -213,7 +232,7 @@ impl State {
|
|||||||
ecs.insert(Time(0.0));
|
ecs.insert(Time(0.0));
|
||||||
ecs.insert(DeltaTime(0.0));
|
ecs.insert(DeltaTime(0.0));
|
||||||
ecs.insert(PlayerEntity(None));
|
ecs.insert(PlayerEntity(None));
|
||||||
ecs.insert(TerrainGrid::new().unwrap());
|
ecs.insert(TerrainGrid::new(map_size_lg, default_chunk).unwrap());
|
||||||
ecs.insert(BlockChange::default());
|
ecs.insert(BlockChange::default());
|
||||||
ecs.insert(crate::build_areas::BuildAreas::default());
|
ecs.insert(crate::build_areas::BuildAreas::default());
|
||||||
ecs.insert(TerrainChanges::default());
|
ecs.insert(TerrainChanges::default());
|
||||||
@ -226,11 +245,7 @@ impl State {
|
|||||||
let num_cpu = num_cpus::get() as u64;
|
let num_cpu = num_cpus::get() as u64;
|
||||||
let slow_limit = (num_cpu / 2 + num_cpu / 4).max(1);
|
let slow_limit = (num_cpu / 2 + num_cpu / 4).max(1);
|
||||||
tracing::trace!(?slow_limit, "Slow Thread limit");
|
tracing::trace!(?slow_limit, "Slow Thread limit");
|
||||||
ecs.insert(SlowJobPool::new(
|
ecs.insert(SlowJobPool::new(slow_limit, 10_000, thread_pool));
|
||||||
slow_limit,
|
|
||||||
10_000,
|
|
||||||
Arc::clone(thread_pool),
|
|
||||||
));
|
|
||||||
|
|
||||||
// TODO: only register on the server
|
// TODO: only register on the server
|
||||||
ecs.insert(EventBus::<ServerEvent>::default());
|
ecs.insert(EventBus::<ServerEvent>::default());
|
||||||
|
@ -632,8 +632,7 @@ impl<'a> PhysicsData<'a> {
|
|||||||
)| {
|
)| {
|
||||||
let in_loaded_chunk = read
|
let in_loaded_chunk = read
|
||||||
.terrain
|
.terrain
|
||||||
.get_key(read.terrain.pos_key(pos.0.map(|e| e.floor() as i32)))
|
.contains_key(read.terrain.pos_key(pos.0.map(|e| e.floor() as i32)));
|
-                .is_some();
 
             // Apply physics only if in a loaded chunk
             if in_loaded_chunk

@@ -790,8 +789,7 @@ impl<'a> PhysicsData<'a> {
 
             let in_loaded_chunk = read
                 .terrain
-                .get_key(read.terrain.pos_key(pos.0.map(|e| e.floor() as i32)))
-                .is_some();
+                .contains_key(read.terrain.pos_key(pos.0.map(|e| e.floor() as i32)));
 
             // Don't move if we're not in a loaded chunk
             let pos_delta = if in_loaded_chunk {

@@ -6,6 +6,7 @@ mod tests {
             Ori, PhysicsState, Poise, Pos, Skill, Stats, Vel,
         },
         resources::{DeltaTime, GameMode, Time},
+        terrain::{MapSizeLg, TerrainChunk},
         uid::Uid,
         util::Dir,
         SkillSetBuilder,

@@ -14,12 +15,25 @@ mod tests {
     use common_state::State;
     use rand::thread_rng;
     use specs::{Builder, Entity, WorldExt};
-    use std::time::Duration;
-    use vek::{approx::AbsDiffEq, Vec3};
+    use std::{sync::Arc, time::Duration};
+    use vek::{approx::AbsDiffEq, Vec2, Vec3};
     use veloren_common_systems::character_behavior;
 
+    const DEFAULT_WORLD_CHUNKS_LG: MapSizeLg =
+        if let Ok(map_size_lg) = MapSizeLg::new(Vec2 { x: 1, y: 1 }) {
+            map_size_lg
+        } else {
+            panic!("Default world chunk size does not satisfy required invariants.");
+        };
+
     fn setup() -> State {
-        let mut state = State::new(GameMode::Server);
+        let pools = State::pools(GameMode::Server);
+        let mut state = State::new(
+            GameMode::Server,
+            pools,
+            DEFAULT_WORLD_CHUNKS_LG,
+            Arc::new(TerrainChunk::water(0)),
+        );
         let msm = MaterialStatManifest::load().cloned();
         state.ecs_mut().insert(msm);
         state.ecs_mut().read_resource::<Time>();

@@ -7,7 +7,9 @@ use common::{
     },
     resources::{DeltaTime, GameMode, Time},
     skillset_builder::SkillSetBuilder,
-    terrain::{Block, BlockKind, SpriteKind, TerrainChunk, TerrainChunkMeta, TerrainGrid},
+    terrain::{
+        Block, BlockKind, MapSizeLg, SpriteKind, TerrainChunk, TerrainChunkMeta, TerrainGrid,
+    },
 };
 use common_ecs::{dispatch, System};
 use common_net::sync::WorldSyncExt;

@@ -24,12 +26,24 @@ const MILLIS_PER_SEC: f64 = 1_000.0;
 pub const DT: Duration = Duration::from_millis(DT_MILLIS);
 pub const DT_F64: f64 = DT_MILLIS as f64 / MILLIS_PER_SEC;
 
+const DEFAULT_WORLD_CHUNKS_LG: MapSizeLg =
+    if let Ok(map_size_lg) = MapSizeLg::new(Vec2 { x: 10, y: 10 }) {
+        map_size_lg
+    } else {
+        panic!("Default world chunk size does not satisfy required invariants.");
+    };
+
 pub fn setup() -> State {
-    let mut state = State::new(GameMode::Server);
+    let pools = State::pools(GameMode::Server);
+    let mut state = State::new(
+        GameMode::Server,
+        pools,
+        DEFAULT_WORLD_CHUNKS_LG,
+        Arc::new(TerrainChunk::water(0)),
+    );
     state.ecs_mut().insert(MaterialStatManifest::with_empty());
     state.ecs_mut().read_resource::<Time>();
     state.ecs_mut().read_resource::<DeltaTime>();
-    state.ecs_mut().insert(TerrainGrid::new());
     for x in 0..2 {
         for y in 0..2 {
             generate_chunk(&mut state, Vec2::new(x, y));
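The two `DEFAULT_WORLD_CHUNKS_LG` constants above lean on `MapSizeLg::new` being usable during constant evaluation, so an invalid world size becomes a compile-time error instead of a panic at test startup. Below is a minimal, self-contained sketch of that pattern with an invented `GridSizeLg` type; only the shape of the code mirrors the diff, none of the names are Veloren's.

```rust
/// Log2 dimensions of a square grid; invalid values are rejected at compile time.
/// (Illustrative stand-in for a type like `MapSizeLg`, not Veloren's actual code.)
#[derive(Clone, Copy, Debug)]
struct GridSizeLg {
    x: u32,
    y: u32,
}

impl GridSizeLg {
    /// `const fn` so the validity check can run during constant evaluation.
    const fn new(x: u32, y: u32) -> Result<Self, ()> {
        // Keep the grid small enough that `1 << (x + y)` fits in a u32.
        if x + y <= 31 { Ok(Self { x, y }) } else { Err(()) }
    }

    const fn total_cells(self) -> u32 {
        1u32 << (self.x + self.y)
    }
}

// Evaluated at compile time: an out-of-range size becomes a compile error
// rather than a runtime panic in every test that builds a `State`.
const DEFAULT_GRID_LG: GridSizeLg = if let Ok(size) = GridSizeLg::new(10, 10) {
    size
} else {
    panic!("Default grid size does not satisfy required invariants.");
};

fn main() {
    println!("default grid has {} cells", DEFAULT_GRID_LG.total_cells());
}
```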
@@ -11,7 +11,7 @@ async fn stream_msg(s1_a: Arc<Mutex<Stream>>, s1_b: Arc<Mutex<Stream>>, data: &[
     let mut s1_b = s1_b.lock().await;
     let m = Message::serialize(&data, s1_b.params());
     std::thread::spawn(move || {
-        let mut s1_a = s1_a.try_lock().unwrap();
+        let s1_a = s1_a.try_lock().unwrap();
         for _ in 0..cnt {
             s1_a.send_raw(&m).unwrap();
         }

@@ -130,11 +130,11 @@ pub fn network_participant_stream(
 ) {
     let runtime = Runtime::new().unwrap();
     let (n_a, p1_a, s1_a, n_b, p1_b, s1_b) = runtime.block_on(async {
-        let n_a = Network::new(Pid::fake(0), &runtime);
+        let mut n_a = Network::new(Pid::fake(0), &runtime);
         let n_b = Network::new(Pid::fake(1), &runtime);
 
         n_a.listen(addr.0).await.unwrap();
-        let p1_b = n_b.connect(addr.1).await.unwrap();
+        let mut p1_b = n_b.connect(addr.1).await.unwrap();
         let p1_a = n_a.connected().await.unwrap();
 
         let s1_a = p1_a.open(4, Promises::empty(), 0).await.unwrap();

@@ -8,7 +8,7 @@ use std::{sync::Arc, thread, time::Duration};
 use tokio::{io, io::AsyncBufReadExt, runtime::Runtime, sync::RwLock};
 use tracing::*;
 use tracing_subscriber::EnvFilter;
-use veloren_network::{ConnectAddr, ListenAddr, Network, Participant, Pid, Promises};
+use veloren_network::{ConnectAddr, ListenAddr, Network, Participant, Pid, Promises, Stream};
 
 ///This example contains a simple chatserver, that allows to send messages
 /// between participants, it's neither pretty nor perfect, but it should show

@@ -106,26 +106,20 @@ fn main() {
 
 fn server(address: ListenAddr) {
     let r = Arc::new(Runtime::new().unwrap());
-    let server = Network::new(Pid::new(), &r);
-    let server = Arc::new(server);
+    let mut server = Network::new(Pid::new(), &r);
     let participants = Arc::new(RwLock::new(Vec::new()));
     r.block_on(async {
         server.listen(address).await.unwrap();
         loop {
-            let p1 = Arc::new(server.connected().await.unwrap());
-            let server1 = server.clone();
-            participants.write().await.push(p1.clone());
-            tokio::spawn(client_connection(server1, p1, participants.clone()));
+            let mut p1 = server.connected().await.unwrap();
+            let s1 = p1.opened().await.unwrap();
+            participants.write().await.push(p1);
+            tokio::spawn(client_connection(s1, participants.clone()));
         }
     });
 }
 
-async fn client_connection(
-    _network: Arc<Network>,
-    participant: Arc<Participant>,
-    participants: Arc<RwLock<Vec<Arc<Participant>>>>,
-) {
-    let mut s1 = participant.opened().await.unwrap();
+async fn client_connection(mut s1: Stream, participants: Arc<RwLock<Vec<Participant>>>) {
     let username = s1.recv::<String>().await.unwrap();
     println!("[{}] connected", username);
     loop {

@@ -141,7 +135,7 @@ async fn client_connection(
             .await
             {
                 Err(_) => info!("error talking to client, //TODO drop it"),
-                Ok(mut s) => s.send((username.clone(), msg.clone())).unwrap(),
+                Ok(s) => s.send((username.clone(), msg.clone())).unwrap(),
             };
         }
     },

@@ -156,7 +150,7 @@ fn client(address: ConnectAddr) {
 
     r.block_on(async {
         let p1 = client.connect(address.clone()).await.unwrap(); //remote representation of p1
-        let mut s1 = p1
+        let s1 = p1
             .open(4, Promises::ORDERED | Promises::CONSISTENCY, 0)
             .await
             .unwrap(); //remote representation of s1

@@ -188,7 +182,7 @@ fn client(address: ConnectAddr) {
 // receiving i open and close a stream per message. this can be done easier but
 // this allows me to be quite lazy on the server side and just get a list of
 // all participants and send to them...
-async fn read_messages(participant: Participant) {
+async fn read_messages(mut participant: Participant) {
     while let Ok(mut s) = participant.opened().await {
         let (username, message) = s.recv::<(String, String)>().await.unwrap();
         println!("[{}]: {}", username, message);
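The chat example now hands each accepted connection's `Stream` to its task by value, instead of sharing the whole `Participant` behind an `Arc`. A rough sketch of that ownership shape, using a plain Tokio channel as a stand-in for the veloren_network types (all names here are illustrative, and `tokio` with the `rt-multi-thread`, `macros`, `sync`, and `time` features is assumed):

```rust
use tokio::sync::mpsc;

// Stand-in for a per-connection `Stream`: the task serving the connection owns
// the receiving half outright, so no Arc or Mutex is needed around it.
async fn client_connection(mut messages: mpsc::UnboundedReceiver<String>) {
    while let Some(msg) = messages.recv().await {
        println!("got: {msg}");
    }
}

#[tokio::main]
async fn main() {
    // Pretend each loop iteration is one accepted connection.
    for i in 0..3 {
        let (tx, rx) = mpsc::unbounded_channel();
        // Move the receiver into the task; the sender stays with the "server".
        tokio::spawn(client_connection(rx));
        tx.send(format!("hello from connection {i}")).unwrap();
    }
    // Give the spawned tasks a moment to drain their queues in this toy example.
    tokio::time::sleep(std::time::Duration::from_millis(50)).await;
}
```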
@@ -1,5 +1,5 @@
 use crate::commands::{Command, FileInfo, LocalCommand, RemoteInfo};
-use futures_util::{FutureExt, StreamExt};
+use futures_util::StreamExt;
 use std::{collections::HashMap, path::PathBuf, sync::Arc};
 use tokio::{
     fs, join,

@@ -15,49 +15,65 @@ struct ControlChannels {
     command_receiver: mpsc::UnboundedReceiver<LocalCommand>,
 }
 
-pub struct Server {
-    run_channels: Option<ControlChannels>,
-    network: Network,
+struct Shared {
     served: RwLock<Vec<FileInfo>>,
     remotes: RwLock<HashMap<Pid, Arc<Mutex<RemoteInfo>>>>,
     receiving_files: Mutex<HashMap<u32, Option<String>>>,
 }
 
+pub struct Server {
+    run_channels: ControlChannels,
+    server: Network,
+    client: Network,
+    shared: Shared,
+}
+
 impl Server {
     pub fn new(runtime: Arc<Runtime>) -> (Self, mpsc::UnboundedSender<LocalCommand>) {
         let (command_sender, command_receiver) = mpsc::unbounded_channel();
 
-        let network = Network::new(Pid::new(), &runtime);
+        let server = Network::new(Pid::new(), &runtime);
+        let client = Network::new(Pid::new(), &runtime);
 
-        let run_channels = Some(ControlChannels { command_receiver });
+        let run_channels = ControlChannels { command_receiver };
         (
             Server {
                 run_channels,
-                network,
-                served: RwLock::new(vec![]),
-                remotes: RwLock::new(HashMap::new()),
-                receiving_files: Mutex::new(HashMap::new()),
+                server,
+                client,
+                shared: Shared {
+                    served: RwLock::new(vec![]),
+                    remotes: RwLock::new(HashMap::new()),
+                    receiving_files: Mutex::new(HashMap::new()),
+                },
             },
             command_sender,
         )
     }
 
-    pub async fn run(mut self, address: ListenAddr) {
-        let run_channels = self.run_channels.take().unwrap();
+    pub async fn run(self, address: ListenAddr) {
+        let run_channels = self.run_channels;
 
-        self.network.listen(address).await.unwrap();
+        self.server.listen(address).await.unwrap();
 
         join!(
-            self.command_manager(run_channels.command_receiver,),
-            self.connect_manager(),
+            self.shared
+                .command_manager(self.client, run_channels.command_receiver),
+            self.shared.connect_manager(self.server),
         );
     }
+}
 
-    async fn command_manager(&self, command_receiver: mpsc::UnboundedReceiver<LocalCommand>) {
+impl Shared {
+    async fn command_manager(
+        &self,
+        client: Network,
+        command_receiver: mpsc::UnboundedReceiver<LocalCommand>,
+    ) {
         trace!("Start command_manager");
         let command_receiver = UnboundedReceiverStream::new(command_receiver);
         command_receiver
-            .for_each_concurrent(None, async move |cmd| {
+            .for_each_concurrent(None, |cmd| async {
                 match cmd {
                     LocalCommand::Shutdown => println!("Shutting down service"),
                     LocalCommand::Disconnect => {

@@ -66,7 +82,7 @@ impl Server {
                     },
                     LocalCommand::Connect(addr) => {
                         println!("Trying to connect to: {:?}", &addr);
-                        match self.network.connect(addr.clone()).await {
+                        match client.connect(addr.clone()).await {
                             Ok(p) => self.loop_participant(p).await,
                             Err(e) => println!("Failed to connect to {:?}, err: {:?}", &addr, e),
                         }

@@ -89,7 +105,7 @@ impl Server {
                     LocalCommand::Get(id, path) => {
                         // i dont know the owner, just broadcast, i am laaaazyyy
                         for ri in self.remotes.read().await.values() {
-                            let mut ri = ri.lock().await;
+                            let ri = ri.lock().await;
                             if ri.get_info(id).is_some() {
                                 //found provider, send request.
                                 self.receiving_files.lock().await.insert(id, path.clone());

@@ -105,20 +121,20 @@ impl Server {
         trace!("Stop command_manager");
     }
 
-    async fn connect_manager(&self) {
+    async fn connect_manager(&self, network: Network) {
         trace!("Start connect_manager");
-        let iter = futures_util::stream::unfold((), |_| {
-            self.network.connected().map(|r| r.ok().map(|v| (v, ())))
+        let iter = futures_util::stream::unfold(network, async move |mut network| {
+            network.connected().await.ok().map(|v| (v, network))
         });
 
-        iter.for_each_concurrent(/* limit */ None, async move |participant| {
+        iter.for_each_concurrent(/* limit */ None, |participant| async {
             self.loop_participant(participant).await;
         })
         .await;
         trace!("Stop connect_manager");
     }
 
-    async fn loop_participant(&self, p: Participant) {
+    async fn loop_participant(&self, mut p: Participant) {
         if let (Ok(cmd_out), Ok(file_out), Ok(cmd_in), Ok(file_in)) = (
             p.open(3, Promises::ORDERED | Promises::CONSISTENCY, 0)
                 .await,
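The rewritten `connect_manager` turns an accept loop into a `Stream` by threading ownership of the `Network` through `futures_util::stream::unfold`, then fans the results out with `for_each_concurrent`. A self-contained sketch of the same shape over an ordinary Tokio channel, assuming the `futures-util` and `tokio` crates; it is not the fileshare example itself:

```rust
use futures_util::StreamExt;
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::unbounded_channel::<u32>();
    for id in 0..5 {
        tx.send(id).unwrap();
    }
    drop(tx); // close the source so the stream below terminates

    // `unfold` owns the receiver as its state, so no lock is needed even though
    // `recv` takes `&mut self`; each step yields the next item plus the state.
    let incoming = futures_util::stream::unfold(rx, |mut rx| async move {
        rx.recv().await.map(|item| (item, rx))
    });

    // Handle items concurrently, like `loop_participant` per connected participant.
    incoming
        .for_each_concurrent(/* limit */ None, |id| async move {
            println!("handling participant {id}");
        })
        .await;
}
```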
@@ -129,7 +129,7 @@ fn main() {
 
 fn server(address: ListenAddr, runtime: Arc<Runtime>) {
     let registry = Arc::new(Registry::new());
-    let server = Network::new_with_registry(Pid::new(), &runtime, &registry);
+    let mut server = Network::new_with_registry(Pid::new(), &runtime, &registry);
     runtime.spawn(Server::run(
         Arc::clone(&registry),
         SocketAddr::from(([0; 4], 59112)),

@@ -140,7 +140,7 @@ fn server(address: ListenAddr, runtime: Arc<Runtime>) {
     loop {
         info!("----");
         info!("Waiting for participant to connect");
-        let p1 = runtime.block_on(server.connected()).unwrap(); //remote representation of p1
+        let mut p1 = runtime.block_on(server.connected()).unwrap(); //remote representation of p1
         let mut s1 = runtime.block_on(p1.opened()).unwrap(); //remote representation of s1
         runtime.block_on(async {
             let mut last = Instant::now();

@@ -169,7 +169,7 @@ fn client(address: ConnectAddr, runtime: Arc<Runtime>) {
     ));
 
     let p1 = runtime.block_on(client.connect(address)).unwrap(); //remote representation of p1
-    let mut s1 = runtime
+    let s1 = runtime
         .block_on(p1.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0))
         .unwrap(); //remote representation of s1
     let mut last = Instant::now();

@@ -67,9 +67,9 @@ pub enum ParticipantEvent {
 pub struct Participant {
     local_pid: Pid,
     remote_pid: Pid,
-    a2b_open_stream_s: Mutex<mpsc::UnboundedSender<A2bStreamOpen>>,
-    b2a_stream_opened_r: Mutex<mpsc::UnboundedReceiver<Stream>>,
-    b2a_event_r: Mutex<mpsc::UnboundedReceiver<ParticipantEvent>>,
+    a2b_open_stream_s: mpsc::UnboundedSender<A2bStreamOpen>,
+    b2a_stream_opened_r: mpsc::UnboundedReceiver<Stream>,
+    b2a_event_r: mpsc::UnboundedReceiver<ParticipantEvent>,
     b2a_bandwidth_stats_r: watch::Receiver<f32>,
     a2s_disconnect_s: A2sDisconnect,
 }
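This hunk is the heart of the commit: channel endpoints that were wrapped in `Mutex` purely so methods could take `&self` now live directly in the struct, and the consuming methods take `&mut self`, so every call skips an async lock. A generic before/after sketch with Tokio channels, not the actual veloren_network internals:

```rust
use tokio::sync::{mpsc, Mutex};

// Before: receiver hidden behind a Mutex so `next()` can take `&self`.
struct InboxShared {
    events: Mutex<mpsc::UnboundedReceiver<String>>,
}

impl InboxShared {
    async fn next(&self) -> Option<String> {
        // Every call pays for an async lock, even with one logical consumer.
        self.events.lock().await.recv().await
    }
}

// After: the receiver is owned directly and `next()` takes `&mut self`.
struct Inbox {
    events: mpsc::UnboundedReceiver<String>,
}

impl Inbox {
    async fn next(&mut self) -> Option<String> {
        self.events.recv().await
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::unbounded_channel();
    tx.send("hello".to_string()).unwrap();
    let mut inbox = Inbox { events: rx };
    assert_eq!(inbox.next().await.as_deref(), Some("hello"));

    let (tx2, rx2) = mpsc::unbounded_channel();
    tx2.send("hi".to_string()).unwrap();
    let shared = InboxShared { events: Mutex::new(rx2) };
    assert_eq!(shared.next().await.as_deref(), Some("hi"));
}
```

The cost of the exclusive borrow is pushed onto callers, which is why so many bindings in the examples, docs, and tests below gain `mut`.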
@@ -171,7 +171,7 @@ pub struct StreamParams {
 /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
 /// // Create a Network, listen on port `2999` to accept connections and connect to port `8080` to connect to a (pseudo) database Application
 /// let runtime = Runtime::new().unwrap();
-/// let network = Network::new(Pid::new(), &runtime);
+/// let mut network = Network::new(Pid::new(), &runtime);
 /// runtime.block_on(async{
 ///     # //setup pseudo database!
 ///     # let database = Network::new(Pid::new(), &runtime);

@@ -195,9 +195,9 @@ pub struct StreamParams {
 pub struct Network {
     local_pid: Pid,
     participant_disconnect_sender: Arc<Mutex<HashMap<Pid, A2sDisconnect>>>,
-    listen_sender: Mutex<mpsc::UnboundedSender<(ListenAddr, oneshot::Sender<io::Result<()>>)>>,
-    connect_sender: Mutex<mpsc::UnboundedSender<A2sConnect>>,
-    connected_receiver: Mutex<mpsc::UnboundedReceiver<Participant>>,
+    listen_sender: mpsc::UnboundedSender<(ListenAddr, oneshot::Sender<io::Result<()>>)>,
+    connect_sender: mpsc::UnboundedSender<A2sConnect>,
+    connected_receiver: mpsc::UnboundedReceiver<Participant>,
     shutdown_network_s: Option<oneshot::Sender<oneshot::Sender<()>>>,
 }
 

@@ -300,9 +300,9 @@ impl Network {
         Self {
             local_pid: participant_id,
             participant_disconnect_sender,
-            listen_sender: Mutex::new(listen_sender),
-            connect_sender: Mutex::new(connect_sender),
-            connected_receiver: Mutex::new(connected_receiver),
+            listen_sender,
+            connect_sender,
+            connected_receiver,
             shutdown_network_s: Some(shutdown_network_s),
         }
     }

@@ -322,7 +322,7 @@ impl Network {
     /// # fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on port `2000` TCP on all NICs and `2001` UDP locally
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     network
     ///         .listen(ListenAddr::Tcp("127.0.0.1:2000".parse().unwrap()))

@@ -342,10 +342,7 @@ impl Network {
     pub async fn listen(&self, address: ListenAddr) -> Result<(), NetworkError> {
         let (s2a_result_s, s2a_result_r) = oneshot::channel::<io::Result<()>>();
         debug!(?address, "listening on address");
-        self.listen_sender
-            .lock()
-            .await
-            .send((address, s2a_result_s))?;
+        self.listen_sender.send((address, s2a_result_s))?;
         match s2a_result_r.await? {
             //waiting guarantees that we either listened successfully or get an error like port in
             // use

@@ -401,10 +398,7 @@ impl Network {
         let (pid_sender, pid_receiver) =
             oneshot::channel::<Result<Participant, NetworkConnectError>>();
         debug!(?address, "Connect to address");
-        self.connect_sender
-            .lock()
-            .await
-            .send((address, pid_sender))?;
+        self.connect_sender.send((address, pid_sender))?;
         let participant = match pid_receiver.await? {
             Ok(p) => p,
             Err(e) => return Err(NetworkError::ConnectFailed(e)),

@@ -431,7 +425,7 @@ impl Network {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on port `2020` TCP and opens returns their Pid
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     network

@@ -454,11 +448,9 @@ impl Network {
     /// [`listen`]: crate::api::Network::listen
     /// [`ListenAddr`]: crate::api::ListenAddr
     #[instrument(name="network", skip(self), fields(p = %self.local_pid))]
-    pub async fn connected(&self) -> Result<Participant, NetworkError> {
+    pub async fn connected(&mut self) -> Result<Participant, NetworkError> {
         let participant = self
             .connected_receiver
-            .lock()
-            .await
             .recv()
             .await
             .ok_or(NetworkError::NetworkClosed)?;

@@ -536,9 +528,9 @@ impl Participant {
         Self {
             local_pid,
             remote_pid,
-            a2b_open_stream_s: Mutex::new(a2b_open_stream_s),
-            b2a_stream_opened_r: Mutex::new(b2a_stream_opened_r),
-            b2a_event_r: Mutex::new(b2a_event_r),
+            a2b_open_stream_s,
+            b2a_stream_opened_r,
+            b2a_event_r,
             b2a_bandwidth_stats_r,
             a2s_disconnect_s: Arc::new(Mutex::new(Some(a2s_disconnect_s))),
         }

@@ -600,12 +592,10 @@ impl Participant {
     ) -> Result<Stream, ParticipantError> {
         debug_assert!(prio <= network_protocol::HIGHEST_PRIO, "invalid prio");
         let (p2a_return_stream_s, p2a_return_stream_r) = oneshot::channel::<Stream>();
-        if let Err(e) = self.a2b_open_stream_s.lock().await.send((
-            prio,
-            promises,
-            bandwidth,
-            p2a_return_stream_s,
-        )) {
+        if let Err(e) = self
+            .a2b_open_stream_s
+            .send((prio, promises, bandwidth, p2a_return_stream_s))
+        {
             debug!(?e, "bParticipant is already closed, notifying");
             return Err(ParticipantError::ParticipantDisconnected);
         }

@@ -638,11 +628,11 @@ impl Participant {
     /// // Create a Network, connect on port 2110 and wait for the other side to open a stream
     /// // Note: It's quite unusual to actively connect, but then wait on a stream to be connected, usually the Application taking initiative want's to also create the first Stream.
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
-    /// # let remote = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
+    /// # let mut remote = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     # remote.listen(ListenAddr::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
-    ///     let p1 = network.connect(ConnectAddr::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
+    ///     let mut p1 = network.connect(ConnectAddr::Tcp("127.0.0.1:2110".parse().unwrap())).await?;
     ///     # let p2 = remote.connected().await?;
     ///     # p2.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
     ///     let _s1 = p1.opened().await?;

@@ -657,8 +647,8 @@ impl Participant {
     /// [`connected`]: Network::connected
     /// [`open`]: Participant::open
     #[instrument(name="network", skip(self), fields(p = %self.local_pid))]
-    pub async fn opened(&self) -> Result<Stream, ParticipantError> {
-        match self.b2a_stream_opened_r.lock().await.recv().await {
+    pub async fn opened(&mut self) -> Result<Stream, ParticipantError> {
+        match self.b2a_stream_opened_r.recv().await {
             Some(stream) => {
                 let sid = stream.sid;
                 debug!(?sid, "Receive opened stream");

@@ -694,8 +684,8 @@ impl Participant {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on port `2030` TCP and opens returns their Pid and close connection.
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
-    /// # let remote = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
+    /// # let mut remote = Network::new(Pid::new(), &runtime);
     /// let err = runtime.block_on(async {
     ///     network
     ///         .listen(ListenAddr::Tcp("127.0.0.1:2030".parse().unwrap()))

@@ -779,11 +769,11 @@ impl Participant {
     /// // Create a Network, connect on port 2040 and wait for the other side to open a stream
     /// // Note: It's quite unusual to actively connect, but then wait on a stream to be connected, usually the Application taking initiative want's to also create the first Stream.
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
-    /// # let remote = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
+    /// # let mut remote = Network::new(Pid::new(), &runtime);
    /// runtime.block_on(async {
     ///     # remote.listen(ListenAddr::Tcp("127.0.0.1:2040".parse().unwrap())).await?;
-    ///     let p1 = network.connect(ConnectAddr::Tcp("127.0.0.1:2040".parse().unwrap())).await?;
+    ///     let mut p1 = network.connect(ConnectAddr::Tcp("127.0.0.1:2040".parse().unwrap())).await?;
     ///     # let p2 = remote.connected().await?;
     ///     let event = p1.fetch_event().await?;
     ///     drop(network);

@@ -794,8 +784,8 @@ impl Participant {
     /// ```
     ///
     /// [`ParticipantEvent`]: crate::api::ParticipantEvent
-    pub async fn fetch_event(&self) -> Result<ParticipantEvent, ParticipantError> {
-        match self.b2a_event_r.lock().await.recv().await {
+    pub async fn fetch_event(&mut self) -> Result<ParticipantEvent, ParticipantError> {
+        match self.b2a_event_r.recv().await {
             Some(event) => Ok(event),
             None => {
                 debug!("event_receiver failed, closing participant");

@@ -811,16 +801,13 @@ impl Participant {
     ///
     /// [`ParticipantEvent`]: crate::api::ParticipantEvent
     /// [`fetch_event`]: Participant::fetch_event
-    pub fn try_fetch_event(&self) -> Result<Option<ParticipantEvent>, ParticipantError> {
-        match &mut self.b2a_event_r.try_lock() {
-            Ok(b2a_event_r) => match b2a_event_r.try_recv() {
-                Ok(event) => Ok(Some(event)),
-                Err(mpsc::error::TryRecvError::Empty) => Ok(None),
-                Err(mpsc::error::TryRecvError::Disconnected) => {
-                    Err(ParticipantError::ParticipantDisconnected)
-                },
+    pub fn try_fetch_event(&mut self) -> Result<Option<ParticipantEvent>, ParticipantError> {
+        match self.b2a_event_r.try_recv() {
+            Ok(event) => Ok(Some(event)),
+            Err(mpsc::error::TryRecvError::Empty) => Ok(None),
+            Err(mpsc::error::TryRecvError::Disconnected) => {
+                Err(ParticipantError::ParticipantDisconnected)
             },
-            Err(_) => Ok(None),
         }
     }
 
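The old `try_fetch_event` had to `try_lock` the wrapper first and silently returned `Ok(None)` whenever the lock was contended; with exclusive access through `&mut self` it can poll the receiver directly. A small sketch of that non-blocking poll against a bare Tokio unbounded receiver (a recent Tokio is assumed for `try_recv`, and the error type here is invented):

```rust
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TryRecvError;

#[derive(Debug, PartialEq)]
enum PollError {
    Disconnected,
}

// With `&mut` access there is no lock to miss: either an event is ready,
// the queue is empty, or the sender side is gone.
fn try_fetch<T>(rx: &mut mpsc::UnboundedReceiver<T>) -> Result<Option<T>, PollError> {
    match rx.try_recv() {
        Ok(event) => Ok(Some(event)),
        Err(TryRecvError::Empty) => Ok(None),
        Err(TryRecvError::Disconnected) => Err(PollError::Disconnected),
    }
}

fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    tx.send(42u32).unwrap();
    assert_eq!(try_fetch(&mut rx), Ok(Some(42)));
    assert_eq!(try_fetch(&mut rx), Ok(None));
    drop(tx);
    assert_eq!(try_fetch(&mut rx), Err(PollError::Disconnected));
}
```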
@@ -892,14 +879,14 @@ impl Stream {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on Port `2200` and wait for a Stream to be opened, then answer `Hello World`
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     network.listen(ListenAddr::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
     ///     # let remote_p = remote.connect(ConnectAddr::Tcp("127.0.0.1:2200".parse().unwrap())).await?;
     ///     # // keep it alive
     ///     # let _stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
-    ///     let participant_a = network.connected().await?;
+    ///     let mut participant_a = network.connected().await?;
     ///     let mut stream_a = participant_a.opened().await?;
     ///     //Send Message
     ///     stream_a.send("Hello World")?;

@@ -914,7 +901,7 @@ impl Stream {
     /// [`recv`]: Stream::recv
     /// [`Serialized`]: Serialize
     #[inline]
-    pub fn send<M: Serialize>(&mut self, msg: M) -> Result<(), StreamError> {
+    pub fn send<M: Serialize>(&self, msg: M) -> Result<(), StreamError> {
         self.send_raw_move(Message::serialize(&msg, self.params()))
     }
 

@@ -933,7 +920,7 @@ impl Stream {
     ///
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote1 = Network::new(Pid::new(), &runtime);
     /// # let remote2 = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {

@@ -943,8 +930,8 @@ impl Stream {
     ///     # assert_eq!(remote1_p.remote_pid(), remote2_p.remote_pid());
     ///     # remote1_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
     ///     # remote2_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
-    ///     let participant_a = network.connected().await?;
-    ///     let participant_b = network.connected().await?;
+    ///     let mut participant_a = network.connected().await?;
+    ///     let mut participant_b = network.connected().await?;
     ///     let mut stream_a = participant_a.opened().await?;
     ///     let mut stream_b = participant_b.opened().await?;
     ///

@@ -966,7 +953,7 @@ impl Stream {
     /// [`compress`]: lz_fear::raw::compress2
     /// [`Message::serialize`]: crate::message::Message::serialize
     #[inline]
-    pub fn send_raw(&mut self, message: &Message) -> Result<(), StreamError> {
+    pub fn send_raw(&self, message: &Message) -> Result<(), StreamError> {
         self.send_raw_move(Message {
             data: message.data.clone(),
             #[cfg(feature = "compression")]

@@ -974,7 +961,7 @@ impl Stream {
         })
     }
 
-    fn send_raw_move(&mut self, message: Message) -> Result<(), StreamError> {
+    fn send_raw_move(&self, message: Message) -> Result<(), StreamError> {
         if self.send_closed.load(Ordering::Relaxed) {
             return Err(StreamError::StreamClosed);
         }
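`send`, `send_raw`, and `send_raw_move` can drop `&mut self` because `tokio::sync::mpsc::UnboundedSender::send` only needs `&self`; the `mut s1_a` bindings removed from the tests further below follow from this. A tiny sketch of a send-only handle with the same property, using invented types rather than the real `Stream`:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use tokio::sync::mpsc;

// A minimal stand-in for a send-only stream handle: sending needs no exclusive
// access because `UnboundedSender::send` itself takes `&self`.
struct Outbox {
    closed: AtomicBool,
    queue: mpsc::UnboundedSender<String>,
}

impl Outbox {
    fn send(&self, msg: &str) -> Result<(), &'static str> {
        if self.closed.load(Ordering::Relaxed) {
            return Err("stream closed");
        }
        self.queue.send(msg.to_string()).map_err(|_| "receiver gone")
    }
}

fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let outbox = Outbox { closed: AtomicBool::new(false), queue: tx };
    // No `mut` binding is needed to send.
    outbox.send("woop_PARTY_HARD_woop").unwrap();
    assert_eq!(rx.try_recv().unwrap(), "woop_PARTY_HARD_woop");
}
```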
@@ -1002,14 +989,14 @@ impl Stream {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on Port `2220` and wait for a Stream to be opened, then listen on it
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     network.listen(ListenAddr::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
     ///     # let remote_p = remote.connect(ConnectAddr::Tcp("127.0.0.1:2220".parse().unwrap())).await?;
     ///     # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
     ///     # stream_p.send("Hello World");
-    ///     let participant_a = network.connected().await?;
+    ///     let mut participant_a = network.connected().await?;
     ///     let mut stream_a = participant_a.opened().await?;
     ///     //Recv Message
     ///     println!("{}", stream_a.recv::<String>().await?);

@@ -1036,14 +1023,14 @@ impl Stream {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on Port `2230` and wait for a Stream to be opened, then listen on it
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     network.listen(ListenAddr::Tcp("127.0.0.1:2230".parse().unwrap())).await?;
     ///     # let remote_p = remote.connect(ConnectAddr::Tcp("127.0.0.1:2230".parse().unwrap())).await?;
     ///     # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
     ///     # stream_p.send("Hello World");
-    ///     let participant_a = network.connected().await?;
+    ///     let mut participant_a = network.connected().await?;
     ///     let mut stream_a = participant_a.opened().await?;
     ///     //Recv Message
     ///     let msg = stream_a.recv_raw().await?;

@@ -1092,7 +1079,7 @@ impl Stream {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on Port `2240` and wait for a Stream to be opened, then listen on it
     /// let runtime = Runtime::new().unwrap();
-    /// let network = Network::new(Pid::new(), &runtime);
+    /// let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote = Network::new(Pid::new(), &runtime);
     /// runtime.block_on(async {
     ///     network.listen(ListenAddr::Tcp("127.0.0.1:2240".parse().unwrap())).await?;

@@ -1100,7 +1087,7 @@ impl Stream {
     ///     # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
     ///     # stream_p.send("Hello World");
     ///     # std::thread::sleep(std::time::Duration::from_secs(1));
-    ///     let participant_a = network.connected().await?;
+    ///     let mut participant_a = network.connected().await?;
     ///     let mut stream_a = participant_a.opened().await?;
     ///     //Try Recv Message
     ///     println!("{:?}", stream_a.try_recv::<String>()?);

@@ -59,11 +59,11 @@
 //!
 //! // Server
 //! async fn server(runtime: &Runtime) -> Result<(), Box<dyn std::error::Error>> {
-//!     let server_network = Network::new(Pid::new(), runtime);
+//!     let mut server_network = Network::new(Pid::new(), runtime);
 //!     server_network
 //!         .listen(ListenAddr::Tcp("127.0.0.1:12345".parse().unwrap()))
 //!         .await?;
-//!     let client = server_network.connected().await?;
+//!     let mut client = server_network.connected().await?;
 //!     let mut stream = client.opened().await?;
 //!     let msg: String = stream.recv().await?;
 //!     println!("Got message: {}", msg);

@@ -78,14 +78,14 @@ impl Message {
     /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// // Create a Network, listen on Port `2300` and wait for a Stream to be opened, then listen on it
     /// # let runtime = Runtime::new().unwrap();
-    /// # let network = Network::new(Pid::new(), &runtime);
+    /// # let mut network = Network::new(Pid::new(), &runtime);
     /// # let remote = Network::new(Pid::new(), &runtime);
     /// # runtime.block_on(async {
     ///     # network.listen(ListenAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?;
     ///     # let remote_p = remote.connect(ConnectAddr::Tcp("127.0.0.1:2300".parse().unwrap())).await?;
     ///     # let mut stream_p = remote_p.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0).await?;
     ///     # stream_p.send("Hello World");
-    ///     # let participant_a = network.connected().await?;
+    ///     # let mut participant_a = network.connected().await?;
     ///     let mut stream_a = participant_a.opened().await?;
     ///     //Recv Message
     ///     let msg = stream_a.recv_raw().await?;

@@ -28,7 +28,7 @@ use helper::{network_participant_stream, tcp, SLEEP_EXTERNAL, SLEEP_INTERNAL};
 #[test]
 fn close_network() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _, _p1_a, mut s1_a, _, _p1_b, mut s1_b) = network_participant_stream(tcp());
+    let (r, _, _p1_a, s1_a, _, _p1_b, mut s1_b) = network_participant_stream(tcp());
 
     std::thread::sleep(SLEEP_INTERNAL);
 

@@ -40,7 +40,7 @@ fn close_network() {
 #[test]
 fn close_participant() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, p1_a, mut s1_a, _n_b, p1_b, mut s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, p1_a, s1_a, _n_b, p1_b, mut s1_b) = network_participant_stream(tcp());
 
     r.block_on(p1_a.disconnect()).unwrap();
     r.block_on(p1_b.disconnect()).unwrap();

@@ -75,7 +75,7 @@ fn close_streams_in_block_on() {
     let (r, _n_a, _p_a, s1_a, _n_b, _p_b, s1_b) = network_participant_stream(tcp());
     r.block_on(async {
         //make it locally so that they are dropped later
-        let mut s1_a = s1_a;
+        let s1_a = s1_a;
         let mut s1_b = s1_b;
         s1_a.send("ping").unwrap();
         assert_eq!(s1_b.recv().await, Ok("ping".to_string()));

@@ -87,7 +87,7 @@ fn close_streams_in_block_on() {
 #[test]
 fn stream_simple_3msg_then_close() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
 
     s1_a.send(1u8).unwrap();
     s1_a.send(42).unwrap();

@@ -104,7 +104,7 @@ fn stream_simple_3msg_then_close() {
 fn stream_send_first_then_receive() {
     // recv should still be possible even if stream got closed if they are in queue
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
 
     s1_a.send(1u8).unwrap();
     s1_a.send(42).unwrap();

@@ -120,7 +120,7 @@ fn stream_send_first_then_receive() {
 #[test]
 fn stream_send_1_then_close_stream() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
     s1_a.send("this message must be received, even if stream is closed already!")
         .unwrap();
     drop(s1_a);

@@ -133,7 +133,7 @@ fn stream_send_1_then_close_stream() {
 #[test]
 fn stream_send_100000_then_close_stream() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
     for _ in 0..100000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -151,7 +151,7 @@ fn stream_send_100000_then_close_stream() {
 #[test]
 fn stream_send_100000_then_close_stream_remote() {
     let (_, _) = helper::setup(false, 0);
-    let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -164,7 +164,7 @@ fn stream_send_100000_then_close_stream_remote() {
 #[test]
 fn stream_send_100000_then_close_stream_remote2() {
     let (_, _) = helper::setup(false, 0);
-    let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -178,7 +178,7 @@ fn stream_send_100000_then_close_stream_remote2() {
 #[test]
 fn stream_send_100000_then_close_stream_remote3() {
     let (_, _) = helper::setup(false, 0);
-    let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -192,7 +192,7 @@ fn stream_send_100000_then_close_stream_remote3() {
 #[test]
 fn close_part_then_network() {
     let (_, _) = helper::setup(false, 0);
-    let (_r, n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (_r, n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..1000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -205,7 +205,7 @@ fn close_part_then_network() {
 #[test]
 fn close_network_then_part() {
     let (_, _) = helper::setup(false, 0);
-    let (_r, n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (_r, n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..1000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -218,7 +218,7 @@ fn close_network_then_part() {
 #[test]
 fn close_network_then_disconnect_part() {
     let (_, _) = helper::setup(false, 0);
-    let (r, n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (r, n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..1000 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -231,7 +231,7 @@ fn close_network_then_disconnect_part() {
 #[test]
 fn close_runtime_then_network() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -244,7 +244,7 @@ fn close_runtime_then_network() {
 #[test]
 fn close_runtime_then_part() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -258,7 +258,7 @@ fn close_runtime_then_part() {
 #[test]
 fn close_network_from_async() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, _p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -271,7 +271,7 @@ fn close_network_from_async() {
 #[test]
 fn close_part_from_async() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, p_a, mut s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
+    let (r, _n_a, p_a, s1_a, _n_b, _p_b, _s1_b) = network_participant_stream(tcp());
     for _ in 0..100 {
         s1_a.send("woop_PARTY_HARD_woop").unwrap();
     }

@@ -285,8 +285,8 @@ fn close_part_from_async() {
 #[test]
 fn opened_stream_before_remote_part_is_closed() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
-    let mut s2_a = r.block_on(p_a.open(4, Promises::empty(), 0)).unwrap();
+    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
+    let s2_a = r.block_on(p_a.open(4, Promises::empty(), 0)).unwrap();
     s2_a.send("HelloWorld").unwrap();
     let mut s2_b = r.block_on(p_b.opened()).unwrap();
     drop(p_a);

@@ -298,8 +298,8 @@ fn opened_stream_before_remote_part_is_closed() {
 #[test]
 fn opened_stream_after_remote_part_is_closed() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
-    let mut s2_a = r.block_on(p_a.open(3, Promises::empty(), 0)).unwrap();
+    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
+    let s2_a = r.block_on(p_a.open(3, Promises::empty(), 0)).unwrap();
     s2_a.send("HelloWorld").unwrap();
     drop(p_a);
     std::thread::sleep(SLEEP_EXTERNAL);

@@ -315,8 +315,8 @@ fn opened_stream_after_remote_part_is_closed() {
 #[test]
 fn open_stream_after_remote_part_is_closed() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
-    let mut s2_a = r.block_on(p_a.open(4, Promises::empty(), 0)).unwrap();
+    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
+    let s2_a = r.block_on(p_a.open(4, Promises::empty(), 0)).unwrap();
     s2_a.send("HelloWorld").unwrap();
     drop(p_a);
     std::thread::sleep(SLEEP_EXTERNAL);

@@ -332,7 +332,7 @@ fn open_stream_after_remote_part_is_closed() {
 #[test]
 fn failed_stream_open_after_remote_part_is_closed() {
     let (_, _) = helper::setup(false, 0);
-    let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(tcp());
+    let (r, _n_a, p_a, _, _n_b, mut p_b, _) = network_participant_stream(tcp());
     drop(p_a);
     assert_eq!(
         r.block_on(p_b.opened()).unwrap_err(),

@@ -345,14 +345,14 @@ fn failed_stream_open_after_remote_part_is_closed() {
 fn open_participant_before_remote_part_is_closed() {
     let (_, _) = helper::setup(false, 0);
     let r = Arc::new(Runtime::new().unwrap());
-    let n_a = Network::new(Pid::fake(0), &r);
+    let mut n_a = Network::new(Pid::fake(0), &r);
     let n_b = Network::new(Pid::fake(1), &r);
     let addr = tcp();
     r.block_on(n_a.listen(addr.0)).unwrap();
     let p_b = r.block_on(n_b.connect(addr.1)).unwrap();
-    let mut s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
+    let s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
     s1_b.send("HelloWorld").unwrap();
-    let p_a = r.block_on(n_a.connected()).unwrap();
+    let mut p_a = r.block_on(n_a.connected()).unwrap();
||||||
drop(s1_b);
|
drop(s1_b);
|
||||||
drop(p_b);
|
drop(p_b);
|
||||||
drop(n_b);
|
drop(n_b);
|
||||||
@ -365,18 +365,18 @@ fn open_participant_before_remote_part_is_closed() {
|
|||||||
fn open_participant_after_remote_part_is_closed() {
|
fn open_participant_after_remote_part_is_closed() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let r = Arc::new(Runtime::new().unwrap());
|
let r = Arc::new(Runtime::new().unwrap());
|
||||||
let n_a = Network::new(Pid::fake(0), &r);
|
let mut n_a = Network::new(Pid::fake(0), &r);
|
||||||
let n_b = Network::new(Pid::fake(1), &r);
|
let n_b = Network::new(Pid::fake(1), &r);
|
||||||
let addr = tcp();
|
let addr = tcp();
|
||||||
r.block_on(n_a.listen(addr.0)).unwrap();
|
r.block_on(n_a.listen(addr.0)).unwrap();
|
||||||
let p_b = r.block_on(n_b.connect(addr.1)).unwrap();
|
let p_b = r.block_on(n_b.connect(addr.1)).unwrap();
|
||||||
let mut s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
|
let s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
|
||||||
s1_b.send("HelloWorld").unwrap();
|
s1_b.send("HelloWorld").unwrap();
|
||||||
drop(s1_b);
|
drop(s1_b);
|
||||||
drop(p_b);
|
drop(p_b);
|
||||||
drop(n_b);
|
drop(n_b);
|
||||||
std::thread::sleep(SLEEP_EXTERNAL);
|
std::thread::sleep(SLEEP_EXTERNAL);
|
||||||
let p_a = r.block_on(n_a.connected()).unwrap();
|
let mut p_a = r.block_on(n_a.connected()).unwrap();
|
||||||
let mut s1_a = r.block_on(p_a.opened()).unwrap();
|
let mut s1_a = r.block_on(p_a.opened()).unwrap();
|
||||||
assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
|
assert_eq!(r.block_on(s1_a.recv()), Ok("HelloWorld".to_string()));
|
||||||
}
|
}
|
||||||
@ -385,19 +385,19 @@ fn open_participant_after_remote_part_is_closed() {
|
|||||||
fn close_network_scheduler_completely() {
|
fn close_network_scheduler_completely() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let r = Arc::new(Runtime::new().unwrap());
|
let r = Arc::new(Runtime::new().unwrap());
|
||||||
let n_a = Network::new(Pid::fake(0), &r);
|
let mut n_a = Network::new(Pid::fake(0), &r);
|
||||||
let n_b = Network::new(Pid::fake(1), &r);
|
let n_b = Network::new(Pid::fake(1), &r);
|
||||||
let addr = tcp();
|
let addr = tcp();
|
||||||
r.block_on(n_a.listen(addr.0)).unwrap();
|
r.block_on(n_a.listen(addr.0)).unwrap();
|
||||||
let p_b = r.block_on(n_b.connect(addr.1)).unwrap();
|
let mut p_b = r.block_on(n_b.connect(addr.1)).unwrap();
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
r.block_on(p_b.fetch_event()),
|
r.block_on(p_b.fetch_event()),
|
||||||
Ok(ParticipantEvent::ChannelCreated(_))
|
Ok(ParticipantEvent::ChannelCreated(_))
|
||||||
);
|
);
|
||||||
let mut s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
|
let s1_b = r.block_on(p_b.open(4, Promises::empty(), 0)).unwrap();
|
||||||
s1_b.send("HelloWorld").unwrap();
|
s1_b.send("HelloWorld").unwrap();
|
||||||
|
|
||||||
let p_a = r.block_on(n_a.connected()).unwrap();
|
let mut p_a = r.block_on(n_a.connected()).unwrap();
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
r.block_on(p_a.fetch_event()),
|
r.block_on(p_a.fetch_event()),
|
||||||
Ok(ParticipantEvent::ChannelCreated(_))
|
Ok(ParticipantEvent::ChannelCreated(_))
|
||||||
@ -429,7 +429,7 @@ fn close_network_scheduler_completely() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn dont_panic_on_multiply_recv_after_close() {
|
fn dont_panic_on_multiply_recv_after_close() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send(11u32).unwrap();
|
s1_a.send(11u32).unwrap();
|
||||||
drop(s1_a);
|
drop(s1_a);
|
||||||
@ -444,7 +444,7 @@ fn dont_panic_on_multiply_recv_after_close() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn dont_panic_on_recv_send_after_close() {
|
fn dont_panic_on_recv_send_after_close() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send(11u32).unwrap();
|
s1_a.send(11u32).unwrap();
|
||||||
drop(s1_a);
|
drop(s1_a);
|
||||||
@ -457,7 +457,7 @@ fn dont_panic_on_recv_send_after_close() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn dont_panic_on_multiple_send_after_close() {
|
fn dont_panic_on_multiple_send_after_close() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send(11u32).unwrap();
|
s1_a.send(11u32).unwrap();
|
||||||
drop(s1_a);
|
drop(s1_a);
|
||||||
|
@ -67,11 +67,11 @@ pub fn network_participant_stream(
|
|||||||
) {
|
) {
|
||||||
let runtime = Arc::new(Runtime::new().unwrap());
|
let runtime = Arc::new(Runtime::new().unwrap());
|
||||||
let (n_a, p1_a, s1_a, n_b, p1_b, s1_b) = runtime.block_on(async {
|
let (n_a, p1_a, s1_a, n_b, p1_b, s1_b) = runtime.block_on(async {
|
||||||
let n_a = Network::new(Pid::fake(0), &runtime);
|
let mut n_a = Network::new(Pid::fake(0), &runtime);
|
||||||
let n_b = Network::new(Pid::fake(1), &runtime);
|
let n_b = Network::new(Pid::fake(1), &runtime);
|
||||||
|
|
||||||
n_a.listen(addr.0).await.unwrap();
|
n_a.listen(addr.0).await.unwrap();
|
||||||
let p1_b = n_b.connect(addr.1).await.unwrap();
|
let mut p1_b = n_b.connect(addr.1).await.unwrap();
|
||||||
let p1_a = n_a.connected().await.unwrap();
|
let p1_a = n_a.connected().await.unwrap();
|
||||||
|
|
||||||
let s1_a = p1_a.open(4, Promises::ORDERED, 0).await.unwrap();
|
let s1_a = p1_a.open(4, Promises::ORDERED, 0).await.unwrap();
|
||||||
|
@ -10,7 +10,7 @@ use veloren_network::{ConnectAddr, ListenAddr, Network, ParticipantEvent, Pid, P
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_simple() {
|
fn stream_simple() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||||
@ -20,7 +20,7 @@ fn stream_simple() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_try_recv() {
|
fn stream_try_recv() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send(4242u32).unwrap();
|
s1_a.send(4242u32).unwrap();
|
||||||
std::thread::sleep(SLEEP_EXTERNAL);
|
std::thread::sleep(SLEEP_EXTERNAL);
|
||||||
@ -31,7 +31,7 @@ fn stream_try_recv() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_simple_3msg() {
|
fn stream_simple_3msg() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
s1_a.send(1337).unwrap();
|
s1_a.send(1337).unwrap();
|
||||||
@ -45,7 +45,7 @@ fn stream_simple_3msg() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_simple_mpsc() {
|
fn stream_simple_mpsc() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(mpsc());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(mpsc());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||||
@ -55,7 +55,7 @@ fn stream_simple_mpsc() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_simple_mpsc_3msg() {
|
fn stream_simple_mpsc_3msg() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(mpsc());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(mpsc());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
s1_a.send(1337).unwrap();
|
s1_a.send(1337).unwrap();
|
||||||
@ -69,7 +69,7 @@ fn stream_simple_mpsc_3msg() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_simple_quic() {
|
fn stream_simple_quic() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(quic());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(quic());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||||
@ -79,7 +79,7 @@ fn stream_simple_quic() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn stream_simple_quic_3msg() {
|
fn stream_simple_quic_3msg() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(quic());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(quic());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
s1_a.send(1337).unwrap();
|
s1_a.send(1337).unwrap();
|
||||||
@ -94,7 +94,7 @@ fn stream_simple_quic_3msg() {
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn stream_simple_udp() {
|
fn stream_simple_udp() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(udp());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(udp());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
assert_eq!(r.block_on(s1_b.recv()), Ok("Hello World".to_string()));
|
||||||
@ -105,7 +105,7 @@ fn stream_simple_udp() {
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn stream_simple_udp_3msg() {
|
fn stream_simple_udp_3msg() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(udp());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(udp());
|
||||||
|
|
||||||
s1_a.send("Hello World").unwrap();
|
s1_a.send("Hello World").unwrap();
|
||||||
s1_a.send(1337).unwrap();
|
s1_a.send(1337).unwrap();
|
||||||
@ -184,7 +184,7 @@ fn api_stream_send_main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
let network = Network::new(Pid::new(), &r);
|
let network = Network::new(Pid::new(), &r);
|
||||||
let remote = Network::new(Pid::new(), &r);
|
let remote = Network::new(Pid::new(), &r);
|
||||||
r.block_on(async {
|
r.block_on(async {
|
||||||
let network = network;
|
let mut network = network;
|
||||||
let remote = remote;
|
let remote = remote;
|
||||||
network
|
network
|
||||||
.listen(ListenAddr::Tcp("127.0.0.1:1200".parse().unwrap()))
|
.listen(ListenAddr::Tcp("127.0.0.1:1200".parse().unwrap()))
|
||||||
@ -196,8 +196,8 @@ fn api_stream_send_main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
let _stream_p = remote_p
|
let _stream_p = remote_p
|
||||||
.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0)
|
.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0)
|
||||||
.await?;
|
.await?;
|
||||||
let participant_a = network.connected().await?;
|
let mut participant_a = network.connected().await?;
|
||||||
let mut stream_a = participant_a.opened().await?;
|
let stream_a = participant_a.opened().await?;
|
||||||
//Send Message
|
//Send Message
|
||||||
stream_a.send("Hello World")?;
|
stream_a.send("Hello World")?;
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -213,7 +213,7 @@ fn api_stream_recv_main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
let network = Network::new(Pid::new(), &r);
|
let network = Network::new(Pid::new(), &r);
|
||||||
let remote = Network::new(Pid::new(), &r);
|
let remote = Network::new(Pid::new(), &r);
|
||||||
r.block_on(async {
|
r.block_on(async {
|
||||||
let network = network;
|
let mut network = network;
|
||||||
let remote = remote;
|
let remote = remote;
|
||||||
network
|
network
|
||||||
.listen(ListenAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
|
.listen(ListenAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
|
||||||
@ -221,11 +221,11 @@ fn api_stream_recv_main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
let remote_p = remote
|
let remote_p = remote
|
||||||
.connect(ConnectAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
|
.connect(ConnectAddr::Tcp("127.0.0.1:1220".parse().unwrap()))
|
||||||
.await?;
|
.await?;
|
||||||
let mut stream_p = remote_p
|
let stream_p = remote_p
|
||||||
.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0)
|
.open(4, Promises::ORDERED | Promises::CONSISTENCY, 0)
|
||||||
.await?;
|
.await?;
|
||||||
stream_p.send("Hello World")?;
|
stream_p.send("Hello World")?;
|
||||||
let participant_a = network.connected().await?;
|
let mut participant_a = network.connected().await?;
|
||||||
let mut stream_a = participant_a.opened().await?;
|
let mut stream_a = participant_a.opened().await?;
|
||||||
//Send Message
|
//Send Message
|
||||||
assert_eq!("Hello World".to_string(), stream_a.recv::<String>().await?);
|
assert_eq!("Hello World".to_string(), stream_a.recv::<String>().await?);
|
||||||
@ -236,7 +236,7 @@ fn api_stream_recv_main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
#[test]
|
#[test]
|
||||||
fn wrong_parse() {
|
fn wrong_parse() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send(1337).unwrap();
|
s1_a.send(1337).unwrap();
|
||||||
match r.block_on(s1_b.recv::<String>()) {
|
match r.block_on(s1_b.recv::<String>()) {
|
||||||
@ -249,7 +249,7 @@ fn wrong_parse() {
|
|||||||
#[test]
|
#[test]
|
||||||
fn multiple_try_recv() {
|
fn multiple_try_recv() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
|
||||||
|
|
||||||
s1_a.send("asd").unwrap();
|
s1_a.send("asd").unwrap();
|
||||||
s1_a.send(11u32).unwrap();
|
s1_a.send(11u32).unwrap();
|
||||||
@ -295,9 +295,9 @@ fn listen_on_ipv6_doesnt_block_ipv4() {
|
|||||||
))),
|
))),
|
||||||
);
|
);
|
||||||
|
|
||||||
let (_r, _n_a, _p_a, mut s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcpv6);
|
let (_r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcpv6);
|
||||||
std::thread::sleep(SLEEP_EXTERNAL);
|
std::thread::sleep(SLEEP_EXTERNAL);
|
||||||
let (_r2, _n_a2, _p_a2, mut s1_a2, _n_b2, _p_b2, mut s1_b2) = network_participant_stream(tcpv4);
|
let (_r2, _n_a2, _p_a2, s1_a2, _n_b2, _p_b2, mut s1_b2) = network_participant_stream(tcpv4);
|
||||||
|
|
||||||
s1_a.send(42u32).unwrap();
|
s1_a.send(42u32).unwrap();
|
||||||
s1_a2.send(1337u32).unwrap();
|
s1_a2.send(1337u32).unwrap();
|
||||||
@ -313,7 +313,7 @@ fn listen_on_ipv6_doesnt_block_ipv4() {
|
|||||||
fn check_correct_channel_events() {
|
fn check_correct_channel_events() {
|
||||||
let (_, _) = helper::setup(false, 0);
|
let (_, _) = helper::setup(false, 0);
|
||||||
let con_addr = tcp();
|
let con_addr = tcp();
|
||||||
let (r, _n_a, p_a, _, _n_b, p_b, _) = network_participant_stream(con_addr.clone());
|
let (r, _n_a, mut p_a, _, _n_b, mut p_b, _) = network_participant_stream(con_addr.clone());
|
||||||
|
|
||||||
let event_a = r.block_on(p_a.fetch_event()).unwrap();
|
let event_a = r.block_on(p_a.fetch_event()).unwrap();
|
||||||
let event_b = r.block_on(p_b.fetch_event()).unwrap();
|
let event_b = r.block_on(p_b.fetch_event()).unwrap();
|
||||||
|
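The pattern in these test updates is uniform: bindings that only call `send` or `open` drop their `mut`, while anything that calls `recv`, `opened`, `connected`, or `fetch_event` keeps (or gains) it. A minimal sketch of the resulting usage, reusing the same test helpers (`helper::setup`, `network_participant_stream`, `tcp`) and assuming only the mutability split these diffs imply:

    #[test]
    fn send_does_not_need_mut() {
        let (_, _) = helper::setup(false, 0);
        // `s1_a` is only used to send, so (per the changes above) it no longer
        // needs `mut`; the receiving side still does, because `recv` takes
        // an exclusive borrow.
        let (r, _n_a, _p_a, s1_a, _n_b, _p_b, mut s1_b) = network_participant_stream(tcp());
        s1_a.send("ping").unwrap();
        assert_eq!(r.block_on(s1_b.recv()), Ok("ping".to_string()));
    }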
@@ -41,6 +41,7 @@ rustls-pemfile = { version = "1", default-features = false }
 atomicwrites = "0.3.0"
 chrono = { version = "0.4.22", features = ["serde"] }
 chrono-tz = { version = "0.6", features = ["serde"] }
+drop_guard = { version = "0.3.0" }
 humantime = "2.1.0"
 itertools = "0.10"
 lazy_static = "1.4.0"
@@ -49,6 +50,7 @@ serde = { version = "1.0.110", features = ["derive"] }
 serde_json = "1.0.50"
 rand = { version = "0.8", features = ["small_rng"] }
 hashbrown = { version = "0.12", features = ["rayon", "serde", "nightly"] }
+parking_lot = { version = "0.12" }
 rayon = "1.5"
 crossbeam-channel = "0.5"
 prometheus = { version = "0.13", default-features = false}
@@ -6,6 +6,7 @@ use common::{
     terrain::TerrainChunk,
 };
 use hashbrown::{hash_map::Entry, HashMap};
+use rayon::iter::ParallelIterator;
 use specs::Entity as EcsEntity;
 use std::sync::{
     atomic::{AtomicBool, Ordering},
@@ -59,6 +60,14 @@ impl ChunkGenerator {
         let index = index.as_index_ref();
         let payload = world
             .generate_chunk(index, key, || cancel.load(Ordering::Relaxed), Some(time))
+            // FIXME: Since only the first entity who cancels a chunk is notified, we end up
+            // delaying chunk re-requests for up to 3 seconds for other clients, which isn't
+            // great. We *could* store all the other requesting clients here, but it could
+            // bloat memory a lot. Currently, this isn't much of an issue because we rarely
+            // have large numbers of pending chunks, so most of them are likely to be nearby an
+            // actual player most of the time, but that will eventually change. In the future,
+            // some solution that always pushes chunk updates to players (rather than waiting
+            // for explicit requests) should adequately solve this kind of issue.
             .map_err(|_| entity);
         let _ = chunk_tx.send((key, payload));
     });
@@ -82,6 +91,10 @@ impl ChunkGenerator {
         self.pending_chunks.keys().copied()
     }

+    pub fn par_pending_chunks(&self) -> impl rayon::iter::ParallelIterator<Item = Vec2<i32>> + '_ {
+        self.pending_chunks.par_keys().copied()
+    }
+
     pub fn cancel_if_pending(&mut self, key: Vec2<i32>) {
         if let Some(cancel) = self.pending_chunks.remove(&key) {
             cancel.store(true, Ordering::Relaxed);
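The new `par_pending_chunks` accessor exposes the pending-chunk keys as a rayon parallel iterator (backed by hashbrown's `par_keys`, which is why the `rayon` feature is enabled on `hashbrown` above). A rough usage sketch; the caller shown here is hypothetical and assumes it lives in the same crate as `ChunkGenerator`:

    use rayon::iter::ParallelIterator;
    use vek::Vec2;

    // Hypothetical helper: count pending chunks in the +x/+y quadrant, in
    // parallel, without first cloning the key set out of the generator.
    fn pending_in_positive_quadrant(generator: &ChunkGenerator) -> usize {
        generator
            .par_pending_chunks()
            .filter(|key: &Vec2<i32>| key.x >= 0 && key.y >= 0)
            .count()
    }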
@@ -2,7 +2,7 @@ use common_net::msg::{ClientType, ServerGeneral, ServerMsg};
 use network::{Message, Participant, Stream, StreamError, StreamParams};
 use serde::{de::DeserializeOwned, Serialize};
 use specs::Component;
-use std::sync::{atomic::AtomicBool, Mutex};
+use std::sync::atomic::AtomicBool;

 /// Client handles ALL network related information of everything that connects
 /// to the server Client DOES NOT handle game states
@@ -13,17 +13,18 @@ use std::sync::{atomic::AtomicBool, Mutex};
 pub struct Client {
     pub client_type: ClientType,
     pub participant: Option<Participant>,
-    pub last_ping: Mutex<f64>,
+    pub last_ping: f64,
     pub login_msg_sent: AtomicBool,

-    //TODO: improve network crate so that `send` is no longer `&mut self` and we can get rid of
-    // this Mutex. This Mutex is just to please the compiler as we do not get into contention
-    general_stream: Mutex<Stream>,
-    ping_stream: Mutex<Stream>,
-    register_stream: Mutex<Stream>,
-    character_screen_stream: Mutex<Stream>,
-    in_game_stream: Mutex<Stream>,
-    terrain_stream: Mutex<Stream>,
+    //TODO: Consider splitting each of these out into their own components so all the message
+    //processing systems can run in parallel with each other (though it may turn out not to
+    //matter that much).
+    general_stream: Stream,
+    ping_stream: Stream,
+    register_stream: Stream,
+    character_screen_stream: Stream,
+    in_game_stream: Stream,
+    terrain_stream: Stream,

     general_stream_params: StreamParams,
     ping_stream_params: StreamParams,
@@ -63,14 +64,14 @@ impl Client {
         Client {
             client_type,
             participant: Some(participant),
-            last_ping: Mutex::new(last_ping),
+            last_ping,
             login_msg_sent: AtomicBool::new(false),
-            general_stream: Mutex::new(general_stream),
-            ping_stream: Mutex::new(ping_stream),
-            register_stream: Mutex::new(register_stream),
-            character_screen_stream: Mutex::new(character_screen_stream),
-            in_game_stream: Mutex::new(in_game_stream),
-            terrain_stream: Mutex::new(terrain_stream),
+            general_stream,
+            ping_stream,
+            register_stream,
+            character_screen_stream,
+            in_game_stream,
+            terrain_stream,
             general_stream_params,
             ping_stream_params,
             register_stream_params,
@@ -145,16 +146,12 @@ impl Client {

     pub(crate) fn send_prepared(&self, msg: &PreparedMsg) -> Result<(), StreamError> {
         match msg.stream_id {
-            0 => self.register_stream.lock().unwrap().send_raw(&msg.message),
-            1 => self
-                .character_screen_stream
-                .lock()
-                .unwrap()
-                .send_raw(&msg.message),
-            2 => self.in_game_stream.lock().unwrap().send_raw(&msg.message),
-            3 => self.general_stream.lock().unwrap().send_raw(&msg.message),
-            4 => self.ping_stream.lock().unwrap().send_raw(&msg.message),
-            5 => self.terrain_stream.lock().unwrap().send_raw(&msg.message),
+            0 => self.register_stream.send_raw(&msg.message),
+            1 => self.character_screen_stream.send_raw(&msg.message),
+            2 => self.in_game_stream.send_raw(&msg.message),
+            3 => self.general_stream.send_raw(&msg.message),
+            4 => self.ping_stream.send_raw(&msg.message),
+            5 => self.terrain_stream.send_raw(&msg.message),
             _ => unreachable!("invalid stream id"),
         }
     }
@@ -238,17 +235,17 @@ impl Client {
     }

     pub(crate) fn recv<M: DeserializeOwned>(
-        &self,
+        &mut self,
         stream_id: u8,
     ) -> Result<Option<M>, StreamError> {
         // TODO: are two systems using the same stream?? why is there contention here?
         match stream_id {
-            0 => self.register_stream.lock().unwrap().try_recv(),
-            1 => self.character_screen_stream.lock().unwrap().try_recv(),
-            2 => self.in_game_stream.lock().unwrap().try_recv(),
-            3 => self.general_stream.lock().unwrap().try_recv(),
-            4 => self.ping_stream.lock().unwrap().try_recv(),
-            5 => self.terrain_stream.lock().unwrap().try_recv(),
+            0 => self.register_stream.try_recv(),
+            1 => self.character_screen_stream.try_recv(),
+            2 => self.in_game_stream.try_recv(),
+            3 => self.general_stream.try_recv(),
+            4 => self.ping_stream.try_recv(),
+            5 => self.terrain_stream.try_recv(),
             _ => unreachable!("invalid stream id"),
         }
     }
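With the Mutexes gone, exclusive access is pushed up to the ECS layer: code that only sends can keep a shared `&Client`, while code that receives needs `&mut Client` (and therefore `WriteStorage<Client>` in the systems below). A hedged sketch of what that split looks like from a caller inside the server crate; the two free functions here are hypothetical, only the `Client` methods and the stream-id convention come from the diff above:

    use network::StreamError;

    // Sending needs only a shared borrow, because `send_prepared` stays `&self`.
    fn notify(client: &Client, msg: &PreparedMsg) -> Result<(), StreamError> {
        client.send_prepared(msg)
    }

    // Receiving needs an exclusive borrow, because `recv` is now `&mut self`;
    // stream id 4 is the ping stream per the match arms above.
    fn drain_pings(client: &mut Client) -> Result<(), StreamError> {
        while client.recv::<common_net::msg::PingMsg>(4)?.is_some() {}
        Ok(())
    }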
@@ -2,7 +2,7 @@ use crate::{Client, ClientType, ServerInfo};
 use crossbeam_channel::{bounded, unbounded, Receiver, Sender};
 use futures_util::future::FutureExt;
 use network::{Network, Participant, Promises};
-use std::{sync::Arc, time::Duration};
+use std::time::Duration;
 use tokio::{runtime::Runtime, select, sync::oneshot};
 use tracing::{debug, error, trace, warn};

@@ -14,7 +14,13 @@ pub(crate) struct ServerInfoPacket {
 pub(crate) type IncomingClient = Client;

 pub(crate) struct ConnectionHandler {
-    _network: Arc<Network>,
+    /// We never actually use this, but if it's dropped before the network has a
+    /// chance to exit, it won't block the main thread, and if it is dropped
+    /// after the network thread ends, it will drop the network here (rather
+    /// than delaying the network thread). So it emulates the effects of
+    /// storing the network in an Arc, without us losing mutability in the
+    /// network thread.
+    _network_receiver: oneshot::Receiver<Network>,
     thread_handle: Option<tokio::task::JoinHandle<()>>,
     pub client_receiver: Receiver<IncomingClient>,
     pub info_requester_receiver: Receiver<Sender<ServerInfoPacket>>,
@@ -27,36 +33,48 @@ pub(crate) struct ConnectionHandler {
 /// and time
 impl ConnectionHandler {
     pub fn new(network: Network, runtime: &Runtime) -> Self {
-        let network = Arc::new(network);
-        let network_clone = Arc::clone(&network);
         let (stop_sender, stop_receiver) = oneshot::channel();
+        let (network_sender, _network_receiver) = oneshot::channel();

         let (client_sender, client_receiver) = unbounded::<IncomingClient>();
         let (info_requester_sender, info_requester_receiver) =
             bounded::<Sender<ServerInfoPacket>>(1);

         let thread_handle = Some(runtime.spawn(Self::work(
-            network_clone,
+            network,
             client_sender,
             info_requester_sender,
             stop_receiver,
+            network_sender,
         )));

         Self {
-            _network: network,
             thread_handle,
             client_receiver,
             info_requester_receiver,
             stop_sender: Some(stop_sender),
+            _network_receiver,
         }
     }

     async fn work(
-        network: Arc<Network>,
+        network: Network,
         client_sender: Sender<IncomingClient>,
         info_requester_sender: Sender<Sender<ServerInfoPacket>>,
         stop_receiver: oneshot::Receiver<()>,
+        network_sender: oneshot::Sender<Network>,
     ) {
+        // Emulate the effects of storing the network in an Arc, without losing
+        // mutability.
+        let mut network_sender = Some(network_sender);
+        let mut network = drop_guard::guard(network, move |network| {
+            // If the network receiver was already dropped, we just drop the network here,
+            // just like Arc, so we don't care about the result.
+            let _ = network_sender
                .take()
                .expect("Only used once in drop")
                .send(network);
+        });
         let mut stop_receiver = stop_receiver.fuse();
         loop {
             let participant = match select!(
|
|||||||
cmd::ServerChatCommand,
|
cmd::ServerChatCommand,
|
||||||
comp,
|
comp,
|
||||||
event::{EventBus, ServerEvent},
|
event::{EventBus, ServerEvent},
|
||||||
resources::{BattleMode, Time, TimeOfDay},
|
resources::{BattleMode, GameMode, Time, TimeOfDay},
|
||||||
rtsim::RtSimEntity,
|
rtsim::RtSimEntity,
|
||||||
slowjob::SlowJobPool,
|
slowjob::SlowJobPool,
|
||||||
terrain::{TerrainChunk, TerrainChunkSize},
|
terrain::{TerrainChunk, TerrainChunkSize},
|
||||||
@ -244,7 +244,45 @@ impl Server {
|
|||||||
|
|
||||||
let battlemode_buffer = BattleModeBuffer::default();
|
let battlemode_buffer = BattleModeBuffer::default();
|
||||||
|
|
||||||
let mut state = State::server();
|
let pools = State::pools(GameMode::Server);
|
||||||
|
|
||||||
|
#[cfg(feature = "worldgen")]
|
||||||
|
let (world, index) = World::generate(
|
||||||
|
settings.world_seed,
|
||||||
|
WorldOpts {
|
||||||
|
seed_elements: true,
|
||||||
|
world_file: if let Some(ref opts) = settings.map_file {
|
||||||
|
opts.clone()
|
||||||
|
} else {
|
||||||
|
// Load default map from assets.
|
||||||
|
FileOpts::LoadAsset(DEFAULT_WORLD_MAP.into())
|
||||||
|
},
|
||||||
|
calendar: Some(settings.calendar_mode.calendar_now()),
|
||||||
|
},
|
||||||
|
&pools,
|
||||||
|
);
|
||||||
|
#[cfg(not(feature = "worldgen"))]
|
||||||
|
let (world, index) = World::generate(settings.world_seed);
|
||||||
|
|
||||||
|
#[cfg(feature = "worldgen")]
|
||||||
|
let map = world.get_map_data(index.as_index_ref(), &pools);
|
||||||
|
#[cfg(not(feature = "worldgen"))]
|
||||||
|
let map = WorldMapMsg {
|
||||||
|
dimensions_lg: Vec2::zero(),
|
||||||
|
max_height: 1.0,
|
||||||
|
rgba: Grid::new(Vec2::new(1, 1), 1),
|
||||||
|
horizons: [(vec![0], vec![0]), (vec![0], vec![0])],
|
||||||
|
alt: Grid::new(Vec2::new(1, 1), 1),
|
||||||
|
sites: Vec::new(),
|
||||||
|
pois: Vec::new(),
|
||||||
|
default_chunk: Arc::new(world.generate_oob_chunk()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut state = State::server(
|
||||||
|
pools,
|
||||||
|
world.sim().map_size_lg(),
|
||||||
|
Arc::clone(&map.default_chunk),
|
||||||
|
);
|
||||||
state.ecs_mut().insert(battlemode_buffer);
|
state.ecs_mut().insert(battlemode_buffer);
|
||||||
state.ecs_mut().insert(settings.clone());
|
state.ecs_mut().insert(settings.clone());
|
||||||
state.ecs_mut().insert(editable_settings);
|
state.ecs_mut().insert(editable_settings);
|
||||||
@ -295,6 +333,7 @@ impl Server {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let pool = state.ecs_mut().write_resource::<SlowJobPool>();
|
let pool = state.ecs_mut().write_resource::<SlowJobPool>();
|
||||||
|
pool.configure("CHUNK_DROP", |_n| 1);
|
||||||
pool.configure("CHUNK_GENERATOR", |n| n / 2 + n / 4);
|
pool.configure("CHUNK_GENERATOR", |n| n / 2 + n / 4);
|
||||||
pool.configure("CHUNK_SERIALIZER", |n| n / 2);
|
pool.configure("CHUNK_SERIALIZER", |n| n / 2);
|
||||||
}
|
}
|
||||||
@ -350,39 +389,6 @@ impl Server {
|
|||||||
.ecs_mut()
|
.ecs_mut()
|
||||||
.insert(AutoMod::new(&settings.moderation, censor));
|
.insert(AutoMod::new(&settings.moderation, censor));
|
||||||
|
|
||||||
#[cfg(feature = "worldgen")]
|
|
||||||
let (world, index) = World::generate(
|
|
||||||
settings.world_seed,
|
|
||||||
WorldOpts {
|
|
||||||
seed_elements: true,
|
|
||||||
world_file: if let Some(ref opts) = settings.map_file {
|
|
||||||
opts.clone()
|
|
||||||
} else {
|
|
||||||
// Load default map from assets.
|
|
||||||
FileOpts::LoadAsset(DEFAULT_WORLD_MAP.into())
|
|
||||||
},
|
|
||||||
calendar: Some(settings.calendar_mode.calendar_now()),
|
|
||||||
},
|
|
||||||
state.thread_pool(),
|
|
||||||
);
|
|
||||||
|
|
||||||
#[cfg(feature = "worldgen")]
|
|
||||||
let map = world.get_map_data(index.as_index_ref(), state.thread_pool());
|
|
||||||
|
|
||||||
#[cfg(not(feature = "worldgen"))]
|
|
||||||
let (world, index) = World::generate(settings.world_seed);
|
|
||||||
#[cfg(not(feature = "worldgen"))]
|
|
||||||
let map = WorldMapMsg {
|
|
||||||
dimensions_lg: Vec2::zero(),
|
|
||||||
max_height: 1.0,
|
|
||||||
rgba: Grid::new(Vec2::new(1, 1), 1),
|
|
||||||
horizons: [(vec![0], vec![0]), (vec![0], vec![0])],
|
|
||||||
sea_level: 0.0,
|
|
||||||
alt: Grid::new(Vec2::new(1, 1), 1),
|
|
||||||
sites: Vec::new(),
|
|
||||||
pois: Vec::new(),
|
|
||||||
};
|
|
||||||
|
|
||||||
state.ecs_mut().insert(map);
|
state.ecs_mut().insert(map);
|
||||||
|
|
||||||
#[cfg(feature = "worldgen")]
|
#[cfg(feature = "worldgen")]
|
||||||
@ -784,10 +790,11 @@ impl Server {
|
|||||||
// so, we delete them. We check for
|
// so, we delete them. We check for
|
||||||
// `home_chunk` in order to avoid duplicating
|
// `home_chunk` in order to avoid duplicating
|
||||||
// the entity under some circumstances.
|
// the entity under some circumstances.
|
||||||
terrain.get_key(chunk_key).is_none() && terrain.get_key(*hc).is_none()
|
terrain.get_key_real(chunk_key).is_none()
|
||||||
|
&& terrain.get_key_real(*hc).is_none()
|
||||||
},
|
},
|
||||||
Some(Anchor::Entity(entity)) => !self.state.ecs().is_alive(*entity),
|
Some(Anchor::Entity(entity)) => !self.state.ecs().is_alive(*entity),
|
||||||
None => terrain.get_key(chunk_key).is_none(),
|
None => terrain.get_key_real(chunk_key).is_none(),
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.map(|(entity, _, _, _)| entity)
|
.map(|(entity, _, _, _)| entity)
|
||||||
|
@ -97,16 +97,15 @@ impl LoginProvider {
|
|||||||
PendingLogin { pending_r }
|
PendingLogin { pending_r }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn login(
|
pub(crate) fn login<R>(
|
||||||
&mut self,
|
|
||||||
pending: &mut PendingLogin,
|
pending: &mut PendingLogin,
|
||||||
#[cfg(feature = "plugins")] world: &EcsWorld,
|
#[cfg(feature = "plugins")] world: &EcsWorld,
|
||||||
#[cfg(feature = "plugins")] plugin_manager: &PluginMgr,
|
#[cfg(feature = "plugins")] plugin_manager: &PluginMgr,
|
||||||
admins: &HashMap<Uuid, AdminRecord>,
|
admins: &HashMap<Uuid, AdminRecord>,
|
||||||
whitelist: &HashMap<Uuid, WhitelistRecord>,
|
whitelist: &HashMap<Uuid, WhitelistRecord>,
|
||||||
banlist: &HashMap<Uuid, BanEntry>,
|
banlist: &HashMap<Uuid, BanEntry>,
|
||||||
player_count_exceeded: bool,
|
player_count_exceeded: impl FnOnce(String, Uuid) -> (bool, R),
|
||||||
) -> Option<Result<(String, Uuid), RegisterError>> {
|
) -> Option<Result<R, RegisterError>> {
|
||||||
match pending.pending_r.try_recv() {
|
match pending.pending_r.try_recv() {
|
||||||
Ok(Err(e)) => Some(Err(e)),
|
Ok(Err(e)) => Some(Err(e)),
|
||||||
Ok(Ok((username, uuid))) => {
|
Ok(Ok((username, uuid))) => {
|
||||||
@ -138,15 +137,13 @@ impl LoginProvider {
|
|||||||
return Some(Err(RegisterError::NotOnWhitelist));
|
return Some(Err(RegisterError::NotOnWhitelist));
|
||||||
}
|
}
|
||||||
|
|
||||||
// non-admins can only join if the player count has not been exceeded.
|
|
||||||
if admin.is_none() && player_count_exceeded {
|
|
||||||
return Some(Err(RegisterError::TooManyPlayers));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "plugins")]
|
#[cfg(feature = "plugins")]
|
||||||
{
|
{
|
||||||
// Plugin player join hooks execute for all players, but are only allowed to
|
// Plugin player join hooks execute for all players, but are only allowed to
|
||||||
// filter non-admins.
|
// filter non-admins.
|
||||||
|
//
|
||||||
|
// We also run it before checking player count, to avoid lock contention in the
|
||||||
|
// plugin.
|
||||||
match plugin_manager.execute_event(world, &PlayerJoinEvent {
|
match plugin_manager.execute_event(world, &PlayerJoinEvent {
|
||||||
player_name: username.clone(),
|
player_name: username.clone(),
|
||||||
player_id: *uuid.as_bytes(),
|
player_id: *uuid.as_bytes(),
|
||||||
@ -166,8 +163,13 @@ impl LoginProvider {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
info!(?username, "New User");
|
// non-admins can only join if the player count has not been exceeded.
|
||||||
Some(Ok((username, uuid)))
|
let (player_count_exceeded, res) = player_count_exceeded(username, uuid);
|
||||||
|
if admin.is_none() && player_count_exceeded {
|
||||||
|
return Some(Err(RegisterError::TooManyPlayers));
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Ok(res))
|
||||||
},
|
},
|
||||||
Err(oneshot::error::TryRecvError::Closed) => {
|
Err(oneshot::error::TryRecvError::Closed) => {
|
||||||
error!("channel got closed to early, this shouldn't happen");
|
error!("channel got closed to early, this shouldn't happen");
|
||||||
|
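The `login` refactor replaces the precomputed `player_count_exceeded: bool` with an `FnOnce(String, Uuid) -> (bool, R)` callback, so the caller decides at the last possible moment, with the verified username and UUID in hand, whether the cap is hit and what value to return on success. A self-contained illustration of that callback shape (toy types throughout; `admit` and its error strings are placeholders, not the real server API):

    // Standalone illustration of the callback shape: the caller packages both
    // the admission decision and the success value in one closure call.
    fn admit<R>(
        username: String,
        uuid: u64, // stand-in for the real Uuid type
        is_admin: bool,
        player_count_exceeded: impl FnOnce(String, u64) -> (bool, R),
    ) -> Result<R, &'static str> {
        let (exceeded, res) = player_count_exceeded(username, uuid);
        if !is_admin && exceeded {
            return Err("too many players");
        }
        Ok(res)
    }

    fn main() {
        let out = admit("alice".to_string(), 7, false, |username, uuid| {
            let exceeded = false; // e.g. players.len() >= max_players
            (exceeded, (username, uuid))
        });
        assert_eq!(out, Ok(("alice".to_string(), 7)));
    }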
@@ -162,7 +162,7 @@ pub struct Settings {
     pub gameserver_protocols: Vec<Protocol>,
     pub metrics_address: SocketAddr,
     pub auth_server_address: Option<String>,
-    pub max_players: usize,
+    pub max_players: u16,
     pub world_seed: u32,
     pub server_name: String,
     pub start_time: f64,
@@ -108,7 +108,7 @@ impl<'a> System<'a> for Sys {
             .into_iter()
             .filter_map(|(chunk_key, meta)| {
                 terrain
-                    .get_key_arc(chunk_key)
+                    .get_key_arc_real(chunk_key)
                     .map(|chunk| (Arc::clone(chunk), chunk_key, meta))
             })
             .into_iter()
@@ -13,7 +13,7 @@ use common::{
 };
 use common_ecs::{Job, Origin, Phase, System};
 use common_net::msg::{ClientGeneral, ServerGeneral};
-use specs::{Entities, Join, Read, ReadExpect, ReadStorage, WriteExpect};
+use specs::{Entities, Join, Read, ReadExpect, ReadStorage, WriteExpect, WriteStorage};
 use std::sync::{atomic::Ordering, Arc};
 use tracing::debug;

@@ -221,7 +221,7 @@ impl<'a> System<'a> for Sys {
         ReadExpect<'a, CharacterLoader>,
         WriteExpect<'a, CharacterUpdater>,
         ReadStorage<'a, Uid>,
-        ReadStorage<'a, Client>,
+        WriteStorage<'a, Client>,
         ReadStorage<'a, Player>,
         ReadStorage<'a, Admin>,
         ReadStorage<'a, Presence>,
@@ -242,7 +242,7 @@ impl<'a> System<'a> for Sys {
             character_loader,
             mut character_updater,
             uids,
-            clients,
+            mut clients,
             players,
             admins,
             presences,
@@ -253,7 +253,7 @@ impl<'a> System<'a> for Sys {
     ) {
         let mut server_emitter = server_event_bus.emitter();

-        for (entity, client) in (&entities, &clients).join() {
+        for (entity, client) in (&entities, &mut clients).join() {
             let _ = super::try_recv_all(client, 1, |client, msg| {
                 Self::handle_client_character_screen_msg(
                     &mut server_emitter,
@@ -7,7 +7,8 @@ use common::{
 };
 use common_ecs::{Job, Origin, Phase, System};
 use common_net::msg::ClientGeneral;
-use specs::{Entities, Join, Read, ReadStorage};
+use rayon::prelude::*;
+use specs::{Entities, Join, ParJoin, Read, ReadStorage, WriteStorage};
 use tracing::{debug, error, warn};

 impl Sys {
@@ -70,7 +71,7 @@ impl<'a> System<'a> for Sys {
         ReadStorage<'a, Uid>,
         ReadStorage<'a, ChatMode>,
         ReadStorage<'a, Player>,
-        ReadStorage<'a, Client>,
+        WriteStorage<'a, Client>,
     );

     const NAME: &'static str = "msg::general";
@@ -79,27 +80,30 @@ impl<'a> System<'a> for Sys {

     fn run(
         _job: &mut Job<Self>,
-        (entities, server_event_bus, time, uids, chat_modes, players, clients): Self::SystemData,
+        (entities, server_event_bus, time, uids, chat_modes, players, mut clients): Self::SystemData,
     ) {
-        let mut server_emitter = server_event_bus.emitter();
-
-        for (entity, client, player) in (&entities, &clients, players.maybe()).join() {
-            let res = super::try_recv_all(client, 3, |client, msg| {
-                Self::handle_general_msg(
-                    &mut server_emitter,
-                    entity,
-                    client,
-                    player,
-                    &uids,
-                    &chat_modes,
-                    msg,
-                )
-            });
-
-            if let Ok(1_u64..=u64::MAX) = res {
-                // Update client ping.
-                *client.last_ping.lock().unwrap() = time.0
-            }
-        }
+        (&entities, &mut clients, players.maybe())
+            .par_join()
+            .for_each_init(
+                || server_event_bus.emitter(),
+                |server_emitter, (entity, client, player)| {
+                    let res = super::try_recv_all(client, 3, |client, msg| {
+                        Self::handle_general_msg(
+                            server_emitter,
+                            entity,
+                            client,
+                            player,
+                            &uids,
+                            &chat_modes,
+                            msg,
+                        )
+                    });
+
+                    if let Ok(1_u64..=u64::MAX) = res {
+                        // Update client ping.
+                        client.last_ping = time.0
+                    }
+                },
+            );
     }
 }
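This is the core of the change for the message systems: instead of a sequential `join()` sharing one `EventBus` emitter, the system `par_join`s over `WriteStorage<Client>` and gives each rayon worker its own emitter via `for_each_init`. A minimal self-contained sketch of the same specs + rayon pattern (assuming specs' `parallel` feature, as this repository uses), with a toy component standing in for `Client` and a plain counter standing in for the emitter:

    use rayon::iter::ParallelIterator;
    use specs::{
        Builder, Component, Entities, ParJoin, System, VecStorage, World, WorldExt, WriteStorage,
    };

    struct Inbox(u32);
    impl Component for Inbox {
        type Storage = VecStorage<Self>;
    }

    struct DrainInboxes;
    impl<'a> System<'a> for DrainInboxes {
        type SystemData = (Entities<'a>, WriteStorage<'a, Inbox>);

        fn run(&mut self, (entities, mut inboxes): Self::SystemData) {
            // `par_join` hands each worker disjoint (entity, &mut Inbox) pairs;
            // `for_each_init` builds one per-worker value (here a counter,
            // standing in for `server_event_bus.emitter()`).
            (&entities, &mut inboxes).par_join().for_each_init(
                || 0u32,
                |local_counter, (_entity, inbox)| {
                    *local_counter += inbox.0;
                    inbox.0 = 0; // "processed" this client's pending messages
                },
            );
        }
    }

    fn main() {
        let mut world = World::new();
        world.register::<Inbox>();
        world.create_entity().with(Inbox(3)).build();
        world.create_entity().with(Inbox(5)).build();

        let mut sys = DrainInboxes;
        specs::RunNow::run_now(&mut sys, &world);
    }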
@@ -29,7 +29,7 @@ pub fn add_server_systems(dispatch_builder: &mut DispatcherBuilder) {
 /// handles all send msg and calls a handle fn
 /// Aborts when a error occurred returns cnt of successful msg otherwise
 pub(crate) fn try_recv_all<M, F>(
-    client: &Client,
+    client: &mut Client,
     stream_id: u8,
     mut f: F,
 ) -> Result<u64, crate::error::Error>
@@ -5,7 +5,8 @@ use common::{
 };
 use common_ecs::{Job, Origin, Phase, System};
 use common_net::msg::PingMsg;
-use specs::{Entities, Join, Read, ReadStorage};
+use rayon::prelude::*;
+use specs::{Entities, ParJoin, Read, WriteStorage};
 use tracing::{debug, info};

 impl Sys {
@@ -26,7 +27,7 @@ impl<'a> System<'a> for Sys {
         Entities<'a>,
         Read<'a, EventBus<ServerEvent>>,
         Read<'a, Time>,
-        ReadStorage<'a, Client>,
+        WriteStorage<'a, Client>,
         Read<'a, Settings>,
     );

@@ -36,45 +37,49 @@ impl<'a> System<'a> for Sys {

     fn run(
         _job: &mut Job<Self>,
-        (entities, server_event_bus, time, clients, settings): Self::SystemData,
+        (entities, server_event_bus, time, mut clients, settings): Self::SystemData,
     ) {
-        let mut server_emitter = server_event_bus.emitter();
-
-        for (entity, client) in (&entities, &clients).join() {
-            // ignore network events
-            while let Some(Ok(Some(_))) = client.participant.as_ref().map(|p| p.try_fetch_event()) {
-            }
-
-            let res = super::try_recv_all(client, 4, Self::handle_ping_msg);
-
-            match res {
-                Err(e) => {
-                    debug!(?entity, ?e, "network error with client, disconnecting");
-                    server_emitter.emit(ServerEvent::ClientDisconnect(
-                        entity,
-                        common::comp::DisconnectReason::NetworkError,
-                    ));
-                },
-                Ok(1_u64..=u64::MAX) => {
-                    // Update client ping.
-                    *client.last_ping.lock().unwrap() = time.0
-                },
-                Ok(0) => {
-                    let last_ping: f64 = *client.last_ping.lock().unwrap();
-                    if time.0 - last_ping > settings.client_timeout.as_secs() as f64
-                    // Timeout
-                    {
-                        info!(?entity, "timeout error with client, disconnecting");
-                        server_emitter.emit(ServerEvent::ClientDisconnect(
-                            entity,
-                            common::comp::DisconnectReason::Timeout,
-                        ));
-                    } else if time.0 - last_ping > settings.client_timeout.as_secs() as f64 * 0.5 {
-                        // Try pinging the client if the timeout is nearing.
-                        client.send_fallible(PingMsg::Ping);
-                    }
-                },
-            }
-        }
+        (&entities, &mut clients).par_join().for_each_init(
+            || server_event_bus.emitter(),
+            |server_emitter, (entity, client)| {
+                // ignore network events
+                while let Some(Ok(Some(_))) =
+                    client.participant.as_mut().map(|p| p.try_fetch_event())
+                {}
+
+                let res = super::try_recv_all(client, 4, Self::handle_ping_msg);
+
+                match res {
+                    Err(e) => {
+                        debug!(?entity, ?e, "network error with client, disconnecting");
+                        server_emitter.emit(ServerEvent::ClientDisconnect(
+                            entity,
+                            common::comp::DisconnectReason::NetworkError,
+                        ));
+                    },
+                    Ok(1_u64..=u64::MAX) => {
+                        // Update client ping.
+                        client.last_ping = time.0
+                    },
+                    Ok(0) => {
+                        let last_ping: f64 = client.last_ping;
+                        if time.0 - last_ping > settings.client_timeout.as_secs() as f64
+                        // Timeout
+                        {
+                            info!(?entity, "timeout error with client, disconnecting");
+                            server_emitter.emit(ServerEvent::ClientDisconnect(
+                                entity,
+                                common::comp::DisconnectReason::Timeout,
+                            ));
+                        } else if time.0 - last_ping
+                            > settings.client_timeout.as_secs() as f64 * 0.5
+                        {
+                            // Try pinging the client if the timeout is nearing.
+                            client.send_fallible(PingMsg::Ping);
+                        }
+                    },
+                }
+            },
+        );
     }
 }
@ -12,18 +12,21 @@ use common::{
|
|||||||
resources::TimeOfDay,
|
resources::TimeOfDay,
|
||||||
uid::{Uid, UidAllocator},
|
uid::{Uid, UidAllocator},
|
||||||
};
|
};
|
||||||
|
use common_base::prof_span;
|
||||||
use common_ecs::{Job, Origin, Phase, System};
|
use common_ecs::{Job, Origin, Phase, System};
|
||||||
use common_net::msg::{
|
use common_net::msg::{
|
||||||
CharacterInfo, ClientRegister, DisconnectReason, PlayerInfo, PlayerListUpdate, RegisterError,
|
CharacterInfo, ClientRegister, DisconnectReason, PlayerInfo, PlayerListUpdate, RegisterError,
|
||||||
ServerGeneral, ServerInit, WorldMapMsg,
|
ServerGeneral, ServerInit, WorldMapMsg,
|
||||||
};
|
};
|
||||||
use hashbrown::HashMap;
|
use hashbrown::{hash_map, HashMap};
|
||||||
|
use itertools::Either;
|
||||||
use plugin_api::Health;
|
use plugin_api::Health;
|
||||||
|
use rayon::prelude::*;
|
||||||
use specs::{
|
use specs::{
|
||||||
shred::ResourceId, storage::StorageEntry, Entities, Join, Read, ReadExpect, ReadStorage,
|
shred::ResourceId, Entities, Join, ParJoin, Read, ReadExpect, ReadStorage, SystemData, World,
|
||||||
SystemData, World, WriteExpect, WriteStorage,
|
WriteStorage,
|
||||||
};
|
};
|
||||||
use tracing::{debug, trace};
|
use tracing::{debug, info, trace, warn};
|
||||||
|
|
||||||
#[cfg(feature = "plugins")]
|
#[cfg(feature = "plugins")]
|
||||||
use {common_state::plugin::memory_manager::EcsWorld, common_state::plugin::PluginMgr};
|
use {common_state::plugin::memory_manager::EcsWorld, common_state::plugin::PluginMgr};
|
||||||
@ -38,8 +41,8 @@ pub struct ReadData<'a> {
|
|||||||
entities: Entities<'a>,
|
entities: Entities<'a>,
|
||||||
stats: ReadStorage<'a, Stats>,
|
stats: ReadStorage<'a, Stats>,
|
||||||
uids: ReadStorage<'a, Uid>,
|
uids: ReadStorage<'a, Uid>,
|
||||||
clients: ReadStorage<'a, Client>,
|
|
||||||
server_event_bus: Read<'a, EventBus<ServerEvent>>,
|
server_event_bus: Read<'a, EventBus<ServerEvent>>,
|
||||||
|
login_provider: ReadExpect<'a, LoginProvider>,
|
||||||
player_metrics: ReadExpect<'a, PlayerMetrics>,
|
player_metrics: ReadExpect<'a, PlayerMetrics>,
|
||||||
settings: ReadExpect<'a, Settings>,
|
settings: ReadExpect<'a, Settings>,
|
||||||
editable_settings: ReadExpect<'a, EditableSettings>,
|
editable_settings: ReadExpect<'a, EditableSettings>,
|
||||||
@ -59,10 +62,10 @@ pub struct Sys;
|
|||||||
impl<'a> System<'a> for Sys {
|
impl<'a> System<'a> for Sys {
|
||||||
type SystemData = (
|
type SystemData = (
|
||||||
ReadData<'a>,
|
ReadData<'a>,
|
||||||
|
WriteStorage<'a, Client>,
|
||||||
WriteStorage<'a, Player>,
|
WriteStorage<'a, Player>,
|
||||||
WriteStorage<'a, Admin>,
|
WriteStorage<'a, Admin>,
|
||||||
WriteStorage<'a, PendingLogin>,
|
WriteStorage<'a, PendingLogin>,
|
||||||
WriteExpect<'a, LoginProvider>,
|
|
||||||
);
|
);
|
||||||
|
|
||||||
const NAME: &'static str = "msg::register";
|
const NAME: &'static str = "msg::register";
|
||||||
@@ -71,218 +74,346 @@ impl<'a> System<'a> for Sys {
 
     fn run(
         _job: &mut Job<Self>,
-        (
-            read_data,
-            mut players,
-            mut admins,
-            mut pending_logins,
-            mut login_provider,
-        ): Self::SystemData,
+        (read_data, mut clients, mut players, mut admins, mut pending_logins): Self::SystemData,
     ) {
-        let mut server_emitter = read_data.server_event_bus.emitter();
-        // Player list to send new players.
-        let player_list = (
+        // Player list to send new players, and lookup from UUID to entity (so we don't
+        // have to do a linear scan over all entities on each login to see if
+        // it's a duplicate).
+        //
+        // NOTE: For this to work as desired, we must maintain the invariant that there
+        // is just one player per UUID!
+        let (player_list, old_players_by_uuid): (HashMap<_, _>, HashMap<_, _>) = (
+            &read_data.entities,
             &read_data.uids,
             &players,
             read_data.stats.maybe(),
             admins.maybe(),
         )
             .join()
-            .map(|(uid, player, stats, admin)| {
-                (*uid, PlayerInfo {
-                    is_online: true,
-                    is_moderator: admin.is_some(),
-                    player_alias: player.alias.clone(),
-                    character: stats.map(|stats| CharacterInfo {
-                        name: stats.name.clone(),
-                    }),
-                    uuid: player.uuid(),
-                })
-            })
-            .collect::<HashMap<_, _>>();
+            .map(|(entity, uid, player, stats, admin)| {
+                (
+                    (*uid, PlayerInfo {
+                        is_online: true,
+                        is_moderator: admin.is_some(),
+                        player_alias: player.alias.clone(),
+                        character: stats.map(|stats| CharacterInfo {
+                            name: stats.name.clone(),
+                        }),
+                        uuid: player.uuid(),
+                    }),
+                    (player.uuid(), entity),
+                )
+            })
+            .unzip();
+        let max_players = usize::from(read_data.settings.max_players);
+        // NOTE: max_players starts as a u16, so this will not use unlimited memory even
+        // if people set absurdly high values (though we should also cap the value
+        // elsewhere).
+        let capacity = max_players * 2;
         // List of new players to update player lists of all clients.
-        let mut new_players = Vec::new();
+        //
+        // Big enough that we hopefully won't have to reallocate.
+        //
+        // Also includes a list of logins to retry and finished_pending, since we
+        // happen to update those around the same time that we update the new
+        // players list.
+        //
+        // NOTE: stdlib mutex is more than good enough on Linux and (probably) Windows,
+        // but not Mac.
+        let new_players = parking_lot::Mutex::new((
+            HashMap::<_, (_, _, _, _)>::with_capacity(capacity),
+            Vec::with_capacity(capacity),
+            Vec::with_capacity(capacity),
+        ));
 
         // defer auth lockup
-        for (entity, client) in (&read_data.entities, &read_data.clients).join() {
+        for (entity, client) in (&read_data.entities, &mut clients).join() {
             let _ = super::try_recv_all(client, 0, |_, msg: ClientRegister| {
                 trace!(?msg.token_or_username, "defer auth lockup");
-                let pending = login_provider.verify(&msg.token_or_username);
+                let pending = read_data.login_provider.verify(&msg.token_or_username);
                 let _ = pending_logins.insert(entity, pending);
                 Ok(())
             });
         }
 
-        let mut finished_pending = vec![];
-        let mut retries = vec![];
-        let mut player_count = player_list.len();
-        let max_players = read_data.settings.max_players;
-        for (entity, client, pending) in
-            (&read_data.entities, &read_data.clients, &mut pending_logins).join()
-        {
-            if let Err(e) = || -> Result<(), crate::error::Error> {
-                #[cfg(feature = "plugins")]
-                let ecs_world = EcsWorld {
-                    entities: &read_data.entities,
-                    health: (&read_data._healths).into(),
-                    uid: (&read_data.uids).into(),
-                    player: (&players).into(),
-                    uid_allocator: &read_data._uid_allocator,
-                };
-
-                let (username, uuid) = match login_provider.login(
-                    pending,
-                    #[cfg(feature = "plugins")]
-                    &ecs_world,
-                    #[cfg(feature = "plugins")]
-                    &read_data._plugin_mgr,
-                    &read_data.editable_settings.admins,
-                    &read_data.editable_settings.whitelist,
-                    &read_data.editable_settings.banlist,
-                    player_count >= max_players,
-                ) {
-                    None => return Ok(()),
-                    Some(r) => {
-                        finished_pending.push(entity);
-                        trace!(?r, "pending login returned");
-                        match r {
-                            Err(e) => {
-                                server_emitter.emit(ServerEvent::ClientDisconnect(
-                                    entity,
-                                    common::comp::DisconnectReason::Kicked,
-                                ));
-                                client.send(Err(e))?;
-                                return Ok(());
-                            },
-                            Ok((username, uuid)) => (username, uuid),
-                        }
-                    },
-                };
-
-                // Check if user is already logged-in
-                if let Some((old_entity, old_client, _)) =
-                    (&read_data.entities, &read_data.clients, &players)
-                        .join()
-                        .find(|(_, _, old_player)| old_player.uuid() == uuid)
-                {
-                    // Remove old client
-                    server_emitter.emit(ServerEvent::ClientDisconnect(
-                        old_entity,
-                        common::comp::DisconnectReason::NewerLogin,
-                    ));
-                    let _ = old_client.send(ServerGeneral::Disconnect(DisconnectReason::Kicked(
-                        String::from("You have logged in from another location."),
-                    )));
-                    // We can't login the new client right now as the
-                    // removal of the old client and player occurs later in
-                    // the tick, so we instead setup the new login to be
-                    // processed in the next tick
-                    // Create "fake" successful pending auth and mark it to
-                    // be inserted into pending_logins at the end of this
-                    // run
-                    retries.push((entity, PendingLogin::new_success(username, uuid)));
-                    return Ok(());
-                }
-
-                // NOTE: this is just default value.
-                //
-                // It will be overwritten in ServerExt::update_character_data.
-                let battle_mode = read_data.settings.gameplay.battle_mode.default_mode();
-                let player = Player::new(username, battle_mode, uuid, None);
-
-                let admin = read_data.editable_settings.admins.get(&uuid);
-
-                if !player.is_valid() {
-                    // Invalid player
-                    client.send(Err(RegisterError::InvalidCharacter))?;
-                    return Ok(());
-                }
-
-                if let Ok(StorageEntry::Vacant(v)) = players.entry(entity) {
-                    // Add Player component to this client, if the entity exists.
-                    v.insert(player);
-                    player_count += 1;
-                    read_data.player_metrics.players_connected.inc();
-
-                    // Give the Admin component to the player if their name exists in
-                    // admin list
-                    if let Some(admin) = admin {
-                        admins
-                            .insert(entity, Admin(admin.role.into()))
-                            .expect("Inserting into players proves the entity exists.");
-                    }
-
-                    // Tell the client its request was successful.
-                    client.send(Ok(()))?;
-
-                    // Send client all the tracked components currently attached to its entity as
-                    // well as synced resources (currently only `TimeOfDay`)
-                    debug!("Starting initial sync with client.");
-                    client.send(ServerInit::GameSync {
-                        // Send client their entity
-                        entity_package:
-                            read_data.trackers
-                                .create_entity_package(entity, None, None, None)
-                                // NOTE: We are apparently okay with crashing if a uid is removed from
-                                // a non-logged-in player without removing the whole thing.
-                                .expect(
-                                    "We created this entity as marked() (using create_entity_synced) so \
-                                     it definitely has a uid",
-                                ),
-                        time_of_day: *read_data.time_of_day,
-                        max_group_size: read_data.settings.max_player_group_size,
-                        client_timeout: read_data.settings.client_timeout,
-                        world_map: (*read_data.map).clone(),
-                        recipe_book: default_recipe_book().cloned(),
-                        component_recipe_book: default_component_recipe_book().cloned(),
-                        material_stats: (*read_data.material_stats).clone(),
-                        ability_map: (*read_data.ability_map).clone(),
-                    })?;
-                    debug!("Done initial sync with client.");
-
-                    // Send initial player list
-                    client.send(ServerGeneral::PlayerListUpdate(PlayerListUpdate::Init(
-                        player_list.clone(),
-                    )))?;
-
-                    // Add to list to notify all clients of the new player
-                    new_players.push(entity);
-                }
-                Ok(())
-            }() {
-                trace!(?e, "failed to process register")
-            };
-        }
-        for e in finished_pending {
-            pending_logins.remove(e);
-        }
+        let old_player_count = player_list.len();
+        #[cfg(feature = "plugins")]
+        let ecs_world = EcsWorld {
+            entities: &read_data.entities,
+            health: (&read_data._healths).into(),
+            uid: (&read_data.uids).into(),
+            // NOTE: Only the old player list is provided, to avoid scalability
+            // bottlenecks.
+            player: (&players).into(),
+            uid_allocator: &read_data._uid_allocator,
+        };
+
+        // NOTE: this is just default value.
+        //
+        // It will be overwritten in ServerExt::update_character_data.
+        let battle_mode = read_data.settings.gameplay.battle_mode.default_mode();
+
+        (
+            &read_data.entities,
+            &read_data.uids,
+            &clients,
+            !players.mask(),
+            &mut pending_logins,
+        )
+            .join()
+            // NOTE: Required because Specs has very poor work splitting for sparse joins.
+            .par_bridge()
+            .for_each_init(
+                || read_data.server_event_bus.emitter(),
+                |server_emitter, (entity, uid, client, _, pending)| {
+                    prof_span!("msg::register login");
+                    if let Err(e) = || -> Result<(), crate::error::Error> {
+                        let extra_checks = |username: String, uuid: authc::Uuid| {
+                            // We construct a few things outside the lock to reduce contention.
+                            let pending_login =
+                                PendingLogin::new_success(username.clone(), uuid);
+                            let player = Player::new(username, battle_mode, uuid, None);
+                            let admin = read_data.editable_settings.admins.get(&uuid);
+                            let msg = player
+                                .is_valid()
+                                .then_some(PlayerInfo {
+                                    player_alias: player.alias.clone(),
+                                    is_online: true,
+                                    is_moderator: admin.is_some(),
+                                    character: None, // new players will be on character select.
+                                    uuid: player.uuid(),
+                                })
+                                .map(|player_info| {
+                                    // Prepare the player list update to be sent to all clients.
+                                    client.prepare(ServerGeneral::PlayerListUpdate(
+                                        PlayerListUpdate::Add(*uid, player_info),
+                                    ))
+                                });
+                            // Check if this player was already logged in before the system
+                            // started.
+                            let old_player = old_players_by_uuid
+                                .get(&uuid)
+                                .copied()
+                                // We only need the old client to report an error; however, we
+                                // can't assume the old player has a client (even though it would
+                                // be a bit strange for them not to), so we have to remember that
+                                // case. So we grab the old client (outside the lock, to avoid
+                                // contention). We have to distinguish this from the case of a
+                                // *new* player already having logged in (which we can't check
+                                // until the lock is taken); in that case, we *know* the client
+                                // is present, since the list is only populated by the current
+                                // join (which includes the client).
+                                .map(|entity| (entity, Some(clients.get(entity))));
+                            // We take the lock only when necessary, and for a short duration,
+                            // to avoid contention with other threads. We need to hold the
+                            // guard past the end of the login function because otherwise
+                            // there's a race between when we read it and when we (potentially)
+                            // write to it.
+                            let guard = new_players.lock();
+                            // Guard comes first in the tuple so it's dropped before the other
+                            // stuff if login returns an error.
+                            (
+                                old_player_count + guard.0.len() >= max_players,
+                                (guard, (pending_login, player, admin, msg, old_player)),
+                            )
+                        };
+                        // Destructure new_players_guard last so it gets dropped before the other
+                        // three.
+                        let (
+                            (pending_login, player, admin, msg, old_player),
+                            mut new_players_guard,
+                        ) = match LoginProvider::login(
+                            pending,
+                            #[cfg(feature = "plugins")]
+                            &ecs_world,
+                            #[cfg(feature = "plugins")]
+                            &read_data._plugin_mgr,
+                            &read_data.editable_settings.admins,
+                            &read_data.editable_settings.whitelist,
+                            &read_data.editable_settings.banlist,
+                            extra_checks,
+                        ) {
+                            None => return Ok(()),
+                            Some(r) => {
+                                match r {
+                                    Err(e) => {
+                                        new_players.lock().2.push(entity);
+                                        // NOTE: Done only on error to avoid doing extra work within
+                                        // the lock.
+                                        trace!(?e, "pending login returned error");
+                                        server_emitter.emit(ServerEvent::ClientDisconnect(
+                                            entity,
+                                            common::comp::DisconnectReason::Kicked,
+                                        ));
+                                        client.send(Err(e))?;
+                                        return Ok(());
+                                    },
+                                    // Swap the order of the tuple, so when it's destructured guard
+                                    // is dropped first.
+                                    Ok((guard, res)) => (res, guard),
+                                }
+                            },
+                        };
+
+                        let (new_players_by_uuid, retries, finished_pending) = &mut *new_players_guard;
+                        finished_pending.push(entity);
+                        // Check if the user logged in before us during this tick (this is why we
+                        // need the lock held).
+                        let uuid = player.uuid();
+                        let old_player = old_player.map_or_else(
+                            move || match new_players_by_uuid.entry(uuid) {
+                                // We don't actually extract the client yet, to avoid doing extra
+                                // work with the lock held.
+                                hash_map::Entry::Occupied(o) => Either::Left((o.get().0, None)),
+                                hash_map::Entry::Vacant(v) => Either::Right(v),
+                            },
+                            Either::Left,
+                        );
+                        let vacant_player = match old_player {
+                            Either::Left((old_entity, old_client)) => {
+                                if matches!(old_client, None | Some(Some(_))) {
+                                    // We can't login the new client right now as the
+                                    // removal of the old client and player occurs later in
+                                    // the tick, so we instead setup the new login to be
+                                    // processed in the next tick
+                                    // Create "fake" successful pending auth and mark it to
+                                    // be inserted into pending_logins at the end of this
+                                    // run.
+                                    retries.push((entity, pending_login));
+                                    drop(new_players_guard);
+                                    let old_client = old_client
+                                        .flatten()
+                                        .or_else(|| clients.get(old_entity))
+                                        .expect(
+                                            "All entries in the new player list were explicitly \
+                                             joining on client",
+                                        );
+                                    let _ = old_client.send(ServerGeneral::Disconnect(
+                                        DisconnectReason::Kicked(String::from(
+                                            "You have logged in from another location.",
+                                        )),
+                                    ));
+                                } else {
+                                    drop(new_players_guard);
+                                    // A player without a client is strange, so we don't really want
+                                    // to retry. Warn about this case and hope that trying to
+                                    // perform the disconnect process removes the invalid player
+                                    // entry.
+                                    warn!(
+                                        "Player without client detected for entity {:?}",
+                                        old_entity
+                                    );
+                                }
+                                // Remove old client
+                                server_emitter.emit(ServerEvent::ClientDisconnect(
+                                    old_entity,
+                                    common::comp::DisconnectReason::NewerLogin,
+                                ));
+
+                                return Ok(());
+                            },
+                            Either::Right(v) => v,
+                        };
+
+                        let Some(msg) = msg else {
+                            drop(new_players_guard);
+                            // Invalid player
+                            client.send(Err(RegisterError::InvalidCharacter))?;
+                            return Ok(());
+                        };
+
+                        // We know the player list didn't already contain this entity because we
+                        // joined on !players, so we can assume from here that we'll definitely be
+                        // adding a new player.
+
+                        // Add to list to notify all clients of the new player
+                        vacant_player.insert((entity, player, admin, msg));
+                        drop(new_players_guard);
+                        read_data.player_metrics.players_connected.inc();
+
+                        // Tell the client its request was successful.
+                        client.send(Ok(()))?;
+
+                        // Send client all the tracked components currently attached to its entity
+                        // as well as synced resources (currently only `TimeOfDay`)
+                        debug!("Starting initial sync with client.");
+                        client.send(ServerInit::GameSync {
+                            // Send client their entity
+                            entity_package: read_data
+                                .trackers
+                                .create_entity_package_with_uid(entity, *uid, None, None, None),
+                            time_of_day: *read_data.time_of_day,
+                            max_group_size: read_data.settings.max_player_group_size,
+                            client_timeout: read_data.settings.client_timeout,
+                            world_map: (*read_data.map).clone(),
+                            recipe_book: default_recipe_book().cloned(),
+                            component_recipe_book: default_component_recipe_book().cloned(),
+                            material_stats: (*read_data.material_stats).clone(),
+                            ability_map: (*read_data.ability_map).clone(),
+                        })?;
+                        debug!("Done initial sync with client.");
+
+                        // Send initial player list
+                        client.send(ServerGeneral::PlayerListUpdate(PlayerListUpdate::Init(
+                            player_list.clone(),
+                        )))?;
+
+                        Ok(())
+                    }() {
+                        trace!(?e, "failed to process register");
+                    }
+                },
+            );
+        let (new_players, retries, finished_pending) = new_players.into_inner();
+        finished_pending.into_iter().for_each(|e| {
+            // Remove all entities in finished_pending from pending_logins.
+            pending_logins.remove(e);
+        });
 
         // Insert retry attempts back into pending_logins to be processed next tick
         for (entity, pending) in retries {
             let _ = pending_logins.insert(entity, pending);
         }
 
         // Handle new players.
-        // Tell all clients to add them to the player list.
-        let player_info = |entity| {
-            let player_info = read_data.uids.get(entity).zip(players.get(entity));
-            player_info.map(|(u, p)| (entity, u, p))
-        };
-        for (entity, uid, player) in new_players.into_iter().filter_map(player_info) {
-            let mut lazy_msg = None;
-            for (_, client) in (&players, &read_data.clients).join() {
-                if lazy_msg.is_none() {
-                    lazy_msg = Some(client.prepare(ServerGeneral::PlayerListUpdate(
-                        PlayerListUpdate::Add(*uid, PlayerInfo {
-                            player_alias: player.alias.clone(),
-                            is_online: true,
-                            is_moderator: admins.get(entity).is_some(),
-                            character: None, // new players will be on character select.
-                            uuid: player.uuid(),
-                        }),
-                    )));
-                }
-                lazy_msg.as_ref().map(|msg| client.send_prepared(msg));
-            }
-        }
+        let msgs = new_players
+            .into_values()
+            .map(|(entity, player, admin, msg)| {
+                let username = &player.alias;
+                info!(?username, "New User");
+                // Add Player component to this client.
+                //
+                // Note that since players has been write locked for the duration of this
+                // system, we know that nobody else added any players since we
+                // last checked its value, and we checked that everything in
+                // new_players was not already in players, so we know the insert
+                // succeeds and the old entry was vacant. Moreover, we know that all new
+                // players we added have different UUIDs both from each other, and from any old
+                // players, preserving the uniqueness invariant.
+                players
+                    .insert(entity, player)
+                    .expect("The entity was joined against in the same system, so it exists");
+
+                // Give the Admin component to the player if their name exists in
+                // admin list
+                if let Some(admin) = admin {
+                    admins
+                        .insert(entity, Admin(admin.role.into()))
+                        .expect("Inserting into players proves the entity exists.");
+                }
+                msg
+            })
+            .collect::<Vec<_>>();
 
+        // Tell all clients to add the new players to the player list, in parallel.
+        (players.mask(), &clients)
+            .par_join()
+            .for_each(|(_, client)| {
+                // Send messages sequentially within each client; by the time we have enough
+                // players to make parallelizing useful, we will have way more
+                // players than cores.
+                msgs.iter().for_each(|msg| {
+                    let _ = client.send_prepared(msg);
+                });
+            });
     }
 }
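The new body above fans per-client login work out over Rayon with `.join().par_bridge()` and funnels results through a single `parking_lot::Mutex`, using a `HashMap` entry to catch two clients claiming the same account in the same tick. A stripped-down, self-contained sketch of that shape (plain `u64` account ids and a `Vec` of work items stand in for the real Specs join and auth types; rayon, hashbrown, and parking_lot are assumed available, as the hunk's imports suggest):

use hashbrown::{hash_map, HashMap};
use rayon::prelude::*;

type Entity = u32; // stand-in
type Uuid = u64;   // stand-in

fn process_logins(attempts: Vec<(Entity, Uuid)>) -> (HashMap<Uuid, Entity>, Vec<Entity>) {
    // Accepted logins keyed by account, plus entities to retry next tick.
    let accepted = parking_lot::Mutex::new((HashMap::new(), Vec::new()));

    attempts
        .into_iter()
        // `par_bridge` turns an ordinary (e.g. sparse-join) iterator into a parallel one.
        .par_bridge()
        .for_each(|(entity, uuid)| {
            // Expensive validation would happen here, outside the lock.
            let mut guard = accepted.lock();
            let (by_uuid, retries) = &mut *guard;
            match by_uuid.entry(uuid) {
                // Someone else won this account earlier in the tick: queue a retry.
                hash_map::Entry::Occupied(_) => retries.push(entity),
                hash_map::Entry::Vacant(v) => {
                    v.insert(entity);
                },
            }
        });

    accepted.into_inner()
}

fn main() {
    let (accepted, retries) = process_logins(vec![(1, 42), (2, 42), (3, 7)]);
    // Exactly one of entities 1 and 2 wins account 42; the other is queued.
    assert_eq!(accepted.len(), 2);
    assert_eq!(retries.len(), 1);
}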
@@ -11,8 +11,8 @@ use common::{
 };
 use common_ecs::{Job, Origin, ParMode, Phase, System};
 use common_net::msg::{ClientGeneral, ServerGeneral};
-use rayon::iter::ParallelIterator;
-use specs::{Entities, Join, ParJoin, Read, ReadExpect, ReadStorage, Write};
+use rayon::prelude::*;
+use specs::{Entities, Join, Read, ReadExpect, ReadStorage, Write, WriteStorage};
 use tracing::{debug, trace};
 
 /// This system will handle new messages from clients
@@ -29,7 +29,7 @@ impl<'a> System<'a> for Sys {
         Write<'a, Vec<ChunkRequest>>,
         ReadStorage<'a, Pos>,
         ReadStorage<'a, Presence>,
-        ReadStorage<'a, Client>,
+        WriteStorage<'a, Client>,
     );
 
     const NAME: &'static str = "msg::terrain";
@@ -48,17 +48,19 @@ impl<'a> System<'a> for Sys {
             mut chunk_requests,
             positions,
             presences,
-            clients,
+            mut clients,
         ): Self::SystemData,
     ) {
         job.cpu_stats.measure(ParMode::Rayon);
-        let mut new_chunk_requests = (&entities, &clients, (&presences).maybe())
-            .par_join()
+        let mut new_chunk_requests = (&entities, &mut clients, (&presences).maybe())
+            .join()
+            // NOTE: Required because Specs has very poor work splitting for sparse joins.
+            .par_bridge()
             .map_init(
                 || (chunk_send_bus.emitter(), server_event_bus.emitter()),
                 |(chunk_send_emitter, server_emitter), (entity, client, maybe_presence)| {
                     let mut chunk_requests = Vec::new();
-                    let _ = super::try_recv_all(client, 5, |_, msg| {
+                    let _ = super::try_recv_all(client, 5, |client, msg| {
                         let presence = match maybe_presence {
                             Some(g) => g,
                             None => {
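Like the register system, this hunk swaps `.par_join()` for `.join().par_bridge()` because Specs splits sparse joins poorly across Rayon workers, then uses `map_init` so each worker builds its emitters once and each item yields a small batch of requests. A generic, self-contained sketch of that bridge-and-collect pattern (a `String` prefix stands in for the per-worker emitters, numbers for the messages):

use rayon::prelude::*;

fn main() {
    let items = vec![3_u32, 1, 4, 1, 5];
    let results: Vec<String> = items
        .iter()
        // Bridge a plain sequential iterator into Rayon.
        .par_bridge()
        .map_init(
            // Created per worker/split, not per item (may be created more than once).
            || String::from("req"),
            |prefix, n| {
                // Each item produces its own small batch of results.
                (0..*n).map(|i| format!("{prefix}-{n}-{i}")).collect::<Vec<_>>()
            },
        )
        .flatten()
        .collect();
    assert_eq!(results.len(), 3 + 1 + 4 + 1 + 5);
}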
@@ -64,7 +64,23 @@ macro_rules! trackers {
                 vel: Option<Vel>,
                 ori: Option<Ori>,
             ) -> Option<EntityPackage<EcsCompPacket>> {
-                let uid = self.uid.get(entity).copied()?.0;
+                let uid = self.uid.get(entity).copied()?;
+                Some(self.create_entity_package_with_uid(entity, uid, pos, vel, ori))
+            }
+
+            /// See [create_entity_package].
+            ///
+            /// NOTE: Only if you're certain you know the UID for the entity, and it hasn't
+            /// changed!
+            pub fn create_entity_package_with_uid(
+                &self,
+                entity: EcsEntity,
+                uid: Uid,
+                pos: Option<Pos>,
+                vel: Option<Vel>,
+                ori: Option<Ori>,
+            ) -> EntityPackage<EcsCompPacket> {
+                let uid = uid.0;
                 let mut comps = Vec::new();
                 // NOTE: we could potentially include a bitmap indicating which components are present instead of tagging
                 // components with the type in order to save bandwidth
@@ -94,7 +110,7 @@ macro_rules! trackers {
                 vel.map(|c| comps.push(c.into()));
                 ori.map(|c| comps.push(c.into()));
 
-                Some(EntityPackage { uid, comps })
+                EntityPackage { uid, comps }
             }
 
             /// Create sync package for switching a client to another entity specifically to
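The new `create_entity_package_with_uid` lets callers that already hold a `Uid` (like the register system above) skip the storage lookup. Splitting a function into a thin look-up wrapper plus an infallible core that takes the pre-fetched value is the whole trick; a hypothetical stand-alone sketch (the `Registry`, `Package`, and `Uid` names here are illustrative, not the real Veloren types):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Entity(u32);
#[derive(Clone, Copy)]
struct Uid(u64);
struct Package { uid: u64 }

struct Registry { uids: HashMap<Entity, Uid> }

impl Registry {
    /// Convenience wrapper: looks the UID up, then delegates.
    fn create_package(&self, entity: Entity) -> Option<Package> {
        let uid = *self.uids.get(&entity)?;
        Some(self.create_package_with_uid(entity, uid))
    }

    /// Core: infallible once the caller already knows the UID.
    fn create_package_with_uid(&self, _entity: Entity, uid: Uid) -> Package {
        Package { uid: uid.0 }
    }
}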
@@ -32,7 +32,13 @@ use common_ecs::{Job, Origin, Phase, System};
 use common_net::msg::ServerGeneral;
 use common_state::TerrainChanges;
 use comp::Behavior;
-use specs::{Entities, Join, Read, ReadExpect, ReadStorage, Write, WriteExpect, WriteStorage};
+use core::cmp::Reverse;
+use itertools::Itertools;
+use rayon::{iter::Either, prelude::*};
+use specs::{
+    storage::GenericReadStorage, Entities, Entity, Join, ParJoin, Read, ReadExpect, ReadStorage,
+    Write, WriteExpect, WriteStorage,
+};
 use std::sync::Arc;
 use vek::*;
 
@@ -162,7 +168,7 @@ impl<'a> System<'a> for Sys {
                     let chunk = Arc::new(chunk);
 
                     // Add to list of chunks to send to nearby players.
-                    new_chunks.push((key, Arc::clone(&chunk)));
+                    new_chunks.push(key);
 
                     // TODO: code duplication for chunk insertion between here and state.rs
                     // Insert the chunk into terrain changes
@@ -223,7 +229,7 @@ impl<'a> System<'a> for Sys {
             }
 
             // Insert a safezone if chunk contains the spawn position
-            if server_settings.gameplay.safe_spawn && is_spawn_chunk(key, *spawn_point, &terrain) {
+            if server_settings.gameplay.safe_spawn && is_spawn_chunk(key, *spawn_point) {
                 server_emitter.emit(ServerEvent::CreateSafezone {
                     range: Some(SAFE_ZONE_RADIUS),
                     pos: Pos(spawn_point.0),
@@ -231,93 +237,167 @@ impl<'a> System<'a> for Sys {
             }
         }
 
-        let mut repositioned = Vec::new();
-        for (entity, pos, _) in (&entities, &mut positions, &reposition_on_load).join() {
-            // If an entity is marked as needing repositioning once the chunk loads (e.g.
-            // from having just logged in), reposition them.
-            let chunk_pos = terrain.pos_key(pos.0.map(|e| e as i32));
-            if let Some(chunk) = terrain.get_key(chunk_pos) {
-                pos.0 = terrain
-                    .try_find_space(pos.0.as_::<i32>())
-                    .map(|x| x.as_::<f32>())
-                    .unwrap_or_else(|| chunk.find_accessible_pos(pos.0.xy().as_::<i32>(), false));
-                repositioned.push(entity);
-                force_update
-                    .get_mut(entity)
-                    .map(|force_update| force_update.update());
-                let _ = waypoints.insert(entity, Waypoint::new(pos.0, *time));
-            }
-        }
-        for entity in repositioned {
-            reposition_on_load.remove(entity);
-        }
-
-        // Send the chunk to all nearby players.
-        use rayon::iter::{IntoParallelIterator, ParallelIterator};
-        new_chunks.into_par_iter().for_each_init(
-            || chunk_send_bus.emitter(),
-            |chunk_send_emitter, (key, _chunk)| {
-                (&entities, &presences, &positions, &clients)
-                    .join()
-                    .for_each(|(entity, presence, pos, _client)| {
-                        let chunk_pos = terrain.pos_key(pos.0.map(|e| e as i32));
-                        // Subtract 2 from the offset before computing squared magnitude
-                        // 1 since chunks need neighbors to be meshed
-                        // 1 to act as a buffer if the player moves in that direction
-                        let adjusted_dist_sqr = (chunk_pos - key)
-                            .map(|e: i32| (e.unsigned_abs()).saturating_sub(2))
-                            .magnitude_squared();
-
-                        if adjusted_dist_sqr <= presence.terrain_view_distance.current().pow(2) {
-                            chunk_send_emitter.emit(ChunkSendEntry {
-                                entity,
-                                chunk_key: key,
-                            });
-                        }
-                    });
-            },
-        );
-
-        // Remove chunks that are too far from players.
-        let mut chunks_to_remove = Vec::new();
-        terrain
-            .iter()
-            .map(|(k, _)| k)
-            // Don't check every chunk every tick (spread over 16 ticks)
-            .filter(|k| k.x.unsigned_abs() % 4 + (k.y.unsigned_abs() % 4) * 4 == (tick.0 % 16) as u32)
-            // There shouldn't be to many pending chunks so we will just check them all
-            .chain(chunk_generator.pending_chunks())
-            .for_each(|chunk_key| {
-                let mut should_drop = true;
-
-                // For each player with a position, calculate the distance.
-                for (presence, pos) in (&presences, &positions).join() {
-                    if chunk_in_vd(pos.0, chunk_key, &terrain, presence.terrain_view_distance.current()) {
-                        should_drop = false;
-                        break;
-                    }
-                }
-
-                if should_drop {
-                    chunks_to_remove.push(chunk_key);
-                }
-            });
-
-        for key in chunks_to_remove {
-            // Register the unloading of this chunk from terrain persistence
-            #[cfg(feature = "persistent_world")]
-            if let Some(terrain_persistence) = _terrain_persistence.as_mut() {
-                terrain_persistence.unload_chunk(key);
-            }
-
-            // TODO: code duplication for chunk insertion between here and state.rs
-            if terrain.remove(key).is_some() {
-                terrain_changes.removed_chunks.insert(key);
-                rtsim.hook_unload_chunk(key);
-            }
-
-            chunk_generator.cancel_if_pending(key);
-        }
+        // TODO: Consider putting this in another system since this forces us to take
+        // positions by write rather than read access.
+        let repositioned = (&entities, &mut positions, (&mut force_update).maybe(), reposition_on_load.mask())
+            // TODO: Consider using par_bridge() because Rayon has very poor work splitting for
+            // sparse joins.
+            .par_join()
+            .filter_map(|(entity, pos, force_update, _)| {
+                // NOTE: We use regular as casts rather than as_ because we want to saturate on
+                // overflow.
+                let entity_pos = pos.0.map(|x| x as i32);
+                // If an entity is marked as needing repositioning once the chunk loads (e.g.
+                // from having just logged in), reposition them.
+                let chunk_pos = TerrainGrid::chunk_key(entity_pos);
+                let chunk = terrain.get_key(chunk_pos)?;
+                let new_pos = terrain
+                    .try_find_space(entity_pos)
+                    .map(|x| x.as_::<f32>())
+                    .unwrap_or_else(|| chunk.find_accessible_pos(entity_pos.xy(), false));
+                pos.0 = new_pos;
+                force_update.map(|force_update| force_update.update());
+                Some((entity, new_pos))
+            })
+            .collect::<Vec<_>>();
+
+        for (entity, new_pos) in repositioned {
+            let _ = waypoints.insert(entity, Waypoint::new(new_pos, *time));
+            reposition_on_load.remove(entity);
+        }
+
+        let max_view_distance = server_settings.max_view_distance.unwrap_or(u32::MAX);
+        let (presences_position_entities, presences_positions) = prepare_player_presences(
+            &world,
+            max_view_distance,
+            &entities,
+            &positions,
+            &presences,
+            &clients,
+        );
+        let real_max_view_distance = convert_to_loaded_vd(u32::MAX, max_view_distance);
+
+        // Send the chunks to all nearby players.
+        new_chunks.par_iter().for_each_init(
+            || chunk_send_bus.emitter(),
+            |chunk_send_emitter, chunk_key| {
+                // We only have to check players inside the maximum view distance of the server
+                // of our own position.
+                //
+                // We start by partitioning by X, finding only entities in chunks within the X
+                // range of us. These are guaranteed in bounds due to restrictions on max view
+                // distance (namely: the square of any chunk coordinate plus the max view
+                // distance along both axes must fit in an i32).
+                let min_chunk_x = chunk_key.x - real_max_view_distance;
+                let max_chunk_x = chunk_key.x + real_max_view_distance;
+                let start = presences_position_entities
+                    .partition_point(|((pos, _), _)| i32::from(pos.x) < min_chunk_x);
+                // NOTE: We *could* just scan forward until we hit the end, but this way we save
+                // a comparison in the inner loop, since also needs to check the
+                // list length. We could also save some time by starting from
+                // start rather than end, but the hope is that this way the
+                // compiler (and machine) can reorder things so both ends are
+                // fetched in parallel; since the vast majority of the time both fetched
+                // elements should already be in cache, this should not use any
+                // extra memory bandwidth.
+                //
+                // TODO: Benchmark and figure out whether this is better in practice than just
+                // scanning forward.
+                let end = presences_position_entities
+                    .partition_point(|((pos, _), _)| i32::from(pos.x) < max_chunk_x);
+                let interior = &presences_position_entities[start..end];
+                interior
+                    .iter()
+                    .filter(|((player_chunk_pos, player_vd_sqr), _)| {
+                        chunk_in_vd(*player_chunk_pos, *player_vd_sqr, *chunk_key)
+                    })
+                    .for_each(|(_, entity)| {
+                        chunk_send_emitter.emit(ChunkSendEntry {
+                            entity: *entity,
+                            chunk_key: *chunk_key,
+                        });
+                    });
+            },
+        );
+
+        let tick = (tick.0 % 16) as i32;
+
+        // Remove chunks that are too far from players.
+        //
+        // Note that all chunks involved here (both terrain chunks and pending chunks)
+        // are guaranteed in bounds. This simplifies the rest of the logic
+        // here.
+        let chunks_to_remove = terrain
+            .par_keys()
+            .copied()
+            // There may be lots of pending chunks, so don't check them all. This should be okay
+            // as long as we're maintaining a reasonable tick rate.
+            .chain(chunk_generator.par_pending_chunks())
+            // Don't check every chunk every tick (spread over 16 ticks)
+            //
+            // TODO: Investigate whether we can add support for performing this filtering directly
+            // within hashbrown (basically, specify we want to iterate through just buckets with
+            // hashes in a particular range). This could provide significiant speedups since we
+            // could avoid having to iterate through a bunch of buckets we don't care about.
+            //
+            // TODO: Make the percentage of the buckets that we go through adjust dynamically
+            // depending on the current number of chunks. In the worst case, we might want to scan
+            // just 1/256 of the chunks each tick, for example.
+            .filter(|k| k.x % 4 + (k.y % 4) * 4 == tick)
+            .filter(|&chunk_key| {
+                // We only have to check players inside the maximum view distance of the server of
+                // our own position.
+                //
+                // We start by partitioning by X, finding only entities in chunks within the X
+                // range of us. These are guaranteed in bounds due to restrictions on max view
+                // distance (namely: the square of any chunk coordinate plus the max view distance
+                // along both axes must fit in an i32).
+                let min_chunk_x = chunk_key.x - real_max_view_distance;
+                let max_chunk_x = chunk_key.x + real_max_view_distance;
+                let start = presences_positions
+                    .partition_point(|(pos, _)| i32::from(pos.x) < min_chunk_x);
+                // NOTE: We *could* just scan forward until we hit the end, but this way we save a
+                // comparison in the inner loop, since also needs to check the list length. We
+                // could also save some time by starting from start rather than end, but the hope
+                // is that this way the compiler (and machine) can reorder things so both ends are
+                // fetched in parallel; since the vast majority of the time both fetched elements
+                // should already be in cache, this should not use any extra memory bandwidth.
+                //
+                // TODO: Benchmark and figure out whether this is better in practice than just
+                // scanning forward.
+                let end = presences_positions
+                    .partition_point(|(pos, _)| i32::from(pos.x) < max_chunk_x);
+                let interior = &presences_positions[start..end];
+                !interior.iter().any(|&(player_chunk_pos, player_vd_sqr)| {
+                    chunk_in_vd(player_chunk_pos, player_vd_sqr, chunk_key)
+                })
+            })
+            .collect::<Vec<_>>();
+
+        let chunks_to_remove = chunks_to_remove
+            .into_iter()
+            .filter_map(|key| {
+                // Register the unloading of this chunk from terrain persistence
+                #[cfg(feature = "persistent_world")]
+                if let Some(terrain_persistence) = _terrain_persistence.as_mut() {
+                    terrain_persistence.unload_chunk(key);
+                }
+
+                chunk_generator.cancel_if_pending(key);
+
+                // TODO: code duplication for chunk insertion between here and state.rs
+                terrain.remove(key).map(|chunk| {
+                    terrain_changes.removed_chunks.insert(key);
+                    rtsim.hook_unload_chunk(key);
+                    chunk
+                })
+            })
+            .collect::<Vec<_>>();
+        if !chunks_to_remove.is_empty() {
+            // Drop chunks in a background thread.
+            slow_jobs.spawn("CHUNK_DROP", move || {
+                drop(chunks_to_remove);
+            });
+        }
     }
 }
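Both the chunk fan-out and the chunk-unload filter above depend on the player list being pre-sorted by chunk x so that `partition_point` can cut a narrow window before any per-player distance check runs. A small self-contained sketch of that idea, on plain `(x, y)` tuples rather than the real presence types:

// Players as (chunk_x, chunk_y) positions, pre-sorted by x (then y).
fn players_near_x(players: &[(i32, i32)], chunk_x: i32, max_vd: i32) -> &[(i32, i32)] {
    // First index whose x is >= chunk_x - max_vd ...
    let start = players.partition_point(|&(x, _)| x < chunk_x - max_vd);
    // ... and first index whose x is >= chunk_x + max_vd.
    let end = players.partition_point(|&(x, _)| x < chunk_x + max_vd);
    &players[start..end]
}

fn main() {
    let mut players = vec![(-40, 0), (-3, 5), (0, 0), (2, -7), (55, 1)];
    players.sort_unstable();
    // Only players whose chunk x lies within +/-4 of chunk 0 survive the cheap cut;
    // the precise squared-distance check only runs on this short window.
    let window = players_near_x(&players, 0, 4);
    assert_eq!(window, &[(-3, 5), (0, 0), (2, -7)][..]);
}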
@@ -467,26 +547,173 @@ impl NpcData {
     }
 }
 
-pub fn chunk_in_vd(
-    player_pos: Vec3<f32>,
-    chunk_pos: Vec2<i32>,
-    terrain: &TerrainGrid,
-    vd: u32,
-) -> bool {
-    // This fuzzy threshold prevents chunks rapidly unloading and reloading when
-    // players move over a chunk border.
-    const UNLOAD_THRESHOLD: u32 = 2;
-
-    let player_chunk_pos = terrain.pos_key(player_pos.map(|e| e as i32));
-
-    let adjusted_dist_sqr = (player_chunk_pos - chunk_pos)
-        .map(|e: i32| e.unsigned_abs())
-        .magnitude_squared();
-
-    adjusted_dist_sqr <= (vd.max(crate::MIN_VD) + UNLOAD_THRESHOLD).pow(2)
-}
-
-fn is_spawn_chunk(chunk_pos: Vec2<i32>, spawn_pos: SpawnPoint, terrain: &TerrainGrid) -> bool {
-    let spawn_chunk_pos = terrain.pos_key(spawn_pos.0.map(|e| e as i32));
+pub fn convert_to_loaded_vd(vd: u32, max_view_distance: u32) -> i32 {
+    // Hardcoded max VD to prevent stupid view distances from creating overflows.
+    // This must be a value ≤
+    // √(i32::MAX - 2 * ((1 << (MAX_WORLD_BLOCKS_LG - TERRAIN_CHUNK_BLOCKS_LG) - 1)²
+    // - 1)) / 2
+    //
+    // since otherwise we could end up overflowing. Since it is a requirement that
+    // each dimension (in chunks) has to fit in a i16, we can derive √((1<<31)-1
+    // - 2*((1<<15)-1)^2) / 2 ≥ 1 << 7 as the absolute limit.
+    //
+    // TODO: Make this more official and use it elsewhere.
+    const MAX_VD: u32 = 1 << 7;
+
+    // This fuzzy threshold prevents chunks rapidly unloading and reloading when
+    // players move over a chunk border.
+    const UNLOAD_THRESHOLD: u32 = 2;
+
+    // NOTE: This cast is safe for the reasons mentioned above.
+    (vd.max(crate::MIN_VD)
+        .min(max_view_distance)
+        .saturating_add(UNLOAD_THRESHOLD))
+    .min(MAX_VD) as i32
+}
+
+/// Returns: ((player_chunk_pos, player_vd_squared), entity, is_client)
+fn prepare_for_vd_check(
+    world_aabr_in_chunks: &Aabr<i32>,
+    max_view_distance: u32,
+    entity: Entity,
+    presence: &Presence,
+    pos: &Pos,
+    client: Option<u32>,
+) -> Option<((Vec2<i16>, i32), Entity, bool)> {
+    let is_client = client.is_some();
+    let pos = pos.0;
+    let vd = presence.terrain_view_distance.current();
+
+    // NOTE: We use regular as casts rather than as_ because we want to saturate on
+    // overflow.
+    let player_pos = pos.map(|x| x as i32);
+    let player_chunk_pos = TerrainGrid::chunk_key(player_pos);
+    let player_vd = convert_to_loaded_vd(vd, max_view_distance);
+
+    // We filter out positions that are *clearly* way out of range from
+    // consideration. This is pretty easy to do, and means we don't have to
+    // perform expensive overflow checks elsewhere (otherwise, a player
+    // sufficiently far off the map could cause chunks they were nowhere near to
+    // stay loaded, parallel universes style).
+    //
+    // One could also imagine snapping a player to the part of the map nearest to
+    // them. We don't currently do this in case we rely elsewhere on players
+    // always being near the chunks they're keeping loaded, but it would allow
+    // us to use u32 exclusively so it's tempting.
+    let player_aabr_in_chunks = Aabr {
+        min: player_chunk_pos - player_vd,
+        max: player_chunk_pos + player_vd,
+    };
+
+    (world_aabr_in_chunks.max.x >= player_aabr_in_chunks.min.x &&
+     world_aabr_in_chunks.min.x <= player_aabr_in_chunks.max.x &&
+     world_aabr_in_chunks.max.y >= player_aabr_in_chunks.min.y &&
+     world_aabr_in_chunks.min.y <= player_aabr_in_chunks.max.y)
+    // The cast to i32 here is definitely safe thanks to MAX_VD limiting us to fit
+    // within i32^2.
+    //
+    // The cast from each coordinate to i16 should also be correct here. This is because valid
+    // world chunk coordinates are no greater than 1 << 14 - 1; since we verified that the
+    // player is within world bounds modulo player_vd, which is guaranteed to never let us
+    // overflow an i16 when added to a u14, safety of the cast follows.
+    .then(|| ((player_chunk_pos.as_::<i16>(), player_vd.pow(2) as i32), entity, is_client))
+}
+
+pub fn prepare_player_presences<'a, P>(
+    world: &World,
+    max_view_distance: u32,
+    entities: &Entities<'a>,
+    positions: P,
+    presences: &ReadStorage<'a, Presence>,
+    clients: &ReadStorage<'a, Client>,
+) -> (Vec<((Vec2<i16>, i32), Entity)>, Vec<(Vec2<i16>, i32)>)
+where
+    P: GenericReadStorage<Component = Pos> + Join<Type = &'a Pos>,
+{
+    // We start by collecting presences and positions from players, because they are
+    // very sparse in the entity list and therefore iterating over them for each
+    // chunk can be quite slow.
+    let world_aabr_in_chunks = Aabr {
+        min: Vec2::zero(),
+        // NOTE: Cast is correct because chunk coordinates must fit in an i32 (actually, i16).
+        max: world
+            .sim()
+            .get_size()
+            .map(|x| x.saturating_sub(1))
+            .as_::<i32>(),
+    };
+
+    let (mut presences_positions_entities, mut presences_positions): (Vec<_>, Vec<_>) =
+        (entities, presences, positions, clients.mask().maybe())
+            .join()
+            .filter_map(|(entity, presence, position, client)| {
+                prepare_for_vd_check(
+                    &world_aabr_in_chunks,
+                    max_view_distance,
+                    entity,
+                    presence,
+                    position,
+                    client,
+                )
+            })
+            .partition_map(|(player_data, entity, is_client)| {
+                // For chunks with clients, we need to record their entity, because they might
+                // be used for insertion. These elements fit in 8 bytes, so
+                // this should be pretty cache-friendly.
+                if is_client {
+                    Either::Left((player_data, entity))
+                } else {
+                    // For chunks without clients, we only need to record the position and view
+                    // distance. These elements fit in 4 bytes, which is even cache-friendlier.
+                    Either::Right(player_data)
+                }
+            });
+
+    // We sort the presence lists by X position, so we can efficiently filter out
+    // players nowhere near the chunk. This is basically a poor substitute for
+    // the effects of a proper KDTree, but a proper KDTree has too much overhead
+    // to be worth using for such a short list (~ 1000 players at most). We
+    // also sort by y and reverse view distance; this will become important later.
+    presences_positions_entities
+        .sort_unstable_by_key(|&((pos, vd2), _)| (pos.x, pos.y, Reverse(vd2)));
+    presences_positions.sort_unstable_by_key(|&(pos, vd2)| (pos.x, pos.y, Reverse(vd2)));
+    // For the vast majority of chunks (present and pending ones), we'll only ever
+    // need the position and view distance. So we extend it with these from the
+    // list of client chunks, and then do some further work to improve
+    // performance (taking advantage of the fact that they don't require
+    // entities).
+    presences_positions.extend(
+        presences_positions_entities
+            .iter()
+            .map(|&(player_data, _)| player_data),
+    );
+    // Since both lists were previously sorted, we use stable sort over unstable
+    // sort, as it's faster in that case (theoretically a proper merge operation
+    // would be ideal, but it's not worth pulling in a library for).
+    presences_positions.sort_by_key(|&(pos, vd2)| (pos.x, pos.y, Reverse(vd2)));
+    // Now that the list is sorted, we deduplicate players in the same chunk (this
+    // is why we need to sort y as well as x; dedup only works if the list is
+    // sorted by the element we use to dedup). Importantly, we can then use
+    // only the *first* element as a substitute for all the players in the
+    // chunk, because we *also* sorted from greatest to lowest view
+    // distance, and dedup_by removes all but the first matching element. In the
+    // common case where a few chunks are very crowded, this further reduces the
+    // work required per chunk.
+    presences_positions.dedup_by_key(|&mut (pos, _)| pos);
+
+    (presences_positions_entities, presences_positions)
+}
+
+pub fn chunk_in_vd(player_chunk_pos: Vec2<i16>, player_vd_sqr: i32, chunk_pos: Vec2<i32>) -> bool {
+    // NOTE: Guaranteed in bounds as long as prepare_player_presences prepared the
+    // player_chunk_pos and player_vd_sqr.
+    let adjusted_dist_sqr = (player_chunk_pos.as_::<i32>() - chunk_pos).magnitude_squared();
+
+    adjusted_dist_sqr <= player_vd_sqr
+}
+
+fn is_spawn_chunk(chunk_pos: Vec2<i32>, spawn_pos: SpawnPoint) -> bool {
+    // FIXME: Ensure spawn_pos doesn't overflow before performing this cast.
+    let spawn_chunk_pos = TerrainGrid::chunk_key(spawn_pos.0.map(|e| e as i32));
     chunk_pos == spawn_chunk_pos
 }
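`prepare_player_presences` above keeps only one entry per occupied chunk, and its sort key ends with `Reverse(vd²)` precisely so that the kept (first) entry of each run is the one with the largest view distance. A tiny self-contained sketch of that sort-then-dedup trick:

use core::cmp::Reverse;

fn main() {
    // (chunk_pos, view_distance_squared) pairs, possibly several players per chunk.
    let mut presences = vec![((0, 0), 4), ((1, 2), 9), ((0, 0), 25), ((1, 2), 1)];
    // Sort by position, and by *descending* view distance within a position.
    presences.sort_unstable_by_key(|&(pos, vd2)| (pos, Reverse(vd2)));
    // dedup keeps the first of each run, i.e. the largest view distance per chunk.
    presences.dedup_by_key(|&mut (pos, _)| pos);
    assert_eq!(presences, vec![((0, 0), 25), ((1, 2), 9)]);
}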
@@ -1,9 +1,12 @@
-use crate::{chunk_serialize::ChunkSendEntry, client::Client, presence::Presence};
-use common::{comp::Pos, event::EventBus, terrain::TerrainGrid};
+use crate::{chunk_serialize::ChunkSendEntry, client::Client, presence::Presence, Settings};
+use common::{comp::Pos, event::EventBus};
 use common_ecs::{Job, Origin, Phase, System};
 use common_net::msg::{CompressedData, ServerGeneral};
 use common_state::TerrainChanges;
+use rayon::prelude::*;
 use specs::{Entities, Join, Read, ReadExpect, ReadStorage};
+use std::sync::Arc;
+use world::World;
 
 /// This systems sends new chunks to clients as well as changes to existing
 /// chunks
@@ -12,7 +15,8 @@ pub struct Sys;
 impl<'a> System<'a> for Sys {
     type SystemData = (
         Entities<'a>,
-        ReadExpect<'a, TerrainGrid>,
+        ReadExpect<'a, Arc<World>>,
+        Read<'a, Settings>,
         Read<'a, TerrainChanges>,
         ReadExpect<'a, EventBus<ChunkSendEntry>>,
         ReadStorage<'a, Pos>,
@@ -26,26 +30,71 @@ impl<'a> System<'a> for Sys {
 
     fn run(
         _job: &mut Job<Self>,
-        (entities, terrain, terrain_changes, chunk_send_bus, positions, presences, clients): Self::SystemData,
+        (
+            entities,
+            world,
+            server_settings,
+            terrain_changes,
+            chunk_send_bus,
+            positions,
+            presences,
+            clients,
+        ): Self::SystemData,
     ) {
-        let mut chunk_send_emitter = chunk_send_bus.emitter();
+        let max_view_distance = server_settings.max_view_distance.unwrap_or(u32::MAX);
+        let (presences_position_entities, _) = super::terrain::prepare_player_presences(
+            &world,
+            max_view_distance,
+            &entities,
+            &positions,
+            &presences,
+            &clients,
+        );
+        let real_max_view_distance =
+            super::terrain::convert_to_loaded_vd(u32::MAX, max_view_distance);
 
         // Sync changed chunks
-        for chunk_key in &terrain_changes.modified_chunks {
-            for (entity, presence, pos) in (&entities, &presences, &positions).join() {
-                if super::terrain::chunk_in_vd(
-                    pos.0,
-                    *chunk_key,
-                    &terrain,
-                    presence.terrain_view_distance.current(),
-                ) {
-                    chunk_send_emitter.emit(ChunkSendEntry {
-                        entity,
-                        chunk_key: *chunk_key,
-                    });
-                }
-            }
-        }
+        terrain_changes.modified_chunks.par_iter().for_each_init(
+            || chunk_send_bus.emitter(),
+            |chunk_send_emitter, &chunk_key| {
+                // We only have to check players inside the maximum view distance of the server
+                // of our own position.
+                //
+                // We start by partitioning by X, finding only entities in chunks within the X
+                // range of us. These are guaranteed in bounds due to restrictions on max view
+                // distance (namely: the square of any chunk coordinate plus the max view
+                // distance along both axes must fit in an i32).
+                let min_chunk_x = chunk_key.x - real_max_view_distance;
+                let max_chunk_x = chunk_key.x + real_max_view_distance;
+                let start = presences_position_entities
+                    .partition_point(|((pos, _), _)| i32::from(pos.x) < min_chunk_x);
+                // NOTE: We *could* just scan forward until we hit the end, but this way we save
+                // a comparison in the inner loop, since also needs to check the
+                // list length. We could also save some time by starting from
+                // start rather than end, but the hope is that this way the
+                // compiler (and machine) can reorder things so both ends are
+                // fetched in parallel; since the vast majority of the time both fetched
+                // elements should already be in cache, this should not use any
+                // extra memory bandwidth.
+                //
+                // TODO: Benchmark and figure out whether this is better in practice than just
+                // scanning forward.
+                let end = presences_position_entities
+                    .partition_point(|((pos, _), _)| i32::from(pos.x) < max_chunk_x);
+                let interior = &presences_position_entities[start..end];
+                interior
+                    .iter()
+                    .filter(|((player_chunk_pos, player_vd_sqr), _)| {
+                        super::terrain::chunk_in_vd(*player_chunk_pos, *player_vd_sqr, chunk_key)
+                    })
+                    .for_each(|(_, entity)| {
+                        chunk_send_emitter.emit(ChunkSendEntry {
+                            entity: *entity,
+                            chunk_key,
+                        });
+                    });
+            },
+        );
 
         // TODO: Don't send all changed blocks to all clients
         // Sync changed blocks
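Both terrain systems above iterate chunks with Rayon's `for_each_init`, so each worker builds its event-bus emitter once rather than once per chunk. A generic sketch of amortizing per-worker state this way (a reusable `String` buffer stands in for the emitter; this is an illustration of the Rayon pattern, not the Veloren `EventBus` API):

use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let total = AtomicUsize::new(0);
    (0..1000).into_par_iter().for_each_init(
        // Built once per worker/split, not once per item.
        || String::with_capacity(64),
        |buf, i| {
            use std::fmt::Write;
            buf.clear();
            let _ = write!(buf, "chunk {i}");
            total.fetch_add(buf.len(), Ordering::Relaxed);
        },
    );
    assert!(total.load(Ordering::Relaxed) > 0);
}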
@@ -42,6 +42,8 @@ impl World {
     #[inline(always)]
     pub const fn map_size_lg(&self) -> MapSizeLg { DEFAULT_WORLD_CHUNKS_LG }
 
+    pub fn generate_oob_chunk(&self) -> TerrainChunk { TerrainChunk::water(0) }
+
     pub fn generate_chunk(
         &self,
         _index: IndexRef,
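`generate_oob_chunk` gives callers a well-defined chunk (water) to hand out for positions outside the map instead of special-casing them everywhere. A hypothetical sketch of that fallback-value pattern, with an illustrative `ChunkGrid` type that is not Veloren's real `TerrainGrid`:

use std::{collections::HashMap, sync::Arc};

struct Chunk { water_level: i32 }

struct ChunkGrid {
    size: (i32, i32),
    chunks: HashMap<(i32, i32), Arc<Chunk>>,
    // One shared fallback chunk returned for any out-of-bounds or missing key.
    oob: Arc<Chunk>,
}

impl ChunkGrid {
    fn new(size: (i32, i32), oob: Arc<Chunk>) -> Self {
        Self { size, chunks: HashMap::new(), oob }
    }

    fn get(&self, key: (i32, i32)) -> &Arc<Chunk> {
        let in_bounds = (0..self.size.0).contains(&key.0) && (0..self.size.1).contains(&key.1);
        if in_bounds {
            self.chunks.get(&key).unwrap_or(&self.oob)
        } else {
            &self.oob
        }
    }
}

fn main() {
    let grid = ChunkGrid::new((4, 4), Arc::new(Chunk { water_level: 0 }));
    // Out-of-bounds lookups get the shared water chunk instead of a panic or Option.
    assert_eq!(grid.get((-10, 3)).water_level, 0);
}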
@@ -14,7 +14,6 @@ const GEN_SIZE: i32 = 4;
 pub fn criterion_benchmark(c: &mut Criterion) {
     let pool = rayon::ThreadPoolBuilder::new().build().unwrap();
     // Generate chunks here to test
-    let mut terrain = TerrainGrid::new().unwrap();
     let (world, index) = World::generate(
         42,
         sim::WorldOpts {
@@ -27,6 +26,11 @@ pub fn criterion_benchmark(c: &mut Criterion) {
         },
         &pool,
     );
+    let mut terrain = TerrainGrid::new(
+        world.sim().map_size_lg(),
+        Arc::new(world.sim().generate_oob_chunk()),
+    )
+    .unwrap();
     let index = index.as_index_ref();
     (0..GEN_SIZE)
         .flat_map(|x| (0..GEN_SIZE).map(move |y| Vec2::new(x, y)))
@@ -889,8 +889,7 @@ impl<V: RectRasterableVol> Terrain<V> {
                 neighbours &= scene_data
                     .state
                     .terrain()
-                    .get_key(pos + Vec2::new(i, j))
-                    .is_some();
+                    .contains_key_real(pos + Vec2::new(i, j));
             }
         }
 
@@ -978,8 +977,7 @@ impl<V: RectRasterableVol> Terrain<V> {
                 neighbours &= scene_data
                     .state
                     .terrain()
-                    .get_key(neighbour_chunk_pos + Vec2::new(i, j))
-                    .is_some();
+                    .contains_key_real(neighbour_chunk_pos + Vec2::new(i, j));
             }
         }
         if neighbours {
@@ -746,7 +746,11 @@ fn main() {
     let mut totals: BTreeMap<&str, f32> = BTreeMap::new();
     let mut total_timings: BTreeMap<&str, f32> = BTreeMap::new();
     let mut count = 0;
-    let mut volgrid = VolGrid2d::new().unwrap();
+    let mut volgrid = VolGrid2d::new(
+        world.sim().map_size_lg(),
+        Arc::new(world.sim().generate_oob_chunk()),
+    )
+    .unwrap();
     for (i, spiralpos) in Spiral2d::with_radius(RADIUS)
         .map(|v| v + sitepos.as_())
         .enumerate()
@@ -255,7 +255,6 @@ impl World {
                 .map(|zcache| zcache.sample.stone_col)
                 .unwrap_or_else(|| index.colors.deep_stone_color.into()),
         );
-        let water = Block::new(BlockKind::Water, Rgb::zero());

         let (base_z, sim_chunk) = match self
             .sim
@@ -269,15 +268,9 @@ impl World {
             Some(base_z) => (base_z as i32, self.sim.get(chunk_pos).unwrap()),
             // Some((base_z, sim_chunk)) => (base_z as i32, sim_chunk),
             None => {
-                return Ok((
-                    TerrainChunk::new(
-                        CONFIG.sea_level as i32,
-                        water,
-                        air,
-                        TerrainChunkMeta::void(),
-                    ),
-                    ChunkSupplement::default(),
-                ));
+                // NOTE: This is necessary in order to generate a handful of chunks at the edges
+                // of the map.
+                return Ok((self.sim().generate_oob_chunk(), ChunkSupplement::default()));
             },
         };

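
With the hand-built tuple return gone, out-of-map requests in `generate_chunk` now go through the same `generate_oob_chunk` helper as everything else, so the water placeholder is defined in one place. A condensed sketch of that fallback shape follows; the types, the toy 4x4 map, and the sea-level constant are simplified stand-ins, not Veloren's real signatures.

// Simplified stand-ins for TerrainChunk / ChunkSupplement / the sim lookup.
#[derive(Debug)]
struct Chunk {
    base_z: i32,
}

#[derive(Debug, Default)]
struct Supplement;

const SEA_LEVEL: i32 = 140; // arbitrary illustrative value

fn generate_oob_chunk() -> Chunk {
    // Single source of truth for the "outside the map" placeholder.
    Chunk { base_z: SEA_LEVEL }
}

fn base_z_of(chunk_pos: (i32, i32)) -> Option<i32> {
    // Pretend the map is a 4x4 grid of chunks.
    ((0..4).contains(&chunk_pos.0) && (0..4).contains(&chunk_pos.1)).then(|| 100)
}

fn generate_chunk(chunk_pos: (i32, i32)) -> Result<(Chunk, Supplement), ()> {
    let base_z = match base_z_of(chunk_pos) {
        Some(base_z) => base_z,
        // Requests just past the map edge still succeed; they simply get the
        // shared ocean placeholder instead of an error.
        None => return Ok((generate_oob_chunk(), Supplement::default())),
    };
    Ok((Chunk { base_z }, Supplement::default()))
}

fn main() {
    println!("{:?}", generate_chunk((1, 1)));
    println!("{:?}", generate_chunk((99, 0)));
}
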
@@ -47,7 +47,7 @@ use common::{
     store::Id,
     terrain::{
         map::MapConfig, uniform_idx_as_vec2, vec2_as_uniform_idx, BiomeKind, MapSizeLg,
-        TerrainChunkSize,
+        TerrainChunk, TerrainChunkSize,
     },
     vol::RectVolSize,
 };
@@ -67,6 +67,7 @@ use std::{
     io::{BufReader, BufWriter},
     ops::{Add, Div, Mul, Neg, Sub},
     path::PathBuf,
+    sync::Arc,
 };
 use tracing::{debug, warn};
 use vek::*;
@@ -1590,6 +1591,10 @@ impl WorldSim {

     pub fn get_size(&self) -> Vec2<u32> { self.map_size_lg().chunks().map(u32::from) }

+    pub fn generate_oob_chunk(&self) -> TerrainChunk {
+        TerrainChunk::water(CONFIG.sea_level as i32)
+    }
+
     /// Draw a map of the world based on chunk information. Returns a buffer of
     /// u32s.
     pub fn get_map(&self, index: IndexRef, calendar: Option<&Calendar>) -> WorldMapMsg {
@@ -1684,13 +1689,13 @@ impl WorldSim {
         );
         WorldMapMsg {
             dimensions_lg: self.map_size_lg().vec(),
-            sea_level: CONFIG.sea_level,
             max_height: self.max_height,
             rgba: Grid::from_raw(self.get_size().map(|e| e as i32), v),
             alt: Grid::from_raw(self.get_size().map(|e| e as i32), alts),
             horizons,
             sites: Vec::new(), // Will be substituted later
             pois: Vec::new(), // Will be substituted later
+            default_chunk: Arc::new(self.generate_oob_chunk()),
         }
     }

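
The map message now carries the shared default chunk itself rather than a bare `sea_level` float, presumably so the receiving side can seed its own terrain grid with the same out-of-bounds placeholder the server uses. A rough sketch of that message-shape change; the stub type, field values, and everything except `Arc` are illustrative only.

use std::sync::Arc;

#[derive(Debug)]
struct TerrainChunkStub {
    water_level: f32,
}

// Before: the receiver only learned the sea level and had to synthesise
// its own placeholder chunk.
#[derive(Debug)]
struct WorldMapMsgOld {
    sea_level: f32,
    max_height: f32,
}

// After: the placeholder chunk travels with the map message, so both sides
// agree on exactly what an out-of-bounds chunk looks like.
#[derive(Debug)]
struct WorldMapMsgNew {
    max_height: f32,
    default_chunk: Arc<TerrainChunkStub>,
}

fn main() {
    // Arbitrary example values.
    let old = WorldMapMsgOld { sea_level: 140.0, max_height: 1000.0 };
    let new = WorldMapMsgNew {
        max_height: old.max_height,
        default_chunk: Arc::new(TerrainChunkStub { water_level: old.sea_level }),
    };
    println!("{:?}", new);
}
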