Parallelize entity sync loop over regions.

This commit is contained in:
Imbris 2021-12-25 18:06:55 -05:00
parent 9788b144ec
commit 6547a6bf5e
7 changed files with 226 additions and 190 deletions

1
Cargo.lock generated
View File

@ -2620,6 +2620,7 @@ checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5"
dependencies = [
"autocfg",
"hashbrown 0.11.2",
"rayon",
"serde",
]

View File

@ -73,7 +73,7 @@ kiddo = { version = "0.1", optional = true }
# Data structures
hashbrown = { version = "0.11", features = ["rayon", "serde", "nightly"] }
slotmap = { version = "1.0", features = ["serde"] }
indexmap = "1.3.0"
indexmap = { version = "1.3.0", features = ["rayon"] }
slab = "0.4.2"
# ECS

View File

@ -342,16 +342,24 @@ impl RegionMap {
}
}
// Returns a region given a key
/// Returns a region given a key
///
/// Looks up the [`Region`] stored under `key`; returns `None` when no region
/// exists at that position. Does not modify the map.
pub fn get(&self, key: Vec2<i32>) -> Option<&Region> { self.regions.get(&key) }
// Returns an iterator of (Position, Region)
/// Returns an iterator over `(position, region)` pairs, one per region
/// currently stored in the map.
pub fn iter(&self) -> impl Iterator<Item = (Vec2<i32>, &Region)> {
    // Copy the key out of the map (keys are small `Copy` vectors) so the
    // caller gets an owned position alongside the borrowed region.
    self.regions.iter().map(|(&position, region)| (position, region))
}
/// Returns a parallel iterator of (Position, Region)
///
/// The iterator implements rayon's `IndexedParallelIterator`, which is what
/// rayon requires for position-aware adaptors such as `zip` and `enumerate`.
/// NOTE(review): relies on the backing map's rayon support (the `rayon`
/// feature of `indexmap` enabled in Cargo.toml).
pub fn par_iter(
&self,
) -> impl rayon::iter::IndexedParallelIterator<Item = (Vec2<i32>, &Region)> {
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
self.regions.par_iter().map(|(key, r)| (*key, r))
}
}
// Note vd is in blocks in this case
/// Note vd is in blocks in this case
pub fn region_in_vd(key: Vec2<i32>, pos: Vec3<f32>, vd: f32) -> bool {
let vd_extended = vd + TETHER_LENGTH as f32 * 2.0f32.sqrt();

View File

@ -136,7 +136,12 @@ impl Client {
}
}
pub(crate) fn send_fallible<M: Into<ServerMsg>>(&self, msg: M) { let _ = self.send(msg); }
/// Sends `msg` to this client, swallowing any stream error (e.g. a network
/// disconnection) — failures are handled elsewhere.
pub(crate) fn send_fallible<M: Into<ServerMsg>>(&self, msg: M) {
    // TODO: hack to avoid locking stream mutex while serializing the message,
    // remove this when the mutexes on the Streams are removed
    let _ = self.send_prepared(&self.prepare(msg));
}
pub(crate) fn send_prepared(&self, msg: &PreparedMsg) -> Result<(), StreamError> {
match msg.stream_id {

View File

@ -23,6 +23,7 @@ use vek::*;
/// This system will send physics updates to the client
#[derive(Default)]
pub struct Sys;
impl<'a> System<'a> for Sys {
#[allow(clippy::type_complexity)]
type SystemData = (
@ -105,9 +106,16 @@ impl<'a> System<'a> for Sys {
// 5. Inform clients of the component changes for that entity
// - Throttle update rate base on distance to each client
// Sync physics
// via iterating through regions
for (key, region) in region_map.iter() {
// Sync physics and other components
// via iterating through regions (in parallel)
use rayon::iter::ParallelIterator;
common_base::prof_span!(guard, "regions");
region_map.par_iter().for_each_init(
|| {
common_base::prof_span!(guard, "entity sync rayon job");
guard
},
|_guard, (key, region)| {
// Assemble subscriber list for this region by iterating through clients and
// checking if they are subscribed to this region
let mut subscribers = (
@ -130,7 +138,8 @@ impl<'a> System<'a> for Sys {
for event in region.events() {
match event {
RegionEvent::Entered(id, maybe_key) => {
// Don't process newly created entities here (redundant network messages)
// Don't process newly created entities here (redundant network
// messages)
if trackers.uid.inserted().contains(*id) {
continue;
}
@ -184,20 +193,22 @@ impl<'a> System<'a> for Sys {
&tracked_comps,
region.entities(),
deleted_entities
.take_deleted_in_region(key)
.get_deleted_in_region(key)
.cloned() // TODO: quick hack to make this parallel, we can avoid this
.unwrap_or_default(),
);
// We lazily initializethe the synchronization messages in case there are no
// We lazily initialize the synchronization messages in case there are no
// clients.
let mut entity_comp_sync = Either::Left((entity_sync_package, comp_sync_package));
for (client, _, _, _) in &mut subscribers {
let msg =
entity_comp_sync.right_or_else(|(entity_sync_package, comp_sync_package)| {
let msg = entity_comp_sync.right_or_else(
|(entity_sync_package, comp_sync_package)| {
(
client.prepare(ServerGeneral::EntitySync(entity_sync_package)),
client.prepare(ServerGeneral::CompSync(comp_sync_package)),
)
});
},
);
// We don't care much about stream errors here since they could just represent
// network disconnection, which is handled elsewhere.
let _ = client.send_prepared(&msg.0);
@ -224,18 +235,22 @@ impl<'a> System<'a> for Sys {
let send_now = if client_entity == &entity {
let player_physics_setting = players
.get(entity)
.and_then(|p| player_physics_settings.settings.get(&p.uuid()).copied())
.and_then(|p| {
player_physics_settings.settings.get(&p.uuid()).copied()
})
.unwrap_or_default();
// Don't send client physics updates about itself unless force update is set
// or the client is subject to server-authoritative physics
// Don't send client physics updates about itself unless force update is
// set or the client is subject to
// server-authoritative physics
force_update.is_some() || player_physics_setting.server_authoritative()
} else if matches!(collider, Some(Collider::Voxel { .. })) {
// Things with a voxel collider (airships, etc.) need to have very stable
// physics so we always send updated for these where
// we can.
// Things with a voxel collider (airships, etc.) need to have very
// stable physics so we always send updates
// for these where we can.
true
} else {
// Throttle update rates for all other entities based on distance to client
// Throttle update rates for all other entities based on distance to
// client
let distance_sq = client_pos.0.distance_squared(pos.0);
let id_staggered_tick = tick + entity.id() as u64;
@ -282,10 +297,20 @@ impl<'a> System<'a> for Sys {
client.send_fallible(ServerGeneral::CompSync(comp_sync_package));
}
},
);
drop(guard);
// TODO: this is a quick hack to make the loop over regions above parallel,
// there is probably a way to make it cleaner
//
// Remove deleted entities for each region that we already handled above
region_map
.iter()
.for_each(|(key, _)| drop(deleted_entities.take_deleted_in_region(key)));
// Update the last physics components for each entity
for (_, _, &pos, vel, ori, last_pos, last_vel, last_ori) in (
region.entities(),
for (_, &pos, vel, ori, last_pos, last_vel, last_ori) in (
&entities,
&positions,
velocities.maybe(),
@ -300,7 +325,6 @@ impl<'a> System<'a> for Sys {
vel.and_then(|&v| last_vel.replace(Last(v)));
ori.and_then(|&o| last_ori.replace(Last(o)));
}
}
// Handle entity deletion in regions that don't exist in RegionMap
// (theoretically none)

View File

@ -434,9 +434,7 @@ impl DeletedEntities {
self.map.remove(&key)
}
pub fn get_deleted_in_region(&mut self, key: Vec2<i32>) -> Option<&Vec<u64>> {
self.map.get(&key)
}
/// Returns the ids (`u64`, presumably entity `Uid`s — confirm with callers)
/// deleted in the region at `key`, if any, without removing them from the map.
pub fn get_deleted_in_region(&self, key: Vec2<i32>) -> Option<&Vec<u64>> { self.map.get(&key) }
pub fn take_remaining_deleted(&mut self) -> Vec<(Vec2<i32>, Vec<u64>)> {
// TODO: don't allocate

View File

@ -13,7 +13,7 @@ use common::{
use common_ecs::{Job, Origin, Phase, System};
use common_net::msg::ServerGeneral;
use specs::{
Entities, Join, ReadExpect, ReadStorage, SystemData, World, WorldExt, Write, WriteStorage,
Entities, Join, Read, ReadExpect, ReadStorage, SystemData, World, WorldExt, WriteStorage,
};
use tracing::{debug, error};
use vek::*;
@ -33,7 +33,7 @@ impl<'a> System<'a> for Sys {
ReadStorage<'a, Presence>,
ReadStorage<'a, Client>,
WriteStorage<'a, RegionSubscription>,
Write<'a, DeletedEntities>,
Read<'a, DeletedEntities>,
TrackedComps<'a>,
);
@ -54,7 +54,7 @@ impl<'a> System<'a> for Sys {
presences,
clients,
mut subscriptions,
mut deleted_entities,
deleted_entities,
tracked_comps,
): Self::SystemData,
) {