veloren/server/src/lib.rs

#![deny(unsafe_code)]
#![feature(drain_filter)]
pub mod auth_provider;
pub mod chunk_generator;
pub mod client;
pub mod cmd;
pub mod error;
pub mod events;
pub mod input;
pub mod metrics;
pub mod persistence;
pub mod settings;
pub mod state_ext;
pub mod sys;
#[cfg(not(feature = "worldgen"))] mod test_world;
// Reexports
pub use crate::{error::Error, events::Event, input::Input, settings::ServerSettings};
use crate::{
auth_provider::AuthProvider,
chunk_generator::ChunkGenerator,
client::{Client, RegionSubscription},
cmd::ChatCommandExt,
state_ext::StateExt,
sys::sentinel::{DeletedEntities, TrackedComps},
};
use common::{
cmd::ChatCommand,
comp,
event::{EventBus, ServerEvent},
msg::{ClientMsg, ClientState, ServerInfo, ServerMsg},
net::PostOffice,
state::{State, TimeOfDay},
sync::WorldSyncExt,
terrain::TerrainChunkSize,
vol::{ReadVol, RectVolSize},
};
use log::{debug, error};
use metrics::{ServerMetrics, TickMetrics};
use specs::{join::Join, Builder, Entity as EcsEntity, RunNow, SystemData, WorldExt};
use std::{
i32,
sync::Arc,
time::{Duration, Instant},
};
#[cfg(not(feature = "worldgen"))]
use test_world::{World, WORLD_SIZE};
use uvth::{ThreadPool, ThreadPoolBuilder};
use vek::*;
#[cfg(feature = "worldgen")]
use world::{
civ::SiteKind,
sim::{FileOpts, WorldOpts, DEFAULT_WORLD_MAP, WORLD_SIZE},
World,
};
#[macro_use] extern crate diesel;
#[macro_use] extern crate diesel_migrations;
const CLIENT_TIMEOUT: f64 = 20.0; // Seconds
#[derive(Copy, Clone)]
struct SpawnPoint(Vec3<f32>);
// Tick count used for throttling network updates
// Note this doesn't account for dt (so update rate changes with tick rate)
#[derive(Copy, Clone, Default)]
pub struct Tick(u64);
pub struct Server {
state: State,
world: Arc<World>,
map: Vec<u32>,
postoffice: PostOffice<ServerMsg, ClientMsg>,
thread_pool: ThreadPool,
server_info: ServerInfo,
metrics: ServerMetrics,
tick_metrics: TickMetrics,
server_settings: ServerSettings,
}
impl Server {
/// Create a new `Server`
pub fn new(settings: ServerSettings) -> Result<Self, Error> {
let mut state = State::default();
state.ecs_mut().insert(EventBus::<ServerEvent>::default());
state
.ecs_mut()
.insert(AuthProvider::new(settings.auth_server_address.clone()));
state.ecs_mut().insert(Tick(0));
state.ecs_mut().insert(ChunkGenerator::new());
state
.ecs_mut()
.insert(persistence::character::CharacterUpdater::new(
settings.persistence_db_dir.clone(),
));
state.ecs_mut().insert(crate::settings::PersistenceDBDir(
settings.persistence_db_dir.clone(),
));
// System timers for performance monitoring
state.ecs_mut().insert(sys::EntitySyncTimer::default());
state.ecs_mut().insert(sys::MessageTimer::default());
state.ecs_mut().insert(sys::SentinelTimer::default());
state.ecs_mut().insert(sys::SubscriptionTimer::default());
state.ecs_mut().insert(sys::TerrainSyncTimer::default());
state.ecs_mut().insert(sys::TerrainTimer::default());
state.ecs_mut().insert(sys::WaypointTimer::default());
state.ecs_mut().insert(sys::SpeechBubbleTimer::default());
state.ecs_mut().insert(sys::PersistenceTimer::default());
// System schedulers to control execution of systems
state
.ecs_mut()
.insert(sys::PersistenceScheduler::every(Duration::from_secs(10)));
// Server-only components
state.ecs_mut().register::<RegionSubscription>();
state.ecs_mut().register::<Client>();
#[cfg(feature = "worldgen")]
let world = World::generate(settings.world_seed, WorldOpts {
seed_elements: true,
world_file: if let Some(ref opts) = settings.map_file {
opts.clone()
} else {
// Load default map from assets.
FileOpts::LoadAsset(DEFAULT_WORLD_MAP.into())
},
..WorldOpts::default()
});
#[cfg(feature = "worldgen")]
let map = world.sim().get_map();
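// Without the `worldgen` feature, a stub test world and a single-value map are
// used instead, so the server can still be built and run (e.g. for tests).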
#[cfg(not(feature = "worldgen"))]
let world = World::generate(settings.world_seed);
#[cfg(not(feature = "worldgen"))]
let map = vec![0];
#[cfg(feature = "worldgen")]
let spawn_point = {
// NOTE: all of these `.map(|e| e as [type])` calls should compile into no-ops,
// but are needed to make the casts explicit (and to keep the compiler happy).
// Spawn in the chunk that is in the middle of the world.
let center_chunk: Vec2<i32> = WORLD_SIZE.map(|e| e as i32) / 2;
// Find a town to spawn in that's close to the centre of the world
let spawn_chunk = world
.civs()
.sites()
.filter(|site| matches!(site.kind, SiteKind::Settlement))
.map(|site| site.center)
.min_by_key(|site_pos| site_pos.distance_squared(center_chunk))
.unwrap_or(center_chunk);
// Calculate the absolute position of the chunk in the world
// (adding TerrainChunkSize::RECT_SIZE / 2 so we spawn in the middle of
// the chunk).
let spawn_location = spawn_chunk.map2(TerrainChunkSize::RECT_SIZE, |e, sz| {
e as i32 * sz as i32 + sz as i32 / 2
});
// Get a z cache for the column in which we want to spawn.
let mut block_sampler = world.sample_blocks();
let z_cache = block_sampler
.get_z_cache(spawn_location)
.unwrap_or_else(|| panic!("no z_cache found for chunk: {}", spawn_chunk));
// Get the minimum and maximum z values at which there could be solid blocks.
let (min_z, _, max_z) = z_cache.get_z_limits(&mut block_sampler);
// round range outwards, so no potential air block is missed
let min_z = min_z.floor() as i32;
let max_z = max_z.ceil() as i32;
// Loop over all blocks from min_z up to and including max_z + 1
// until the first air block is found
// (up to max_z + 1, because max_z could still be a solid block);
// if no air block is found, default to max_z + 1.
let z = (min_z..(max_z + 1) + 1)
.find(|z| {
block_sampler
.get_with_z_cache(
Vec3::new(spawn_location.x, spawn_location.y, *z),
Some(&z_cache),
false,
)
.map(|b| b.is_air())
.unwrap_or(false)
})
.unwrap_or(max_z + 1);
// build the actual spawn point and
// add 0.5, so that the player spawns in the middle of the block
Vec3::new(spawn_location.x, spawn_location.y, z).map(|e| (e as f32)) + 0.5
};
#[cfg(not(feature = "worldgen"))]
let spawn_point = Vec3::new(0.0, 0.0, 256.0);
// set the spawn point we calculated above
state.ecs_mut().insert(SpawnPoint(spawn_point));
// Set starting time for the server.
state.ecs_mut().write_resource::<TimeOfDay>().0 = settings.start_time;
// Register trackers
sys::sentinel::register_trackers(&mut state.ecs_mut());
state.ecs_mut().insert(DeletedEntities::default());
let mut metrics = ServerMetrics::new();
// register all metrics submodules here
let tick_metrics = TickMetrics::new(metrics.registry(), metrics.tick_clone())
.expect("Failed to initialize server tick metrics submodule.");
metrics
.run(settings.metrics_address)
.expect("Failed to initialize server metrics submodule.");
let this = Self {
state,
world: Arc::new(world),
map,
postoffice: PostOffice::bind(settings.gameserver_address)?,
thread_pool: ThreadPoolBuilder::new()
.name("veloren-worker".into())
.build(),
server_info: ServerInfo {
name: settings.server_name.clone(),
description: settings.server_description.clone(),
git_hash: common::util::GIT_HASH.to_string(),
git_date: common::util::GIT_DATE.to_string(),
auth_provider: settings.auth_server_address.clone(),
},
metrics,
tick_metrics,
server_settings: settings.clone(),
};
// Run pending DB migrations (if any)
debug!("Running DB migrations...");
if let Err(error) = persistence::run_migrations(&this.server_settings.persistence_db_dir) {
log::info!("Migration error: {:#?}", error);
}
debug!("created veloren server with: {:?}", &settings);
log::info!(
"Server version: {}[{}]",
*common::util::GIT_HASH,
*common::util::GIT_DATE
);
Ok(this)
}
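/// Replace the server's worker thread pool with an externally supplied one.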
pub fn with_thread_pool(mut self, thread_pool: ThreadPool) -> Self {
self.thread_pool = thread_pool;
self
}
/// Get a reference to the server's game state.
pub fn state(&self) -> &State { &self.state }
/// Get a mutable reference to the server's game state.
pub fn state_mut(&mut self) -> &mut State { &mut self.state }
/// Get a reference to the server's world.
pub fn world(&self) -> &World { &self.world }
/// Execute a single server tick, handle input and update the game state by
/// the given duration.
pub fn tick(&mut self, _input: Input, dt: Duration) -> Result<Vec<Event>, Error> {
self.state.ecs().write_resource::<Tick>().0 += 1;
// This tick function is the centre of the Veloren universe. Most server-side
// things are managed from here, and as such it's important that it
// stays organised. Please consult the core developers before making
// significant changes to this code. Here is the approximate order of
// things. Please update it as this code changes.
//
// 1) Collect input from the frontend, apply input effects to the
// state of the game
// 2) Go through any events (timer-driven or otherwise) that need handling
// and apply them to the state of the game
// 3) Go through all incoming client network communications, apply them to
// the game state
// 4) Perform a single LocalState tick (i.e: update the world and entities
// in the world)
// 5) Go through the terrain update queue and apply all changes to
// the terrain
// 6) Send relevant state updates to all clients
// 7) Update Metrics with current data
// 8) Finish the tick, passing control of the main thread back
// to the frontend
// 1) Build up a list of events for this frame, to be passed to the frontend.
let mut frontend_events = Vec::new();
// If networking has problems, handle them.
if let Some(err) = self.postoffice.error() {
return Err(err.into());
}
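// The `Instant`s captured between the phases below feed the `tick_time`
// metrics reported near the end of this function.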
// 2)
let before_new_connections = Instant::now();
// 3) Handle inputs from clients
frontend_events.append(&mut self.handle_new_connections()?);
let before_message_system = Instant::now();
// Run the message-receiving system before the systems in common for decreased latency
// (e.g. run before controller system)
sys::message::Sys.run_now(&self.state.ecs());
let before_state_tick = Instant::now();
// 4) Tick the server's LocalState.
// 5) Fetch any generated `TerrainChunk`s and insert them into the terrain.
// in sys/terrain.rs
self.state.tick(dt, sys::add_server_systems, false);
let before_handle_events = Instant::now();
// Handle game events
frontend_events.append(&mut self.handle_events());
let before_update_terrain_and_regions = Instant::now();
// Apply terrain changes and update the region map after processing server
// events so that changes made by server events will be immediately
// visible to client synchronization systems, minimizing the latency of
// `ServerEvent` mediated effects
self.state.update_region_map();
self.state.apply_terrain_changes();
let before_sync = Instant::now();
// 6) Synchronise clients with the new state of the world.
sys::run_sync_systems(self.state.ecs_mut());
let before_world_tick = Instant::now();
// Tick the world
self.world.tick(dt);
let before_entity_cleanup = Instant::now();
// Remove NPCs that are outside the view distances of all players
// This is done by removing NPCs in unloaded chunks
let to_delete = {
let terrain = self.state.terrain();
(
&self.state.ecs().entities(),
&self.state.ecs().read_storage::<comp::Pos>(),
!&self.state.ecs().read_storage::<comp::Player>(),
)
.join()
.filter(|(_, pos, _)| terrain.get(pos.0.map(|e| e.floor() as i32)).is_err())
.map(|(entity, _, _)| entity)
.collect::<Vec<_>>()
};
for entity in to_delete {
if let Err(err) = self.state.delete_entity_recorded(entity) {
error!("Failed to delete agent outside the terrain: {:?}", err);
}
}
let end_of_server_tick = Instant::now();
// 7) Update Metrics
// Get system timing info
let entity_sync_nanos = self
.state
.ecs()
.read_resource::<sys::EntitySyncTimer>()
.nanos as i64;
let message_nanos = self.state.ecs().read_resource::<sys::MessageTimer>().nanos as i64;
let sentinel_nanos = self.state.ecs().read_resource::<sys::SentinelTimer>().nanos as i64;
let subscription_nanos = self
.state
.ecs()
.read_resource::<sys::SubscriptionTimer>()
.nanos as i64;
let terrain_sync_nanos = self
.state
.ecs()
.read_resource::<sys::TerrainSyncTimer>()
.nanos as i64;
let terrain_nanos = self.state.ecs().read_resource::<sys::TerrainTimer>().nanos as i64;
let waypoint_nanos = self.state.ecs().read_resource::<sys::WaypointTimer>().nanos as i64;
let stats_persistence_nanos = self
.state
.ecs()
.read_resource::<sys::PersistenceTimer>()
.nanos as i64;
let total_sys_ran_in_dispatcher_nanos = terrain_nanos + waypoint_nanos;
// Report timing info
self.tick_metrics
.tick_time
.with_label_values(&["new connections"])
.set((before_message_system - before_new_connections).as_nanos() as i64);
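// The state tick metric subtracts the time already attributed to systems that
// ran inside the dispatcher (terrain and waypoint, summed above) to avoid
// counting that work twice.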
self.tick_metrics
.tick_time
.with_label_values(&["state tick"])
.set(
(before_handle_events - before_state_tick).as_nanos() as i64
- total_sys_ran_in_dispatcher_nanos,
);
self.tick_metrics
.tick_time
.with_label_values(&["handle server events"])
.set((before_update_terrain_and_regions - before_handle_events).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["update terrain and region map"])
.set((before_sync - before_update_terrain_and_regions).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["world tick"])
.set((before_entity_cleanup - before_world_tick).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["entity cleanup"])
.set((end_of_server_tick - before_entity_cleanup).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["entity sync"])
.set(entity_sync_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["message"])
.set(message_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["sentinel"])
.set(sentinel_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["subscription"])
.set(subscription_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["terrain sync"])
.set(terrain_sync_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["terrain"])
.set(terrain_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["waypoint"])
.set(waypoint_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["persistence:stats"])
.set(stats_persistence_nanos);
// Report other info
self.tick_metrics
.player_online
.set(self.state.ecs().read_storage::<Client>().join().count() as i64);
self.tick_metrics
.time_of_day
.set(self.state.ecs().read_resource::<TimeOfDay>().0);
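// Every 100th tick, walk the terrain to count loaded chonks and the sub-chunks
// they contain, and count all live ECS entities.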
if self.tick_metrics.is_100th_tick() {
let mut chonk_cnt = 0;
let chunk_cnt = self.state.terrain().iter().fold(0, |a, (_, c)| {
chonk_cnt += 1;
a + c.sub_chunks_len()
});
self.tick_metrics.chonks_count.set(chonk_cnt as i64);
self.tick_metrics.chunks_count.set(chunk_cnt as i64);
let entity_count = self.state.ecs().entities().join().count();
self.tick_metrics.entity_count.set(entity_count as i64);
}
//self.metrics.entity_count.set(self.state.);
self.tick_metrics
.tick_time
.with_label_values(&["metrics"])
.set(end_of_server_tick.elapsed().as_nanos() as i64);
self.metrics.tick();
// 8) Finish the tick, pass control back to the frontend.
Ok(frontend_events)
}
/// Clean up the server after a tick.
pub fn cleanup(&mut self) {
// Cleanup the local state
self.state.cleanup();
}
/// Handle new client connections.
fn handle_new_connections(&mut self) -> Result<Vec<Event>, Error> {
let mut frontend_events = Vec::new();
for postbox in self.postoffice.new_postboxes() {
let mut client = Client {
client_state: ClientState::Connected,
postbox,
last_ping: self.state.get_time(),
login_msg_sent: false,
};
if self.server_settings.max_players
<= self.state.ecs().read_storage::<Client>().join().count()
{
// Note: in this case the client is dropped
client.notify(ServerMsg::TooManyPlayers);
} else {
let entity = self
.state
.ecs_mut()
.create_entity_synced()
.with(client)
.build();
// Send client all the tracked components currently attached to its entity as
// well as synced resources (currently only `TimeOfDay`)
log::debug!("Starting initial sync with client.");
self.state
.ecs()
.write_storage::<Client>()
.get_mut(entity)
.unwrap()
.notify(ServerMsg::InitialSync {
// Send client their entity
entity_package: TrackedComps::fetch(&self.state.ecs())
.create_entity_package(entity, None, None, None),
server_info: self.server_info.clone(),
time_of_day: *self.state.ecs().read_resource(),
world_map: (WORLD_SIZE.map(|e| e as u32), self.map.clone()),
});
log::debug!("Done initial sync with client.");
frontend_events.push(Event::ClientConnected { entity });
}
}
Ok(frontend_events)
}
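/// Send a message to the given client entity, if it still has a `Client` component.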
pub fn notify_client(&self, entity: EcsEntity, msg: ServerMsg) {
if let Some(client) = self.state.ecs().write_storage::<Client>().get_mut(entity) {
client.notify(msg)
}
}
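/// Queue asynchronous generation of the chunk at `key` on behalf of the requesting `entity`.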
pub fn generate_chunk(&mut self, entity: EcsEntity, key: Vec2<i32>) {
self.state
.ecs()
.write_resource::<ChunkGenerator>()
.generate_chunk(entity, key, &mut self.thread_pool, self.world.clone());
}
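/// Parse a chat command of the form `keyword args...`, then execute it or notify
/// the client that the command is unknown.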
fn process_chat_cmd(&mut self, entity: EcsEntity, cmd: String) {
// Separate string into keyword and arguments.
let sep = cmd.find(' ');
let (kwd, args) = match sep {
Some(i) => (cmd[..i].to_string(), cmd[(i + 1)..].to_string()),
None => (cmd, "".to_string()),
};
// Find the command object and run its handler.
if let Ok(command) = kwd.parse::<ChatCommand>() {
command.execute(self, entity, args);
} else {
self.notify_client(
entity,
ServerMsg::private(format!(
"Unknown command '/{}'.\nType '/help' for available commands",
kwd
)),
);
}
}
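/// Check whether the given entity has the `Admin` component.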
fn entity_is_admin(&self, entity: EcsEntity) -> bool {
self.state
.read_storage::<comp::Admin>()
.get(entity)
.is_some()
}
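/// Current number of players online, as reported by the `player_online` gauge.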
pub fn number_of_players(&self) -> i64 { self.tick_metrics.player_online.get() }
}
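// Notify all registered clients of the shutdown before the server is torn down.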
impl Drop for Server {
fn drop(&mut self) { self.state.notify_registered_clients(ServerMsg::Shutdown); }
}