// veloren/server/src/lib.rs

#![deny(unsafe_code)]
#![allow(clippy::option_map_unit_fn)]
#![feature(bool_to_option, drain_filter, option_zip)]
pub mod alias_validator;
pub mod chunk_generator;
pub mod client;
pub mod cmd;
pub mod error;
pub mod events;
pub mod input;
pub mod login_provider;
pub mod metrics;
pub mod persistence;
pub mod settings;
pub mod state_ext;
pub mod sys;
#[cfg(not(feature = "worldgen"))] mod test_world;
// Reexports
pub use crate::{error::Error, events::Event, input::Input, settings::ServerSettings};
use crate::{
alias_validator::AliasValidator,
chunk_generator::ChunkGenerator,
client::{Client, RegionSubscription},
cmd::ChatCommandExt,
login_provider::LoginProvider,
state_ext::StateExt,
sys::sentinel::{DeletedEntities, TrackedComps},
};
use common::{
cmd::ChatCommand,
comp::{self, ChatType},
event::{EventBus, ServerEvent},
msg::{ClientState, ServerInfo, ServerMsg},
recipe::default_recipe_book,
state::{State, TimeOfDay},
sync::WorldSyncExt,
terrain::TerrainChunkSize,
vol::{ReadVol, RectVolSize},
};
use futures_executor::block_on;
use futures_timer::Delay;
use futures_util::{select, FutureExt};
use metrics::{ServerMetrics, TickMetrics};
use network::{Network, Pid, ProtocolAddr};
use persistence::character::{CharacterLoader, CharacterLoaderResponseType, CharacterUpdater};
use specs::{join::Join, Builder, Entity as EcsEntity, RunNow, SystemData, WorldExt};
use std::{
i32,
ops::{Deref, DerefMut},
sync::Arc,
time::{Duration, Instant},
};
#[cfg(not(feature = "worldgen"))]
use test_world::{World, WORLD_SIZE};
use tracing::{debug, error, info, warn};
use uvth::{ThreadPool, ThreadPoolBuilder};
use vek::*;
#[cfg(feature = "worldgen")]
use world::{
civ::SiteKind,
sim::{FileOpts, WorldOpts, DEFAULT_WORLD_MAP, WORLD_SIZE},
World,
};
#[macro_use] extern crate diesel;
#[macro_use] extern crate diesel_migrations;
const CLIENT_TIMEOUT: f64 = 20.0; // Seconds
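/// World position at which new characters spawn.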
#[derive(Copy, Clone)]
struct SpawnPoint(Vec3<f32>);
// Tick count used for throttling network updates
// Note this doesn't account for dt (so update rate changes with tick rate)
#[derive(Copy, Clone, Default)]
pub struct Tick(u64);
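/// The game server: wraps the ECS game state, the generated world and its map,
/// the network layer, the worker thread pool, and the metrics registries.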
pub struct Server {
state: State,
world: Arc<World>,
map: Vec<u32>,
network: Network,
thread_pool: ThreadPool,
metrics: ServerMetrics,
tick_metrics: TickMetrics,
}
impl Server {
/// Create a new `Server`
#[allow(clippy::expect_fun_call)] // TODO: Pending review in #587
#[allow(clippy::needless_update)] // TODO: Pending review in #587
pub fn new(settings: ServerSettings) -> Result<Self, Error> {
let mut state = State::default();
state.ecs_mut().insert(settings.clone());
state.ecs_mut().insert(EventBus::<ServerEvent>::default());
state
.ecs_mut()
.insert(LoginProvider::new(settings.auth_server_address.clone()));
state.ecs_mut().insert(Tick(0));
state.ecs_mut().insert(ChunkGenerator::new());
state
.ecs_mut()
.insert(CharacterUpdater::new(settings.persistence_db_dir.clone()));
state
.ecs_mut()
.insert(CharacterLoader::new(settings.persistence_db_dir.clone()));
state
.ecs_mut()
.insert(persistence::character::CharacterUpdater::new(
settings.persistence_db_dir.clone(),
));
state
.ecs_mut()
.insert(comp::AdminList(settings.admins.clone()));
// System timers for performance monitoring
state.ecs_mut().insert(sys::EntitySyncTimer::default());
state.ecs_mut().insert(sys::MessageTimer::default());
state.ecs_mut().insert(sys::SentinelTimer::default());
state.ecs_mut().insert(sys::SubscriptionTimer::default());
state.ecs_mut().insert(sys::TerrainSyncTimer::default());
state.ecs_mut().insert(sys::TerrainTimer::default());
state.ecs_mut().insert(sys::WaypointTimer::default());
state.ecs_mut().insert(sys::InviteTimeoutTimer::default());
state.ecs_mut().insert(sys::PersistenceTimer::default());
// System schedulers to control execution of systems
state
.ecs_mut()
.insert(sys::PersistenceScheduler::every(Duration::from_secs(10)));
// Server-only components
state.ecs_mut().register::<RegionSubscription>();
state.ecs_mut().register::<Client>();
// Alias validator
let banned_words_paths = &settings.banned_words_files;
let mut banned_words = Vec::new();
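// Each file is expected to deserialize (via RON) into a list of banned words,
// e.g. (illustrative) `["word1", "word2"]`.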
for path in banned_words_paths {
let mut list = match std::fs::File::open(&path) {
Ok(file) => match ron::de::from_reader(&file) {
Ok(vec) => vec,
Err(error) => {
tracing::warn!(?error, ?file, "Couldn't deserialize banned words file");
return Err(Error::Other(format!(
"Couldn't read banned words file \"{}\"",
path.to_string_lossy()
)));
},
},
Err(error) => {
tracing::warn!(?error, ?path, "Couldn't open banned words file");
return Err(Error::Other(format!(
"Couldn't open banned words file \"{}\". Error: {}",
path.to_string_lossy(),
error
)));
},
};
banned_words.append(&mut list);
}
let banned_words_count = banned_words.len();
tracing::debug!(?banned_words_count);
tracing::trace!(?banned_words);
state.ecs_mut().insert(AliasValidator::new(banned_words));
#[cfg(feature = "worldgen")]
let world = World::generate(settings.world_seed, WorldOpts {
seed_elements: true,
world_file: if let Some(ref opts) = settings.map_file {
opts.clone()
} else {
// Load default map from assets.
FileOpts::LoadAsset(DEFAULT_WORLD_MAP.into())
},
..WorldOpts::default()
});
#[cfg(feature = "worldgen")]
let map = world.sim().get_map();
#[cfg(not(feature = "worldgen"))]
let world = World::generate(settings.world_seed);
#[cfg(not(feature = "worldgen"))]
let map = vec![0];
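// Work out the default spawn point: pick the settlement closest to the centre
// of the map (falling back to the centre chunk itself), then scan upwards
// through that column until the first air block is found.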
#[cfg(feature = "worldgen")]
let spawn_point = {
// NOTE: all of these `.map(|e| e as [type])` calls should compile into no-ops,
// but are needed to make the casts explicit (and to keep the compiler from
// complaining)
// Spawn in the chunk that is in the middle of the world
let center_chunk: Vec2<i32> = WORLD_SIZE.map(|e| e as i32) / 2;
// Find a town to spawn in that's close to the centre of the world
let spawn_chunk = world
.civs()
.sites()
.filter(|site| matches!(site.kind, SiteKind::Settlement))
.map(|site| site.center)
.min_by_key(|site_pos| site_pos.distance_squared(center_chunk))
.unwrap_or(center_chunk);
// Calculate the absolute position of the chunk in the world
// (TerrainChunkSize::RECT_SIZE / 2 is added so that we spawn in the middle of
// the chunk rather than at its corner)
let spawn_location = spawn_chunk.map2(TerrainChunkSize::RECT_SIZE, |e, sz| {
e as i32 * sz as i32 + sz as i32 / 2
});
// Get a z cache for the column in which we want to spawn
let mut block_sampler = world.sample_blocks();
let z_cache = block_sampler
.get_z_cache(spawn_location)
.expect(&format!("no z_cache found for chunk: {}", spawn_chunk));
// Get the minimum and maximum z values at which there could be solid blocks
let (min_z, _, max_z) = z_cache.get_z_limits(&mut block_sampler);
// round range outwards, so no potential air block is missed
let min_z = min_z.floor() as i32;
let max_z = max_z.ceil() as i32;
// Loop over all blocks from min_z to max_z + 1
// until the first air block is found
// (up to max_z + 1, because max_z could still be a solid block);
// if no air block is found, default to max_z + 1
let z = (min_z..(max_z + 1) + 1)
.find(|z| {
block_sampler
.get_with_z_cache(
Vec3::new(spawn_location.x, spawn_location.y, *z),
Some(&z_cache),
false,
)
.map(|b| b.is_air())
.unwrap_or(false)
})
.unwrap_or(max_z + 1);
// build the actual spawn point and
// add 0.5, so that the player spawns in the middle of the block
Vec3::new(spawn_location.x, spawn_location.y, z).map(|e| (e as f32)) + 0.5
};
#[cfg(not(feature = "worldgen"))]
let spawn_point = Vec3::new(0.0, 0.0, 256.0);
// set the spawn point we calculated above
state.ecs_mut().insert(SpawnPoint(spawn_point));
// Set starting time for the server.
state.ecs_mut().write_resource::<TimeOfDay>().0 = settings.start_time;
// Register trackers
sys::sentinel::register_trackers(&mut state.ecs_mut());
state.ecs_mut().insert(DeletedEntities::default());
let mut metrics = ServerMetrics::new();
// register all metrics submodules here
let tick_metrics = TickMetrics::new(metrics.registry(), metrics.tick_clone())
.expect("Failed to initialize server tick metrics submodule.");
metrics
.run(settings.metrics_address)
.expect("Failed to initialize server metrics submodule.");
let thread_pool = ThreadPoolBuilder::new()
.name("veloren-worker".to_string())
.build();
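// Create the network layer: its internal runner is executed on the worker
// pool, then we start listening for clients on the configured TCP address.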
let (network, f) = Network::new(Pid::new());
thread_pool.execute(f);
block_on(network.listen(ProtocolAddr::Tcp(settings.gameserver_address)))?;
let this = Self {
state,
world: Arc::new(world),
map,
network,
thread_pool,
metrics,
tick_metrics,
};
// Run pending DB migrations (if any)
debug!("Running DB migrations...");
if let Some(e) = persistence::run_migrations(&settings.persistence_db_dir).err() {
info!(?e, "Migration error");
}
debug!(?settings, "created veloren server with");
let git_hash = *common::util::GIT_HASH;
let git_date = *common::util::GIT_DATE;
info!(?git_hash, ?git_date, "Server version",);
Ok(this)
}
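/// Build the `ServerInfo` sent to clients during connection, from the current
/// settings and the build's git metadata.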
pub fn get_server_info(&self) -> ServerInfo {
let settings = self.state.ecs().fetch::<ServerSettings>();
ServerInfo {
name: settings.server_name.clone(),
description: settings.server_description.clone(),
git_hash: common::util::GIT_HASH.to_string(),
git_date: common::util::GIT_DATE.to_string(),
auth_provider: settings.auth_server_address.clone(),
}
}
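/// Replace the server's worker thread pool (builder-style), e.g. so a frontend
/// can share its own pool with the server.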
pub fn with_thread_pool(mut self, thread_pool: ThreadPool) -> Self {
self.thread_pool = thread_pool;
self
}
/// Get a reference to the server's settings
pub fn settings(&self) -> impl Deref<Target = ServerSettings> + '_ {
self.state.ecs().fetch::<ServerSettings>()
}
/// Get a mutable reference to the server's settings
pub fn settings_mut(&mut self) -> impl DerefMut<Target = ServerSettings> + '_ {
self.state.ecs_mut().fetch_mut::<ServerSettings>()
}
/// Get a reference to the server's game state.
pub fn state(&self) -> &State { &self.state }
/// Get a mutable reference to the server's game state.
pub fn state_mut(&mut self) -> &mut State { &mut self.state }
/// Get a reference to the server's world.
pub fn world(&self) -> &World { &self.world }
/// Execute a single server tick, handle input and update the game state by
/// the given duration.
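///
/// Returns the frontend events generated during this tick. A frontend is
/// expected to call this every frame and follow it with `cleanup`; a minimal
/// sketch (assuming `Input` implements `Default`):
///
/// ```ignore
/// let events = server.tick(Input::default(), dt)?;
/// server.cleanup();
/// ```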
pub fn tick(&mut self, _input: Input, dt: Duration) -> Result<Vec<Event>, Error> {
self.state.ecs().write_resource::<Tick>().0 += 1;
// This tick function is the centre of the Veloren universe. Most server-side
// things are managed from here, and as such it's important that it
// stays organised. Please consult the core developers before making
// significant changes to this code. Here is the approximate order of
// things. Please update it as this code changes.
//
// 1) Collect input from the frontend, apply input effects to the
// state of the game
// 2) Go through any events (timer-driven or otherwise) that need handling
// and apply them to the state of the game
// 3) Go through all incoming client network communications, apply them to
// the game state
// 4) Perform a single LocalState tick (i.e: update the world and entities
// in the world)
// 5) Go through the terrain update queue and apply all changes to
// the terrain
// 6) Send relevant state updates to all clients
// 7) Check for persistence updates related to character data, and message the
// relevant entities
// 8) Update Metrics with current data
// 9) Finish the tick, passing control of the main thread back
// to the frontend
// 1) Build up a list of events for this frame, to be passed to the frontend.
let mut frontend_events = Vec::new();
// 2)
let before_new_connections = Instant::now();
// 3) Handle inputs from clients
block_on(self.handle_new_connections(&mut frontend_events))?;
let before_message_system = Instant::now();
// Run message receiving sys before the systems in common for decreased latency
// (e.g. run before controller system)
sys::message::Sys.run_now(&self.state.ecs());
let before_state_tick = Instant::now();
// 4) Tick the server's LocalState.
// 5) Fetch any generated `TerrainChunk`s and insert them into the terrain.
// in sys/terrain.rs
self.state.tick(dt, sys::add_server_systems, false);
let before_handle_events = Instant::now();
// Handle game events
frontend_events.append(&mut self.handle_events());
let before_update_terrain_and_regions = Instant::now();
// Apply terrain changes and update the region map after processing server
// events so that changes made by server events will be immediately
// visible to client synchronization systems, minimizing the latency of
// `ServerEvent` mediated effects
self.state.update_region_map();
self.state.apply_terrain_changes();
let before_sync = Instant::now();
// 6) Synchronise clients with the new state of the world.
sys::run_sync_systems(self.state.ecs_mut());
let before_world_tick = Instant::now();
// Tick the world
self.world.tick(dt);
let before_entity_cleanup = Instant::now();
// Remove NPCs that are outside the view distances of all players
// This is done by removing NPCs in unloaded chunks
let to_delete = {
let terrain = self.state.terrain();
(
&self.state.ecs().entities(),
&self.state.ecs().read_storage::<comp::Pos>(),
!&self.state.ecs().read_storage::<comp::Player>(),
)
.join()
.filter(|(_, pos, _)| terrain.get(pos.0.map(|e| e.floor() as i32)).is_err())
.map(|(entity, _, _)| entity)
.collect::<Vec<_>>()
};
for entity in to_delete {
if let Err(e) = self.state.delete_entity_recorded(entity) {
error!(?e, "Failed to delete agent outside the terrain");
}
}
// 7) Persistence updates
let before_persistence_updates = Instant::now();
// Get character-related database responses and notify the requesting client
self.state
.ecs()
.read_resource::<persistence::character::CharacterLoader>()
.messages()
.for_each(|query_result| match query_result.result {
CharacterLoaderResponseType::CharacterList(result) => match result {
Ok(character_list_data) => self.notify_client(
query_result.entity,
ServerMsg::CharacterListUpdate(character_list_data),
),
Err(error) => self.notify_client(
query_result.entity,
ServerMsg::CharacterActionError(error.to_string()),
),
},
CharacterLoaderResponseType::CharacterData(result) => {
let message = match *result {
Ok(character_data) => ServerEvent::UpdateCharacterData {
entity: query_result.entity,
components: character_data,
},
Err(error) => {
// We failed to load data for the character from the DB. Notify the
// client to push the state back to character selection, with the error
// to display
self.notify_client(
query_result.entity,
ServerMsg::CharacterDataLoadError(error.to_string()),
);
// Clean up the entity data on the server
ServerEvent::ExitIngame {
entity: query_result.entity,
}
},
};
self.state
.ecs()
.read_resource::<EventBus<ServerEvent>>()
.emit_now(message);
},
});
let end_of_server_tick = Instant::now();
// 8) Update Metrics
// Get system timing info
let entity_sync_nanos = self
.state
.ecs()
.read_resource::<sys::EntitySyncTimer>()
.nanos as i64;
let message_nanos = self.state.ecs().read_resource::<sys::MessageTimer>().nanos as i64;
let sentinel_nanos = self.state.ecs().read_resource::<sys::SentinelTimer>().nanos as i64;
let subscription_nanos = self
.state
.ecs()
.read_resource::<sys::SubscriptionTimer>()
.nanos as i64;
let terrain_sync_nanos = self
.state
.ecs()
.read_resource::<sys::TerrainSyncTimer>()
.nanos as i64;
let terrain_nanos = self.state.ecs().read_resource::<sys::TerrainTimer>().nanos as i64;
let waypoint_nanos = self.state.ecs().read_resource::<sys::WaypointTimer>().nanos as i64;
let invite_timeout_nanos = self.state.ecs().read_resource::<sys::InviteTimeoutTimer>().nanos as i64;
let stats_persistence_nanos = self
.state
.ecs()
.read_resource::<sys::PersistenceTimer>()
.nanos as i64;
let total_sys_ran_in_dispatcher_nanos = terrain_nanos + waypoint_nanos + invite_timeout_nanos;
// Report timing info
self.tick_metrics
.tick_time
.with_label_values(&["new connections"])
.set((before_message_system - before_new_connections).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["state tick"])
.set(
(before_handle_events - before_state_tick).as_nanos() as i64
- total_sys_ran_in_dispatcher_nanos,
);
self.tick_metrics
.tick_time
.with_label_values(&["handle server events"])
.set((before_update_terrain_and_regions - before_handle_events).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["update terrain and region map"])
.set((before_sync - before_update_terrain_and_regions).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["world tick"])
.set((before_entity_cleanup - before_world_tick).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["entity cleanup"])
.set((before_persistence_updates - before_entity_cleanup).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["persistence_updates"])
.set((end_of_server_tick - before_persistence_updates).as_nanos() as i64);
self.tick_metrics
.tick_time
.with_label_values(&["entity sync"])
.set(entity_sync_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["message"])
.set(message_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["sentinel"])
.set(sentinel_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["subscription"])
.set(subscription_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["terrain sync"])
.set(terrain_sync_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["terrain"])
.set(terrain_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["waypoint"])
.set(waypoint_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["invite timeout"])
.set(invite_timeout_nanos);
self.tick_metrics
.tick_time
.with_label_values(&["persistence:stats"])
.set(stats_persistence_nanos);
// Report other info
self.tick_metrics
.player_online
.set(self.state.ecs().read_storage::<Client>().join().count() as i64);
self.tick_metrics
.time_of_day
.set(self.state.ecs().read_resource::<TimeOfDay>().0);
if self.tick_metrics.is_100th_tick() {
let mut chonk_cnt = 0;
let chunk_cnt = self.state.terrain().iter().fold(0, |a, (_, c)| {
chonk_cnt += 1;
a + c.sub_chunks_len()
});
self.tick_metrics.chonks_count.set(chonk_cnt as i64);
self.tick_metrics.chunks_count.set(chunk_cnt as i64);
let entity_count = self.state.ecs().entities().join().count();
self.tick_metrics.entity_count.set(entity_count as i64);
}
//self.metrics.entity_count.set(self.state.);
self.tick_metrics
.tick_time
.with_label_values(&["metrics"])
.set(end_of_server_tick.elapsed().as_nanos() as i64);
self.metrics.tick();
// 9) Finish the tick, pass control back to the frontend.
Ok(frontend_events)
}
/// Clean up the server after a tick.
pub fn cleanup(&mut self) {
// Cleanup the local state
self.state.cleanup();
}
/// Handle new client connections.
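///
/// Accepts any participants the network has queued up, waiting at most a
/// short timeout per step so the tick is not stalled, and registers each one
/// as a `Client` entity (or rejects it when the server is full).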
async fn handle_new_connections(
&mut self,
frontend_events: &mut Vec<Event>,
) -> Result<(), Error> {
// Timeout of 0.1 ms for message handling
const TIMEOUT: Duration = Duration::from_micros(100);
loop {
let participant = match select!(
_ = Delay::new(TIMEOUT).fuse() => None,
pr = self.network.connected().fuse() => Some(pr),
) {
None => return Ok(()),
Some(pr) => pr?,
};
debug!("New Participant connected to the server");
let singleton_stream = match select!(
_ = Delay::new(TIMEOUT*100).fuse() => None,
sr = participant.opened().fuse() => Some(sr),
) {
None => {
warn!("Either Slowloris attack or very slow client, dropping");
return Ok(()); // Return rather than continue, to give pending removals one more tick to send data.
},
Some(Ok(s)) => s,
Some(Err(e)) => {
warn!(?e, "Failed to open a Stream from remote client. dropping");
continue;
},
};
let mut client = Client {
client_state: ClientState::Connected,
participant: std::sync::Mutex::new(Some(participant)),
singleton_stream,
network_error: std::sync::atomic::AtomicBool::new(false),
last_ping: self.state.get_time(),
login_msg_sent: false,
};
if self.settings().max_players
<= self.state.ecs().read_storage::<Client>().join().count()
{
// Note: in this case the client is dropped
client.notify(ServerMsg::TooManyPlayers);
} else {
let entity = self
.state
.ecs_mut()
.create_entity_synced()
.with(client)
.build();
// Send client all the tracked components currently attached to its entity as
// well as synced resources (currently only `TimeOfDay`)
debug!("Starting initial sync with client.");
self.state
.ecs()
.write_storage::<Client>()
.get_mut(entity)
.unwrap()
.notify(ServerMsg::InitialSync {
// Send client their entity
entity_package: TrackedComps::fetch(&self.state.ecs())
.create_entity_package(entity, None, None, None),
server_info: self.get_server_info(),
time_of_day: *self.state.ecs().read_resource(),
max_group_size: self.settings().max_player_group_size,
world_map: (WORLD_SIZE.map(|e| e as u32), self.map.clone()),
recipe_book: (&*default_recipe_book()).clone(),
});
frontend_events.push(Event::ClientConnected { entity });
debug!("Done initial sync with client.");
}
}
}
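/// Send a message to the given entity's client, if it still has a `Client`
/// component attached.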
pub fn notify_client<S>(&self, entity: EcsEntity, msg: S)
where
S: Into<ServerMsg>,
{
if let Some(client) = self.state.ecs().write_storage::<Client>().get_mut(entity) {
client.notify(msg.into())
}
}
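/// Queue generation of the chunk at `key` on the worker thread pool, recording
/// `entity` as the requester.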
pub fn generate_chunk(&mut self, entity: EcsEntity, key: Vec2<i32>) {
self.state
.ecs()
.write_resource::<ChunkGenerator>()
.generate_chunk(entity, key, &mut self.thread_pool, self.world.clone());
}
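/// Handle a chat command: split the message into keyword and arguments, then
/// run the matching `ChatCommand` or report an error to the client.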
fn process_chat_cmd(&mut self, entity: EcsEntity, cmd: String) {
// Separate string into keyword and arguments.
let sep = cmd.find(' ');
let (kwd, args) = match sep {
Some(i) => (cmd[..i].to_string(), cmd[(i + 1)..].to_string()),
None => (cmd, "".to_string()),
};
// Find the command object and run its handler.
if let Ok(command) = kwd.parse::<ChatCommand>() {
command.execute(self, entity, args);
} else {
self.notify_client(
entity,
ChatType::CommandError.server_msg(format!(
"Unknown command '/{}'.\nType '/help' for available commands",
kwd
)),
);
}
}
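/// Returns true if the given entity has the `Admin` component.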
fn entity_is_admin(&self, entity: EcsEntity) -> bool {
self.state
.read_storage::<comp::Admin>()
.get(entity)
.is_some()
}
pub fn number_of_players(&self) -> i64 { self.tick_metrics.player_online.get() }
}
impl Drop for Server {
fn drop(&mut self) { self.state.notify_registered_clients(ServerMsg::Shutdown); }
}