2019-08-19 12:39:23 +00:00
|
|
|
#![deny(unsafe_code)]
|
2019-10-03 21:28:17 +00:00
|
|
|
#![feature(drain_filter)]
|
2019-01-02 17:23:31 +00:00
|
|
|
|
2019-08-08 22:24:14 +00:00
|
|
|
pub mod auth_provider;
|
2019-10-20 05:19:50 +00:00
|
|
|
pub mod chunk_generator;
|
2019-03-03 22:02:38 +00:00
|
|
|
pub mod client;
|
2019-04-23 09:53:45 +00:00
|
|
|
pub mod cmd;
|
2019-03-03 22:02:38 +00:00
|
|
|
pub mod error;
|
2020-02-16 20:04:06 +00:00
|
|
|
pub mod events;
|
2019-03-03 22:02:38 +00:00
|
|
|
pub mod input;
|
2019-09-06 14:21:09 +00:00
|
|
|
pub mod metrics;
|
2020-05-09 15:41:25 +00:00
|
|
|
pub mod persistence;
|
2019-09-07 13:10:57 +00:00
|
|
|
pub mod settings;
|
2020-03-10 02:27:32 +00:00
|
|
|
pub mod state_ext;
|
2019-10-15 04:06:14 +00:00
|
|
|
pub mod sys;
|
2020-02-01 20:39:39 +00:00
|
|
|
#[cfg(not(feature = "worldgen"))] mod test_world;
|
2019-03-03 22:02:38 +00:00
|
|
|
|
|
|
|
// Reexports
|
2020-02-16 20:04:06 +00:00
|
|
|
pub use crate::{error::Error, events::Event, input::Input, settings::ServerSettings};
|
2019-03-03 22:02:38 +00:00
|
|
|
|
2019-05-09 17:58:16 +00:00
|
|
|
use crate::{
|
2019-08-08 22:24:14 +00:00
|
|
|
auth_provider::AuthProvider,
|
2019-10-20 05:19:50 +00:00
|
|
|
chunk_generator::ChunkGenerator,
|
2019-10-15 04:06:14 +00:00
|
|
|
client::{Client, RegionSubscription},
|
2020-05-05 22:33:16 +00:00
|
|
|
cmd::ChatCommandExt,
|
2020-03-10 02:27:32 +00:00
|
|
|
state_ext::StateExt,
|
2019-12-18 05:22:52 +00:00
|
|
|
sys::sentinel::{DeletedEntities, TrackedComps},
|
2019-05-06 14:26:10 +00:00
|
|
|
};
|
2019-03-03 22:02:38 +00:00
|
|
|
use common::{
|
2020-05-05 22:33:16 +00:00
|
|
|
cmd::ChatCommand,
|
2020-03-10 02:27:32 +00:00
|
|
|
comp,
|
2019-08-25 14:49:54 +00:00
|
|
|
event::{EventBus, ServerEvent},
|
2020-01-07 06:27:18 +00:00
|
|
|
msg::{ClientMsg, ClientState, ServerInfo, ServerMsg},
|
2019-03-03 22:02:38 +00:00
|
|
|
net::PostOffice,
|
2020-02-16 20:04:06 +00:00
|
|
|
state::{State, TimeOfDay},
|
2020-03-10 02:27:32 +00:00
|
|
|
sync::WorldSyncExt,
|
2020-02-16 20:04:06 +00:00
|
|
|
terrain::TerrainChunkSize,
|
|
|
|
vol::{ReadVol, RectVolSize},
|
2019-03-03 22:02:38 +00:00
|
|
|
};
|
2020-03-10 02:27:32 +00:00
|
|
|
use log::{debug, error};
|
2020-02-19 17:08:57 +00:00
|
|
|
use metrics::{ServerMetrics, TickMetrics};
|
2020-03-10 02:27:32 +00:00
|
|
|
use specs::{join::Join, Builder, Entity as EcsEntity, RunNow, SystemData, WorldExt};
|
2019-09-07 13:10:57 +00:00
|
|
|
use std::{
|
|
|
|
i32,
|
2019-10-20 05:19:50 +00:00
|
|
|
sync::Arc,
|
2019-09-07 13:10:57 +00:00
|
|
|
time::{Duration, Instant},
|
|
|
|
};
|
2020-01-23 14:10:49 +00:00
|
|
|
#[cfg(not(feature = "worldgen"))]
|
|
|
|
use test_world::{World, WORLD_SIZE};
|
2019-07-12 16:30:15 +00:00
|
|
|
use uvth::{ThreadPool, ThreadPoolBuilder};
|
2019-05-09 17:58:16 +00:00
|
|
|
use vek::*;
|
2020-01-22 03:12:17 +00:00
|
|
|
#[cfg(feature = "worldgen")]
|
2020-01-11 20:38:30 +00:00
|
|
|
use world::{
|
2020-04-23 16:00:48 +00:00
|
|
|
civ::SiteKind,
|
2020-01-20 18:19:06 +00:00
|
|
|
sim::{FileOpts, WorldOpts, DEFAULT_WORLD_MAP, WORLD_SIZE},
|
2020-01-11 20:38:30 +00:00
|
|
|
World,
|
|
|
|
};
|
2020-01-22 03:12:17 +00:00
|
|
|
|
2020-05-09 15:41:25 +00:00
|
|
|
#[macro_use] extern crate diesel;
|
|
|
|
#[macro_use] extern crate diesel_migrations;
|
|
|
|
|
2019-04-17 14:46:04 +00:00
|
|
|
// Presumably the client inactivity timeout (compared against `Client::last_ping`)
// — use site is outside this chunk; confirm before relying on it.
const CLIENT_TIMEOUT: f64 = 20.0; // Seconds
|
2019-01-02 17:23:31 +00:00
|
|
|
|
2019-05-21 22:04:39 +00:00
|
|
|
/// ECS resource holding the world-space position at which newly created
/// entities spawn (computed in `Server::new`, read via `SpawnPoint` lookups).
#[derive(Copy, Clone)]
struct SpawnPoint(Vec3<f32>);
|
|
|
|
|
2019-10-15 04:06:14 +00:00
|
|
|
// Tick count used for throttling network updates.
// Note this doesn't account for dt (so update rate changes with tick rate);
// incremented once at the top of `Server::tick`.
#[derive(Copy, Clone, Default)]
pub struct Tick(u64);
|
|
|
|
|
2019-01-02 17:23:31 +00:00
|
|
|
/// The game server: owns the full ECS game state, the generated world,
/// client networking, and metrics for a single running instance.
pub struct Server {
    // Full ECS game state (entities, components, resources).
    state: State,
    // Shared handle to the generated world (terrain sampling, civs, sites).
    world: Arc<World>,
    // Pre-rendered world-map pixels (one `u32` per cell) sent to clients.
    map: Vec<u32>,

    // Accepts new client connections and surfaces network-level errors.
    postoffice: PostOffice<ServerMsg, ClientMsg>,

    // Worker pool ("veloren-worker" threads) for background jobs.
    thread_pool: ThreadPool,

    // Static server info (name, description, git hash/date, auth provider)
    // advertised to connecting clients.
    server_info: ServerInfo,
    // Top-level metrics registry/endpoint; `metrics.tick()` runs once per tick.
    metrics: ServerMetrics,
    // Per-tick timing gauges and counts (system timings, players online, ...).
    tick_metrics: TickMetrics,

    // Copy of the settings the server was started with.
    server_settings: ServerSettings,
}
|
|
|
|
|
|
|
|
impl Server {
|
2019-10-11 12:19:55 +00:00
|
|
|
/// Create a new `Server`
|
2020-06-10 19:47:36 +00:00
|
|
|
#[allow(clippy::expect_fun_call)] // TODO: Pending review in #587
|
|
|
|
#[allow(clippy::needless_update)] // TODO: Pending review in #587
|
2019-06-29 16:41:26 +00:00
|
|
|
pub fn new(settings: ServerSettings) -> Result<Self, Error> {
|
2019-07-01 20:42:43 +00:00
|
|
|
let mut state = State::default();
|
2019-11-30 06:41:20 +00:00
|
|
|
state.ecs_mut().insert(EventBus::<ServerEvent>::default());
|
2019-12-21 17:02:39 +00:00
|
|
|
state
|
|
|
|
.ecs_mut()
|
|
|
|
.insert(AuthProvider::new(settings.auth_server_address.clone()));
|
2019-11-30 06:41:20 +00:00
|
|
|
state.ecs_mut().insert(Tick(0));
|
|
|
|
state.ecs_mut().insert(ChunkGenerator::new());
|
2020-06-01 21:34:52 +00:00
|
|
|
state
|
|
|
|
.ecs_mut()
|
|
|
|
.insert(persistence::character::CharacterUpdater::new(
|
|
|
|
settings.persistence_db_dir.clone(),
|
|
|
|
));
|
2020-05-15 20:03:51 +00:00
|
|
|
state.ecs_mut().insert(crate::settings::PersistenceDBDir(
|
|
|
|
settings.persistence_db_dir.clone(),
|
|
|
|
));
|
2020-04-25 13:41:27 +00:00
|
|
|
|
2019-11-04 00:57:36 +00:00
|
|
|
// System timers for performance monitoring
|
2019-11-30 06:41:20 +00:00
|
|
|
state.ecs_mut().insert(sys::EntitySyncTimer::default());
|
|
|
|
state.ecs_mut().insert(sys::MessageTimer::default());
|
|
|
|
state.ecs_mut().insert(sys::SentinelTimer::default());
|
|
|
|
state.ecs_mut().insert(sys::SubscriptionTimer::default());
|
|
|
|
state.ecs_mut().insert(sys::TerrainSyncTimer::default());
|
|
|
|
state.ecs_mut().insert(sys::TerrainTimer::default());
|
2020-03-09 03:32:34 +00:00
|
|
|
state.ecs_mut().insert(sys::WaypointTimer::default());
|
2020-05-24 22:18:41 +00:00
|
|
|
state.ecs_mut().insert(sys::SpeechBubbleTimer::default());
|
2020-06-01 21:34:52 +00:00
|
|
|
state.ecs_mut().insert(sys::PersistenceTimer::default());
|
2020-04-25 13:41:27 +00:00
|
|
|
|
|
|
|
// System schedulers to control execution of systems
|
|
|
|
state
|
|
|
|
.ecs_mut()
|
2020-06-01 21:34:52 +00:00
|
|
|
.insert(sys::PersistenceScheduler::every(Duration::from_secs(10)));
|
2020-04-25 13:41:27 +00:00
|
|
|
|
2019-10-20 07:20:21 +00:00
|
|
|
// Server-only components
|
2019-10-06 17:35:47 +00:00
|
|
|
state.ecs_mut().register::<RegionSubscription>();
|
2019-10-15 04:06:14 +00:00
|
|
|
state.ecs_mut().register::<Client>();
|
2019-04-16 21:06:33 +00:00
|
|
|
|
2020-01-22 03:12:17 +00:00
|
|
|
#[cfg(feature = "worldgen")]
|
2020-02-01 20:39:39 +00:00
|
|
|
let world = World::generate(settings.world_seed, WorldOpts {
|
|
|
|
seed_elements: true,
|
|
|
|
world_file: if let Some(ref opts) = settings.map_file {
|
|
|
|
opts.clone()
|
|
|
|
} else {
|
|
|
|
// Load default map from assets.
|
|
|
|
FileOpts::LoadAsset(DEFAULT_WORLD_MAP.into())
|
2019-12-11 09:14:50 +00:00
|
|
|
},
|
2020-02-01 20:39:39 +00:00
|
|
|
..WorldOpts::default()
|
|
|
|
});
|
2020-01-22 03:12:17 +00:00
|
|
|
#[cfg(feature = "worldgen")]
|
2020-01-13 07:10:38 +00:00
|
|
|
let map = world.sim().get_map();
|
2019-11-01 00:24:18 +00:00
|
|
|
|
2020-01-22 03:12:17 +00:00
|
|
|
#[cfg(not(feature = "worldgen"))]
|
|
|
|
let world = World::generate(settings.world_seed);
|
2020-01-24 21:24:57 +00:00
|
|
|
#[cfg(not(feature = "worldgen"))]
|
2020-01-24 10:40:52 +00:00
|
|
|
let map = vec![0];
|
2020-01-22 03:12:17 +00:00
|
|
|
|
|
|
|
#[cfg(feature = "worldgen")]
|
2019-11-01 00:24:18 +00:00
|
|
|
let spawn_point = {
|
|
|
|
// NOTE: all of these `.map(|e| e as [type])` calls should compile into no-ops,
|
2020-02-01 20:39:39 +00:00
|
|
|
// but are needed to be explicit about casting (and to make the compiler stop
|
|
|
|
// complaining)
|
2019-11-01 00:24:18 +00:00
|
|
|
|
|
|
|
// spawn in the chunk, that is in the middle of the world
|
2020-04-23 14:30:19 +00:00
|
|
|
let center_chunk: Vec2<i32> = WORLD_SIZE.map(|e| e as i32) / 2;
|
|
|
|
|
|
|
|
// Find a town to spawn in that's close to the centre of the world
|
|
|
|
let spawn_chunk = world
|
|
|
|
.civs()
|
|
|
|
.sites()
|
|
|
|
.filter(|site| matches!(site.kind, SiteKind::Settlement))
|
|
|
|
.map(|site| site.center)
|
|
|
|
.min_by_key(|site_pos| site_pos.distance_squared(center_chunk))
|
|
|
|
.unwrap_or(center_chunk);
|
|
|
|
|
2019-11-01 00:24:18 +00:00
|
|
|
// calculate the absolute position of the chunk in the world
|
2020-02-01 20:39:39 +00:00
|
|
|
// (we could add TerrainChunkSize::RECT_SIZE / 2 here, to spawn in the midde of
|
|
|
|
// the chunk)
|
2020-04-23 16:00:48 +00:00
|
|
|
let spawn_location = spawn_chunk.map2(TerrainChunkSize::RECT_SIZE, |e, sz| {
|
|
|
|
e as i32 * sz as i32 + sz as i32 / 2
|
|
|
|
});
|
2019-11-01 00:24:18 +00:00
|
|
|
|
|
|
|
// get a z cache for the collumn in which we want to spawn
|
|
|
|
let mut block_sampler = world.sample_blocks();
|
|
|
|
let z_cache = block_sampler
|
|
|
|
.get_z_cache(spawn_location)
|
|
|
|
.expect(&format!("no z_cache found for chunk: {}", spawn_chunk));
|
|
|
|
|
|
|
|
// get the minimum and maximum z values at which there could be soild blocks
|
|
|
|
let (min_z, _, max_z) = z_cache.get_z_limits(&mut block_sampler);
|
|
|
|
// round range outwards, so no potential air block is missed
|
|
|
|
let min_z = min_z.floor() as i32;
|
|
|
|
let max_z = max_z.ceil() as i32;
|
|
|
|
|
|
|
|
// loop over all blocks from min_z to max_z + 1
|
|
|
|
// until the first air block is found
|
|
|
|
// (up to max_z + 1, because max_z could still be a soild block)
|
|
|
|
// if no air block is found default to max_z + 1
|
2020-01-12 14:45:20 +00:00
|
|
|
let z = (min_z..(max_z + 1) + 1)
|
2019-11-01 00:24:18 +00:00
|
|
|
.find(|z| {
|
|
|
|
block_sampler
|
|
|
|
.get_with_z_cache(
|
|
|
|
Vec3::new(spawn_location.x, spawn_location.y, *z),
|
|
|
|
Some(&z_cache),
|
|
|
|
false,
|
|
|
|
)
|
|
|
|
.map(|b| b.is_air())
|
|
|
|
.unwrap_or(false)
|
|
|
|
})
|
|
|
|
.unwrap_or(max_z + 1);
|
|
|
|
|
|
|
|
// build the actual spawn point and
|
|
|
|
// add 0.5, so that the player spawns in the middle of the block
|
|
|
|
Vec3::new(spawn_location.x, spawn_location.y, z).map(|e| (e as f32)) + 0.5
|
|
|
|
};
|
|
|
|
|
2020-01-22 03:12:17 +00:00
|
|
|
#[cfg(not(feature = "worldgen"))]
|
|
|
|
let spawn_point = Vec3::new(0.0, 0.0, 256.0);
|
|
|
|
|
2019-11-01 00:24:18 +00:00
|
|
|
// set the spawn point we calculated above
|
2019-11-30 06:41:20 +00:00
|
|
|
state.ecs_mut().insert(SpawnPoint(spawn_point));
|
2019-11-01 00:24:18 +00:00
|
|
|
|
2019-07-12 13:03:35 +00:00
|
|
|
// Set starting time for the server.
|
|
|
|
state.ecs_mut().write_resource::<TimeOfDay>().0 = settings.start_time;
|
|
|
|
|
2019-11-04 00:57:36 +00:00
|
|
|
// Register trackers
|
|
|
|
sys::sentinel::register_trackers(&mut state.ecs_mut());
|
|
|
|
|
2019-11-30 06:41:20 +00:00
|
|
|
state.ecs_mut().insert(DeletedEntities::default());
|
2019-11-29 06:04:37 +00:00
|
|
|
|
2020-02-19 17:08:57 +00:00
|
|
|
let mut metrics = ServerMetrics::new();
|
|
|
|
// register all metrics submodules here
|
|
|
|
let tick_metrics = TickMetrics::new(metrics.registry(), metrics.tick_clone())
|
|
|
|
.expect("Failed to initialize server tick metrics submodule.");
|
|
|
|
metrics
|
|
|
|
.run(settings.metrics_address)
|
|
|
|
.expect("Failed to initialize server metrics submodule.");
|
|
|
|
|
2019-06-06 14:48:41 +00:00
|
|
|
let this = Self {
|
2019-04-16 21:06:33 +00:00
|
|
|
state,
|
2019-11-01 00:24:18 +00:00
|
|
|
world: Arc::new(world),
|
2020-01-13 07:10:38 +00:00
|
|
|
map,
|
2019-03-03 22:02:38 +00:00
|
|
|
|
2019-10-11 12:19:55 +00:00
|
|
|
postoffice: PostOffice::bind(settings.gameserver_address)?,
|
2019-04-10 23:16:29 +00:00
|
|
|
|
2019-07-12 19:29:16 +00:00
|
|
|
thread_pool: ThreadPoolBuilder::new()
|
|
|
|
.name("veloren-worker".into())
|
|
|
|
.build(),
|
2019-05-08 16:22:52 +00:00
|
|
|
|
|
|
|
server_info: ServerInfo {
|
2019-07-01 09:37:17 +00:00
|
|
|
name: settings.server_name.clone(),
|
|
|
|
description: settings.server_description.clone(),
|
2019-07-21 17:45:31 +00:00
|
|
|
git_hash: common::util::GIT_HASH.to_string(),
|
2019-10-18 13:32:26 +00:00
|
|
|
git_date: common::util::GIT_DATE.to_string(),
|
2019-12-21 17:02:39 +00:00
|
|
|
auth_provider: settings.auth_server_address.clone(),
|
2019-05-08 16:22:52 +00:00
|
|
|
},
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
metrics,
|
2020-02-19 17:08:57 +00:00
|
|
|
tick_metrics,
|
2019-10-11 12:19:55 +00:00
|
|
|
server_settings: settings.clone(),
|
2019-04-16 21:06:33 +00:00
|
|
|
};
|
2020-05-09 15:41:25 +00:00
|
|
|
|
|
|
|
// Run pending DB migrations (if any)
|
|
|
|
debug!("Running DB migrations...");
|
|
|
|
|
2020-05-12 23:58:15 +00:00
|
|
|
if let Some(error) =
|
|
|
|
persistence::run_migrations(&this.server_settings.persistence_db_dir).err()
|
|
|
|
{
|
2020-05-09 15:41:25 +00:00
|
|
|
log::info!("Migration error: {}", format!("{:#?}", error));
|
|
|
|
}
|
|
|
|
|
2019-11-23 14:34:03 +00:00
|
|
|
debug!("created veloren server with: {:?}", &settings);
|
2020-05-09 15:41:25 +00:00
|
|
|
|
2020-03-26 11:41:30 +00:00
|
|
|
log::info!(
|
|
|
|
"Server version: {}[{}]",
|
|
|
|
*common::util::GIT_HASH,
|
|
|
|
*common::util::GIT_DATE
|
|
|
|
);
|
2019-04-16 21:06:33 +00:00
|
|
|
|
|
|
|
Ok(this)
|
2019-01-02 17:23:31 +00:00
|
|
|
}
|
|
|
|
|
2019-06-05 13:13:24 +00:00
|
|
|
pub fn with_thread_pool(mut self, thread_pool: ThreadPool) -> Self {
|
|
|
|
self.thread_pool = thread_pool;
|
|
|
|
self
|
|
|
|
}
|
|
|
|
|
2019-01-15 15:13:11 +00:00
|
|
|
/// Get a reference to the server's game state.
|
2020-02-01 20:39:39 +00:00
|
|
|
pub fn state(&self) -> &State { &self.state }
|
|
|
|
|
2019-01-15 15:13:11 +00:00
|
|
|
/// Get a mutable reference to the server's game state.
|
2020-02-01 20:39:39 +00:00
|
|
|
pub fn state_mut(&mut self) -> &mut State { &mut self.state }
|
2019-01-12 15:57:19 +00:00
|
|
|
|
2019-01-15 15:13:11 +00:00
|
|
|
/// Get a reference to the server's world.
|
2020-02-01 20:39:39 +00:00
|
|
|
pub fn world(&self) -> &World { &self.world }
|
2019-01-15 15:13:11 +00:00
|
|
|
|
2020-02-01 20:39:39 +00:00
|
|
|
/// Execute a single server tick, handle input and update the game state by
|
|
|
|
/// the given duration.
|
2019-07-01 16:38:19 +00:00
|
|
|
pub fn tick(&mut self, _input: Input, dt: Duration) -> Result<Vec<Event>, Error> {
|
2019-10-15 04:06:14 +00:00
|
|
|
self.state.ecs().write_resource::<Tick>().0 += 1;
|
2020-02-01 20:39:39 +00:00
|
|
|
// This tick function is the centre of the Veloren universe. Most server-side
|
|
|
|
// things are managed from here, and as such it's important that it
|
|
|
|
// stays organised. Please consult the core developers before making
|
|
|
|
// significant changes to this code. Here is the approximate order of
|
|
|
|
// things. Please update it as this code changes.
|
2019-01-02 17:23:31 +00:00
|
|
|
//
|
2020-01-27 16:48:42 +00:00
|
|
|
// 1) Collect input from the frontend, apply input effects to the
|
|
|
|
// state of the game
|
|
|
|
// 2) Go through any events (timer-driven or otherwise) that need handling
|
|
|
|
// and apply them to the state of the game
|
|
|
|
// 3) Go through all incoming client network communications, apply them to
|
|
|
|
// the game state
|
|
|
|
// 4) Perform a single LocalState tick (i.e: update the world and entities
|
|
|
|
// in the world)
|
|
|
|
// 5) Go through the terrain update queue and apply all changes to
|
|
|
|
// the terrain
|
2019-01-02 17:23:31 +00:00
|
|
|
// 6) Send relevant state updates to all clients
|
2019-09-06 14:21:09 +00:00
|
|
|
// 7) Update Metrics with current data
|
2020-01-27 16:48:42 +00:00
|
|
|
// 8) Finish the tick, passing control of the main thread back
|
|
|
|
// to the frontend
|
2019-01-02 17:23:31 +00:00
|
|
|
|
2019-05-17 20:47:58 +00:00
|
|
|
// 1) Build up a list of events for this frame, to be passed to the frontend.
|
2019-03-03 22:02:38 +00:00
|
|
|
let mut frontend_events = Vec::new();
|
|
|
|
|
2019-05-17 09:22:32 +00:00
|
|
|
// If networking has problems, handle them.
|
2019-03-05 18:39:18 +00:00
|
|
|
if let Some(err) = self.postoffice.error() {
|
2019-03-03 22:02:38 +00:00
|
|
|
return Err(err.into());
|
|
|
|
}
|
|
|
|
|
2019-05-17 20:47:58 +00:00
|
|
|
// 2)
|
2019-03-03 22:02:38 +00:00
|
|
|
|
2020-03-10 02:27:32 +00:00
|
|
|
let before_new_connections = Instant::now();
|
|
|
|
|
2019-05-17 20:47:58 +00:00
|
|
|
// 3) Handle inputs from clients
|
|
|
|
frontend_events.append(&mut self.handle_new_connections()?);
|
2019-03-03 22:02:38 +00:00
|
|
|
|
2020-03-10 02:27:32 +00:00
|
|
|
let before_message_system = Instant::now();
|
|
|
|
|
2020-02-01 20:39:39 +00:00
|
|
|
// Run message recieving sys before the systems in common for decreased latency
|
|
|
|
// (e.g. run before controller system)
|
2019-12-01 21:54:21 +00:00
|
|
|
sys::message::Sys.run_now(&self.state.ecs());
|
|
|
|
|
2020-03-09 03:32:34 +00:00
|
|
|
let before_state_tick = Instant::now();
|
2019-12-01 21:54:21 +00:00
|
|
|
|
2019-11-29 06:04:37 +00:00
|
|
|
// 4) Tick the server's LocalState.
|
2020-03-09 03:32:34 +00:00
|
|
|
// 5) Fetch any generated `TerrainChunk`s and insert them into the terrain.
|
|
|
|
// in sys/terrain.rs
|
|
|
|
self.state.tick(dt, sys::add_server_systems, false);
|
2019-01-02 17:23:31 +00:00
|
|
|
|
2019-10-25 05:35:15 +00:00
|
|
|
let before_handle_events = Instant::now();
|
2020-03-09 03:32:34 +00:00
|
|
|
|
2019-10-25 05:35:15 +00:00
|
|
|
// Handle game events
|
|
|
|
frontend_events.append(&mut self.handle_events());
|
|
|
|
|
2020-03-09 03:32:34 +00:00
|
|
|
let before_update_terrain_and_regions = Instant::now();
|
2019-05-18 08:59:58 +00:00
|
|
|
|
2020-03-09 03:32:34 +00:00
|
|
|
// Apply terrain changes and update the region map after processing server
|
|
|
|
// events so that changes made by server events will be immediately
|
|
|
|
// visble to client synchronization systems, minimizing the latency of
|
|
|
|
// `ServerEvent` mediated effects
|
|
|
|
self.state.update_region_map();
|
|
|
|
self.state.apply_terrain_changes();
|
|
|
|
|
|
|
|
let before_sync = Instant::now();
|
2019-04-10 23:16:29 +00:00
|
|
|
|
2019-05-17 20:47:58 +00:00
|
|
|
// 6) Synchronise clients with the new state of the world.
|
2020-03-09 03:32:34 +00:00
|
|
|
sys::run_sync_systems(self.state.ecs_mut());
|
|
|
|
|
|
|
|
let before_world_tick = Instant::now();
|
|
|
|
|
|
|
|
// Tick the world
|
|
|
|
self.world.tick(dt);
|
|
|
|
|
|
|
|
let before_entity_cleanup = Instant::now();
|
2019-07-01 13:36:45 +00:00
|
|
|
|
2019-08-02 17:48:14 +00:00
|
|
|
// Remove NPCs that are outside the view distances of all players
|
2019-10-06 17:35:47 +00:00
|
|
|
// This is done by removing NPCs in unloaded chunks
|
2019-08-02 17:48:14 +00:00
|
|
|
let to_delete = {
|
|
|
|
let terrain = self.state.terrain();
|
|
|
|
(
|
|
|
|
&self.state.ecs().entities(),
|
|
|
|
&self.state.ecs().read_storage::<comp::Pos>(),
|
2020-01-25 20:43:34 +00:00
|
|
|
!&self.state.ecs().read_storage::<comp::Player>(),
|
2019-08-02 17:48:14 +00:00
|
|
|
)
|
|
|
|
.join()
|
|
|
|
.filter(|(_, pos, _)| terrain.get(pos.0.map(|e| e.floor() as i32)).is_err())
|
|
|
|
.map(|(entity, _, _)| entity)
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
};
|
|
|
|
for entity in to_delete {
|
2019-11-29 06:04:37 +00:00
|
|
|
if let Err(err) = self.state.delete_entity_recorded(entity) {
|
|
|
|
error!("Failed to delete agent outside the terrain: {:?}", err);
|
|
|
|
}
|
2019-08-02 17:48:14 +00:00
|
|
|
}
|
|
|
|
|
2020-03-09 03:32:34 +00:00
|
|
|
let end_of_server_tick = Instant::now();
|
2019-09-06 14:21:09 +00:00
|
|
|
// 7) Update Metrics
|
2020-03-10 02:27:32 +00:00
|
|
|
// Get system timing info
|
2019-10-20 07:20:21 +00:00
|
|
|
let entity_sync_nanos = self
|
|
|
|
.state
|
|
|
|
.ecs()
|
|
|
|
.read_resource::<sys::EntitySyncTimer>()
|
|
|
|
.nanos as i64;
|
|
|
|
let message_nanos = self.state.ecs().read_resource::<sys::MessageTimer>().nanos as i64;
|
2019-11-04 00:57:36 +00:00
|
|
|
let sentinel_nanos = self.state.ecs().read_resource::<sys::SentinelTimer>().nanos as i64;
|
2019-10-20 07:20:21 +00:00
|
|
|
let subscription_nanos = self
|
|
|
|
.state
|
|
|
|
.ecs()
|
|
|
|
.read_resource::<sys::SubscriptionTimer>()
|
|
|
|
.nanos as i64;
|
|
|
|
let terrain_sync_nanos = self
|
|
|
|
.state
|
|
|
|
.ecs()
|
|
|
|
.read_resource::<sys::TerrainSyncTimer>()
|
|
|
|
.nanos as i64;
|
|
|
|
let terrain_nanos = self.state.ecs().read_resource::<sys::TerrainTimer>().nanos as i64;
|
2020-03-09 03:32:34 +00:00
|
|
|
let waypoint_nanos = self.state.ecs().read_resource::<sys::WaypointTimer>().nanos as i64;
|
2020-05-11 10:06:53 +00:00
|
|
|
let stats_persistence_nanos = self
|
|
|
|
.state
|
|
|
|
.ecs()
|
2020-06-01 21:34:52 +00:00
|
|
|
.read_resource::<sys::PersistenceTimer>()
|
2020-05-11 10:06:53 +00:00
|
|
|
.nanos as i64;
|
2020-03-09 03:32:34 +00:00
|
|
|
let total_sys_ran_in_dispatcher_nanos = terrain_nanos + waypoint_nanos;
|
2020-05-11 10:06:53 +00:00
|
|
|
|
2020-03-10 02:27:32 +00:00
|
|
|
// Report timing info
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2020-03-10 02:27:32 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["new connections"])
|
|
|
|
.set((before_message_system - before_new_connections).as_nanos() as i64);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-09-07 13:10:57 +00:00
|
|
|
.tick_time
|
2019-10-20 07:20:21 +00:00
|
|
|
.with_label_values(&["state tick"])
|
2019-12-01 21:54:21 +00:00
|
|
|
.set(
|
2020-03-09 03:32:34 +00:00
|
|
|
(before_handle_events - before_state_tick).as_nanos() as i64
|
|
|
|
- total_sys_ran_in_dispatcher_nanos,
|
2019-12-01 21:54:21 +00:00
|
|
|
);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-10-25 05:35:15 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["handle server events"])
|
2020-03-09 03:32:34 +00:00
|
|
|
.set((before_update_terrain_and_regions - before_handle_events).as_nanos() as i64);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-09-07 13:10:57 +00:00
|
|
|
.tick_time
|
2020-03-09 03:32:34 +00:00
|
|
|
.with_label_values(&["update terrain and region map"])
|
|
|
|
.set((before_sync - before_update_terrain_and_regions).as_nanos() as i64);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2020-03-09 03:32:34 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["world tick"])
|
|
|
|
.set((before_entity_cleanup - before_world_tick).as_nanos() as i64);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2020-03-09 03:32:34 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["entity cleanup"])
|
|
|
|
.set((end_of_server_tick - before_entity_cleanup).as_nanos() as i64);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-09-07 13:10:57 +00:00
|
|
|
.tick_time
|
2019-10-20 07:20:21 +00:00
|
|
|
.with_label_values(&["entity sync"])
|
|
|
|
.set(entity_sync_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-10-20 07:20:21 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["message"])
|
|
|
|
.set(message_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2020-03-09 03:32:34 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["sentinel"])
|
|
|
|
.set(sentinel_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-10-20 07:20:21 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["subscription"])
|
|
|
|
.set(subscription_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-10-20 07:20:21 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["terrain sync"])
|
|
|
|
.set(terrain_sync_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-10-20 07:20:21 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["terrain"])
|
|
|
|
.set(terrain_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2020-03-09 03:32:34 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["waypoint"])
|
|
|
|
.set(waypoint_nanos);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2020-05-11 10:06:53 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["persistence:stats"])
|
|
|
|
.set(stats_persistence_nanos);
|
|
|
|
|
2020-03-10 02:27:32 +00:00
|
|
|
// Report other info
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-10-15 04:06:14 +00:00
|
|
|
.player_online
|
|
|
|
.set(self.state.ecs().read_storage::<Client>().join().count() as i64);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-09-10 13:22:34 +00:00
|
|
|
.time_of_day
|
|
|
|
.set(self.state.ecs().read_resource::<TimeOfDay>().0);
|
2020-02-19 17:08:57 +00:00
|
|
|
if self.tick_metrics.is_100th_tick() {
|
2019-09-10 13:22:34 +00:00
|
|
|
let mut chonk_cnt = 0;
|
|
|
|
let chunk_cnt = self.state.terrain().iter().fold(0, |a, (_, c)| {
|
|
|
|
chonk_cnt += 1;
|
|
|
|
a + c.sub_chunks_len()
|
|
|
|
});
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics.chonks_count.set(chonk_cnt as i64);
|
|
|
|
self.tick_metrics.chunks_count.set(chunk_cnt as i64);
|
2020-05-12 23:44:27 +00:00
|
|
|
|
|
|
|
let entity_count = self.state.ecs().entities().join().count();
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics.entity_count.set(entity_count as i64);
|
2019-09-10 13:22:34 +00:00
|
|
|
}
|
2019-09-07 13:10:57 +00:00
|
|
|
//self.metrics.entity_count.set(self.state.);
|
2020-02-19 17:08:57 +00:00
|
|
|
self.tick_metrics
|
2019-09-07 13:10:57 +00:00
|
|
|
.tick_time
|
|
|
|
.with_label_values(&["metrics"])
|
2020-03-09 03:32:34 +00:00
|
|
|
.set(end_of_server_tick.elapsed().as_nanos() as i64);
|
Fixing the DEADLOCK in handshake -> channel creation
- this bug was initially called imbris bug, as it happened on his runners and i couldn't reproduce it locally at fist :)
- When in a Handshake a seperate mpsc::Channel was created for (Cid, Frame) transport
however the protocol could already catch non handshake data any more and push in into this
mpsc::Channel.
Then this channel got dropped and a fresh one was created for the network::Channel.
These droped Frames are ofc a BUG!
I tried multiple things to solve this:
- dont create a new mpsc::Channel, but instead bind it to the Protocol itself and always use 1.
This would work theoretically, but in bParticipant side we are using 1 mpsc::Channel<(Cid, Frame)>
to handle ALL the network::channel.
If now ever Protocol would have it's own, and with that every network::Channel had it's own it would no longer work out
Bad Idea...
- using the first method but creating the mpsc::Channel inside the scheduler instead protocol neither works, as the
scheduler doesnt know the remote_pid yet
- i dont want a hack to say the protocol only listen to 2 messages and then stop no matter what
So i switched over to the simply method now:
- Do everything like before with 2 mpsc::Channels
- after the handshake. close the receiver and listen for all remaining (cid, frame) combinations
- when starting the channel, reapply them to the new sender/listener combination
- added tracing
- switched Protocol RwLock to Mutex, as it's only ever 1
- Additionally changed the layout and introduces the c2w_frame_s and w2s_cid_frame_s name schema
- Fixed a bug in scheduler which WOULD cause a DEADLOCK if handshake would fail
- fixd a but in api_send_send_main, i need to store the stream_p otherwise it's immeadiatly closed and a stream_a.send() isn't guaranteed
- add extra test to verify that a send message is received even if the Stream is already closed
- changed OutGoing to Outgoing
- fixed a bug that `metrics.tick()` was never called
- removed 2 unused nightly features and added `deny_code`
2020-06-03 07:13:00 +00:00
|
|
|
self.metrics.tick();
|
2019-09-06 14:21:09 +00:00
|
|
|
|
|
|
|
// 8) Finish the tick, pass control back to the frontend.
|
2019-05-25 21:13:38 +00:00
|
|
|
|
2019-03-03 22:02:38 +00:00
|
|
|
Ok(frontend_events)
|
2019-01-02 17:23:31 +00:00
|
|
|
}
|
2019-01-30 12:11:34 +00:00
|
|
|
|
2019-05-17 09:22:32 +00:00
|
|
|
/// Clean up the server after a tick.
|
2019-01-30 12:11:34 +00:00
|
|
|
pub fn cleanup(&mut self) {
|
|
|
|
// Cleanup the local state
|
|
|
|
self.state.cleanup();
|
|
|
|
}
|
2019-03-03 22:02:38 +00:00
|
|
|
|
2019-05-17 09:22:32 +00:00
|
|
|
/// Handle new client connections.
|
2019-03-03 22:02:38 +00:00
|
|
|
fn handle_new_connections(&mut self) -> Result<Vec<Event>, Error> {
|
|
|
|
let mut frontend_events = Vec::new();
|
|
|
|
|
2019-06-06 14:48:41 +00:00
|
|
|
for postbox in self.postoffice.new_postboxes() {
|
2019-04-21 18:12:29 +00:00
|
|
|
let mut client = Client {
|
|
|
|
client_state: ClientState::Connected,
|
|
|
|
postbox,
|
|
|
|
last_ping: self.state.get_time(),
|
2019-12-31 08:10:51 +00:00
|
|
|
login_msg_sent: false,
|
2019-04-21 18:12:29 +00:00
|
|
|
};
|
|
|
|
|
2019-10-15 04:06:14 +00:00
|
|
|
if self.server_settings.max_players
|
|
|
|
<= self.state.ecs().read_storage::<Client>().join().count()
|
|
|
|
{
|
|
|
|
// Note: in this case the client is dropped
|
2020-01-07 06:27:18 +00:00
|
|
|
client.notify(ServerMsg::TooManyPlayers);
|
2019-07-01 11:19:26 +00:00
|
|
|
} else {
|
2019-10-15 04:06:14 +00:00
|
|
|
let entity = self
|
|
|
|
.state
|
|
|
|
.ecs_mut()
|
|
|
|
.create_entity_synced()
|
|
|
|
.with(client)
|
|
|
|
.build();
|
2020-02-01 20:39:39 +00:00
|
|
|
// Send client all the tracked components currently attached to its entity as
|
|
|
|
// well as synced resources (currently only `TimeOfDay`)
|
2019-11-23 14:34:03 +00:00
|
|
|
log::debug!("Starting initial sync with client.");
|
2019-10-15 04:06:14 +00:00
|
|
|
self.state
|
|
|
|
.ecs()
|
|
|
|
.write_storage::<Client>()
|
|
|
|
.get_mut(entity)
|
|
|
|
.unwrap()
|
|
|
|
.notify(ServerMsg::InitialSync {
|
2019-12-18 05:22:52 +00:00
|
|
|
// Send client their entity
|
|
|
|
entity_package: TrackedComps::fetch(&self.state.ecs())
|
2020-03-18 21:00:07 +00:00
|
|
|
.create_entity_package(entity, None, None, None),
|
2019-10-15 04:06:14 +00:00
|
|
|
server_info: self.server_info.clone(),
|
2019-12-18 05:22:52 +00:00
|
|
|
time_of_day: *self.state.ecs().read_resource(),
|
2020-01-13 07:10:38 +00:00
|
|
|
world_map: (WORLD_SIZE.map(|e| e as u32), self.map.clone()),
|
2019-10-15 04:06:14 +00:00
|
|
|
});
|
2019-11-23 14:34:03 +00:00
|
|
|
log::debug!("Done initial sync with client.");
|
2019-07-01 11:19:26 +00:00
|
|
|
|
|
|
|
frontend_events.push(Event::ClientConnected { entity });
|
|
|
|
}
|
2019-07-26 21:01:41 +00:00
|
|
|
}
|
|
|
|
|
2019-03-03 22:02:38 +00:00
|
|
|
Ok(frontend_events)
|
|
|
|
}
|
2019-03-04 19:50:26 +00:00
|
|
|
|
2019-10-15 04:06:14 +00:00
|
|
|
pub fn notify_client(&self, entity: EcsEntity, msg: ServerMsg) {
|
|
|
|
if let Some(client) = self.state.ecs().write_storage::<Client>().get_mut(entity) {
|
|
|
|
client.notify(msg)
|
2019-04-17 17:32:29 +00:00
|
|
|
}
|
2019-03-04 19:50:26 +00:00
|
|
|
}
|
2019-04-10 23:41:37 +00:00
|
|
|
|
2019-09-16 01:38:53 +00:00
|
|
|
pub fn generate_chunk(&mut self, entity: EcsEntity, key: Vec2<i32>) {
|
2019-10-20 05:19:50 +00:00
|
|
|
self.state
|
|
|
|
.ecs()
|
|
|
|
.write_resource::<ChunkGenerator>()
|
|
|
|
.generate_chunk(entity, key, &mut self.thread_pool, self.world.clone());
|
2019-04-16 13:06:30 +00:00
|
|
|
}
|
|
|
|
|
2019-04-16 15:38:01 +00:00
|
|
|
fn process_chat_cmd(&mut self, entity: EcsEntity, cmd: String) {
|
2019-05-17 09:22:32 +00:00
|
|
|
// Separate string into keyword and arguments.
|
2019-04-16 13:06:30 +00:00
|
|
|
let sep = cmd.find(' ');
|
|
|
|
let (kwd, args) = match sep {
|
|
|
|
Some(i) => (cmd[..i].to_string(), cmd[(i + 1)..].to_string()),
|
|
|
|
None => (cmd, "".to_string()),
|
|
|
|
};
|
2019-04-16 15:38:01 +00:00
|
|
|
|
2019-05-17 09:22:32 +00:00
|
|
|
// Find the command object and run its handler.
|
2020-05-05 22:33:16 +00:00
|
|
|
if let Ok(command) = kwd.parse::<ChatCommand>() {
|
|
|
|
command.execute(self, entity, args);
|
|
|
|
} else {
|
|
|
|
self.notify_client(
|
|
|
|
entity,
|
|
|
|
ServerMsg::private(format!(
|
|
|
|
"Unknown command '/{}'.\nType '/help' for available commands",
|
|
|
|
kwd
|
|
|
|
)),
|
|
|
|
);
|
2019-04-10 23:41:37 +00:00
|
|
|
}
|
|
|
|
}
|
2019-08-14 15:51:59 +00:00
|
|
|
|
|
|
|
fn entity_is_admin(&self, entity: EcsEntity) -> bool {
|
|
|
|
self.state
|
|
|
|
.read_storage::<comp::Admin>()
|
|
|
|
.get(entity)
|
|
|
|
.is_some()
|
|
|
|
}
|
2020-03-04 20:42:22 +00:00
|
|
|
|
2020-02-19 17:08:57 +00:00
|
|
|
/// Current number of online players, as read from the `player_online`
/// metrics gauge (maintained elsewhere by the metrics tick).
pub fn number_of_players(&self) -> i64 { self.tick_metrics.player_online.get() }
|
2019-01-02 17:23:31 +00:00
|
|
|
}
|
2019-03-05 18:39:18 +00:00
|
|
|
|
|
|
|
impl Drop for Server {
    /// On server shutdown (drop), notify every registered client so they can
    /// disconnect cleanly instead of timing out.
    fn drop(&mut self) { self.state.notify_registered_clients(ServerMsg::Shutdown); }
}
|