use a constant for recommended_threads

commit 4cfdbdd069 (parent 76319cb260)
@@ -16,3 +16,7 @@ pub const WATER_DENSITY: f32 = 999.1026;
 pub const IRON_DENSITY: f32 = 7870.0;
 // pub const HUMAN_DENSITY: f32 = 1010.0; // real value
 pub const HUMAN_DENSITY: f32 = 990.0; // value we use to make humanoids gently float
+// 1 thread might be used for long-running cpu intensive tasks, like chunk
+// generation. having at least 2 helps not blocking in the main tick here
+pub const MIN_RECOMMENDED_RAYON_THREADS: usize = 2;
+pub const MIN_RECOMMENDED_TOKIO_THREADS: usize = 2;
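The two constants are floors, not targets: callers still size their pools from num_cpus and only clamp upward. A minimal standalone sketch of that clamping (not part of the diff; it assumes the common and num_cpus crates are available to a test binary):

    use common::consts::{MIN_RECOMMENDED_RAYON_THREADS, MIN_RECOMMENDED_TOKIO_THREADS};

    fn main() {
        // rayon: one thread per logical core, but never fewer than 2
        let rayon_threads = num_cpus::get().max(MIN_RECOMMENDED_RAYON_THREADS);
        // tokio: roughly a quarter of the cores, also floored at 2
        let tokio_threads = (num_cpus::get() / 4).max(MIN_RECOMMENDED_TOKIO_THREADS);
        println!("rayon: {}, tokio: {}", rayon_threads, tokio_threads);
    }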
@@ -103,9 +103,7 @@ impl State {

         let thread_pool = Arc::new(
             ThreadPoolBuilder::new()
-                .num_threads(
-                    num_cpus::get().max(2), /* Have AT LEAST 2 rayon threads */
-                )
+                .num_threads(num_cpus::get().max(common::consts::MIN_RECOMMENDED_RAYON_THREADS))
                 .thread_name(move |i| format!("rayon-{}-{}", thread_name_infix, i))
                 .build()
                 .unwrap(),
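A quick way to see what this expression resolves to on a given machine is to build the same kind of pool in isolation; a sketch assuming only the rayon and num_cpus crates (the thread-name prefix is invented for the example):

    use rayon::ThreadPoolBuilder;

    fn main() {
        let pool = ThreadPoolBuilder::new()
            // same floor as common::consts::MIN_RECOMMENDED_RAYON_THREADS
            .num_threads(num_cpus::get().max(2))
            .thread_name(|i| format!("rayon-example-{}", i))
            .build()
            .unwrap();
        println!("rayon pool has {} threads", pool.current_num_threads());
    }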
@@ -211,8 +209,8 @@ impl State {
         ecs.insert(Vec::<common::outcome::Outcome>::new());
         ecs.insert(common::CachedSpatialGrid::default());

-        let slow_limit = num_cpus::get().max(2) as u64;
-        let slow_limit = slow_limit / 2 + slow_limit / 4;
+        let num_cpu = num_cpus::get() as u64;
+        let slow_limit = (num_cpu / 2 + num_cpu / 4).max(1);
         tracing::trace!(?slow_limit, "Slow Thread limit");
         ecs.insert(SlowJobPool::new(slow_limit, Arc::clone(&thread_pool)));

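The rewritten limit is roughly 75% of the logical cores with a floor of 1, and it removes the shadowed slow_limit binding. A small worked example of the arithmetic (illustrative only):

    fn slow_limit(num_cpu: u64) -> u64 {
        // ~75% of the cores: a half plus a quarter, never below 1
        (num_cpu / 2 + num_cpu / 4).max(1)
    }

    fn main() {
        assert_eq!(slow_limit(1), 1);   // single-core machines still get one slow job
        assert_eq!(slow_limit(4), 3);
        assert_eq!(slow_limit(8), 6);
        assert_eq!(slow_limit(16), 12);
    }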
@@ -324,7 +322,7 @@ impl State {
     /// Get a mutable reference to the internal ECS world.
     pub fn ecs_mut(&mut self) -> &mut specs::World { &mut self.ecs }

-    pub fn thread_pool(&self) -> Arc<ThreadPool> { Arc::clone(&self.thread_pool) }
+    pub fn thread_pool(&self) -> &Arc<ThreadPool> { &self.thread_pool }

     /// Get a reference to the `TerrainChanges` structure of the state. This
     /// contains information about terrain state that has changed since the
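Returning &Arc<ThreadPool> means callers borrow by default and clone only when they need ownership. A self-contained sketch of the two call patterns, using a stand-in struct rather than the real State type:

    use std::sync::Arc;
    use rayon::{ThreadPool, ThreadPoolBuilder};

    // Stand-in for State, only to show the new getter signature.
    struct Example { thread_pool: Arc<ThreadPool> }

    impl Example {
        fn thread_pool(&self) -> &Arc<ThreadPool> { &self.thread_pool }
    }

    fn main() {
        let state = Example { thread_pool: Arc::new(ThreadPoolBuilder::new().build().unwrap()) };
        // borrow when a reference is enough
        state.thread_pool().install(|| println!("running on the pool"));
        // clone explicitly when ownership is required
        let owned: Arc<ThreadPool> = Arc::clone(state.thread_pool());
        owned.spawn(|| { /* background work */ });
    }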
@@ -14,7 +14,7 @@ use crate::{
     cmd::Message, shutdown_coordinator::ShutdownCoordinator, tui_runner::Tui, tuilog::TuiLog,
 };
 use clap::{App, Arg, SubCommand};
-use common::clock::Clock;
+use common::{clock::Clock, consts::MIN_RECOMMENDED_TOKIO_THREADS};
 use common_base::span;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use server::{
@@ -119,10 +119,11 @@ fn main() -> io::Result<()> {

     // We don't need that many threads in the async pool, at least 2 but generally
     // 25% of all available will do
+    // TODO: evaluate std::thread::available_concurrency as a num_cpus replacement
     let runtime = Arc::new(
         tokio::runtime::Builder::new_multi_thread()
             .enable_all()
-            .worker_threads((num_cpus::get() / 4).max(2))
+            .worker_threads((num_cpus::get() / 4).max(MIN_RECOMMENDED_TOKIO_THREADS))
             .thread_name_fn(|| {
                 static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
                 let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
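The same sizing heuristic appears at all three tokio call sites in this commit: about 25% of the cores, floored at MIN_RECOMMENDED_TOKIO_THREADS. A standalone sketch of the builder chain, which also shows what the TODO is pointing at, since std::thread::available_parallelism() is the stabilized name of available_concurrency (the thread-name prefix is invented for the example):

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        // stabilized std replacement for the num_cpus crate mentioned in the TODO
        let cores = std::thread::available_parallelism().map(|n| n.get()).unwrap_or(1);
        let runtime = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .worker_threads((cores / 4).max(2)) // floor of MIN_RECOMMENDED_TOKIO_THREADS
            .thread_name_fn(|| {
                static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
                let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
                format!("tokio-example-{}", id)
            })
            .build()
            .unwrap();
        runtime.block_on(async { println!("async pool is up") });
    }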
@@ -3,6 +3,7 @@ use client::{
     error::{Error as ClientError, NetworkConnectError, NetworkError},
     Client, ServerInfo,
 };
+use common::consts::MIN_RECOMMENDED_TOKIO_THREADS;
 use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
 use std::{
     sync::{
@@ -62,11 +63,12 @@ impl ClientInit {
         let cancel2 = Arc::clone(&cancel);

         let runtime = runtime.unwrap_or_else(|| {
+            // TODO: evaluate std::thread::available_concurrency as a num_cpus replacement
             let cores = num_cpus::get();
             Arc::new(
                 runtime::Builder::new_multi_thread()
                     .enable_all()
-                    .worker_threads((cores / 4).max(2))
+                    .worker_threads((cores / 4).max(MIN_RECOMMENDED_TOKIO_THREADS))
                     .thread_name_fn(|| {
                         static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
                         let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
@@ -1,4 +1,4 @@
-use common::clock::Clock;
+use common::{clock::Clock, consts::MIN_RECOMMENDED_TOKIO_THREADS};
 use crossbeam::channel::{bounded, unbounded, Receiver, Sender, TryRecvError};
 use server::{
     persistence::{DatabaseSettings, SqlLogMode},
@@ -82,12 +82,13 @@ impl Singleplayer {
         let settings = server::Settings::singleplayer(&server_data_dir);
         let editable_settings = server::EditableSettings::singleplayer(&server_data_dir);

+        // TODO: evaluate std::thread::available_concurrency as a num_cpus replacement
         let cores = num_cpus::get();
         debug!("Creating a new runtime for server");
         let runtime = Arc::new(
             tokio::runtime::Builder::new_multi_thread()
                 .enable_all()
-                .worker_threads((cores / 4).max(2))
+                .worker_threads((cores / 4).max(MIN_RECOMMENDED_TOKIO_THREADS))
                 .thread_name_fn(|| {
                     static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
                     let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);