Move even more stuff to background threads.

This commit is contained in:
Joshua Yanovski 2022-08-05 03:30:52 -07:00
parent c577c21156
commit ec6b343dc8
6 changed files with 200 additions and 78 deletions

View File

@@ -163,6 +163,11 @@ impl<V: RectRasterableVol> VolGrid2d<V> {
self.chunks.get(&key).map(|arc_chunk| arc_chunk.as_ref())
}
#[inline(always)]
pub fn contains_key(&self, key: Vec2<i32>) -> bool {
self.chunks.contains_key(&key)
}
pub fn get_key_arc(&self, key: Vec2<i32>) -> Option<&Arc<V>> { self.chunks.get(&key) }
pub fn clear(&mut self) { self.chunks.clear(); }

View File

@@ -10,6 +10,19 @@ pub struct Buffer<T: Copy + Pod> {
}
impl<T: Copy + Pod> Buffer<T> {
pub fn new_mapped(device: &wgpu::Device, len: usize, usage: wgpu::BufferUsage) -> Self {
Self {
buf: device.create_buffer(&wgpu::BufferDescriptor {
label: None,
mapped_at_creation: true,
size: len as u64 * std::mem::size_of::<T>() as u64,
usage: usage | wgpu::BufferUsage::COPY_DST,
}),
len,
phantom_data: std::marker::PhantomData,
}
}
pub fn new(device: &wgpu::Device, usage: wgpu::BufferUsage, data: &[T]) -> Self {
let contents = bytemuck::cast_slice(data);
@@ -49,6 +62,10 @@ impl<T: Copy + Pod> DynamicBuffer<T> {
Self(Buffer::new(device, usage | wgpu::BufferUsage::COPY_DST, data))
}
pub fn new_mapped(device: &wgpu::Device, len: usize, usage: wgpu::BufferUsage) -> Self {
Self(Buffer::new_mapped(device, len, usage | wgpu::BufferUsage::COPY_DST))
}
pub fn update(&self, queue: &wgpu::Queue, vals: &[T], offset: usize) {
if !vals.is_empty() {
queue.write_buffer(
@@ -58,6 +75,29 @@ impl<T: Copy + Pod> DynamicBuffer<T> {
)
}
}
/// Update the GPU-side value represented by this buffer if it was previously memory
/// mapped, then unmap it.
///
/// NOTE: Will panic if the buffer was not explicitly mapped before this (without being
/// unmapped), either directly or via [Buffer::new_mapped].
///
/// NOTE: Queue is not *explicitly* used here, but it is implicitly used during the unmap
/// (within wgpu internals) and requires acquiring a lock on it, so it's kept in the
/// signature to deter people from calling this when the queue isn't available.
pub fn update_mapped(&mut self, _queue: &wgpu::Queue, vals: &[T], offset: usize) {
if !vals.is_empty() {
let contents = bytemuck::cast_slice(vals);
let size_ty = std::mem::size_of::<T>() as u64;
let offset = offset as u64 * size_ty;
let size = vals.len() as u64 * size_ty;
self.buf.slice(offset..offset + size)
.get_mapped_range_mut()
.copy_from_slice(contents);
}
self.buf.unmap();
}
}
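A minimal usage sketch of the mapped-at-creation flow above, assuming `device` and `queue` handles are in scope and `Params` is a hypothetical `Copy + Pod + Default` type:

// Allocate the buffer mapped, so the first write needs no staging copy.
let mut buf: DynamicBuffer<Params> =
    DynamicBuffer::new_mapped(&device, 1, wgpu::BufferUsage::UNIFORM);
// Write the initial contents and unmap; the buffer is GPU-usable afterwards.
buf.update_mapped(&queue, &[Params::default()], 0);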
impl<T: Copy + Pod> std::ops::Deref for DynamicBuffer<T> {

View File

@@ -17,10 +17,33 @@ impl<T: Copy + Pod> Consts<T> {
}
}
pub fn new_with_data(device: &wgpu::Device, data: &[T]) -> Self {
Self {
// TODO: examine if all our consts need to be updatable
buf: DynamicBuffer::new_with_data(device, wgpu::BufferUsage::UNIFORM, data),
}
}
/// Create a new `Consts<T>` that is mapped at creation.
///
/// Warning: the buffer must be unmapped before attempting to use it on the GPU!
pub fn new_mapped(device: &wgpu::Device, len: usize) -> Self {
Self {
// TODO: examine if all our consts need to be updatable
buf: DynamicBuffer::new_mapped(device, len, wgpu::BufferUsage::UNIFORM),
}
}
/// Update the GPU-side value represented by this constant handle.
pub fn update(&mut self, queue: &wgpu::Queue, vals: &[T], offset: usize) {
self.buf.update(queue, vals, offset)
}
/// Update the GPU-side value represented by this constant handle if it was previously
/// memory mapped, then immediately unmap it.
pub fn update_mapped(&mut self, queue: &wgpu::Queue, vals: &[T], offset: usize) {
self.buf.update_mapped(queue, vals, offset)
}
pub fn buf(&self) -> &wgpu::Buffer { &self.buf.buf }
}
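Taken together, the intended lifecycle of a mapped `Consts<T>` looks like the following sketch (the `Globals` type is illustrative, assumed `Copy + Pod + Default`):

// Created mapped; it must not be bound on the GPU yet.
let mut consts: Consts<Globals> = Consts::new_mapped(&device, 1);
// Writing through the mapped range also unmaps, making it safe to bind.
consts.update_mapped(&queue, &[Globals::default()], 0);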

View File

@@ -34,6 +34,7 @@ use super::{
use common::assets::{self, AssetExt, AssetHandle, ReloadWatcher};
use common_base::span;
use core::{sync::atomic::{AtomicUsize, Ordering}, convert::TryFrom};
use crossbeam_channel as channel;
#[cfg(feature = "egui-ui")]
use egui_wgpu_backend::wgpu::TextureFormat;
use std::sync::Arc;
@@ -151,6 +152,9 @@ pub struct Renderer {
// Some if there is a pending need to recreate the pipelines (e.g. RenderMode change or shader
// hotloading)
recreation_pending: Option<PipelineModes>,
// Handle to wgpu maintain thread (which is mostly responsible for performing garbage
// collection).
maintain_tx: channel::Sender<()>,
layouts: Layouts,
// Note: we keep these here since their bind groups need to be updated if we resize the
@@ -471,9 +475,9 @@ impl Renderer {
)?;
let clouds_locals =
Self::create_consts_inner(&device, &queue, &[clouds::Locals::default()]);
Self::create_consts_inner(&device, &[clouds::Locals::default()]);
let postprocess_locals =
Self::create_consts_inner(&device, &queue, &[postprocess::Locals::default()]);
Self::create_consts_inner(&device, &[postprocess::Locals::default()]);
let locals = Locals::new(
&device,
@@ -484,7 +488,7 @@ impl Renderer {
&views.tgt_depth,
views.bloom_tgts.as_ref().map(|tgts| locals::BloomParams {
locals: bloom_sizes.map(|size| {
Self::create_consts_inner(&device, &queue, &[bloom::Locals::new(size)])
Self::create_consts_inner(&device, &[bloom::Locals::new(size)])
}),
src_views: [&views.tgt_color_pp, &tgts[1], &tgts[2], &tgts[3], &tgts[4]],
final_tgt_view: &tgts[0],
@@ -503,6 +507,19 @@ impl Renderer {
profiler.enable_timer = other_modes.profiler_enabled;
profiler.enable_debug_marker = other_modes.profiler_enabled;
// If the maintain channel is still busy, there's no actual reason to send the maintain
// request, since we'll do it next frame anyway and it will cover any work missed during
// the previous frame.
let (maintain_tx, maintain_rx) = channel::bounded(0);
let device_ = Arc::clone(&device);
std::thread::spawn(move || {
// Maintain each time we are requested to do so, until the renderer dies.
while let Ok(()) = maintain_rx.recv() {
device_.poll(wgpu::Maintain::Poll);
}
});
#[cfg(feature = "egui-ui")]
let egui_renderpass =
egui_wgpu_backend::RenderPass::new(&*device, TextureFormat::Bgra8UnormSrgb, 1);
@@ -516,6 +533,7 @@ impl Renderer {
state,
recreation_pending: None,
maintain_tx,
layouts,
locals,
@@ -672,7 +690,7 @@ impl Renderer {
.as_ref()
.map(|tgts| locals::BloomParams {
locals: bloom_sizes.map(|size| {
Self::create_consts_inner(&self.device, &self.queue, &[bloom::Locals::new(
Self::create_consts_inner(&self.device, &[bloom::Locals::new(
size,
)])
}),
@@ -787,7 +805,15 @@ impl Renderer {
self.queue.submit(std::iter::empty());
}
self.device.poll(wgpu::Maintain::Poll)
// If the send fails, we can (generally) assume it's because the channel is out of
// capacity. If not, it's because of an internal wgpu panic, which we don't care to
// handle anyway (and we're currently okay with leaking wgpu state if the other thread
// panicked while panic=abort is off, since panic=abort is on in release builds).
//
// Since a full channel means a maintain is already being processed (in which case we
// can just catch up next frame), this is a long-winded way of saying we can ignore
// the result of try_send.
let _ = self.maintain_tx.try_send(());
}
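Because the channel has zero capacity, try_send only succeeds while the maintain thread is parked in recv, which is exactly why its result can be ignored. A self-contained sketch of the same rendezvous pattern (illustrative, not the renderer's code):

use crossbeam_channel as channel;

fn main() {
    let (tx, rx) = channel::bounded::<()>(0);
    std::thread::spawn(move || {
        // One round of maintenance per rendezvous, until all senders drop.
        while let Ok(()) = rx.recv() {
            // ... device.poll(wgpu::Maintain::Poll) would go here ...
        }
    });
    // Succeeds only if the worker is currently idle in recv(); otherwise we
    // skip this frame and catch up on the next one.
    let _ = tx.try_send(());
}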
/// Create render target views
@@ -1229,17 +1255,14 @@ impl Renderer {
/// Create a new set of constants with the provided values.
pub fn create_consts<T: Copy + bytemuck::Pod>(&mut self, vals: &[T]) -> Consts<T> {
Self::create_consts_inner(&self.device, &self.queue, vals)
Self::create_consts_inner(&self.device, vals)
}
pub fn create_consts_inner<T: Copy + bytemuck::Pod>(
device: &wgpu::Device,
queue: &wgpu::Queue,
vals: &[T],
) -> Consts<T> {
let mut consts = Consts::new(device, vals.len());
consts.update(queue, vals, 0);
consts
Consts::new_with_data(device, vals)
}
/// Update a set of constants with the provided values.
@@ -1247,6 +1270,11 @@ impl Renderer {
consts.update(&self.queue, vals, 0)
}
/// Update a set of memory mapped constants with the provided values.
pub fn update_mapped<T: Copy + bytemuck::Pod>(&self, consts: &mut Consts<T>, vals: &[T]) {
consts.update_mapped(&self.queue, vals, 0)
}
pub fn update_clouds_locals(&mut self, new_val: clouds::Locals) {
self.locals.clouds.update(&self.queue, &[new_val], 0)
}
@@ -1298,7 +1326,10 @@ impl Renderer {
}
// NOTE: This operation is monotonic, so Relaxed is sufficient.
quad_index_buffer_u32_len.fetch_update(
//
// We don't care whether the result succeeded or failed, since either way we know
// we're at the maximum value now.
let _ = quad_index_buffer_u32_len.fetch_update(
Ordering::Relaxed,
Ordering::Relaxed,
|old_len| (old_len < quad_index_length).then_some(vert_length),
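A standalone sketch of this monotonic fetch_update idiom (names are illustrative):

use core::sync::atomic::{AtomicUsize, Ordering};

// Raise `len` to `new_len` if it is smaller. Relaxed suffices because the
// value only ever grows, so a stale read can only under-report it.
fn raise_to(len: &AtomicUsize, new_len: usize) {
    let _ = len.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |old| {
        (old < new_len).then_some(new_len)
    });
}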

View File

@@ -1,5 +1,6 @@
use crate::render::pipelines::rain_occlusion;
use std::sync::Arc;
use super::{
super::{
pipelines::{
@@ -8,6 +9,7 @@ use super::{
},
texture::Texture,
},
Consts,
Renderer,
};
@@ -63,12 +65,26 @@ impl Renderer {
.bind_locals(&self.device, locals, bone_data)
}
/* /// Create a new set of constants with the provided values, lazily (so this can be instantiated
/// from another thread).
pub fn create_consts_lazy<T: Copy + bytemuck::Pod>(&mut self) ->
impl for<'a> Fn(&'a [T]) -> Consts<T> + Send + Sync
{
let device = Arc::clone(&self.device);
move |vals| Self::create_consts_inner(&device, vals)
} */
/// NOTE: Locals are mapped at creation, so you still have to write to and unmap them
/// before use (e.g. via update_mapped).
pub fn create_terrain_bound_locals(
&mut self,
locals: &[terrain::Locals],
) -> terrain::BoundLocals {
let locals = self.create_consts(locals);
self.layouts.terrain.bind_locals(&self.device, locals)
) -> /*for<'a> Fn(&'a [terrain::Locals]) -> terrain::BoundLocals + Send + Sync*/impl Fn() -> terrain::BoundLocals + Send + Sync {
let device = Arc::clone(&self.device);
let immutable = Arc::clone(&self.layouts.immutable);
move || {
let locals = Consts::new_mapped(&device, 1);
immutable.terrain.bind_locals(&device, locals)
}
}
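A hedged sketch of how the returned closure is meant to be used (the worker-thread plumbing is assumed; `update_mapped` is the unmapping write shown earlier):

// Render thread: capture the device and layouts into a Send + Sync closure.
let create_locals = renderer.create_terrain_bound_locals();
// Worker thread: create the locals mapped, without touching the queue.
let mut bound_locals = create_locals();
// Back where the queue is available: write and unmap before first use, e.g.
// renderer.update_mapped(&mut bound_locals, &[terrain::Locals::new(/* ... */)]);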
pub fn create_shadow_bound_locals(&mut self, locals: &[shadow::Locals]) -> shadow::BoundLocals {

View File

@@ -24,6 +24,7 @@ use client::Client;
use common::{
assets::{self, AssetExt, DotVoxAsset},
figure::Segment,
slowjob::SlowJobPool,
spiral::Spiral2d,
terrain::{Block, SpriteKind, TerrainChunk},
vol::{BaseVol, ReadVol, RectRasterableVol, SampleVol},
@@ -117,6 +118,8 @@ pub struct MeshWorkerResponseMesh {
shadow_z_bounds: (f32, f32),
opaque_model: Option<Model<TerrainVertex>>,
fluid_model: Option<Model<FluidVertex>>,
/// NOTE: These are memory mapped, and must be unmapped!
locals: pipelines::terrain::BoundLocals,
col_lights_info: ColLightInfo,
light_map: LightMapFn,
glow_map: LightMapFn,
@@ -238,9 +241,10 @@ fn mesh_worker<V: BaseVol<Vox = Block> + RectRasterableVol + ReadVol + Debug + '
range: Aabb<i32>,
sprite_data: &HashMap<(SpriteKind, usize), [SpriteData; SPRITE_LOD_LEVELS]>,
sprite_config: &SpriteSpec,
create_opaque: impl for<'a> Fn(&'a Mesh<TerrainVertex>) -> Option<Model<TerrainVertex>> + Send + Sync,
create_fluid: impl for<'a> Fn(&'a Mesh<FluidVertex>) -> Option<Model<FluidVertex>> + Send + Sync,
create_instances: impl for<'a> Fn(&'a [SpriteInstance]) -> Instances<SpriteInstance> + Send + Sync,
create_opaque: impl for<'a> Fn(&'a Mesh<TerrainVertex>) -> Option<Model<TerrainVertex>>,
create_fluid: impl for<'a> Fn(&'a Mesh<FluidVertex>) -> Option<Model<FluidVertex>>,
create_instances: impl for<'a> Fn(&'a [SpriteInstance]) -> Instances<SpriteInstance>,
create_locals: impl Fn() -> pipelines::terrain::BoundLocals,
) -> MeshWorkerResponse {
span!(_guard, "mesh_worker");
let (blocks_of_interest, sprite_kinds) = BlocksOfInterest::from_chunk(&chunk)/*default()*/;
@@ -276,6 +280,7 @@ fn mesh_worker<V: BaseVol<Vox = Block> + RectRasterableVol + ReadVol + Debug + '
shadow_z_bounds: ((chunk.get_min_z() as f32).max(bounds.min.z), (chunk.get_max_z() as f32).min(bounds.max.z)),
opaque_model: create_opaque(&opaque_mesh),
fluid_model: create_fluid(&fluid_mesh),
locals: create_locals(),
col_lights_info,
light_map,
glow_map,
@@ -417,8 +422,8 @@ pub struct Terrain<V: RectRasterableVol = TerrainChunk> {
// workers.
mesh_send_tmp: channel::Sender<MeshWorkerResponse>,
mesh_recv: channel::Receiver<MeshWorkerResponse>,
new_atlas_tx: channel::Sender<(AtlasAllocator, Texture)>,
new_atlas_rx: channel::Receiver<(AtlasAllocator, Texture)>,
new_atlas_tx: channel::Sender<Texture>,
new_atlas_rx: channel::Receiver<Texture>,
mesh_todo: HashMap<Vec2<i32>, ChunkMeshState>,
mesh_todos_active: Arc<AtomicU64>,
mesh_recv_overflow: f32,
@@ -637,6 +642,15 @@ impl<V: RectRasterableVol> Terrain<V> {
// with worker threads that are meshing chunks.
let (send, recv) = channel::unbounded();
let max_texture_size = renderer.max_texture_size();
let atlas_size = guillotiere::Size::new(max_texture_size as i32, max_texture_size as i32);
let atlas = AtlasAllocator::with_options(atlas_size, &guillotiere::AllocatorOptions {
// TODO: Verify some good empirical constants.
small_size_threshold: 128,
large_size_threshold: 1024,
..guillotiere::AllocatorOptions::default()
});
// Number of background atlases to have prepared at a time. It is unlikely we would ever
// want to change this value from 1, unless rendering gets super speedy or our atlas count
// starts exploding. This should never be set to 0 unless you are okay with blocking every
@@ -657,8 +671,8 @@ impl<V: RectRasterableVol> Terrain<V> {
// We start by creating an extra atlas, ensuring that we are always building one more atlas
// than we currently need in a background job.
let (atlas, col_lights) =
Self::make_atlas(client, renderer, &mut new_atlas_tx, &mut new_atlas_rx, None, EXTRA_ATLAS_COUNT)
let col_lights =
Self::make_atlas(&client.state().slow_job_pool(), renderer, &mut new_atlas_tx, &mut new_atlas_rx, None, EXTRA_ATLAS_COUNT)
.expect("Failed to create atlas texture");
Self {
@@ -691,23 +705,17 @@ impl<V: RectRasterableVol> Terrain<V> {
/// `old_texture` is an optional argument representing an old texture with the same size and
/// (ideally) format as the new atlas.
fn make_atlas(
client: &Client,
slowjob: &SlowJobPool,
renderer: &mut Renderer,
new_atlas_tx: &mut channel::Sender<(AtlasAllocator, Texture)>,
new_atlas_rx: &mut channel::Receiver<(AtlasAllocator, Texture)>,
new_atlas_tx: &mut channel::Sender<Texture>,
new_atlas_rx: &mut channel::Receiver<Texture>,
old_texture: Option<&Texture>,
count: usize,
) -> Result<(AtlasAllocator, ColLights<pipelines::terrain::Locals>), channel::RecvError> {
) -> Result<ColLights<pipelines::terrain::Locals>, channel::RecvError> {
span!(_guard, "make_atlas", "Terrain::make_atlas");
let max_texture_size = renderer.max_texture_size();
let atlas_size = guillotiere::Size::new(max_texture_size as i32, max_texture_size as i32);
(0..=count).for_each(|_| {
let atlas = AtlasAllocator::with_options(atlas_size, &guillotiere::AllocatorOptions {
// TODO: Verify some good empirical constants.
small_size_threshold: 128,
large_size_threshold: 1024,
..guillotiere::AllocatorOptions::default()
});
let new_atlas_tx = new_atlas_tx.clone();
let texture_fn = renderer.create_texture_raw(
wgpu::TextureDescriptor {
@@ -746,20 +754,17 @@ impl<V: RectRasterableVol> Terrain<V> {
..Default::default()
},
);
client
.state()
.slow_job_pool()
.spawn("TERRAIN_MESHING", move || {
slowjob.spawn("TERRAIN_MESHING", move || {
// Construct the next atlas on a separate thread. If it doesn't get sent, it means
// the original channel was dropped, which implies the terrain scene data no longer
// exists, so we can just drop the result in that case.
let _ = new_atlas_tx.send((atlas, texture_fn()));
let _ = new_atlas_tx.send(texture_fn());
});
});
// Receive the most recent available atlas. This call blocks only when there was no time
// to produce a fresh atlas between calls to make_atlas, which should hopefully be rare.
let (atlas, texture) = new_atlas_rx.recv()?;
let texture = new_atlas_rx.recv()?;
// Needs to be fully initialized for partial writes to work on Dx12 AMD.
//
// Ideally, we would either not have to do this, or have an explicit clear available, but
@@ -773,7 +778,7 @@ impl<V: RectRasterableVol> Terrain<V> {
renderer.clear_texture(&texture);
}
let col_light = renderer.terrain_bind_col_light(texture);
Ok((atlas, col_light))
Ok(col_light)
}
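make_atlas thus keeps one spare atlas in flight: each call queues production of the next texture before receiving the current one. A self-contained sketch of that pipeline shape (plain integers stand in for textures):

use crossbeam_channel as channel;
use std::thread;

fn main() {
    let (tx, rx) = channel::unbounded::<u64>();
    let produce = |tx: channel::Sender<u64>| {
        thread::spawn(move || {
            // Stand-in for expensive background texture construction.
            let _ = tx.send(42);
        });
    };
    // Prime the pipeline with one spare, then one per actual request.
    produce(tx.clone());
    produce(tx.clone());
    // Usually ready immediately, because the spare was queued last round.
    let texture = rx.recv().unwrap();
    println!("got texture {texture}");
}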
fn remove_chunk_meta(&mut self, _pos: Vec2<i32>, chunk: &TerrainChunkData) {
@@ -928,11 +933,13 @@ impl<V: RectRasterableVol> Terrain<V> {
..
} = camera.dependents();
let terrain_changes = scene_data.state.terrain_changes();
// Remove any models for chunks that have been recently removed.
// Note: Do this before adding to the todo list, just in case removed chunks were
// replaced with new chunks (although this would probably be recorded as
// modified chunks).
for &pos in &scene_data.state.terrain_changes().removed_chunks {
for &pos in &terrain_changes.removed_chunks {
self.remove_chunk(pos);
// Remove neighbors from meshing todo
for i in -1..2 {
@@ -949,19 +956,17 @@ impl<V: RectRasterableVol> Terrain<V> {
let current_time = scene_data.state.get_time();
let mut visible_bounding_box: Option<Aabb<f32>> = None;
let terrain = scene_data.state.terrain();
// Add any recently created or changed chunks to the list of chunks to be
// meshed.
span!(guard, "Add new/modified chunks to mesh todo list");
for (modified, pos) in scene_data
.state
.terrain_changes()
for (modified, pos) in terrain_changes
.modified_chunks
.iter()
.map(|c| (true, c))
.chain(
scene_data
.state
.terrain_changes()
terrain_changes
.new_chunks
.iter()
.map(|c| (false, c)),
@@ -981,11 +986,8 @@ impl<V: RectRasterableVol> Terrain<V> {
let mut neighbours = true;
for i in -1..2 {
for j in -1..2 {
neighbours &= scene_data
.state
.terrain()
.get_key(pos + Vec2::new(i, j))
.is_some();
neighbours &= terrain
.contains_key(pos + Vec2::new(i, j));
}
}
@@ -1007,11 +1009,11 @@ impl<V: RectRasterableVol> Terrain<V> {
// be meshed
span!(guard, "Add chunks with modified blocks to mesh todo list");
// TODO: would be useful if modified blocks were grouped by chunk
for (&pos, &old_block) in scene_data.state.terrain_changes().modified_blocks.iter() {
for (&pos, &old_block) in terrain_changes.modified_blocks.iter() {
// terrain_changes() are both set and applied during the same tick on the
// client, so the current state is the new state and modified_blocks
// stores the old state.
let new_block = scene_data.state.get_block(pos);
let new_block = terrain.get(pos).ok().copied();
let (skip_color, skip_lights) = if let Some(new_block) = new_block {
Self::skip_remesh(old_block, new_block)
@@ -1058,7 +1060,7 @@ impl<V: RectRasterableVol> Terrain<V> {
for x in -1..2 {
for y in -1..2 {
let neighbour_pos = pos + Vec3::new(x, y, 0) * block_effect_radius;
let neighbour_chunk_pos = scene_data.state.terrain().pos_key(neighbour_pos);
let neighbour_chunk_pos = terrain.pos_key(neighbour_pos);
if skip_lights && !(x == 0 && y == 0) {
// We don't need to remesh neighboring chunks if this block change doesn't
@@ -1070,11 +1072,8 @@ impl<V: RectRasterableVol> Terrain<V> {
let mut neighbours = true;
for i in -1..2 {
for j in -1..2 {
neighbours &= scene_data
.state
.terrain()
.get_key(neighbour_chunk_pos + Vec2::new(i, j))
.is_some();
neighbours &= terrain
.contains_key(neighbour_chunk_pos + Vec2::new(i, j));
}
}
if neighbours {
@@ -1101,6 +1100,7 @@ impl<V: RectRasterableVol> Terrain<V> {
}
}
}
drop(terrain_changes);
drop(guard);
// Limit ourselves to u16::MAX even if larger textures are supported.
@@ -1127,14 +1127,14 @@ impl<V: RectRasterableVol> Terrain<V> {
)
});
let slowjob = scene_data.state.slow_job_pool();
for (todo, chunk) in todo.into_iter()
.filter(|todo| !todo.is_worker_active)
/* .min_by_key(|todo| ((todo.pos.as_::<i64>() * TerrainChunk::RECT_SIZE.as_::<i64>()).distance_squared(mesh_focus_pos), todo.started_tick)) */
// Find a reference to the actual `TerrainChunk` we're meshing
./*and_then*/filter_map(|todo| {
let pos = todo.pos;
Some((todo, scene_data.state
.terrain()
Some((todo, terrain
.get_key_arc(pos)
.cloned()
.or_else(|| {
@@ -1162,7 +1162,7 @@ impl<V: RectRasterableVol> Terrain<V> {
// Copy out the chunk data we need to perform the meshing. We do this by taking
// a sample of the terrain that includes both the chunk we want and
// its neighbours.
let volume = match scene_data.state.terrain().sample(aabr) {
let volume = match terrain.sample(aabr) {
Ok(sample) => sample, /* TODO: Ensure that all of the chunk's neighbours still
* exist to avoid buggy shadow borders */
// Either this chunk or its neighbours doesn't yet exist, so we keep it in the
@@ -1207,10 +1207,9 @@ impl<V: RectRasterableVol> Terrain<V> {
let create_opaque = renderer.create_model_lazy();
let create_fluid = renderer.create_model_lazy();
let create_instances = renderer.create_instances_lazy();
let create_locals = renderer.create_terrain_bound_locals();
cnt.fetch_add(1, Ordering::Relaxed);
scene_data
.state
.slow_job_pool()
slowjob
.spawn("TERRAIN_MESHING", move || {
let sprite_data = sprite_data;
let _ = send.send(mesh_worker(
@@ -1227,11 +1226,13 @@ impl<V: RectRasterableVol> Terrain<V> {
create_opaque,
create_fluid,
create_instances,
create_locals,
));
cnt.fetch_sub(1, Ordering::Relaxed);
});
todo.is_worker_active = true;
}
drop(terrain);
drop(guard);
// Receive a chunk mesh from a worker thread and upload it to the GPU, then
@@ -1255,7 +1256,7 @@ impl<V: RectRasterableVol> Terrain<V> {
let sprite_instances = response.sprite_instances;
if let Some(mesh) = response.mesh {
if let Some(mut mesh) = response.mesh {
// Full update, insert the whole chunk.
let load_time = self
@@ -1275,9 +1276,9 @@ impl<V: RectRasterableVol> Terrain<V> {
let new_atlas_rx = &mut self.new_atlas_rx;
let allocation = atlas.allocate(alloc_size).unwrap_or_else(|| {
// Atlas allocation failure: try allocating a new texture and atlas.
let (new_atlas, new_col_lights) =
let new_col_lights =
Self::make_atlas(
scene_data.client,
&slowjob,
renderer,
new_atlas_tx,
new_atlas_rx,
@@ -1297,7 +1298,9 @@ impl<V: RectRasterableVol> Terrain<V> {
chunks.iter_mut().for_each(|(_, chunk)| {
chunk.col_lights_alloc = None;
});
*atlas = new_atlas;
// Clear out the atlas rather than actually creating a new one, so we
// can reuse existing allocations.
atlas.clear();
*col_lights = Arc::new(new_col_lights);
atlas
@@ -1317,6 +1320,17 @@ impl<V: RectRasterableVol> Terrain<V> {
&tex,
);
// Update the memory mapped locals.
renderer.update_mapped(&mut mesh.locals, &[TerrainLocals::new(
Vec3::from(
response.pos.map2(VolGrid2d::<V>::chunk_size(), |e, sz| {
e as f32 * sz as f32
}),
),
atlas_offs,
load_time,
)]);
self.insert_chunk(response.pos, TerrainChunkData {
load_time,
opaque_model: mesh.opaque_model,
@@ -1326,15 +1340,7 @@ impl<V: RectRasterableVol> Terrain<V> {
light_map: mesh.light_map,
glow_map: mesh.glow_map,
sprite_instances,
locals: renderer.create_terrain_bound_locals(&[TerrainLocals::new(
Vec3::from(
response.pos.map2(VolGrid2d::<V>::chunk_size(), |e, sz| {
e as f32 * sz as f32
}),
),
atlas_offs,
load_time,
)]),
locals: mesh.locals,
visible: Visibility {
in_range: false,
in_frustum: false,
@@ -1363,6 +1369,7 @@ impl<V: RectRasterableVol> Terrain<V> {
None => {},
}
}
drop(slowjob);
drop(guard);
// Construct view frustum