diff --git a/Cargo.lock b/Cargo.lock
index 3e068ca3a8..8ada5d4086 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6554,6 +6554,7 @@ dependencies = [
  "approx 0.4.0",
  "bitflags",
  "bitvec",
+ "bytemuck",
  "chrono",
  "chrono-tz",
  "clap 2.34.0",
diff --git a/common/Cargo.toml b/common/Cargo.toml
index d8c40a1828..a01f413cb0 100644
--- a/common/Cargo.toml
+++ b/common/Cargo.toml
@@ -40,6 +40,7 @@ strum = { version = "0.24", features = ["derive"] }
 approx = "0.4.0"
 bitvec = "0.22"
 # bumpalo = { version = "3.9.1", features = ["allocator_api"] }
+bytemuck = { version="1.4", features=["derive"] }
 clap = "2.33"
 crossbeam-utils = "0.8.1"
 bitflags = "1.2"
diff --git a/common/net/src/msg/compression.rs b/common/net/src/msg/compression.rs
index f9f92de363..889029d3a0 100644
--- a/common/net/src/msg/compression.rs
+++ b/common/net/src/msg/compression.rs
@@ -672,10 +672,10 @@ impl<const AVERAGE_PALETTE: bool> VoxelImageDecoding for TriPngEncoding<AVERAGE_PALETTE>
-pub fn image_terrain_chonk<S: RectVolSize, M: Clone, P: PackingFormula, VIE: VoxelImageEncoding>(
+pub fn image_terrain_chonk<S: RectVolSize, Storage: core::ops::DerefMut<Target = Vec<Block>>, M: Clone, P: PackingFormula, VIE: VoxelImageEncoding>(
     vie: &VIE,
     packing: P,
-    chonk: &Chonk<Block, S, M>,
+    chonk: &Chonk<Block, Storage, S, M>,
 ) -> Option<VIE::Output> {
     image_terrain(
         vie,
@@ -688,13 +688,14 @@ pub fn image_terrain_chonk<S: RectVolSize, M: Clone, P: PackingFormula, VIE: VoxelImageEncoding>(
 pub fn image_terrain_volgrid<
     S: RectVolSize + Debug,
+    Storage: core::ops::DerefMut<Target = Vec<Block>> + Debug,
     M: Clone + Debug,
     P: PackingFormula,
     VIE: VoxelImageEncoding,
 >(
     vie: &VIE,
     packing: P,
-    volgrid: &VolGrid2d<Chonk<Block, S, M>>,
+    volgrid: &VolGrid2d<Chonk<Block, Storage, S, M>>,
 ) -> Option<VIE::Output> {
     let mut lo = Vec3::broadcast(i32::MAX);
     let mut hi = Vec3::broadcast(i32::MIN);
@@ -818,7 +819,7 @@ pub struct WireChonk<VIE: VoxelImageEncoding, P: PackingFormula, M: Clone, S: RectVolSize>
 impl<VIE: VoxelImageEncoding + VoxelImageDecoding, P: PackingFormula, M: Clone, S: RectVolSize>
     WireChonk<VIE, P, M, S>
 {
-    pub fn from_chonk(vie: VIE, packing: P, chonk: &Chonk<Block, S, M>) -> Option<Self> {
+    pub fn from_chonk<Storage: core::ops::DerefMut<Target = Vec<Block>>>(vie: VIE, packing: P, chonk: &Chonk<Block, Storage, S, M>) -> Option<Self> {
         let data = image_terrain_chonk(&vie, packing, chonk)?;
         Some(Self {
             zmin: chonk.get_min_z(),
@@ -835,7 +836,7 @@ impl<VIE: VoxelImageEncoding + VoxelImageDecoding, P: PackingFormula, M: Clone, S: RectVolSize>
-    pub fn to_chonk(&self) -> Option<Chonk<Block, S, M>> {
+    pub fn to_chonk<Storage: core::ops::DerefMut<Target = Vec<Block>> + From<Vec<Block>>>(&self) -> Option<Chonk<Block, Storage, S, M>> {
         let mut chonk = Chonk::new(self.zmin, self.below, self.above, self.meta.clone());
         write_image_terrain(
             &self.vie,
diff --git a/common/src/terrain/block.rs b/common/src/terrain/block.rs
index c74ce70ba4..7e411ae77d 100644
--- a/common/src/terrain/block.rs
+++ b/common/src/terrain/block.rs
@@ -1,5 +1,5 @@
-use zerocopy::AsBytes;
 use super::SpriteKind;
+use bitvec::prelude::*;
 use crate::{
     comp::{fluid_dynamics::LiquidKind, tool::ToolKind},
     consts::FRIC_GROUND,
@@ -7,10 +7,11 @@ use crate::{
 };
 use num_derive::FromPrimitive;
 use num_traits::FromPrimitive;
-use serde::{Deserialize, Serialize};
+use serde::{ser, Deserialize, Serialize};
 use std::ops::Deref;
 use strum::{Display, EnumIter, EnumString};
 use vek::*;
+use zerocopy::AsBytes;
 
 make_case_elim!(
     block_kind,
@@ -30,6 +31,11 @@ make_case_elim!(
         Display,
     )]
     #[repr(u8)]
+    /// XXX(@Sharp): If you feel like significantly modifying how BlockKind works, you *MUST* also
+    /// update the implementation of BlockVec! BlockVec uses unsafe code that relies on EnumIter.
+    /// If you are just adding variants, that's fine (for now), but any other changes (like
+    /// changing from repr(u8)) need review.
+    ///
     /// NOTE: repr(u8) preserves the niche optimization for fieldless enums!
     pub enum BlockKind {
         Air = 0x00, // Air counts as a fluid
@@ -113,8 +119,12 @@ impl BlockKind {
     }
 }
 
+/// XXX(@Sharp): If you feel like significantly modifying how Block works, you *MUST* also update
+/// the implementation of BlockVec! BlockVec uses unsafe code that depends on being able to
+/// independently validate the kind and treat attr as bytes; changing things so that this no longer
+/// works will require careful review.
 #[derive(AsBytes, Copy, Clone, Debug, Eq, Serialize, Deserialize)]
-/// NOTE: repr(C) appears to preservre niche optimizations!
+/// NOTE: repr(C) appears to preserve niche optimizations!
 #[repr(align(4), C)]
 pub struct Block {
     kind: BlockKind,
@@ -431,6 +441,125 @@ impl Block {
     }
 }
 
+/// A wrapper around Vec<Block>, usable for efficient deserialization.
+///
+/// XXX(@Sharp): This is crucially interwoven with the definition of Block and BlockKind, as it
+/// uses unsafe code to speed up deserialization. If you decide to change how these types work in
+/// a significant way (i.e. beyond adding new variants to BlockKind), this needs careful review!
+#[derive(
+    Clone,
+    Debug,
+    Deserialize,
+    Hash,
+    Eq,
+    PartialEq,
+)]
+#[serde(try_from = "&'_ [u8]")]
+pub struct BlockVec(Vec<Block>);
+
+impl core::ops::Deref for BlockVec {
+    type Target = Vec<Block>;
+
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl core::ops::DerefMut for BlockVec {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl From<Vec<Block>> for BlockVec {
+    #[inline]
+    fn from(inner: Vec<Block>) -> Self {
+        Self(inner)
+    }
+}
+
+impl Serialize for BlockVec {
+    /// We can *safely* serialize a BlockVec as a Vec of bytes (this is validated by AsBytes).
+    /// This also means that the representation here is architecture independent.
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        serializer.serialize_bytes(self.0.as_bytes())
+    }
+}
+
+impl<'a/*, Error: de::Error*/> TryFrom<&'a [u8]> for BlockVec {
+    type Error = &'static str;
+
+    /// XXX(@Sharp): This implementation is subtle and its safety depends on it being implemented
+    /// correctly! It is well-commented, but those comments are only valid so long as this
+    /// implementation doesn't change. If you do need to change this implementation, please seek
+    /// careful review!
+    ///
+    /// NOTE: Ideally, we would perform a try_from(Vec<u8>) instead, to avoid the extra copy.
+    /// Unfortunately this is not generally sound, since Vec allocations must be deallocated with
+    /// the same layout with which they were allocated, which includes alignment (and no, it does
+    /// not matter if they in practice have the same alignment at runtime, it's still UB). If we
+    /// were to do this, we'd effectively have to hold a Vec<u8> inside BlockVec at all times, not
+    /// exposing &mut access at all, and instead requiring transmutes to get access to Blocks.
+    /// This seems like a huge pain, so for now, hopefully deserialize (the non-owned version) is
+    /// sufficient.
+    #[allow(unsafe_code)]
+    fn try_from(blocks: &'a [u8]) -> Result<Self, Self::Error>
+    {
+        // First, make sure the byte slice is correctly interpretable as a slice of [u8; 4].
+        let blocks: &[[u8; 4]] = bytemuck::try_cast_slice(blocks)
+            .map_err(|_| /*Error::invalid_length(blocks.len(), &"a multiple of 4")*/"Length must be a multiple of 4")?;
+        // The basic observation here is that a slice of [u8; 4] is *almost* the same as a slice
+        // of blocks, so conversion from the former to the latter can be very cheap. The only
+        // problem is that BlockKind (the first byte in `Block`) has some invalid states, so not
+        // every u8 slice of the appropriate size is a block slice. Fortunately, since we don't
+        // care about figuring out which block triggered the error, we can figure this out really
+        // cheaply--we just have to set a bit for every block kind we see, then check at the end
+        // to make sure all the bits we set are valid elements. We can construct the valid bit
+        // set using EnumIter, and the requirement is: (!valid & set_bits) = 0.
+
+        // Construct the invalid list. Initially, it's all 1s, then we set all the bits
+        // corresponding to valid block kinds to 0, leaving a set bit for each invalid block
+        // kind.
+        //
+        // TODO: Verify whether this gets constant folded away; if not, try to do this as a
+        // const fn? Might need to modify the EnumIter implementation.
+        let mut invalid_bits = bitarr![1; 256];
+        <BlockKind as strum::IntoEnumIterator>::iter().for_each(|bk| {
+            invalid_bits.set((bk as u8).into(), false);
+        });
+
+        // Initially, the set bit list is empty.
+        let mut set_bits = bitarr![0; 256];
+
+        // TODO: SIMD iteration.
+        // NOTE: The block kind is guaranteed to be at the front, thanks to the repr(C).
+        blocks.into_iter().for_each(|&[kind, _, _, _]| {
+            // TODO: Check assembly to see if the bounds check gets elided; if so, leave this as
+            // set instead of set_unchecked, to scope down the use of unsafe as much as possible.
+            set_bits.set(kind.into(), true);
+        });
+
+        // The invalid bits and the set bits should have no overlap.
+        set_bits &= invalid_bits;
+        if set_bits.any() {
+            // At least one invalid bit was set, so there was an invalid BlockKind somewhere.
+            //
+            // TODO: Use radix representation of the bad block kind.
+            return Err(/*Error::unknown_variant("an invalid u8", &["see the definition of BlockKind for details"])*/"Found an unknown BlockKind while parsing Vec<Block>");
+        }
+        // All set bits are cleared, so all block kinds were valid. Combined with the slice being
+        // compatible with [u8; 4], we can transmute the slice to a slice of Blocks and then
+        // construct a new vector from it.
+        let blocks = unsafe { core::mem::transmute::<&'a [[u8; 4]], &'a [Block]>(blocks) };
+        // Finally, *safely* construct a vector from the new blocks (as mentioned above, we
+        // cannot reuse the old byte vector even if we wanted to, since it doesn't have the same
+        // alignment as Block).
+        Ok(Self(blocks.to_vec()))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/common/src/terrain/chonk.rs b/common/src/terrain/chonk.rs
index e3ddd70a0e..25a3cc6a44 100644
--- a/common/src/terrain/chonk.rs
+++ b/common/src/terrain/chonk.rs
@@ -16,13 +16,42 @@ pub enum ChonkError {
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SubChunkSize<ChonkSize: RectVolSize> {
-    phantom: PhantomData<ChonkSize>,
+pub struct SubChunkSize<V, Storage, ChonkSize: RectVolSize> {
+    storage: Storage,
+    phantom: PhantomData<(V, ChonkSize)>,
+}
+
+impl<V, Storage: core::ops::Deref<Target = Vec<V>>, ChonkSize: RectVolSize> core::ops::Deref for SubChunkSize<V, Storage, ChonkSize> {
+    type Target = Vec<V>;
+
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        &self.storage
+    }
+}
+
+impl<V, Storage: core::ops::DerefMut<Target = Vec<V>>, ChonkSize: RectVolSize> core::ops::DerefMut for SubChunkSize<V, Storage, ChonkSize> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.storage
+    }
+}
+
+impl<V, Storage: From<Vec<V>>, ChonkSize: RectVolSize> From<Vec<V>> for SubChunkSize<V, Storage, ChonkSize> {
+    #[inline]
+    fn from(storage: Vec<V>) -> Self {
+        Self {
+            storage: storage.into(),
+            phantom: PhantomData,
+        }
+    }
 }
 
 // TODO (haslersn): Assert ChonkSize::RECT_SIZE.x == ChonkSize::RECT_SIZE.y
-
-impl<ChonkSize: RectVolSize> VolSize for SubChunkSize<ChonkSize> {
+impl<V, Storage, ChonkSize: RectVolSize> VolSize<V> for SubChunkSize<V, Storage, ChonkSize>
+    /* where Storage: Clone + core::ops::Deref<Target = Vec<V>> + core::ops::DerefMut + From<Vec<V>>,
+     * */
+{
     const SIZE: Vec3<u32> = Vec3 {
         x: ChonkSize::RECT_SIZE.x,
         y: ChonkSize::RECT_SIZE.x,
@@ -31,19 +60,19 @@ impl<ChonkSize: RectVolSize> VolSize for SubChunkSize<ChonkSize> {
     };
 }
 
-type SubChunk<V, S, M> = Chunk<V, SubChunkSize<S>, M>;
+type SubChunk<V, Storage, S, M> = Chunk<V, SubChunkSize<V, Storage, S>, M>;
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Chonk<V, S: RectVolSize, M: Clone> {
+pub struct Chonk<V, Storage, S: RectVolSize, M: Clone> {
     z_offset: i32,
-    sub_chunks: Vec<SubChunk<V, S, M>>,
+    sub_chunks: Vec<SubChunk<V, Storage, S, M>>,
     below: V,
     above: V,
     meta: M,
     phantom: PhantomData<S>,
 }
 
-impl<V, S: RectVolSize, M: Clone> Chonk<V, S, M> {
+impl<V, Storage: core::ops::DerefMut<Target = Vec<V>>, S: RectVolSize, M: Clone> Chonk<V, Storage, S, M> {
     pub fn new(z_offset: i32, below: V, above: V, meta: M) -> Self {
         Self {
             z_offset,
@@ -62,7 +91,7 @@ impl<V, S: RectVolSize, M: Clone> Chonk<V, S, M> {
 
     #[inline]
     pub fn get_max_z(&self) -> i32 {
-        self.z_offset + (self.sub_chunks.len() as u32 * SubChunkSize::<S>::SIZE.z) as i32
+        self.z_offset + (self.sub_chunks.len() as u32 * SubChunkSize::<V, Storage, S>::SIZE.z) as i32
     }
 
     pub fn sub_chunks_len(&self) -> usize { self.sub_chunks.len() }
@@ -80,8 +109,8 @@ impl<V, S: RectVolSize, M: Clone> Chonk<V, S, M> {
             .enumerate()
             .filter(|(_, sc)| sc.num_groups() > 0)
             .flat_map(move |(i, sc)| {
-                let z_offset = self.z_offset + i as i32 * SubChunkSize::<S>::SIZE.z as i32;
-                sc.vol_iter(Vec3::zero(), SubChunkSize::<S>::SIZE.map(|e| e as i32))
+                let z_offset = self.z_offset + i as i32 * SubChunkSize::<V, Storage, S>::SIZE.z as i32;
+                sc.vol_iter(Vec3::zero(), SubChunkSize::<V, Storage, S>::SIZE.map(|e| e as i32))
                     .map(move |(pos, vox)| (pos + Vec3::unit_z() * z_offset, vox))
             })
     }
@@ -91,13 +120,13 @@ impl<V, S: RectVolSize, M: Clone> Chonk<V, S, M> {
     #[inline]
     fn sub_chunk_idx(&self, z: i32) -> i32 {
         let diff = z - self.z_offset;
-        diff >> (SubChunkSize::<S>::SIZE.z - 1).count_ones()
+        diff >> (SubChunkSize::<V, Storage, S>::SIZE.z - 1).count_ones()
     }
 
     // Converts a z coordinate into a local z coordinate within a sub chunk
     fn sub_chunk_z(&self, z: i32) -> i32 {
         let diff = z - self.z_offset;
-        diff & (SubChunkSize::<S>::SIZE.z - 1) as i32
+        diff & (SubChunkSize::<V, Storage, S>::SIZE.z - 1) as i32
     }
 
     // Returns the z offset of the sub_chunk that contains layer z
@@ -106,6 +135,7 @@ impl<V, S: RectVolSize, M: Clone> Chonk<V, S, M> {
     /// Compress chunk by using more intelligent defaults.
     pub fn defragment(&mut self)
     where
+        Storage: From<Vec<V>>,
        V: zerocopy::AsBytes + Clone + Eq + Hash,
        [(); { core::mem::size_of::<V>() }]:,
    {
@@ -149,20 +179,20 @@ impl<V, S: RectVolSize, M: Clone> Chonk<V, S, M> {
         // Finally, bump the z_offset to account for the removed subchunks at the
         // bottom. TODO: Add invariants to justify why `below_len` must fit in
         // i32.
-        self.z_offset += below_len as i32 * SubChunkSize::<S>::SIZE.z as i32;
+        self.z_offset += below_len as i32 * SubChunkSize::<V, Storage, S>::SIZE.z as i32;
     }
 }
 
-impl<V, S: RectVolSize, M: Clone> BaseVol for Chonk<V, S, M> {
+impl<V, Storage, S: RectVolSize, M: Clone> BaseVol for Chonk<V, Storage, S, M> {
     type Error = ChonkError;
     type Vox = V;
 }
 
-impl<V, S: RectVolSize, M: Clone> RectRasterableVol for Chonk<V, S, M> {
+impl<V, Storage, S: RectVolSize, M: Clone> RectRasterableVol for Chonk<V, Storage, S, M> {
     const RECT_SIZE: Vec2<u32> = S::RECT_SIZE;
 }
 
-impl<V, S: RectVolSize, M: Clone> ReadVol for Chonk<V, S, M> {
+impl<V, Storage: core::ops::DerefMut<Target = Vec<V>>, S: RectVolSize, M: Clone> ReadVol for Chonk<V, Storage, S, M> {
     #[inline(always)]
     fn get(&self, pos: Vec3<i32>) -> Result<&V, Self::Error> {
         if pos.z < self.get_min_z() {
@@ -176,7 +206,7 @@ impl<V, S: RectVolSize, M: Clone> ReadVol for Chonk<V, S, M> {
             let sub_chunk_idx = self.sub_chunk_idx(pos.z);
             let rpos = pos
                 - Vec3::unit_z()
-                    * (self.z_offset + sub_chunk_idx * SubChunkSize::<S>::SIZE.z as i32);
+                    * (self.z_offset + sub_chunk_idx * SubChunkSize::<V, Storage, S>::SIZE.z as i32);
             self.sub_chunks[sub_chunk_idx as usize]
                 .get(rpos)
                 .map_err(Self::Error::SubChunkError)
         }
     }
 }
 
@@ -184,7 +214,7 @@ impl<V, S: RectVolSize, M: Clone> ReadVol for Chonk<V, S, M> {
-impl<V: Clone + PartialEq, S: RectVolSize, M: Clone> WriteVol for Chonk<V, S, M> {
+impl<V: Clone + PartialEq, Storage: Clone + core::ops::DerefMut<Target = Vec<V>> + From<Vec<V>>, S: Clone + RectVolSize, M: Clone> WriteVol for Chonk<V, Storage, S, M> {
     #[inline(always)]
     fn set(&mut self, pos: Vec3<i32>, block: Self::Vox) -> Result<V, Self::Error> {
         let mut sub_chunk_idx = self.sub_chunk_idx(pos.z);
@@ -195,10 +225,10 @@ impl<V: Clone + PartialEq, S: RectVolSize, M: Clone> WriteVol for Chonk<V, S, M> {
                 return Ok(self.below.clone());
             }
             // Prepend exactly sufficiently many SubChunks via Vec::splice
-            let c = Chunk::<V, SubChunkSize<S>, M>::filled(self.below.clone(), self.meta.clone());
+            let c = Chunk::<V, SubChunkSize<V, Storage, S>, M>::filled(self.below.clone(), self.meta.clone());
             let n = (-sub_chunk_idx) as usize;
             self.sub_chunks.splice(0..0, std::iter::repeat(c).take(n));
-            self.z_offset += sub_chunk_idx * SubChunkSize::<S>::SIZE.z as i32;
+            self.z_offset += sub_chunk_idx * SubChunkSize::<V, Storage, S>::SIZE.z as i32;
             sub_chunk_idx = 0;
         } else if pos.z >= self.get_max_z() {
             // Make sure we're not adding a redundant chunk.
@@ -206,27 +236,27 @@ impl<V: Clone + PartialEq, S: RectVolSize, M: Clone> WriteVol for Chonk<V, S, M> {
                 return Ok(self.above.clone());
             }
             // Append exactly sufficiently many SubChunks via Vec::extend
-            let c = Chunk::<V, SubChunkSize<S>, M>::filled(self.above.clone(), self.meta.clone());
+            let c = Chunk::<V, SubChunkSize<V, Storage, S>, M>::filled(self.above.clone(), self.meta.clone());
             let n = 1 + sub_chunk_idx as usize - self.sub_chunks.len();
             self.sub_chunks.extend(std::iter::repeat(c).take(n));
         }
 
         let rpos = pos
-            - Vec3::unit_z() * (self.z_offset + sub_chunk_idx * SubChunkSize::<S>::SIZE.z as i32);
+            - Vec3::unit_z() * (self.z_offset + sub_chunk_idx * SubChunkSize::<V, Storage, S>::SIZE.z as i32);
         self.sub_chunks[sub_chunk_idx as usize] // TODO (haslersn): self.sub_chunks.get(...).and_then(...)
             .set(rpos, block)
             .map_err(Self::Error::SubChunkError)
     }
 }
 
-struct ChonkIterHelper<V, S: RectVolSize, M: Clone> {
+struct ChonkIterHelper<V, Storage, S: RectVolSize, M: Clone> {
     sub_chunk_min_z: i32,
     lower_bound: Vec3<i32>,
     upper_bound: Vec3<i32>,
-    phantom: PhantomData<Chonk<V, S, M>>,
+    phantom: PhantomData<Chonk<V, Storage, S, M>>,
 }
 
-impl<V, S: RectVolSize, M: Clone> Iterator for ChonkIterHelper<V, S, M> {
+impl<V, Storage, S: RectVolSize, M: Clone> Iterator for ChonkIterHelper<V, Storage, S, M> {
     type Item = (i32, Vec3<i32>, Vec3<i32>);
 
     #[inline(always)]
@@ -239,19 +269,19 @@ impl<V, S: RectVolSize, M: Clone> Iterator for ChonkIterHelper<V, S, M> {
         let current_min_z = self.sub_chunk_min_z;
         lb.z -= current_min_z;
         ub.z -= current_min_z;
-        ub.z = std::cmp::min(ub.z, SubChunkSize::<S>::SIZE.z as i32);
-        self.sub_chunk_min_z += SubChunkSize::<S>::SIZE.z as i32;
+        ub.z = std::cmp::min(ub.z, SubChunkSize::<V, Storage, S>::SIZE.z as i32);
+        self.sub_chunk_min_z += SubChunkSize::<V, Storage, S>::SIZE.z as i32;
         self.lower_bound.z = self.sub_chunk_min_z;
         Some((current_min_z, lb, ub))
     }
 }
 
-pub struct ChonkPosIter<V, S: RectVolSize, M: Clone> {
-    outer: ChonkIterHelper<V, S, M>,
-    opt_inner: Option<(i32, ChunkPosIter<V, SubChunkSize<S>, M>)>,
+pub struct ChonkPosIter<V, Storage, S: RectVolSize, M: Clone> {
+    outer: ChonkIterHelper<V, Storage, S, M>,
+    opt_inner: Option<(i32, ChunkPosIter<V, SubChunkSize<V, Storage, S>, M>)>,
 }
 
-impl<V, S: RectVolSize, M: Clone> Iterator for ChonkPosIter<V, S, M> {
+impl<V, Storage, S: RectVolSize, M: Clone> Iterator for ChonkPosIter<V, Storage, S, M> {
     type Item = Vec3<i32>;
 
     #[inline(always)]
@@ -266,25 +296,25 @@ impl<V, S: RectVolSize, M: Clone> Iterator for ChonkPosIter<V, S, M> {
             match self.outer.next() {
                 None => return None,
                 Some((sub_chunk_min_z, lb, ub)) => {
-                    self.opt_inner = Some((sub_chunk_min_z, SubChunk::<V, S, M>::pos_iter(lb, ub)))
+                    self.opt_inner = Some((sub_chunk_min_z, SubChunk::<V, Storage, S, M>::pos_iter(lb, ub)))
                 },
             }
         }
     }
 }
 
-enum InnerChonkVolIter<'a, V, S: RectVolSize, M: Clone> {
-    Vol(ChunkVolIter<'a, V, SubChunkSize<S>, M>),
-    Pos(ChunkPosIter<V, SubChunkSize<S>, M>),
+enum InnerChonkVolIter<'a, V, Storage, S: RectVolSize, M: Clone> {
+    Vol(ChunkVolIter<'a, V, SubChunkSize<V, Storage, S>, M>),
+    Pos(ChunkPosIter<V, SubChunkSize<V, Storage, S>, M>),
 }
 
-pub struct ChonkVolIter<'a, V, S: RectVolSize, M: Clone> {
-    chonk: &'a Chonk<V, S, M>,
-    outer: ChonkIterHelper<V, S, M>,
-    opt_inner: Option<(i32, InnerChonkVolIter<'a, V, S, M>)>,
+pub struct ChonkVolIter<'a, V, Storage, S: RectVolSize, M: Clone> {
+    chonk: &'a Chonk<V, Storage, S, M>,
+    outer: ChonkIterHelper<V, Storage, S, M>,
+    opt_inner: Option<(i32, InnerChonkVolIter<'a, V, Storage, S, M>)>,
 }
 
-impl<'a, V, S: RectVolSize, M: Clone> Iterator for ChonkVolIter<'a, V, S, M> {
+impl<'a, V, Storage: core::ops::DerefMut<Target = Vec<V>>, S: RectVolSize, M: Clone> Iterator for ChonkVolIter<'a, V, Storage, S, M> {
     type Item = (Vec3<i32>, &'a V);
 
     #[inline(always)]
@@ -292,8 +322,8 @@ impl<'a, V, S: RectVolSize, M: Clone> Iterator for ChonkVolIter<'a, V, S, M> {
         loop {
             if let Some((sub_chunk_min_z, ref mut inner)) = self.opt_inner {
                 let got = match inner {
-                    InnerChonkVolIter::<'a, V, S, M>::Vol(iter) => iter.next(),
-                    InnerChonkVolIter::<'a, V, S, M>::Pos(iter) => iter.next().map(|pos| {
+                    InnerChonkVolIter::<'a, V, Storage, S, M>::Vol(iter) => iter.next(),
+                    InnerChonkVolIter::<'a, V, Storage, S, M>::Pos(iter) => iter.next().map(|pos| {
                         if sub_chunk_min_z < self.chonk.get_min_z() {
                             (pos, &self.chonk.below)
                         } else {
@@ -312,9 +342,9 @@ impl<'a, V, S: RectVolSize, M: Clone> Iterator for ChonkVolIter<'a, V, S, M> {
             let inner = if sub_chunk_min_z < self.chonk.get_min_z()
                 || sub_chunk_min_z >= self.chonk.get_max_z()
             {
-                InnerChonkVolIter::<'a, V, S, M>::Pos(SubChunk::<V, S, M>::pos_iter(lb, ub))
+                InnerChonkVolIter::<'a, V, Storage, S, M>::Pos(SubChunk::<V, Storage, S, M>::pos_iter(lb, ub))
             } else {
-                InnerChonkVolIter::<'a, V, S, M>::Vol(
+                InnerChonkVolIter::<'a, V, Storage, S, M>::Vol(
                     self.chonk.sub_chunks
                         [self.chonk.sub_chunk_idx(sub_chunk_min_z) as usize]
                         .vol_iter(lb, ub),
@@ -327,12 +357,12 @@ impl<'a, V, S: RectVolSize, M: Clone> Iterator for ChonkVolIter<'a, V, S, M> {
     }
 }
 
-impl<'a, V, S: RectVolSize, M: Clone> IntoPosIterator for &'a Chonk<V, S, M> {
-    type IntoIter = ChonkPosIter<V, S, M>;
+impl<'a, V, Storage: core::ops::DerefMut<Target = Vec<V>>, S: RectVolSize, M: Clone> IntoPosIterator for &'a Chonk<V, Storage, S, M> {
+    type IntoIter = ChonkPosIter<V, Storage, S, M>;
 
     fn pos_iter(self, lower_bound: Vec3<i32>, upper_bound: Vec3<i32>) -> Self::IntoIter {
         Self::IntoIter {
-            outer: ChonkIterHelper::<V, S, M> {
+            outer: ChonkIterHelper::<V, Storage, S, M> {
                 sub_chunk_min_z: self.sub_chunk_min_z(lower_bound.z),
                 lower_bound,
                 upper_bound,
@@ -343,13 +373,13 @@ impl<'a, V, S: RectVolSize, M: Clone> IntoPosIterator for &'a Chonk<V, S, M> {
     }
 }
 
-impl<'a, V, S: RectVolSize, M: Clone> IntoVolIterator<'a> for &'a Chonk<V, S, M> {
-    type IntoIter = ChonkVolIter<'a, V, S, M>;
+impl<'a, V, Storage: core::ops::DerefMut<Target = Vec<V>>, S: RectVolSize, M: Clone> IntoVolIterator<'a> for &'a Chonk<V, Storage, S, M> {
+    type IntoIter = ChonkVolIter<'a, V, Storage, S, M>;
 
     fn vol_iter(self, lower_bound: Vec3<i32>, upper_bound: Vec3<i32>) -> Self::IntoIter {
         Self::IntoIter {
             chonk: self,
-            outer: ChonkIterHelper::<V, S, M> {
+            outer: ChonkIterHelper::<V, Storage, S, M> {
                 sub_chunk_min_z: self.sub_chunk_min_z(lower_bound.z),
                 lower_bound,
                 upper_bound,
diff --git a/common/src/terrain/mod.rs b/common/src/terrain/mod.rs
index a4a647512f..0f389d25b2 100644
--- a/common/src/terrain/mod.rs
+++ b/common/src/terrain/mod.rs
@@ -9,7 +9,7 @@ pub mod structure;
 
 // Reexports
 pub use self::{
     biome::BiomeKind,
-    block::{Block, BlockKind},
+    block::{Block, BlockKind, BlockVec},
     map::MapSizeLg,
     site::SitesKind,
     sprite::SpriteKind,
@@ -157,7 +157,7 @@ impl TerrainChunkMeta {
 
 // Terrain type aliases
 
-pub type TerrainChunk = chonk::Chonk<Block, TerrainChunkSize, TerrainChunkMeta>;
+pub type TerrainChunk = chonk::Chonk<Block, BlockVec, TerrainChunkSize, TerrainChunkMeta>;
 pub type TerrainGrid = VolGrid2d<TerrainChunk>;
 
 impl TerrainGrid {
diff --git a/common/src/vol.rs b/common/src/vol.rs
index 1eb4e9a4e8..3c73183720 100644
--- a/common/src/vol.rs
+++ b/common/src/vol.rs
@@ -4,7 +4,12 @@ use vek::*;
 
 /// Used to specify a volume's compile-time size. This exists as a substitute
 /// until const generics are implemented.
-pub trait VolSize: Clone {
+///
+/// The actual type should be suitable for use as storage for a vector of Vs. The type signature
+/// essentially requires that this "just" be a wrapper around Vec<V>, but in some cases we may be
+/// able to implement serialization / deserialization, and potentially other operations, more
+/// efficiently with such a wrapper than we would by using Vec<V>.
+pub trait VolSize<V>/*: Clone + core::ops::Deref<Target = Vec<V>> + core::ops::DerefMut + From<Vec<V>>*/ {
     const SIZE: Vec3<u32>;
 }
 
diff --git a/common/src/volumes/chunk.rs b/common/src/volumes/chunk.rs
index edcf812a29..be0c0a8914 100644
--- a/common/src/volumes/chunk.rs
+++ b/common/src/volumes/chunk.rs
@@ -49,16 +49,15 @@ pub enum ChunkError {
 /// index buffer can consist of `u8`s. This keeps the space requirement for the
 /// index buffer as low as 4 cache lines.
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Chunk<V, S: VolSize, M> {
+pub struct Chunk<V, S: VolSize<V>, M> {
     indices: Vec<u8>, /* TODO (haslersn): Box<[u8; S::SIZE.x * S::SIZE.y * S::SIZE.z]>, this is
                        * however not possible in Rust yet */
-    vox: Vec<V>,
+    vox: S,
     default: V,
     meta: M,
-    phantom: PhantomData<S>,
 }
 
-impl<V, S: VolSize, M> Chunk<V, S, M> {
+impl<V, S: VolSize<V>, M> Chunk<V, S, M> {
     pub const GROUP_COUNT: Vec3<u32> = Vec3::new(
         S::SIZE.x / Self::GROUP_SIZE.x,
         S::SIZE.y / Self::GROUP_SIZE.y,
@@ -74,10 +73,15 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
     );
     const GROUP_VOLUME: u32 = [Self::VOLUME / 256, 1][(Self::VOLUME < 256) as usize];
     const VOLUME: u32 = (S::SIZE.x * S::SIZE.y * S::SIZE.z) as u32;
+}
 
+impl<V, S: core::ops::Deref<Target = Vec<V>> + VolSize<V>, M> Chunk<V, S, M> {
     /// Creates a new `Chunk` with the provided dimensions and all voxels filled
     /// with duplicates of the provided voxel.
-    pub fn filled(default: V, meta: M) -> Self {
+    pub fn filled(default: V, meta: M) -> Self
+    where
+        S: From<Vec<V>>,
+    {
         // TODO (haslersn): Alter into compile time assertions
         //
         // An extent is valid if it fulfils the following conditions.
@@ -111,10 +115,9 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
 
         Self {
             indices: vec![255; Self::GROUP_COUNT_TOTAL as usize],
-            vox: Vec::new(),
+            vox: Vec::new().into(),
             default,
             meta,
-            phantom: PhantomData,
         }
     }
 
@@ -122,6 +125,7 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
     pub fn defragment(&mut self)
     where
         V: zerocopy::AsBytes + Clone + Eq + Hash,
+        S: From<Vec<V>>,
         [(); { core::mem::size_of::<V>() }]:,
     {
         // First, construct a HashMap with max capacity equal to GROUP_COUNT (since each
@@ -179,7 +183,7 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
         let mut new_vox =
             Vec::with_capacity(Self::GROUP_COUNT_TOTAL as usize - default_groups.len());
         let num_groups = self.num_groups();
-        let mut indices = &mut self.indices[..Self::GROUP_COUNT_TOTAL as usize];
+        let indices = &mut self.indices[..Self::GROUP_COUNT_TOTAL as usize];
         indices
             .iter_mut()
             .enumerate()
@@ -209,7 +213,7 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
         });
 
         // Finally, reset our vox and default values to the new ones.
-        self.vox = new_vox;
+        self.vox = new_vox.into();
         self.default = new_default;
     }
 
@@ -250,7 +254,8 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
     }
 
     #[inline(always)]
-    fn idx_unchecked(&self, pos: Vec3<i32>) -> Option<usize> {
+    fn idx_unchecked(&self, pos: Vec3<i32>) -> Option<usize>
+    {
         let grp_idx = Self::grp_idx(pos);
         let rel_idx = Self::rel_idx(pos);
         let base = u32::from(self.indices[grp_idx as usize]);
@@ -290,6 +295,7 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
     #[inline(always)]
     fn set_unchecked(&mut self, pos: Vec3<i32>, vox: V) -> V
     where
+        S: core::ops::DerefMut<Target = Vec<V>>,
         V: Clone + PartialEq,
     {
         if vox != self.default {
@@ -303,16 +309,16 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
     }
 }
 
-impl<V, S: VolSize, M> BaseVol for Chunk<V, S, M> {
+impl<V, S: VolSize<V>, M> BaseVol for Chunk<V, S, M> {
     type Error = ChunkError;
     type Vox = V;
 }
 
-impl<V, S: VolSize, M> RasterableVol for Chunk<V, S, M> {
+impl<V, S: VolSize<V>, M> RasterableVol for Chunk<V, S, M> {
     const SIZE: Vec3<u32> = S::SIZE;
 }
 
-impl<V, S: VolSize, M> ReadVol for Chunk<V, S, M> {
+impl<V, S: core::ops::Deref<Target = Vec<V>> + VolSize<V>, M> ReadVol for Chunk<V, S, M> {
     #[inline(always)]
     fn get(&self, pos: Vec3<i32>) -> Result<&Self::Vox, Self::Error> {
         if !pos
@@ -326,7 +332,10 @@ impl<V, S: VolSize, M> ReadVol for Chunk<V, S, M> {
     }
 }
 
-impl<V: Clone + PartialEq, S: VolSize, M> WriteVol for Chunk<V, S, M> {
+impl<V: Clone + PartialEq, S: VolSize<V>, M> WriteVol for Chunk<V, S, M>
+    where
+        S: core::ops::DerefMut<Target = Vec<V>>,
+{
     #[inline(always)]
     fn set(&mut self, pos: Vec3<i32>, vox: Self::Vox) -> Result<Self::Vox, Self::Error> {
         if !pos
@@ -340,7 +349,7 @@ impl<V: Clone + PartialEq, S: VolSize, M> WriteVol for Chunk<V, S, M> {
     }
 }
 
-pub struct ChunkPosIter<V, S: VolSize, M> {
+pub struct ChunkPosIter<V, S: VolSize<V>, M> {
     // Store as `u8`s so as to reduce memory footprint.
     lb: Vec3<u8>,
     ub: Vec3<u8>,
@@ -348,7 +357,7 @@ pub struct ChunkPosIter<V, S: VolSize, M> {
     pos: Vec3<u8>,
     phantom: PhantomData<Chunk<V, S, M>>,
 }
 
-impl<V, S: VolSize, M> ChunkPosIter<V, S, M> {
+impl<V, S: VolSize<V>, M> ChunkPosIter<V, S, M> {
     fn new(lower_bound: Vec3<i32>, upper_bound: Vec3<i32>) -> Self {
         // If the range is empty, then we have the special case `ub = lower_bound`.
         let ub = if lower_bound.map2(upper_bound, |l, u| l < u).reduce_and() {
@@ -365,7 +374,7 @@ impl<V, S: VolSize, M> ChunkPosIter<V, S, M> {
     }
 }
 
-impl<V, S: VolSize, M> Iterator for ChunkPosIter<V, S, M> {
+impl<V, S: VolSize<V>, M> Iterator for ChunkPosIter<V, S, M> {
     type Item = Vec3<i32>;
 
     #[inline(always)]
@@ -420,12 +429,12 @@ impl<V, S: VolSize, M> Iterator for ChunkPosIter<V, S, M> {
     }
 }
 
-pub struct ChunkVolIter<'a, V, S: VolSize, M> {
+pub struct ChunkVolIter<'a, V, S: VolSize<V>, M> {
     chunk: &'a Chunk<V, S, M>,
     iter_impl: ChunkPosIter<V, S, M>,
 }
 
-impl<'a, V, S: VolSize, M> Iterator for ChunkVolIter<'a, V, S, M> {
+impl<'a, V, S: core::ops::DerefMut<Target = Vec<V>> + VolSize<V>, M> Iterator for ChunkVolIter<'a, V, S, M> {
     type Item = (Vec3<i32>, &'a V);
 
     #[inline(always)]
@@ -436,7 +445,7 @@ impl<'a, V, S: VolSize, M> Iterator for ChunkVolIter<'a, V, S, M> {
     }
 }
 
-impl<V, S: VolSize, M> Chunk<V, S, M> {
+impl<V, S: VolSize<V>, M> Chunk<V, S, M> {
     /// It's possible to obtain a positional iterator without having a `Chunk`
     /// instance.
     pub fn pos_iter(lower_bound: Vec3<i32>, upper_bound: Vec3<i32>) -> ChunkPosIter<V, S, M> {
@@ -444,7 +453,7 @@ impl<V, S: VolSize, M> Chunk<V, S, M> {
         ChunkPosIter::new(lower_bound, upper_bound)
     }
 }
 
-impl<'a, V, S: VolSize, M> IntoPosIterator for &'a Chunk<V, S, M> {
+impl<'a, V, S: VolSize<V>, M> IntoPosIterator for &'a Chunk<V, S, M> {
     type IntoIter = ChunkPosIter<V, S, M>;
 
     fn pos_iter(self, lower_bound: Vec3<i32>, upper_bound: Vec3<i32>) -> Self::IntoIter {
@@ -452,7 +461,7 @@ impl<'a, V, S: VolSize, M> IntoPosIterator for &'a Chunk<V, S, M> {
         Self::IntoIter::new(lower_bound, upper_bound)
     }
 }
 
-impl<'a, V, S: VolSize, M> IntoVolIterator<'a> for &'a Chunk<V, S, M> {
+impl<'a, V, S: core::ops::DerefMut<Target = Vec<V>> + VolSize<V>, M> IntoVolIterator<'a> for &'a Chunk<V, S, M> {
     type IntoIter = ChunkVolIter<'a, V, S, M>;
 
     fn vol_iter(self, lower_bound: Vec3<i32>, upper_bound: Vec3<i32>) -> Self::IntoIter {
diff --git a/world/benches/site2.rs b/world/benches/site2.rs
index b45bafc620..be17f311df 100644
--- a/world/benches/site2.rs
+++ b/world/benches/site2.rs
@@ -310,6 +310,19 @@ fn dungeon(c: &mut Criterion) {
         });
     });
 
+    c.bench_function("deserialize_chunk", |b| {
+        // let chunk_pos = (world.sim().map_size_lg().chunks() >> 1).as_();
+        // let chunk_pos = Vec2::new(9500 / 32, 29042 / 32);
+        // let chunk_pos = Vec2::new(26944 / 32, 26848 / 32);
+        let chunk_pos = Vec2::new(842, 839);
+        let chunk = world.generate_chunk(index.as_index_ref(), chunk_pos, || false, None).unwrap().0;
+        let serialized = bincode::serialize(&chunk).unwrap();
+        // let chunk_pos = Vec2::new(24507/32, 20682/32);
+        // let chunk_pos = Vec2::new(19638/32, 19621/32);
+        b.iter(|| {
+            black_box(bincode::deserialize::<TerrainChunk>(&serialized).unwrap());
+        });
+    });
     /*
     c.bench_function("generate_dungeon", |b| {
         let mut rng = rand::rngs::StdRng::from_seed(seed);
diff --git a/world/examples/chunk_compression_benchmarks.rs b/world/examples/chunk_compression_benchmarks.rs
index f4cb5fbf4e..76fdb8dcd9 100644
--- a/world/examples/chunk_compression_benchmarks.rs
+++ b/world/examples/chunk_compression_benchmarks.rs
@@ -78,8 +78,8 @@ fn do_deflate_flate2<const LEVEL: u32>(data: &[u8]) -> Vec<u8> {
     encoder.finish().expect("Failed to finish compression!")
 }
 
-fn chonk_to_dyna<V: Clone, S: RectVolSize, M: Clone, A: Access>(
-    chonk: &Chonk<V, S, M>,
+fn chonk_to_dyna<V: Clone, Storage: core::ops::DerefMut<Target = Vec<V>>, S: RectVolSize, M: Clone, A: Access>(
+    chonk: &Chonk<V, Storage, S, M>,
     block: V,
 ) -> Dyna<V, M, A> {
     let mut dyna = Dyna::<V, M, A>::filled(
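
The validation trick at the heart of BlockVec::try_from can be restated as a self-contained sketch. Everything below is a simplified model, not the veloren-common code: DemoKind and DemoBlock are hypothetical stand-ins for BlockKind and Block, a plain [u64; 4] stands in for bitvec's bitarr!, and the valid-kind set is written out by hand where the real code derives it from strum's EnumIter.

// Hypothetical stand-ins for BlockKind/Block; the real types live in
// common/src/terrain/block.rs and have many more variants.
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
enum DemoKind {
    Air = 0x00,
    Rock = 0x10,
}

#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(C, align(4))]
struct DemoBlock {
    kind: DemoKind, // first byte, guaranteed by repr(C)
    attr: [u8; 3],  // arbitrary payload; any byte pattern is valid here
}

fn validate_blocks(bytes: &[u8]) -> Result<Vec<DemoBlock>, &'static str> {
    // Stands in for bytemuck::try_cast_slice::<u8, [u8; 4]>: only the length
    // can fail, since [u8; 4] has alignment 1.
    if bytes.len() % 4 != 0 {
        return Err("length must be a multiple of 4");
    }

    // Record every kind byte that occurs, in one branch-free pass over the
    // data (the diff uses bitvec's bitarr![0; 256] for this 256-bit set).
    let mut seen = [0u64; 4];
    for chunk in bytes.chunks_exact(4) {
        let kind = chunk[0] as usize;
        seen[kind / 64] |= 1u64 << (kind % 64);
    }

    // The set of valid discriminants; the real code builds this from EnumIter
    // so it cannot drift out of sync with the enum definition.
    let mut valid = [0u64; 4];
    for kind in [DemoKind::Air as u8, DemoKind::Rock as u8] {
        valid[kind as usize / 64] |= 1u64 << (kind % 64);
    }

    // The requirement from the comments in the diff: (!valid & seen) == 0.
    if seen.iter().zip(valid.iter()).any(|(&s, &v)| s & !v != 0) {
        return Err("found an unknown kind");
    }

    // Every kind byte is now known to be a valid discriminant, so
    // reinterpreting each 4-byte group as a DemoBlock is sound; the bytes are
    // copied into a fresh Vec that carries DemoBlock's alignment.
    Ok(bytes
        .chunks_exact(4)
        .map(|c| unsafe { core::mem::transmute::<[u8; 4], DemoBlock>([c[0], c[1], c[2], c[3]]) })
        .collect())
}

fn main() {
    assert!(validate_blocks(&[0x00, 1, 2, 3, 0x10, 0, 0, 0]).is_ok());
    assert!(validate_blocks(&[0xFF, 0, 0, 0]).is_err()); // 0xFF is not a DemoKind
}

The final copy is the point the try_from comments make about alignment: the incoming byte buffer cannot simply be kept as the block vector, because a Vec must be deallocated with the layout (including alignment) it was allocated with, so the validated bytes are moved into a fresh, Block-aligned vector.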
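
The other half of the change, making Chunk/Chonk generic over their backing storage, follows a small pattern that can also be shown in isolation. Again a sketch under simplifying assumptions: MiniChunk, PlainStorage, VolStorage, and the single VOLUME constant are hypothetical; the real VolSize keeps its storage bounds commented out and SubChunkSize is the type that actually carries the storage.

use std::ops::{Deref, DerefMut};

// Simplified version of the VolSize<V> idea from common/src/vol.rs: the size
// marker type doubles as the voxel storage, and anything that derefs to
// Vec<V> (Vec itself, or a wrapper like BlockVec) can be plugged in.
trait VolStorage<V>: Deref<Target = Vec<V>> + DerefMut + From<Vec<V>> {
    const VOLUME: usize;
}

// A plain Vec-backed storage marker, analogous to SubChunkSize<V, Storage, S>.
struct PlainStorage<V>(Vec<V>);

impl<V> Deref for PlainStorage<V> {
    type Target = Vec<V>;

    fn deref(&self) -> &Vec<V> { &self.0 }
}

impl<V> DerefMut for PlainStorage<V> {
    fn deref_mut(&mut self) -> &mut Vec<V> { &mut self.0 }
}

impl<V> From<Vec<V>> for PlainStorage<V> {
    fn from(v: Vec<V>) -> Self { Self(v) }
}

impl<V> VolStorage<V> for PlainStorage<V> {
    const VOLUME: usize = 32 * 32 * 32; // assumed sub-chunk volume
}

// The chunk only touches its storage through Deref/From, so a wrapper with a
// faster Serialize/Deserialize (like BlockVec) drops in without code changes.
struct MiniChunk<V, S: VolStorage<V>> {
    vox: S,
    default: V,
}

impl<V: Clone + PartialEq, S: VolStorage<V>> MiniChunk<V, S> {
    fn filled(default: V) -> Self {
        // An empty Vec models the "all default" chunk, as in Chunk::filled.
        Self { vox: Vec::new().into(), default }
    }

    fn get(&self, idx: usize) -> &V {
        self.vox.get(idx).unwrap_or(&self.default)
    }
}

fn main() {
    let chunk: MiniChunk<u8, PlainStorage<u8>> = MiniChunk::filled(0);
    assert_eq!(*chunk.get(5), 0);
}

This is why TerrainChunk can switch from Chonk<Block, TerrainChunkSize, TerrainChunkMeta> to Chonk<Block, BlockVec, TerrainChunkSize, TerrainChunkMeta> in mod.rs without touching the chunk logic: BlockVec satisfies the same Deref/DerefMut/From surface while bringing its fast validated deserialization along.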