Merge branch 'aweinstock/imageterrain' into 'master'

Compress terrain with images for clients with low bandwidth.

See merge request veloren/veloren!2207
This commit is contained in:
Marcel 2021-05-03 14:10:38 +00:00
commit 1643e30545
24 changed files with 2144 additions and 352 deletions

View File

@ -53,6 +53,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Custom map markers can be placed now
- Fundamentals/prototype for wiring system
- Mountain peak and lake markers on the map
- There's now a checkbox in the graphics tab to opt-in to receiving lossily-compressed terrain colors.
### Changed

10
Cargo.lock generated
View File

@ -2381,6 +2381,7 @@ dependencies = [
"bytemuck",
"byteorder",
"color_quant",
"jpeg-decoder",
"num-iter",
"num-rational 0.3.2",
"num-traits",
@ -2510,6 +2511,12 @@ dependencies = [
"libc",
]
[[package]]
name = "jpeg-decoder"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2"
[[package]]
name = "js-sys"
version = "0.3.50"
@ -5508,6 +5515,8 @@ dependencies = [
"bincode",
"flate2",
"hashbrown",
"image",
"num-traits",
"serde",
"specs",
"specs-idvs",
@ -5801,6 +5810,7 @@ dependencies = [
"minifb",
"noise",
"num 0.4.0",
"num-traits",
"ordered-float 2.1.1",
"packed_simd_2",
"rand 0.8.3",

View File

@ -77,6 +77,7 @@
"hud.settings.fullscreen_mode.exclusive": "Exclusive",
"hud.settings.fullscreen_mode.borderless": "Borderless",
"hud.settings.particles": "Particles",
"hud.settings.lossy_terrain_compression": "Lossy terrain compression",
"hud.settings.resolution": "Resolution",
"hud.settings.bit_depth": "Bit Depth",
"hud.settings.refresh_rate": "Refresh Rate",

View File

@ -800,7 +800,10 @@ impl Client {
| ClientGeneral::RefundSkill(_)
| ClientGeneral::RequestSiteInfo(_)
| ClientGeneral::UnlockSkillGroup(_)
| ClientGeneral::RequestPlayerPhysics { .. } => &mut self.in_game_stream,
| ClientGeneral::RequestPlayerPhysics { .. }
| ClientGeneral::RequestLossyTerrainCompression { .. } => {
&mut self.in_game_stream
},
//Only in game, terrain
ClientGeneral::TerrainChunkRequest { .. } => &mut self.terrain_stream,
//Always possible
@ -820,6 +823,12 @@ impl Client {
})
}
pub fn request_lossy_terrain_compression(&mut self, lossy_terrain_compression: bool) {
self.send_msg(ClientGeneral::RequestLossyTerrainCompression {
lossy_terrain_compression,
})
}
fn send_msg<S>(&mut self, msg: S)
where
S: Into<ClientMsg>,
@ -1952,7 +1961,7 @@ impl Client {
fn handle_server_terrain_msg(&mut self, msg: ServerGeneral) -> Result<(), Error> {
match msg {
ServerGeneral::TerrainChunkUpdate { key, chunk } => {
if let Some(chunk) = chunk.ok().and_then(|c| c.decompress()) {
if let Some(chunk) = chunk.ok().and_then(|c| c.to_chunk()) {
self.state.insert_chunk(key, Arc::new(chunk));
}
self.pending_chunks.remove(&key);

View File

@ -16,6 +16,8 @@ common = {package = "veloren-common", path = "../../common"}
bincode = "1.3.3"
flate2 = "1.0.20"
image = { version = "0.23.12", default-features = false, features = ["png", "jpeg"] }
num-traits = "0.2"
sum_type = "0.2.0"
vek = { version = "=0.14.1", features = ["serde"] }
tracing = { version = "0.1", default-features = false }

View File

@ -1,2 +1,8 @@
#![allow(incomplete_features)]
#![feature(
const_generics,
const_fn_floating_point_arithmetic,
const_evaluatable_checked
)]
pub mod msg;
pub mod sync;

View File

@ -84,6 +84,9 @@ pub enum ClientGeneral {
RequestPlayerPhysics {
server_authoritative: bool,
},
RequestLossyTerrainCompression {
lossy_terrain_compression: bool,
},
}
impl ClientMsg {
@ -121,7 +124,8 @@ impl ClientMsg {
| ClientGeneral::RefundSkill(_)
| ClientGeneral::RequestSiteInfo(_)
| ClientGeneral::UnlockSkillGroup(_)
| ClientGeneral::RequestPlayerPhysics { .. } => {
| ClientGeneral::RequestPlayerPhysics { .. }
| ClientGeneral::RequestLossyTerrainCompression { .. } => {
c_type == ClientType::Game && presence.is_some()
},
//Always possible

View File

@ -0,0 +1,800 @@
use common::{
terrain::{chonk::Chonk, Block, BlockKind, SpriteKind},
vol::{BaseVol, ReadVol, RectVolSize, WriteVol},
volumes::vol_grid_2d::VolGrid2d,
};
use hashbrown::HashMap;
use image::{ImageBuffer, ImageDecoder, Pixel};
use num_traits::cast::FromPrimitive;
use serde::{Deserialize, Serialize};
use std::{
fmt::Debug,
io::{Read, Write},
marker::PhantomData,
};
use tracing::warn;
use vek::*;
/// Wrapper for compressed, serialized data (for stuff that doesn't use the
/// default lz4 compression)
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompressedData<T> {
    pub data: Vec<u8>,
    // True iff `data` is Deflate-compressed bincode rather than raw bincode;
    // `compress` only compresses payloads of at least 32 bytes.
    compressed: bool,
    // Ties the wrapper to the type it was serialized from, without storing it.
    _phantom: PhantomData<T>,
}
impl<T: Serialize> CompressedData<T> {
    /// Serialize `t` with bincode, then Deflate-compress the bytes at the
    /// given `level` when the serialized form is at least 32 bytes (below
    /// that, compression overhead isn't worth it and the raw bytes are kept).
    pub fn compress(t: &T, level: u32) -> Self {
        use flate2::{write::DeflateEncoder, Compression};
        let raw = bincode::serialize(t)
            .expect("bincode serialization can only fail if a byte limit is set");
        if raw.len() < 32 {
            // Tiny payload: store the bincode bytes verbatim.
            return CompressedData {
                data: raw,
                compressed: false,
                _phantom: PhantomData,
            };
        }
        const EXPECT_MSG: &str =
            "compression only fails for fallible Read/Write impls (which Vec<u8> is not)";
        // Start with a small buffer; deflate typically shrinks this data a lot.
        let mut encoder =
            DeflateEncoder::new(Vec::with_capacity(raw.len() / 10), Compression::new(level));
        encoder.write_all(&raw).expect(EXPECT_MSG);
        CompressedData {
            data: encoder.finish().expect(EXPECT_MSG),
            compressed: true,
            _phantom: PhantomData,
        }
    }
}
impl<T: for<'a> Deserialize<'a>> CompressedData<T> {
    /// Recover the original value, returning `None` if either inflation or
    /// bincode deserialization fails (e.g. corrupt or truncated data).
    pub fn decompress(&self) -> Option<T> {
        if !self.compressed {
            // Stored verbatim: deserialize straight from the raw bytes.
            return bincode::deserialize(&self.data).ok();
        }
        let mut inflated = Vec::with_capacity(self.data.len());
        flate2::read::DeflateDecoder::new(self.data.as_slice())
            .read_to_end(&mut inflated)
            .ok()?;
        bincode::deserialize(&inflated).ok()
    }
}
/// Formula for packing voxel data into a 2d array
pub trait PackingFormula: Copy {
    /// Width and height of the 2d image needed to hold a volume of size
    /// `dims`.
    fn dimensions(&self, dims: Vec3<u32>) -> (u32, u32);
    /// Map the voxel coordinate `(x, y, z)` (within `dims`) to a 2d pixel
    /// coordinate `(i, j)` in the packed image.
    fn index(&self, dims: Vec3<u32>, x: u32, y: u32, z: u32) -> (u32, u32);
}
/// A wide, short image. Shares the advantage of not wasting space with
/// TallPacking (which is strictly worse, and was moved to benchmark code in
/// `world`), but faster to compress and smaller since PNG compresses each
/// row independently, so a wide image has fewer calls to the compressor.
/// FLIP_X has the same spatial locality preserving behavior as with
/// TallPacking.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct WidePacking<const FLIP_X: bool>();

impl<const FLIP_X: bool> PackingFormula for WidePacking<FLIP_X> {
    #[inline(always)]
    fn dimensions(&self, dims: Vec3<u32>) -> (u32, u32) { (dims.x * dims.z, dims.y) }

    #[allow(clippy::many_single_char_names)]
    #[inline(always)]
    fn index(&self, dims: Vec3<u32>, x: u32, y: u32, z: u32) -> (u32, u32) {
        // With FLIP_X, odd z-slices run right-to-left so horizontally
        // adjacent slices stay spatially close in the packed row.
        let flipped_x = if FLIP_X && z % 2 != 0 {
            dims.x - x - 1
        } else {
            x
        };
        (z * dims.x + flipped_x, y)
    }
}
/// A grid of the z levels, left to right, top to bottom, like English prose.
/// Convenient for visualizing terrain for debugging or for user-inspectable
/// file formats, but wastes space if the number of z levels isn't a perfect
/// square.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct GridLtrPacking;

impl PackingFormula for GridLtrPacking {
    #[inline(always)]
    fn dimensions(&self, dims: Vec3<u32>) -> (u32, u32) {
        // A rootz x rootz grid of slices is the smallest square grid that
        // fits dims.z slices.
        let rootz = (dims.z as f64).sqrt().ceil() as u32;
        (dims.x * rootz, dims.y * rootz)
    }

    #[allow(clippy::many_single_char_names)]
    #[inline(always)]
    fn index(&self, dims: Vec3<u32>, x: u32, y: u32, z: u32) -> (u32, u32) {
        let rootz = (dims.z as f64).sqrt().ceil() as u32;
        let (col, row) = (z % rootz, z / rootz);
        (x + col * dims.x, y + row * dims.y)
    }
}
/// A strategy for encoding a packed 2d arrangement of voxels into one or
/// more images.
pub trait VoxelImageEncoding: Copy {
    /// Mutable intermediate state, filled in pixel by pixel.
    type Workspace;
    /// Final serializable result produced by `finish`.
    type Output;
    /// Allocate a workspace for a packed image of the given dimensions.
    fn create(width: u32, height: u32) -> Self::Workspace;
    /// Record a filled (colored) block at packed coordinate `(x, y)`.
    fn put_solid(ws: &mut Self::Workspace, x: u32, y: u32, kind: BlockKind, rgb: Rgb<u8>);
    /// Record an unfilled block carrying a sprite (and optional orientation)
    /// at packed coordinate `(x, y)`.
    fn put_sprite(
        ws: &mut Self::Workspace,
        x: u32,
        y: u32,
        kind: BlockKind,
        sprite: SpriteKind,
        ori: Option<u8>,
    );
    /// Finalize the workspace into the output; `None` on encoding failure.
    fn finish(ws: &Self::Workspace) -> Option<Self::Output>;
}
/// Inverse of [`VoxelImageEncoding`]: turns an encoded output back into
/// blocks.
pub trait VoxelImageDecoding: VoxelImageEncoding {
    /// Decode the output into a workspace that `get_block` can sample.
    fn start(ws: &Self::Output) -> Option<Self::Workspace>;
    /// Reconstruct the block at packed coordinate `(x, y)`. `is_border`
    /// signals that lossy color interpolation should be skipped for this
    /// pixel.
    fn get_block(ws: &Self::Workspace, x: u32, y: u32, is_border: bool) -> Block;
}
/// Decode an image into an `ImageBuffer` of the requested pixel type,
/// returning `None` on decoding failure or if the decoded bytes don't match
/// the requested pixel layout.
pub fn image_from_bytes<'a, I: ImageDecoder<'a>, P: 'static + Pixel<Subpixel = u8>>(
    decoder: I,
) -> Option<ImageBuffer<P, Vec<u8>>> {
    let (width, height) = decoder.dimensions();
    let mut pixels = vec![0; decoder.total_bytes() as usize];
    decoder.read_image(&mut pixels).ok()?;
    ImageBuffer::from_raw(width, height, pixels)
}
/// Encodes a chunk as four PNGs: three full-resolution greyscale planes
/// (block kind, sprite kind, sprite orientation) plus one RGB color plane
/// downscaled by `RESOLUTION_DIVIDER` in each axis — the lossy part.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct QuadPngEncoding<const RESOLUTION_DIVIDER: u32>();

impl<const N: u32> VoxelImageEncoding for QuadPngEncoding<N> {
    // Concatenated PNG bytes plus the end offsets of the first three images
    // (so the decoder can split them apart), deflated once more as a unit.
    type Output = CompressedData<(Vec<u8>, [usize; 3])>;
    #[allow(clippy::type_complexity)]
    type Workspace = (
        ImageBuffer<image::Luma<u8>, Vec<u8>>, // block kind
        ImageBuffer<image::Luma<u8>, Vec<u8>>, // sprite kind
        ImageBuffer<image::Luma<u8>, Vec<u8>>, // sprite orientation
        ImageBuffer<image::Rgb<u8>, Vec<u8>>,  // color, downscaled by N
    );

    fn create(width: u32, height: u32) -> Self::Workspace {
        (
            ImageBuffer::new(width, height),
            ImageBuffer::new(width, height),
            ImageBuffer::new(width, height),
            // Color plane is N times smaller in each dimension.
            ImageBuffer::new(width / N, height / N),
        )
    }

    #[inline(always)]
    fn put_solid(ws: &mut Self::Workspace, x: u32, y: u32, kind: BlockKind, rgb: Rgb<u8>) {
        ws.0.put_pixel(x, y, image::Luma([kind as u8]));
        // Last write wins among the N*N voxels that share a downscaled pixel.
        ws.3.put_pixel(x / N, y / N, image::Rgb([rgb.r, rgb.g, rgb.b]));
    }

    #[inline(always)]
    fn put_sprite(
        ws: &mut Self::Workspace,
        x: u32,
        y: u32,
        kind: BlockKind,
        sprite: SpriteKind,
        ori: Option<u8>,
    ) {
        ws.0.put_pixel(x, y, image::Luma([kind as u8]));
        ws.1.put_pixel(x, y, image::Luma([sprite as u8]));
        ws.2.put_pixel(x, y, image::Luma([ori.unwrap_or(0)]));
    }

    fn finish(ws: &Self::Workspace) -> Option<Self::Output> {
        let mut buf = Vec::new();
        use image::codecs::png::{CompressionType, FilterType};
        let mut indices = [0; 3];
        // Append one greyscale PNG to `buf`, recording its end offset in
        // `indices[i]` so `start` can split the concatenation back apart.
        let mut f = |x: &ImageBuffer<_, Vec<u8>>, i| {
            let png = image::codecs::png::PngEncoder::new_with_quality(
                &mut buf,
                CompressionType::Rle,
                FilterType::Up,
            );
            png.encode(&*x.as_raw(), x.width(), x.height(), image::ColorType::L8)
                .ok()?;
            indices[i] = buf.len();
            Some(())
        };
        f(&ws.0, 0)?;
        f(&ws.1, 1)?;
        f(&ws.2, 2)?;
        {
            // Fourth image: the RGB color plane; its end is simply buf.len().
            let png = image::codecs::png::PngEncoder::new_with_quality(
                &mut buf,
                CompressionType::Rle,
                FilterType::Sub,
            );
            png.encode(
                &*ws.3.as_raw(),
                ws.3.width(),
                ws.3.height(),
                image::ColorType::Rgb8,
            )
            .ok()?;
        }
        // The whole concatenation gets one more deflate pass.
        Some(CompressedData::compress(&(buf, indices), 4))
    }
}
/// AldanTanneo's sin approximation (since std's sin implementation isn't const
/// yet)
///
/// Range-reduces the argument to within half a period of the origin, then
/// evaluates an 11th-order Taylor polynomial.
const fn sin(x: f64) -> f64 {
    use std::f64::consts::PI;
    // Shift by pi/2, wrap into (-2pi, 2pi), then reflect twice to land in
    // [-pi/2, pi/2] where the Taylor series converges quickly.
    let mut t = (x - PI * 0.5) % (PI * 2.0);
    t = if t < 0.0 { -t } else { t } - PI;
    t = if t < 0.0 { -t } else { t } - PI * 0.5;
    // Odd Taylor terms t^(2k+1)/(2k+1)!, built incrementally so each term
    // reuses the previous one.
    let t2 = t * t;
    let t3 = t * t2 / 6.0;
    let t5 = t3 * t2 / 20.0;
    let t7 = t5 * t2 / 42.0;
    let t9 = t7 * t2 / 72.0;
    let t11 = t9 * t2 / 110.0;
    t - t3 + t5 - t7 + t9 - t11
}
/// https://en.wikipedia.org/wiki/Lanczos_resampling#Lanczos_kernel
///
/// Evaluates the Lanczos kernel `sinc(x) * sinc(x / a)` with window
/// parameter `a`; zero outside `[-a, a]`.
///
/// The kernel is an even function, but the previous implementation returned
/// 1.0 for *every* negative input (any `x < 0` satisfied `x < f64::EPSILON`).
/// That was masked by callers passing `.abs()` values; taking the absolute
/// value first makes the function correct for all inputs while leaving its
/// result for non-negative `x` bit-identical.
const fn lanczos(x: f64, a: f64) -> f64 {
    use std::f64::consts::PI;
    // Manual abs: `f64::abs` isn't callable in `const fn` on this toolchain.
    let ax = if x < 0.0 { -x } else { x };
    if ax < f64::EPSILON {
        // sinc(0) * sinc(0) = 1, avoiding the 0/0 in the general formula.
        1.0
    } else if ax <= a {
        (a * sin(PI * ax) * sin(PI * ax / a)) / (PI * PI * ax * ax)
    } else {
        0.0
    }
}
/// Needs to be a separate function since `const fn`s can appear in the output
/// of a const-generic function, but raw arithmetic expressions can't be
///
/// Number of LUT entries for `n` sub-pixel offsets and sample radius `r`:
/// `2 * n * (r + 1) - 1`.
const fn lanczos_lookup_array_size(n: u32, r: u32) -> usize {
    let entries = 2 * n * (r + 1) - 1;
    entries as usize
}
/// Precompute the Lanczos kernel (window parameter `a`) at every sub-pixel
/// offset the `QuadPngEncoding::<N>` interpolation can ask for, with sample
/// radius `R`. Entry `i` holds `lanczos(step * i - max, a)`, i.e. the kernel
/// sampled on a uniform grid of spacing `1 / (2 * N)`, centered so that the
/// LUT is indexed by absolute distance (see the LUT use in `get_block`).
const fn gen_lanczos_lookup<const N: u32, const R: u32>(
    a: f64,
) -> [f64; lanczos_lookup_array_size(N, R)] {
    let quadpng_n = N as f64;
    let sample_radius = R as f64;
    // Grid spacing between successive LUT entries (in source-pixel units).
    let step = 1.0 / (2.0 * quadpng_n);
    // Largest |offset| the table must represent.
    let max = (quadpng_n - 1.0) / (2.0 * quadpng_n) + sample_radius;
    // after doing some maths with the above:
    let mut array = [0.0; lanczos_lookup_array_size(N, R)];
    // `while` loop because iterators aren't usable in `const fn`.
    let mut i = 0;
    while i < array.len() {
        array[i] = lanczos(step * i as f64 - max, a);
        i += 1;
    }
    array
}
impl<const N: u32> VoxelImageDecoding for QuadPngEncoding<N> {
    /// Split the concatenated PNG bytes back into the four images using the
    /// recorded end offsets of the first three.
    fn start(data: &Self::Output) -> Option<Self::Workspace> {
        use image::codecs::png::PngDecoder;
        let (quad, indices) = data.decompress()?;
        let ranges: [_; 4] = [
            0..indices[0],
            indices[0]..indices[1],
            indices[1]..indices[2],
            indices[2]..quad.len(),
        ];
        let a = image_from_bytes(PngDecoder::new(&quad[ranges[0].clone()]).ok()?)?;
        let b = image_from_bytes(PngDecoder::new(&quad[ranges[1].clone()]).ok()?)?;
        let c = image_from_bytes(PngDecoder::new(&quad[ranges[2].clone()]).ok()?)?;
        let d = image_from_bytes(PngDecoder::new(&quad[ranges[3].clone()]).ok()?)?;
        Some((a, b, c, d))
    }

    #[allow(clippy::many_single_char_names)]
    fn get_block(ws: &Self::Workspace, x: u32, y: u32, is_border: bool) -> Block {
        if let Some(kind) = BlockKind::from_u8(ws.0.get_pixel(x, y).0[0]) {
            if kind.is_filled() {
                let (w, h) = ws.3.dimensions();
                // `match 0` is a hard-coded switch between upscaling
                // strategies kept around from tuning; branch 0
                // (weighted average) is the one currently selected.
                let mut rgb = match 0 {
                    // Weighted-average interpolation
                    0 => {
                        const SAMPLE_RADIUS: i32 = 2i32; // sample_size = SAMPLE_RADIUS * 2 + 1
                        let mut rgb: Vec3<f64> = Vec3::zero();
                        let mut total = 0.0;
                        for dx in -SAMPLE_RADIUS..=SAMPLE_RADIUS {
                            for dy in -SAMPLE_RADIUS..=SAMPLE_RADIUS {
                                // Wrapping add: negative offsets near the edge
                                // wrap to huge values, which the `i < w`/`j < h`
                                // bounds check below then rejects.
                                let (i, j) = (
                                    (x.wrapping_add(dx as u32) / N),
                                    (y.wrapping_add(dy as u32) / N),
                                );
                                if i < w && j < h {
                                    // Manhattan-distance-based weight.
                                    let r = 5.0 - (dx.abs() + dy.abs()) as f64;
                                    let pix = Vec3::<u8>::from(ws.3.get_pixel(i, j).0);
                                    // Pure black marks pixels with no color
                                    // data; skip them so they don't darken
                                    // the average.
                                    if pix != Vec3::zero() {
                                        rgb += r * pix.as_();
                                        total += r;
                                    }
                                }
                            }
                        }
                        rgb /= total;
                        rgb
                    },
                    // Mckol's Lanczos interpolation with AldanTanneo's Lanczos LUT
                    1 if N == 4 => {
                        const LANCZOS_A: f64 = 2.0; // See https://www.desmos.com/calculator/xxejcymyua
                        const SAMPLE_RADIUS: i32 = 2i32; // sample_size = SAMPLE_RADIUS * 2 + 1
                        // rustc currently doesn't support supplying N and SAMPLE_RADIUS, even with
                        // a few workarounds, so hack around it by using the dynamic check above
                        const LANCZOS_LUT: [f64; lanczos_lookup_array_size(4, 2)] =
                            gen_lanczos_lookup::<4, 2>(LANCZOS_A);
                        // As a reminder: x, y are destination pixel coordinates (not downscaled).
                        let mut rgb: Vec3<f64> = Vec3::zero();
                        for dx in -SAMPLE_RADIUS..=SAMPLE_RADIUS {
                            for dy in -SAMPLE_RADIUS..=SAMPLE_RADIUS {
                                // Source pixel coordinates (downscaled):
                                let (src_x, src_y) = (
                                    (x.wrapping_add(dx as u32) / N),
                                    (y.wrapping_add(dy as u32) / N),
                                );
                                if src_x < w && src_y < h {
                                    let pix: Vec3<f64> =
                                        Vec3::<u8>::from(ws.3.get_pixel(src_x, src_y).0).as_();
                                    // Relative coordinates where 1 unit is the size of one source
                                    // pixel and 0 is the center of the source pixel:
                                    let x_rel = ((x % N) as f64 - (N - 1) as f64 / 2.0) / N as f64;
                                    let y_rel = ((y % N) as f64 - (N - 1) as f64 / 2.0) / N as f64;
                                    // Distance from the currently processed target pixel's center
                                    // to the currently processed source pixel's center:
                                    rgb += LANCZOS_LUT
                                        .get((dx as f64 - x_rel).abs() as usize)
                                        .unwrap_or(&0.0)
                                        * LANCZOS_LUT
                                            .get((dy as f64 - y_rel).abs() as usize)
                                            .unwrap_or(&0.0)
                                        * pix;
                                }
                            }
                        }
                        rgb
                    },
                    // Mckol's Lanczos interpolation
                    1 | 2 => {
                        const LANCZOS_A: f64 = 2.0; // See https://www.desmos.com/calculator/xxejcymyua
                        const SAMPLE_RADIUS: i32 = 2i32; // sample_size = SAMPLE_RADIUS * 2 + 1
                        // As a reminder: x, y are destination pixel coordinates (not downscaled).
                        let mut rgb: Vec3<f64> = Vec3::zero();
                        for dx in -SAMPLE_RADIUS..=SAMPLE_RADIUS {
                            for dy in -SAMPLE_RADIUS..=SAMPLE_RADIUS {
                                // Source pixel coordinates (downscaled):
                                let (src_x, src_y) = (
                                    (x.wrapping_add(dx as u32) / N),
                                    (y.wrapping_add(dy as u32) / N),
                                );
                                if src_x < w && src_y < h {
                                    let pix: Vec3<f64> =
                                        Vec3::<u8>::from(ws.3.get_pixel(src_x, src_y).0).as_();
                                    // Relative coordinates where 1 unit is the size of one source
                                    // pixel and 0 is the center of the source pixel:
                                    let x_rel = ((x % N) as f64 - (N - 1) as f64 / 2.0) / N as f64;
                                    let y_rel = ((y % N) as f64 - (N - 1) as f64 / 2.0) / N as f64;
                                    // Distance from the currently processed target pixel's center
                                    // to the currently processed source pixel's center:
                                    rgb += lanczos((dx as f64 - x_rel).abs(), LANCZOS_A)
                                        * lanczos((dy as f64 - y_rel).abs(), LANCZOS_A)
                                        * pix;
                                }
                            }
                        }
                        rgb
                    },
                    // No interpolation
                    _ => Vec3::<u8>::from(ws.3.get_pixel(x / N, y / N).0).as_(),
                };
                if is_border {
                    // Border pixels skip interpolation and use the raw
                    // downscaled color directly.
                    rgb = Vec3::<u8>::from(ws.3.get_pixel(x / N, y / N).0).as_();
                }
                Block::new(kind, Rgb {
                    r: rgb.x as u8,
                    g: rgb.y as u8,
                    b: rgb.z as u8,
                })
            } else {
                // Unfilled block: reconstruct sprite and orientation instead
                // of a color.
                let mut block = Block::new(kind, Rgb { r: 0, g: 0, b: 0 });
                if let Some(spritekind) = SpriteKind::from_u8(ws.1.get_pixel(x, y).0[0]) {
                    block = block.with_sprite(spritekind);
                }
                if let Some(oriblock) = block.with_ori(ws.2.get_pixel(x, y).0[0]) {
                    block = oriblock;
                }
                block
            }
        } else {
            // Unrecognized kind byte: treat as empty rather than guessing.
            Block::empty()
        }
    }
}
/// Encodes a chunk as three greyscale PNGs (block kind, sprite kind, sprite
/// orientation) and no color plane; solid-block colors are either dropped
/// entirely or summarized as one average color per `BlockKind` when
/// `AVERAGE_PALETTE` is set (see the encoding/decoding impls below).
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct TriPngEncoding<const AVERAGE_PALETTE: bool>();
impl<const AVERAGE_PALETTE: bool> VoxelImageEncoding for TriPngEncoding<AVERAGE_PALETTE> {
    // Concatenated PNG bytes, the per-BlockKind average-color palette (empty
    // unless AVERAGE_PALETTE), and the end offsets of the three images.
    #[allow(clippy::type_complexity)]
    type Output = CompressedData<(Vec<u8>, Vec<Rgb<u8>>, [usize; 3])>;
    #[allow(clippy::type_complexity)]
    type Workspace = (
        ImageBuffer<image::Luma<u8>, Vec<u8>>, // block kind
        ImageBuffer<image::Luma<u8>, Vec<u8>>, // sprite kind
        ImageBuffer<image::Luma<u8>, Vec<u8>>, // sprite orientation
        HashMap<BlockKind, HashMap<Rgb<u8>, usize>>, // per-kind color histogram
    );

    fn create(width: u32, height: u32) -> Self::Workspace {
        (
            ImageBuffer::new(width, height),
            ImageBuffer::new(width, height),
            ImageBuffer::new(width, height),
            HashMap::new(),
        )
    }

    fn put_solid(ws: &mut Self::Workspace, x: u32, y: u32, kind: BlockKind, rgb: Rgb<u8>) {
        ws.0.put_pixel(x, y, image::Luma([kind as u8]));
        ws.1.put_pixel(x, y, image::Luma([0]));
        ws.2.put_pixel(x, y, image::Luma([0]));
        if AVERAGE_PALETTE {
            // Count color occurrences so `finish` can average them per kind.
            *ws.3.entry(kind).or_default().entry(rgb).or_insert(0) += 1;
        }
    }

    fn put_sprite(
        ws: &mut Self::Workspace,
        x: u32,
        y: u32,
        kind: BlockKind,
        sprite: SpriteKind,
        ori: Option<u8>,
    ) {
        ws.0.put_pixel(x, y, image::Luma([kind as u8]));
        ws.1.put_pixel(x, y, image::Luma([sprite as u8]));
        ws.2.put_pixel(x, y, image::Luma([ori.unwrap_or(0)]));
    }

    fn finish(ws: &Self::Workspace) -> Option<Self::Output> {
        let mut buf = Vec::new();
        use image::codecs::png::{CompressionType, FilterType};
        let mut indices = [0; 3];
        // Append one greyscale PNG to `buf`, recording its end offset in
        // `indices[i]` so `start` can split the concatenation back apart.
        let mut f = |x: &ImageBuffer<_, Vec<u8>>, i| {
            let png = image::codecs::png::PngEncoder::new_with_quality(
                &mut buf,
                CompressionType::Rle,
                FilterType::Up,
            );
            png.encode(&*x.as_raw(), x.width(), x.height(), image::ColorType::L8)
                .ok()?;
            indices[i] = buf.len();
            Some(())
        };
        f(&ws.0, 0)?;
        f(&ws.1, 1)?;
        f(&ws.2, 2)?;
        // Collapse each kind's color histogram into one weighted-average
        // color, stored in a 256-entry palette indexed by the kind's u8
        // discriminant.
        let palette = if AVERAGE_PALETTE {
            let mut palette = vec![Rgb { r: 0, g: 0, b: 0 }; 256];
            for (block, hist) in ws.3.iter() {
                let (mut r, mut g, mut b) = (0.0, 0.0, 0.0);
                let mut total = 0;
                for (color, count) in hist.iter() {
                    r += color.r as f64 * *count as f64;
                    g += color.g as f64 * *count as f64;
                    b += color.b as f64 * *count as f64;
                    total += *count;
                }
                r /= total as f64;
                g /= total as f64;
                b /= total as f64;
                palette[*block as u8 as usize].r = r as u8;
                palette[*block as u8 as usize].g = g as u8;
                palette[*block as u8 as usize].b = b as u8;
            }
            palette
        } else {
            Vec::new()
        };
        Some(CompressedData::compress(&(buf, palette, indices), 4))
    }
}
impl<const AVERAGE_PALETTE: bool> VoxelImageDecoding for TriPngEncoding<AVERAGE_PALETTE> {
    /// Split the concatenated PNGs back apart and, if a palette was
    /// transmitted, rebuild a one-entry-per-kind histogram so `get_block`
    /// can read the average color back out.
    fn start(data: &Self::Output) -> Option<Self::Workspace> {
        use image::codecs::png::PngDecoder;
        let (quad, palette, indices) = data.decompress()?;
        let ranges: [_; 3] = [
            0..indices[0],
            indices[0]..indices[1],
            indices[1]..indices[2],
        ];
        let a = image_from_bytes(PngDecoder::new(&quad[ranges[0].clone()]).ok()?)?;
        let b = image_from_bytes(PngDecoder::new(&quad[ranges[1].clone()]).ok()?)?;
        let c = image_from_bytes(PngDecoder::new(&quad[ranges[2].clone()]).ok()?)?;
        let mut d: HashMap<_, HashMap<_, _>> = HashMap::new();
        if AVERAGE_PALETTE {
            for i in 0..=255 {
                if let Some(block) = BlockKind::from_u8(i) {
                    // Single entry with count 1: `get_block` only reads the key.
                    d.entry(block)
                        .or_default()
                        .entry(palette[i as usize])
                        .insert(1);
                }
            }
        }
        Some((a, b, c, d))
    }

    fn get_block(ws: &Self::Workspace, x: u32, y: u32, _: bool) -> Block {
        if let Some(kind) = BlockKind::from_u8(ws.0.get_pixel(x, y).0[0]) {
            if kind.is_filled() {
                let rgb = if AVERAGE_PALETTE {
                    // Transmitted palette: the single histogram key per kind.
                    *ws.3
                        .get(&kind)
                        .and_then(|h| h.keys().next())
                        .unwrap_or(&Rgb::default())
                } else {
                    // No palette: fall back to hard-coded representative
                    // colors per block kind.
                    use BlockKind::*;
                    match kind {
                        Air | Water => Rgb { r: 0, g: 0, b: 0 },
                        Rock => Rgb {
                            r: 93,
                            g: 110,
                            b: 145,
                        },
                        WeakRock => Rgb {
                            r: 93,
                            g: 132,
                            b: 145,
                        },
                        Grass => Rgb {
                            r: 51,
                            g: 160,
                            b: 94,
                        },
                        Snow => Rgb {
                            r: 192,
                            g: 255,
                            b: 255,
                        },
                        Earth => Rgb {
                            r: 200,
                            g: 140,
                            b: 93,
                        },
                        Sand => Rgb {
                            r: 241,
                            g: 177,
                            b: 128,
                        },
                        Wood => Rgb {
                            r: 128,
                            g: 77,
                            b: 51,
                        },
                        Leaves => Rgb {
                            r: 93,
                            g: 206,
                            b: 64,
                        },
                        Misc => Rgb {
                            r: 255,
                            g: 0,
                            b: 255,
                        },
                    }
                };
                Block::new(kind, rgb)
            } else {
                // Unfilled block: reconstruct sprite and orientation.
                let mut block = Block::new(kind, Rgb { r: 0, g: 0, b: 0 });
                if let Some(spritekind) = SpriteKind::from_u8(ws.1.get_pixel(x, y).0[0]) {
                    block = block.with_sprite(spritekind);
                }
                if let Some(oriblock) = block.with_ori(ws.2.get_pixel(x, y).0[0]) {
                    block = oriblock;
                }
                block
            }
        } else {
            // Unrecognized kind byte: treat as empty rather than guessing.
            Block::empty()
        }
    }
}
/// Pack a single chonk into an image, covering its full horizontal extent
/// and exactly the z-range the chonk stores.
pub fn image_terrain_chonk<S: RectVolSize, M: Clone, P: PackingFormula, VIE: VoxelImageEncoding>(
    vie: VIE,
    packing: P,
    chonk: &Chonk<Block, S, M>,
) -> Option<VIE::Output> {
    let lo = Vec3::new(0, 0, chonk.get_min_z() as u32);
    let hi = Vec3::new(S::RECT_SIZE.x, S::RECT_SIZE.y, chonk.get_max_z() as u32);
    image_terrain(vie, packing, chonk, lo, hi)
}
/// Pack an entire `VolGrid2d` of chonks into one image, spanning the
/// bounding box of every chonk present in the grid.
pub fn image_terrain_volgrid<
    S: RectVolSize + Debug,
    M: Clone + Debug,
    P: PackingFormula,
    VIE: VoxelImageEncoding,
>(
    vie: VIE,
    packing: P,
    volgrid: &VolGrid2d<Chonk<Block, S, M>>,
) -> Option<VIE::Output> {
    // Accumulate the world-space bounding box over all chonks.
    let mut lo = Vec3::broadcast(i32::MAX);
    let mut hi = Vec3::broadcast(i32::MIN);
    for (key, chonk) in volgrid.iter() {
        lo.x = lo.x.min(key.x * S::RECT_SIZE.x as i32);
        lo.y = lo.y.min(key.y * S::RECT_SIZE.y as i32);
        lo.z = lo.z.min(chonk.get_min_z());
        hi.x = hi.x.max((key.x + 1) * S::RECT_SIZE.x as i32);
        hi.y = hi.y.max((key.y + 1) * S::RECT_SIZE.y as i32);
        hi.z = hi.z.max(chonk.get_max_z());
    }
    image_terrain(vie, packing, volgrid, lo.as_(), hi.as_())
}
/// Pack the voxels of `vol` in the world-space box `[lo, hi)` into images via
/// the encoding `VIE`, using `packing` to map 3d coordinates to 2d pixels.
pub fn image_terrain<
    V: BaseVol<Vox = Block> + ReadVol,
    P: PackingFormula,
    VIE: VoxelImageEncoding,
>(
    _: VIE,
    packing: P,
    vol: &V,
    lo: Vec3<u32>,
    hi: Vec3<u32>,
) -> Option<VIE::Output> {
    // Extents use wrapping arithmetic so negative world coordinates stored as
    // u32 bit patterns still produce the correct spans.
    let dims = Vec3::new(
        hi.x.wrapping_sub(lo.x),
        hi.y.wrapping_sub(lo.y),
        hi.z.wrapping_sub(lo.z),
    );
    let (width, height) = packing.dimensions(dims);
    let mut ws = VIE::create(width, height);
    for z in 0..dims.z {
        for y in 0..dims.y {
            for x in 0..dims.x {
                let (i, j) = packing.index(dims, x, y, z);
                let wpos = Vec3::new(
                    x.wrapping_add(lo.x),
                    y.wrapping_add(lo.y),
                    z.wrapping_add(lo.z),
                );
                let block = *vol.get(wpos.as_()).unwrap_or(&Block::empty());
                // A block exposes either a color (solid) or a sprite, never
                // both; anything else would make this encoding lossy.
                match (block.get_color(), block.get_sprite()) {
                    (Some(rgb), None) => VIE::put_solid(&mut ws, i, j, *block, rgb),
                    (None, Some(sprite)) => {
                        VIE::put_sprite(&mut ws, i, j, *block, sprite, block.get_ori())
                    },
                    _ => panic!(
                        "attr being used for color vs sprite is mutually exclusive (and that's \
                         required for this translation to be lossless), but there's no way to \
                         guarantee that at the type level with Block's public API"
                    ),
                }
            }
        }
    }
    VIE::finish(&ws)
}
/// Inverse of [`image_terrain`]: decode `data` and write every block in the
/// box `[lo, hi)` back into `vol`. Returns `None` only if decoding fails;
/// individual placement errors are logged and skipped.
pub fn write_image_terrain<
    V: BaseVol<Vox = Block> + WriteVol,
    P: PackingFormula,
    VIE: VoxelImageEncoding + VoxelImageDecoding,
>(
    _: VIE,
    packing: P,
    vol: &mut V,
    data: &VIE::Output,
    lo: Vec3<u32>,
    hi: Vec3<u32>,
) -> Option<()> {
    let ws = VIE::start(data)?;
    let dims = Vec3::new(
        hi.x.wrapping_sub(lo.x),
        hi.y.wrapping_sub(lo.y),
        hi.z.wrapping_sub(lo.z),
    );
    for z in 0..dims.z {
        for y in 0..dims.y {
            for x in 0..dims.x {
                let (i, j) = packing.index(dims, x, y, z);
                // Columns within 2 voxels of a horizontal edge are flagged as
                // border; decoders may skip lossy color interpolation there
                // (see QuadPngEncoding::get_block).
                let near_edge = x <= 1 || x >= dims.x - 2 || y <= 1 || y >= dims.y - 2;
                let decoded = VIE::get_block(&ws, i, j, near_edge);
                if let Err(e) = vol.set(lo.as_() + Vec3::new(x, y, z).as_(), decoded) {
                    warn!(
                        "Error placing a block into a volume at {:?}: {:?}",
                        (x, y, z),
                        e
                    );
                }
            }
        }
    }
    Some(())
}
/// A serialization-friendly, image-encoded chonk, carrying enough metadata
/// (z-extents, a probe block from just below/above the stored range, and the
/// chunk meta) to reconstruct a `Chonk` on the receiving side.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WireChonk<VIE: VoxelImageEncoding, P: PackingFormula, M: Clone, S: RectVolSize> {
    zmin: i32, // lowest stored z-level
    zmax: i32, // highest stored z-level
    data: VIE::Output, // image-encoded voxel data
    // Blocks sampled at zmin-1 / zmax+1 — presumably the uniform fill outside
    // the stored range; confirm against `Chonk::new`'s parameter semantics.
    below: Block,
    above: Block,
    meta: M,
    vie: VIE,
    packing: P,
    size: PhantomData<S>,
}
impl<VIE: VoxelImageEncoding + VoxelImageDecoding, P: PackingFormula, M: Clone, S: RectVolSize>
    WireChonk<VIE, P, M, S>
{
    /// Encode `chonk` for the wire. Returns `None` if image encoding fails
    /// or if the probe reads just outside the stored z-range error out.
    pub fn from_chonk(vie: VIE, packing: P, chonk: &Chonk<Block, S, M>) -> Option<Self> {
        let data = image_terrain_chonk(vie, packing, chonk)?;
        Some(Self {
            zmin: chonk.get_min_z(),
            zmax: chonk.get_max_z(),
            data,
            // Sample one block just below/above the stored range so the
            // receiver can pass them to `Chonk::new`.
            below: *chonk
                .get(Vec3::new(0, 0, chonk.get_min_z().saturating_sub(1)))
                .ok()?,
            above: *chonk.get(Vec3::new(0, 0, chonk.get_max_z() + 1)).ok()?,
            meta: chonk.meta().clone(),
            vie,
            packing,
            size: PhantomData,
        })
    }

    /// Decode back into a `Chonk`. Returns `None` if image decoding fails.
    pub fn to_chonk(&self) -> Option<Chonk<Block, S, M>> {
        let mut chonk = Chonk::new(self.zmin, self.below, self.above, self.meta.clone());
        // NOTE(review): zmin/zmax are cast straight to u32; this relies on the
        // wrapping add/sub in image_terrain/write_image_terrain so that
        // negative z-levels round-trip — confirm against Chonk's z invariants.
        write_image_terrain(
            self.vie,
            self.packing,
            &mut chonk,
            &self.data,
            Vec3::new(0, 0, self.zmin as u32),
            Vec3::new(S::RECT_SIZE.x, S::RECT_SIZE.y, self.zmax as u32),
        )?;
        Some(chonk)
    }
}

View File

@ -1,4 +1,5 @@
pub mod client;
pub mod compression;
pub mod ecs_packet;
pub mod server;
pub mod world_msg;
@ -6,17 +7,20 @@ pub mod world_msg;
// Reexports
pub use self::{
client::{ClientGeneral, ClientMsg, ClientRegister, ClientType},
compression::{
CompressedData, GridLtrPacking, PackingFormula, QuadPngEncoding, TriPngEncoding,
VoxelImageEncoding, WidePacking, WireChonk,
},
ecs_packet::EcsCompPacket,
server::{
CharacterInfo, DisconnectReason, InviteAnswer, Notification, PlayerInfo, PlayerListUpdate,
RegisterError, ServerGeneral, ServerInfo, ServerInit, ServerMsg, ServerRegisterAnswer,
RegisterError, SerializedTerrainChunk, ServerGeneral, ServerInfo, ServerInit, ServerMsg,
ServerRegisterAnswer,
},
world_msg::WorldMapMsg,
};
use common::character::CharacterId;
use serde::{Deserialize, Serialize};
use std::marker::PhantomData;
use tracing::trace;
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum PresenceKind {
@ -44,60 +48,3 @@ pub fn validate_chat_msg(msg: &str) -> Result<(), ChatMsgValidationError> {
Err(ChatMsgValidationError::TooLong)
}
}
/// Wrapper for compressed, serialized data (for stuff that doesn't use the
/// default lz4 compression)
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompressedData<T> {
pub data: Vec<u8>,
compressed: bool,
_phantom: PhantomData<T>,
}
impl<T: Serialize + for<'a> Deserialize<'a>> CompressedData<T> {
pub fn compress(t: &T, level: u32) -> Self {
use flate2::{write::DeflateEncoder, Compression};
use std::io::Write;
let uncompressed = bincode::serialize(t)
.expect("bincode serialization can only fail if a byte limit is set");
if uncompressed.len() >= 32 {
const EXPECT_MSG: &str =
"compression only fails for fallible Read/Write impls (which Vec<u8> is not)";
let mut encoder = DeflateEncoder::new(Vec::new(), Compression::new(level));
encoder.write_all(&*uncompressed).expect(EXPECT_MSG);
let compressed = encoder.finish().expect(EXPECT_MSG);
trace!(
"compressed {}, uncompressed {}, ratio {}",
compressed.len(),
uncompressed.len(),
compressed.len() as f32 / uncompressed.len() as f32
);
CompressedData {
data: compressed,
compressed: true,
_phantom: PhantomData,
}
} else {
CompressedData {
data: uncompressed,
compressed: false,
_phantom: PhantomData,
}
}
}
pub fn decompress(&self) -> Option<T> {
use std::io::Read;
if self.compressed {
let mut uncompressed = Vec::new();
flate2::read::DeflateDecoder::new(&*self.data)
.read_to_end(&mut uncompressed)
.ok()?;
bincode::deserialize(&*uncompressed).ok()
} else {
bincode::deserialize(&*self.data).ok()
}
}
}

View File

@ -1,4 +1,7 @@
use super::{world_msg::EconomyInfo, ClientType, CompressedData, EcsCompPacket, PingMsg};
use super::{
world_msg::EconomyInfo, ClientType, CompressedData, EcsCompPacket, PingMsg, QuadPngEncoding,
TriPngEncoding, WidePacking, WireChonk,
};
use crate::sync;
use common::{
character::{self, CharacterItem},
@ -6,13 +9,14 @@ use common::{
outcome::Outcome,
recipe::RecipeBook,
resources::TimeOfDay,
terrain::{Block, TerrainChunk},
terrain::{Block, TerrainChunk, TerrainChunkMeta, TerrainChunkSize},
trade::{PendingTrade, SitePrices, TradeId, TradeResult},
uid::Uid,
};
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::warn;
use vek::*;
///This struct contains all messages the server might send (on different
@ -62,6 +66,53 @@ pub enum ServerInit {
pub type ServerRegisterAnswer = Result<(), RegisterError>;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SerializedTerrainChunk {
DeflatedChonk(CompressedData<TerrainChunk>),
QuadPng(WireChonk<QuadPngEncoding<4>, WidePacking<true>, TerrainChunkMeta, TerrainChunkSize>),
TriPng(WireChonk<TriPngEncoding<false>, WidePacking<true>, TerrainChunkMeta, TerrainChunkSize>),
}
impl SerializedTerrainChunk {
pub fn via_heuristic(chunk: &TerrainChunk, lossy_compression: bool) -> Self {
if lossy_compression && (chunk.get_max_z() - chunk.get_min_z() <= 128) {
Self::quadpng(chunk)
} else {
Self::deflate(chunk)
}
}
pub fn deflate(chunk: &TerrainChunk) -> Self {
Self::DeflatedChonk(CompressedData::compress(chunk, 1))
}
pub fn quadpng(chunk: &TerrainChunk) -> Self {
if let Some(wc) = WireChonk::from_chonk(QuadPngEncoding(), WidePacking(), chunk) {
Self::QuadPng(wc)
} else {
warn!("Image encoding failure occurred, falling back to deflate");
Self::deflate(chunk)
}
}
pub fn tripng(chunk: &TerrainChunk) -> Self {
if let Some(wc) = WireChonk::from_chonk(TriPngEncoding(), WidePacking(), chunk) {
Self::TriPng(wc)
} else {
warn!("Image encoding failure occurred, falling back to deflate");
Self::deflate(chunk)
}
}
pub fn to_chunk(&self) -> Option<TerrainChunk> {
match self {
Self::DeflatedChonk(chonk) => chonk.decompress(),
Self::QuadPng(wc) => wc.to_chonk(),
Self::TriPng(wc) => wc.to_chonk(),
}
}
}
/// Messages sent from the server to the client
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ServerGeneral {
@ -106,7 +157,7 @@ pub enum ServerGeneral {
// Ingame related AND terrain stream
TerrainChunkUpdate {
key: Vec2<i32>,
chunk: Result<CompressedData<TerrainChunk>, ()>,
chunk: Result<SerializedTerrainChunk, ()>,
},
TerrainBlockUpdates(CompressedData<HashMap<Vec3<i32>, Block>>),
// Always possible

View File

@ -6,8 +6,10 @@
label_break_value,
bool_to_option,
drain_filter,
never_type,
option_unwrap_none,
option_zip
option_zip,
unwrap_infallible
)]
#![cfg_attr(not(feature = "worldgen"), feature(const_panic))]

View File

@ -34,6 +34,8 @@ pub struct NetworkRequestMetrics {
pub chunks_request_dropped: IntCounter,
pub chunks_served_from_memory: IntCounter,
pub chunks_generation_triggered: IntCounter,
pub chunks_served_lossy: IntCounter,
pub chunks_served_lossless: IntCounter,
}
pub struct ChunkGenMetrics {
@ -187,15 +189,27 @@ impl NetworkRequestMetrics {
"chunks_generation_triggered",
"number of all chunks that were requested and needs to be generated",
))?;
let chunks_served_lossy = IntCounter::with_opts(Opts::new(
"chunks_served_lossy",
"number of chunks that were sent with lossy compression requested",
))?;
let chunks_served_lossless = IntCounter::with_opts(Opts::new(
"chunks_served_lossless",
"number of chunks that were sent with lossless compression requested",
))?;
registry.register(Box::new(chunks_request_dropped.clone()))?;
registry.register(Box::new(chunks_served_from_memory.clone()))?;
registry.register(Box::new(chunks_generation_triggered.clone()))?;
registry.register(Box::new(chunks_served_lossy.clone()))?;
registry.register(Box::new(chunks_served_lossless.clone()))?;
Ok(Self {
chunks_request_dropped,
chunks_served_from_memory,
chunks_generation_triggered,
chunks_served_lossy,
chunks_served_lossless,
})
}
}

View File

@ -9,6 +9,7 @@ use vek::*;
pub struct Presence {
pub view_distance: u32,
pub kind: PresenceKind,
pub lossy_terrain_compression: bool,
}
impl Presence {
@ -16,6 +17,7 @@ impl Presence {
Self {
view_distance,
kind,
lossy_terrain_compression: false,
}
}
}

View File

@ -253,6 +253,11 @@ impl Sys {
setting.client_optin = server_authoritative;
}
},
ClientGeneral::RequestLossyTerrainCompression {
lossy_terrain_compression,
} => {
presence.lossy_terrain_compression = lossy_terrain_compression;
},
_ => tracing::error!("not a client_in_game msg"),
}
Ok(())

View File

@ -6,7 +6,7 @@ use common::{
vol::RectVolSize,
};
use common_ecs::{Job, Origin, ParMode, Phase, System};
use common_net::msg::{ClientGeneral, CompressedData, ServerGeneral};
use common_net::msg::{ClientGeneral, SerializedTerrainChunk, ServerGeneral};
use rayon::iter::ParallelIterator;
use specs::{Entities, Join, ParJoin, Read, ReadExpect, ReadStorage};
use tracing::{debug, trace};
@ -79,8 +79,16 @@ impl<'a> System<'a> for Sys {
network_metrics.chunks_served_from_memory.inc();
client.send(ServerGeneral::TerrainChunkUpdate {
key,
chunk: Ok(CompressedData::compress(&chunk, 1)),
})?
chunk: Ok(SerializedTerrainChunk::via_heuristic(
&chunk,
presence.lossy_terrain_compression,
)),
})?;
if presence.lossy_terrain_compression {
network_metrics.chunks_served_lossy.inc();
} else {
network_metrics.chunks_served_lossless.inc();
}
},
None => {
network_metrics.chunks_generation_triggered.inc();

View File

@ -1,6 +1,6 @@
use crate::{
chunk_generator::ChunkGenerator, client::Client, presence::Presence, rtsim::RtSim,
settings::Settings, SpawnPoint, Tick,
chunk_generator::ChunkGenerator, client::Client, metrics::NetworkRequestMetrics,
presence::Presence, rtsim::RtSim, settings::Settings, SpawnPoint, Tick,
};
use common::{
comp::{
@ -14,13 +14,68 @@ use common::{
LoadoutBuilder, SkillSetBuilder,
};
use common_ecs::{Job, Origin, Phase, System};
use common_net::msg::{CompressedData, ServerGeneral};
use common_net::msg::{SerializedTerrainChunk, ServerGeneral};
use common_state::TerrainChanges;
use comp::Behavior;
use specs::{Join, Read, ReadStorage, Write, WriteExpect};
use specs::{Join, Read, ReadExpect, ReadStorage, Write, WriteExpect};
use std::sync::Arc;
use vek::*;
/// Caches a prepared `TerrainChunkUpdate` message per compression quality so
/// that the (potentially expensive) serialization and compression of a chunk
/// happens at most once per quality level, no matter how many clients the
/// chunk is sent to.
pub(crate) struct LazyTerrainMessage {
    /// Prepared message using lossy compression (clients that opted in).
    lazy_msg_lo: Option<crate::client::PreparedMsg>,
    /// Prepared message using lossless compression (the default).
    lazy_msg_hi: Option<crate::client::PreparedMsg>,
}

impl LazyTerrainMessage {
    #[allow(clippy::new_without_default)]
    pub(crate) fn new() -> Self {
        Self {
            lazy_msg_lo: None,
            lazy_msg_hi: None,
        }
    }

    /// Send the chunk at `chunk_key` to `client`, preparing (serializing and
    /// compressing) the message on first use for the quality level requested
    /// by the client's `presence`, and reusing the cached message afterwards.
    ///
    /// `generate_chunk` is only invoked when no message for that quality has
    /// been prepared yet; its error `A` is propagated to the caller.
    pub(crate) fn prepare_and_send<
        'a,
        A,
        F: FnOnce() -> Result<&'a common::terrain::TerrainChunk, A>,
    >(
        &mut self,
        network_metrics: &NetworkRequestMetrics,
        client: &Client,
        presence: &Presence,
        chunk_key: &vek::Vec2<i32>,
        generate_chunk: F,
    ) -> Result<(), A> {
        // Pick the cache slot matching the client's requested quality.
        let lazy_msg = if presence.lossy_terrain_compression {
            &mut self.lazy_msg_lo
        } else {
            &mut self.lazy_msg_hi
        };
        if lazy_msg.is_none() {
            let chunk = generate_chunk()?;
            *lazy_msg = Some(client.prepare(ServerGeneral::TerrainChunkUpdate {
                key: *chunk_key,
                chunk: Ok(SerializedTerrainChunk::via_heuristic(
                    chunk,
                    presence.lossy_terrain_compression,
                )),
            }));
        }
        if let Some(msg) = lazy_msg.as_ref() {
            // Sending is best-effort: a failure means the client is
            // disconnecting and will be cleaned up elsewhere.
            let _ = client.send_prepared(msg);
            if presence.lossy_terrain_compression {
                network_metrics.chunks_served_lossy.inc();
            } else {
                network_metrics.chunks_served_lossless.inc();
            }
        }
        Ok(())
    }
}
/// This system will handle loading generated chunks and unloading
/// unneeded chunks.
/// 1. Inserts newly generated chunks into the TerrainGrid
@ -36,6 +91,7 @@ impl<'a> System<'a> for Sys {
Read<'a, Tick>,
Read<'a, SpawnPoint>,
Read<'a, Settings>,
ReadExpect<'a, NetworkRequestMetrics>,
WriteExpect<'a, ChunkGenerator>,
WriteExpect<'a, TerrainGrid>,
Write<'a, TerrainChanges>,
@ -56,6 +112,7 @@ impl<'a> System<'a> for Sys {
tick,
spawn_point,
server_settings,
network_metrics,
mut chunk_generator,
mut terrain,
mut terrain_changes,
@ -222,11 +279,7 @@ impl<'a> System<'a> for Sys {
// Send the chunk to all nearby players.
use rayon::iter::{IntoParallelIterator, ParallelIterator};
new_chunks.into_par_iter().for_each(|(key, chunk)| {
let mut msg = Some(ServerGeneral::TerrainChunkUpdate {
key,
chunk: Ok(CompressedData::compress(&*chunk, 1)),
});
let mut lazy_msg = None;
let mut lazy_msg = LazyTerrainMessage::new();
(&presences, &positions, &clients)
.join()
@ -240,11 +293,15 @@ impl<'a> System<'a> for Sys {
.magnitude_squared();
if adjusted_dist_sqr <= presence.view_distance.pow(2) {
if let Some(msg) = msg.take() {
lazy_msg = Some(client.prepare(msg));
};
lazy_msg.as_ref().map(|msg| client.send_prepared(msg));
lazy_msg
.prepare_and_send::<!, _>(
&network_metrics,
&client,
&presence,
&key,
|| Ok(&*chunk),
)
.into_ok();
}
});
});

View File

@ -1,4 +1,5 @@
use crate::{client::Client, presence::Presence};
use super::terrain::LazyTerrainMessage;
use crate::{client::Client, metrics::NetworkRequestMetrics, presence::Presence};
use common::{comp::Pos, terrain::TerrainGrid};
use common_ecs::{Job, Origin, Phase, System};
use common_net::msg::{CompressedData, ServerGeneral};
@ -17,6 +18,7 @@ impl<'a> System<'a> for Sys {
ReadStorage<'a, Pos>,
ReadStorage<'a, Presence>,
ReadStorage<'a, Client>,
ReadExpect<'a, NetworkRequestMetrics>,
);
const NAME: &'static str = "terrain_sync";
@ -25,25 +27,23 @@ impl<'a> System<'a> for Sys {
fn run(
_job: &mut Job<Self>,
(terrain, terrain_changes, positions, presences, clients): Self::SystemData,
(terrain, terrain_changes, positions, presences, clients, network_metrics): Self::SystemData,
) {
// Sync changed chunks
'chunk: for chunk_key in &terrain_changes.modified_chunks {
let mut lazy_msg = None;
let mut lazy_msg = LazyTerrainMessage::new();
for (presence, pos, client) in (&presences, &positions, &clients).join() {
if super::terrain::chunk_in_vd(pos.0, *chunk_key, &terrain, presence.view_distance)
{
if lazy_msg.is_none() {
lazy_msg = Some(client.prepare(ServerGeneral::TerrainChunkUpdate {
key: *chunk_key,
chunk: Ok(match terrain.get_key(*chunk_key) {
Some(chunk) => CompressedData::compress(&chunk, 1),
None => break 'chunk,
}),
}));
if let Err(()) = lazy_msg.prepare_and_send(
&network_metrics,
&client,
&presence,
chunk_key,
|| terrain.get_key(*chunk_key).ok_or(()),
) {
break 'chunk;
}
lazy_msg.as_ref().map(|ref msg| client.send_prepared(&msg));
}
}
}

View File

@ -82,6 +82,8 @@ widget_ids! {
//
particles_button,
particles_label,
lossy_terrain_compression_button,
lossy_terrain_compression_label,
//
fullscreen_button,
fullscreen_label,
@ -874,6 +876,42 @@ impl<'a> Widget for Video<'a> {
events.push(ToggleParticlesEnabled(particles_enabled));
}
// Lossy terrain compression
Text::new(
&self
.localized_strings
.get("hud.settings.lossy_terrain_compression"),
)
.font_size(self.fonts.cyri.scale(14))
.font_id(self.fonts.cyri.conrod_id)
.right_from(state.ids.particles_label, 64.0)
.color(TEXT_COLOR)
.set(state.ids.lossy_terrain_compression_label, ui);
let lossy_terrain_compression = ToggleButton::new(
self.global_state
.settings
.graphics
.lossy_terrain_compression,
self.imgs.checkbox,
self.imgs.checkbox_checked,
)
.w_h(18.0, 18.0)
.right_from(state.ids.lossy_terrain_compression_label, 10.0)
.hover_images(self.imgs.checkbox_mo, self.imgs.checkbox_checked_mo)
.press_images(self.imgs.checkbox_press, self.imgs.checkbox_checked)
.set(state.ids.lossy_terrain_compression_button, ui);
if self
.global_state
.settings
.graphics
.lossy_terrain_compression
!= lossy_terrain_compression
{
events.push(ToggleLossyTerrainCompression(lossy_terrain_compression));
}
// Resolution
let resolutions: Vec<[u16; 2]> = state
.video_modes

View File

@ -88,9 +88,13 @@ impl SessionState {
scene
.camera_mut()
.set_fov_deg(global_state.settings.graphics.fov);
client
.borrow_mut()
.request_player_physics(global_state.settings.gameplay.player_physics_behavior);
{
let mut client = client.borrow_mut();
client.request_player_physics(global_state.settings.gameplay.player_physics_behavior);
client.request_lossy_terrain_compression(
global_state.settings.graphics.lossy_terrain_compression,
);
}
let hud = Hud::new(global_state, &client.borrow());
let walk_forward_dir = scene.camera().forward_xy();
let walk_right_dir = scene.camera().right_xy();

View File

@ -71,6 +71,7 @@ pub enum Graphics {
ChangeFullscreenMode(FullScreenSettings),
ToggleParticlesEnabled(bool),
ToggleLossyTerrainCompression(bool),
AdjustWindowSize([u16; 2]),
ResetGraphicsSettings,
@ -329,6 +330,13 @@ impl SettingsChange {
Graphics::ToggleParticlesEnabled(particles_enabled) => {
settings.graphics.particles_enabled = particles_enabled;
},
Graphics::ToggleLossyTerrainCompression(lossy_terrain_compression) => {
settings.graphics.lossy_terrain_compression = lossy_terrain_compression;
session_state
.client
.borrow_mut()
.request_lossy_terrain_compression(lossy_terrain_compression);
},
Graphics::AdjustWindowSize(new_size) => {
global_state.window.set_size(new_size.into());
settings.graphics.window_size = new_size;

View File

@ -32,6 +32,7 @@ pub struct GraphicsSettings {
pub view_distance: u32,
pub sprite_render_distance: u32,
pub particles_enabled: bool,
pub lossy_terrain_compression: bool,
pub figure_lod_render_distance: u32,
pub max_fps: Fps,
pub fov: u16,
@ -50,6 +51,7 @@ impl Default for GraphicsSettings {
view_distance: 10,
sprite_render_distance: 100,
particles_enabled: true,
lossy_terrain_compression: false,
figure_lod_render_distance: 300,
max_fps: Fps::Max(60),
fov: 70,

View File

@ -7,7 +7,7 @@ edition = "2018"
[features]
tracy = ["common/tracy", "common-net/tracy"]
simd = ["vek/platform_intrinsics"]
bin_compression = ["lz-fear", "deflate", "flate2", "common-frontend"]
bin_compression = ["lz-fear", "deflate", "flate2", "common-frontend", "image/jpeg", "num-traits"]
default = ["simd"]
@ -42,6 +42,7 @@ assets_manager = {version = "0.4.3", features = ["ron"]}
lz-fear = { version = "0.1.1", optional = true }
deflate = { version = "0.9.1", optional = true }
flate2 = { version = "1.0.20", optional = true }
num-traits = { version = "0.2", optional = true }
common-frontend = { package = "veloren-common-frontend", path = "../common/frontend", optional = true }
@ -56,6 +57,6 @@ structopt = "0.3"
harness = false
name = "tree"
[[bin]]
[[example]]
name = "chunk_compression_benchmarks"
required-features = ["bin_compression"]

File diff suppressed because it is too large Load Diff

View File

@ -1,250 +0,0 @@
use common::{
spiral::Spiral2d,
terrain::{chonk::Chonk, Block, BlockKind, SpriteKind},
vol::{IntoVolIterator, RectVolSize, SizedVol, WriteVol},
volumes::dyna::{Access, ColumnAccess, Dyna},
};
use hashbrown::HashMap;
use std::{
io::{Read, Write},
time::Instant,
};
use tracing::{debug, trace};
use vek::*;
use veloren_world::{
sim::{FileOpts, WorldOpts, DEFAULT_WORLD_MAP},
World,
};
/// LZ4-compress `data` with `lz_fear`, registering `dictionary` under
/// dictionary id 0 (pass an empty slice for no dictionary).
///
/// Panics if the in-memory compression fails.
fn lz4_with_dictionary(data: &[u8], dictionary: &[u8]) -> Vec<u8> {
    let mut output = Vec::new();
    lz_fear::CompressionSettings::default()
        .dictionary(0, dictionary)
        .compress(data, &mut output)
        .unwrap();
    output
}
/// Decompress an LZ4 frame using `dictionary`, then bincode-decode the
/// resulting bytes, yielding `None` on any failure along the way.
#[allow(dead_code)]
fn unlz4_with_dictionary(data: &[u8], dictionary: &[u8]) -> Option<Vec<u8>> {
    let frame_reader = lz_fear::LZ4FrameReader::new(data).ok()?;
    let mut decompressed = Vec::new();
    frame_reader
        .into_read_with_dictionary(dictionary)
        .read_to_end(&mut decompressed)
        .ok()?;
    // The decompressed payload is itself bincode-encoded.
    bincode::deserialize(&*decompressed).ok()
}
/// DEFLATE-compress `data` with the pure-Rust `deflate` crate at its fastest
/// setting. Panics on write/finish failure (in-memory, so effectively never).
#[allow(dead_code)]
fn do_deflate(data: &[u8]) -> Vec<u8> {
    use deflate::{write::DeflateEncoder, Compression};

    let mut encoder = DeflateEncoder::new(Vec::new(), Compression::Fast);
    encoder.write_all(data).expect("Write error!");
    // clippy::let_and_return: return the finished buffer directly.
    encoder.finish().expect("Failed to finish compression!")
}
/// DEFLATE-compress `data` with `flate2` at compression level 5 (a middle
/// ratio/speed trade-off). Panics on write/finish failure (in-memory, so
/// effectively never).
fn do_deflate_flate2(data: &[u8]) -> Vec<u8> {
    use flate2::{write::DeflateEncoder, Compression};

    let mut encoder = DeflateEncoder::new(Vec::new(), Compression::new(5));
    encoder.write_all(data).expect("Write error!");
    // clippy::let_and_return: return the finished buffer directly.
    encoder.finish().expect("Failed to finish compression!")
}
/// Copy a `Chonk` (the sparse, vertically-segmented terrain representation)
/// into a dense `Dyna` volume of the same horizontal extent, so the two
/// encodings can be compared for compressibility.
///
/// `block` is the fill value for cells the iterator does not visit.
/// NOTE(review): the dyna's z-extent is `get_max_z() - get_min_z()` and
/// positions are shifted down by `get_min_z()` below — assumes `vol_iter`
/// yields z in `[get_min_z(), get_max_z())`; confirm against `Chonk`'s
/// iterator contract.
fn chonk_to_dyna<V: Clone, S: RectVolSize, M: Clone, A: Access>(
    chonk: &Chonk<V, S, M>,
    block: V,
) -> Dyna<V, M, A> {
    // Dense volume sized to the chonk's occupied z-span, pre-filled with
    // `block`, carrying over the chonk's metadata.
    let mut dyna = Dyna::<V, M, A>::filled(
        Vec3::new(
            S::RECT_SIZE.x,
            S::RECT_SIZE.y,
            (chonk.get_max_z() - chonk.get_min_z()) as u32,
        ),
        block,
        chonk.meta().clone(),
    );
    for (pos, block) in chonk.vol_iter(
        Vec3::new(0, 0, chonk.get_min_z()),
        Vec3::new(S::RECT_SIZE.x as _, S::RECT_SIZE.y as _, chonk.get_max_z()),
    ) {
        // Translate chonk-space z down to the dyna's zero-based z.
        dyna.set(pos - chonk.get_min_z() * Vec3::unit_z(), block.clone())
            .expect("a bug here represents the arithmetic being wrong");
    }
    dyna
}
/// Split a dense `Dyna<Block>` into separate per-attribute channels: the
/// block kinds, parallel R/G/B byte vectors for colored blocks, and a sprite
/// vector for sprite blocks. Homogeneous channels compress much better than
/// the interleaved representation.
///
/// Panics if a block reports both (or neither) of color and sprite, since
/// the channel split is only lossless when exactly one is present.
fn channelize_dyna<M: Clone, A: Access>(
    dyna: &Dyna<Block, M, A>,
) -> (
    Dyna<BlockKind, M, A>,
    Vec<u8>,
    Vec<u8>,
    Vec<u8>,
    Vec<SpriteKind>,
) {
    let mut blocks = Dyna::filled(dyna.sz, BlockKind::Air, dyna.metadata().clone());
    let (mut r, mut g, mut b, mut sprites) = (Vec::new(), Vec::new(), Vec::new(), Vec::new());
    for (pos, block) in dyna.vol_iter(dyna.lower_bound(), dyna.upper_bound()) {
        // `**block` derefs `&Block` to its `BlockKind`.
        blocks.set(pos, **block).unwrap();
        match (block.get_color(), block.get_sprite()) {
            (Some(rgb), None) => {
                r.push(rgb.r);
                g.push(rgb.g);
                b.push(rgb.b);
            },
            (None, Some(spritekind)) => {
                sprites.push(spritekind);
            },
            _ => panic!(
                "attr being used for color vs sprite is mutually exclusive (and that's required \
                 for this translation to be lossless), but there's no way to guarantee that at \
                 the type level with Block's public API"
            ),
        }
    }
    (blocks, r, g, b, sprites)
}
/// Fill `dictionary` in place, front to back, with the most frequent byte
/// windows from `histogram` (most frequent first), for use as a shared
/// compression dictionary. Windows are truncated at the dictionary boundary;
/// copying stops once the dictionary is full or the windows run out.
fn histogram_to_dictionary(histogram: &HashMap<Vec<u8>, usize>, dictionary: &mut Vec<u8>) {
    let mut tmp: Vec<(Vec<u8>, usize)> = histogram.iter().map(|(k, v)| (k.clone(), *v)).collect();
    // Ascending by frequency, so the most frequent windows are at the end.
    // Unstable sort: HashMap iteration order is already nondeterministic, so
    // stability buys nothing here.
    tmp.sort_unstable_by_key(|(_, count)| *count);
    debug!("{:?}", tmp.last());
    if tmp.is_empty() {
        // Guard: `tmp.len() - 1` below would underflow on an empty histogram
        // (debug panic / release wraparound followed by an out-of-bounds index).
        return;
    }
    let mut i = 0;
    let mut j = tmp.len() - 1;
    while i < dictionary.len() && j > 0 {
        let (k, v) = &tmp[j];
        let dlen = dictionary.len();
        let n = (i + k.len()).min(dlen);
        // Truncate the final window if it would overrun the dictionary.
        dictionary[i..n].copy_from_slice(&k[0..k.len().min(dlen - i)]);
        debug!("{}: {}: {:?}", tmp.len() - j, v, k);
        j -= 1;
        i = n;
    }
}
/// Benchmark driver: generates a spiral of chunks around the center of the
/// default world map and compares several serialization/compression schemes
/// (chonk vs dense dyna, LZ4 vs DEFLATE, channelized vs interleaved),
/// printing running averages of compression ratios and timings.
fn main() {
    common_frontend::init_stdout(None);
    println!("Loading world");
    // Fixed seed so runs are comparable across compression experiments.
    let (world, index) = World::generate(59686, WorldOpts {
        seed_elements: true,
        world_file: FileOpts::LoadAsset(DEFAULT_WORLD_MAP.into()),
        ..WorldOpts::default()
    });
    println!("Loaded world");
    // Sliding-window byte histograms used to build LZ4 dictionaries:
    // histogram/dictionary for chonk bytes, histogram2/dictionary2 for dyna
    // bytes. The dictionary-based paths below are currently commented out.
    let mut histogram: HashMap<Vec<u8>, usize> = HashMap::new();
    let mut histogram2: HashMap<Vec<u8>, usize> = HashMap::new();
    let mut dictionary = vec![0xffu8; 1 << 16];
    let mut dictionary2 = vec![0xffu8; 1 << 16];
    // Byte-window size for histogram entries.
    let k = 32;
    let sz = world.sim().get_size();
    // Running sums: one slot per compression scheme / per timed scheme.
    let mut totals = [0.0; 5];
    let mut total_timings = [0.0; 2];
    let mut count = 0;
    // Walk chunks in a spiral around the map center.
    for (i, (x, y)) in Spiral2d::new()
        .radius(20)
        .map(|v| (v.x + sz.x as i32 / 2, v.y + sz.y as i32 / 2))
        .enumerate()
    {
        let chunk = world.generate_chunk(index.as_index_ref(), Vec2::new(x as _, y as _), || false);
        if let Ok((chunk, _)) = chunk {
            let uncompressed = bincode::serialize(&chunk).unwrap();
            for w in uncompressed.windows(k) {
                *histogram.entry(w.to_vec()).or_default() += 1;
            }
            // Periodically rebuild the dictionary from the histogram so far.
            if i % 128 == 0 {
                histogram_to_dictionary(&histogram, &mut dictionary);
            }
            let lz4chonk_pre = Instant::now();
            let lz4_chonk = lz4_with_dictionary(&bincode::serialize(&chunk).unwrap(), &[]);
            let lz4chonk_post = Instant::now();
            //let lz4_dict_chonk = SerializedTerrainChunk::from_chunk(&chunk,
            // &*dictionary);
            let deflatechonk_pre = Instant::now();
            let deflate_chonk = do_deflate_flate2(&bincode::serialize(&chunk).unwrap());
            let deflatechonk_post = Instant::now();
            // Same chunk, re-encoded as a dense volume.
            let dyna: Dyna<_, _, ColumnAccess> = chonk_to_dyna(&chunk, Block::empty());
            let ser_dyna = bincode::serialize(&dyna).unwrap();
            for w in ser_dyna.windows(k) {
                *histogram2.entry(w.to_vec()).or_default() += 1;
            }
            if i % 128 == 0 {
                histogram_to_dictionary(&histogram2, &mut dictionary2);
            }
            let lz4_dyna = lz4_with_dictionary(&*ser_dyna, &[]);
            //let lz4_dict_dyna = lz4_with_dictionary(&*ser_dyna, &dictionary2);
            let deflate_dyna = do_deflate(&*ser_dyna);
            let deflate_channeled_dyna =
                do_deflate_flate2(&bincode::serialize(&channelize_dyna(&dyna)).unwrap());
            let n = uncompressed.len();
            // Compression ratios relative to the uncompressed bincode size
            // (smaller is better).
            let sizes = [
                lz4_chonk.len() as f32 / n as f32,
                deflate_chonk.len() as f32 / n as f32,
                lz4_dyna.len() as f32 / n as f32,
                deflate_dyna.len() as f32 / n as f32,
                deflate_channeled_dyna.len() as f32 / n as f32,
            ];
            // Index of the best (smallest) ratio for this chunk.
            let best_idx = sizes
                .iter()
                .enumerate()
                .fold((1.0, 0), |(best, i), (j, ratio)| {
                    if ratio < &best {
                        (*ratio, j)
                    } else {
                        (best, i)
                    }
                })
                .1;
            // NOTE(review): `subsec_nanos` discards whole seconds — fine
            // while per-chunk compression stays under a second; confirm if
            // schemes get slower.
            let timings = [
                (lz4chonk_post - lz4chonk_pre).subsec_nanos(),
                (deflatechonk_post - deflatechonk_pre).subsec_nanos(),
            ];
            trace!(
                "{} {}: uncompressed: {}, {:?} {} {:?}",
                x,
                y,
                n,
                sizes,
                best_idx,
                timings
            );
            for j in 0..5 {
                totals[j] += sizes[j];
            }
            for j in 0..2 {
                total_timings[j] += timings[j] as f32;
            }
            count += 1;
        }
        // Periodic progress report with running averages.
        if i % 64 == 0 {
            println!("Chunks processed: {}\n", count);
            println!("Average lz4_chonk: {}", totals[0] / count as f32);
            println!("Average deflate_chonk: {}", totals[1] / count as f32);
            println!("Average lz4_dyna: {}", totals[2] / count as f32);
            println!("Average deflate_dyna: {}", totals[3] / count as f32);
            println!(
                "Average deflate_channeled_dyna: {}",
                totals[4] / count as f32
            );
            println!("");
            println!(
                "Average lz4_chonk nanos : {:02}",
                total_timings[0] / count as f32
            );
            println!(
                "Average deflate_chonk nanos: {:02}",
                total_timings[1] / count as f32
            );
            println!("-----");
        }
        // Periodically reset the chonk histogram so the dictionary favors
        // recently-seen windows.
        if i % 256 == 0 {
            histogram.clear();
        }
    }
}