Mirror of https://gitlab.com/veloren/veloren.git
Inverse depth planes.
Also added a function with extensive documentation of what the projection options mean, and set our near plane much closer (0.25 to 0.0625) and our far plane much farther (100000.25 to 524288.0625). In the future we may remove the far plane entirely (opting for an infinite one) and may be able to improve accuracy elsewhere using analysis of other floating point values.
Parent: adf3f83b4b
Commit: 93568754a0
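
For illustration only (a minimal sketch, not code from this commit): this is what the reversed-depth ("1 to 0") projection adopted here boils down to, using the same math as the perspective_lh_zo_general helper added in the camera diff below, with the new plane constants passed as reciprocals in swapped order. It assumes vek's Mat4 (already a dependency of the codebase); the function name reversed_depth_proj is hypothetical.

```rust
use vek::Mat4;

const NEAR_PLANE: f32 = 0.0625;
const FAR_PLANE: f32 = 524288.0625;

/// Hypothetical helper: a left-handed, zero-to-one perspective projection with
/// the depth range reversed, built by passing 1/far where 1/near would
/// normally go and vice versa (mirroring the commit's approach).
fn reversed_depth_proj(fov_y_radians: f32, aspect_ratio: f32) -> Mat4<f32> {
    let inv_n = 1.0 / FAR_PLANE; // reversed: the "near" slot gets 1/far
    let inv_f = 1.0 / NEAR_PLANE; // reversed: the "far" slot gets 1/near
    let tan_half_fovy = (fov_y_radians / 2.0).tan();
    let m00 = 1.0 / (aspect_ratio * tan_half_fovy);
    let m11 = 1.0 / tan_half_fovy;
    let m23 = -1.0 / (inv_n - inv_f);
    let m22 = inv_n * (-m23);
    // Arguments are given row by row, as in vek's Mat4::new. After the
    // perspective divide, depth comes out as m22 + m23 / z, i.e. 1.0 at the
    // near plane and 0.0 at the far plane.
    Mat4::new(
        m00, 0.0, 0.0, 0.0,
        0.0, m11, 0.0, 0.0,
        0.0, 0.0, m22, m23,
        0.0, 0.0, 1.0, 0.0,
    )
}
```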
@@ -49,9 +49,9 @@ vec3 wpos_at(vec2 uv) {
     vec4 clip_space = vec4((uv * 2.0 - 1.0) * vec2(1, -1), buf_depth, 1.0);
     vec4 view_space = inv * clip_space;
     view_space /= view_space.w;
-    if (buf_depth == 1.0) {
+    if (buf_depth == 0.0) {
         vec3 direction = normalize(view_space.xyz);
-        return direction.xyz * 100000.0 + cam_pos.xyz;
+        return direction.xyz * 524288.0625 + cam_pos.xyz;
     } else {
         return view_space.xyz;
     }
@@ -31,7 +31,7 @@ void main() {
         all_mat *
         vec4(v_pos + cam_pos.xyz, 1);
     // gl_Position = vec4(gl_Position.xy, sign(gl_Position.z) * gl_Position.w, gl_Position.w);
-    gl_Position.z = gl_Position.w;
+    gl_Position.z = 0;
     // gl_Position.z = gl_Position.w - 0.000001;//0.0;
     // gl_Position.z = 1.0;
     // gl_Position.z = -1.0;
@@ -227,9 +227,9 @@ impl FigurePipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: true,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -178,9 +178,9 @@ impl FluidPipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: false,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -187,9 +187,9 @@ impl LodTerrainPipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: true,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -233,9 +233,9 @@ impl ParticlePipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: true,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -83,9 +83,9 @@ impl SkyboxPipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: true,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -270,9 +270,9 @@ impl SpritePipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: true,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -261,9 +261,9 @@ impl TerrainPipeline {
                     write_mask: wgpu::ColorWrite::ALL,
                 }],
                 depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
-                    format: wgpu::TextureFormat::Depth24Plus,
+                    format: wgpu::TextureFormat::Depth32Float,
                     depth_write_enabled: true,
-                    depth_compare: wgpu::CompareFunction::LessEqual,
+                    depth_compare: wgpu::CompareFunction::GreaterEqual,
                     stencil: wgpu::StencilStateDescriptor {
                         front: wgpu::StencilStateFaceDescriptor::IGNORE,
                         back: wgpu::StencilStateFaceDescriptor::IGNORE,
@@ -655,12 +655,12 @@ impl Renderer {
            mip_level_count: levels,
            sample_count,
            dimension: wgpu::TextureDimension::D2,
-            format: wgpu::TextureFormat::Depth24Plus,
+            format: wgpu::TextureFormat::Depth32Float,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::RENDER_ATTACHMENT,
        });
        let tgt_depth_view = tgt_depth_tex.create_view(&wgpu::TextureViewDescriptor {
            label: None,
-            format: Some(wgpu::TextureFormat::Depth24Plus),
+            format: Some(wgpu::TextureFormat::Depth32Float),
            dimension: Some(wgpu::TextureViewDimension::D2),
            aspect: wgpu::TextureAspect::DepthOnly,
            base_mip_level: 0,
@@ -679,12 +679,13 @@ impl Renderer {
            mip_level_count: levels,
            sample_count,
            dimension: wgpu::TextureDimension::D2,
-            format: wgpu::TextureFormat::Depth24Plus,
+            format: wgpu::TextureFormat::Depth32Float,
            usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
        });
+        // TODO: Consider no depth buffer for the final draw to the window?
        let win_depth_view = tgt_depth_tex.create_view(&wgpu::TextureViewDescriptor {
            label: None,
-            format: Some(wgpu::TextureFormat::Depth24Plus),
+            format: Some(wgpu::TextureFormat::Depth32Float),
            dimension: Some(wgpu::TextureViewDimension::D2),
            aspect: wgpu::TextureAspect::DepthOnly,
            base_mip_level: 0,
@@ -2106,7 +2107,8 @@ fn create_pipelines(

    let figure_vert_mod = create_shader("figure-vert", ShaderKind::Vertex)?;

-    // let terrain_point_shadow_vert_mod = create_shader("Point-light-shadows-vert", ShaderKind::Vertex)?;
+    // let terrain_point_shadow_vert_mod = create_shader("Point-light-shadows-vert",
+    // ShaderKind::Vertex)?;

    let terrain_directed_shadow_vert_mod =
        create_shader("light-shadows-directed-vert", ShaderKind::Vertex)?;
@@ -54,7 +54,7 @@ impl<'a> Drawer<'a> {
                wgpu::RenderPassDepthStencilAttachmentDescriptor {
                    attachment: &self.renderer.tgt_depth_view,
                    depth_ops: Some(wgpu::Operations {
-                        load: wgpu::LoadOp::Clear(1.0),
+                        load: wgpu::LoadOp::Clear(0.0),
                        store: true,
                    }),
                    stencil_ops: None,
@@ -157,7 +157,7 @@ impl<'a> Drawer<'a> {
    pub fn draw_point_shadow<'b: 'a>(
        &mut self,
        matrices: &[shadow::PointLightMatrix; 126],
-        chunks: impl Clone + Iterator<Item=(&'b Model<terrain::Vertex>, &'b terrain::BoundLocals)>,
+        chunks: impl Clone + Iterator<Item = (&'b Model<terrain::Vertex>, &'b terrain::BoundLocals)>,
    ) {
        if let Some(ref shadow_renderer) = self.renderer.shadow_map {
            const STRIDE: usize = std::mem::size_of::<shadow::PointLightMatrix>();
@@ -1,11 +1,12 @@
 use common::{terrain::TerrainGrid, vol::ReadVol};
 use common_base::span;
-use std::f32::consts::PI;
+use core::{f32::consts::PI, fmt::Debug};
+use num::traits::{real::Real, FloatConst};
 use treeculler::Frustum;
 use vek::*;

-pub const NEAR_PLANE: f32 = 0.25;
-pub const FAR_PLANE: f32 = 100000.0;
+pub const NEAR_PLANE: f32 = 0.0625;
+pub const FAR_PLANE: f32 = 524288.0625;

 const FIRST_PERSON_INTERP_TIME: f32 = 0.1;
 const THIRD_PERSON_INTERP_TIME: f32 = 0.1;
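
A small illustrative sketch (not from the commit) of the depth values the new constants produce under the reversed ("1 to 0") convention: a point at view distance z gets depth (1/FAR_PLANE - 1/z) / (1/FAR_PLANE - 1/NEAR_PLANE), which is 1.0 at the near plane and 0.0 at the far plane; this is why the shader hunk earlier now treats buf_depth == 0.0 (rather than 1.0) as the far plane.

```rust
const NEAR_PLANE: f32 = 0.0625;
const FAR_PLANE: f32 = 524288.0625;

/// Depth assigned by a reversed ("1 to 0") zero-to-one projection to a point
/// at view-space distance `z` (illustrative helper, not part of the commit).
fn reversed_depth(z: f32) -> f32 {
    (1.0 / FAR_PLANE - 1.0 / z) / (1.0 / FAR_PLANE - 1.0 / NEAR_PLANE)
}

fn main() {
    // 1.0 at the near plane, 0.0 at the far plane; most representable
    // floating-point depth values end up near the camera, where they matter.
    for z in [NEAR_PLANE, 1.0, 32.0, 1024.0, FAR_PLANE] {
        println!("z = {:>12.4} -> depth = {:.8}", z, reversed_depth(z));
    }
}
```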
@@ -31,6 +32,9 @@ pub struct Dependents {
     pub view_mat_inv: Mat4<f32>,
     pub proj_mat: Mat4<f32>,
     pub proj_mat_inv: Mat4<f32>,
+    /// Specifically there for satisfying our treeculler dependency, which can't
+    /// handle inverted depth planes.
+    pub proj_mat_treeculler: Mat4<f32>,
     pub cam_pos: Vec3<f32>,
     pub cam_dir: Vec3<f32>,
 }
@@ -64,6 +68,249 @@ fn clamp_and_modulate(ori: Vec3<f32>) -> Vec3<f32> {
     }
 }

+/// Generalized method to construct a perspective projection with x ∈ [-1,1], y
+/// ∈ [-1,1], z ∈ [0,1] given fov_y_radians, aspect_ratio, 1/n, and 1/f. Note
+/// that you pass in *1/n* and *1/f*, not n and f like you normally would for a
+/// perspective projection; this is done to enable uniform handling of both
+/// finite and infinite far planes.
+///
+/// The only requirements on n and f are: 1/n ≠ 1/f, and 0 ≤ 1/n * 1/f.
+///
+/// This ensures that the near and far plane are not identical (or else your
+/// projection would not cover any distance), and that they have the same sign
+/// (or else we cannot rely on clipping to properly fix your scene). This also
+/// ensures that at least one of 1/n and 1/f is not 0, and by construction it
+/// guarantees that neither n nor f are 0; these are required in order to make
+/// sense of the definition of near and far planes, and avoid collapsing all
+/// depths to a single point.
+///
+/// For "typical" projections (matching perspective_lh_no), you would satisfy
+/// the stronger requirements. We give the typical conditions for each bullet
+/// point, and then explain the consequences of not satisfying these conditions:
+///
+/// * 1/n < 1/f (0 to 1 depth planes, meaning n = near and f = far; if f < n,
+///   depth planes go from 1 to 0, meaning f = near and n = far, aka "reverse
+///   depth").
+///
+///   This is by far the most
+///   likely thing to want to change; inverted depth coordinates have *far*
+///   better accuracy for DirectX / Metal / WGPU-like APIs, when using
+///   floating point depth, while not being *worse* than the alternative
+///   (OpenGL-like depth, or when using fixed-point / integer depth). For
+///   maximum benefit, make sure you are using Depth32F, as on most platforms
+///   this is the only depth buffer size where floating point can be used.
+///
+///   It is a bit unintuitive to prove this, but it turns out that when using
+///   1 to 0 depth planes, the point where the depth buffer has its worst
+///   precision is not at the far plane (as with 0 to 1 depth planes) nor at
+///   the near plane, as you might expect, but exactly at far/2 (the
+///   near plane setting does not affect the point of minimum accuracy at
+///   all!). However, don't let this fool you into believing the point of
+///   worst precision has simply been moved around--for *any* fixed Δz that is
+///   the minimum amount of depth precision you want over the whole range, and
+///   any near plane, you can set the far plane farther (generally much much
+///   farther!) with reversed clip space than you can with standard clip space
+///   while still getting at least that much depth precision in the worst
+///   case. Nor is this a small worst-case; for many desirable near and far
+///   plane combinations, more than half the visible space will have
+///   completely unusable precision under 0 to 1 depth, while having much better
+///   than needed precision under 1 to 0 depth.
+///
+///   To compute the exact (at least "roughly exact") worst-case accuracy for
+///   floating point depth and a given precision target Δz, for reverse clip
+///   planes (this can be computed for the non-reversed case too, but it's
+///   painful and the values are horrible, so don't bother), we compute
+///   (assuming a finite far plane--see below for details on the infinite
+///   case) the change in the integer representation of the mantissa at z=n/2:
+///
+///   ```ignore
+///   e = floor(ln(near/(far - near))/ln(2))
+///   db/dz = 2^(2-e) / ((1 / far - 1 / near) * (far)^2)
+///   ```
+///
+///   Then the maximum precision you can safely use to get a change in the
+///   integer representation of the mantissa (assuming 32-bit floating points)
+///   is around:
+///
+///   ```ignore
+///   abs(2^(-23) / (db/dz)).
+///   ```
+///
+///   In particular, if your worst-case target accuracy over the depth range
+///   is Δz, you should be okay if:
+///
+///   ```ignore
+///   abs(Δz * (db/dz)) * 2^(23) ≥ 1.
+///   ```
+///
+///   This only accounts for precision of the final floating-point value, so
+///   it's possible that artifacts may be introduced elsewhere during the
+///   computation that reduce precision further; the most famous example of
+///   this is that OpenGL wipes out most of the precision gains by going from
+///   [-1,1] to [0,1] by letting
+///
+///   ```ignore
+///   clip space depth = depth * 0.5 + 0.5
+///   ```
+///
+///   which results in huge precision errors by removing nearly all the
+///   floating point values with the most precision (those close to 0).
+///   Fortunately, most such artifacts are absent under the wgpu/DirectX/Metal
+///   depth clip space model, so with any luck remaining depth errors due to
+///   the perspective warp itself should be minimal.
+///
+/// * 0 ≠ 1/far (finite far plane). When this is false, the far plane is at
+///   infinity; this removes the restriction of having a far plane at all, often
+///   with minimal reduction in accuracy for most values in the scene. In fact,
+///   in almost all cases with non-reversed depth planes, it *improves* accuracy
+///   over the finite case for the vast majority of the range; however, you
+///   should be using reversed depth planes, and if you are then there is a
+///   quite natural accuracy vs. distance tradeoff in the infinite case.
+///
+///   When using an infinite far plane, the worst-case accuracy is *always* at
+///   infinity, and gets progressively worse as you get farther away from the
+///   near plane. However, there is a second advantage that may not be
+///   immediately apparent: the perspective warp becomes much simpler,
+///   potentially removing artifacts! Specifically, in the 0 to 1 depth plane
+///   case, the assigned depth value (after perspective division) becomes:
+///
+///   ```ignore
+///   depth = 1 - near/z
+///   ```
+///
+///   while in the 1 to 0 depth plane case (which you should be using), the
+///   equation is even simpler:
+///
+///   ```ignore
+///   depth = near/z
+///   ```
+///
+///   In the 1 to 0 case, in particular, you can see that the depth value is
+///   *linear in z in log space.* This lets us compute, for any given target
+///   precision, a *very* simple worst-case upper bound on the maximum
+///   absolute z value for which that precision can be achieved (the upper
+///   bound is tight in some cases, but in others may be conservative):
+///
+///   ```ignore
+///   db/dz ≥ 1/z
+///   ```
+///
+///   Plugging that into our old formula, we find that we attain the required
+///   precision at least in the range (again, this is for the 1 to 0 infinite
+///   case only!):
+///
+///   ```ignore
+///   abs(z) ≤ Δz * 2^23
+///   ```
+///
+///   One thing you may notice is that this worst-case bound *does not depend
+///   on the near plane.* This means that (within reason) you can put the near
+///   plane as close as you like and still attain this bound. Of course, the
+///   bound is not completely tight, but it should not be off by more than a
+///   factor of 2 or so (informally proven, not made rigorous yet), so for most
+///   practical purposes you can set the near plane as low as you like in this
+///   case.
+///
+/// * 0 < 1/near (positive near plane--best used when moving *to* left-handed
+///   spaces, as we normally do in OpenGL and DirectX). A use case for *not*
+///   doing this is that it allows moving *from* a left-handed space *to* a
+///   right-handed space in WGPU / DirectX / Metal coordinates; this means that
+///   if matrices were already set up for OpenGL using functions like look_at_rh
+///   that assume right-handed coordinates, we can simply switch these to
+///   look_at_lh and use a right-handed perspective projection with a negative
+///   near plane, to get correct rendering behavior. Details are out of scope
+///   for this comment.
+///
+/// Note that there is one final, very important thing that affects possible
+/// precision--the actual underlying precision of the floating point format at a
+/// particular value! As your z values go up, their precision will shrink, so
+/// if at all possible try to shrink your z values down to the lowest range in
+/// which they can be. Unfortunately, this cannot be part of the perspective
+/// projection itself, because by the time z gets to the projection it is
+/// usually too late for values to still be integers (or coarse-grained powers
+/// of 2). Instead, try to scale down x, y, and z as soon as possible before
+/// submitting them to the GPU, ideally by as large as possible of a power of 2
+/// that works for your use case. Not only will this improve depth precision
+/// and recall, it will also help address other artifacts caused by values far
+/// from zero (such as improperly rounded rotations, or improper line equations
+/// due to greedy meshing).
+///
+/// TODO: Consider passing fractions rather than 1/n and 1/f directly, even
+/// though the logic for why it should be okay to pass them directly is probably
+/// sound (they are both valid z values in the range, so gl_FragCoord.w will be
+/// assigned to this, meaning if they are imprecise enough then the whole
+/// calculation will be similarly imprecise).
+///
+/// TODO: Since it's a bit confusing that n and f are not always near and far,
+/// and a negative near plane can (probably) be emulated with simple actions on
+/// the perspective matrix, consider removing this functionality and replacing
+/// our assertion with a single condition: `(1/far) * (1/near) < (1/near)²`.
+pub fn perspective_lh_zo_general<T>(
+    fov_y_radians: T,
+    aspect_ratio: T,
+    inv_n: T,
+    inv_f: T,
+) -> Mat4<T>
+where
+    T: Real + FloatConst + Debug,
+{
+    // Per comments, we only need these two assertions to make sure our calculations
+    // make sense.
+    debug_assert_ne!(
+        inv_n, inv_f,
+        "The near and far plane distances cannot be equal, found: {:?} = {:?}",
+        inv_n, inv_f
+    );
+    debug_assert!(
+        T::zero() <= inv_n * inv_f,
+        "The near and far plane distances must have the same sign, found: {:?} * {:?} < 0",
+        inv_n,
+        inv_f
+    );
+
+    // TODO: Would be nice to separate out the aspect ratio computations.
+    let two = T::one() + T::one();
+    let tan_half_fovy = (fov_y_radians / two).tan();
+    let m00 = T::one() / (aspect_ratio * tan_half_fovy);
+    let m11 = T::one() / tan_half_fovy;
+    let m23 = -T::one() / (inv_n - inv_f);
+    let m22 = inv_n * (-m23);
+    Mat4::new(
+        m00,
+        T::zero(),
+        T::zero(),
+        T::zero(),
+        T::zero(),
+        m11,
+        T::zero(),
+        T::zero(),
+        T::zero(),
+        T::zero(),
+        m22,
+        m23,
+        T::zero(),
+        T::zero(),
+        T::one(),
+        T::zero(),
+    )
+}
+
+/// Same as perspective_lh_zo_general, but for right-handed source spaces.
+pub fn perspective_rh_zo_general<T>(
+    fov_y_radians: T,
+    aspect_ratio: T,
+    inv_n: T,
+    inv_f: T,
+) -> Mat4<T>
+where
+    T: Real + FloatConst + Debug,
+{
+    let mut m = perspective_lh_zo_general(fov_y_radians, aspect_ratio, inv_n, inv_f);
+    m[(2, 2)] = -m[(2, 2)];
+    m[(3, 2)] = -m[(3, 2)];
+    m
+}
+
 impl Camera {
     /// Create a new `Camera` with default parameters.
     pub fn new(aspect: f32, mode: CameraMode) -> Self {
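
As a rough, illustrative sanity check (not part of the commit), plugging the new NEAR_PLANE and FAR_PLANE into the worst-case-precision formula documented above gives a worst-case resolvable depth step of about 2^-6 ≈ 0.016 world units for a 32-bit float depth buffer:

```rust
/// Worst-case depth step Δz resolvable by a 32-bit float reversed-depth
/// buffer, per the formula in the doc comment above (illustrative only).
fn worst_case_depth_step(near: f64, far: f64) -> f64 {
    // e = floor(ln(near / (far - near)) / ln(2))
    let e = ((near / (far - near)).ln() / 2f64.ln()).floor();
    // db/dz = 2^(2 - e) / ((1/far - 1/near) * far^2)
    let db_dz = 2f64.powf(2.0 - e) / ((1.0 / far - 1.0 / near) * far * far);
    // abs(Δz * db/dz) * 2^23 ≥ 1  ⇒  Δz ≥ 2^-23 / abs(db/dz)
    2f64.powi(-23) / db_dz.abs()
}

fn main() {
    // With NEAR_PLANE = 0.0625 and FAR_PLANE = 524288.0625 this prints
    // roughly 0.015625, i.e. about 1/64 of a world unit.
    println!("{}", worst_case_depth_step(0.0625, 524288.0625));
}
```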
@@ -89,6 +336,7 @@ impl Camera {
                view_mat_inv: Mat4::identity(),
                proj_mat: Mat4::identity(),
                proj_mat_inv: Mat4::identity(),
+                proj_mat_treeculler: Mat4::identity(),
                cam_pos: Vec3::zero(),
                cam_dir: Vec3::unit_y(),
            },
@@ -135,14 +383,19 @@ impl Camera {
            * Mat4::translation_3d(-self.focus.map(|e| e.fract()));
        self.dependents.view_mat_inv = self.dependents.view_mat.inverted();

+        // NOTE: We reverse the far and near planes to produce an inverted depth
+        // buffer (1 to 0 z planes).
        self.dependents.proj_mat =
-            Mat4::perspective_rh_zo(self.fov, self.aspect, NEAR_PLANE, FAR_PLANE);
+            perspective_rh_zo_general(self.fov, self.aspect, 1.0 / FAR_PLANE, 1.0 / NEAR_PLANE);
+        // For treeculler, we also produce a version without inverted depth.
+        self.dependents.proj_mat_treeculler =
+            perspective_rh_zo_general(self.fov, self.aspect, 1.0 / NEAR_PLANE, 1.0 / FAR_PLANE);
        self.dependents.proj_mat_inv = self.dependents.proj_mat.inverted();

        // TODO: Make this more efficient.
        self.dependents.cam_pos = Vec3::from(self.dependents.view_mat_inv * Vec4::unit_w());
        self.frustum = Frustum::from_modelview_projection(
-            (self.dependents.proj_mat
+            (self.dependents.proj_mat_treeculler
                * self.dependents.view_mat
                * Mat4::translation_3d(-self.focus.map(|e| e.trunc())))
            .into_col_arrays(),
@@ -129,7 +129,7 @@ impl<'a> SceneData<'a> {
 /// W_e = 2 is the width of the image plane (for our projections, since they go
 /// from -1 to 1) n_e = near_plane is the near plane for the view frustum
 /// θ = (fov / 2) is the half-angle of the FOV (the one passed to
-/// Mat4::projection_rh_no).
+/// Mat4::projection_rh_zo).
 ///
 /// Although the widths for the x and y image planes are the same, they are
 /// different in this framework due to the introduction of an aspect ratio:
@@ -657,8 +657,7 @@ impl Scene {
                &scene_data,
                focus_pos,
                self.loaded_distance,
-                view_mat,
-                proj_mat,
+                &self.camera,
            );

        // Maintain the figures.
@@ -696,9 +695,10 @@ impl Scene {
        // OpenGL coordinates. Note that the matrix for directional light
        // is *already* linear in the depth buffer.
        //
-        // Also, observe that we flip the texture sampling matrix in order to account for the
-        // fact that DirectX renders top-down.
-        let texture_mat = Mat4::<f32>::scaling_3d::<Vec3<f32>>(Vec3::new(0.5, -0.5, 1.0)) * Mat4::translation_3d(Vec3::new(1.0, -1.0, 0.0));
+        // Also, observe that we flip the texture sampling matrix in order to account
+        // for the fact that DirectX renders top-down.
+        let texture_mat = Mat4::<f32>::scaling_3d::<Vec3<f32>>(Vec3::new(0.5, -0.5, 1.0))
+            * Mat4::translation_3d(Vec3::new(1.0, -1.0, 0.0));
        // We need to compute these offset matrices to transform world space coordinates
        // to the translated ones we use when multiplying by the light space
        // matrix; this helps avoid precision loss during the
@@ -722,7 +722,8 @@ impl Scene {
        let sin_gamma = (1.0 - cos_gamma * cos_gamma).sqrt();
        let gamma = sin_gamma.asin();
        let view_mat = math::Mat4::from_col_array(view_mat.into_col_array());
-        // coordinates are transformed from world space (right-handed) to view space (right-handed).
+        // coordinates are transformed from world space (right-handed) to view space
+        // (right-handed).
        let bounds1 = math::fit_psr(
            view_mat.map_cols(math::Vec4::from),
            visible_light_volume.iter().copied(),
@@ -756,7 +757,8 @@ impl Scene {
        );

        let light_all_mat = l_r * directed_proj_mat * light_view_mat;
-        // coordinates are transformed from world space (right-handed) to rotated light space (left-handed).
+        // coordinates are transformed from world space (right-handed) to rotated light
+        // space (left-handed).
        let bounds0 = math::fit_psr(
            light_all_mat,
            visible_light_volume.iter().copied(),
@@ -765,11 +767,13 @@ impl Scene {
        // Vague idea: project z_n from the camera view to the light view (where it's
        // tilted by γ).
        //
-        // NOTE: To transform a normal by M, we multiply by the transpose of the inverse of M.
-        // For the cases below, we are transforming by an already-inverted matrix, so the
-        // transpose of its inverse is just the transpose of the original matrix.
-        // normals as well as points, rather than taking the transpose of the matrix,
-        // is that our matrix is (for normals) a pure rotation matrix, which means it is d
+        // NOTE: To transform a normal by M, we multiply by the transpose of the inverse
+        // of M. For the cases below, we are transforming by an
+        // already-inverted matrix, so the transpose of its inverse is
+        // just the transpose of the original matrix. normals as well as
+        // points, rather than taking the transpose of the matrix,
+        // is that our matrix is (for normals) a pure rotation matrix, which means it is
+        // d
        let (z_0, z_1) = {
            // view space, right-handed coordinates.
            let p_z = bounds1.max.z;
@@ -782,15 +786,24 @@ impl Scene {
            let light_all_inv = light_all_mat.inverted();

            // moves from view-space (right-handed) to world-space (right-handed).
-            let view_point = view_inv * math::Vec4::from_point(math::Vec3::forward_rh() * p_z/* + math::Vec4::unit_w() */);
+            let view_point = view_inv
+                * math::Vec4::from_point(
+                    math::Vec3::forward_rh() * p_z, /* + math::Vec4::unit_w() */
+                );
            let view_plane = view_mat.transposed() * math::Vec4::forward_rh();

            // moves from rotated light space (left-handed) to world space (right-handed).
-            let light_point = light_all_inv * math::Vec4::from_point(math::Vec3::up() * p_y/* + math::Vec4::unit_w() */);
+            let light_point = light_all_inv
+                * math::Vec4::from_point(
+                    math::Vec3::up() * p_y, /* + math::Vec4::unit_w() */
+                );
            let light_plane = light_all_mat.transposed() * math::Vec4::up();

            // moves from rotated light space (left-handed) to world space (right-handed).
-            let shadow_point = light_all_inv * math::Vec4::from_point(math::Vec3::right() * p_x/* + math::Vec4::unit_w() */);
+            let shadow_point = light_all_inv
+                * math::Vec4::from_point(
+                    math::Vec3::right() * p_x, /* + math::Vec4::unit_w() */
+                );
            let shadow_plane = light_all_mat.transposed() * math::Vec4::right();

            // Find the point at the intersection of the three planes; note that since the
@@ -834,7 +847,10 @@ impl Scene {
            //
            // NOTE: I don't think the w component should be anything but 1 here, but
            // better safe than sorry.
-            (f64::from(z0.homogenized().dot(math::Vec4::forward_rh())), f64::from(z1.homogenized().dot(math::Vec4::forward_rh())))
+            (
+                f64::from(z0.homogenized().dot(math::Vec4::forward_rh())),
+                f64::from(z1.homogenized().dot(math::Vec4::forward_rh())),
+            )
        };

        // all of this is in rotated light-space (left-handed).
@@ -848,8 +864,8 @@ impl Scene {
        let w_l_y = d;

        // NOTE: See section 5.1.2.2 of Lloyd's thesis.
-        // NOTE: Since z_1 and z_0 are in the same coordinate space, we don't have to worry
-        // about the handedness of their ratio.
+        // NOTE: Since z_1 and z_0 are in the same coordinate space, we don't have to
+        // worry about the handedness of their ratio.
        let alpha = z_1 / z_0;
        let alpha_sqrt = alpha.sqrt();
        let directed_near_normal = if factor < 0.0 {
@@ -954,16 +970,19 @@ impl Scene {
        shadow_mats.resize_with(6, PointLightMatrix::default);
        // Now, we tackle point lights.
        // First, create a perspective projection matrix at 90 degrees (to cover a whole
-        // face of the cube map we're using).
-        let shadow_proj = Mat4::perspective_rh_zo(
+        // face of the cube map we're using); we use a negative near plane to exactly
+        // match OpenGL's behavior if we use a left-handed coordinate system everywhere
+        // else.
+        let shadow_proj = camera::perspective_rh_zo_general(
            90.0f32.to_radians(),
            point_shadow_aspect,
-            SHADOW_NEAR,
-            SHADOW_FAR,
+            1.0 / SHADOW_NEAR,
+            1.0 / SHADOW_FAR,
        );
        // NOTE: We negate here to emulate a right-handed projection with a negative
-        // near plane, which produces the correct transformation to exactly match OpenGL's
-        // rendering behavior if we use a left-handed coordinate system everywhere else.
+        // near plane, which produces the correct transformation to exactly match
+        // OpenGL's rendering behavior if we use a left-handed coordinate
+        // system everywhere else.
        let shadow_proj = shadow_proj * Mat4::scaling_3d(-1.0);

        // Next, construct the 6 orientations we'll use for the six faces, in terms of
@@ -15,7 +15,10 @@ use crate::{
    },
};

-use super::{math, SceneData};
+use super::{
+    camera::{self, Camera},
+    math, SceneData,
+};
use common::{
    assets::{self, AssetExt, DotVoxAsset},
    figure::Segment,
@@ -767,9 +770,14 @@ impl<V: RectRasterableVol> Terrain<V> {
        scene_data: &SceneData,
        focus_pos: Vec3<f32>,
        loaded_distance: f32,
-        view_mat: Mat4<f32>,
-        proj_mat: Mat4<f32>,
+        camera: &Camera,
    ) -> (Aabb<f32>, Vec<math::Vec3<f32>>, math::Aabr<f32>) {
+        let camera::Dependents {
+            view_mat,
+            proj_mat_treeculler,
+            ..
+        } = camera.dependents();
+
        // Remove any models for chunks that have been recently removed.
        // Note: Does this before adding to todo list just in case removed chunks were
        // replaced with new chunks (although this would probably be recorded as
@@ -1215,7 +1223,7 @@ impl<V: RectRasterableVol> Terrain<V> {
        span!(guard, "Construct view frustum");
        let focus_off = focus_pos.map(|e| e.trunc());
        let frustum = Frustum::from_modelview_projection(
-            (proj_mat * view_mat * Mat4::translation_3d(-focus_off)).into_col_arrays(),
+            (proj_mat_treeculler * view_mat * Mat4::translation_3d(-focus_off)).into_col_arrays(),
        );
        drop(guard);

@@ -1292,10 +1300,13 @@ impl<V: RectRasterableVol> Terrain<V> {
        let focus_off = math::Vec3::from(focus_off);
        let visible_bounds_fine = visible_bounding_box.as_::<f64>();
        let inv_proj_view =
-            math::Mat4::from_col_arrays((proj_mat * view_mat).into_col_arrays())
+            math::Mat4::from_col_arrays((proj_mat_treeculler * view_mat).into_col_arrays())
                .as_::<f64>()
                .inverted();
        let ray_direction = math::Vec3::<f32>::from(ray_direction);
+        // NOTE: We use proj_mat_treeculler here because
+        // calc_focused_light_volume_points makes the assumption that the
+        // near plane lies before the far plane.
        let visible_light_volume = math::calc_focused_light_volume_points(
            inv_proj_view,
            ray_direction.as_::<f64>(),
@@ -1455,7 +1466,10 @@ impl<V: RectRasterableVol> Terrain<V> {
            light_data.iter().take(1).for_each(|_light| {
                drawer.draw_point_shadow(
                    &global.point_light_matrices,
-                    chunk_iter.clone().filter(|chunk| chunk.can_shadow_point).map(|chunk| (&chunk.opaque_model, &chunk.locals)),
+                    chunk_iter
+                        .clone()
+                        .filter(|chunk| chunk.can_shadow_point)
+                        .map(|chunk| (&chunk.opaque_model, &chunk.locals)),
                );
            });
        }