// zenyx-engine/engine/src/core/render/ctx.rs

use std::borrow::Cow;
use std::mem::offset_of;
use std::sync::Arc;
use std::time::Instant;
use cgmath::{Deg, Matrix4, Point3, Rad, SquareMatrix, Vector3, perspective};
use futures::executor::block_on;
use thiserror::Error;
use tracing::{debug, error, info, trace};
use wgpu::TextureUsages;
use wgpu::{Backends, InstanceDescriptor, util::DeviceExt};
use wgpu_text::glyph_brush::ab_glyph::FontRef;
use wgpu_text::glyph_brush::{HorizontalAlign, Layout, OwnedSection, OwnedText, VerticalAlign};
use wgpu_text::{BrushBuilder, TextBrush};
use winit::window::Window;
use crate::error::Result;
use crate::error::{ZenyxError, ZenyxErrorKind};
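
/// WGSL source for the main render pipeline, embedded at compile time from `shader.wgsl`.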
const SHADER_SRC: &str = include_str!("shader.wgsl");
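
/// A single mesh vertex as uploaded to the GPU.
///
/// The `#[repr(C)]` layout and field order must match the vertex attributes
/// declared in `Vertex::ATTRIBS`: `position` at shader location 0 and
/// `normal` at shader location 1, both `Float32x3`.
///
/// ```rust,ignore
/// // Sketch only: a single triangle in the XY plane with +Z normals.
/// let triangle = [
///     Vertex { position: [ 0.0,  0.5, 0.0], normal: [0.0, 0.0, 1.0] },
///     Vertex { position: [-0.5, -0.5, 0.0], normal: [0.0, 0.0, 1.0] },
///     Vertex { position: [ 0.5, -0.5, 0.0], normal: [0.0, 0.0, 1.0] },
/// ];
/// ```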
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
pub position: [f32; 3],
pub normal: [f32; 3],
}
impl Vertex {
const ATTRIBS: [wgpu::VertexAttribute; 2] = [
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: offset_of!(Vertex, normal) as u64,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
];
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &Self::ATTRIBS,
}
}
}
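
/// GPU-side camera data: column-major view and projection matrices, uploaded
/// to the camera uniform buffer (bind group 0, binding 0).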
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
view: [[f32; 4]; 4],
proj: [[f32; 4]; 4],
}
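
/// GPU-side per-model data: the column-major model (world) transform, uploaded
/// to each model's uniform buffer (bind group 1, binding 0).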
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct ModelUniform {
model: [[f32; 4]; 4],
}
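
/// A simple look-at camera with a perspective projection and its GPU resources.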
struct Camera {
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
view: Matrix4<f32>,
proj: Matrix4<f32>,
}
impl Camera {
fn new(
device: &wgpu::Device,
bind_group_layout: &wgpu::BindGroupLayout,
width: u32,
height: u32,
) -> Self {
let view = Matrix4::look_at_rh(
Point3::new(0.0, 0.0, 3.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::unit_y(),
);
let aspect = width as f32 / height as f32;
let proj = perspective(Rad::from(Deg(45.0)), aspect, 0.1, 100.0);
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Camera Uniform Buffer"),
size: std::mem::size_of::<CameraUniform>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Camera Bind Group"),
layout: bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}],
});
Self {
uniform_buffer,
bind_group,
view,
proj,
}
}
fn resize(&mut self, width: u32, height: u32) {
let aspect = width as f32 / height as f32;
self.proj = perspective(Rad::from(Deg(45.0)), aspect, 0.1, 100.0);
}
fn update(&self, queue: &wgpu::Queue) {
let view_array: [[f32; 4]; 4] = self.view.into();
let proj_array: [[f32; 4]; 4] = self.proj.into();
let uniform = CameraUniform {
view: view_array,
proj: proj_array,
};
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::bytes_of(&uniform));
}
}
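
/// A renderable mesh: vertex/index buffers, a per-model uniform buffer and bind
/// group, and a CPU-side transform. `version` is bumped whenever the transform
/// changes so the renderer only re-uploads uniforms for dirty models.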
#[derive(Debug)]
struct Model {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
index_count: u32,
transform: Matrix4<f32>,
version: u32,
}
impl Model {
fn new(
device: &wgpu::Device,
vertices: &[Vertex],
indices: &[u32],
bind_group_layout: &wgpu::BindGroupLayout,
) -> Self {
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
            contents: bytemuck::cast_slice(indices),
usage: wgpu::BufferUsages::INDEX,
});
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Model Uniform Buffer"),
size: std::mem::size_of::<ModelUniform>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Model Bind Group"),
layout: bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}],
});
Self {
vertex_buffer,
index_buffer,
uniform_buffer,
bind_group,
index_count: indices.len() as u32,
transform: Matrix4::identity(),
version: 1,
}
}
fn update(&self, queue: &wgpu::Queue) {
let model_array: [[f32; 4]; 4] = self.transform.into();
let uniform = ModelUniform { model: model_array };
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::bytes_of(&uniform));
}
fn set_transform(&mut self, transform: Matrix4<f32>) {
if self.transform != transform {
self.transform = transform;
self.version += 1;
}
}
}
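
/// The wgpu renderer: owns the device, queue, surface, camera, models, render
/// pipeline, depth buffer, and the FPS text overlay.
///
/// ```rust,ignore
/// // Sketch only; `window`, `vertices`, and `indices` are placeholders for
/// // values provided by the application (not defined in this module).
/// let mut renderer = Renderer::new_blocking(window)?;
/// renderer.add_model(&vertices, &indices);
/// // Call once per frame, e.g. from the winit event loop:
/// renderer.draw();
/// ```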
pub struct Renderer<'window> {
device: wgpu::Device,
queue: wgpu::Queue,
surface: wgpu::Surface<'window>,
surface_config: wgpu::SurfaceConfiguration,
camera: Camera,
models: Vec<Model>,
render_pipeline: wgpu::RenderPipeline,
depth_texture: wgpu::Texture,
depth_texture_view: wgpu::TextureView,
camera_bind_group_layout: wgpu::BindGroupLayout,
model_bind_group_layout: wgpu::BindGroupLayout,
bg_color: wgpu::Color,
start_time: Instant,
last_frame_instant: Instant,
frame_count: u32,
fps: f32,
font_state: FontState,
model_versions: Vec<u32>,
}
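
/// State for the on-screen FPS text: the glyph brush plus the owned section,
/// scale, and color used to rebuild the text each frame.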
struct FontState {
brush: TextBrush<FontRef<'static>>,
section: OwnedSection,
scale: f32,
color: wgpu::Color,
}
impl<'window> Renderer<'window> {
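    /// Creates a renderer for `window`: requests an adapter and device, builds
    /// the camera and model bind group layouts, the render pipeline, the depth
    /// buffer, and the text brush, then configures the surface.
    ///
    /// Fails if no suitable adapter or device is available, or if surface
    /// creation or font loading fails.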
pub async fn new(window: Arc<Window>) -> Result<Self> {
let instance = wgpu::Instance::new(&InstanceDescriptor {
backends: Backends::from_comma_list("dx12,metal,opengl,webgpu,vulkan"),
..Default::default()
});
let surface = instance.create_surface(Arc::clone(&window))?;
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
..Default::default()
})
.await
.ok_or_else(|| {
ZenyxError::builder(ZenyxErrorKind::AdapterRequest)
.with_message("No suitable adapter found")
.build()
})?;
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.map_err(ZenyxError::from)?;
let size = window.inner_size();
let width = size.width.max(1);
let height = size.height.max(1);
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Camera Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(
std::mem::size_of::<CameraUniform>() as u64,
),
},
count: None,
}],
});
let model_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Model Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(
std::mem::size_of::<ModelUniform>() as u64,
),
},
count: None,
}],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Pipeline Layout"),
bind_group_layouts: &[&camera_bind_group_layout, &model_bind_group_layout],
push_constant_ranges: &[],
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Main Shader"),
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(SHADER_SRC)),
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Main Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: surface.get_capabilities(&adapter).formats[0],
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: wgpu::TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
let camera = Camera::new(&device, &camera_bind_group_layout, width, height);
let surface_caps = surface.get_capabilities(&adapter);
let present_mode = [
wgpu::PresentMode::Immediate,
wgpu::PresentMode::Mailbox,
wgpu::PresentMode::AutoNoVsync,
]
.iter()
.copied()
.find(|mode| surface_caps.present_modes.contains(mode))
.unwrap_or(wgpu::PresentMode::Fifo);
debug!("Using {:#?} present mode.", present_mode);
let surface_config = wgpu::SurfaceConfiguration {
width,
height,
format: surface_caps.formats[0],
present_mode,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![],
usage: TextureUsages::RENDER_ATTACHMENT,
desired_maximum_frame_latency: 3,
};
surface.configure(&device, &surface_config);
let (depth_texture, depth_texture_view) =
create_depth_texture(&device, surface_config.width, surface_config.height);
let font_bytes = include_bytes!("DejaVuSans.ttf");
        let font = FontRef::try_from_slice(font_bytes).map_err(|e| {
            ZenyxError::builder(ZenyxErrorKind::DeviceRequest)
                .with_message("Font loading failed")
                .with_source(e)
                .build()
        })?;
let brush =
BrushBuilder::using_font(font).build(&device, width, height, surface_config.format);
let base_width = 1280.0;
let base_scale = 30.0;
let scale = base_scale * (surface_config.width as f32 / base_width as f32).clamp(0.5, 2.0);
let color = wgpu::Color::WHITE;
let section = OwnedSection::default()
.add_text(OwnedText::new("FPS: 0.00").with_scale(scale).with_color([
color.r as f32,
color.g as f32,
color.b as f32,
color.a as f32,
]))
.with_screen_position((10.0, 10.0))
.with_bounds((base_scale * 200.0, base_scale * 2.0))
.with_layout(
Layout::default()
.h_align(HorizontalAlign::Left)
.v_align(VerticalAlign::Top),
);
Ok(Self {
device,
queue,
surface,
surface_config,
camera,
models: Vec::new(),
render_pipeline,
camera_bind_group_layout,
model_bind_group_layout,
bg_color: wgpu::Color {
r: 0.1,
g: 0.1,
b: 0.1,
a: 1.0,
},
start_time: Instant::now(),
last_frame_instant: Instant::now(),
frame_count: 0,
depth_texture,
depth_texture_view,
fps: 0f32,
font_state: FontState {
brush,
section,
scale,
color,
},
model_versions: vec![],
})
}
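
    /// Blocking convenience wrapper around [`Renderer::new`] for callers that
    /// are not in an async context.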
pub fn new_blocking(window: Arc<Window>) -> Result<Self> {
block_on(Self::new(window))
}
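
    /// Uploads a mesh to the GPU and adds it to the draw list.
    ///
    /// `indices` is interpreted as a `u32` triangle list indexing into
    /// `vertices`. Front faces are counter-clockwise and back faces are culled
    /// by the pipeline.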
    pub fn add_model(&mut self, vertices: &[Vertex], indices: &[u32]) {
        let model = Model::new(
            &self.device,
            vertices,
            indices,
            &self.model_bind_group_layout,
        );
self.models.push(model);
self.model_versions.push(0);
}
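
    /// Handles a window resize: reconfigures the surface, recreates the depth
    /// buffer, resizes the text brush view, rescales the FPS text, and updates
    /// the camera's aspect ratio. Zero dimensions are clamped to 1.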
pub fn resize(&mut self, new_size: (u32, u32)) {
        let (width, height) = new_size;
        let width = width.max(1);
        let height = height.max(1);
        let (depth_texture, depth_view) = create_depth_texture(&self.device, width, height);
        self.surface_config.width = width;
        self.surface_config.height = height;
self.surface.configure(&self.device, &self.surface_config);
self.depth_texture = depth_texture;
self.depth_texture_view = depth_view;
self.font_state
.brush
.resize_view(width as f32, height as f32, &self.queue);
let base_width = 1280.0;
let base_scale = 30.0;
let scale = base_scale * (width as f32 / base_width as f32).clamp(0.5, 2.0);
self.font_state.scale = scale;
self.camera.resize(width, height);
}
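
    /// Renders one frame: animates the model transforms, uploads any dirty
    /// camera/model uniforms, draws all models in a depth-tested pass, overlays
    /// the FPS text in a second pass, presents the frame, and updates the FPS
    /// counter roughly once per second.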
pub fn draw(&mut self) {
let elapsed = self.start_time.elapsed().as_secs_f32();
self.camera.update(&self.queue);
for (i, model) in self.models.iter_mut().enumerate() {
let angle = Rad(elapsed * 0.8 + i as f32 * 0.3);
if i % 2 == 0 {
model.set_transform(Matrix4::from_angle_y(angle));
} else {
model.set_transform(Matrix4::from_angle_x(angle) * Matrix4::from_angle_y(angle));
}
}
for (i, model) in self.models.iter().enumerate() {
if model.version > self.model_versions[i] {
model.update(&self.queue);
#[cfg(debug_assertions)]
trace!("Updating model: {:#?}", model);
self.model_versions[i] = model.version;
}
}
        let surface_texture = match self.surface.get_current_texture() {
            Ok(texture) => texture,
            Err(e) => {
                let err = ZenyxError::builder(ZenyxErrorKind::SurfaceTexture)
                    .with_message("Failed to acquire surface texture")
                    .with_source(e)
                    .build();
                error!("{}", err);
                // The surface is typically lost or outdated here (e.g. during a
                // resize); reconfigure it and skip this frame instead of panicking.
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
        };
let view = surface_texture
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let fps_text = format!("FPS: {:.2}", self.fps);
self.font_state.section.text.clear();
self.font_state.section.text.push(
OwnedText::new(fps_text)
.with_scale(self.font_state.scale)
.with_color([
self.font_state.color.r as f32,
self.font_state.color.g as f32,
self.font_state.color.b as f32,
self.font_state.color.a as f32,
]),
);
if let Err(e) = self.font_state.brush.queue(
&self.device,
&self.queue,
&[self.font_state.section.clone()],
) {
error!("Failed to queue text: {}", e);
}
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Main Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.bg_color),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &self.depth_texture_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.camera.bind_group, &[]);
for model in &self.models {
render_pass.set_bind_group(1, &model.bind_group, &[]);
render_pass.set_vertex_buffer(0, model.vertex_buffer.slice(..));
render_pass
.set_index_buffer(model.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
render_pass.draw_indexed(0..model.index_count, 0, 0..1);
}
}
{
let mut text_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Text Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
self.font_state.brush.draw(&mut text_pass);
}
self.queue.submit(Some(encoder.finish()));
surface_texture.present();
self.frame_count += 1;
let elapsed_secs = self.last_frame_instant.elapsed().as_secs_f32();
        if elapsed_secs >= 1.0 {
            self.fps = self.frame_count as f32 / elapsed_secs;
            self.frame_count = 0;
            self.last_frame_instant = Instant::now();
        }
}
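
    /// Sets the clear color used for the main render pass.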
pub fn set_bg_color(&mut self, color: wgpu::Color) {
self.bg_color = color;
}
pub fn bg_color(&self) -> &wgpu::Color {
&self.bg_color
}
pub fn text_color(&self) -> &wgpu::Color {
&self.font_state.color
}
pub fn set_text_color(&mut self, color: wgpu::Color) {
self.font_state.color = color;
}
}
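
/// Creates a `Depth32Float` depth texture (and view) matching the given
/// surface dimensions.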
fn create_depth_texture(
device: &wgpu::Device,
width: u32,
height: u32,
) -> (wgpu::Texture, wgpu::TextureView) {
let size = wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
};
let desc = wgpu::TextureDescriptor {
label: Some("Depth Texture"),
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Depth32Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
(texture, view)
}