load arbitrary model data

Commit 901f7a63cf by Chance, 2025-03-29 01:31:16 -04:00; committed by BitSyndicate
Parent 87289bd8b3
Signed by untrusted user: bitsyndicate (GPG key ID: 443E4198D6BBA6DE)
11 changed files with 959 additions and 314 deletions


@@ -1,14 +1,16 @@
use std::borrow::Cow;
use std::mem::offset_of;
use std::sync::Arc;
use std::time::Instant;
use std::{backtrace::Backtrace, borrow::Cow};
use cgmath::{Matrix4, Point3, Rad, Vector3, perspective};
use cgmath::{Deg, Matrix4, Point3, Rad, SquareMatrix, Vector3, perspective};
use futures::executor::block_on;
use thiserror::Error;
use tracing::{error, trace};
use wgpu::TextureUsages;
use wgpu::{Backends, InstanceDescriptor, util::DeviceExt};
use winit::window::Window;
#[derive(Debug, Error)]
#[error(transparent)]
pub enum ContextErrorKind {
@@ -127,13 +129,21 @@ impl From<wgpu::RequestDeviceError> for RenderContextError {
}
}
const CUBE_SHADER: &str = r"
struct Uniforms {
mvp: mat4x4<f32>,
const SHADER_SRC: &str = r#"
struct CameraUniform {
view: mat4x4<f32>,
proj: mat4x4<f32>,
};
struct ModelUniform {
model: mat4x4<f32>,
};
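// Group 0 carries the shared camera matrices and is bound once per frame;
// group 1 carries the per-model transform and is rebound for each draw.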
@group(0) @binding(0)
var<uniform> u: Uniforms;
var<uniform> camera: CameraUniform;
@group(1) @binding(0)
var<uniform> model: ModelUniform;
struct VertexInput {
@location(0) position: vec3<f32>,
@@ -148,27 +158,27 @@ struct VertexOutput {
@vertex
fn vs_main(input: VertexInput) -> VertexOutput {
var output: VertexOutput;
output.clip_position = u.mvp * vec4<f32>(input.position, 1.0);
let model_pos = model.model * vec4<f32>(input.position, 1.0);
output.clip_position = camera.proj * camera.view * model_pos;
output.normal = input.normal;
return output;
}
@fragment
fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
let ambient: f32 = 0.2;
let light_dir = normalize(vec3<f32>(0.5, 1.0, 0.5));
let diffuse = clamp(dot(normalize(input.normal), light_dir), 0.0, 1.0);
// Mix ambient light to ensure no face is completely dark.
let brightness = ambient + (1.0 - ambient) * diffuse;
return vec4<f32>(0.7 * brightness, 0.7 * brightness, 0.9 * brightness, 1.0);
}
";
"#;
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
normal: [f32; 3],
pub struct Vertex {
pub position: [f32; 3],
pub normal: [f32; 3],
}
impl Vertex {
@@ -179,7 +189,7 @@ impl Vertex {
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
offset: offset_of!(Vertex, normal) as u64,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
@@ -194,170 +204,165 @@ impl Vertex {
}
}
static CUBE_VERTICES: &[Vertex] = &[
Vertex {
position: [-0.5, -0.5, 0.5],
normal: [0.0, 0.0, 1.0],
},
Vertex {
position: [0.5, -0.5, 0.5],
normal: [0.0, 0.0, 1.0],
},
Vertex {
position: [0.5, 0.5, 0.5],
normal: [0.0, 0.0, 1.0],
},
Vertex {
position: [0.5, 0.5, 0.5],
normal: [0.0, 0.0, 1.0],
},
Vertex {
position: [-0.5, 0.5, 0.5],
normal: [0.0, 0.0, 1.0],
},
Vertex {
position: [-0.5, -0.5, 0.5],
normal: [0.0, 0.0, 1.0],
},
Vertex {
position: [0.5, -0.5, -0.5],
normal: [0.0, 0.0, -1.0],
},
Vertex {
position: [-0.5, -0.5, -0.5],
normal: [0.0, 0.0, -1.0],
},
Vertex {
position: [-0.5, 0.5, -0.5],
normal: [0.0, 0.0, -1.0],
},
Vertex {
position: [-0.5, 0.5, -0.5],
normal: [0.0, 0.0, -1.0],
},
Vertex {
position: [0.5, 0.5, -0.5],
normal: [0.0, 0.0, -1.0],
},
Vertex {
position: [0.5, -0.5, -0.5],
normal: [0.0, 0.0, -1.0],
},
Vertex {
position: [0.5, -0.5, 0.5],
normal: [1.0, 0.0, 0.0],
},
Vertex {
position: [0.5, -0.5, -0.5],
normal: [1.0, 0.0, 0.0],
},
Vertex {
position: [0.5, 0.5, -0.5],
normal: [1.0, 0.0, 0.0],
},
Vertex {
position: [0.5, 0.5, -0.5],
normal: [1.0, 0.0, 0.0],
},
Vertex {
position: [0.5, 0.5, 0.5],
normal: [1.0, 0.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.5],
normal: [1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, -0.5, -0.5],
normal: [-1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, -0.5, 0.5],
normal: [-1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, 0.5, 0.5],
normal: [-1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, 0.5, 0.5],
normal: [-1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, 0.5, -0.5],
normal: [-1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, -0.5, -0.5],
normal: [-1.0, 0.0, 0.0],
},
Vertex {
position: [-0.5, 0.5, 0.5],
normal: [0.0, 1.0, 0.0],
},
Vertex {
position: [0.5, 0.5, 0.5],
normal: [0.0, 1.0, 0.0],
},
Vertex {
position: [0.5, 0.5, -0.5],
normal: [0.0, 1.0, 0.0],
},
Vertex {
position: [0.5, 0.5, -0.5],
normal: [0.0, 1.0, 0.0],
},
Vertex {
position: [-0.5, 0.5, -0.5],
normal: [0.0, 1.0, 0.0],
},
Vertex {
position: [-0.5, 0.5, 0.5],
normal: [0.0, 1.0, 0.0],
},
Vertex {
position: [-0.5, -0.5, -0.5],
normal: [0.0, -1.0, 0.0],
},
Vertex {
position: [0.5, -0.5, -0.5],
normal: [0.0, -1.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.5],
normal: [0.0, -1.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.5],
normal: [0.0, -1.0, 0.0],
},
Vertex {
position: [-0.5, -0.5, 0.5],
normal: [0.0, -1.0, 0.0],
},
Vertex {
position: [-0.5, -0.5, -0.5],
normal: [0.0, -1.0, 0.0],
},
];
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
view: [[f32; 4]; 4],
proj: [[f32; 4]; 4],
}
pub struct WgpuCtx<'window> {
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct ModelUniform {
model: [[f32; 4]; 4],
}
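/// CPU-side camera state plus the GPU uniform buffer and bind group used to upload it.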
struct Camera {
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
view: Matrix4<f32>,
proj: Matrix4<f32>,
}
impl Camera {
fn new(
device: &wgpu::Device,
bind_group_layout: &wgpu::BindGroupLayout,
width: u32,
height: u32,
) -> Self {
let view = Matrix4::look_at_rh(
Point3::new(0.0, 0.0, 3.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::unit_y(),
);
let aspect = width as f32 / height as f32;
let proj = perspective(Rad::from(Deg(45.0)), aspect, 0.1, 100.0);
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Camera Uniform Buffer"),
size: std::mem::size_of::<CameraUniform>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Camera Bind Group"),
layout: bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}],
});
Self {
uniform_buffer,
bind_group,
view,
proj,
}
}
fn resize(&mut self, width: u32, height: u32) {
let aspect = width as f32 / height as f32;
self.proj = perspective(Rad::from(Deg(45.0)), aspect, 0.1, 100.0);
}
fn update(&self, queue: &wgpu::Queue) {
let view_array: [[f32; 4]; 4] = self.view.into();
let proj_array: [[f32; 4]; 4] = self.proj.into();
let uniform = CameraUniform {
view: view_array,
proj: proj_array,
};
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::bytes_of(&uniform));
}
}
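/// A GPU-resident mesh: vertex and index buffers, a per-model transform, and the
/// uniform buffer/bind group that carries that transform to the shader.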
struct Model {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
index_count: u32, // Number of indices submitted to draw_indexed.
transform: Matrix4<f32>,
}
impl Model {
fn new(
device: &wgpu::Device,
vertices: &[Vertex],
indices: &[u32],
bind_group_layout: &wgpu::BindGroupLayout,
) -> Self {
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(indices), // Raw u32 index data.
usage: wgpu::BufferUsages::INDEX,
});
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Model Uniform Buffer"),
size: std::mem::size_of::<ModelUniform>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Model Bind Group"),
layout: bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}],
});
Self {
vertex_buffer,
index_buffer,
uniform_buffer,
bind_group,
index_count: indices.len() as u32,
transform: Matrix4::identity(),
}
}
fn update(&self, queue: &wgpu::Queue) {
let model_array: [[f32; 4]; 4] = self.transform.into();
let uniform = ModelUniform { model: model_array };
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::bytes_of(&uniform));
}
fn set_transform(&mut self, transform: Matrix4<f32>) {
self.transform = transform;
}
}
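/// Owns the wgpu device/surface state, the shared camera, and every loaded model.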
pub struct Renderer<'window> {
device: wgpu::Device,
queue: wgpu::Queue,
surface: wgpu::Surface<'window>,
surface_config: wgpu::SurfaceConfiguration,
adapter: wgpu::Adapter,
camera: Camera,
models: Vec<Model>,
render_pipeline: wgpu::RenderPipeline,
uniform_buffer: wgpu::Buffer,
vertex_buffer: wgpu::Buffer,
start_time: Instant,
depth_texture: wgpu::Texture,
depth_texture_view: wgpu::TextureView,
camera_bind_group_layout: wgpu::BindGroupLayout,
model_bind_group_layout: wgpu::BindGroupLayout,
bg_color: wgpu::Color,
start_time: Instant,
last_frame_instant: Instant,
frame_count: u32,
}
impl<'window> WgpuCtx<'window> {
pub async fn new(window: Arc<Window>) -> Result<WgpuCtx<'window>, RenderContextError> {
impl<'window> Renderer<'window> {
pub async fn new(window: Arc<Window>) -> Result<Self, RenderContextError> {
let instance = wgpu::Instance::new(&InstanceDescriptor {
backends: Backends::from_comma_list("dx12,metal,opengl,webgpu"),
..Default::default()
@@ -381,86 +386,97 @@ impl<'window> WgpuCtx<'window> {
None,
)
})?;
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.ctx_err(ContextErrorKind::DeviceRequest, "Device configuration")?;
let size = window.inner_size();
let width = size.width.max(1);
let height = size.height.max(1);
let surface_config = wgpu::SurfaceConfiguration {
width: width.max(1),
height: height.max(1),
format: wgpu::TextureFormat::Rgba8UnormSrgb,
present_mode: wgpu::PresentMode::AutoNoVsync,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: Vec::new(),
usage: TextureUsages::RENDER_ATTACHMENT,
desired_maximum_frame_latency: 3,
};
surface.configure(&device, &surface_config);
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Uniform Buffer"),
size: 64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Cube Vertex Buffer"),
contents: bytemuck::cast_slice(CUBE_VERTICES),
usage: wgpu::BufferUsages::VERTEX,
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Cube Shader"),
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(CUBE_SHADER)),
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Uniform Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(64),
},
count: None,
}],
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Camera Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(
std::mem::size_of::<CameraUniform>() as u64,
),
},
count: None,
}],
});
let model_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Model Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(
std::mem::size_of::<ModelUniform>() as u64,
),
},
count: None,
}],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Cube Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
label: Some("Pipeline Layout"),
bind_group_layouts: &[&camera_bind_group_layout, &model_bind_group_layout],
push_constant_ranges: &[],
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Main Shader"),
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(SHADER_SRC)),
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Cube Render Pipeline"),
label: Some("Main Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: &[Vertex::desc()],
compilation_options: wgpu::PipelineCompilationOptions::default(),
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: surface_config.format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING),
format: surface.get_capabilities(&adapter).formats[0],
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
depth_stencil: Some(wgpu::DepthStencilState {
format: wgpu::TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
@@ -469,15 +485,40 @@ impl<'window> WgpuCtx<'window> {
multiview: None,
cache: None,
});
Ok(WgpuCtx {
let camera = Camera::new(&device, &camera_bind_group_layout, width, height);
let surface_caps = surface.get_capabilities(&adapter);
let surface_config = wgpu::SurfaceConfiguration {
width,
height,
format: surface_caps.formats[0],
present_mode: wgpu::PresentMode::AutoNoVsync,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![],
usage: TextureUsages::RENDER_ATTACHMENT,
desired_maximum_frame_latency: 3,
};
surface.configure(&device, &surface_config);
let (depth_texture, depth_texture_view) = create_depth_texture(
&device,
surface_config.width,
surface_config.height,
// surface_config.format,
);
Ok(Self {
device,
queue,
surface,
surface_config,
adapter,
camera,
models: Vec::new(),
render_pipeline,
uniform_buffer,
vertex_buffer,
camera_bind_group_layout,
model_bind_group_layout,
bg_color: wgpu::Color {
r: 0.1,
g: 0.1,
@@ -487,79 +528,101 @@ impl<'window> WgpuCtx<'window> {
start_time: Instant::now(),
last_frame_instant: Instant::now(),
frame_count: 0,
depth_texture,
depth_texture_view,
})
}
pub fn new_blocking(window: Arc<Window>) -> Result<WgpuCtx<'window>, RenderContextError> {
pub fn new_blocking(window: Arc<Window>) -> Result<Self, RenderContextError> {
block_on(Self::new(window))
}
pub fn add_model(&mut self, vertices: &[Vertex], indices: &[u32]) {
let model = Model::new(
&self.device,
vertices,
indices,
&self.model_bind_group_layout,
);
self.models.push(model);
}
pub fn resize(&mut self, new_size: (u32, u32)) {
let (width, height) = new_size;
// Clamp to 1 so a minimized (zero-sized) window never produces invalid GPU resources.
let (width, height) = (width.max(1), height.max(1));
// The depth buffer must match the surface dimensions, so it is recreated on every resize.
let (depth_texture, depth_view) = create_depth_texture(&self.device, width, height);
self.surface_config.width = width;
self.surface_config.height = height;
self.surface.configure(&self.device, &self.surface_config);
self.depth_texture = depth_texture;
self.depth_texture_view = depth_view;
self.camera.resize(width, height);
}
pub fn draw(&mut self) {
let elapsed = self.start_time.elapsed().as_secs_f32() * 0.80f32;
let model = Matrix4::from_angle_x(Rad(elapsed)) * Matrix4::from_angle_y(Rad(elapsed));
let view = Matrix4::look_at_rh(
Point3::new(0.0, 0.0, 3.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::unit_y(),
);
let aspect = self.surface_config.width as f32 / self.surface_config.height as f32;
let proj = perspective(Rad(std::f32::consts::FRAC_PI_4), aspect, 0.1, 100.0);
let mvp = proj * view * model;
let mvp_array: [[f32; 4]; 4] = [
[mvp.x.x, mvp.x.y, mvp.x.z, mvp.x.w],
[mvp.y.x, mvp.y.y, mvp.y.z, mvp.y.w],
[mvp.z.x, mvp.z.y, mvp.z.z, mvp.z.w],
[mvp.w.x, mvp.w.y, mvp.w.z, mvp.w.w],
];
self.queue
.write_buffer(&self.uniform_buffer, 0, bytemuck::bytes_of(&mvp_array));
let elapsed = self.start_time.elapsed().as_secs_f32();
self.camera.update(&self.queue);
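// Spin each model, offsetting the phase by its index so multiple models stay visually distinct.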
for (i, model) in self.models.iter_mut().enumerate() {
let angle = Rad(elapsed * 0.8 + i as f32 * 0.3);
model.set_transform(Matrix4::from_angle_x(angle) * Matrix4::from_angle_y(angle));
model.update(&self.queue);
}
let surface_texture = self
.surface
.get_current_texture()
.expect("Failed to get surface texture");
let view_texture = surface_texture
.ctx_err(
ContextErrorKind::SurfaceTexture,
"Surface texture acquisition",
)
.unwrap();
let view = surface_texture
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Cube Command Encoder"),
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Cube Render Pass"),
label: Some("Main Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view_texture,
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.bg_color),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &self.depth_texture_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Uniform Bind Group"),
layout: &self.render_pipeline.get_bind_group_layout(0),
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: self.uniform_buffer.as_entire_binding(),
}],
});
render_pass.set_bind_group(0, &bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.draw(0..36, 0..1);
render_pass.set_bind_group(0, &self.camera.bind_group, &[]);
for model in &self.models {
render_pass.set_bind_group(1, &model.bind_group, &[]);
render_pass.set_vertex_buffer(0, model.vertex_buffer.slice(..));
render_pass
.set_index_buffer(model.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
render_pass.draw_indexed(0..model.index_count, 0, 0..1);
}
}
self.queue.submit(Some(encoder.finish()));
surface_texture.present();
@@ -573,10 +636,38 @@ impl<'window> WgpuCtx<'window> {
}
}
pub fn change_bg_color(&mut self, color: wgpu::Color) {
pub fn set_bg_color(&mut self, color: wgpu::Color) {
self.bg_color = color;
}
pub fn bg_color(&self) -> wgpu::Color {
self.bg_color.clone()
self.bg_color
}
}
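/// Creates a Depth32Float texture sized to the surface, plus a default view for use
/// as the render pass depth attachment.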
fn create_depth_texture(
device: &wgpu::Device,
width: u32,
height: u32,
// format: wgpu::TextureFormat,
) -> (wgpu::Texture, wgpu::TextureView) {
let size = wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
};
let desc = wgpu::TextureDescriptor {
label: Some("Depth Texture"),
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Depth32Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
(texture, view)
}
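
For context, a minimal sketch of how the new Renderer API might be driven from application code. The event-loop wiring and the `cube_indices` list are assumptions for illustration, not part of this commit:

// Hypothetical caller — assumes `window: Arc<Window>` already exists and that
// CUBE_VERTICES/Vertex are reachable from this module. CUBE_VERTICES is laid out
// one triangle after another, so sequential indices reproduce the old non-indexed draw.
let mut renderer = Renderer::new_blocking(window)?;
let cube_indices: Vec<u32> = (0..CUBE_VERTICES.len() as u32).collect();
renderer.add_model(CUBE_VERTICES, &cube_indices);
renderer.set_bg_color(wgpu::Color { r: 0.02, g: 0.02, b: 0.05, a: 1.0 });
// Per frame (e.g. from the winit redraw handler):
renderer.draw();
// On window resize events:
renderer.resize((new_width, new_height));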