
Migrate to tutorial 13 / threading

Florian RICHER 2022-06-19 12:42:19 +02:00
parent fe8a47d14d
commit 6f68444dc7
43 changed files with 4085 additions and 1163 deletions

@@ -1,212 +0,0 @@
use wgpu::util::DeviceExt;
use winit::event::{ElementState, KeyboardInput, VirtualKeyCode, WindowEvent};
use crate::input::Controllable;
use super::Renderable;
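// wgpu's normalized device coordinates put z in the range 0.0..1.0, while cgmath
// builds OpenGL-style projection matrices with z in -1.0..1.0; this matrix remaps
// between the two conventions.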
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
// We need this for Rust to store our data correctly for the shaders
#[repr(C)]
// This is so we can store this in a buffer
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable, Default)]
struct CameraUniform {
// We can't use cgmath with bytemuck directly so we'll have
// to convert the Matrix4 into a 4x4 f32 array
view_proj: [[f32; 4]; 4],
}
pub struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
controller: CameraController,
uniform: CameraUniform,
bind_group: Option<wgpu::BindGroup>,
bind_group_layout: Option<wgpu::BindGroupLayout>,
buffer: Option<wgpu::Buffer>,
}
impl Camera {
pub fn new(width: f32, height: f32, speed: f32) -> Self {
Self {
eye: (0.0, 1.0, 2.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: width / height,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
controller: CameraController::new(speed),
bind_group: None,
bind_group_layout: None,
uniform: CameraUniform::default(),
buffer: None,
}
}
fn update_uniform(&mut self) {
let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
self.uniform.view_proj = (OPENGL_TO_WGPU_MATRIX * proj * view).into();
}
pub fn get_bind_group_layout(&self) -> &wgpu::BindGroupLayout {
self.bind_group_layout.as_ref().unwrap()
}
fn update_camera(&mut self) {
use cgmath::InnerSpace;
let forward = self.target - self.eye;
let forward_norm = forward.normalize();
let forward_mag = forward.magnitude();
if self.controller.is_forward_pressed && forward_mag > self.controller.speed {
self.eye += forward_norm * self.controller.speed;
}
if self.controller.is_backward_pressed {
self.eye -= forward_norm * self.controller.speed;
}
let right = forward_norm.cross(self.up);
let forward = self.target - self.eye;
let forward_mag = forward.magnitude();
if self.controller.is_right_pressed {
self.eye = self.target - (forward + right * self.controller.speed).normalize() * forward_mag;
}
if self.controller.is_left_pressed {
self.eye = self.target - (forward - right * self.controller.speed).normalize() * forward_mag;
}
}
}
impl Renderable for Camera {
fn initialize(&mut self, device: &wgpu::Device) {
self.update_uniform();
self.buffer = Some(
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Camera Buffer"),
contents: bytemuck::cast_slice(&[self.uniform]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
}),
);
self.bind_group_layout = Some(device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("camera_bind_group_layout"),
},
));
self.bind_group = Some(device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: self.bind_group_layout.as_ref().unwrap(),
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: self.buffer.as_ref().unwrap().as_entire_binding(),
}],
label: Some("camera_bind_group"),
}));
}
fn update_instances(&mut self, queue: &wgpu::Queue) {
self.update_camera();
self.update_uniform();
queue.write_buffer(
self.buffer.as_ref().unwrap(),
0,
bytemuck::cast_slice(&[self.uniform]),
);
}
fn prepare<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>) {
render_pass.set_bind_group(1, self.bind_group.as_ref().unwrap(), &[]);
}
fn draw<'a>(&'a self, _render_pass: &mut wgpu::RenderPass<'a>) { }
}
impl Controllable for Camera {
fn process_events(&mut self, event: &WindowEvent) -> bool {
self.controller.process_events(event)
}
}
struct CameraController {
speed: f32,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
pub fn new(speed: f32) -> Self {
Self {
speed,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
pub fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
}
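
For context, a minimal sketch of how this Camera is meant to be driven, assuming the Renderable trait is in scope and that device, queue, and an open render_pass already exist; the window size and speed values below are illustrative, not taken from this commit:

let mut camera = Camera::new(800.0, 600.0, 0.2);
camera.initialize(&device);          // creates the uniform buffer, layout and bind group once

// each frame, after feeding WindowEvents to camera.process_events(..):
camera.update_instances(&queue);     // applies controller input and re-uploads view_proj
// inside the render pass, before drawing meshes:
camera.prepare(&mut render_pass);    // binds the camera uniform at group 1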

@@ -1,60 +0,0 @@
pub struct Instance {
pub position: cgmath::Vector3<f32>,
pub rotation: cgmath::Quaternion<f32>,
}
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct InstanceRaw {
model: [[f32; 4]; 4],
}
impl Instance {
pub fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: (cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation))
.into(),
}
}
}
impl InstanceRaw {
pub fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
// We need to switch from a step mode of Vertex to Instance.
// This means the shader will only advance to the next entry in this buffer
// when it starts processing a new instance, rather than a new vertex.
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
// While our vertex shader only uses locations 0 and 1 for now, later tutorials will
// use 2, 3, and 4 for Vertex, so we start at slot 5 to avoid conflicting with them.
shader_location: 5,
format: wgpu::VertexFormat::Float32x4,
},
// A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a
// slot for each vec4 and reassemble the mat4 in the shader (a shader-side sketch
// follows this block).
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
shader_location: 6,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 7,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
shader_location: 8,
format: wgpu::VertexFormat::Float32x4,
},
],
}
}
}
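
The four vec4 attributes above are meant to be recombined into a single mat4 on the GPU. A sketch of the shader-side counterpart, embedded here as a WGSL string for reference only; the struct and field names are illustrative and not taken from this commit:

const INSTANCE_INPUT_WGSL_SKETCH: &str = r#"
struct InstanceInput {
    @location(5) model_matrix_0: vec4<f32>,
    @location(6) model_matrix_1: vec4<f32>,
    @location(7) model_matrix_2: vec4<f32>,
    @location(8) model_matrix_3: vec4<f32>,
};

// Inside the vertex entry point, the mat4 is reassembled column by column:
// let model = mat4x4<f32>(
//     instance.model_matrix_0,
//     instance.model_matrix_1,
//     instance.model_matrix_2,
//     instance.model_matrix_3,
// );
"#;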

@@ -1,67 +0,0 @@
use std::sync::Arc;
use wgpu::{Device, util::DeviceExt, Queue};
use super::{Vertex, Renderable, Instance};
pub struct Mesh {
pub vertex_array: Vec<Vertex>,
pub index_array: Vec<u16>,
pub num_indices: u32,
pub instance_array: Vec<Instance>,
pub texture_bind_group: Option<Arc<wgpu::BindGroup>>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
pub instance_buffer: Option<wgpu::Buffer>,
}
impl Renderable for Mesh {
fn initialize(&mut self, device: &Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(&self.vertex_array),
usage: wgpu::BufferUsages::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(&self.index_array),
usage: wgpu::BufferUsages::INDEX,
}));
self.num_indices = self.index_array.len() as u32;
let instance_data = self.instance_array
.iter()
.map(Instance::to_raw)
.collect::<Vec<_>>();
self.instance_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
}));
}
fn update_instances(&mut self, queue: &Queue) {
let instance_data = self
.instance_array
.iter()
.map(Instance::to_raw)
.collect::<Vec<_>>();
queue.write_buffer(
self.instance_buffer.as_ref().unwrap(),
0,
bytemuck::cast_slice(&instance_data),
);
}
fn prepare<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>) {
render_pass.set_bind_group(0, self.texture_bind_group.as_ref().unwrap(), &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.as_ref().unwrap().slice(..));
render_pass.set_vertex_buffer(1, self.instance_buffer.as_ref().unwrap().slice(..));
render_pass.set_index_buffer(self.index_buffer.as_ref().unwrap().slice(..), wgpu::IndexFormat::Uint16);
}
fn draw<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>) {
render_pass.draw_indexed(0..self.num_indices as _, 0, 0..self.instance_array.len() as _);
}
}
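
A minimal sketch of how a Mesh might be populated before initialize() runs; the quad geometry is illustrative, and diffuse_bind_group is assumed to come from the TextureManager shown further down:

use cgmath::prelude::*;
use std::sync::Arc;

let quad = Mesh {
    vertex_array: vec![
        Vertex { position: [-0.5, -0.5, 0.0], tex_coords: [0.0, 1.0] },
        Vertex { position: [ 0.5, -0.5, 0.0], tex_coords: [1.0, 1.0] },
        Vertex { position: [ 0.5,  0.5, 0.0], tex_coords: [1.0, 0.0] },
        Vertex { position: [-0.5,  0.5, 0.0], tex_coords: [0.0, 0.0] },
    ],
    index_array: vec![0, 1, 2, 0, 2, 3],
    num_indices: 0, // filled in by initialize()
    instance_array: vec![Instance {
        position: cgmath::Vector3::new(0.0, 0.0, 0.0),
        rotation: cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0)),
    }],
    texture_bind_group: Some(Arc::new(diffuse_bind_group)), // assumed to exist already
    vertex_buffer: None,
    index_buffer: None,
    instance_buffer: None,
};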

@@ -1,29 +0,0 @@
mod vertex;
pub use vertex::Vertex;
mod camera;
pub use camera::Camera;
mod texture;
pub use texture::{Texture, TextureManager};
mod instance;
pub use instance::{
Instance, InstanceRaw
};
use wgpu::{Device, Queue};
mod mesh;
pub use mesh::Mesh;
mod window;
pub use window::Window;
mod pipelines;
pub trait Renderable {
fn initialize(&mut self, device: &Device);
fn update_instances(&mut self, queue: &Queue);
fn prepare<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>);
fn draw<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>);
}
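
A minimal sketch of the call order this trait implies, assuming entities is a Vec<Box<dyn Renderable>> that outlives the render pass, and that device, queue and render_pass are in scope; all of these names are illustrative:

// once, after the GPU device has been created:
for entity in entities.iter_mut() {
    entity.initialize(&device);
}

// every frame:
for entity in entities.iter_mut() {
    entity.update_instances(&queue);   // CPU-side updates + buffer uploads
}
for entity in entities.iter() {
    entity.prepare(&mut render_pass);  // bind groups, vertex/index buffers
    entity.draw(&mut render_pass);     // issue the draw call
}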

@@ -1,9 +0,0 @@
use wgpu::{Device, Queue};
use super::Renderable;
pub trait Processable {
fn initialize(&mut self, device: &Device, queue: &Queue, renderable_entities: Vec<Box<dyn Renderable>>);
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>, renderable_entities: Vec<Box<dyn Renderable>>);
fn render(&mut self, renderable_entities: Vec<Box<dyn Renderable>>) -> Result<(), wgpu::SurfaceError>;
}

@@ -1,66 +0,0 @@
mod texture;
pub use texture::Texture;
use wgpu::{BindGroup, Device, Queue};
pub struct TextureManager {
texture_bind_group_layout: wgpu::BindGroupLayout,
}
impl TextureManager {
pub fn new(device: &Device) -> Self {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
Self {
texture_bind_group_layout,
}
}
pub fn create_texture_from_bytes(
&self,
device: &Device,
queue: &Queue,
bytes: &[u8],
label: &str,
) -> BindGroup {
let diffuse_texture = Texture::from_bytes(device, queue, bytes, label).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &self.texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some(&format!("diffuse_bind_group_{}", label)),
})
}
pub fn get_texture_bind_group_layout(&self) -> &wgpu::BindGroupLayout {
&self.texture_bind_group_layout
}
}
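
A minimal usage sketch of TextureManager; the embedded image path is illustrative and not part of this commit:

let texture_manager = TextureManager::new(&device);
let diffuse_bind_group = texture_manager.create_texture_from_bytes(
    &device,
    &queue,
    include_bytes!("happy-tree.png"), // any image the `image` crate can decode
    "happy-tree",
);
// The matching layout is what goes into the pipeline layout's bind group layouts:
let texture_layout = texture_manager.get_texture_bind_group_layout();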

@@ -1,116 +0,0 @@
use anyhow::*;
use image::GenericImageView;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
}
pub fn from_image(
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
queue.write_texture(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
size,
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
pub fn create_depth_texture(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration, label: &str) -> Self {
let size = wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
};
let desc = wgpu::TextureDescriptor {
label: Some(label),
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::TEXTURE_BINDING,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
Self { texture, view, sampler }
}
}
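
A sketch of how the depth texture might be attached when beginning a render pass; the clear value and store flag are the usual tutorial defaults and an assumption here, not taken from this commit:

let depth_texture = Texture::create_depth_texture(&device, &config, "depth_texture");

let depth_stencil_attachment = wgpu::RenderPassDepthStencilAttachment {
    view: &depth_texture.view,
    depth_ops: Some(wgpu::Operations {
        load: wgpu::LoadOp::Clear(1.0), // clear to the far plane every frame
        store: true,
    }),
    stencil_ops: None,
};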

@@ -1,41 +0,0 @@
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
pub position: [f32; 3],
pub tex_coords: [f32; 2],
}
impl Vertex {
const ATTRIBS: [wgpu::VertexAttribute; 2] =
wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2];
pub fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<Self>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &Self::ATTRIBS,
}
}
// pub fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
// wgpu::VertexBufferLayout {
// array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
// step_mode: wgpu::VertexStepMode::Vertex,
// attributes: &[
// wgpu::VertexAttribute {
// offset: 0,
// shader_location: 0,
// format: wgpu::VertexFormat::Float32x3,
// },
// wgpu::VertexAttribute {
// offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
// shader_location: 1,
// format: wgpu::VertexFormat::Float32x2,
// }
// ]
// }
// }
}
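
When the render pipeline is created, this per-vertex layout sits alongside the per-instance layout from InstanceRaw; a sketch of the slice handed to wgpu::VertexState::buffers, assuming both types are in scope:

// Slot 0 advances per vertex, slot 1 per instance, matching the
// set_vertex_buffer(0, ..) / set_vertex_buffer(1, ..) calls in Mesh::prepare.
let buffers = [Vertex::desc(), InstanceRaw::desc()];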

@@ -1,74 +0,0 @@
use winit::{
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
pub struct Window {
title: &'static str,
}
impl Window {
pub fn new(title: &'static str) -> Self {
Self { title }
}
pub async fn run(self) {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title(self.title)
.build(&event_loop)
.unwrap();
let mut state = crate::State::new(&window).await;
event_loop.run(
move |event: Event<()>, _, control_flow: &mut ControlFlow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
}
Event::RedrawRequested(window_id) if window_id == window.id() => {
state.update();
match state.render() {
Ok(_) => {}
// Reconfigure the surface if lost
Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
// The system is out of memory, we should probably quit
Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
// All other errors (Outdated, Timeout) should be resolved by the next frame
Err(e) => eprintln!("{:?}", e),
}
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
},
);
}
}
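
Finally, a minimal sketch of launching the window; pollster is one common way to block on the async run() and is an assumption here, as is the window title:

fn main() {
    pollster::block_on(Window::new("wgpu tutorial").run());
}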