use std::sync::Arc;

use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
use vulkano::command_buffer::{
    AutoCommandBufferBuilder, CommandBufferUsage, RenderingAttachmentInfo, RenderingInfo,
};
use vulkano::device::physical::PhysicalDeviceType;
use vulkano::device::{
    Device, DeviceCreateInfo, DeviceExtensions, DeviceFeatures, Queue, QueueCreateInfo, QueueFlags,
};
use vulkano::instance::{Instance, InstanceCreateFlags, InstanceCreateInfo};
use vulkano::memory::allocator::StandardMemoryAllocator;
use vulkano::render_pass::{AttachmentLoadOp, AttachmentStoreOp};
use vulkano::swapchain::{acquire_next_image, Surface, SwapchainCreateInfo, SwapchainPresentInfo};
use vulkano::sync::GpuFuture;
use vulkano::{sync, Validated, Version, VulkanError, VulkanLibrary};
use winit::application::ApplicationHandler;
use winit::event::WindowEvent;
use winit::event_loop::{ActiveEventLoop, EventLoop};
use winit::window::{Window, WindowId};

use crate::renderer::render_context::RenderContext;
use crate::renderer::{window_size_dependent_setup, Scene};

pub struct App {
    instance: Arc<Instance>,
    device: Arc<Device>,
    queue: Arc<Queue>,
    memory_allocator: Arc<StandardMemoryAllocator>,
    command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
    rcx: Option<RenderContext>,
    scene: Option<Scene>,
}

impl App {
    pub fn new(event_loop: &EventLoop<()>) -> Self {
        let library = VulkanLibrary::new().unwrap();

        // The first step of any Vulkan program is to create an instance.
        //
        // When we create an instance, we have to pass a list of extensions that we want to
        // enable.
        //
        // All the window-drawing functionality is part of non-core extensions that we need to
        // enable manually. To do so, we ask `Surface` for the list of extensions required to
        // draw to a window.
        let required_extensions = Surface::required_extensions(event_loop).unwrap();

        // Now creating the instance.
        let instance = Instance::new(
            library,
            InstanceCreateInfo {
                // Enable enumerating devices that use non-conformant Vulkan implementations
                // (e.g. MoltenVK).
                flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
                enabled_extensions: required_extensions,
                enabled_layers: vec![
                    String::from("VK_LAYER_KHRONOS_validation"),
                    String::from("VK_LAYER_MANGOHUD_overlay_x86_64"),
                    String::from("VK_LAYER_NV_optimus"),
                ],
                ..Default::default()
            },
        )
        .unwrap();

        // Choose device extensions that we're going to use. In order to present images to a
        // surface, we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
        let mut device_extensions = DeviceExtensions {
            khr_swapchain: true,
            ..DeviceExtensions::empty()
        };

        // We then choose which physical device to use. First, we enumerate all the available
        // physical devices, then apply filters to narrow them down to those that can support
        // our needs.
        let (physical_device, queue_family_index) = instance
            .enumerate_physical_devices()
            .unwrap()
            .filter(|p| {
                // For this example, we require at least Vulkan 1.3, or a device that has the
                // `khr_dynamic_rendering` extension available.
                p.api_version() >= Version::V1_3 || p.supported_extensions().khr_dynamic_rendering
            })
            .filter(|p| {
                // Some devices may not support the extensions or features that your
                // application requires, or may report properties and limits that are not
                // sufficient for your application. These should be filtered out here.
                p.supported_extensions().contains(&device_extensions)
            })
            .filter_map(|p| {
                // For each physical device, we try to find a suitable queue family that will
                // execute our draw commands.
                //
                // Devices can provide multiple queues to run commands in parallel (for
                // example a draw queue and a compute queue), similar to CPU threads.
                // This is something you have to manage manually in Vulkan. Queues of the
                // same type belong to the same queue family.
                //
                // Here, we look for a single queue family that is suitable for our purposes.
                // In a real-world application, you may want to use a separate dedicated
                // transfer queue to handle data transfers in parallel with graphics
                // operations. You may also need a separate queue for compute operations, if
                // your application uses those.
                p.queue_family_properties()
                    .iter()
                    .enumerate()
                    .position(|(i, q)| {
                        // We select a queue family that supports graphics operations. When
                        // drawing to a window surface, as we do in this example, we also need
                        // to check that queues in this queue family are capable of presenting
                        // images to the surface.
                        q.queue_flags.intersects(QueueFlags::GRAPHICS)
                            && p.presentation_support(i as u32, event_loop).unwrap()
                    })
                    // The code here searches for the first queue family that is suitable. If
                    // none is found, `None` is returned to `filter_map`, which disqualifies
                    // this physical device.
                    .map(|i| (p, i as u32))
            })
            // All the physical devices that pass the filters above are suitable for the
            // application. However, not every device is equal; some are preferred over
            // others. Now, we assign each physical device a score, and pick the device with
            // the lowest ("best") score.
            //
            // In this example, we simply select the best-scoring device to use in the
            // application. In a real-world setting, you may want to use the best-scoring
            // device only as a "default" or "recommended" device, and let the user choose
            // the device themselves.
            .min_by_key(|(p, _)| {
                // We assign a lower score to device types that are likely to be faster/better.
                match p.properties().device_type {
                    PhysicalDeviceType::DiscreteGpu => 0,
                    PhysicalDeviceType::IntegratedGpu => 1,
                    PhysicalDeviceType::VirtualGpu => 2,
                    PhysicalDeviceType::Cpu => 3,
                    PhysicalDeviceType::Other => 4,
                    _ => 5,
                }
            })
            .expect("no suitable physical device found");

        // Print some debug info.
        println!(
            "Using device: {} (type: {:?})",
            physical_device.properties().device_name,
            physical_device.properties().device_type,
        );

        // If the selected device doesn't have Vulkan 1.3 available, then we need to enable
        // the `khr_dynamic_rendering` extension manually. This extension was promoted to the
        // core API in Vulkan 1.3, so on 1.3 and later it is always available and does not
        // need to be enabled. We can be sure that this extension will be available on the
        // selected physical device, because we filtered out unsuitable devices in the device
        // selection code above.
        if physical_device.api_version() < Version::V1_3 {
            device_extensions.khr_dynamic_rendering = true;
        }

        // Now initializing the device. This is probably the most important object of Vulkan.
        //
        // An iterator of created queues is returned by the function alongside the device.
        let (device, mut queues) = Device::new(
            // Which physical device to connect to.
            physical_device,
            DeviceCreateInfo {
                // The list of queues that we are going to use. Here we only use one queue,
                // from the previously chosen queue family.
                queue_create_infos: vec![QueueCreateInfo {
                    queue_family_index,
                    ..Default::default()
                }],
                // A list of optional features and extensions that our program needs to work
                // correctly. Some parts of the Vulkan specs are optional and must be enabled
                // manually at device creation. In this example the only things we are going
                // to need are the `khr_swapchain` extension that allows us to draw to a
                // window, and `khr_dynamic_rendering` if we don't have Vulkan 1.3 available.
                enabled_extensions: device_extensions,
                // In order to render with Vulkan 1.3's dynamic rendering, we need to enable
                // it here. Otherwise, we are only allowed to render with a render pass
                // object, as in the standard triangle example. The feature is required to be
                // supported by the device if it supports Vulkan 1.3 and higher, or if the
                // `khr_dynamic_rendering` extension is available, so we don't need to check
                // for support.
                enabled_features: DeviceFeatures {
                    dynamic_rendering: true,
                    ..DeviceFeatures::empty()
                },
                ..Default::default()
            },
        )
        .unwrap();

        // Since we can request multiple queues, the `queues` variable is in fact an iterator.
        // We only use one queue in this example, so we just retrieve the first and only
        // element of the iterator.
        let queue = queues.next().unwrap();

        let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));

        // Before we can start creating and recording command buffers, we need a way of
        // allocating them. Vulkano provides a command buffer allocator, which manages raw
        // Vulkan command pools underneath and provides a safe interface for them.
        let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
            device.clone(),
            Default::default(),
        ));

        Self {
            instance,
            device,
            queue,
            memory_allocator,
            command_buffer_allocator,
            rcx: None,
            scene: None,
        }
    }
}

impl ApplicationHandler for App {
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        let window_attributes = Window::default_attributes()
            .with_title("Rust ASH Test")
            .with_inner_size(winit::dpi::PhysicalSize::new(
                f64::from(800),
                f64::from(600),
            ));
        let window = Arc::new(event_loop.create_window(window_attributes).unwrap());
        let surface = Surface::from_window(self.instance.clone(), window.clone()).unwrap();

        self.rcx = Some(RenderContext::new(window, surface, &self.device));
        self.scene = Some(Scene::initialize(
            &self.device,
            &self.rcx.as_ref().unwrap().swapchain,
            &self.memory_allocator,
        ));
    }

    fn window_event(&mut self, event_loop: &ActiveEventLoop, _id: WindowId, event: WindowEvent) {
        let rcx = self.rcx.as_mut().unwrap();

        match event {
            WindowEvent::CloseRequested => {
                log::debug!("The close button was pressed; stopping");
                event_loop.exit();
            }
            WindowEvent::Resized(_) => {
                rcx.recreate_swapchain = true;
            }
            WindowEvent::RedrawRequested => {
                let window_size = rcx.window.inner_size();

                // Do not draw the frame when the screen size is zero. On Windows, this can
                // occur when minimizing the application.
                if window_size.width == 0 || window_size.height == 0 {
                    return;
                }

                // It is important to call this function from time to time, otherwise
                // resources will keep accumulating and you will eventually reach an out of
                // memory error. Calling this function polls various fences in order to
                // determine what the GPU has already processed, and frees the resources that
                // are no longer needed.
                rcx.previous_frame_end.as_mut().unwrap().cleanup_finished();

                // Whenever the window resizes we need to recreate everything dependent on the
                // window size. In this example that includes the swapchain, the attachment
                // image views and the dynamic state viewport.
                if rcx.recreate_swapchain {
                    let (new_swapchain, new_images) = rcx
                        .swapchain
                        .recreate(SwapchainCreateInfo {
                            image_extent: window_size.into(),
                            ..rcx.swapchain.create_info()
                        })
                        .expect("failed to recreate swapchain");

                    rcx.swapchain = new_swapchain;

                    // Now that we have new swapchain images, we must create new image views
                    // from them as well.
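                    // (`window_size_dependent_setup` is defined in `crate::renderer`; based
                    // on its use here it is assumed to build one image view per swapchain
                    // image.)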
                    rcx.attachment_image_views = window_size_dependent_setup(&new_images);

                    rcx.viewport.extent = window_size.into();

                    rcx.recreate_swapchain = false;
                }

                // Before we can draw on the output, we have to *acquire* an image from the
                // swapchain. If no image is available (which happens if you submit draw
                // commands too quickly), then the function will block. This operation returns
                // the index of the image that we are allowed to draw upon.
                //
                // The second parameter is an optional timeout after which the function call
                // will return an error.
                let (image_index, suboptimal, acquire_future) = match acquire_next_image(
                    rcx.swapchain.clone(),
                    None,
                )
                .map_err(Validated::unwrap)
                {
                    Ok(r) => r,
                    Err(VulkanError::OutOfDate) => {
                        rcx.recreate_swapchain = true;
                        return;
                    }
                    Err(e) => panic!("failed to acquire next image: {e}"),
                };

                // `acquire_next_image` can be successful, but suboptimal. This means that the
                // swapchain image will still work, but it may not display correctly. With
                // some drivers this can happen when the window resizes, but it may not cause
                // the swapchain to become out of date.
                if suboptimal {
                    rcx.recreate_swapchain = true;
                }

                // In order to draw, we have to record a *command buffer*. The command buffer
                // object holds the list of commands that are going to be executed.
                //
                // Recording a command buffer is an expensive operation (usually a few hundred
                // microseconds), but it is known to be a hot path in the driver and is
                // expected to be optimized.
                //
                // Note that we have to pass a queue family when we create the command buffer.
                // The command buffer will only be executable on that given queue family.
                let mut builder = AutoCommandBufferBuilder::primary(
                    self.command_buffer_allocator.clone(),
                    self.queue.queue_family_index(),
                    CommandBufferUsage::OneTimeSubmit,
                )
                .unwrap();

                builder
                    // Before we can draw, we have to *enter a render pass*. We specify which
                    // attachments we are going to use for rendering here, which needs to
                    // match what was previously specified when creating the pipeline.
                    .begin_rendering(RenderingInfo {
                        // As before, we specify one color attachment, but now we specify the
                        // image view to use as well as how it should be used.
                        color_attachments: vec![Some(RenderingAttachmentInfo {
                            // `Clear` means that we ask the GPU to clear the content of this
                            // attachment at the start of rendering.
                            load_op: AttachmentLoadOp::Clear,
                            // `Store` means that we ask the GPU to store the rendered output
                            // in the attachment image. We could also ask it to discard the
                            // result.
                            store_op: AttachmentStoreOp::Store,
                            // The value to clear the attachment with. Here we clear it with
                            // an opaque black color.
                            //
                            // Only attachments that have `AttachmentLoadOp::Clear` are
                            // provided with clear values; any others should use `None` as the
                            // clear value.
                            clear_value: Some([0.0, 0.0, 0.0, 1.0].into()),
                            ..RenderingAttachmentInfo::image_view(
                                // We specify the image view corresponding to the currently
                                // acquired swapchain image, to use for this attachment.
                                rcx.attachment_image_views[image_index as usize].clone(),
                            )
                        })],
                        ..Default::default()
                    })
                    .unwrap()
                    // We are now inside the first subpass of the render pass.
                    //
                    // TODO: Document state setting and how it affects subsequent draw
                    // commands.
                    .set_viewport(0, [rcx.viewport.clone()].into_iter().collect())
                    .unwrap();

                if let Some(scene) = self.scene.as_ref() {
                    scene.render(&mut builder);
                }

                builder
                    // We leave the render pass.
                    .end_rendering()
                    .unwrap();

                // Finish recording the command buffer by calling `build`.
                let command_buffer = builder.build().unwrap();

                let future = rcx
                    .previous_frame_end
                    .take()
                    .unwrap()
                    .join(acquire_future)
                    .then_execute(self.queue.clone(), command_buffer)
                    .unwrap()
                    // The color output is now expected to contain our scene. But in order to
                    // show it on the screen, we have to *present* the image by calling
                    // `then_swapchain_present`.
                    //
                    // This function does not actually present the image immediately. Instead
                    // it submits a present command at the end of the queue. This means that
                    // it will only be presented once the GPU has finished executing the
                    // command buffer that draws the scene.
                    .then_swapchain_present(
                        self.queue.clone(),
                        SwapchainPresentInfo::swapchain_image_index(
                            rcx.swapchain.clone(),
                            image_index,
                        ),
                    )
                    .then_signal_fence_and_flush();

                match future.map_err(Validated::unwrap) {
                    Ok(future) => {
                        rcx.previous_frame_end = Some(future.boxed());
                    }
                    Err(VulkanError::OutOfDate) => {
                        rcx.recreate_swapchain = true;
                        rcx.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
                    }
                    Err(e) => {
                        println!("failed to flush future: {e}");
                        rcx.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
                    }
                }
            }
            _ => {}
        }
    }

    fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {
        let rcx = self.rcx.as_mut().unwrap();
        rcx.window.request_redraw();
    }
}