// artyversary/crates/subtitled15/src/main.rs — nannou wgpu example (128 lines, 4.2 KiB, Rust)

use nannou::prelude::*;
/// Per-window GPU state, built once in `model` and reused every frame.
struct Model {
    // The pipeline the pass draws with; both triangles share it.
    render_pipeline: wgpu::RenderPipeline,
    // Empty bind group matching the pipeline's (empty) layout.
    bind_group: wgpu::BindGroup,
    // One vertex buffer per triangle; each holds three `Vertex` values.
    vertex_buffer1: wgpu::Buffer,
    vertex_buffer2: wgpu::Buffer,
}
// The vertex type that we will use to represent a point on our triangle.
// NOTE: `#[repr(C)]` guarantees a predictable field layout — this is what
// makes the byte reinterpretation in `vertices_as_bytes` sound, and it must
// match the vertex layout declared via `vertex_attr_array![0 => Float32x2]`.
#[repr(C)]
#[derive(Clone, Copy)]
struct Vertex {
    // 2D position in clip space, range [-1, 1] on both axes.
position: [f32; 2],
}
// First triangle: covers the upper-left half of the clip-space square,
// wound counter-clockwise.
const VERTICES1: [Vertex; 3] = [
    Vertex { position: [-1.0, -1.0] },
    Vertex { position: [1.0, 1.0] },
    Vertex { position: [-1.0, 1.0] },
];
// Second triangle: covers the lower-right half of the clip-space square,
// wound counter-clockwise. Together with VERTICES1 it fills the window.
const VERTICES2: [Vertex; 3] = [
    Vertex { position: [1.0, -1.0] },
    Vertex { position: [1.0, 1.0] },
    Vertex { position: [-1.0, -1.0] },
];
/// Entry point: construct the nannou app with `model` as the state builder
/// and hand control over to the event loop.
fn main() {
    let app_builder = nannou::app(model);
    app_builder.run();
}
/// Build the per-window GPU state: create the window, load the shader
/// modules, upload both vertex buffers, and assemble the (empty) bind group
/// and the render pipeline. Runs once at startup.
fn model(app: &App) -> Model {
    let w_id = app.new_window().size(512, 512).view(view).build().unwrap();
    // The gpu device associated with the window's swapchain.
    let window = app.window(w_id).unwrap();
    let device = window.swap_chain_device();
    let format = Frame::TEXTURE_FORMAT;
    let sample_count = window.msaa_samples();
    // Load the pre-compiled SPIR-V shader modules.
    let vs_mod = wgpu::shader_from_spirv_bytes(device, include_bytes!("../shaders/vert.spv"));
    let fs_mod = wgpu::shader_from_spirv_bytes(device, include_bytes!("../shaders/frag.spv"));
    // Both vertex buffers are created identically, so factor the boilerplate
    // into a local helper instead of duplicating the descriptor twice.
    let create_vertex_buffer = |vertices: &[Vertex]| {
        device.create_buffer_init(&BufferInitDescriptor {
            label: None,
            contents: vertices_as_bytes(vertices),
            usage: wgpu::BufferUsage::VERTEX,
        })
    };
    let vertex_buffer1 = create_vertex_buffer(&VERTICES1[..]);
    let vertex_buffer2 = create_vertex_buffer(&VERTICES2[..]);
    // Create the render pipeline. The bind group layout is empty because the
    // shaders take no uniforms or textures.
    let bind_group_layout = wgpu::BindGroupLayoutBuilder::new().build(device);
    let bind_group = wgpu::BindGroupBuilder::new().build(device, &bind_group_layout);
    let pipeline_layout = wgpu::create_pipeline_layout(device, None, &[&bind_group_layout], &[]);
    let render_pipeline = wgpu::RenderPipelineBuilder::from_layout(&pipeline_layout, &vs_mod)
        .fragment_shader(&fs_mod)
        .color_format(format)
        // Location 0 carries the two-component f32 position attribute,
        // matching `Vertex::position`.
        .add_vertex_buffer::<Vertex>(&wgpu::vertex_attr_array![0 => Float32x2])
        .sample_count(sample_count)
        .build(device);
    Model {
        bind_group,
        vertex_buffer1,
        vertex_buffer2,
        render_pipeline,
    }
}
// Draw the state of your `Model` into the given `Frame` here.
fn view(_app: &App, model: &Model, frame: Frame) {
    // Using this we will encode commands that will be submitted to the GPU.
    let mut encoder = frame.command_encoder();
    // The render pass can be thought of as a single large command consisting
    // of sub-commands. Here we begin a render pass that outputs to the
    // frame's texture, then record the bind group, the pipeline, and one
    // draw call per vertex buffer.
    let mut render_pass = wgpu::RenderPassBuilder::new()
        .color_attachment(frame.texture_view(), |color| color)
        .begin(&mut encoder);
    // Bind group and pipeline state persist for the lifetime of the pass,
    // so setting them once is enough — the original code re-issued both
    // before the second draw to no effect.
    render_pass.set_bind_group(0, &model.bind_group, &[]);
    render_pass.set_pipeline(&model.render_pipeline);
    // Draw each triangle: the full vertex range, a single instance.
    let draws = [
        (&model.vertex_buffer1, VERTICES1.len() as u32),
        (&model.vertex_buffer2, VERTICES2.len() as u32),
    ];
    for &(buffer, vertex_count) in draws.iter() {
        render_pass.set_vertex_buffer(0, buffer.slice(..));
        render_pass.draw(0..vertex_count, 0..1);
    }
    // Now we're done! The commands we added will be submitted after `view` completes.
}
// See the `nannou::wgpu::bytes` documentation for why this is necessary.
// Reinterprets a slice of vertices as raw bytes so it can be uploaded to a
// GPU buffer via `create_buffer_init`.
fn vertices_as_bytes(data: &[Vertex]) -> &[u8] {
    // SAFETY: `Vertex` is `#[repr(C)]` and contains only plain `f32`s, so
    // every byte of the slice is initialized and has a defined layout, which
    // is what the `nannou::wgpu::bytes` contract requires.
unsafe { wgpu::bytes::from_slice(data) }
}