finished gif showcase

pull/17/head
Ben Hansen 4 years ago
parent f9f34459cc
commit be23faee06

4
.gitignore vendored

@ -2,4 +2,6 @@ node_modules/
target/
.vscode/
image.png
/image.png
/output*.*
output/

15
Cargo.lock generated

@ -375,6 +375,19 @@ name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "framework"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
@ -545,6 +558,8 @@ version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"framework 0.1.0",
"gif 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",

@ -0,0 +1,16 @@
[package]
name = "framework"
version = "0.1.0"
authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"

@ -0,0 +1,74 @@
use std::mem;
/// Conversion from a higher-level CPU-side type into the plain `Copy`
/// representation (`Self::Output`) that is uploaded into a GPU buffer.
pub trait ToRaw {
    /// The raw, tightly-packed type stored in the GPU buffer.
    type Output;
    /// Produces the raw value for this instance.
    fn to_raw(&self) -> Self::Output;
}
/// A GPU buffer paired with the CPU-side copy of the raw data it was
/// filled from.
pub struct RawBuffer<R: Copy + 'static> {
    // The GPU-side buffer object.
    pub buffer: wgpu::Buffer,
    // CPU-side copy of the raw elements (may be empty for GPU-only buffers).
    pub data: Vec<R>,
}
impl<R: Copy> RawBuffer<R> {
    /// Converts each element of `data` to its raw form and uploads the
    /// result into a new GPU buffer.
    pub fn from_slice<T: ToRaw<Output=R>>(device: &wgpu::Device, data: &[T], usage: wgpu::BufferUsage) -> Self {
        let raw_data = data.iter().map(ToRaw::to_raw).collect::<Vec<R>>();
        Self::from_vec(device, raw_data, usage)
    }

    /// Creates a GPU buffer pre-filled with `data`, keeping the CPU-side copy.
    pub fn from_vec(device: &wgpu::Device, data: Vec<R>, usage: wgpu::BufferUsage) -> Self {
        let buffer = device
            .create_buffer_mapped(data.len(), usage)
            .fill_from_slice(&data);
        Self::from_parts(buffer, data, usage)
    }

    /// Wraps an already-created buffer and its CPU-side data.
    ///
    /// `_usage` is intentionally unused: the usage flags are baked into
    /// `buffer` at creation time. The parameter is kept (underscored) so the
    /// signature stays symmetric with the other constructors and existing
    /// positional call sites keep compiling.
    pub fn from_parts(buffer: wgpu::Buffer, data: Vec<R>, _usage: wgpu::BufferUsage) -> Self {
        Self { buffer, data }
    }

    /// Size in bytes of the CPU-side data (zero if `data` is empty, even if
    /// the GPU buffer itself is larger).
    pub fn buffer_size(&self) -> wgpu::BufferAddress {
        (self.data.len() * mem::size_of::<R>()) as wgpu::BufferAddress
    }
}
/// A typed buffer: the high-level values (`data`) plus the [`RawBuffer`]
/// holding their `ToRaw` representation on the GPU.
pub struct Buffer<U: ToRaw<Output=R>, R: Copy + 'static> {
    // High-level CPU-side values the raw buffer was built from.
    pub data: Vec<U>,
    // GPU buffer + raw CPU copy.
    pub raw_buffer: RawBuffer<R>,
    // Usage flags recorded at construction; not read anywhere in this file.
    usage: wgpu::BufferUsage,
}
impl<U: ToRaw<Output=R>, R: Copy + 'static> Buffer<U, R> {
    /// A uniform buffer holding a single value, updatable via buffer copies.
    pub fn uniform(device: &wgpu::Device, datum: U) -> Self {
        Self::with_usage(
            device,
            vec![datum],
            wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        )
    }

    /// A storage buffer holding `data`, updatable via buffer copies.
    pub fn storage(device: &wgpu::Device, data: Vec<U>) -> Self {
        Self::with_usage(
            device,
            data,
            wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
        )
    }

    /// A mappable staging buffer sized to mirror `other`'s raw data.
    /// No CPU-side data is attached; only the GPU allocation is created.
    pub fn staging(device: &wgpu::Device, other: &Self) -> Self {
        let usage = wgpu::BufferUsage::COPY_SRC
            | wgpu::BufferUsage::MAP_READ
            | wgpu::BufferUsage::MAP_WRITE;
        let size = other.raw_buffer.buffer_size();
        let staging = device.create_buffer(&wgpu::BufferDescriptor { size, usage });
        let raw = RawBuffer::from_parts(staging, Vec::new(), usage);
        Self::from_parts(Vec::new(), raw, usage)
    }

    /// Uploads `data` (via `ToRaw`) into a new buffer with the given usage.
    pub fn with_usage(device: &wgpu::Device, data: Vec<U>, usage: wgpu::BufferUsage) -> Self {
        let raw = RawBuffer::from_slice(device, &data, usage);
        Self::from_parts(data, raw, usage)
    }

    /// Assembles a `Buffer` from already-constructed pieces.
    pub fn from_parts(data: Vec<U>, raw_buffer: RawBuffer<R>, usage: wgpu::BufferUsage) -> Self {
        Self { data, raw_buffer, usage }
    }
}

@ -0,0 +1,25 @@
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
/// A simple look-at perspective camera.
pub struct Camera {
    eye: cgmath::Point3<f32>,    // where the camera sits, in world space
    target: cgmath::Point3<f32>, // point the camera looks at
    up: cgmath::Vector3<f32>,    // up direction for the view basis
    aspect: f32,                 // viewport width / height
    fovy: f32,                   // vertical field of view, in degrees (wrapped in Deg when used)
    znear: f32,                  // near clip plane distance
    zfar: f32,                   // far clip plane distance
}
impl Camera {
    /// Builds the combined view-projection matrix: a look-at view from
    /// `eye` toward `target`, composed with a perspective projection using
    /// `fovy` (degrees), `aspect`, and the near/far clip planes.
    pub fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
        let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
        let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
        // Tail expression instead of an explicit `return` (idiomatic Rust).
        proj * view
    }
}

@ -0,0 +1,9 @@
// Internal modules of the framework crate.
mod buffer;
mod camera;
mod model;
mod texture;

// Re-export everything at the crate root so consumers can write
// `framework::Texture`, `framework::Camera`, etc. without knowing the
// module layout.
pub use buffer::*;
pub use camera::*;
pub use model::*;
pub use texture::*;

@ -1,11 +1,15 @@
use image::GenericImageView;
use std::path::Path;
use std::mem;
use crate::buffer;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
pub desc: wgpu::TextureDescriptor,
}
impl Texture {
@ -22,6 +26,10 @@ impl Texture {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
Self::from_descriptor(device, desc)
}
pub fn from_descriptor(device: &wgpu::Device, desc: wgpu::TextureDescriptor) -> Self {
let texture = device.create_texture(&desc);
let view = texture.create_default_view();
@ -37,7 +45,7 @@ impl Texture {
compare_function: wgpu::CompareFunction::Always,
});
Self { texture, view, sampler }
Self { texture, view, sampler, desc }
}
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
@ -54,7 +62,7 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
let desc = wgpu::TextureDescriptor {
size,
array_layer_count: 1,
mip_level_count: 1,
@ -62,7 +70,8 @@ impl Texture {
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
};
let texture = device.create_texture(&desc);
let buffer = device
.create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
@ -101,6 +110,24 @@ impl Texture {
compare_function: wgpu::CompareFunction::Always,
});
Ok((Self { texture, view, sampler }, cmd_buffer))
Ok((Self { texture, view, sampler, desc }, cmd_buffer))
}
    /// Creates a CPU-mappable buffer sized to hold this texture's pixels as
    /// `[f32; 4]` RGBA values (16 bytes per pixel).
    ///
    /// NOTE(review): the textures in this file are created as
    /// `Rgba8UnormSrgb` (4 bytes per pixel) — confirm that whatever copies
    /// into this buffer really produces `[f32; 4]` elements.
    pub fn prepare_buffer_rgba(&self, device: &wgpu::Device) -> buffer::RawBuffer<[f32;4]> {
        // Total pixel count = width * height * depth.
        let num_pixels = self.desc.size.width * self.desc.size.height * self.desc.size.depth;
        let buffer_size = num_pixels * mem::size_of::<[f32;4]>() as u32;
        // COPY_DST so the texture can be copied in; MAP_READ so the CPU can read it back.
        let buffer_usage = wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ;
        let buffer_desc = wgpu::BufferDescriptor {
            size: buffer_size as wgpu::BufferAddress,
            usage: buffer_usage,
        };
        let buffer = device.create_buffer(&buffer_desc);
        // Empty CPU-side Vec with capacity reserved; not filled here.
        let data = Vec::with_capacity(num_pixels as usize);
        let raw_buffer = buffer::RawBuffer::from_parts(buffer, data, buffer_usage);
        raw_buffer
    }
}
}

@ -13,4 +13,7 @@ glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"
wgpu = "0.4.0"
gif = "0.10.3"
framework = { path = "../framework" }

@ -1,4 +0,0 @@
fn main() {
println!("Hello, world!");
}

@ -1,5 +0,0 @@
mod model;
mod texture;
pub use model::*;
pub use texture::*;

@ -1,4 +0,0 @@
pub trait Loopable {
pub update(&mut self) -> Option<Vec<wgpu::CommandBuffer>>,
pub render(&mut self) -> Option<Vec<wgpu::CommandBuffer>>,
}

@ -0,0 +1,203 @@
extern crate framework;
use std::mem;
use std::sync::{Arc, Mutex};
fn main() {
    // Windowless setup: request a default adapter/device with no surface.
    let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
    let (device, mut queue) = adapter.request_device(&Default::default());

    // One clear color per GIF frame; ramps up then back down so the
    // looping animation is seamless.
    let colors = [
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.2],
        [0.0, 0.2, 0.2],
        [0.2, 0.2, 0.2],
        [0.2, 0.2, 0.2],
        [0.0, 0.2, 0.2],
        [0.0, 0.0, 0.2],
        [0.0, 0.0, 0.0],
    ];

    // Create a texture to render to.
    let texture_size = 256u32;
    let rt_desc = wgpu::TextureDescriptor {
        size: wgpu::Extent3d {
            width: texture_size,
            height: texture_size,
            depth: 1,
        },
        array_layer_count: colors.len() as u32,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        // COPY_SRC so the rendered pixels can be copied out to a buffer.
        usage: wgpu::TextureUsage::COPY_SRC
            | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
    };
    let render_target = framework::Texture::from_descriptor(&device, rt_desc);

    // Create a buffer to copy the texture to so we can read the data back.
    let pixel_size = mem::size_of::<[u8;4]>() as u32;
    let buffer_size = (pixel_size * texture_size * texture_size) as wgpu::BufferAddress;
    let buffer_desc = wgpu::BufferDescriptor {
        size: buffer_size,
        usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ,
    };
    let output_buffer = device.create_buffer(&buffer_desc);

    // A simple render pipeline that draws a triangle.
    let render_pipeline = create_render_pipeline(&device, &render_target);

    // Frames are collected in an Arc<Mutex<_>> because the map_read_async
    // callback below needs its own owned handle to push into.
    let frames = Arc::new(Mutex::new(Vec::new()));

    for c in &colors {
        let mut encoder = device.create_command_encoder(&Default::default());

        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            color_attachments: &[
                wgpu::RenderPassColorAttachmentDescriptor {
                    attachment: &render_target.view,
                    resolve_target: None,
                    load_op: wgpu::LoadOp::Clear,
                    store_op: wgpu::StoreOp::Store,
                    // Modify the clear color so the gif changes per frame.
                    clear_color: wgpu::Color {
                        r: c[0],
                        g: c[1],
                        b: c[2],
                        a: 1.0,
                    }
                }
            ],
            depth_stencil_attachment: None,
        });

        rpass.set_pipeline(&render_pipeline);
        rpass.draw(0..3, 0..1);

        // End the render pass so the encoder can record the copy below.
        drop(rpass);

        encoder.copy_texture_to_buffer(
            wgpu::TextureCopyView {
                texture: &render_target.texture,
                mip_level: 0,
                array_layer: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            wgpu::BufferCopyView {
                buffer: &output_buffer,
                offset: 0,
                // Rows are tightly packed: bytes per row = pixel size * width.
                row_pitch: pixel_size * texture_size,
                image_height: texture_size,
            },
            render_target.desc.size
        );

        queue.submit(&[encoder.finish()]);

        // Read the frame back; the callback fires once the buffer is mapped.
        let frames_clone = frames.clone();
        output_buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[u8]>| {
            match result {
                Ok(mapping) => {
                    let data = Vec::from(mapping.data);
                    let mut f = frames_clone.lock().unwrap();
                    (*f).push(data);
                }
                _ => { eprintln!("Something went wrong") }
            }
        });

        // Wait for the GPU to finish so the mapping callback runs before the
        // next iteration reuses output_buffer.
        device.poll(true);
    }

    // All frames recorded; unwrap the Arc and the Mutex to get the Vec back.
    let mut frames = Arc::try_unwrap(frames)
        .unwrap()
        .into_inner()
        .unwrap();

    save_gif("output.gif", &mut frames, 10, texture_size as u16).unwrap();
}
/// Encodes `frames` (RGBA bytes, `size` x `size`) into an infinitely
/// looping GIF at `path`, using the gif crate's speed-based quantization.
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
    use gif::{Encoder, Frame, Repeat, SetParameter};

    let mut file = std::fs::File::create(path)?;
    let mut encoder = Encoder::new(&mut file, size, size, &[])?;
    encoder.set(Repeat::Infinite)?;

    for pixels in frames.iter_mut() {
        let frame = Frame::from_rgba_speed(size, size, pixels, speed);
        encoder.write_frame(&frame)?;
    }
    Ok(())
}
// The image crate currently doesn't support looping gifs, so this version
// is unused. It's kept around in case `image` adds looping support.
#[allow(unused)]
fn save_gif_old(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
    let file = std::fs::File::create(path)?;
    let mut encoder = image::gif::Encoder::new(file);
    for pixels in frames.iter_mut() {
        encoder.encode(&image::gif::Frame::from_rgba_speed(size, size, pixels, speed))?;
    }
    Ok(())
}
/// Builds the triangle pipeline used by `main`: compiles the GLSL shaders
/// to SPIR-V at runtime and configures a pipeline with no bind groups and
/// no vertex buffers, rendering into `target`'s format.
fn create_render_pipeline(device: &wgpu::Device, target: &framework::Texture) -> wgpu::RenderPipeline {
    // Compile GLSL -> SPIR-V at runtime and create the shader modules.
    let vs_src = include_str!("res/shader.vert");
    let fs_src = include_str!("res/shader.frag");
    let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
    let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
    let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
    let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
    let vs_module = device.create_shader_module(&vs_data);
    let fs_module = device.create_shader_module(&fs_data);

    // No bindings: the vertex shader generates its positions from
    // gl_VertexIndex (see res/shader.vert).
    let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        bind_group_layouts: &[],
    });
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        layout: &render_pipeline_layout,
        vertex_stage: wgpu::ProgrammableStageDescriptor {
            module: &vs_module,
            entry_point: "main",
        },
        fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
            module: &fs_module,
            entry_point: "main",
        }),
        rasterization_state: Some(wgpu::RasterizationStateDescriptor {
            front_face: wgpu::FrontFace::Ccw,
            cull_mode: wgpu::CullMode::Back,
            depth_bias: 0,
            depth_bias_slope_scale: 0.0,
            depth_bias_clamp: 0.0,
        }),
        primitive_topology: wgpu::PrimitiveTopology::TriangleList,
        color_states: &[
            wgpu::ColorStateDescriptor {
                // Match the render target's format so the pipeline is
                // compatible with the texture we render into.
                format: target.desc.format,
                color_blend: wgpu::BlendDescriptor::REPLACE,
                alpha_blend: wgpu::BlendDescriptor::REPLACE,
                write_mask: wgpu::ColorWrite::ALL,
            },
        ],
        depth_stencil_state: None,
        // No index or vertex buffers are bound; draw() relies on
        // gl_VertexIndex alone.
        index_format: wgpu::IndexFormat::Uint16,
        vertex_buffers: &[],
        sample_count: 1,
        sample_mask: !0,
        alpha_to_coverage_enabled: false,
    });
    render_pipeline
}

@ -0,0 +1,35 @@
#version 450

// Blinn-Phong-style lighting fragment shader: ambient + diffuse (from the
// sampled texture) + specular.

layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;

layout(location=0) out vec4 f_color;

layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;

layout(set=1, binding=2)
uniform Lights {
    vec3 u_light; // light direction (normalized before use below)
};

const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);
const float shininess = 32;

void main() {
    vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
    // Lambertian term, clamped so back-facing surfaces get no light.
    float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);

    vec3 camera_dir = normalize(-v_position);

    // This is an approximation of the actual reflection vector, aka what
    // angle you have to look at the object to be blinded by the light
    // (the Blinn half-vector).
    vec3 half_direction = normalize(normalize(u_light) + camera_dir);
    float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);

    f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
}

@ -0,0 +1,34 @@
#version 450

// Instanced model vertex shader: transforms positions by a per-instance
// model matrix and the shared view-projection matrix.

layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;

layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;

layout(set=1, binding=0)
uniform Uniforms {
    mat4 u_view_proj;
};

layout(set=1, binding=1)
buffer Instances {
    mat4 s_models[]; // one model matrix per instance
};

void main() {
    v_tex_coords = a_tex_coords;
    mat4 model = s_models[gl_InstanceIndex];

    // Rotate the normals with respect to the model, ignoring scaling
    // (inverse-transpose of the upper 3x3).
    mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
    v_normal = normal_matrix * a_normal;

    gl_Position = u_view_proj * model * vec4(a_position, 1.0);

    // Get the position relative to the view for the lighting calc
    // (perspective divide applied manually).
    v_position = gl_Position.xyz / gl_Position.w;
}

@ -0,0 +1,7 @@
#version 450

layout(location=0) out vec4 f_color;

// Fills every fragment with a constant color; the animation in the GIF
// comes from the per-frame clear color, not from this shader.
void main() {
    f_color = vec4(0.3, 0.2, 0.1, 1.0);
}

@ -0,0 +1,11 @@
#version 450

// A hard-coded triangle in clip space; no vertex buffers are bound, so
// gl_VertexIndex (0..2) selects the position directly.
const vec2 positions[3] = vec2[3](
    vec2(0.0, -0.5),
    vec2(-0.5, 0.5),
    vec2(0.5, 0.5)
);

void main() {
    gl_Position = vec4(positions[gl_VertexIndex], 0.0, 1.0);
}

@ -37,9 +37,17 @@ module.exports = {
collapsable: false,
children: [
'/intermediate/tutorial10-lighting/',
'/intermediate/windowless/',
],
},
{
title: 'Showcase',
collapsable: true,
children: [
'/showcase/',
'/showcase/windowless/',
'/showcase/gifs/',
]
},
'/news/'
]
}

@ -1 +1 @@
Subproject commit bb7e290ca411ae49b3eb2e20b3dcde36af454ae1
Subproject commit 50afb738454fd9a4d69e42bea313f79386300908

@ -0,0 +1,3 @@
# Foreword
The articles in this section are not meant to be tutorials. They are showcases of the various things you can do with `wgpu`. I won't go over specifics of creating `wgpu` resources, as those will be covered elsewhere. The code for these examples is still available however, and will be accessible on Github.

@ -0,0 +1,163 @@
# Creating gifs
Sometimes you've created a nice simulation/animation, and you want to show it off. While you could record a video, that might be overkill if you just want something to post on Twitter. That's what [GIF](https://en.wikipedia.org/wiki/GIF)s are for.
Also, GIF is pronounced GHIF, not JIF as JIF is not only [peanut butter](https://en.wikipedia.org/wiki/Jif_%28peanut_butter%29), it is also a [different image format](https://filext.com/file-extension/JIF).
## How are we making the GIF?
We're going to create a function using the [gif crate](https://docs.rs/gif/) to encode the actual image.
```rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
use gif::{Frame, Encoder, Repeat, SetParameter};
let mut image = std::fs::File::create(path)?;
let mut encoder = Encoder::new(&mut image, size, size, &[])?;
encoder.set(Repeat::Infinite)?;
for mut frame in frames {
encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
}
Ok(())
}
```
<!-- image-rs doesn't currently support looping, so I switched to gif -->
<!-- A GIF is a type of image, and fortunately the [image crate](https://docs.rs/image/) supports GIFs natively. It's pretty simple to use. -->
<!-- ```rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
let output = std::fs::File::create(path)?;
let mut encoder = image::gif::Encoder::new(output);
for mut data in frames {
let frame = image::gif::Frame::from_rgba_speed(size, size, &mut data, speed);
encoder.encode(&frame)?;
}
Ok(())
}
``` -->
All we need to use this code is the frames of the GIF, how fast it should run, and the size of the GIF (you could use width and height separately, but I didn't).
## How do we make the frames?
If you checked out the [windowless showcase](../windowless/#a-triangle-without-a-window), you'll know that we render directly to a `wgpu::Texture`. We'll create a texture to render to and a buffer to copy the output to.
```rust
// create a texture to render to
let texture_size = 256u32;
let rt_desc = wgpu::TextureDescriptor {
size: wgpu::Extent3d {
width: texture_size,
height: texture_size,
depth: 1,
},
array_layer_count: colors.len() as u32,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let render_target = framework::Texture::from_descriptor(&device, rt_desc);
// create a buffer to copy the texture to so we can get the data
let pixel_size = mem::size_of::<[u8;4]>() as u32;
let buffer_size = (pixel_size * texture_size * texture_size) as wgpu::BufferAddress;
let buffer_desc = wgpu::BufferDescriptor {
size: buffer_size,
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ,
};
let output_buffer = device.create_buffer(&buffer_desc);
```
With that we can render a frame, and then copy that frame to a `Vec<u8>`.
```rust
// we need to store this in an arc-mutex so we can pass it to the mapping function
let frames = Arc::new(Mutex::new(Vec::new()));
for c in &colors {
let mut encoder = device.create_command_encoder(&Default::default());
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &render_target.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
// modify the clear color so the gif changes
clear_color: wgpu::Color {
r: c[0],
g: c[1],
b: c[2],
a: 1.0,
}
}
],
depth_stencil_attachment: None,
});
rpass.set_pipeline(&render_pipeline);
rpass.draw(0..3, 0..1);
drop(rpass);
encoder.copy_texture_to_buffer(
wgpu::TextureCopyView {
texture: &render_target.texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
wgpu::BufferCopyView {
buffer: &output_buffer,
offset: 0,
row_pitch: pixel_size * texture_size,
image_height: texture_size,
},
render_target.desc.size
);
queue.submit(&[encoder.finish()]);
let frames_clone = frames.clone();
output_buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[u8]>| {
match result {
Ok(mapping) => {
let data = Vec::from(mapping.data);
let mut f = frames_clone.lock().unwrap();
(*f).push(data);
}
_ => { eprintln!("Something went wrong") }
}
});
// wait for the GPU to finish
device.poll(true);
}
```
Once that's done we can pull the frame data out of the `Arc<Mutex<_>>`, and pass it into `save_gif()`.
```rust
let mut frames = Arc::try_unwrap(frames)
.unwrap()
.into_inner()
.unwrap();
save_gif("output.gif", &mut frames, 10, texture_size as u16).unwrap();
```
That's the gist of it. We can improve things using a texture array, and sending the draw commands all at once, but this gets the idea across. With the shader I wrote we get the following GIF.
![./output.gif](./output.gif)
<AutoGithubLink/>

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.8 KiB

Before

Width:  |  Height:  |  Size: 1.5 KiB

After

Width:  |  Height:  |  Size: 1.5 KiB

Loading…
Cancel
Save