use std::{
    str::FromStr,
    sync::{
        mpsc::{sync_channel, Receiver, SyncSender},
        Arc, Mutex,
    },
    thread,
};

use derive_builder::Builder;
use rand::Rng;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use serde::Deserialize;
use structopt::StructOpt;

use crate::{
    canvas::Canvas,
    matrices::Matrix4x4,
    rays::Ray,
    tuples::{Color, Tuple},
    world::World,
    Float, BLACK,
};

const MAX_DEPTH_RECURSION: usize = 10;

#[derive(Copy, Clone, StructOpt, Debug, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum RenderStrategy {
    Serial,
    Rayon,
    WorkerPool,
}

impl Default for RenderStrategy {
    fn default() -> RenderStrategy {
        RenderStrategy::Rayon
    }
}

impl FromStr for RenderStrategy {
    type Err = serde_json::error::Error;
    fn from_str(s: &str) -> Result<RenderStrategy, serde_json::error::Error> {
        serde_json::from_str(&format!("\"{}\"", s))
    }
}
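// A minimal parsing sketch for the FromStr impl above: because of serde's
// `rename_all = "kebab-case"`, the command-line spelling of `WorkerPool` is
// "worker-pool". This is only an illustrative test of the existing behavior.
#[cfg(test)]
mod render_strategy_from_str_sketch {
    use std::str::FromStr;

    use super::RenderStrategy;

    #[test]
    fn parses_kebab_case_names() {
        assert!(matches!(
            RenderStrategy::from_str("serial"),
            Ok(RenderStrategy::Serial)
        ));
        assert!(matches!(
            RenderStrategy::from_str("worker-pool"),
            Ok(RenderStrategy::WorkerPool)
        ));
        // Unknown names surface the serde_json error returned by from_str.
        assert!(RenderStrategy::from_str("not-a-strategy").is_err());
    }
}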
#[derive(Builder, Clone, Debug, Default)]
#[builder(setter(skip), build_fn(skip))]
pub struct Camera {
    #[builder(setter(skip = "false"))]
    hsize: usize,
    #[builder(setter(skip = "false"))]
    vsize: usize,
    #[builder(setter(skip = "false"))]
    field_of_view: Float,
    #[builder(setter(skip = "false"))]
    transform: Matrix4x4,
    inverse_transform: Matrix4x4,
    pixel_size: Float,
    half_width: Float,
    half_height: Float,
    #[builder(setter(skip = "false"))]
    pub render_strategy: RenderStrategy,
    /// 0 renders a single ray through the center of the pixel; 1 or higher takes that many
    /// random samples within the pixel and averages them.
    #[builder(setter(skip = "false"))]
    pub samples_per_pixel: usize,
}
impl CameraBuilder {
    pub fn build(&self) -> Result<Camera, CameraBuilderError> {
        let hsize = match self.hsize {
            Some(ref value) => Clone::clone(value),
            None => {
                return Err(Into::into(::derive_builder::UninitializedFieldError::from(
                    "hsize",
                )))
            }
        };
        let vsize = match self.vsize {
            Some(ref value) => Clone::clone(value),
            None => {
                return Err(Into::into(::derive_builder::UninitializedFieldError::from(
                    "vsize",
                )))
            }
        };
        let field_of_view = match self.field_of_view {
            Some(ref value) => Clone::clone(value),
            None => {
                return Err(Into::into(::derive_builder::UninitializedFieldError::from(
                    "field_of_view",
                )))
            }
        };
        let mut c = Camera::new(hsize, vsize, field_of_view);
        if let Some(transform) = self.transform {
            c.set_transform(transform);
        }
        if let Some(render_strategy) = self.render_strategy {
            c.render_strategy = render_strategy;
        }
        if let Some(samples_per_pixel) = self.samples_per_pixel {
            c.samples_per_pixel = samples_per_pixel;
        }
        Ok(c)
    }
}
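// A usage sketch for the hand-written `build` above. It assumes derive_builder's
// default mutable, by-value setters for the non-skipped fields; `hsize`, `vsize`
// and `field_of_view` are required, everything else falls back to `Camera::new`
// defaults.
#[cfg(test)]
mod camera_builder_sketch {
    use super::{CameraBuilder, RenderStrategy};
    use crate::float::consts::PI;

    #[test]
    fn builds_with_required_fields() {
        let c = CameraBuilder::default()
            .hsize(160)
            .vsize(120)
            .field_of_view(PI / 2.)
            .render_strategy(RenderStrategy::Serial)
            .build()
            .expect("all required fields were set");
        assert_eq!(c.hsize(), 160);
        assert_eq!(c.vsize(), 120);

        // Omitting a required field is reported as a build error.
        assert!(CameraBuilder::default().hsize(160).build().is_err());
    }
}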
enum Request {
    Line { width: usize, y: usize },
}

enum Response {
    Line { y: usize, pixels: Canvas },
}

impl Camera {
    /// Create a camera with a canvas hsize pixels wide and vsize pixels high,
    /// with the given field of view (in radians).
    pub fn new(hsize: usize, vsize: usize, field_of_view: Float) -> Camera {
        let half_view = (field_of_view / 2.).tan();
        let aspect = hsize as Float / vsize as Float;
        let (half_width, half_height) = if aspect >= 1. {
            (half_view, half_view / aspect)
        } else {
            (half_view * aspect, half_view)
        };
        let pixel_size = 2. * half_width / hsize as Float;
        Camera {
            hsize,
            vsize,
            field_of_view,
            transform: Matrix4x4::identity(),
            inverse_transform: Matrix4x4::identity(),
            pixel_size,
            half_height,
            half_width,
            render_strategy: RenderStrategy::Rayon,
            samples_per_pixel: 0,
        }
    }
    pub fn hsize(&self) -> usize {
        self.hsize
    }
    pub fn vsize(&self) -> usize {
        self.vsize
    }
    pub fn field_of_view(&self) -> Float {
        self.field_of_view
    }
    pub fn transform(&self) -> Matrix4x4 {
        self.transform
    }
    pub fn set_transform(&mut self, t: Matrix4x4) {
        self.transform = t;
        self.inverse_transform = t.inverse();
    }
    pub fn pixel_size(&self) -> Float {
        self.pixel_size
    }
    pub fn supersample_rays_for_pixel(&self, px: usize, py: usize, samples: usize) -> Vec<Ray> {
        let mut rng = rand::thread_rng();

        (0..samples)
            .map(|_| {
                // The offset from the edge of the canvas to a random point within the pixel.
                let xoffset = (px as Float + rng.gen::<Float>()) * self.pixel_size;
                let yoffset = (py as Float + rng.gen::<Float>()) * self.pixel_size;

                // The untransformed coordinates of the pixel in world space.
                // (Remember that the camera looks toward -z, so +x is to the left.)
                let world_x = self.half_width - xoffset;
                let world_y = self.half_height - yoffset;

                // Using the camera matrix, transform the canvas point and the origin,
                // and then compute the ray's direction vector.
                // (Remember that the canvas is at z = -1.)
                let pixel = self.inverse_transform * Tuple::point(world_x, world_y, -1.);
                let origin = self.inverse_transform * Tuple::point(0., 0., 0.);
                let direction = (pixel - origin).normalize();

                Ray::new(origin, direction)
            })
            .collect()
    }
    /// Calculate the ray that starts at the camera and passes through the (x,y)
    /// pixel on the canvas.
    pub fn ray_for_pixel(&self, px: usize, py: usize) -> Ray {
        // The offset from the edge of the canvas to the pixel's center.
        let xoffset = (px as Float + 0.5) * self.pixel_size;
        let yoffset = (py as Float + 0.5) * self.pixel_size;

        // The untransformed coordinates of the pixel in world space.
        // (Remember that the camera looks toward -z, so +x is to the left.)
        let world_x = self.half_width - xoffset;
        let world_y = self.half_height - yoffset;

        // Using the camera matrix, transform the canvas point and the origin,
        // and then compute the ray's direction vector.
        // (Remember that the canvas is at z = -1.)
        let pixel = self.inverse_transform * Tuple::point(world_x, world_y, -1.);
        let origin = self.inverse_transform * Tuple::point(0., 0., 0.);
        let direction = (pixel - origin).normalize();

        Ray::new(origin, direction)
    }
    /// Use the camera to render an image of the given world.
    pub fn render(&self, w: &World) -> Canvas {
        use RenderStrategy::*;

        match self.render_strategy {
            Serial => self.render_serial(w),
            Rayon => self.render_parallel_rayon(w),
            WorkerPool => self.render_parallel_one_thread_per_core(w),
        }
    }
    /// This render function spins up one thread per core and pins each thread
    /// to its core. It then sends work requests to the worker threads,
    /// requesting that a full line of the image be rendered. The main thread
    /// collects the results and stores them in the canvas returned to the caller.
    fn render_parallel_one_thread_per_core(&self, world: &World) -> Canvas {
        let mut image = Canvas::new(self.hsize, self.vsize, BLACK);
        let num_threads = num_cpus::get();
        let (pixel_req_tx, pixel_req_rx) = sync_channel(2 * num_threads);
        let (pixel_resp_tx, pixel_resp_rx) = sync_channel(2 * num_threads);
        let pixel_req_rx = Arc::new(Mutex::new(pixel_req_rx));

        // Create copies of the world and camera that we can share with all workers.
        // It's probably okay to clone the camera, but the world could get large
        // (think textures and high poly count models).
        // TODO(wathiede): prevent a second copy of the world when they start getting
        // large.
        let world = Arc::new(world.clone());
        let camera = Arc::new(self.clone());

        let core_ids = core_affinity::get_core_ids().unwrap();
        println!("Creating {} render threads", core_ids.len());
        // Create a worker thread for each CPU core and pin the thread to the core.
        let mut handles = core_ids
            .into_iter()
            .map(|id| {
                let w = Arc::clone(&world);
                let c = Arc::clone(&camera);
                let pixel_req_rx = pixel_req_rx.clone();
                let pixel_resp_tx = pixel_resp_tx.clone();
                thread::spawn(move || {
                    core_affinity::set_for_current(id);
                    render_worker_task(&c, &w, pixel_req_rx, &pixel_resp_tx);
                })
            })
            .collect::<Vec<_>>();
        drop(pixel_req_rx);
        drop(pixel_resp_tx);

        // Send render requests over channels to the worker threads.
        let (w, h) = (camera.hsize, camera.vsize);
        handles.push(thread::spawn(move || {
            for y in 0..h {
                pixel_req_tx
                    .send(Request::Line { width: w, y })
                    .expect("failed to send line request");
            }
            drop(pixel_req_tx);
        }));

        // Read responses from the channel and blit the image data.
        for resp in pixel_resp_rx {
            match resp {
                Response::Line { y, pixels } => {
                    for x in 0..camera.hsize {
                        image.set(x, y, pixels.get(x, 0));
                    }
                }
            }
        }

        // Wait for all the threads to exit.
        for thr in handles {
            thr.join().expect("thread join");
        }
        image
    }
    /// This renderer uses rayon to render each row on a separate thread. It
    /// seems to have more consistent performance than the worker pool, and is
    /// as fast as the worker pool at its fastest. The downside is that the
    /// flame graph looks like a mess. A strength over
    /// `render_parallel_one_thread_per_core` is that it doesn't require
    /// `Camera` and `World` to be cloneable.
    fn render_parallel_rayon(&self, w: &World) -> Canvas {
        let image_mu = Mutex::new(Canvas::new(self.hsize, self.vsize, BLACK));

        (0..self.vsize).into_par_iter().for_each(|y| {
            let mut row_image = Canvas::new(self.hsize, 1, BLACK);
            for x in 0..self.hsize {
                let color = self.sample(w, x, y);
                row_image.set(x, 0, color);
            }
            // TODO(wathiede): create a row based setter for memcpying the row as a whole.
            let mut image = image_mu.lock().expect("failed to lock image mutex");
            for x in 0..self.hsize {
                image.set(x, y, row_image.get(x, 0));
            }
        });
        image_mu
            .into_inner()
            .expect("failed to get image out of mutex")
    }
    /// Reference render implementation from the book. Single threaded, nothing fancy.
    fn render_serial(&self, w: &World) -> Canvas {
        let mut image = Canvas::new(self.hsize, self.vsize, BLACK);
        for y in 0..self.vsize {
            for x in 0..self.hsize {
                let color = self.sample(w, x, y);
                image.set(x, y, color);
            }
        }
        image
    }
    fn sample(&self, w: &World, x: usize, y: usize) -> Color {
        if self.samples_per_pixel > 0 {
            let color = self
                .supersample_rays_for_pixel(x, y, self.samples_per_pixel)
                .iter()
                .map(|ray| w.color_at(ray, MAX_DEPTH_RECURSION))
                .fold(BLACK, |acc, c| acc + c);
            color / self.samples_per_pixel as Float
        } else {
            let ray = self.ray_for_pixel(x, y);
            w.color_at(&ray, MAX_DEPTH_RECURSION)
        }
    }
}
fn render_worker_task(
    c: &Camera,
    w: &World,
    input_chan: Arc<Mutex<Receiver<Request>>>,
    output_chan: &SyncSender<Response>,
) {
    loop {
        let job = { input_chan.lock().unwrap().recv() };
        match job {
            Err(_) => {
                // From the docs:
                // "The recv operation can only fail if the sending half of a
                // channel (or sync_channel) is disconnected, implying that no
                // further messages will ever be received."
                return;
            }
            Ok(req) => match req {
                Request::Line { width, y } => {
                    let mut pixels = Canvas::new(width, 1, BLACK);
                    for x in 0..width {
                        let color = c.sample(w, x, y);
                        pixels.set(x, 0, color);
                    }
                    output_chan
                        .send(Response::Line { y, pixels })
                        .expect("failed to send pixel response");
                }
            },
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        camera::Camera,
        float::consts::PI,
        matrices::Matrix4x4,
        transformations::view_transform,
        tuples::{point, vector},
        world::World,
        Float, EPSILON,
    };
    #[test]
    fn new() {
        let hsize = 160;
        let vsize = 120;
        let field_of_view = PI / 2.;
        let c = Camera::new(hsize, vsize, field_of_view);
        assert_eq!(c.hsize(), 160);
        assert_eq!(c.vsize(), 120);
        assert_eq!(c.transform(), Matrix4x4::identity());

        // Pixel size for a horizontal canvas.
        let c = Camera::new(200, 150, PI / 2.);
        assert!((c.pixel_size() - 0.010).abs() < EPSILON);

        // Pixel size for a vertical canvas.
        let c = Camera::new(150, 200, PI / 2.);
        assert!((c.pixel_size() - 0.010).abs() < EPSILON);
    }
    #[test]
    fn ray_for_pixel() {
        // Constructing a ray through the center of the canvas.
        let c = Camera::new(201, 101, PI / 2.);
        let r = c.ray_for_pixel(100, 50);
        assert_eq!(r.origin, point(0., 0., 0.));
        assert_eq!(r.direction, vector(0., 0., -1.));

        // Constructing a ray through the corner of the canvas.
        let c = Camera::new(201, 101, PI / 2.);
        let r = c.ray_for_pixel(0, 0);
        assert_eq!(r.origin, point(0., 0., 0.));
        assert_eq!(r.direction, vector(0.66519, 0.33259, -0.66851));

        // Constructing a ray when the camera is transformed.
        let mut c = Camera::new(201, 101, PI / 2.);
        c.set_transform(Matrix4x4::rotation_y(PI / 4.) * Matrix4x4::translation(0., -2., 5.));
        let r = c.ray_for_pixel(100, 50);
        assert_eq!(r.origin, point(0., 2., -5.));
        assert_eq!(
            r.direction,
            vector((2. as Float).sqrt() / 2., 0., -(2. as Float).sqrt() / 2.)
        );
    }
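    // A small sketch of the supersampling path: `samples_per_pixel` (documented on
    // `Camera`) controls how many jittered rays `sample` averages per pixel. Here we
    // only check the ray count and the shared origin for an untransformed camera,
    // since the per-ray jitter is random by design.
    #[test]
    fn supersample_rays_for_pixel() {
        let c = Camera::new(201, 101, PI / 2.);
        let rays = c.supersample_rays_for_pixel(100, 50, 4);
        assert_eq!(rays.len(), 4);
        for r in &rays {
            // Every ray starts at the camera origin.
            assert_eq!(r.origin, point(0., 0., 0.));
        }
    }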
    #[test]
    fn render() {
        // Rendering a world with a camera.
        let w = World::test_world();
        let mut c = Camera::new(11, 11, PI / 2.);
        let from = point(0., 0., -5.);
        let to = point(0., 0., 0.);
        let up = vector(0., 1., 0.);
        c.set_transform(view_transform(from, to, up));
        let image = c.render(&w);
        assert_eq!(image.get(5, 5), [0.38066, 0.47583, 0.2855].into());
    }
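    // A consistency sketch for the render strategies: Serial and Rayon should
    // produce the same image for the same camera and world, since both call
    // `sample` per pixel. WorkerPool is left out here to avoid depending on
    // core_affinity inside a unit test.
    #[test]
    fn render_strategies_agree() {
        use crate::camera::RenderStrategy;

        let w = World::test_world();
        let mut c = Camera::new(11, 11, PI / 2.);
        let from = point(0., 0., -5.);
        let to = point(0., 0., 0.);
        let up = vector(0., 1., 0.);
        c.set_transform(view_transform(from, to, up));

        c.render_strategy = RenderStrategy::Serial;
        let serial_image = c.render(&w);
        c.render_strategy = RenderStrategy::Rayon;
        let rayon_image = c.render(&w);

        for y in 0..c.vsize() {
            for x in 0..c.hsize() {
                assert_eq!(serial_image.get(x, y), rayon_image.get(x, y));
            }
        }
    }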
}