
use std::{
str::FromStr,
sync::{
mpsc::{sync_channel, Receiver, SyncSender},
Arc, Mutex,
},
thread,
};
use rand::Rng;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use serde::Deserialize;
use structopt::StructOpt;
use crate::{
canvas::Canvas, matrices::Matrix4x4, rays::Ray, tuples::Tuple, world::World, Float, BLACK,
};
#[derive(Copy, Clone, StructOpt, Debug, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum RenderStrategy {
Serial,
Rayon,
WorkerPool,
}
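/// Parse a `RenderStrategy` from its kebab-case name (as produced by the
/// `serde` rename above) by round-tripping the bare string through
/// serde_json as a quoted JSON string.
///
/// # Examples
/// ```
/// use rtchallenge::camera::RenderStrategy;
///
/// // A minimal sketch of the expected round-trip behaviour.
/// assert!(matches!(
/// "worker-pool".parse::<RenderStrategy>(),
/// Ok(RenderStrategy::WorkerPool)
/// ));
/// assert!("not-a-strategy".parse::<RenderStrategy>().is_err());
/// ```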
impl FromStr for RenderStrategy {
type Err = serde_json::error::Error;
fn from_str(s: &str) -> Result<RenderStrategy, serde_json::error::Error> {
serde_json::from_str(&format!("\"{}\"", s))
}
}
#[derive(Clone)]
pub struct Camera {
hsize: usize,
vsize: usize,
field_of_view: Float,
transform: Matrix4x4,
inverse_transform: Matrix4x4,
pixel_size: Float,
half_width: Float,
half_height: Float,
pub render_strategy: RenderStrategy,
}
enum Request {
Line { width: usize, y: usize },
}
enum Response {
Line { y: usize, pixels: Canvas },
}
impl Camera {
/// Create a camera whose canvas is hsize pixels wide and vsize pixels
/// high, with the given field of view (in radians).
///
/// # Examples
/// ```
/// use rtchallenge::{camera::Camera, float::consts::PI, matrices::Matrix4x4};
///
/// let hsize = 160;
/// let vsize = 120;
/// let field_of_view = PI / 2.;
/// let c = Camera::new(hsize, vsize, field_of_view);
/// assert_eq!(c.hsize(), 160);
/// assert_eq!(c.vsize(), 120);
/// assert_eq!(c.transform(), Matrix4x4::identity());
///
/// // Pixel size for a horizontal canvas.
/// let c = Camera::new(200, 150, PI / 2.);
/// assert_eq!(c.pixel_size(), 0.01);
///
/// // Pixel size for a vertical canvas.
/// let c = Camera::new(150, 200, PI / 2.);
/// assert_eq!(c.pixel_size(), 0.01);
/// ```
pub fn new(hsize: usize, vsize: usize, field_of_view: Float) -> Camera {
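// half_view is half the canvas extent in world units on the canvas
// plane (z = -1). The field of view spans the larger dimension, so the
// aspect ratio decides whether width or height gets the full extent.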
let half_view = (field_of_view / 2.).tan();
let aspect = hsize as Float / vsize as Float;
let (half_width, half_height) = if aspect >= 1. {
(half_view, half_view / aspect)
} else {
(half_view * aspect, half_view)
};
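// Pixels are square, so the size derived from the width serves both axes.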
let pixel_size = 2. * half_width / hsize as Float;
Camera {
hsize,
vsize,
field_of_view,
transform: Matrix4x4::identity(),
inverse_transform: Matrix4x4::identity(),
pixel_size,
half_height,
half_width,
render_strategy: RenderStrategy::Rayon,
}
}
pub fn hsize(&self) -> usize {
self.hsize
}
pub fn vsize(&self) -> usize {
self.vsize
}
pub fn field_of_view(&self) -> Float {
self.field_of_view
}
pub fn transform(&self) -> Matrix4x4 {
self.transform
}
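/// Set the camera's transform and cache its inverse so that
/// `ray_for_pixel` doesn't have to invert the matrix for every ray.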
pub fn set_transform(&mut self, t: Matrix4x4) {
self.transform = t;
self.inverse_transform = t.inverse();
}
pub fn pixel_size(&self) -> Float {
self.pixel_size
}
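/// Generate `samples` rays through pixel (px, py), each jittered to a
/// uniformly random point within the pixel, for supersampled
/// antialiasing.
///
/// # Examples
/// ```
/// use rtchallenge::{camera::Camera, float::consts::PI, tuples::Tuple};
///
/// // A minimal sketch: with the default (identity) transform, every
/// // jittered ray still originates at the camera origin.
/// let c = Camera::new(201, 101, PI / 2.);
/// for r in c.supersample_rays_for_pixel(100, 50, 4) {
/// assert_eq!(r.origin, Tuple::point(0., 0., 0.));
/// }
/// ```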
pub fn supersample_rays_for_pixel(&self, px: usize, py: usize, samples: usize) -> Vec<Ray> {
let mut rng = rand::thread_rng();
(0..samples)
.map(|_| {
// The offset from the edge of the canvas to the pixel's corner.
let xoffset = (px as Float + rng.gen::<Float>()) * self.pixel_size;
let yoffset = (py as Float + rng.gen::<Float>()) * self.pixel_size;
// The untransformed coordinates of the pixel in world space.
// (Remember that the camera looks toward -z, so +x is to the left.)
let world_x = self.half_width - xoffset;
let world_y = self.half_height - yoffset;
// Using the camera matrix, transform the canvas point and the origin,
// and then compute the ray's direction vector.
// (Remember that the canvas is at z = -1.)
let pixel = self.inverse_transform * Tuple::point(world_x, world_y, -1.);
let origin = self.inverse_transform * Tuple::point(0., 0., 0.);
let direction = (pixel - origin).normalize();
Ray::new(origin, direction)
})
.collect()
}
/// Calculate ray that starts at the camera and passes through the (x,y)
/// pixel on the canvas.
///
/// # Examples
/// ```
/// use rtchallenge::{
/// camera::Camera, float::consts::PI, matrices::Matrix4x4, tuples::Tuple, Float,
/// };
///
/// // Constructing a ray through the center of the canvas.
/// let c = Camera::new(201, 101, PI / 2.);
/// let r = c.ray_for_pixel(100, 50);
/// assert_eq!(r.origin, Tuple::point(0., 0., 0.));
/// assert_eq!(r.direction, Tuple::vector(0., 0., -1.));
///
/// // Constructing a ray through the corner of the canvas.
/// let c = Camera::new(201, 101, PI / 2.);
/// let r = c.ray_for_pixel(0, 0);
/// assert_eq!(r.origin, Tuple::point(0., 0., 0.));
/// assert_eq!(r.direction, Tuple::vector(0.66519, 0.33259, -0.66851));
///
/// // Constructing a ray when the camera is transformed.
/// let mut c = Camera::new(201, 101, PI / 2.);
/// c.set_transform(Matrix4x4::rotation_y(PI / 4.) * Matrix4x4::translation(0., -2., 5.));
/// let r = c.ray_for_pixel(100, 50);
/// assert_eq!(r.origin, Tuple::point(0., 2., -5.));
/// assert_eq!(
/// r.direction,
/// Tuple::vector((2. as Float).sqrt() / 2., 0., -(2. as Float).sqrt() / 2.)
/// );
/// ```
#[cfg(not(feature = "disable_inverse_cache"))]
pub fn ray_for_pixel(&self, px: usize, py: usize) -> Ray {
// The offset from the edge of the canvas to the pixel's corner.
let xoffset = (px as Float + 0.5) * self.pixel_size;
let yoffset = (py as Float + 0.5) * self.pixel_size;
// The untransformed coordinates of the pixel in world space.
// (Remember that the camera looks toward -z, so +x is to the left.)
let world_x = self.half_width - xoffset;
let world_y = self.half_height - yoffset;
// Using the camera matrix, transform the canvas point and the origin,
// and then compute the ray's direction vector.
// (Remember that the canvas is at z = -1.)
let pixel = self.inverse_transform * Tuple::point(world_x, world_y, -1.);
let origin = self.inverse_transform * Tuple::point(0., 0., 0.);
let direction = (pixel - origin).normalize();
Ray::new(origin, direction)
}
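/// Benchmarking variant of `ray_for_pixel`: with the
/// `disable_inverse_cache` feature enabled, the inverse transform is
/// recomputed for every ray instead of read from the cached copy.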
#[cfg(feature = "disable_inverse_cache")]
pub fn ray_for_pixel(&self, px: usize, py: usize) -> Ray {
// The offset from the edge of the canvas to the pixel's corner.
let xoffset = (px as Float + 0.5) * self.pixel_size;
let yoffset = (py as Float + 0.5) * self.pixel_size;
// The untransformed coordinates of the pixel in world space.
// (Remember that the camera looks toward -z, so +x is to the left.)
let world_x = self.half_width - xoffset;
let world_y = self.half_height - yoffset;
// Using the camera matrix, transform the canvas point and the origin,
// and then compute the ray's direction vector.
// (Remember that the canvas is at z = -1.)
let pixel = self.transform.inverse() * Tuple::point(world_x, world_y, -1.);
let origin = self.transform.inverse() * Tuple::point(0., 0., 0.);
let direction = (pixel - origin).normalize();
Ray::new(origin, direction)
}
/// Use the camera to render an image of the given world.
///
/// # Examples
/// ```
/// use rtchallenge::{
/// camera::Camera,
/// float::consts::PI,
/// transformations::view_transform,
/// tuples::{Color, Tuple},
/// world::World,
/// };
///
/// // Rendering a world with a camera.
/// let w = World::test_world();
/// let mut c = Camera::new(11, 11, PI / 2.);
/// let from = Tuple::point(0., 0., -5.);
/// let to = Tuple::point(0., 0., 0.);
/// let up = Tuple::vector(0., 1., 0.);
/// c.set_transform(view_transform(from, to, up));
/// let image = c.render(&w);
/// assert_eq!(image.get(5, 5), Color::new(0.38066, 0.47583, 0.2855));
/// ```
pub fn render(&self, w: &World) -> Canvas {
use RenderStrategy::*;
match self.render_strategy {
Serial => self.render_serial(w),
Rayon => self.render_parallel_rayon(w),
WorkerPool => self.render_parallel_one_thread_per_core(w),
}
}
/// This render function spins up one thread per core and pins each thread
/// to its core. It then sends work requests to the worker threads, asking
/// each to render a full line of the image. The main thread collects the
/// results and stores them in the canvas returned to the caller.
fn render_parallel_one_thread_per_core(&self, world: &World) -> Canvas {
let mut image = Canvas::new(self.hsize, self.vsize, BLACK);
let num_threads = num_cpus::get();
let (pixel_req_tx, pixel_req_rx) = sync_channel(2 * num_threads);
let (pixel_resp_tx, pixel_resp_rx) = sync_channel(2 * num_threads);
let pixel_req_rx = Arc::new(Mutex::new(pixel_req_rx));
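// `Receiver` is `Send` but not `Sync`, so the workers share a single
// receiver behind a mutex and take turns pulling requests off it.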
// Create copies of the world and camera that we can share with all
// workers. Cloning the camera is probably fine, but the world could get
// large (think textures and high-poly-count models).
// TODO(wathiede): prevent a second copy of the world when they start
// getting large.
let world = Arc::new(world.clone());
let camera = Arc::new(self.clone());
let core_ids = core_affinity::get_core_ids().unwrap();
println!("Creating {} render threads", core_ids.len());
// Create a worker thread for each CPU core and pin the thread to the core.
let mut handles = core_ids
.into_iter()
.map(|id| {
let w = Arc::clone(&world);
let c = Arc::clone(&camera);
let pixel_req_rx = pixel_req_rx.clone();
let pixel_resp_tx = pixel_resp_tx.clone();
thread::spawn(move || {
core_affinity::set_for_current(id);
render_worker(&c, &w, pixel_req_rx, &pixel_resp_tx);
})
})
.collect::<Vec<_>>();
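// Drop the main thread's copies of the channel endpoints. In
// particular, if `pixel_resp_tx` were kept alive here, the response
// loop below would never see the channel disconnect and would block
// forever.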
drop(pixel_req_rx);
drop(pixel_resp_tx);
// Send render requests over channels to worker threads.
let (w, h) = (camera.hsize, camera.vsize);
handles.push(thread::spawn(move || {
for y in 0..h {
pixel_req_tx
.send(Request::Line { width: w, y })
.expect("failed to send line request");
}
drop(pixel_req_tx);
}));
// Read responses from channel and blit image data.
for resp in pixel_resp_rx {
match resp {
Response::Line { y, pixels } => {
for x in 0..camera.hsize {
image.set(x, y, pixels.get(x, 0));
}
}
}
}
// Wait for all the threads to exit.
for thr in handles {
thr.join().expect("thread join");
}
image
}
/// This renderer uses rayon to hand each row to a separate worker thread.
/// It seems to have more consistent performance than the worker pool, and
/// is as fast as the worker pool at its fastest. The downside is that the
/// flame graph looks a mess. A strength over
/// `render_parallel_one_thread_per_core` is that it doesn't require
/// `Camera` and `World` to be cloneable.
fn render_parallel_rayon(&self, w: &World) -> Canvas {
let image_mu = Mutex::new(Canvas::new(self.hsize, self.vsize, BLACK));
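// Render each row into a thread-local canvas first so the shared image
// mutex is held only for the final blit, not while tracing rays.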
(0..self.vsize).into_par_iter().for_each(|y| {
let mut row_image = Canvas::new(self.hsize, 1, BLACK);
for x in 0..self.hsize {
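// Compile-time toggle: raise SAMPLES above zero to enable jittered
// supersampling; at zero, each pixel gets a single centered ray.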
const SAMPLES: usize = 0;
if SAMPLES > 0 {
let color = self
.supersample_rays_for_pixel(x, y, SAMPLES)
.iter()
.map(|ray| w.color_at(&ray))
.fold(BLACK, |acc, c| acc + c);
row_image.set(x, 0, color / SAMPLES as Float);
} else {
let ray = self.ray_for_pixel(x, y);
let color = w.color_at(&ray);
row_image.set(x, 0, color);
}
}
// TODO(wathiede): create a row based setter for memcpying the row as a whole.
let mut image = image_mu.lock().expect("failed to lock image mutex");
for x in 0..self.hsize {
image.set(x, y, row_image.get(x, 0));
}
});
image_mu
.into_inner()
.expect("failed to get image out of mutex")
}
/// Reference render implementation from the book. Single threaded, nothing fancy.
fn render_serial(&self, w: &World) -> Canvas {
let mut image = Canvas::new(self.hsize, self.vsize, BLACK);
for y in 0..self.vsize {
for x in 0..self.hsize {
let ray = self.ray_for_pixel(x, y);
let color = w.color_at(&ray);
image.set(x, y, color);
}
}
image
}
}
fn render_worker(
c: &Camera,
w: &World,
input_chan: Arc<Mutex<Receiver<Request>>>,
output_chan: &SyncSender<Response>,
) {
loop {
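// Scope the mutex guard to the recv() call itself so other workers
// can pull the next request while this one renders its line.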
let job = { input_chan.lock().unwrap().recv() };
match job {
Err(_) => {
// From the docs:
// "The recv operation can only fail if the sending half of a
// channel (or sync_channel) is disconnected, implying that no
// further messages will ever be received."
return;
}
Ok(req) => match req {
Request::Line { width, y } => {
let mut pixels = Canvas::new(width, 1, BLACK);
for x in 0..width {
let ray = c.ray_for_pixel(x, y);
let color = w.color_at(&ray);
pixels.set(x, 0, color);
}
output_chan
.send(Response::Line { y, pixels })
.expect("failed to send pixel response");
}
},
}
}
}