Bill Thiede d9d183b1e5
Some checks failed
continuous-integration/drone/push Build is failing
rtiow: break project into multiple workspaces.
2019-11-09 11:56:33 -08:00

650 lines
20 KiB
Rust

use std::fmt;
use std::ops::AddAssign;
use std::ops::Range;
use std::path::Path;
use std::path::PathBuf;
use std::str;
use std::sync;
use std::sync::mpsc::sync_channel;
use std::sync::mpsc::Receiver;
use std::sync::mpsc::SyncSender;
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::time;
use core_affinity;
use log::info;
use log::trace;
use num_cpus;
use rand;
use rand::Rng;
use serde_derive::Serialize;
use structopt::StructOpt;
use crate::camera::Camera;
use crate::hitable::Hit;
use crate::human;
use crate::material::Lambertian;
use crate::output;
use crate::ray::Ray;
use crate::scenes;
use crate::sphere::Sphere;
use crate::texture::ConstantTexture;
use crate::texture::EnvMap;
use crate::vec3::Vec3;
/// Selects which built-in scene to render; chosen via the `--model` CLI flag.
/// The accepted string names are defined by the `FromStr` impl below.
//
// A field-less enum is trivially copyable and comparable, so derive the
// cheap standard traits eagerly.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Model {
    BVH,
    Bench,
    Book,
    CornellBox,
    CornellSmoke,
    Final,
    Mandelbrot,
    PerlinDebug,
    Spheramid,
    Test,
    Tutorial,
}
impl Model {
    /// Builds the `Scene` for this model, forwarding the parsed command-line
    /// options so scene constructors can honor width/height/accel settings.
    pub fn scene(&self, opt: &Opt) -> Scene {
        // `opt` is already a `&Opt`; the previous `&opt` produced a `&&Opt`
        // that only worked through deref coercion (clippy: needless_borrow).
        match self {
            Model::BVH => scenes::bvh::new(opt),
            Model::Bench => scenes::bench::new(opt),
            Model::Book => scenes::book::new(opt),
            Model::CornellBox => scenes::cornell_box::new(opt),
            Model::CornellSmoke => scenes::cornell_smoke::new(opt),
            Model::Final => scenes::final_scene::new(opt),
            Model::Mandelbrot => scenes::mandelbrot::new(opt),
            Model::PerlinDebug => scenes::perlin_debug::new(opt),
            Model::Spheramid => scenes::spheramid::new(opt),
            Model::Test => scenes::test::new(opt),
            Model::Tutorial => scenes::tutorial::new(opt),
        }
    }
}
/// Error returned when a `--model` string does not name a known scene;
/// carries the unrecognized input so it can be echoed back to the user.
#[derive(Debug)]
pub struct ModelParseError(String);

impl fmt::Display for ModelParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "unknown model enum type '{}'", self.0)
    }
}

// Implementing the standard Error trait lets callers box this error or
// propagate it with `?` alongside other error types.
impl std::error::Error for ModelParseError {}
impl str::FromStr for Model {
type Err = ModelParseError;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
match s {
"bench" => Ok(Model::Bench),
"book" => Ok(Model::Book),
"bvh" => Ok(Model::BVH),
"cornell_box" => Ok(Model::CornellBox),
"cornell_smoke" => Ok(Model::CornellSmoke),
"final" => Ok(Model::Final),
"mandelbrot" => Ok(Model::Mandelbrot),
"perlin_debug" => Ok(Model::PerlinDebug),
"spheramid" => Ok(Model::Spheramid),
"test" => Ok(Model::Test),
"tutorial" => Ok(Model::Tutorial),
_ => Err(ModelParseError(s.to_owned())),
}
}
}
impl std::string::ToString for Model {
fn to_string(&self) -> String {
match self {
Model::BVH => "bvh".to_string(),
Model::Bench => "bench".to_string(),
Model::Book => "book".to_string(),
Model::CornellBox => "cornell_box".to_string(),
Model::CornellSmoke => "cornell_smoke".to_string(),
Model::Final => "final".to_string(),
Model::Mandelbrot => "mandelbrot".to_string(),
Model::PerlinDebug => "perlin_debug".to_string(),
Model::Spheramid => "spheramid".to_string(),
Model::Test => "test".to_string(),
Model::Tutorial => "tutorial".to_string(),
}
}
}
/// Command-line options for the tracer binary.
///
/// NOTE: structopt turns each field's doc comment into its `--help` text, so
/// the `///` comments below are user-visible strings, not just documentation.
#[derive(Debug, StructOpt)]
#[structopt(name = "tracer", about = "An experimental ray tracer.")]
pub struct Opt {
    /// Image width
    #[structopt(short = "w", long = "width", default_value = "512")]
    pub width: usize,
    /// Image height
    #[structopt(short = "h", long = "height", default_value = "512")]
    pub height: usize,
    /// Number of threads
    #[structopt(short = "t", long = "num_threads")]
    pub num_threads: Option<usize>,
    /// Sub-samples per pixel
    #[structopt(short = "s", long = "subsample", default_value = "8")]
    pub subsamples: usize,
    // The list below must stay in sync with `Model`'s `FromStr` impl; the
    // previous help text omitted "mandelbrot" and "spheramid".
    /// Select scene to render, one of: "bench", "book", "bvh", "cornell_box",
    /// "cornell_smoke", "final", "mandelbrot", "perlin_debug", "spheramid",
    /// "test", "tutorial"
    #[structopt(long = "model", default_value = "book")]
    pub model: Model,
    /// Path to store pprof profile data, i.e. /tmp/cpuprofile.pprof
    #[structopt(long = "pprof", parse(from_os_str))]
    pub pprof: Option<PathBuf>,
    /// Use acceleration data structure, may be BVH or kd-tree depending on scene.
    #[structopt(long = "use_accel")]
    pub use_accel: bool,
    /// Output directory
    #[structopt(parse(from_os_str), default_value = "/tmp/tracer")]
    pub output: PathBuf,
}
/// Builds a human-readable fingerprint of the render settings, suitable for
/// embedding in output names. Slashes in the output path are replaced with
/// underscores so the whole fingerprint stays a single path component.
// TODO(wathiede): add threads.
pub fn opt_hash(opt: &Opt) -> String {
    let output_tag = opt.output.display().to_string().replace("/", "_");
    format!(
        "w:{}-h:{}-s:{}-pprof:{}-model:{}-use_accel:{}-{}",
        opt.width,
        opt.height,
        opt.subsamples,
        opt.pprof.is_some(),
        opt.model.to_string(),
        opt.use_accel,
        output_tag
    )
}
// TODO(wathiede): implement the skips and then the renderer could use json as an input file type.
/// A fully-specified render job: world geometry, camera, image dimensions,
/// and sampling parameters. Serialized (minus the `skip`ped fields) so a
/// render's settings can be recorded alongside its output.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Scene {
    // Root of the scene geometry; anything implementing `Hit`.
    #[serde(skip)]
    pub world: Box<dyn Hit>,
    // Viewpoint used to generate primary rays.
    #[serde(skip)]
    pub camera: Camera,
    /// Random sub-samples per pixel; ignored when `adaptive_subsampling` is set.
    pub subsamples: usize,
    /// overrides subsamples setting.
    pub adaptive_subsampling: Option<f32>,
    /// Worker thread count; `None` falls back to the number of CPUs (see `render`).
    pub num_threads: Option<usize>,
    pub width: usize,
    pub height: usize,
    /// When true, rays that miss geometry sample `env_map` (or a default sky
    /// gradient); when false such rays are black and the scene is expected to
    /// contain emissive materials (see `color`).
    pub global_illumination: bool,
    // Optional environment map used for backgrounds under global illumination.
    #[serde(skip)]
    pub env_map: Option<EnvMap>,
}
impl Default for Scene {
    /// A minimal placeholder scene: a single green Lambertian unit sphere at
    /// the origin, viewed from (20, 20, 20) with the focal plane on the
    /// target. Width/height/subsamples default to zero and are expected to
    /// be overridden by the caller.
    fn default() -> Scene {
        let eye = Vec3::new(20., 20., 20.);
        let target = Vec3::new(0., 0., 0.);
        let up = Vec3::new(0., 1., 0.);
        // Focus exactly on the look-at point.
        let focus_distance = (eye - target).length();
        let aperture = 0.1;
        let (time_min, time_max) = (0., 1.);
        let camera = Camera::new(
            eye,
            target,
            up,
            70.,
            1.,
            aperture,
            focus_distance,
            time_min,
            time_max,
        );
        let world = Box::new(Sphere::new(
            Vec3::new(0., 0., 0.),
            1.0,
            Lambertian::new(ConstantTexture::new([0., 1., 0.])),
        ));
        Scene {
            world,
            camera,
            subsamples: 0,
            adaptive_subsampling: None,
            num_threads: None,
            width: 0,
            height: 0,
            global_illumination: false,
            env_map: None,
        }
    }
}
/// Traces `r` through `world`, accumulating emitted and reflected color over
/// up to `MAX_BOUNCES` recursive bounces. If `global_illumination` is true, a
/// default light background (or `env_map`, when present) is assumed and will
/// light the world; if false, it is expected the scene has emissive light
/// sources and escaped rays contribute black.
///
/// Returns the accumulated color and the number of rays traced.
fn color(
    r: Ray,
    world: &dyn Hit,
    depth: usize,
    global_illumination: bool,
    env_map: &Option<EnvMap>,
) -> (Vec3, usize) {
    // Recursion cutoff: beyond this bounce count a ray contributes only its
    // emitted term. (Was a magic `50` inline.)
    const MAX_BOUNCES: usize = 50;
    // t_min of 0.001 avoids "shadow acne" from a scattered ray immediately
    // re-hitting the surface it just left.
    if let Some(rec) = world.hit(r, 0.001, std::f32::MAX) {
        let (u, v) = rec.uv;
        let emitted = rec.material.emitted(u, v, rec.p);
        let scatter_response = rec.material.scatter(&r, &rec);
        if depth < MAX_BOUNCES && scatter_response.reflected {
            let (c, rays) = color(
                scatter_response.scattered,
                world,
                depth + 1,
                global_illumination,
                env_map,
            );
            return (emitted + scatter_response.attenutation * c, rays + 1);
        } else {
            // Absorbed or bounce limit reached: only the emitted term.
            return (emitted, 1);
        }
    }
    if global_illumination {
        return match env_map {
            Some(env_map) => (env_map.color(r.direction.unit_vector()), 1),
            None => {
                let unit_direction = r.direction.unit_vector();
                // No hit, choose color from background: blend white to light
                // blue by the ray direction's y to fake a sky gradient.
                let t = 0.5 * (unit_direction.y + 1.);
                (
                    Vec3::new(1., 1., 1.) * (1. - t) + Vec3::new(0.5, 0.7, 1.) * t,
                    1,
                )
            }
        };
    }
    // No global illumination, so background is black.
    (Vec3::new(0., 0., 0.), 1)
}
/// Maximum recursion depth for `trace_pixel_adaptive`; also the initial
/// `depth` value passed by `render_pixel`.
const MAX_ADAPTIVE_DEPTH: usize = 10;

/// Adaptively supersamples pixel (x, y). `x_range`/`y_range` describe the
/// current sub-pixel region in [0, 1] pixel-local coordinates. The region's
/// center and four corners are traced; if the five-sample average differs
/// from the center by more than `threshold`, the region splits into four
/// quadrants, each traced recursively with `depth - 1`.
///
/// Returns the region's color and the number of rays traced. Side effect:
/// writes the ADAPTIVE_DEPTH debug image — red where recursion bottomed out
/// (depth == 0), green where the very first test (depth == MAX_ADAPTIVE_DEPTH)
/// was already uniform.
///
/// NOTE(review): the center sample `mc` is traced twice — once standalone and
/// again inside the `[tl, tr, mc, bl, br]` fold — and the standalone sample's
/// ray count is shadowed by the fold's, so the returned ray total slightly
/// undercounts. Both look unintentional; confirm before changing, since the
/// two traces of `mc` may differ if `Camera::get_ray` is stochastic.
fn trace_pixel_adaptive(
    depth: usize,
    threshold: f32,
    x: usize,
    y: usize,
    x_range: Range<f32>,
    y_range: Range<f32>,
    scene: &Scene,
) -> (Vec3, usize) {
    let w = scene.width as f32;
    let h = scene.height as f32;
    // Midpoint of the current sub-pixel region.
    let x_mid = x_range.start + ((x_range.end - x_range.start) / 2.);
    let y_mid = y_range.start + ((y_range.end - y_range.start) / 2.);
    // Image-space UV of the region center.
    let mc = ((x_mid + x as f32) / w, (y_mid + y as f32) / h);
    let (center, rays) = color(
        scene.camera.get_ray(mc.0, mc.1),
        scene.world.as_ref(),
        0,
        scene.global_illumination,
        &scene.env_map,
    );
    if depth == 0 {
        // Recursion exhausted: mark this pixel red in the debug image and
        // settle for the center sample.
        output::set_pixel(output::ADAPTIVE_DEPTH, x, y, [1., 0., 0.].into());
        return (center, rays);
    }
    // t = top
    // m = middle
    // b = bottom
    // l = left
    // c = center
    // r = right
    let tl = (
        (x_range.start + x as f32) / w,
        (y_range.start + y as f32) / h,
    );
    let tr = ((x_range.end + x as f32) / w, (y_range.start + y as f32) / h);
    let bl = ((x_range.start + x as f32) / w, (y_range.end + y as f32) / h);
    let br = ((x_range.end + x as f32) / w, (y_range.end + y as f32) / h);
    // Sum color and ray counts over the four corners plus the center.
    let (corners, rays) = [tl, tr, mc, bl, br]
        .iter()
        .map(|(u, v)| {
            color(
                scene.camera.get_ray(*u, *v),
                scene.world.as_ref(),
                0,
                scene.global_illumination,
                &scene.env_map,
            )
        })
        .fold(
            ([0., 0., 0.].into(), 0),
            |(p1, r1): (Vec3, usize), (p2, r2): (Vec3, usize)| ((p1 + p2), (r1 + r2)),
        );
    // Average of the five samples.
    let corners = corners / 5.;
    if (corners - center).length() > threshold {
        // Region is non-uniform: subdivide into four quadrants and recurse.
        let tl = trace_pixel_adaptive(
            depth - 1,
            threshold,
            x,
            y,
            x_range.start..x_mid,
            y_range.start..y_mid,
            scene,
        );
        let tr = trace_pixel_adaptive(
            depth - 1,
            threshold,
            x,
            y,
            x_mid..x_range.end,
            y_range.start..y_mid,
            scene,
        );
        let bl = trace_pixel_adaptive(
            depth - 1,
            threshold,
            x,
            y,
            x_range.start..x_mid,
            y_mid..y_range.end,
            scene,
        );
        let br = trace_pixel_adaptive(
            depth - 1,
            threshold,
            x,
            y,
            x_mid..x_range.end,
            y_mid..y_range.end,
            scene,
        );
        let pixel = (tl.0 + tr.0 + bl.0 + br.0) / 4.;
        let rays = tl.1 + tr.1 + bl.1 + br.1;
        (pixel, rays)
    } else {
        if depth == MAX_ADAPTIVE_DEPTH {
            // Uniform on the very first test: mark green in the debug image.
            output::set_pixel(output::ADAPTIVE_DEPTH, x, y, [0., 1., 0.].into());
        }
        (corners, rays)
    }
}
/// Traces one randomly-jittered sample for pixel (x, y), returning the
/// sample's color and the number of rays it took.
fn trace_pixel_random(x: usize, y: usize, scene: &Scene) -> (Vec3, usize) {
    let mut rng = rand::thread_rng();
    // Jitter within the pixel for stochastic anti-aliasing, then normalize
    // to [0, 1] image coordinates.
    let jitter_u: f32 = rng.gen_range(0., 1.);
    let jitter_v: f32 = rng.gen_range(0., 1.);
    let u = (jitter_u + x as f32) / scene.width as f32;
    let v = (jitter_v + y as f32) / scene.height as f32;
    color(
        scene.camera.get_ray(u, v),
        scene.world.as_ref(),
        0,
        scene.global_illumination,
        &scene.env_map,
    )
}
/// Running totals used for render progress reporting.
// Debug added so stats can be logged directly while diagnosing.
#[derive(Clone, Copy, Debug)]
struct RenderStats {
    // Total rays traced, including recursive bounces.
    rays: usize,
    // Total pixels completed.
    pixels: usize,
}
impl AddAssign for RenderStats {
    /// Accumulates `other`'s counters into `self`.
    fn add_assign(&mut self, other: Self) {
        self.rays += other.rays;
        self.pixels += other.pixels;
    }
}
/// Formats a one-line progress report for the interval between `last_stat`
/// and `current_stat`, which were captured `time_diff` apart; `pixel_total`
/// is the full frame size used for the percentage.
fn progress(
    last_stat: &RenderStats,
    current_stat: &RenderStats,
    time_diff: time::Duration,
    pixel_total: usize,
) -> String {
    let human = human::Formatter::new();
    let secs = time_diff.as_secs_f64();
    // Interval rates, not cumulative averages.
    let pixels_per_sec = (current_stat.pixels - last_stat.pixels) as f64 / secs;
    let rays_per_sec = (current_stat.rays - last_stat.rays) as f64 / secs;
    let percent = 100 * current_stat.pixels / pixel_total;
    format!(
        "{:7} / {:7}pixels ({:2}%) {:7}pixels/s {:7}rays/s",
        human.format(current_stat.pixels as f64),
        human.format(pixel_total as f64),
        percent,
        human.format(pixels_per_sec),
        human.format(rays_per_sec)
    )
}
impl Default for RenderStats {
fn default() -> Self {
RenderStats { rays: 0, pixels: 0 }
}
}
/// Work items sent from the producer thread to render workers.
enum Request {
    /// Render the single pixel at (x, y).
    Pixel { x: usize, y: usize },
    /// Render the entire scanline `y`, `width` pixels wide.
    Line { width: usize, y: usize },
}
/// Results sent from render workers back to the collector loop in `render`.
enum Response {
    /// One finished pixel with its color and the stats it cost.
    Pixel {
        x: usize,
        y: usize,
        pixel: Vec3,
        rs: RenderStats,
    },
    /// One finished scanline; `pixels` is indexed by x.
    Line {
        y: usize,
        pixels: Vec<Vec3>,
        rs: RenderStats,
    },
}
/// Renders one pixel of `scene`: adaptive subsampling when
/// `scene.adaptive_subsampling` is set, otherwise an average of
/// `scene.subsamples` randomly-jittered samples. Returns the gamma-corrected
/// color and the number of rays traced.
fn render_pixel(scene: &Scene, x: usize, y: usize) -> (Vec3, usize) {
    let (pixel, rays) = if let Some(threshold) = scene.adaptive_subsampling {
        trace_pixel_adaptive(
            MAX_ADAPTIVE_DEPTH,
            threshold,
            x,
            y,
            0.0..1.0,
            0.0..1.0,
            scene,
        )
    } else {
        // Sum `subsamples` jittered samples and their ray counts.
        let (pixel, rays) = (0..scene.subsamples)
            .map(|_| trace_pixel_random(x, y, scene))
            .fold(
                ([0., 0., 0.].into(), 0),
                |(p1, r1): (Vec3, usize), (p2, r2): (Vec3, usize)| ((p1 + p2), (r1 + r2)),
            );
        // NOTE(review): the RAYS_PER_PIXEL debug image is only written on
        // this path, never in adaptive mode, even though `render` registers
        // it unconditionally — confirm whether that is intentional.
        output::set_pixel_grey(output::RAYS_PER_PIXEL, x, y, rays as f32);
        (pixel / scene.subsamples as f32, rays)
    };
    // Gamma correct, use gamma 2 correction, which is 1/gamma where gamma=2 which is 1/2 or
    // sqrt.
    (
        Vec3::new(pixel[0].sqrt(), pixel[1].sqrt(), pixel[2].sqrt()),
        rays,
    )
}
/// Worker loop: pulls `Request`s off the shared queue until the request
/// channel closes, renders them, and sends `Response`s back on
/// `output_chan`. `tid` is used only for trace logging.
fn render_worker(
    tid: usize,
    scene: &Scene,
    input_chan: Arc<Mutex<Receiver<Request>>>,
    output_chan: &SyncSender<Response>,
) {
    loop {
        // Scope the mutex guard so the queue lock is released before any
        // rendering work begins.
        let job = { input_chan.lock().unwrap().recv() };
        match job {
            Err(err) => {
                // recv() errors only when every sender is dropped: no more work.
                trace!("Shutting down render_worker {}: {}", tid, err);
                return;
            }
            Ok(req) => match req {
                Request::Line { width, y } => {
                    trace!("tid {} width {} y {}", tid, width, y);
                    // Debug toggle: send one Line response per scanline vs.
                    // one Pixel response per rendered pixel.
                    let batch = false;
                    if batch {
                        let (pixels, rays): (Vec<Vec3>, Vec<usize>) = (0..width)
                            .map(|x| render_pixel(scene, x, y))
                            .collect::<Vec<(_, _)>>()
                            .into_iter()
                            .unzip();
                        let rays = rays.iter().sum();
                        output_chan
                            .send(Response::Line {
                                y,
                                pixels,
                                rs: RenderStats {
                                    rays,
                                    pixels: width,
                                },
                            })
                            // This branch sends a Line response; the message
                            // previously said "pixel" (swapped with below).
                            .expect("failed to send line response")
                    } else {
                        (0..width).for_each(|x| {
                            let (pixel, rays) = render_pixel(scene, x, y);
                            output_chan
                                .send(Response::Pixel {
                                    x,
                                    y,
                                    pixel,
                                    rs: RenderStats { rays, pixels: 1 },
                                })
                                .expect("failed to send pixel response");
                        });
                    }
                }
                Request::Pixel { x, y } => {
                    trace!("tid {} x {} y {}", tid, x, y);
                    let (pixel, rays) = render_pixel(scene, x, y);
                    output_chan
                        .send(Response::Pixel {
                            x,
                            y,
                            pixel,
                            rs: RenderStats { rays, pixels: 1 },
                        })
                        // This branch sends a Pixel response; the message
                        // previously said "line" (swapped with above).
                        .expect("failed to send pixel response");
                }
            },
        }
    }
}
/// Renders `scene` with a pool of CPU-pinned worker threads, logging progress
/// roughly once per second, and writes the resulting image(s) under
/// `output_dir` when done.
pub fn render(scene: Scene, output_dir: &Path) -> std::result::Result<(), std::io::Error> {
    let num_threads = scene.num_threads.unwrap_or_else(num_cpus::get);
    // Small bounded channels keep request production and result collection
    // in lock-step with the workers instead of buffering the whole frame.
    let (pixel_req_tx, pixel_req_rx) = sync_channel(2 * num_threads);
    let (pixel_resp_tx, pixel_resp_rx) = sync_channel(2 * num_threads);
    let scene = Arc::new(scene);
    // mpsc receivers are single-consumer; the Mutex lets all workers pull
    // from one shared request queue.
    let pixel_req_rx = Arc::new(Mutex::new(pixel_req_rx));
    info!("Adaptive subsampling: {:?}", scene.adaptive_subsampling);
    // Retrieve the IDs of all active CPU cores.
    let core_ids = core_affinity::get_core_ids().unwrap();
    // Use at most num_threads cores.
    let core_ids = if core_ids.len() > num_threads {
        core_ids[..num_threads].to_vec()
    } else {
        core_ids
    };
    info!("Creating {} render threads", core_ids.len());
    output::register_image(
        output::MAIN_IMAGE.to_string(),
        (scene.width, scene.height),
        output::ImageType::RGB01,
    );
    if scene.adaptive_subsampling.is_some() {
        output::register_image(
            output::ADAPTIVE_DEPTH.to_string(),
            (scene.width, scene.height),
            output::ImageType::RGB01,
        );
    }
    output::register_image(
        output::RAYS_PER_PIXEL.to_string(),
        (scene.width, scene.height),
        output::ImageType::GreyNormalized,
    );
    // Create a thread for each active CPU core.
    let mut handles = core_ids
        .into_iter()
        .enumerate()
        // NOTE(review): redundant — core_ids was already truncated to
        // num_threads above; harmless either way.
        .filter(|(i, _id)| *i < num_threads)
        .map(|(i, id)| {
            let s = sync::Arc::clone(&scene);
            let pixel_req_rx = pixel_req_rx.clone();
            let pixel_resp_tx = pixel_resp_tx.clone();
            thread::spawn(move || {
                // Pin the worker to its core before rendering.
                core_affinity::set_for_current(id);
                render_worker(i, &s, pixel_req_rx, &pixel_resp_tx);
            })
        })
        .collect::<Vec<_>>();
    // Drop this thread's copies of the channel endpoints: workers shut down
    // when all request senders are gone, and the collection loop below ends
    // when all response senders (one clone per worker) are gone.
    drop(pixel_req_rx);
    drop(pixel_resp_tx);
    let start_time = time::Instant::now();
    let (w, h) = (scene.width, scene.height);
    // Producer thread: enqueues one request per scanline (or per pixel).
    handles.push(thread::spawn(move || {
        let batch_line_requests = true;
        if batch_line_requests {
            for y in 0..h {
                pixel_req_tx
                    .send(Request::Line { width: w, y })
                    .expect("failed to send line request");
            }
        } else {
            for y in 0..h {
                for x in 0..w {
                    pixel_req_tx
                        .send(Request::Pixel { x, y })
                        .expect("failed to send pixel request");
                }
            }
        }
        // Closing the request channel signals the workers to exit.
        drop(pixel_req_tx);
    }));
    info!("Rendering with {} subsamples", scene.subsamples);
    let pixel_total = scene.width * scene.height;
    let mut last_time = time::Instant::now();
    let mut last_stat: RenderStats = Default::default();
    let mut current_stat: RenderStats = Default::default();
    // Collect results until every worker has dropped its response sender.
    for resp in pixel_resp_rx {
        match resp {
            Response::Pixel { x, y, pixel, rs } => {
                current_stat += rs;
                output::set_pixel(output::MAIN_IMAGE, x, y, pixel);
            }
            Response::Line { y, pixels, rs } => {
                current_stat += rs;
                for (x, pixel) in pixels.iter().enumerate() {
                    output::set_pixel(output::MAIN_IMAGE, x, y, *pixel);
                }
            }
        }
        // Throttle progress logging to roughly once per second.
        let now = time::Instant::now();
        let time_diff = now - last_time;
        if time_diff > time::Duration::from_secs(1) {
            info!(
                "{}",
                progress(&last_stat, &current_stat, time_diff, pixel_total)
            );
            last_stat = current_stat;
            last_time = now;
        }
    }
    for thr in handles {
        thr.join().expect("thread join");
    }
    let time_diff = time::Instant::now() - start_time;
    info!(
        "Runtime {} seconds {}",
        time_diff.as_secs_f32(),
        progress(&Default::default(), &current_stat, time_diff, pixel_total)
    );
    output::write_images(&scene, time_diff, output_dir)
}