def test_average_luminosity(self):
    """The average luminosity of the two reference pixels must be 100."""
    image = HdrImage(2, 1)
    image.set_pixel(0, 0, Color(0.5e1, 1.0e1, 1.5e1))
    image.set_pixel(1, 0, Color(0.5e3, 1.0e3, 1.5e3))
    # delta=0.0 disables the log-average regularization term
    assert image.average_luminosity(delta=0.0) == pytest.approx(100.0)
def test_coordinates(self):
    """Boundary checks for HdrImage.valid_coordinates on a 7x4 image."""
    image = HdrImage(7, 4)
    # Both corners of the valid range are accepted
    for col, row in ((0, 0), (6, 3)):
        assert image.valid_coordinates(col, row)
    # Negative indices are rejected
    for col, row in ((-1, 0), (0, -1)):
        assert not image.valid_coordinates(col, row)
def demo(width, height, angle_deg, orthogonal, pfm_output, png_output):
    """Render the demo scene and save it both as a PFM (HDR) and a PNG (LDR) file.

    The scene holds ten spheres scaled to radius 0.1: eight sitting on the
    corners of a cube with edge 1.0, plus two more on the bottom/left part
    of the cube so that any flip in the image orientation becomes visible.
    """
    image = HdrImage(width, height)

    # Populate the world: the same scaling is shared by every sphere
    world = World()
    shrink = scaling(Vec(0.1, 0.1, 0.1))
    for corner_x in [-0.5, 0.5]:
        for corner_y in [-0.5, 0.5]:
            for corner_z in [-0.5, 0.5]:
                world.add(
                    Sphere(transformation=translation(
                        Vec(corner_x, corner_y, corner_z)) * shrink))

    # Two extra spheres used as orientation markers
    world.add(Sphere(transformation=translation(Vec(0.0, 0.0, -0.5)) * shrink))
    world.add(Sphere(transformation=translation(Vec(0.0, 0.5, 0.0)) * shrink))

    # Camera: rotate around z by angle_deg, then step back one unit along -x
    camera_tr = rotation_z(angle_deg=angle_deg) * translation(
        Vec(-1.0, 0.0, 0.0))
    camera_type = OrthogonalCamera if orthogonal else PerspectiveCamera
    camera = camera_type(aspect_ratio=width / height, transformation=camera_tr)

    tracer = ImageTracer(image=image, camera=camera)

    def compute_color(ray: Ray) -> Color:
        # On/off shading: white on any hit, black otherwise
        return WHITE if world.ray_intersection(ray) else BLACK

    tracer.fire_all_rays(compute_color)

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Tone-map, then save the LDR image
    image.normalize_image(factor=1.0)
    image.clamp_image()
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")
def testFlatRenderer(self):
    """A flat renderer paints the sphere's pigment where it is hit, black elsewhere."""
    sphere_color = Color(1.0, 2.0, 3.0)
    sphere = Sphere(
        transformation=translation(Vec(2, 0, 0)) * scaling(Vec(0.2, 0.2, 0.2)),
        material=Material(brdf=DiffuseBRDF(
            pigment=UniformPigment(sphere_color))))
    image = HdrImage(width=3, height=3)
    camera = OrthogonalCamera()
    tracer = ImageTracer(image=image, camera=camera)
    world = World()
    world.add_shape(sphere)
    renderer = FlatRenderer(world=world)
    tracer.fire_all_rays(renderer)

    # Only the central pixel (1, 1) sees the sphere; all others stay black
    for row in range(3):
        for col in range(3):
            expected = sphere_color if (col, row) == (1, 1) else BLACK
            assert image.get_pixel(col, row).is_close(expected)
def test_antialiasing(self):
    """With samples_per_side=10, exactly 100 rays are fired, all on the screen."""
    fired = 0
    small_image = HdrImage(width=1, height=1)
    camera = OrthogonalCamera(aspect_ratio=1)
    tracer = ImageTracer(small_image, camera, samples_per_side=10, pcg=PCG())

    def trace_ray(ray: Ray) -> Color:
        nonlocal fired
        hit_point = ray.at(1)
        # Every ray must cross the screen inside the region [-1, 1] x [-1, 1]
        assert pytest.approx(0.0) == hit_point.x
        assert -1.0 <= hit_point.y <= 1.0
        assert -1.0 <= hit_point.z <= 1.0
        fired += 1
        return Color(0.0, 0.0, 0.0)

    tracer.fire_all_rays(trace_ray)
    # 10 samples per side -> 10 * 10 rays in total
    assert fired == 100
class TestImageTracer(unittest.TestCase):
    """Tests for ImageTracer over a 4x2 image and a perspective camera."""

    def setUp(self) -> None:
        # Shared fixture: every test uses the same image/camera/tracer triple
        self.image = HdrImage(width=4, height=2)
        self.camera = PerspectiveCamera(aspect_ratio=2)
        self.tracer = ImageTracer(image=self.image, camera=self.camera)

    def test_orientation(self):
        # (u, v) = (0, 0) is the top-left corner of the screen...
        ray = self.tracer.fire_ray(0, 0, u_pixel=0.0, v_pixel=0.0)
        assert Point(0.0, 2.0, 1.0).is_close(ray.at(1.0))
        # ...and (u, v) = (1, 1) on the last pixel is the bottom-right corner
        ray = self.tracer.fire_ray(3, 1, u_pixel=1.0, v_pixel=1.0)
        assert Point(0.0, -2.0, -1.0).is_close(ray.at(1.0))

    def test_uv_sub_mapping(self):
        # Deliberately fire outside pixel (0, 0): with (u, v) = (2.5, 1.5)
        # the ray must coincide with the one through the center of (2, 1)
        first = self.tracer.fire_ray(0, 0, u_pixel=2.5, v_pixel=1.5)
        second = self.tracer.fire_ray(2, 1, u_pixel=0.5, v_pixel=0.5)
        assert first.is_close(second)

    def test_image_coverage(self):
        # fire_all_rays must visit every pixel of the image
        self.tracer.fire_all_rays(lambda ray: Color(1.0, 2.0, 3.0))
        for y in range(self.image.height):
            for x in range(self.image.width):
                assert self.image.get_pixel(x, y) == Color(1.0, 2.0, 3.0)
def test_normalize_image(self):
    """normalize_image must scale every component by factor / luminosity."""
    image = HdrImage(2, 1)
    image.set_pixel(0, 0, Color(0.5e1, 1.0e1, 1.5e1))
    image.set_pixel(1, 0, Color(0.5e3, 1.0e3, 1.5e3))
    # factor / luminosity = 1000 / 100 = 10x on every component
    image.normalize_image(factor=1000.0, luminosity=100.0)
    assert image.get_pixel(0, 0).is_close(Color(0.5e2, 1.0e2, 1.5e2))
    assert image.get_pixel(1, 0).is_close(Color(0.5e4, 1.0e4, 1.5e4))
class TestImageTracer(unittest.TestCase):
    """Tests for ImageTracer: orientation, sub-pixel mapping, coverage, antialiasing."""

    def setUp(self) -> None:
        # Shared fixture: a 4x2 image seen through a perspective camera
        self.image = HdrImage(width=4, height=2)
        self.camera = PerspectiveCamera(aspect_ratio=2)
        self.tracer = ImageTracer(image=self.image, camera=self.camera)

    def test_orientation(self):
        # (u, v) = (0, 0) on pixel (0, 0) hits the top-left screen corner...
        ray = self.tracer.fire_ray(0, 0, u_pixel=0.0, v_pixel=0.0)
        assert Point(0.0, 2.0, 1.0).is_close(ray.at(1.0))
        # ...and (u, v) = (1, 1) on pixel (3, 1) the bottom-right one
        ray = self.tracer.fire_ray(3, 1, u_pixel=1.0, v_pixel=1.0)
        assert Point(0.0, -2.0, -1.0).is_close(ray.at(1.0))

    def test_uv_sub_mapping(self):
        # Deliberately fire outside pixel (0, 0): with (u, v) = (2.5, 1.5)
        # the ray must coincide with the one through the center of (2, 1)
        first = self.tracer.fire_ray(0, 0, u_pixel=2.5, v_pixel=1.5)
        second = self.tracer.fire_ray(2, 1, u_pixel=0.5, v_pixel=0.5)
        assert first.is_close(second)

    def test_image_coverage(self):
        # fire_all_rays must visit every pixel of the image
        self.tracer.fire_all_rays(lambda ray: Color(1.0, 2.0, 3.0))
        for y in range(self.image.height):
            for x in range(self.image.width):
                assert self.image.get_pixel(x, y) == Color(1.0, 2.0, 3.0)

    def test_antialiasing(self):
        # With samples_per_side=10, exactly 100 rays must be fired
        fired = 0
        small_image = HdrImage(width=1, height=1)
        camera = OrthogonalCamera(aspect_ratio=1)
        tracer = ImageTracer(small_image, camera, samples_per_side=10, pcg=PCG())

        def trace_ray(ray: Ray) -> Color:
            nonlocal fired
            hit_point = ray.at(1)
            # Every ray must cross the screen inside [-1, 1] x [-1, 1]
            assert pytest.approx(0.0) == hit_point.x
            assert -1.0 <= hit_point.y <= 1.0
            assert -1.0 <= hit_point.z <= 1.0
            fired += 1
            return Color(0.0, 0.0, 0.0)

        tracer.fire_all_rays(trace_ray)
        assert fired == 100
def test_clamp_image(self):
    """After clamping, every component of every pixel lies in [0, 1]."""
    image = HdrImage(2, 1)
    image.set_pixel(0, 0, Color(0.5e1, 1.0e1, 1.5e1))
    image.set_pixel(1, 0, Color(0.5e3, 1.0e3, 1.5e3))
    image.clamp_image()
    for pixel in image.pixels:
        for component in (pixel.r, pixel.g, pixel.b):
            assert 0 <= component <= 1
def testImagePigment(self):
    """An ImagePigment must map the (u, v) corners onto the matching pixels."""
    image = HdrImage(width=2, height=2)
    # One distinctive color per pixel of the 2x2 source image
    corner_colors = {
        (0, 0): Color(1.0, 2.0, 3.0),
        (1, 0): Color(2.0, 3.0, 1.0),
        (0, 1): Color(2.0, 1.0, 3.0),
        (1, 1): Color(3.0, 2.0, 1.0),
    }
    for (col, row), color in corner_colors.items():
        image.set_pixel(col, row, color)

    pigment = ImagePigment(image)
    for (col, row), color in corner_colors.items():
        assert pigment.get_color(Vec2d(float(col), float(row))).is_close(color)
def setUp(self) -> None:
    """Shared fixture: a 4x2 image, a perspective camera, and the tracer under test."""
    self.camera = PerspectiveCamera(aspect_ratio=2)
    self.image = HdrImage(width=4, height=2)
    self.tracer = ImageTracer(image=self.image, camera=self.camera)
def test_pfm_save(self):
    """write_pfm must reproduce the reference streams in both endiannesses."""
    image = HdrImage(3, 2)
    # Distinctive values: row 0 uses 10..90, row 1 uses 100..900
    pixel_values = [
        (0, 0, Color(1.0e1, 2.0e1, 3.0e1)),
        (1, 0, Color(4.0e1, 5.0e1, 6.0e1)),
        (2, 0, Color(7.0e1, 8.0e1, 9.0e1)),
        (0, 1, Color(1.0e2, 2.0e2, 3.0e2)),
        (1, 1, Color(4.0e2, 5.0e2, 6.0e2)),
        (2, 1, Color(7.0e2, 8.0e2, 9.0e2)),
    ]
    for col, row, color in pixel_values:
        image.set_pixel(col, row, color)

    for endianness, reference in (
            (Endianness.LITTLE_ENDIAN, LE_REFERENCE_BYTES),
            (Endianness.BIG_ENDIAN, BE_REFERENCE_BYTES),
    ):
        buf = BytesIO()
        image.write_pfm(buf, endianness=endianness)
        assert buf.getvalue() == reference
def test_get_set_pixel(self):
    """A color written with set_pixel must be read back by get_pixel."""
    image = HdrImage(7, 4)
    color = Color(1.0, 2.0, 3.0)
    image.set_pixel(3, 2, color)
    assert color.is_close(image.get_pixel(3, 2))
def test_pixel_offset(self):
    """pixel_offset must compute the row-major index row * width + column."""
    image = HdrImage(7, 4)
    assert image.pixel_offset(0, 0) == 0
    assert image.pixel_offset(3, 2) == 17  # 2 * 7 + 3
    # The last pixel maps onto the last slot of the buffer
    assert image.pixel_offset(6, 3) == 7 * 4 - 1
def test_image_creation(self):
    """The constructor must store width and height as given."""
    image = HdrImage(7, 4)
    assert (image.width, image.height) == (7, 4)
def demo(width, height, algorithm, pfm_output, png_output, num_of_rays,
         max_depth, init_state, init_seq, samples_per_pixel, declare_float,
         input_scene_name):
    """Parse a scene file, render it with the chosen algorithm, and save the
    result both as a PFM (HDR) and a PNG (LDR) file.

    Exits with status 1 on a scene grammar error or an unknown algorithm;
    returns early if samples_per_pixel is not a perfect square.
    """
    # Antialiasing requires a square grid of sub-samples per pixel
    samples_per_side = int(sqrt(samples_per_pixel))
    if samples_per_side ** 2 != samples_per_pixel:
        print(
            f"Error, the number of samples per pixel ({samples_per_pixel}) must be a perfect square"
        )
        return

    # Parse the input scene, reporting grammar errors with their location
    variables = build_variable_table(declare_float)
    with open(input_scene_name, "rt") as f:
        try:
            scene = parse_scene(
                input_file=InputStream(stream=f, file_name=input_scene_name),
                variables=variables,
            )
        except GrammarError as e:
            loc = e.location
            print(f"{loc.file_name}:{loc.line_num}:{loc.col_num}: {e.message}")
            sys.exit(1)

    image = HdrImage(width, height)
    print(f"Generating a {width}×{height} image")

    tracer = ImageTracer(image=image,
                         camera=scene.camera,
                         samples_per_side=samples_per_side)

    # Pick the renderer requested on the command line
    if algorithm == "onoff":
        print("Using on/off renderer")
        renderer = OnOffRenderer(world=scene.world, background_color=BLACK)
    elif algorithm == "flat":
        print("Using flat renderer")
        renderer = FlatRenderer(world=scene.world, background_color=BLACK)
    elif algorithm == "pathtracing":
        print("Using a path tracer")
        renderer = PathTracer(
            world=scene.world,
            pcg=PCG(init_state=init_state, init_seq=init_seq),
            num_of_rays=num_of_rays,
            max_depth=max_depth,
        )
    elif algorithm == "pointlight":
        print("Using a point-light tracer")
        renderer = PointLightRenderer(world=scene.world, background_color=BLACK)
    else:
        print(f"Unknown renderer: {algorithm}")
        sys.exit(1)

    def print_progress(row, col):
        # Rewrite the same terminal line as the rendering advances
        print(f"Rendering row {row + 1}/{image.height}\r", end="")

    start_time = process_time()
    tracer.fire_all_rays(renderer, callback=print_progress)
    elapsed_time = process_time() - start_time
    print(f"Rendering completed in {elapsed_time:.1f} s")

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Tone-map, then save the LDR image
    image.normalize_image(factor=1.0)
    image.clamp_image()
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")