Example #1
    def test_antialiasing(self):
        num_of_rays = 0
        small_image = HdrImage(width=1, height=1)
        camera = OrthogonalCamera(aspect_ratio=1)
        tracer = ImageTracer(small_image,
                             camera,
                             samples_per_side=10,
                             pcg=PCG())

        def trace_ray(ray: Ray) -> Color:
            nonlocal num_of_rays
            point = ray.at(1)

            # Check that all the rays intersect the screen within the region [−1, 1] × [−1, 1]
            assert pytest.approx(0.0) == point.x
            assert -1.0 <= point.y <= 1.0
            assert -1.0 <= point.z <= 1.0

            num_of_rays += 1

            return Color(0.0, 0.0, 0.0)

        tracer.fire_all_rays(trace_ray)

        # Check that the number of rays that were fired is what we expect (10²)
        assert num_of_rays == 100
Example #2
    def testFlatRenderer(self):
        sphere_color = Color(1.0, 2.0, 3.0)
        sphere = Sphere(transformation=translation(Vec(2, 0, 0)) *
                        scaling(Vec(0.2, 0.2, 0.2)),
                        material=Material(brdf=DiffuseBRDF(
                            pigment=UniformPigment(sphere_color))))
        image = HdrImage(width=3, height=3)
        camera = OrthogonalCamera()
        tracer = ImageTracer(image=image, camera=camera)
        world = World()
        world.add_shape(sphere)
        renderer = FlatRenderer(world=world)
        tracer.fire_all_rays(renderer)

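        # Only the ray through the central pixel (1, 1) hits the sphere, so every other pixel must stay black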
        assert image.get_pixel(0, 0).is_close(BLACK)
        assert image.get_pixel(1, 0).is_close(BLACK)
        assert image.get_pixel(2, 0).is_close(BLACK)

        assert image.get_pixel(0, 1).is_close(BLACK)
        assert image.get_pixel(1, 1).is_close(sphere_color)
        assert image.get_pixel(2, 1).is_close(BLACK)

        assert image.get_pixel(0, 2).is_close(BLACK)
        assert image.get_pixel(1, 2).is_close(BLACK)
        assert image.get_pixel(2, 2).is_close(BLACK)
Example #3
class TestImageTracer(unittest.TestCase):
    def setUp(self) -> None:
        self.image = HdrImage(width=4, height=2)
        self.camera = PerspectiveCamera(aspect_ratio=2)
        self.tracer = ImageTracer(image=self.image, camera=self.camera)

    def test_orientation(self):
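        # The ray through pixel (0, 0) with offsets (0, 0) points at the top-left corner of the screen: at t = 1 it must reach (0, 2, 1)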
        top_left_ray = self.tracer.fire_ray(0, 0, u_pixel=0.0, v_pixel=0.0)
        assert Point(0.0, 2.0, 1.0).is_close(top_left_ray.at(1.0))

        bottom_right_ray = self.tracer.fire_ray(3, 1, u_pixel=1.0, v_pixel=1.0)
        assert Point(0.0, -2.0, -1.0).is_close(bottom_right_ray.at(1.0))

    def test_uv_sub_mapping(self):
        # Here we're cheating: we are asking `ImageTracer.fire_ray` to fire one ray *outside*
        # the pixel we're specifying
        ray1 = self.tracer.fire_ray(0, 0, u_pixel=2.5, v_pixel=1.5)
        ray2 = self.tracer.fire_ray(2, 1, u_pixel=0.5, v_pixel=0.5)
        assert ray1.is_close(ray2)

    def test_image_coverage(self):
        self.tracer.fire_all_rays(lambda ray: Color(1.0, 2.0, 3.0))
        for row in range(self.image.height):
            for col in range(self.image.width):
                assert self.image.get_pixel(col, row) == Color(1.0, 2.0, 3.0)
Example #4
def demo(width, height, angle_deg, orthogonal, pfm_output, png_output):
    image = HdrImage(width, height)

    # Create a world and populate it with a few shapes
    world = World()

    for x in [-0.5, 0.5]:
        for y in [-0.5, 0.5]:
            for z in [-0.5, 0.5]:
                world.add(
                    Sphere(transformation=translation(Vec(x, y, z)) *
                           scaling(Vec(0.1, 0.1, 0.1))))

    # Place two other balls in the bottom/left part of the cube, so
    # that we can check if there are issues with the orientation of
    # the image
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.0, -0.5)) *
               scaling(Vec(0.1, 0.1, 0.1))))
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.5, 0.0)) *
               scaling(Vec(0.1, 0.1, 0.1))))

    # Initialize a camera
    camera_tr = rotation_z(angle_deg=angle_deg) * translation(
        Vec(-1.0, 0.0, 0.0))
    if orthogonal:
        camera = OrthogonalCamera(aspect_ratio=width / height,
                                  transformation=camera_tr)
    else:
        camera = PerspectiveCamera(aspect_ratio=width / height,
                                   transformation=camera_tr)

    # Run the ray-tracer

    tracer = ImageTracer(image=image, camera=camera)

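    # On/off tracing: return white if the ray hits any shape in the world, black otherwise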
    def compute_color(ray: Ray) -> Color:
        if world.ray_intersection(ray):
            return WHITE
        else:
            return BLACK

    tracer.fire_all_rays(compute_color)

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Apply tone-mapping to the image
    image.normalize_image(factor=1.0)
    image.clamp_image()

    # Save the LDR image
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")
Example #5
class TestImageTracer(unittest.TestCase):
    def setUp(self) -> None:
        self.image = HdrImage(width=4, height=2)
        self.camera = PerspectiveCamera(aspect_ratio=2)
        self.tracer = ImageTracer(image=self.image, camera=self.camera)

    def test_orientation(self):
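        # The ray through pixel (0, 0) with offsets (0, 0) points at the top-left corner of the screen: at t = 1 it must reach (0, 2, 1)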
        top_left_ray = self.tracer.fire_ray(0, 0, u_pixel=0.0, v_pixel=0.0)
        assert Point(0.0, 2.0, 1.0).is_close(top_left_ray.at(1.0))

        bottom_right_ray = self.tracer.fire_ray(3, 1, u_pixel=1.0, v_pixel=1.0)
        assert Point(0.0, -2.0, -1.0).is_close(bottom_right_ray.at(1.0))

    def test_uv_sub_mapping(self):
        # Here we're cheating: we are asking `ImageTracer.fire_ray` to fire one ray *outside*
        # the pixel we're specifying
        ray1 = self.tracer.fire_ray(0, 0, u_pixel=2.5, v_pixel=1.5)
        ray2 = self.tracer.fire_ray(2, 1, u_pixel=0.5, v_pixel=0.5)
        assert ray1.is_close(ray2)

    def test_image_coverage(self):
        self.tracer.fire_all_rays(lambda ray: Color(1.0, 2.0, 3.0))
        for row in range(self.image.height):
            for col in range(self.image.width):
                assert self.image.get_pixel(col, row) == Color(1.0, 2.0, 3.0)

    def test_antialiasing(self):
        num_of_rays = 0
        small_image = HdrImage(width=1, height=1)
        camera = OrthogonalCamera(aspect_ratio=1)
        tracer = ImageTracer(small_image,
                             camera,
                             samples_per_side=10,
                             pcg=PCG())

        def trace_ray(ray: Ray) -> Color:
            nonlocal num_of_rays
            point = ray.at(1)

            # Check that all the rays intersect the screen within the region [−1, 1] × [−1, 1]
            assert pytest.approx(0.0) == point.x
            assert -1.0 <= point.y <= 1.0
            assert -1.0 <= point.z <= 1.0

            num_of_rays += 1

            return Color(0.0, 0.0, 0.0)

        tracer.fire_all_rays(trace_ray)

        # Check that the number of rays that were fired is what we expect (10²)
        assert num_of_rays == 100
Example #6
def demo(width, height, algorithm, pfm_output, png_output, num_of_rays,
         max_depth, init_state, init_seq, samples_per_pixel, declare_float,
         input_scene_name):
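    # Each pixel is sampled samples_per_side × samples_per_side times, so samples_per_pixel must be a perfect square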
    samples_per_side = int(sqrt(samples_per_pixel))
    if samples_per_side**2 != samples_per_pixel:
        print(
            f"Error, the number of samples per pixel ({samples_per_pixel}) must be a perfect square"
        )
        return

    variables = build_variable_table(declare_float)

    with open(input_scene_name, "rt") as f:
        try:
            scene = parse_scene(input_file=InputStream(
                stream=f, file_name=input_scene_name),
                                variables=variables)
        except GrammarError as e:
            loc = e.location
            print(f"{loc.file_name}:{loc.line_num}:{loc.col_num}: {e.message}")
            sys.exit(1)

    image = HdrImage(width, height)
    print(f"Generating a {width}×{height} image")

    # Run the ray-tracer
    tracer = ImageTracer(image=image,
                         camera=scene.camera,
                         samples_per_side=samples_per_side)

    if algorithm == "onoff":
        print("Using on/off renderer")
        renderer = OnOffRenderer(world=scene.world, background_color=BLACK)
    elif algorithm == "flat":
        print("Using flat renderer")
        renderer = FlatRenderer(world=scene.world, background_color=BLACK)
    elif algorithm == "pathtracing":
        print("Using a path tracer")
        renderer = PathTracer(
            world=scene.world,
            pcg=PCG(init_state=init_state, init_seq=init_seq),
            num_of_rays=num_of_rays,
            max_depth=max_depth,
        )
    elif algorithm == "pointlight":
        print("Using a point-light tracer")
        renderer = PointLightRenderer(world=scene.world,
                                      background_color=BLACK)
    else:
        print(f"Unknown renderer: {algorithm}")
        sys.exit(1)

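    # Progress callback passed to fire_all_rays: the trailing \r keeps the counter on a single terminal line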
    def print_progress(row, col):
        print(f"Rendering row {row + 1}/{image.height}\r", end="")

    start_time = process_time()
    tracer.fire_all_rays(renderer, callback=print_progress)
    elapsed_time = process_time() - start_time

    print(f"Rendering completed in {elapsed_time:.1f} s")

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Apply tone-mapping to the image
    image.normalize_image(factor=1.0)
    image.clamp_image()

    # Save the LDR image
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")