Example 1 — a unit test for antialiasing in ImageTracer: with samples_per_side=10 on a 1×1 image, exactly 10² = 100 stratified rays must be fired, all crossing the screen inside [−1, 1] × [−1, 1].
    def test_antialiasing(self):
        num_of_rays = 0
        small_image = HdrImage(width=1, height=1)
        camera = OrthogonalCamera(aspect_ratio=1)
        tracer = ImageTracer(small_image,
                             camera,
                             samples_per_side=10,
                             pcg=PCG())

        def trace_ray(ray: Ray) -> Color:
            nonlocal num_of_rays
            point = ray.at(1)

            # Check that all the rays intersect the screen within the region [−1, 1] × [−1, 1]
            assert pytest.approx(0.0) == point.x
            assert -1.0 <= point.y <= 1.0
            assert -1.0 <= point.z <= 1.0

            num_of_rays += 1

            return Color(0.0, 0.0, 0.0)

        tracer.fire_all_rays(trace_ray)

        # Check that the number of rays that were fired is what we expect (10²)
        assert num_of_rays == 100
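
The contract exercised above is most naturally met by stratified sampling: fire_all_rays splits each pixel into a samples_per_side × samples_per_side grid of cells and fires one jittered ray per cell. The sketch below shows one plausible inner loop; the tracer.fire_ray(col, row, u_pixel, v_pixel) signature and pcg.random_float() are interfaces assumed from the test, not confirmed library code.

    def fire_all_rays_sketch(tracer, func):
        # Sketch only: one jittered ray per sub-pixel cell (stratified sampling)
        side = tracer.samples_per_side
        for row in range(tracer.image.height):
            for col in range(tracer.image.width):
                cum_color = Color(0.0, 0.0, 0.0)
                for inter_row in range(side):
                    for inter_col in range(side):
                        # Jitter the sample position within its cell
                        u_pixel = (inter_col + tracer.pcg.random_float()) / side
                        v_pixel = (inter_row + tracer.pcg.random_float()) / side
                        ray = tracer.fire_ray(col, row,
                                              u_pixel=u_pixel, v_pixel=v_pixel)
                        cum_color += func(ray)
                # Average the samples to obtain the final pixel color
                tracer.image.set_pixel(col, row, cum_color * (1 / side**2))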
Example 2 — a unit test for FlatRenderer: a small sphere in front of the camera must color only the central pixel of a 3×3 image.
    def test_flat_renderer(self):
        sphere_color = Color(1.0, 2.0, 3.0)
        sphere = Sphere(transformation=translation(Vec(2, 0, 0)) *
                        scaling(Vec(0.2, 0.2, 0.2)),
                        material=Material(brdf=DiffuseBRDF(
                            pigment=UniformPigment(sphere_color))))
        image = HdrImage(width=3, height=3)
        camera = OrthogonalCamera()
        tracer = ImageTracer(image=image, camera=camera)
        world = World()
        world.add_shape(sphere)
        renderer = FlatRenderer(world=world)
        tracer.fire_all_rays(renderer)

        # Only the central pixel, where a ray actually hits the sphere,
        # should take the sphere's color; every other pixel stays black
        assert image.get_pixel(0, 0).is_close(BLACK)
        assert image.get_pixel(1, 0).is_close(BLACK)
        assert image.get_pixel(2, 0).is_close(BLACK)

        assert image.get_pixel(0, 1).is_close(BLACK)
        assert image.get_pixel(1, 1).is_close(sphere_color)
        assert image.get_pixel(2, 1).is_close(BLACK)

        assert image.get_pixel(0, 2).is_close(BLACK)
        assert image.get_pixel(1, 2).is_close(BLACK)
        assert image.get_pixel(2, 2).is_close(BLACK)
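
Note why only pixel (1, 1) is colored: the sphere sits at x = 2 with radius 0.2, so only the central ray of the 3×3 orthogonal grid hits it. A flat renderer then just looks up the pigment at the first intersection. The sketch below is one plausible implementation; hit.shape, hit.surface_point, and material.emitted_radiance are assumed attribute names, not the library's confirmed API.

    class FlatRendererSketch:
        # Hedged sketch of a flat (no-shading) renderer
        def __init__(self, world: World, background_color: Color = BLACK):
            self.world = world
            self.background_color = background_color

        def __call__(self, ray: Ray) -> Color:
            hit = self.world.ray_intersection(ray)
            if hit is None:
                return self.background_color

            # Evaluate the pigment (plus any emitted radiance) at the hit
            # point; the attribute names here are assumptions
            material = hit.shape.material
            return (material.brdf.pigment.get_color(hit.surface_point) +
                    material.emitted_radiance.get_color(hit.surface_point))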
Example 3 — a demo function that builds a world of ten small spheres (eight on the corners of a cube, two on its faces), renders it with an on/off tracer, and saves both a PFM and a PNG image.
def demo(width, height, angle_deg, orthogonal, pfm_output, png_output):
    image = HdrImage(width, height)

    # Create a world and populate it with a few shapes
    world = World()

    for x in [-0.5, 0.5]:
        for y in [-0.5, 0.5]:
            for z in [-0.5, 0.5]:
                world.add(
                    Sphere(transformation=translation(Vec(x, y, z)) *
                           scaling(Vec(0.1, 0.1, 0.1))))

    # Place two other balls in the bottom/left part of the cube, so
    # that we can check if there are issues with the orientation of
    # the image
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.0, -0.5)) *
               scaling(Vec(0.1, 0.1, 0.1))))
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.5, 0.0)) *
               scaling(Vec(0.1, 0.1, 0.1))))

    # Initialize a camera
    camera_tr = rotation_z(angle_deg=angle_deg) * translation(
        Vec(-1.0, 0.0, 0.0))
    if orthogonal:
        camera = OrthogonalCamera(aspect_ratio=width / height,
                                  transformation=camera_tr)
    else:
        camera = PerspectiveCamera(aspect_ratio=width / height,
                                   transformation=camera_tr)

    # Run the ray-tracer

    tracer = ImageTracer(image=image, camera=camera)

    def compute_color(ray: Ray) -> Color:
        # On/off tracing: white wherever the ray hits a shape, black elsewhere
        if world.ray_intersection(ray):
            return WHITE
        else:
            return BLACK

    tracer.fire_all_rays(compute_color)

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Apply tone-mapping to the image
    image.normalize_image(factor=1.0)
    image.clamp_image()

    # Save the LDR image
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")
Example 4 — a unit test for OrthogonalCamera: the four corner rays must be mutually parallel and hit the screen at the expected points.
    def test_orthogonal_camera(self):
        cam = OrthogonalCamera(aspect_ratio=2.0)

        # Fire one ray for each corner of the image plane
        ray1 = cam.fire_ray(0.0, 0.0)
        ray2 = cam.fire_ray(1.0, 0.0)
        ray3 = cam.fire_ray(0.0, 1.0)
        ray4 = cam.fire_ray(1.0, 1.0)

        # Verify that the rays are parallel by checking that their cross products vanish
        assert are_close(0.0, ray1.dir.cross(ray2.dir).squared_norm())
        assert are_close(0.0, ray1.dir.cross(ray3.dir).squared_norm())
        assert are_close(0.0, ray1.dir.cross(ray4.dir).squared_norm())

        # Verify that the rays hitting the corners have the right coordinates
        assert ray1.at(1.0).is_close(Point(0.0, 2.0, -1.0))
        assert ray2.at(1.0).is_close(Point(0.0, -2.0, -1.0))
        assert ray3.at(1.0).is_close(Point(0.0, 2.0, 1.0))
        assert ray4.at(1.0).is_close(Point(0.0, -2.0, 1.0))
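
These expected coordinates encode the screen conventions used throughout: rays leave the plane x = −1 parallel to +x, so at t = 1 they cross the screen plane x = 0, with y spanning [−aspect_ratio, +aspect_ratio] from u = 1 to u = 0 and z spanning [−1, +1] from v = 0 to v = 1. A fire_ray consistent with all four assertions could look like the sketch below; the Ray constructor arguments and the transform method are assumptions.

    def fire_ray_sketch(self, u: float, v: float) -> Ray:
        # Map (u, v) ∈ [0, 1]² onto the plane x = -1 and fire along +x;
        # at t = 1 the ray reaches the screen plane x = 0
        origin = Point(-1.0, (1.0 - 2 * u) * self.aspect_ratio, 2 * v - 1.0)
        return Ray(origin=origin, dir=VEC_X,
                   tmin=1.0e-5).transform(self.transformation)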
Example 5 — a parser for camera declarations: depending on the keyword read from the input stream, it builds either a PerspectiveCamera or an OrthogonalCamera.
def parse_camera(input_file: InputStream, scene) -> Camera:
    expect_symbol(input_file, "(")
    type_kw = expect_keywords(
        input_file, [KeywordEnum.PERSPECTIVE, KeywordEnum.ORTHOGONAL])
    expect_symbol(input_file, ",")
    transformation = parse_transformation(input_file, scene)
    expect_symbol(input_file, ",")
    aspect_ratio = expect_number(input_file, scene)
    expect_symbol(input_file, ",")
    distance = expect_number(input_file, scene)
    expect_symbol(input_file, ")")

    if type_kw == KeywordEnum.PERSPECTIVE:
        result = PerspectiveCamera(screen_distance=distance,
                                   aspect_ratio=aspect_ratio,
                                   transformation=transformation)
    else:
        # expect_keywords() guarantees ORTHOGONAL is the only other possibility
        result = OrthogonalCamera(aspect_ratio=aspect_ratio,
                                  transformation=transformation)

    return result
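
A hypothetical usage, assuming InputStream wraps a text stream and Scene carries the parser state (both constructors are assumptions here, as is the exact scene-file syntax; the caller is expected to have consumed the camera keyword already):

    from io import StringIO

    stream = InputStream(StringIO(
        "(perspective, rotation_z(30), 1.0, 2.0)"))
    camera = parse_camera(stream, scene=Scene())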
Example 6 — a unit test checking that an OrthogonalCamera correctly applies its transformation (a rotation followed by a translation).
    def test_orthogonal_camera_transform(self):
        cam = OrthogonalCamera(transformation=translation(-VEC_Y * 2.0) *
                               rotation_z(angle_deg=90))

        # Fire a ray through the center of the screen
        ray = cam.fire_ray(0.5, 0.5)
        assert ray.at(1.0).is_close(Point(0.0, -2.0, 0.0))
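
Reading the composed transformation right to left: rotation_z(angle_deg=90) turns the default viewing direction +x into +y and maps the default origin (−1, 0, 0) to (0, −1, 0); translation(-VEC_Y * 2.0) then shifts the origin to (0, −3, 0). One unit along the rotated direction lands on (0, −2, 0), which is exactly what the assertion checks.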