def demo(width, height, angle_deg, orthogonal, pfm_output, png_output):
    image = HdrImage(width, height)

    # Create a world and populate it with a few shapes
    world = World()
    for x in [-0.5, 0.5]:
        for y in [-0.5, 0.5]:
            for z in [-0.5, 0.5]:
                world.add(
                    Sphere(transformation=translation(Vec(x, y, z)) *
                           scaling(Vec(0.1, 0.1, 0.1))))

    # Place two other balls in the bottom/left part of the cube, so
    # that we can check if there are issues with the orientation of
    # the image
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.0, -0.5)) *
               scaling(Vec(0.1, 0.1, 0.1))))
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.5, 0.0)) *
               scaling(Vec(0.1, 0.1, 0.1))))

    # Initialize a camera
    camera_tr = rotation_z(angle_deg=angle_deg) * translation(
        Vec(-1.0, 0.0, 0.0))
    if orthogonal:
        camera = OrthogonalCamera(aspect_ratio=width / height,
                                  transformation=camera_tr)
    else:
        camera = PerspectiveCamera(aspect_ratio=width / height,
                                   transformation=camera_tr)

    # Run the ray-tracer
    tracer = ImageTracer(image=image, camera=camera)

    def compute_color(ray: Ray) -> Color:
        if world.ray_intersection(ray):
            return WHITE
        else:
            return BLACK

    tracer.fire_all_rays(compute_color)

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Apply tone-mapping to the image
    image.normalize_image(factor=1.0)
    image.clamp_image()

    # Save the LDR image
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")
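A minimal invocation sketch: the argument values and output file names below are illustrative, not fixed by the function itself.

# Render a 640x480 frame with a perspective camera rotated by 30 degrees
# around the z axis; file names are placeholders.
demo(width=640, height=480, angle_deg=30.0, orthogonal=False,
     pfm_output="demo.pfm", png_output="demo.png")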
def test_scalings(self):
    tr1 = scaling(Vec(2.0, 5.0, 10.0))
    assert tr1.is_consistent()

    tr2 = scaling(Vec(3.0, 2.0, 4.0))
    assert tr2.is_consistent()

    expected = scaling(Vec(6.0, 10.0, 40.0))
    assert expected.is_close(tr1 * tr2)
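The expected value follows because homogeneous scaling matrices are diagonal, so their product multiplies the scale factors component-wise. A quick standalone check (numpy is an assumption here, not part of the test suite):

import numpy as np

# diag(2, 5, 10, 1) @ diag(3, 2, 4, 1) == diag(6, 10, 40, 1)
a = np.diag([2.0, 5.0, 10.0, 1.0])
b = np.diag([3.0, 2.0, 4.0, 1.0])
assert np.allclose(a @ b, np.diag([6.0, 10.0, 40.0, 1.0]))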
def testFlatRenderer(self):
    sphere_color = Color(1.0, 2.0, 3.0)
    sphere = Sphere(transformation=translation(Vec(2, 0, 0)) *
                    scaling(Vec(0.2, 0.2, 0.2)),
                    material=Material(brdf=DiffuseBRDF(
                        pigment=UniformPigment(sphere_color))))
    image = HdrImage(width=3, height=3)
    camera = OrthogonalCamera()
    tracer = ImageTracer(image=image, camera=camera)
    world = World()
    world.add_shape(sphere)
    renderer = FlatRenderer(world=world)
    tracer.fire_all_rays(renderer)

    assert image.get_pixel(0, 0).is_close(BLACK)
    assert image.get_pixel(1, 0).is_close(BLACK)
    assert image.get_pixel(2, 0).is_close(BLACK)

    assert image.get_pixel(0, 1).is_close(BLACK)
    assert image.get_pixel(1, 1).is_close(sphere_color)
    assert image.get_pixel(2, 1).is_close(BLACK)

    assert image.get_pixel(0, 2).is_close(BLACK)
    assert image.get_pixel(1, 2).is_close(BLACK)
    assert image.get_pixel(2, 2).is_close(BLACK)
def testNormals(self):
    sphere = Sphere(transformation=scaling(Vec(2.0, 1.0, 1.0)))

    ray = Ray(origin=Point(1.0, 1.0, 0.0), dir=Vec(-1.0, -1.0, 0.0))
    intersection = sphere.ray_intersection(ray)

    # We normalize "intersection.normal", as we are not interested in its length
    assert intersection.normal.normalize().is_close(
        Normal(1.0, 4.0, 0.0).normalize())
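Why (1, 4, 0)? Normals do not transform like directions: for a transformation with matrix M, a surface normal picks up the inverse transpose, n' ∝ (M⁻¹)ᵀ n. A standalone numpy check of the expected value (numpy and the explicit matrices here are assumptions for illustration, not the module's API):

import numpy as np

# The sphere's scaling matrix and the untransformed ray, as in the test
M = np.diag([2.0, 1.0, 1.0])
origin = np.array([1.0, 1.0, 0.0])
direction = np.array([-1.0, -1.0, 0.0])

# Intersect the inverse-transformed ray with the unit sphere |o + t*d| = 1
o, d = np.linalg.inv(M) @ origin, np.linalg.inv(M) @ direction
a, b, c = d @ d, 2 * (o @ d), o @ o - 1.0
t = (-b - np.sqrt(b * b - 4 * a * c)) / (2 * a)  # first hit
unit_normal = o + t * d  # on the unit sphere, the normal equals the hit point

# Map the normal back to world space with the inverse transpose of M
world_normal = np.linalg.inv(M).T @ unit_normal
world_normal /= np.linalg.norm(world_normal)
expected = np.array([1.0, 4.0, 0.0]) / np.linalg.norm([1.0, 4.0, 0.0])
assert np.allclose(world_normal, expected)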
def testNormalDirection(self):
    # Scaling a sphere by -1 keeps the sphere the same but reverses its
    # reference frame
    sphere = Sphere(transformation=scaling(Vec(-1.0, -1.0, -1.0)))

    ray = Ray(origin=Point(0.0, 2.0, 0.0), dir=-VEC_Y)
    intersection = sphere.ray_intersection(ray)

    # We normalize "intersection.normal", as we are not interested in its length
    assert intersection.normal.normalize().is_close(
        Normal(0.0, 1.0, 0.0).normalize())
def parse_transformation(input_file, scene: Scene):
    result = Transformation()

    while True:
        transformation_kw = expect_keywords(input_file, [
            KeywordEnum.IDENTITY,
            KeywordEnum.TRANSLATION,
            KeywordEnum.ROTATION_X,
            KeywordEnum.ROTATION_Y,
            KeywordEnum.ROTATION_Z,
            KeywordEnum.SCALING,
        ])

        if transformation_kw == KeywordEnum.IDENTITY:
            pass  # Do nothing (this is a primitive form of optimization!)
        elif transformation_kw == KeywordEnum.TRANSLATION:
            expect_symbol(input_file, "(")
            result *= translation(parse_vector(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.ROTATION_X:
            expect_symbol(input_file, "(")
            result *= rotation_x(expect_number(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.ROTATION_Y:
            expect_symbol(input_file, "(")
            result *= rotation_y(expect_number(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.ROTATION_Z:
            expect_symbol(input_file, "(")
            result *= rotation_z(expect_number(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.SCALING:
            expect_symbol(input_file, "(")
            result *= scaling(parse_vector(input_file, scene))
            expect_symbol(input_file, ")")

        # We must peek at the next token to check whether another transformation
        # is being chained or the sequence ends. This makes it an LL(1) parser.
        next_kw = input_file.read_token()
        if (not isinstance(next_kw, SymbolToken)) or (next_kw.symbol != "*"):
            # Pretend you never read this token and put it back!
            input_file.unread_token(next_kw)
            break

    return result
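For reference, a sketch of how the parser might be driven. The "InputStream" wrapper, the Scene() constructor call, and the bracketed-vector literal syntax are assumptions about the surrounding lexer and parse_vector, not guaranteed API:

from io import StringIO

# Chained transformations separated by "*", as the loop above expects
source = "translation([1, 2, 3]) * rotation_z(30) * scaling([0.5, 0.5, 0.5])"
stream = InputStream(StringIO(source))
tr = parse_transformation(stream, Scene())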
import transformations as trans
import random as rand
import numpy as np

for test in range(100):
    # Pick three random source points (six coordinates in [0, 500])
    arb = rand.sample(range(0, 501), 6)
    sourcePoints = np.array([[arb[0], arb[1]],
                             [arb[2], arb[3]],
                             [arb[4], arb[5]]])
    homo_sourcePoints = trans.make_homogeneous(sourcePoints)

    # Build a random compound transformation: scaling, translation, rotation
    sx, sy = rand.sample(range(-10, 11), 2)
    s = trans.scaling(sx, sy)
    tx, ty = rand.sample(range(-400, 401), 2)
    t = trans.translating(tx, ty)
    θ = rand.randint(-360, 360)
    r = trans.rotating(θ)
    compound = trans.combine(s, t, r)

    # Transform the source points, then recover the transformation from
    # the point correspondences alone
    targetPoint = compound @ homo_sourcePoints.T
    euc_targetPoint = trans.make_euclidean(targetPoint.T)
    aff = trans.learn_affine(sourcePoints, euc_targetPoint)

    np.testing.assert_array_almost_equal(
        x=compound,
        y=aff,
        decimal=4,
        err_msg='The affine transformation inferred by learn_affine() does '
                'not match the compound transformation matrix it was '
                'generated from.')

print("All 100 tests passed")
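For context, learn_affine can be implemented by solving a small linear system: each of the three point correspondences (x, y) -> (u, v) contributes two equations in the six affine parameters. A minimal sketch under that assumption (this is not necessarily the module's actual implementation):

import numpy as np

def learn_affine_sketch(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    """Recover the 3x3 affine matrix mapping 2D points src onto dst."""
    # Unknowns a..f of [[a, b, c], [d, e, f], [0, 0, 1]]; each
    # correspondence yields two rows of the linear system.
    A, rhs = [], []
    for (x, y), (u, v) in zip(src, dst):
        A.append([x, y, 1, 0, 0, 0])
        A.append([0, 0, 0, x, y, 1])
        rhs.extend([u, v])
    params, *_ = np.linalg.lstsq(np.array(A, float), np.array(rhs, float),
                                 rcond=None)
    a, b, c, d, e, f = params
    return np.array([[a, b, c], [d, e, f], [0.0, 0.0, 1.0]])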