def test_transform1(self):
    """The view transform for the default orientation is the identity matrix."""
    eye = Point(0, 0, 0)
    target = Point(0, 0, -1)
    up = Vector(0, 1, 0)
    result = view_transform(eye, target, up)
    identity = Matrix([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])
    self.assertTrue(matrix.equals(identity, result))
def test_transform2(self):
    """Looking along +z mirrors x and z, i.e. equals scale(-1, 1, -1)."""
    eye = Point(0, 0, 0)
    target = Point(0, 0, 1)
    up = Vector(0, 1, 0)
    result = view_transform(eye, target, up)
    expected = scale(-1, 1, -1)
    self.assertTrue(matrix.equals(expected, result))
def test_camera7(self):
    """Rendering the default world yields the known color at the center pixel."""
    world = World()
    cam = Camera(11, 11, pi / 2)
    cam.transform = view_transform(Point(0, 0, -5), Point(0, 0, 0),
                                   Vector(0, 1, 0))
    image = cam.render(world)
    expected = Color(0.38066, 0.47583, 0.2855)
    self.assertTrue(expected.equals(image.pixel_at(5, 5)))
def update(self):
    """Recompute the camera frame and view transform, and drop stale caches.

    Rebuilds ``frame_origin`` and ``view_transform`` from the current
    position/look_at/up, then removes any lazily computed view attributes
    so they are regenerated on next access.
    """
    self.frame_origin = get_cam_frame(self.position, self.look_at, self.up)
    self.view_transform = view_transform(self.position, self.look_at,
                                         self.up)
    # Invalidate cached derived views; they depend on the matrices above.
    for cached in ('world_view', 'eye_view', 'clip_view'):
        if hasattr(self, cached):
            delattr(self, cached)
def test_transform4(self):
    """An arbitrary eye/target/up orientation produces the known matrix."""
    eye = Point(1, 3, 2)
    target = Point(4, -2, 8)
    up = Vector(1, 1, 0)
    result = view_transform(eye, target, up)
    expected = Matrix([
        [-0.50709, 0.50709, 0.67612, -2.36643],
        [0.76772, 0.60609, 0.12122, -2.82843],
        [-0.35857, 0.59761, -0.71714, 0.00000],
        [0.00000, 0.00000, 0.00000, 1.00000],
    ])
    self.assertTrue(matrix.equals(expected, result))
def world_to_device(point, forward=True, **kwargs):
    """Map a point between world and device (pixel) coordinates.

    With ``forward=True`` the point runs through the view, perspective
    projection and viewport transforms, world -> device. With
    ``forward=False`` the same pipeline is inverted, device -> world.

    Keyword arguments:
        look_at -- target the camera looks at
        eye_pos -- camera position
        up      -- up-pointing vector
        fov_y   -- vertical field of view
        aspect  -- aspect ratio (width / height)
        near    -- near clipping plane
        far     -- far clipping plane
        width   -- device pixel width
        height  -- device pixel height
    """
    look_at = kwargs.get('look_at')
    eye_pos = kwargs.get('eye_pos')
    up = kwargs.get('up')
    fov_y = kwargs.get('fov_y')
    aspect = kwargs.get('aspect')
    near = kwargs.get('near')
    far = kwargs.get('far')
    width = kwargs.get('width')
    height = kwargs.get('height')

    # The three pipeline stages.
    view = transform.view_transform(eye_pos, look_at, up)               # model-view
    projection = transform.pers_projection_transform(fov_y, aspect,
                                                     near, far)        # projection
    viewport = transform.viewport_transform((width, height))           # viewport/device

    if forward:
        w_point = point
        if len(w_point) == 3:
            # Promote to homogeneous coordinates.
            w_point = np.append(w_point, [1])
        eye = view @ w_point                 # eye coordinate
        clip_raw = projection @ eye          # un-normalized clip coordinate
        clip = clip_raw / clip_raw[-1]       # normalized (divide-by-w)
        return viewport @ clip               # device coordinate

    # Backward: device_to_world — invert each stage in reverse order.
    clip = np.linalg.inv(viewport) @ point       # clip coordinate
    eye_raw = np.linalg.inv(projection) @ clip   # un-normalized eye coordinate
    eye = eye_raw / eye_raw[-1]                  # eye coordinate (divide-by-w)
    return np.linalg.inv(view) @ eye             # world coordinate
def test_transform3(self):
    """The view transform moves the world: eye at z=+8 yields translate(0, 0, -8)."""
    eye = Point(0, 0, 8)
    target = Point(0, 0, 0)
    up = Vector(0, 1, 0)
    result = view_transform(eye, target, up)
    self.assertTrue(matrix.equals(translate(0, 0, -8), result))
floor, left_wall, right_wall, # middle, # right, # left ] for i in range(10): ball = Sphere() ball.transform = translate(0, 1, 1.) * rotate_z( pi * 2 / 10 * i) * translate(1, 0, 0) * scale(0.2, 0.2, 0.2) ball.material.color = Color(i * 0.1, 1 - i * 0.1, 1 - i * 0.1) objs.append(ball) world = World() world.light = Light(Point(-10, 10, -10), Color(1, 1, 1)) world.objs = objs camera = Camera(100, 50, pi / 3) camera.transform = view_transform(Point(0, 1.5, -5), Point(0, 1, 0), Vector(0, 1, 0)) canvas = camera.render(world) with open('test_034.ppm', 'w') as fout: canvas.to_ppm(fout) t2 = time() print("Time spent: {:.2f}s".format(t2 - t1))