Example #1
0
 def __init__(self, *args, **kwargs):
     """Initialize base camera state, shading direction, and 3D position."""
     Camera.__init__(self, *args, **kwargs)
     # Normalize the configured sun vector once; shading only needs direction.
     sun = self.sun_vect
     self.unit_sun_vect = sun / np.linalg.norm(sun)
     # The single point of rotation_mobject encodes (phi, theta, distance).
     self.rotation_mobject = VectorizedPoint()
     # moving_center lives in ordinary x-y-z space: the center of rotation.
     self.moving_center = VectorizedPoint(self.space_center)
     self.set_position(self.phi, self.theta, self.distance)
Example #2
0
 def __init__(self, mobject, point, **kwargs):
     """Animate mobject growing out of ``point`` back into its current form."""
     digest_config(self, kwargs)
     # Remember how the mobject should look at the end of the animation.
     final_form = mobject.copy()
     anchor = VectorizedPoint(point)
     if self.point_color:
         anchor.set_color(self.point_color)
     # Collapse the mobject onto the anchor point so the Transform expands it.
     mobject.replace(anchor)
     mobject.set_color(anchor.get_color())
     Transform.__init__(self, mobject, final_form, **kwargs)
Example #3
0
 def __init__(self, decimal_number, number_update_func, **kwargs):
     """Animate ``decimal_number`` via ``number_update_func``, padding it with
     spare point submobjects so its structure can absorb digit-count changes."""
     digest_config(self, kwargs, locals())
     # All spare parts sit at the same (pre-existing) lower-left corner.
     corner = decimal_number.get_corner(DOWN + LEFT)
     spares = [VectorizedPoint(corner) for _ in range(self.spare_parts)]
     decimal_number.add(*spares)
     Animation.__init__(self, decimal_number, **kwargs)
Example #4
0
 def __init__(self, decimal_number_mobject, number_update_func, **kwargs):
     """Animate a decimal mobject, optionally following a tracked mobject."""
     digest_config(self, kwargs, locals())
     # Inherit decimal precision from the mobject unless explicitly configured.
     if self.num_decimal_points is None:
         self.num_decimal_points = decimal_number_mobject.num_decimal_points
     # Spare point submobjects absorb changes in digit count during updates.
     corner = decimal_number_mobject.get_corner(DOWN + LEFT)
     decimal_number_mobject.add(
         *[VectorizedPoint(corner) for _ in range(self.spare_parts)]
     )
     if self.tracked_mobject:
         # Record the fixed offset to maintain relative to the tracked mobject.
         offset = decimal_number_mobject.get_center()
         offset = offset - self.tracked_mobject.get_center()
         self.diff_from_tracked_mobject = offset
     Animation.__init__(self, decimal_number_mobject, **kwargs)
Example #5
0
 def generate_points(self):
     """Build the card: a backing rectangle plus either the face design
     (value/suit artwork and corner numbers) or a blank, greyed-out back."""
     backing = Rectangle(
         height = self.height,
         width = self.height / self.height_to_width,
         stroke_color = WHITE,
         stroke_width = 2,
         fill_color = self.color,
         fill_opacity = 1,
     )
     self.add(backing)
     if not self.turned_over:
         value = self.get_value()
         symbol = self.get_symbol()
         self.design = self.get_design(value, symbol)
         self.corner_numbers = self.get_corner_numbers(value, symbol)
         contents = VGroup(self.design, self.corner_numbers)
     else:
         # Face-down: grey the card out and give it point-like contents.
         self.set_fill(DARK_GREY)
         self.set_stroke(LIGHT_GREY)
         contents = VectorizedPoint(self.get_center())
     self.add(contents)
Example #6
0
class ThreeDCamera(CameraWithPerspective):
    """Camera with a 3D viewpoint given in spherical coordinates.

    The viewpoint is stored as the single point of ``rotation_mobject`` in
    (phi, theta, distance) space, so it can be animated like any mobject;
    the center of rotation is stored in x-y-z space in ``moving_center``.

    NOTE(review): this class relies on Python-2-only constructs
    (``cmp`` and ``sorted(..., cmp=...)``); a Python 3 port would need
    ``functools.cmp_to_key``.
    """
    CONFIG = {
        "sun_vect": 5 * UP + LEFT,  # direction toward the light source (shading)
        "shading_factor": 0.2,
        "distance": 5.,
        "default_distance": 5.,
        "phi": 0,  #Angle off z axis
        "theta": -TAU / 4,  #Rotation about z axis
    }

    def __init__(self, *args, **kwargs):
        # Initialize the plain Camera directly, then add 3D-specific state.
        Camera.__init__(self, *args, **kwargs)
        # Unit vector toward the light, consumed by get_shaded_rgb.
        self.unit_sun_vect = self.sun_vect / np.linalg.norm(self.sun_vect)
        ## rotation_mobject lives in the phi-theta-distance space
        self.rotation_mobject = VectorizedPoint()
        ## moving_center lives in the x-y-z space
        ## It represents the center of rotation
        self.moving_center = VectorizedPoint(self.space_center)
        self.set_position(self.phi, self.theta, self.distance)

    def modified_rgb(self, vmobject, rgb):
        """Return ``rgb`` shaded by the mobject's surface normal if it is 3D,
        otherwise unchanged."""
        if should_shade_in_3d(vmobject):
            return self.get_shaded_rgb(rgb,
                                       self.get_unit_normal_vect(vmobject))
        else:
            return rgb

    def get_stroke_rgb(self, vmobject):
        # Stroke color goes through the same 3D shading as fill.
        return self.modified_rgb(vmobject, vmobject.get_stroke_rgb())

    def get_fill_rgb(self, vmobject):
        return self.modified_rgb(vmobject, vmobject.get_fill_rgb())

    def get_shaded_rgb(self, rgb, normal_vect):
        """Lighten ``rgb`` according to how well the normal aligns with the sun.

        ``brightness`` is a squared dot product, hence never negative; the
        else-branch only runs when brightness == 0, where the interpolation
        toward black is a no-op (alpha == 0).
        """
        brightness = np.dot(normal_vect, self.unit_sun_vect)**2
        if brightness > 0:
            alpha = self.shading_factor * brightness
            return interpolate(rgb, np.ones(3), alpha)
        else:
            alpha = -self.shading_factor * brightness
            return interpolate(rgb, np.zeros(3), alpha)

    def get_unit_normal_vect(self, vmobject):
        """Unit normal of the plane through the mobject's first three anchors,
        flipped so its z component is non-negative.

        Returns OUT for degenerate cases: fewer than three anchors, or
        collinear anchors yielding a zero-length cross product.
        """
        anchors = vmobject.get_anchors()
        if len(anchors) < 3:
            return OUT
        normal = np.cross(anchors[1] - anchors[0], anchors[2] - anchors[1])
        if normal[2] < 0:
            normal = -normal
        length = np.linalg.norm(normal)
        if length == 0:
            return OUT
        return normal / length

    def display_multiple_vectorized_mobjects(self, vmobjects):
        # NOTE(review): camera_point is only referenced by the commented-out
        # distance-based comparison below; the active z_cmp ignores it.
        camera_point = self.spherical_coords_to_point(
            *self.get_spherical_coords())

        def z_cmp(*vmobs):
            # Compare to three dimensional mobjects based on
            # how close they are to the camera
            # return cmp(*[
            #     -np.linalg.norm(vm.get_center()-camera_point)
            #     for vm in vmobs
            # ])
            # Only impose an order when both mobjects are 3D and non-empty;
            # otherwise report "equal" so their relative order is untouched.
            three_d_status = map(should_shade_in_3d, vmobs)
            has_points = [vm.get_num_points() > 0 for vm in vmobs]
            if all(three_d_status) and all(has_points):
                # Project both centers onto the second mobject's unit normal
                # and compare the scalars (Python 2 ``cmp``).
                cmp_vect = self.get_unit_normal_vect(vmobs[1])
                return cmp(
                    *[np.dot(vm.get_center(), cmp_vect) for vm in vmobs])
            else:
                return 0

        # Python 2 comparator sort, then defer to the 2D display routine.
        Camera.display_multiple_vectorized_mobjects(
            self, sorted(vmobjects, cmp=z_cmp))

    def get_spherical_coords(self, phi=None, theta=None, distance=None):
        """Current (phi, theta, distance) as an array; any non-None argument
        overrides the stored value."""
        curr_phi, curr_theta, curr_d = self.rotation_mobject.points[0]
        if phi is None: phi = curr_phi
        if theta is None: theta = curr_theta
        if distance is None: distance = curr_d
        return np.array([phi, theta, distance])

    def get_cartesian_coords(self, phi=None, theta=None, distance=None):
        """x-y-z camera position for the given (or current) spherical coords."""
        spherical_coords_array = self.get_spherical_coords(
            phi, theta, distance)
        phi2 = spherical_coords_array[0]
        theta2 = spherical_coords_array[1]
        d2 = spherical_coords_array[2]
        return self.spherical_coords_to_point(phi2, theta2, d2)

    def get_phi(self):
        # Angle off the z axis.
        return self.get_spherical_coords()[0]

    def get_theta(self):
        # Rotation about the z axis.
        return self.get_spherical_coords()[1]

    def get_distance(self):
        return self.get_spherical_coords()[2]

    def spherical_coords_to_point(self, phi, theta, distance):
        """Standard spherical-to-Cartesian conversion; phi measured off +z."""
        return distance * np.array([
            np.sin(phi) * np.cos(theta),
            np.sin(phi) * np.sin(theta),
            np.cos(phi)
        ])

    def get_center_of_rotation(self, x=None, y=None, z=None):
        """Current center of rotation; any non-None argument overrides that
        coordinate."""
        curr_x, curr_y, curr_z = self.moving_center.points[0]
        if x is None:
            x = curr_x
        if y is None:
            y = curr_y
        if z is None:
            z = curr_z
        return np.array([x, y, z])

    def set_position(self,
                     phi=None,
                     theta=None,
                     distance=None,
                     center_x=None,
                     center_y=None,
                     center_z=None):
        """Move the camera in spherical coordinates and/or move its center of
        rotation; ``None`` arguments keep the current values."""
        point = self.get_spherical_coords(phi, theta, distance)
        self.rotation_mobject.move_to(point)
        # Keep the cached scalar attributes in sync with rotation_mobject.
        self.phi, self.theta, self.distance = point
        center_of_rotation = self.get_center_of_rotation(
            center_x, center_y, center_z)
        self.moving_center.move_to(center_of_rotation)
        # Mirror the moving center into space_center (presumably read by the
        # base Camera — TODO confirm against the base class).
        self.space_center = self.moving_center.points[0]

    def get_view_transformation_matrix(self):
        """Rotation into the camera frame, scaled by default_distance/distance
        so that moving the camera closer enlarges the image."""
        return (self.default_distance / self.get_distance()) * np.dot(
            rotation_matrix(self.get_phi(), LEFT),
            rotation_about_z(-self.get_theta() - np.pi / 2),
        )

    def points_to_pixel_coords(self, points):
        """Rotate points into the camera frame, then let the 2D camera project.

        Side effect: refreshes ``space_center`` from ``moving_center`` before
        delegating to the base class.
        """
        matrix = self.get_view_transformation_matrix()
        new_points = np.dot(points, matrix.T)
        self.space_center = self.moving_center.points[0]

        return Camera.points_to_pixel_coords(self, new_points)
Example #7
0
 def __init__(self, *args, **kwargs):
     """Initialize camera state and place it at the configured position."""
     Camera.__init__(self, *args, **kwargs)
     # Unit-length sun direction, consumed by the shading routines.
     sun = self.sun_vect
     self.unit_sun_vect = sun / np.linalg.norm(sun)
     # Single point tracking the camera's x-y-z location.
     self.position_mobject = VectorizedPoint()
     self.set_position(self.phi, self.theta, self.distance)
Example #8
0
class ThreeDCamera(CameraWithPerspective):
    """Camera viewing the scene from a 3D position.

    The position is stored as a single x-y-z point in ``position_mobject``
    so it can be animated; spherical coordinates (phi, theta, distance) are
    derived from it on demand.

    NOTE(review): relies on Python-2-only features (``cmp``,
    ``sorted(..., cmp=...)``, ``method.im_self``).
    """
    CONFIG = {
        "sun_vect" : 5*UP+LEFT,  # direction toward the light source (shading)
        "shading_factor" : 0.5,
        "distance" : 5,
        "phi" : 0, #Angle off z axis
        "theta" : -np.pi/2, #Rotation about z axis
    }
    def __init__(self, *args, **kwargs):
        Camera.__init__(self, *args, **kwargs)
        # Unit vector toward the light, used by get_shaded_rgb.
        self.unit_sun_vect = self.sun_vect/np.linalg.norm(self.sun_vect)
        self.position_mobject = VectorizedPoint()
        self.set_position(self.phi, self.theta, self.distance)

    def get_color(self, method):
        """Call a bound color getter and shade its result if the owning
        mobject is 3D; ``method.im_self`` (Python 2) is that mobject."""
        color = method()
        vmobject = method.im_self
        if should_shade_in_3d(vmobject):
            return Color(rgb = self.get_shaded_rgb(
                color_to_rgb(color),
                normal_vect = self.get_unit_normal_vect(vmobject)
            ))
        else:
            return color

    def get_stroke_color(self, vmobject):
        return self.get_color(vmobject.get_stroke_color)

    def get_fill_color(self, vmobject):
        return self.get_color(vmobject.get_fill_color)

    def get_shaded_rgb(self, rgb, normal_vect):
        """Lighten ``rgb`` according to alignment of the normal with the sun.

        ``brightness`` is a squared dot product, hence never negative; the
        else-branch only runs when brightness == 0, a no-op interpolation.
        """
        brightness = np.dot(normal_vect, self.unit_sun_vect)**2
        if brightness > 0:
            alpha = self.shading_factor*brightness
            return interpolate(rgb, np.ones(3), alpha)
        else:
            alpha = -self.shading_factor*brightness
            return interpolate(rgb, np.zeros(3), alpha)

    def get_unit_normal_vect(self, vmobject):
        """Unit normal of the plane through the first three anchors, flipped
        to non-negative z; OUT for degenerate (too few / collinear) anchors."""
        anchors = vmobject.get_anchors()
        if len(anchors) < 3:
            return OUT
        normal = np.cross(anchors[1]-anchors[0], anchors[2]-anchors[1])
        if normal[2] < 0:
            normal = -normal
        length = np.linalg.norm(normal)
        if length == 0:
            return OUT
        return normal/length

    def display_multiple_vectorized_mobjects(self, vmobjects):
        def z_cmp(*vmobs):
            #Compare to three dimensional mobjects based on their
            #z value, otherwise don't compare.
            three_d_status = map(should_shade_in_3d, vmobs)
            has_points = [vm.get_num_points() > 0 for vm in vmobs]
            if all(three_d_status) and all(has_points):
                # Project both centers onto the second mobject's unit normal
                # and compare the scalars (Python 2 ``cmp``).
                cmp_vect = self.get_unit_normal_vect(vmobs[1])
                return cmp(*[
                    np.dot(vm.get_center(), cmp_vect)
                    for vm in vmobs
                ])
            else:
                return 0
        # Python 2 comparator sort, then defer to the 2D display routine.
        Camera.display_multiple_vectorized_mobjects(
            self, sorted(vmobjects, cmp = z_cmp)
        )

    def get_position(self):
        """The camera's x-y-z location (the single point of position_mobject)."""
        return self.position_mobject.points[0]

    def get_phi(self):
        # Angle off the z axis, recovered from the cartesian position.
        x, y, z = self.get_position()
        return angle_of_vector([z, np.sqrt(x**2 + y**2)])

    def get_theta(self):
        # Rotation about the z axis.
        x, y, z = self.get_position()
        return angle_of_vector([x, y])

    def get_distance(self):
        return np.linalg.norm(self.get_position())

    def spherical_coords_to_point(self, phi, theta, distance):
        """Cartesian point for the given spherical coordinates; ``None``
        arguments fall back to the camera's current values.

        Fixed: the previous ``phi or self.get_phi()`` pattern treated the
        perfectly valid value 0 as "missing"; explicit ``is None`` checks
        keep 0 usable for all three coordinates.
        """
        if phi is None:
            phi = self.get_phi()
        if theta is None:
            theta = self.get_theta()
        if distance is None:
            distance = self.get_distance()
        return distance*np.array([
            np.sin(phi)*np.cos(theta),
            np.sin(phi)*np.sin(theta),
            np.cos(phi)
        ])

    def set_position(self, phi = None, theta = None, distance = None):
        """Move the camera; ``None`` leaves that coordinate unchanged."""
        point = self.spherical_coords_to_point(phi, theta, distance)
        self.position_mobject.move_to(point)
        # Cache the re-derived spherical coordinates as plain attributes.
        self.phi = self.get_phi()
        self.theta = self.get_theta()
        self.distance = self.get_distance()

    def get_view_transformation_matrix(self):
        """Rotation matrix taking world coordinates into the camera frame."""
        return np.dot(
            rotation_matrix(self.get_phi(), LEFT),
            rotation_about_z(-self.get_theta() - np.pi/2),
        )

    def points_to_pixel_coords(self, points):
        # Rotate points into the camera frame, then let the 2D camera project.
        matrix = self.get_view_transformation_matrix()
        new_points = np.dot(points, matrix.T)
        return Camera.points_to_pixel_coords(self, new_points)
Example #9
0
class ThreeDCamera(CameraWithPerspective):
    """Camera with a 3D viewpoint stored in spherical coordinates.

    The viewpoint lives as the single point of ``rotation_mobject`` in
    (phi, theta, distance) space, so it can be animated like any mobject.

    NOTE(review): relies on Python-2-only features (``cmp``,
    ``sorted(..., cmp=...)``, ``method.im_self``).
    """
    CONFIG = {
        "sun_vect" : 5*UP+LEFT,  # direction toward the light source (shading)
        "shading_factor" : 0.2,
        "distance" : 5.,
        "default_distance" : 5.,
        "phi" : 0, #Angle off z axis
        "theta" : -TAU/4, #Rotation about z axis
    }
    def __init__(self, *args, **kwargs):
        Camera.__init__(self, *args, **kwargs)
        # Unit vector toward the light, consumed by get_shaded_rgb.
        self.unit_sun_vect = self.sun_vect/np.linalg.norm(self.sun_vect)
        ## Lives in the phi-theta-distance space
        self.rotation_mobject = VectorizedPoint()
        self.set_position(self.phi, self.theta, self.distance)

    def get_color(self, method):
        """Call a bound color getter and shade its result if the owning
        mobject is 3D; ``method.im_self`` (Python 2) is that mobject."""
        color = method()
        vmobject = method.im_self
        if should_shade_in_3d(vmobject):
            return Color(rgb = self.get_shaded_rgb(
                color_to_rgb(color),
                normal_vect = self.get_unit_normal_vect(vmobject)
            ))
        else:
            return color

    def get_stroke_color(self, vmobject):
        return self.get_color(vmobject.get_stroke_color)

    def get_fill_color(self, vmobject):
        return self.get_color(vmobject.get_fill_color)

    def get_shaded_rgb(self, rgb, normal_vect):
        """Lighten ``rgb`` by how well the normal aligns with the sun.

        ``brightness`` is a squared dot product, hence never negative; the
        else-branch only runs at brightness == 0, a no-op interpolation.
        """
        brightness = np.dot(normal_vect, self.unit_sun_vect)**2
        if brightness > 0:
            alpha = self.shading_factor*brightness
            return interpolate(rgb, np.ones(3), alpha)
        else:
            alpha = -self.shading_factor*brightness
            return interpolate(rgb, np.zeros(3), alpha)

    def get_unit_normal_vect(self, vmobject):
        """Unit normal of the plane through the first three anchors, flipped
        to non-negative z; OUT for degenerate (too few / collinear) anchors."""
        anchors = vmobject.get_anchors()
        if len(anchors) < 3:
            return OUT
        normal = np.cross(anchors[1]-anchors[0], anchors[2]-anchors[1])
        if normal[2] < 0:
            normal = -normal
        length = np.linalg.norm(normal)
        if length == 0:
            return OUT
        return normal/length

    def display_multiple_vectorized_mobjects(self, vmobjects):
        # Cartesian location of the camera, used to sort by proximity below.
        camera_point = self.spherical_coords_to_point(
            *self.get_spherical_coords()
        )
        def z_cmp(*vmobs):
            #Compare to three dimensional mobjects based on 
            #how close they are to the camera
            # Farther mobjects compare "smaller" (negated distance), so the
            # Python 2 comparator sort draws them first.
            return cmp(*[
                -np.linalg.norm(vm.get_center()-camera_point)
                for vm in vmobs
            ])
            # NOTE(review): everything below the return is dead code — an
            # older normal-projection comparison kept for reference.
            # three_d_status = map(should_shade_in_3d, vmobs)
            # has_points = [vm.get_num_points() > 0 for vm in vmobs]
            # if all(three_d_status) and all(has_points):
            #     cmp_vect = self.get_unit_normal_vect(vmobs[1])
            #     return cmp(*[
            #         np.dot(vm.get_center(), cmp_vect)
            #         for vm in vmobs
            #     ])
            # else:
            #     return 0
        Camera.display_multiple_vectorized_mobjects(
            self, sorted(vmobjects, cmp = z_cmp)
        )

    def get_spherical_coords(self, phi = None, theta = None, distance = None):
        """Current (phi, theta, distance) as an array; any non-None argument
        overrides the stored value."""
        curr_phi, curr_theta, curr_d = self.rotation_mobject.points[0]
        if phi is None: phi = curr_phi
        if theta is None: theta = curr_theta
        if distance is None: distance = curr_d
        return np.array([phi, theta, distance])

    def get_phi(self):
        # Angle off the z axis.
        return self.get_spherical_coords()[0]

    def get_theta(self):
        # Rotation about the z axis.
        return self.get_spherical_coords()[1]

    def get_distance(self):
        return self.get_spherical_coords()[2]

    def spherical_coords_to_point(self, phi, theta, distance):
        """Standard spherical-to-Cartesian conversion; phi measured off +z."""
        return distance*np.array([
            np.sin(phi)*np.cos(theta),
            np.sin(phi)*np.sin(theta),
            np.cos(phi)
        ])

    def set_position(self, phi = None, theta = None, distance = None):
        """Move the camera; ``None`` leaves that coordinate unchanged."""
        point = self.get_spherical_coords(phi, theta, distance)
        self.rotation_mobject.move_to(point)
        # Keep the cached scalar attributes in sync with rotation_mobject.
        self.phi, self.theta, self.distance = point

    def get_view_transformation_matrix(self):
        """Rotation into the camera frame, scaled by default_distance/distance
        so that moving the camera closer enlarges the image."""
        return (self.default_distance / self.get_distance()) * np.dot(
            rotation_matrix(self.get_phi(), LEFT),
            rotation_about_z(-self.get_theta() - np.pi/2),
        )

    def points_to_pixel_coords(self, points):
        # Rotate points into the camera frame, then let the 2D camera project.
        matrix = self.get_view_transformation_matrix()
        new_points = np.dot(points, matrix.T)
        return Camera.points_to_pixel_coords(self, new_points)