def add_camera_animation(
    op,
    cameras,
    parent_collection,
    number_interpolation_frames,
    interpolation_type,
    consider_missing_cameras_during_animation,
    remove_rotation_discontinuities,
    image_dp,
    image_fp_type,
):
    log_report("INFO", "Adding Camera Animation: ...")

    if len(cameras) == 0:
        return

    if consider_missing_cameras_during_animation:
        cameras = enhance_cameras_with_dummy_cameras(op, cameras, image_dp,
                                                     image_fp_type)

    # Use the first reconstructed camera as a template for the animated camera. The values
    # are adjusted with add_transformation_animation() and add_camera_intrinsics_animation().
    some_cam = cameras[0]
    bcamera = add_single_camera(op, "Animated Camera", some_cam)
    cam_obj = add_obj(bcamera, "Animated Camera", parent_collection)
    cameras_sorted = sorted(cameras,
                            key=lambda camera: camera.get_relative_fp())

    transformations_sorted = []
    camera_intrinsics_sorted = []
    for camera in cameras_sorted:
        if isinstance(camera, DummyCamera):
            matrix_world = None
            camera_intrinsics = None
        else:
            matrix_world = compute_camera_matrix_world(camera)
            shift_x, shift_y = compute_shift(camera,
                                             relativ_to_largest_extend=True)
            camera_intrinsics = CameraIntrinsics(camera.get_field_of_view(),
                                                 shift_x, shift_y)

        transformations_sorted.append(matrix_world)
        camera_intrinsics_sorted.append(camera_intrinsics)

    add_transformation_animation(
        op=op,
        animated_obj_name=cam_obj.name,
        transformations_sorted=transformations_sorted,
        number_interpolation_frames=number_interpolation_frames,
        interpolation_type=interpolation_type,
        remove_rotation_discontinuities=remove_rotation_discontinuities,
    )

    add_camera_intrinsics_animation(
        op=op,
        animated_obj_name=cam_obj.name,
        intrinsics_sorted=camera_intrinsics_sorted,
        number_interpolation_frames=number_interpolation_frames,
    )
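
A minimal usage sketch for the function above (not part of the original add-on): the camera list, the collection handling, and the parameter values are assumptions and would normally come from the importer's file handler and operator settings.

import bpy

# Hypothetical input: stands in for the camera list produced by the add-on's
# file handler; with an empty list the function simply returns early.
parsed_cameras = []

animation_collection = bpy.data.collections.new("Animation Collection")
bpy.context.scene.collection.children.link(animation_collection)

add_camera_animation(
    op=None,  # assuming the logging helpers tolerate a missing operator
    cameras=parsed_cameras,
    parent_collection=animation_collection,
    number_interpolation_frames=1,
    interpolation_type="LINEAR",
    consider_missing_cameras_during_animation=False,
    remove_rotation_discontinuities=True,
    image_dp="/path/to/images",
    image_fp_type="NAME",
)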
def add_points_as_mesh(op, points, reconstruction_collection):
    log_report("INFO", "Adding Points as Mesh: ...", op)
    stop_watch = StopWatch()
    point_cloud_obj_name = "Mesh Point Cloud"
    point_cloud_mesh = bpy.data.meshes.new(point_cloud_obj_name)
    point_cloud_mesh.update()
    point_cloud_mesh.validate()
    point_world_coordinates = [tuple(point.coord) for point in points]
    point_cloud_mesh.from_pydata(point_world_coordinates, [], [])
    point_cloud_obj = add_obj(point_cloud_mesh, point_cloud_obj_name,
                              reconstruction_collection)

    log_report("INFO", "Duration: " + str(stop_watch.get_elapsed_time()), op)
    log_report("INFO", "Adding Points as Mesh: Done", op)
    return point_cloud_obj.name
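
A hedged usage sketch for add_points_as_mesh() above; the point type and the None operator are assumptions, since the function only reads a coord attribute from each point and only uses the operator for logging.

import bpy
from collections import namedtuple

# Stand-in for the add-on's point type; only the "coord" attribute is needed here.
_Point = namedtuple("_Point", ["coord"])
points = [_Point((0.0, 0.0, 0.0)), _Point((1.0, 0.0, 0.0)), _Point((0.0, 1.0, 0.0))]

reconstruction_collection = bpy.data.collections.new("Reconstruction Collection")
bpy.context.scene.collection.children.link(reconstruction_collection)

# Returns the name of the created point cloud mesh object.
obj_name = add_points_as_mesh(None, points, reconstruction_collection)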
def add_particle_system(coords, particle_obj, point_cloud_obj_name,
                        reconstruction_collection):
    point_cloud_mesh = bpy.data.meshes.new(point_cloud_obj_name)
    point_cloud_mesh.update()
    point_cloud_mesh.validate()
    point_cloud_mesh.from_pydata(coords, [], [])
    point_cloud_obj = add_obj(point_cloud_mesh, point_cloud_obj_name,
                              reconstruction_collection)

    if len(point_cloud_obj.particle_systems) == 0:
        point_cloud_obj.modifiers.new("particle sys", type="PARTICLE_SYSTEM")
        particle_sys = point_cloud_obj.particle_systems[0]
        settings = particle_sys.settings
        settings.type = "HAIR"
        settings.use_advanced_hair = True
        settings.emit_from = "VERT"
        settings.count = len(coords)
        # The final object extent is hair_length * obj.scale
        settings.hair_length = 100  # This must not be 0
        settings.use_emit_random = False
        settings.render_type = "OBJECT"
        settings.instance_object = particle_obj
    return point_cloud_obj
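
A minimal sketch (the template object and the coordinates are made up) showing how add_particle_system() above is meant to be used: a template object is instanced at every vertex of the point cloud mesh via the hair particle system.

import bpy

# Template object that is instanced at every point (a small cube here).
bpy.ops.mesh.primitive_cube_add(size=0.01)
particle_template_obj = bpy.context.active_object

# Hand-written stand-in for reconstructed point coordinates.
coords = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]

reconstruction_collection = bpy.data.collections.new("Reconstruction Collection")
bpy.context.scene.collection.children.link(reconstruction_collection)

point_cloud_obj = add_particle_system(
    coords, particle_template_obj, "Particle Point Cloud",
    reconstruction_collection)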
def add_camera_image_plane(
    matrix_world,
    blender_image,
    camera,
    name,
    transparency,
    add_image_plane_emission,
    image_planes_collection,
    op,
):
    """
    Create a textured mesh representing the image plane of the given camera.
    """
    # log_report('INFO', 'add_camera_image_plane: ...')
    # log_report('INFO', 'name: ' + str(name))

    width = camera.width
    height = camera.height
    focal_length = camera.get_focal_length()
    p_x, p_y = camera.get_principal_point()

    assert width is not None and height is not None

    bpy.context.scene.render.engine = "CYCLES"
    mesh = bpy.data.meshes.new(name)
    mesh.update()
    mesh.validate()

    plane_distance = 1.0  # Distance from camera position
    # Right vector in view frustum at plane_distance:
    right = Vector((1, 0, 0)) * (width / focal_length) * plane_distance
    # Up vector in view frustum at plane_distance:
    up = Vector((0, 1, 0)) * (height / focal_length) * plane_distance
    # Camera view direction:
    view_dir = -Vector((0, 0, 1)) * plane_distance
    plane_center = view_dir

    shift_x, shift_y = compute_shift(camera, relativ_to_largest_extend=False)

    corners = ((-0.5, -0.5), (+0.5, -0.5), (+0.5, +0.5), (-0.5, +0.5))
    points = [
        (plane_center + (c[0] + shift_x) * right + (c[1] + shift_y) * up)[0:3]
        for c in corners
    ]
    mesh.from_pydata(points, [], [[0, 1, 2, 3]])
    mesh.uv_layers.new()

    # Add mesh to new image plane object:
    mesh_obj = add_obj(mesh, name, image_planes_collection)

    image_plane_material = bpy.data.materials.new(name="image_plane_material")
    # Adds "Principled BSDF" and a "Material Output" node
    image_plane_material.use_nodes = True

    nodes = image_plane_material.node_tree.nodes
    links = image_plane_material.node_tree.links

    shader_node_tex_image = nodes.new(type="ShaderNodeTexImage")
    shader_node_principled_bsdf = nodes.get("Principled BSDF")
    shader_node_principled_bsdf.inputs["Alpha"].default_value = transparency

    links.new(
        shader_node_tex_image.outputs["Color"],
        shader_node_principled_bsdf.inputs["Base Color"],
    )

    if add_image_plane_emission:
        links.new(
            shader_node_tex_image.outputs["Color"],
            shader_node_principled_bsdf.inputs["Emission"],
        )

    shader_node_tex_image.image = blender_image

    # Assign it to object
    if mesh_obj.data.materials:
        # assign to 1st material slot
        mesh_obj.data.materials[0] = image_plane_material
    else:
        # no slots
        mesh_obj.data.materials.append(image_plane_material)

    mesh_obj.matrix_world = matrix_world
    mesh.update()
    mesh.validate()
    # log_report('INFO', 'add_camera_image_plane: Done')
    return mesh_obj
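
The corner computation in add_camera_image_plane() can be checked with a small standalone example (the numbers below are assumptions): for a 2000 x 1500 image, a focal length of 2500 pixels, and no principal point shift, the plane corners at distance 1 in front of the camera come out as follows.

from mathutils import Vector

width, height, focal_length = 2000, 1500, 2500.0
shift_x, shift_y = 0.0, 0.0  # assuming the principal point lies in the image center
plane_distance = 1.0

right = Vector((1, 0, 0)) * (width / focal_length) * plane_distance   # 0.8 units wide
up = Vector((0, 1, 0)) * (height / focal_length) * plane_distance     # 0.6 units high
plane_center = -Vector((0, 0, 1)) * plane_distance                    # 1 unit in front of the camera

corners = ((-0.5, -0.5), (+0.5, -0.5), (+0.5, +0.5), (-0.5, +0.5))
points = [
    (plane_center + (c[0] + shift_x) * right + (c[1] + shift_y) * up)[0:3]
    for c in corners
]
# points is approximately
# [[-0.4, -0.3, -1.0], [0.4, -0.3, -1.0], [0.4, 0.3, -1.0], [-0.4, 0.3, -1.0]]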
def add_cameras(
    op,
    cameras,
    parent_collection,
    image_dp=None,
    add_background_images=False,
    add_image_planes=False,
    add_depth_maps_as_point_cloud=True,
    convert_camera_coordinate_system=True,
    camera_collection_name="Cameras",
    image_plane_collection_name="Image Planes",
    depth_map_collection_name="Depth Maps",
    camera_scale=1.0,
    image_plane_transparency=0.5,
    add_image_plane_emission=True,
    use_default_depth_map_color=False,
    depth_map_default_color=(1.0, 0.0, 0.0),
    depth_map_display_sparsity=10,
    depth_map_id_or_name_str="",
):
    """
    ======== The images are currently only shown in BLENDER RENDER ========
    ======== Make sure to enable TEXTURE SHADING in the 3D view to make the images visible ========

    :param cameras: cameras of the reconstruction
    :param image_dp: directory containing the corresponding images
    :param add_image_planes: whether to add a textured image plane for each camera
    :param convert_camera_coordinate_system: whether to convert the camera coordinate system
    :param camera_collection_name: name of the collection that will contain the cameras
    :param image_plane_collection_name: name of the collection that will contain the image planes
    :return: None
    """
    log_report("INFO", "Adding Cameras: ...")
    stop_watch = StopWatch()
    camera_collection = add_collection(camera_collection_name,
                                       parent_collection)

    if add_image_planes:
        log_report("INFO", "Adding image planes: True")
        image_planes_collection = add_collection(image_plane_collection_name,
                                                 parent_collection)
        camera_image_plane_pair_collection = add_collection(
            "Camera Image Plane Pair Collection", parent_collection)
    else:
        log_report("INFO", "Adding image planes: False")

    if add_depth_maps_as_point_cloud:
        log_report("INFO", "Adding depth maps as point cloud: True")
        depth_map_collection = add_collection(depth_map_collection_name,
                                              parent_collection)
        camera_depth_map_pair_collection = add_collection(
            "Camera Depth Map Pair Collection", parent_collection)
    else:
        log_report("INFO", "Adding depth maps as point cloud: False")

    depth_map_id_or_name_str = depth_map_id_or_name_str.rstrip()
    if depth_map_id_or_name_str == "":
        depth_map_indices = None
    else:
        depth_map_indices = []
        cam_rel_fp_to_idx = {}
        for idx, camera in enumerate(cameras):
            rel_fp = camera.get_relative_fp()
            cam_rel_fp_to_idx[rel_fp] = idx
        for id_or_name in depth_map_id_or_name_str.split(" "):
            if is_int(id_or_name):
                depth_map_indices.append(int(id_or_name))
            else:
                if id_or_name in cam_rel_fp_to_idx:
                    depth_map_indices.append(cam_rel_fp_to_idx[id_or_name])
                else:
                    log_report(
                        "WARNING",
                        "Could not find depth map name " + id_or_name + ". " +
                        "Possible values are: " +
                        str(cam_rel_fp_to_idx.keys()),
                    )

    # Adding cameras and image planes:
    for index, camera in enumerate(cameras):

        # camera_name = "Camera %d" % index     # original code
        # Replace the camera name so it matches the image name (without extension)
        blender_image_name_stem = camera.get_blender_obj_gui_str()
        camera_name = blender_image_name_stem + "_cam"
        bcamera = add_single_camera(op, camera_name, camera)
        camera_object = add_obj(bcamera, camera_name, camera_collection)
        matrix_world = compute_camera_matrix_world(camera)
        camera_object.matrix_world = matrix_world
        camera_object.scale *= camera_scale

        if not add_image_planes and not add_background_images:
            continue

        if camera.has_undistorted_absolute_fp():
            image_path = camera.get_undistorted_absolute_fp()
        else:
            image_path = camera.get_absolute_fp()

        if not os.path.isfile(image_path):
            log_report("WARNING", "Could not find image at " + str(image_path))
            continue

        blender_image = bpy.data.images.load(image_path)

        if add_background_images:
            camera_data = bpy.data.objects[camera_name].data
            camera_data.show_background_images = True
            background_image = camera_data.background_images.new()
            background_image.image = blender_image

        if add_image_planes and not camera.is_panoramic():
            # Group image plane and camera:
            camera_image_plane_pair_collection_current = add_collection(
                "Camera Image Plane Pair Collection %s" %
                blender_image_name_stem,
                camera_image_plane_pair_collection,
            )

            image_plane_name = blender_image_name_stem + "_image_plane"

            image_plane_obj = add_camera_image_plane(
                matrix_world,
                blender_image,
                camera=camera,
                name=image_plane_name,
                transparency=image_plane_transparency,
                add_image_plane_emission=add_image_plane_emission,
                image_planes_collection=image_planes_collection,
                op=op,
            )

            camera_image_plane_pair_collection_current.objects.link(
                camera_object)
            camera_image_plane_pair_collection_current.objects.link(
                image_plane_obj)

        if not add_depth_maps_as_point_cloud:
            continue

        if camera.depth_map_fp is None:
            continue

        if depth_map_indices is not None:
            if index not in depth_map_indices:
                continue

        depth_map_fp = camera.depth_map_fp

        # Group image plane and camera:
        camera_depth_map_pair_collection_current = add_collection(
            "Camera Depth Map Pair Collection %s" %
            os.path.basename(depth_map_fp),
            camera_depth_map_pair_collection,
        )

        depth_map_world_coords = camera.convert_depth_map_to_world_coords(
            depth_map_display_sparsity=depth_map_display_sparsity)

        if use_default_depth_map_color:
            color = depth_map_default_color
        else:
            color = color_from_value(val=index,
                                     min_val=0,
                                     max_val=len(cameras))

        depth_map_anchor_handle = draw_coords(
            op,
            depth_map_world_coords,
            # TODO Setting this to true causes an error message
            add_points_to_point_cloud_handle=False,
            reconstruction_collection=depth_map_collection,
            object_anchor_handle_name=camera.get_blender_obj_gui_str() +
            "_depth_point_cloud",
            color=color,
        )

        camera_depth_map_pair_collection_current.objects.link(camera_object)
        camera_depth_map_pair_collection_current.objects.link(
            depth_map_anchor_handle)

    log_report("INFO", "Duration: " + str(stop_watch.get_elapsed_time()))
    log_report("INFO", "Adding Cameras: Done")
def add_camera_animation(
    cameras,
    parent_collection,
    animation_frame_source,
    add_background_images,
    number_interpolation_frames,
    interpolation_type,
    consider_missing_cameras_during_animation,
    remove_rotation_discontinuities,
    image_dp,
    image_fp_type,
    op=None,
):
    log_report("INFO", "Adding Camera Animation: ...")

    if len(cameras) == 0:
        return

    if animation_frame_source == "ORIGINAL":
        number_interpolation_frames = 0
    elif animation_frame_source == "ADJUSTED":
        add_background_images = False
    else:
        assert False, "Unknown animation_frame_source: " + animation_frame_source

    if consider_missing_cameras_during_animation:
        cameras = enhance_cameras_with_dummy_cameras(cameras, image_dp,
                                                     image_fp_type, op)

    # Use the first reconstructed camera as a template for the animated camera.
    # The values are adjusted with add_transformation_animation() and
    # add_camera_intrinsics_animation().
    some_cam = cameras[0]
    bcamera = add_single_camera("Animated Camera", some_cam, op)
    cam_obj = add_obj(bcamera, "Animated Camera", parent_collection)
    cameras_sorted = sorted(cameras,
                            key=lambda camera: camera.get_relative_fp())

    transformations_sorted = []
    camera_intrinsics_sorted = []
    for camera in cameras_sorted:
        if isinstance(camera, DummyCamera):
            matrix_world = None
            camera_intrinsics = None
        else:
            matrix_world = compute_camera_matrix_world(camera)
            shift_x, shift_y = compute_shift(camera,
                                             relativ_to_largest_extend=True)
            camera_intrinsics = CameraIntrinsics(camera.get_field_of_view(),
                                                 shift_x, shift_y)

        transformations_sorted.append(matrix_world)
        camera_intrinsics_sorted.append(camera_intrinsics)

    add_transformation_animation(
        animated_obj_name=cam_obj.name,
        transformations_sorted=transformations_sorted,
        number_interpolation_frames=number_interpolation_frames,
        interpolation_type=interpolation_type,
        remove_rotation_discontinuities=remove_rotation_discontinuities,
        op=op,
    )

    add_camera_intrinsics_animation(
        animated_obj_name=cam_obj.name,
        intrinsics_sorted=camera_intrinsics_sorted,
        number_interpolation_frames=number_interpolation_frames,
        op=op,
    )

    if add_background_images:
        # https://docs.blender.org/api/current/bpy.types.CameraBackgroundImage.html
        camera_data = bpy.data.objects[cam_obj.name].data
        camera_data.show_background_images = True
        bg_img = camera_data.background_images.new()
        dp = os.path.dirname(cameras_sorted[0].get_absolute_fp())

        first_fn = cameras_sorted[0].get_file_name()

        # Remove previously created movie clips
        movie_clip_name = os.path.basename(first_fn)
        if movie_clip_name in bpy.data.movieclips:
            bpy.data.movieclips.remove(bpy.data.movieclips[movie_clip_name])

        if os.path.isfile(os.path.join(dp, first_fn)):

            # Only the first entry in the "files" parameter of
            # bpy.ops.clip.open() matters (all others are ignored); it
            # determines the first image of the image sequence. All images
            # with higher sequence numbers are added to the movie clip.
            first_sequence_fn = [{"name": first_fn}]

            # https://docs.blender.org/api/current/bpy.types.MovieClip.html
            # https://docs.blender.org/api/current/bpy.types.Sequences.html
            # Using a video clip instead of an image sequence has the advantage
            # that Blender automatically adjusts the start offset of the image
            # sequence (e.g. if the first image of the sequence is
            # 100_7110.JPG, one would otherwise have to set the offset
            # manually to 7109).
            bpy.ops.clip.open(directory=dp, files=first_sequence_fn)
            bg_img.source = "MOVIE_CLIP"

            # The clip created with bpy.ops.clip.open() has the same name as
            # the first image of the image sequence.
            bg_img.clip = bpy.data.movieclips[movie_clip_name]