Example #1
    def parse_optical_flow_exr_file(path_to_exr_file):

        logger.info('parse_optical_flow_exr_file: ...')
        logger.vinfo('path_to_exr_file', path_to_exr_file)

        pt = Imath.PixelType(Imath.PixelType.FLOAT)
        exr_file = OpenEXR.InputFile(path_to_exr_file)

        # To examine the content of the exr file use:
        logger.vinfo('exr_file.header()', exr_file.header())

        dw = exr_file.header()['dataWindow']

        size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
        logger.vinfo('size', size)

        red_channel_str = exr_file.channel('R', pt)
        red_exr_data_as_np = np.frombuffer(red_channel_str, dtype=np.float32)
        # Numpy arrays are (row, col)
        red_exr_data_as_np = red_exr_data_as_np.reshape(size[1], size[0])

        green_channel_str = exr_file.channel('G', pt)
        green_exr_data_as_np = np.frombuffer(green_channel_str, dtype=np.float32)
        # Numpy arrays are (row, col)
        green_exr_data_as_np = green_exr_data_as_np.reshape(size[1], size[0])

        exr_data_as_np = np.dstack((red_exr_data_as_np, green_exr_data_as_np))

        logger.vinfo('shape', exr_data_as_np.shape)

        logger.info('parse_optical_flow_exr_file: Done')
        return exr_data_as_np
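A minimal usage sketch (the file path and pixel indices are hypothetical, and it is assumed the function is callable as shown): the returned array has shape (height, width, 2), where channel 0 holds the EXR R channel and channel 1 the G channel.

    # Hedged usage sketch with a hypothetical input path
    flow = parse_optical_flow_exr_file('/path/to/flow_00001.exr')
    # flow.shape == (height, width, 2); index as (row, col, channel)
    flow_x_at_pixel = flow[10, 20, 0]
    flow_y_at_pixel = flow[10, 20, 1]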
Example #2
    def convert_depth_map_to_world_coords(
            self,
            depth_map,
            depth_map_semantic,
            shift_to_pixel_center,  # False for Colmap, True for MVE
            depth_map_display_sparsity=100,
            inverted_cam_model=False):
        """
        Do not confuse z_buffer with depth_buffer!
        z_buffer contains values in [0,1]
        depth_buffer contains the actual distance values

        :param depth_buffer_matrix:
        :param n_th_result_point:
        :return:
        """
        logger.info('Converting depth map to world coordinates: ...')
        cam_coords = self.convert_depth_map_to_cam_coords(
            depth_map,
            depth_map_semantic,
            shift_to_pixel_center,
            depth_map_display_sparsity,
            inverted_cam_model=inverted_cam_model)

        world_coords = self.cam_to_world_coord_multiple_coords(cam_coords)

        logger.info('Converting depth map to world coordinates: Done')
        return world_coords
Example #3
def set_bone_head_tail(armature_object_name, bone_name, head_location=None, tail_location=None):

    """ A CHILD is attached with its HEAD to the TAIL of the PARENT """

    logger.info('set_bone_head_tail: ...')

    # ===================
    # If head_location and tail_location are set to the same value, THE BONE DISAPPEARS
    # ===================
    assert head_location != tail_location

    set_mode(
        active_object_name=armature_object_name,
        mode='EDIT',
        configure_scene_for_basic_ops=False)
    armature_object = bpy.data.objects[armature_object_name]

    if head_location is not None:
        logger.debug('head_location: ' + str(head_location))
        armature_object.data.edit_bones[bone_name].head = Vector(head_location)

    if tail_location is not None:
        logger.debug('tail_location: ' + str(tail_location))
        armature_object.data.edit_bones[bone_name].tail = Vector(tail_location)

    bpy.ops.object.mode_set(mode='OBJECT')
    logger.info('set_bone_head_tail: Done')
Example #4
def check_ops_prerequisites(active_object_name, selected_object_names):

    logger.info('check_ops_prerequisites: ...')

    # Check that all objects are visible, otherwise a poll error will be thrown
    assert not bpy.data.objects[active_object_name].hide
    for selected_obj_name in selected_object_names:
        assert not bpy.data.objects[selected_obj_name].hide

    # Check if ALL objects are in the same layer, otherwise the execution of the operation will have no effect

    true_active_object_layers = set([
        index for index in range(0, 20)
        if bpy.data.objects[active_object_name].layers[index]
    ])
    true_layers_per_selected_object = []
    for selected_obj_name in selected_object_names:
        true_selected_object_layers = set([
            index for index in range(0, 20)
            if bpy.data.objects[selected_obj_name].layers[index]
        ])
        true_layers_per_selected_object.append(true_selected_object_layers)

    common_layers = set.intersection(*([true_active_object_layers] +
                                       true_layers_per_selected_object))
    if len(common_layers) == 0:
        logger.error('COMMON_LAYERS: ' + str(common_layers) +
                     ' EMPTY, THIS MUST NOT HAPPEN')
    assert len(common_layers) > 0

    logger.info('check_ops_prerequisites: Done')
Example #5
def add_or_overwrite_bone_constraint_follow_path(armature_object_name,
                                                 bone_name,
                                                 constraint_name,
                                                 target_curve_name,
                                                 use_curve_follow=False,  # default blender value
                                                 use_fixed_location=False,  # default blender value
                                                 forward_axis='FORWARD_Y'
                                                 ):

    logger.info('add_or_overwrite_bone_constraint_follow_path: ...')

    set_mode(
        active_object_name=armature_object_name,
        mode='POSE',
        configure_scene_for_basic_ops=False)
    armature_object = bpy.data.objects[armature_object_name]
    current_bone = armature_object.pose.bones[bone_name]

    # Note: The name / identifier of a constraint of a specific object is UNIQUE
    if constraint_name in current_bone.constraints:
        logger.warning('Follow Path constraint exists already')
        # Make sure the constraint has the correct type
        assert current_bone.constraints[constraint_name].type == ConstraintTypes.FOLLOW_PATH
        current_constraint = current_bone.constraints[constraint_name]
    else:
        current_constraint = current_bone.constraints.new(ConstraintTypes.FOLLOW_PATH)
    current_constraint.name = constraint_name
    current_constraint.target = bpy.data.objects[target_curve_name]
    current_constraint.use_curve_follow = use_curve_follow
    current_constraint.use_fixed_location = use_fixed_location
    current_constraint.forward_axis = forward_axis

    bpy.ops.object.mode_set(mode='OBJECT')
    logger.info('add_or_overwrite_bone_constraint_follow_path: Done')
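The ConstraintTypes helper referenced above is not part of this snippet; a minimal stand-in (an assumption, consistent with Blender's upper-case constraint type identifiers) might look like this:

    class ConstraintTypes:
        # Blender identifies constraint types by upper-case string enums, e.g.
        # bone.constraints.new('FOLLOW_PATH') and constraint.type == 'FOLLOW_PATH'
        FOLLOW_PATH = 'FOLLOW_PATH'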
Example #6
    def compute_gt_lines_per_frame(content_list):

        logger.info('compute_gt_lines_per_frame: ...')
        # logger.vinfo('content_list', content_list)

        content_list = [x.strip() for x in content_list]

        frame_idx_list = []
        for idx, content_line in enumerate(content_list):
            if 'frame' in content_line:
                # DONT test for  'jpg' in content_line, since that is not true for the legacy data structure
                frame_idx_list.append(idx)
        # logger.vinfo('frame_idx_list', frame_idx_list)

        num_lines_per_frame = None
        for idx in range(1, len(frame_idx_list)):
            gt_lines_per_frame_tmp = frame_idx_list[idx] - frame_idx_list[idx - 1]
            if num_lines_per_frame is not None:
                assert num_lines_per_frame == gt_lines_per_frame_tmp
            num_lines_per_frame = gt_lines_per_frame_tmp

        # logger.vinfo('num_lines_per_frame', num_lines_per_frame)
        assert num_lines_per_frame is not None
        if num_lines_per_frame not in TrajectoryFileHandler.gt_lines_per_frame_values:
            logger.vinfo('num_lines_per_frame', num_lines_per_frame)
            assert False

        # split content per frame (index)
        lines_per_frame = list(
            TrajectoryFileHandler._chunks(content_list, num_lines_per_frame))

        logger.info('compute_gt_lines_per_frame: Done')
        return lines_per_frame, num_lines_per_frame
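TrajectoryFileHandler._chunks is not shown in this example; a typical implementation (a sketch under that assumption) splits a list into consecutive pieces of the given size:

    @staticmethod
    def _chunks(input_list, chunk_size):
        # Yield consecutive chunks of length chunk_size from input_list
        for start_idx in range(0, len(input_list), chunk_size):
            yield input_list[start_idx:start_idx + chunk_size]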
Example #7
def configure_scene_animation(frame_end_number, fps=12):
    """
    ======== Note ========
    It is NOT possible to set a starting frame number
    Since all frames must be animated to get the correct object pose at frame X
    (And the animation method is called later)
    ======== ======== ========

    # BU = blender units
    # 1 meter == 0.01 BU, 10 meters == 0.1 BU, 100 meters = 1 BU
    :param number_frames_per_meter:
    :param curve_names:
    :param fps:
    :return:
    """

    logger.info('configure_scene_animation: ...')

    bpy.context.scene.frame_end = frame_end_number
    bpy.context.scene.render.fps = fps

    # set the frame index to 0, to reset any previous pose
    bpy.context.scene.frame_current = 0
    # the first index is the starting index
    bpy.context.scene.frame_current = 1

    logger.info('configure_scene_animation: Done')
Example #8
def recenter_objects_if_necessary(tire_fl_name, tire_fr_name, tire_bl_name, tire_br_name):
    logger.info('recenter_objects_if_necessary: ...')

    front_axle_center = compute_xy_axle_center(tire_fl_name, tire_fr_name)
    logger.vinfo('front_axle_center', front_axle_center)
    front_axle_center_xy0 = front_axle_center.xyz
    front_axle_center_xy0.z = 0

    back_axle_center = compute_xy_axle_center(tire_bl_name, tire_br_name)
    logger.vinfo('back_axle_center', back_axle_center)
    back_axle_center_xy0 = back_axle_center.xyz
    back_axle_center_xy0.z = 0

    axle_diff = front_axle_center_xy0 - back_axle_center_xy0

    y_is_dir_axis = abs(axle_diff.x) < eps
    x_is_dir_axis = abs(axle_diff.y) < eps

    # Exactly one of the two axes must be the driving direction
    if y_is_dir_axis == x_is_dir_axis:
        logger.vinfo('y_is_dir_axis', y_is_dir_axis)
        logger.vinfo('x_is_dir_axis', x_is_dir_axis)
        assert False

    if y_is_dir_axis:
        if abs(front_axle_center.x) > eps:
            for obj in bpy.data.objects:
                obj.location.x -= front_axle_center.x
    elif x_is_dir_axis:
        if abs(front_axle_center.y) > eps:
            for obj in bpy.data.objects:
                obj.location.y -= front_axle_center.y
    else:
        assert False
    logger.info('recenter_objects_if_necessary: Done')
Example #9
def _load_blend_file(path_to_blend_file):

    # https://www.blender.org/api/blender_python_api_2_72b_release/bpy.types.BlendDataLibraries.html

    # append the data block from .blend file
    with bpy.data.libraries.load(path_to_blend_file) as (data_from, data_to):
        # The loaded objects can be accessed from 'data_to' outside of the context manager,
        # since loading the data replaces the name strings with the actual datablocks
        # (or None if a datablock could not be loaded).

        logger.info(len(data_from.objects))
        # We copy the objects, since we want to manually add them to our scene.
        # This may not be compatible with ALL object types.
        data_to.objects = data_from.objects

        # https://blender.stackexchange.com/questions/6357/is-it-possible-to-append-objects-to-the-same-layer-as-they-are-in-the-source-fil

        # we could also copy
        # data_to.materials = data_from.materials
        # data_to.images = data_from.images
        # data_to_textures = data_from.textures

    # ====== IMPORTANT ======
    # * after the call to "bpy.data.libraries.load" finishes, all elements are automatically added / appended to bpy.data
    # ====== ====== ======
    return data_to
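A minimal sketch of the manual scene linking mentioned in the comments above (the helper name is hypothetical; it uses the Blender 2.7x API that these snippets appear to target):

    def _link_loaded_objects_to_scene(data_to):
        # The datablocks returned by _load_blend_file already live in bpy.data,
        # but still have to be linked to the current scene to become visible
        for obj in data_to.objects:
            if obj is not None:
                bpy.context.scene.objects.link(obj)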
Example #10
 def set_dense_points_from_ply(self, input_path_to_ply_file):
     logger.info('set_dense_points_from_ply: ...')
     assert self.dense_points is None
     dense_points, _ = PLYFileHandler.parse_ply_file(input_path_to_ply_file)
     if len(dense_points) > 0:
         self.dense_points = dense_points
     logger.info('set_dense_points_from_ply: Done')
Example #11
    def set_active_cam_from_opengl_cam_to_world_mat(
            self,
            cam_to_world_opengl_np_mat,
            calibration_np_mat,
            width,
            height,
            max_clipping_range=sys.float_info.max):
        logger.info('set_active_cam_from_opengl_cam_to_world_mat: ...')

        # https://github.com/Kitware/VTK/blob/master/Rendering/Core/vtkCamera.h
        # https://github.com/Kitware/VTK/blob/master/Rendering/Core/vtkCamera.cxx

        # =============================================================
        # ModelViewTransform = ViewTransform * ModelTransform
        # ViewTransform depends only on Position, FocalPoint and ViewUp
        # =============================================================

        self.set_active_cam_intrinsics(calibration_np_mat, width, height,
                                       max_clipping_range)

        self.set_active_cam_model_view_transformation(
            cam_to_world_opengl_np_mat)

        # IMPORTANT OTHERWISE THE VISUALIZATION IS BUGGY
        self.vtk_renderer.ResetCameraClippingRange()
        # # Always call the render function of the window, not the renderer itself
        # self.vtk_render_window.Render()

        logger.info('set_active_cam_from_opengl_cam_to_world_mat: Done')
Example #12
    def convert_z_buffer_mat_to_world_coords(self,
                                             z_buffer_matrix,
                                             n_th_result_point=10):
        """ z_buffer contains values in [0,1]  """

        # https://vtk.org/doc/nightly/html/classvtkDepthImageToPointCloud.html
        # https://github.com/Kitware/VTK/blob/master/Rendering/Image/vtkDepthImageToPointCloud.h
        #   TODO
        #   e.g. vtk.vtkDepthImageToPointCloud() is part of the python interface

        world_coords = []
        index = 0
        num_values = z_buffer_matrix.shape[0] * z_buffer_matrix.shape[1]
        for (y, x), z_value in np.ndenumerate(z_buffer_matrix):
            if index % 10000 == 0:
                logger.info('index: ' + str(index) + ' of ' + str(num_values))
            # We assume that points lying on the clipping plane are not part of the scene
            if z_value < 1.0:
                if index % n_th_result_point == 0:
                    # https://www.vtk.org/doc/nightly/html/classvtkViewport.html
                    self.vtk_renderer.SetDisplayPoint(
                        x, y, z_value)  # TODO REMOVE REDUNDANT?
                    # https://www.vtk.org/doc/nightly/html/classvtkRenderer.html

                    world_coord = self._vtk_convert_display_coord_to_world_coord(
                        x, y, z_value)
                    world_coords.append(world_coord)
            index += 1

        return world_coords
Example #13
    def _count_missing_images(self, sorted_file_list):
        logger.debug('_count_missing_images: ...')
        number_missing_images = 0
        try:
            text_str, current_number_str = self.split_text_and_number_string(
                os.path.splitext(sorted_file_list[0])[0])
            last_number = int(current_number_str)

            for file_name in sorted_file_list[1:]:
                text_str, current_number_str = self.split_text_and_number_string(
                    os.path.splitext(file_name)[0])
                current_number = int(current_number_str)
                if current_number != last_number + 1:
                    current_missing_images = current_number - last_number - 1
                    logger.info('There are ' + str(current_missing_images) +
                                ' images missing between ' + str(last_number) +
                                ' and ' + str(current_number))
                    number_missing_images += current_missing_images
                last_number = current_number
            valid_count = True
        except ValueError:
            valid_count = False

        logger.debug('_count_missing_images: Done')
        return number_missing_images, valid_count
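The helper split_text_and_number_string used above is not included in this snippet; a possible regex-based implementation (an assumption, shown only to clarify the counting logic) could be:

    def split_text_and_number_string(self, file_name_stem):
        import re
        # Split a stem such as 'frame_000123' into its text part ('frame_')
        # and its trailing number string ('000123')
        match = re.match(r'^(.*?)(\d+)$', file_name_stem)
        if match is None:
            # Mirrors the ValueError handling in _count_missing_images
            raise ValueError('No trailing number found in: ' + file_name_stem)
        return match.group(1), match.group(2)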
Example #14
    def write_MVS_colmap_file(points, dense_id_to_file_name, mvs_colmap_ofp):
        logger.info('write_MVS_colmap_file: ...')
        logger.vinfo('mvs_colmap_ofp', mvs_colmap_ofp)

        mvs_content = []
        mvs_content.append('# DENSE_IMAGE_ID to image_name\n')
        mvs_content.append(str(len(dense_id_to_file_name)) + '\n')
        for dense_id, file_name in dense_id_to_file_name.items():
            mvs_content.append(str(dense_id) + ' ' + file_name + '\n')

        mvs_content.append(
            '# 3D point list with one line of data per point:\n')
        mvs_content.append(
            '#   POINT3D_ID, X, Y, Z, NX, NY, NZ, R, G, B, TRACK[] as (DENSE_ID, DENSE_COL, DENSE_ROW)\n'
        )
        mvs_content.append('# Number of points: x, mean track length: 0\n')
        for point in points:
            # From the docs:
            # <Point>  = <XYZ> <RGB> <number of measurements> <List of Measurements>
            current_line = ' '.join(list(map(str, point.coord)))
            current_line += ' ' + ' '.join(list(map(str, point.normal)))
            current_line += ' ' + ' '.join(list(map(str, point.color)))
            if point.measurements is not None:
                for measurement in point.measurements:
                    current_line += ' ' + str(measurement.camera_index)
                    current_line += ' ' + str(measurement.x)  # x = col
                    current_line += ' ' + str(measurement.y)  # y = row

            mvs_content.append(current_line + ' ' + '\n')

        with open(mvs_colmap_ofp, 'wb') as output_file:
            output_file.writelines([item.encode() for item in mvs_content])

        logger.info('write_MVS_colmap_file: Done')
Example #15
    def save_triangles_in_ply_file(output_path_to_file, triangles):
        logger.info('save_triangles_in_ply_file: ...')
        points = []
        faces = []
        for triangle_index, triangle in enumerate(triangles):
            assert isinstance(triangle.vertex_0, np.ndarray) \
                   and isinstance(triangle.vertex_1, np.ndarray) \
                   and isinstance(triangle.vertex_2, np.ndarray)

            # FIXME this will result in duplicated points (in the ply file)
            points += [
                Point(triangle.vertex_0),
                Point(triangle.vertex_1),
                Point(triangle.vertex_2)
            ]
            # FIXME these face indices refer to duplicated points
            faces.append(
                Face(initial_indices=[
                    3 * triangle_index,
                    3 * triangle_index + 1,
                    3 * triangle_index + 2
                ]))

        PLYFileHandler.write_ply_file(ofp=output_path_to_file,
                                      vertices=points,
                                      faces=faces)

        logger.info('save_triangles_in_ply_file: Done')
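Addressing the FIXME notes above, a possible deduplication pass (a sketch only; the function name is hypothetical and it reuses the Point and Face helpers assumed by this snippet) maps each distinct vertex to a single index before building the faces:

    def _deduplicate_triangle_vertices(triangles):
        # Map every distinct vertex coordinate to a single index so that
        # shared triangle corners are written to the ply file only once
        coord_to_index = {}
        points = []
        faces = []
        for triangle in triangles:
            indices = []
            for vertex in (triangle.vertex_0, triangle.vertex_1, triangle.vertex_2):
                key = tuple(vertex.tolist())
                if key not in coord_to_index:
                    coord_to_index[key] = len(points)
                    points.append(Point(vertex))
                indices.append(coord_to_index[key])
            faces.append(Face(initial_indices=indices))
        return points, faces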
Example #16
def create_additional_depth_output_nodes(scene,
                                         output_path=None,
                                         image_stem=None,
                                         leading_zeroes_template='#####'):

    logger.info('create_additional_depth_output_nodes: ...')

    default_render_layer = scene.render.layers.get(
        scene.render.layers.active.name)

    default_render_layer.pass_alpha_threshold = 0
    scene.use_nodes = True
    scene_nodes = scene.node_tree.nodes
    scene_links = scene.node_tree.links

    default_render_layers_node = scene_nodes.get('Render Layers')

    depth_image_output_node = scene_nodes.new('CompositorNodeOutputFile')
    depth_image_output_node.format.file_format = 'OPEN_EXR'
    depth_image_output_node.format.use_zbuffer = True  # Store floats

    if output_path is not None:
        depth_image_output_node.base_path = output_path
    if image_stem is not None:
        depth_image_output_node.file_slots[0].path = \
            image_stem + leading_zeroes_template
    scene_links.new(default_render_layers_node.outputs['Depth'],
                    depth_image_output_node.inputs['Image'])

    logger.info('create_additional_depth_output_nodes: Done')
    return depth_image_output_node
Example #17
    def create_object_to_world_transformation_files(
        rec_method_transf_dir,
        camera_object_trajectory,
        world_to_world_transf=np.identity(4)):
        logger.info('create_object_to_world_transformation_files: ...')
        logger.vinfo('rec_method_transf_dir', rec_method_transf_dir)

        if not os.path.isdir(rec_method_transf_dir):
            os.mkdir(rec_method_transf_dir)

        for frame_name in sorted(
                camera_object_trajectory.get_frame_names_sorted()):
            transformation_file_path = os.path.join(rec_method_transf_dir,
                                                    str(frame_name) + '.txt')
            logger.vinfo('frame_name', frame_name)

            #object_pose = camera_object_trajectory[frame_name]['object_pose']
            object_matrix_world = camera_object_trajectory.get_object_matrix_world(
                frame_name)

            np.savetxt(transformation_file_path,
                       world_to_world_transf.dot(object_matrix_world))

            # with open(transformation_file_path, 'w') as output_file:
            #     output_file.writelines([item for item in file_content])
        logger.info('create_object_to_world_transformation_files: Done')
Example #18
    def compute_reprojection_errors(cams, points, image_path,
                                    max_allowed_mean_projection_error_in_pix,
                                    sparse_reconstruction_type):
        """
        All points with a measurement in a certain image must be visible in the corresponding image
        :param cams:
        :param points:
        :return:
        """
        logger.info('verify_cams_and_points: ...')
        rec = Reconstruction(cams, points, image_path,
                             sparse_reconstruction_type)
        logger.vinfo('len(points)', len(points))
        repr_errors = []
        for iteration, point in enumerate(points):

            for measurement in point.measurements:

                cam = rec.get_camera_with_camera_index(
                    measurement.camera_index)
                repro_error = cam.compute_reprojection_error_single_point(
                    point)
                repr_errors.append(repro_error)

        logger.vinfo('max_repr_error', max(repr_errors))
        mean_repr_error = np.mean(repr_errors)
        logger.vinfo('mean_repr_error', mean_repr_error)
        assert mean_repr_error < max_allowed_mean_projection_error_in_pix
        logger.info('compute_reprojection_errors: Done')
Example #19
    def __ply_data_faces_to_face_list(ply_data):
        faces = []
        ply_data_face_type = None
        ply_data_face_data_type = None
        if 'face' in ply_data:
            # read faces
            ply_data_face_type = ply_data['face'].dtype
            logger.info('Found ' + str(len(ply_data['face'].data)) + ' faces')
            for line in ply_data['face'].data['vertex_indices']:
                current_face = Face()
                current_face.vertex_indices = np.array(
                    [line[0], line[1], line[2]])
                faces.append(current_face)

            # REPLACE 'ply_data_face_data_type', PARSING DOES NOT YIELD THE CORRECT INFORMATION

            # For example, when constructing a "face" element, if all the faces are triangles (a common occurrence),
            # it's okay to have a "vertex_indices" field of type 'i4' and shape (3,) instead of type object and shape ().
            # However, if the serialized PLY file is read back in using plyfile, the "vertex_indices" property will be
            # represented as an object-typed field, each of whose values is an array of type 'i4' and length 3.
            # The reason is simply that the PLY format provides no way to find out that each "vertex_indices" field
            # has length 3 without actually reading all the data, so plyfile has to assume that this is a
            # variable-length property.

            #ply_data_face_data_type = ply_data['face'].data.dtype
            ply_data_face_data_type = [('vertex_indices', 'i4', (3, ))]

            # extend the data type
            face_names = ply_data['face'].data.dtype.names
            if 'red' in face_names and 'green' in face_names and 'blue' in face_names:
                ply_data_face_data_type = [('vertex_indices', 'i4', (3, )),
                                           ('red', 'u1'), ('green', 'u1'),
                                           ('blue', 'u1')]

        return faces, ply_data_face_type, ply_data_face_data_type
Example #20
    def __ply_data_vertices_to_vetex_list(ply_data):

        vertex_data_type_names = ply_data['vertex'].data.dtype.names
        use_color = False
        if 'red' in vertex_data_type_names and 'green' in vertex_data_type_names and 'blue' in vertex_data_type_names:
            use_color = True

        vertices = []
        value_keys = [
            x for x, y in sorted(ply_data['vertex'].data.dtype.fields.items(),
                                 key=lambda k: k[1])
        ]
        non_scalar_value_keys = [
            'x', 'y', 'z', 'red', 'green', 'blue', 'nx', 'ny', 'nz',
            'measurements'
        ]
        scalar_value_keys = [
            value_key for value_key in value_keys
            if value_key not in non_scalar_value_keys
        ]
        logger.info('Found the following vertex properties: ' +
                    str(value_keys))

        #scalar_value_keys = [value_key for (value_key, some_value) in ]
        #logger.info(scalar_value_keys)

        logger.info('Found ' + str(len(ply_data['vertex'].data)) + ' vertices')
        # print(type(line)) == numpy.void
        # print(line.dtype) == [('x', '<f4'), ('y', '<f4'), ('z', '<f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ('measurements', 'O')]
        for point_index, line in enumerate(ply_data['vertex'].data):
            current_point = Point()
            current_point.coord = np.array([line['x'], line['y'], line['z']])
            if use_color:
                current_point.color = np.array(
                    [line['red'], line['green'], line['blue']])
            current_point.id = point_index

            for scalar_value_key in scalar_value_keys:
                current_point.scalars[scalar_value_key] = line[scalar_value_key]

            if 'measurements' in line.dtype.names:
                elements_per_measurement = 4
                current_point.measurements = []
                # Integer division is required here (Python 3)
                num_measurements = len(line['measurements']) // elements_per_measurement
                for measurement_idx in range(num_measurements):
                    array_idx = measurement_idx * elements_per_measurement
                    measurement_values = line['measurements'][
                        array_idx:array_idx + elements_per_measurement]
                    current_point.measurements.append(
                        Measurement.init_from_list(measurement_values))

            vertices.append(current_point)

        ply_data_vertex_dtype = ply_data['vertex'].dtype
        ply_data_vertex_data_dtype = ply_data['vertex'].data.dtype

        return vertices, ply_data_vertex_dtype, ply_data_vertex_data_dtype
Example #21
def set_tire_diameter(car_rig_name, tire_diameter):
    logger.info('set_tire_diameter: ...')

    # we assume that all wheels of the car have the same diameter
    # "wheel diameter front" is a custom property of the object
    bpy.data.objects[car_rig_name]["Wheel Diameter Front"] = tire_diameter
    bpy.data.objects[car_rig_name]["Wheel Diameter Back"] = tire_diameter

    logger.info('set_tire_diameter: Done')
Example #22
def set_rig_ground_detect(ground_detect_name,
                          ground_name,
                          ground_detect_distance=0):
    logger.info('set_rig_ground_detect: ...')
    shrinkwrap_modifier = bpy.data.objects[ground_detect_name].modifiers['Shrinkwrap']
    shrinkwrap_modifier.target = bpy.data.objects[ground_name]
    shrinkwrap_modifier.offset = ground_detect_distance
    logger.info('set_rig_ground_detect: Done')
Example #23
def add_bone_to_armature(armature_object_name,
                         bone_name,
                         bone_head_pos,
                         bone_tail_pos,
                         world_coordinates=False):  # set to true, to use world coordinates

    """

    :param armature_object_name:
    :param bone_name:
    :param bone_head_pos: in local or world coordinates (point which is attached to parent)
    :param bone_tail_pos: in local or world coordinates
    :param world_coordinates
    :return:
    """

    logger.info('add_bone_to_armature: ...')

    # https://docs.blender.org/api/blender_python_api_2_75_0/info_gotcha.html#editbones-posebones-bone-bones

    set_mode(active_object_name=armature_object_name, mode='EDIT', configure_scene_for_basic_ops=False)

    # if parent_bone is not None:
    #     armature_object.data.bones[parent_bone.name].select = True

    armature_object = bpy.data.objects[armature_object_name]

    # Create single bone
    bone = armature_object.data.edit_bones.new(bone_name)

    # https://docs.blender.org/api/blender_python_api_current/bpy.types.Object.html#bpy.types.Object.matrix_world
    if world_coordinates:
        world_to_object_matrix = armature_object.matrix_world.inverted()
        bone_head_pos = (world_to_object_matrix * bone_head_pos.to_4d()).to_3d()
        bone_tail_pos = (world_to_object_matrix * bone_tail_pos.to_4d()).to_3d()

    bone.head = bone_head_pos
    bone.tail = bone_tail_pos

    # logger.info('parent_bone')
    # logger.info(parent_bone)

    # if parent_bone is not None:
    #     bone.parent = parent_bone
    #     # Connect this bone with its parent (or not)
    #     # (i.e. moving parent/child moves also child/parent)
    #     bone.use_connect = use_connect

    # https://docs.blender.org/api/blender_python_api_2_75_0/info_gotcha.html#armature-mode-switching
    #   While writing scripts that deal with armatures you may find you have to switch between modes.
    #   When doing so, take care when switching out of edit mode not to keep references to the edit-bones or
    #   their head/tail vectors. Further access to these will crash blender, so it's important that the script
    #   clearly separates sections of the code which operate in different modes.

    bpy.ops.object.mode_set(mode='OBJECT')

    logger.info('add_bone_to_armature: Done')
Example #24
    def append_suffix_to_files_in_folder(idp, odp, suffix):
        logger.info('append_suffix_to_files_in_folder: ...')

        ifn_s = [ele for ele in os.listdir(idp) if os.path.isfile(os.path.join(idp, ele))]

        for ifn in ifn_s:
            name, ext = os.path.splitext(ifn)
            shutil.copyfile(os.path.join(idp, ifn), os.path.join(odp, name + suffix + ext))
        logger.info('append_suffix_to_files_in_folder: Done')
Example #25
    def _get_blender_internal_texture_type_to_file_paths(material):

        some_other_name = material.name
        logger.debug(some_other_name)

        # fprint('material: ' + material.name)
        texture_name_set = set()
        texture_type_to_file_path = defaultdict(lambda: None)
        for texture_slot in material.texture_slots:

            if texture_slot:
                texture = texture_slot.texture

                texture_name_set.add(texture)
                # fprint('texture: ' + texture.name)
                if hasattr(texture, 'image'):
                    logger.debug('Material: ' + material.name + ', Texture: ' +
                                 texture.name)

                    logger.debug('use_map_color_diffuse: ' +
                                 str(texture_slot.use_map_color_diffuse))
                    logger.debug('use_map_normal: ' +
                                 str(texture_slot.use_map_normal))

                    # ==== Remark ====
                    # Relative paths start with '//' and are relative to the blend file.
                    # The prefix of paths to textures packed inside the .blend file depends on
                    # the original file path, e.g. <blend_file_folder>/textures/texture_file.ext,
                    # i.e. they look like '//textures/<texture_name>.<texture_extension>'

                    if texture.image.packed_file is not None:
                        logger.debug('Image is packed')
                        # If the texture is packed, the file is definitively valid, otherwise check the file
                        image_is_valid = True
                    else:
                        logger.debug('Image is an external source')
                        image_is_valid = os.path.isfile(
                            bpy.path.abspath(texture.image.filepath))

                    if image_is_valid:
                        if texture_slot.use_map_color_diffuse:
                            NodeUtility._collect_texture(
                                texture_type_to_file_path,
                                NodeUtility.USE_MAP_COLOR_DIFFUSE,
                                texture.image.filepath)

                        elif texture_slot.use_map_normal:
                            NodeUtility._collect_texture(
                                texture_type_to_file_path,
                                NodeUtility.USE_MAP_NORMAL,
                                texture.image.filepath)

        logger.info('texture_type_to_file_path: ' +
                    str(texture_type_to_file_path))

        return texture_type_to_file_path
Example #26
 def update_dense_measurements_with_point_projections(self):
     """
     The measurement values (x and y image coordinates)
     provided by Colmap MVS are (currently) incorrect.
     :return:
     """
     logger.info('update_dense_measurements_with_point_projections: ...')
     self.dense_points = self.update_measurements_with_point_projections(
         self.dense_points)
     logger.info('update_dense_measurements_with_point_projections: Done')
Example #27
def configure_libs_for_blender():
    """
    This method is called by "Blender_Script_Executor"
    :return:
    """

    # Add the parent folder of the BlenderUtility to the python path
    # in order to enable access to the BlenderUtility package and all subpackages (e.g. Utility)
    sys.path.insert(
        0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

    from Utility.Logging_Extension import logger
    from Utility.Config import Config

    additional_system_paths = []

    parent_dp = os.path.dirname(os.path.realpath(__file__))
    example_config_path = os.path.join(parent_dp, 'Config',
                                       'blender_script_executor_example.cfg')
    config_path = os.path.join(parent_dp, 'Config',
                               'blender_script_executor.cfg')
    if not os.path.isfile(config_path):
        copyfile(example_config_path, config_path)

    blender_script_config = Config(path_to_config_file=config_path)
    custom_paths = blender_script_config.get_option_value('custom_paths', list)

    additional_system_paths += custom_paths

    python_type = blender_script_config.get_option_value('python_type', str)

    assert python_type in [DEFAULT, ANACONDA]

    if python_type == DEFAULT:
        logger.info(
            'Adding Default Python Paths to Blender Executable (No Anaconda Paths)'
        )
        python_sys_path_default_list = blender_script_config.get_option_value(
            'python_sys_path_default', list)
        additional_system_paths += python_sys_path_default_list

        python_sys_path_local_list = blender_script_config.get_option_value(
            'python_sys_path_local', list)
        additional_system_paths += python_sys_path_local_list

    elif python_type == ANACONDA:

        logger.info('Adding ANACONDA3 Paths to Blender Executable')
        python_anaconda_path_list = blender_script_config.get_option_value(
            'python_anaconda_path', list)
        additional_system_paths += python_anaconda_path_list
    else:
        assert False

    add_paths_to_system_path_if_necessary(additional_system_paths)
Example #28
 def check_visibility_of_points_cam_coords(self, points_cam_coord):
     """
     Checks the visibility of points given in camera coordinates. Returns a binary array.
     $ >>> points_cam_coord.shape
     $ >>> (n, 3)                 # where n is the number of points
     """
     logger.info('check_visibility_of_points_cam_coords: ...')
     _, visibility_array = \
         self.project_multiple_cam_coords_into_camera_image_as_image_coord(points_cam_coord)
     logger.info('check_visibility_of_points_cam_coords: Done')
     return visibility_array
Example #29
    def __init__(self, obj_rec, back_rec, path_to_frames=None, max_amount_cams=None):
        self.obj_rec = obj_rec
        self.back_rec = back_rec

        self.path_to_frames = path_to_frames

        obj_cam_index_to_camera = obj_rec.get_camera_index_to_camera()
        assert isinstance(back_rec, Background_Reconstruction)
        back_cam_index_to_camera = back_rec.get_camera_index_to_camera()

        self.valid_cam_names_sorted = ObjectBackgroundReconstruction.compute_valid_cam_names(
            obj_cam_index_to_camera, back_cam_index_to_camera)

        # Object Cameras
        self.valid_obj_cam_index_to_cam = \
            ObjectBackgroundReconstruction.compute_valid_cam_index_to_camera(
                self.valid_cam_names_sorted, obj_cam_index_to_camera)

        self.valid_cam_name_to_obj_cam_index = \
            self.compute_cam_name_to_cam_index(self.valid_obj_cam_index_to_cam)

        self.valid_cam_name_to_obj_cam = self.compute_cam_name_to_obj_cam()

        self.valid_obj_cam_index_to_img_index = \
            self.compute_valid_obj_cam_index_to_img_index()

        # Background Cameras
        self.valid_back_cam_index_to_cam = \
            ObjectBackgroundReconstruction.compute_valid_cam_index_to_camera(
                self.valid_cam_names_sorted, back_cam_index_to_camera)

        self.valid_cam_name_to_back_cam_index = \
            self.compute_cam_name_to_cam_index(self.valid_back_cam_index_to_cam)

        self.valid_cam_name_to_back_cam = self.compute_cam_name_to_back_cam()

        self.valid_back_cam_index_to_img_index = \
            self.compute_valid_back_cam_index_to_img_index()

        if len(obj_cam_index_to_camera) > len(self.valid_obj_cam_index_to_cam):
            logger.info(
                'Removing ' +
                str(len(obj_cam_index_to_camera) -
                    len(self.valid_obj_cam_index_to_cam)) +
                ' object cameras')

        if len(back_cam_index_to_camera) > len(self.valid_back_cam_index_to_cam):
            logger.info(
                'Removing ' +
                str(len(back_cam_index_to_camera) -
                    len(self.valid_back_cam_index_to_cam)) +
                ' background cameras')

        self.check_validity_of_valid_cams()
Example #30
def perform_gt_post_processing(directory_path):

    gt_specific_dn = 'ground_truth_files'
    gt_general_idp = os.path.join(directory_path, 'general_ground_truth_files')

    for possible_target_folder, dirs, files in os.walk(directory_path):

        if os.path.basename(possible_target_folder) == gt_specific_dn:
            logger.info('gt_specific_dn: ' + possible_target_folder)

            post_process_ground_truth(gt_specific_dp=possible_target_folder,
                                      gt_general_idp=gt_general_idp)