def _parse_nvm_points(input_file, num_3D_points):
    """Parse *num_3D_points* point lines (with their measurements) from an NVM file.

    Reads one line per point from *input_file* and returns a list of Point
    objects carrying coordinates, color and per-image measurements.
    """
    points = []
    for point_index in range(num_3D_points):
        # From the VSFM docs:
        # <Point>  = <XYZ> <RGB> <number of measurements> <List of Measurements>
        elements = input_file.readline().rstrip().split()
        coord = [float(value) for value in elements[0:3]]
        color = [int(value) for value in elements[3:6]]
        num_measurements = int(elements[6])
        measurement_tokens = elements[7:]

        measurements = []
        for idx in range(num_measurements):
            # From the VSFM docs:
            # <Measurement> = <Image index> <Feature Index> <xy>
            img_idx, feat_idx, x_token, y_token = \
                measurement_tokens[idx * 4:(idx + 1) * 4]
            measurements.append(
                Measurement(int(img_idx), int(feat_idx),
                            float(x_token), float(y_token)))

        points.append(Point(coord=coord,
                            color=color,
                            measurements=measurements,
                            id=point_index,
                            scalars=None))

    return points
예제 #2
0
    def parse_points(json_data, op, view_index_to_absolute_fp=None):
        """Parse the 'structure' entries of an SfM JSON file into Point objects.

        If Pillow is available and *view_index_to_absolute_fp* maps view
        indices to image file paths, each point's color is averaged over the
        pixel values of its observations; otherwise all points are black.
        """
        try:
            from PIL import Image
            # Color can only be computed if image file paths were provided.
            compute_color = view_index_to_absolute_fp is not None
        except ImportError:
            compute_color = False

        structure = json_data['structure']

        if compute_color:
            op.report({'INFO'},'Computing color information from files: ...')
            view_index_to_image = {}
            for view_index, absolute_fp in view_index_to_absolute_fp.items():
                pil_image = Image.open(absolute_fp)
                view_index_to_image[view_index] = pil_image

            op.report({'INFO'},'Computing color information from files: Done')

        points = []
        for json_point in structure:

            r = g = b = 0

            # color information can only be computed if input files are provided
            if compute_color:
                for observation in json_point['value']['observations']:
                    view_index = int(observation['key'])

                    # REMARK: ndarray.shape is (height, width) while PIL's
                    # image.size is (width, height). That means
                    # x_in_openmvg_file == x_image == y_ndarray
                    # and y_in_openmvg_file == y_image == x_ndarray
                    x_in_json_file = float(observation['value']['x'][0])    # x has index 0
                    y_in_json_file = float(observation['value']['x'][1])    # y has index 1

                    current_image = view_index_to_image[view_index]
                    current_r, current_g, current_b = current_image.getpixel((x_in_json_file, y_in_json_file))
                    r += current_r
                    g += current_g
                    b += current_b

                # Average the accumulated rgb values; guard against points
                # without observations to avoid a ZeroDivisionError.
                amount_observations = len(json_point['value']['observations'])
                if amount_observations > 0:
                    r /= amount_observations
                    g /= amount_observations
                    b /= amount_observations

            custom_point = Point(
                coord=np.array(json_point['value']['X'], dtype=float),
                color=np.array([r, g, b], dtype=int),
                id=int(json_point['key']),
                scalars=[])

            points.append(custom_point)
        return points
예제 #3
0
    def register_points_draw_callback(self, object_anchor, points):
        """Create and track a draw callback that renders *points* for *object_anchor*."""
        point_coords, point_colors = Point.split_points(points)
        handler = DrawCallBackHandler()
        handler.register_points_draw_callback(
            self, object_anchor, point_coords, point_colors)
        self.draw_callback_handler_list.append(handler)

        # Remember the data so it can be retrieved per anchor later on.
        self.anchor_to_point_coords[object_anchor] = point_coords
        self.anchor_to_point_colors[object_anchor] = point_colors
예제 #4
0
 def parse_points(json_data, op):
     """Convert the 'points' entries of *json_data* into Point objects."""
     points = []
     for point_id, json_point in json_data['points'].items():
         points.append(
             Point(coord=np.array(json_point['coordinates'], dtype=float),
                   color=np.array(json_point['color'], dtype=int),
                   id=point_id,
                   scalars=[]))
     return points
    def convert_points(id_to_col_points3D):
        """Convert colmap Point3D tuples (the values of *id_to_col_points3D*) to Point objects."""
        # Point3D = collections.namedtuple(
        #   "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
        points3D = [
            Point(coord=col_point3D.xyz,
                  color=col_point3D.rgb,
                  id=col_point3D.id,
                  scalars=None)
            for col_point3D in id_to_col_points3D.values()
        ]
        return points3D
    def convert_points(id_to_col_points3D):
        """Create Point objects from the colmap Point3D values of *id_to_col_points3D*."""
        # From photogrammetry_importer\ext\read_write_model.py
        #   Point3D = collections.namedtuple(
        #       "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
        points3D = []
        for colmap_point in id_to_col_points3D.values():
            points3D.append(Point(coord=colmap_point.xyz,
                                  color=colmap_point.rgb,
                                  id=colmap_point.id,
                                  scalars=None))

        return points3D
예제 #7
0
def draw_points(op, points, add_points_to_point_cloud_handle, reconstruction_collection=None):
    """Visualize *points* with an OpenGL draw callback anchored to an empty object."""
    log_report('INFO', 'Add particle draw handlers', op)

    point_coords, point_colors = Point.split_points(points)
    anchor = add_empty(
        "OpenGL Point Cloud", reconstruction_collection)
    if add_points_to_point_cloud_handle:
        # Persist the data on the anchor so the cloud can be restored from the scene.
        anchor['particle_coords'] = point_coords
        anchor['particle_colors'] = point_colors
        bpy.context.scene['contains_opengl_point_clouds'] = True

    DrawManager.get_singleton().register_points_draw_callback(
        anchor, point_coords, point_colors)
예제 #8
0
    def __ply_data_vertices_to_vetex_list(ply_data):
        """Convert the 'vertex' element of a parsed PLY file into Point objects.

        Returns a tuple (vertices, ply vertex dtype, ply vertex data dtype).
        Vertices without 'red'/'green'/'blue' properties get a white default
        color; all remaining non-standard properties are collected as scalars.
        """
        vertex_data_type_names = ply_data['vertex'].data.dtype.names
        # Only use per-vertex color if all three channels are present.
        use_color = ('red' in vertex_data_type_names
                     and 'green' in vertex_data_type_names
                     and 'blue' in vertex_data_type_names)

        vertices = []
        # Sort property names by their byte offset within the vertex record.
        value_keys = [
            x for x, y in sorted(ply_data['vertex'].data.dtype.fields.items(),
                                 key=lambda k: k[1])
        ]
        non_scalar_value_keys = [
            'x', 'y', 'z', 'red', 'green', 'blue', 'nx', 'ny', 'nz',
            'measurements'
        ]
        scalar_value_keys = [
            value_key for value_key in value_keys
            if value_key not in non_scalar_value_keys
        ]
        print('Found the following vertex properties: ' + str(value_keys))

        print('Found ' + str(len(ply_data['vertex'].data)) + ' vertices')
        for point_index, line in enumerate(ply_data['vertex'].data):
            coord = np.array([line['x'], line['y'], line['z']])
            if use_color:
                color = np.array([line['red'], line['green'], line['blue']])
            else:
                color = np.array([255, 255, 255])
            scalars = dict()
            for scalar_value_key in scalar_value_keys:
                scalars[scalar_value_key] = line[scalar_value_key]

            # BUGFIX: the collected scalar values were previously discarded
            # (scalars=None was passed); forward them to the Point instead.
            current_point = Point(coord=coord,
                                  color=color,
                                  measurements=None,
                                  id=point_index,
                                  scalars=scalars)
            vertices.append(current_point)

        ply_data_vertex_dtype = ply_data['vertex'].dtype
        ply_data_vertex_data_dtype = ply_data['vertex'].data.dtype

        return vertices, ply_data_vertex_dtype, ply_data_vertex_data_dtype
    def _parse_nvm_points(input_file, num_3D_points):
        """Parse *num_3D_points* point lines from an NVM file (measurements are ignored)."""
        points = []
        for point_index in range(num_3D_points):
            # From the VSFM docs:
            # <Point>  = <XYZ> <RGB> <number of measurements> <List of Measurements>
            elements = input_file.readline().rstrip().split()
            coord = [float(value) for value in elements[0:3]]
            color = [int(value) for value in elements[3:6]]
            points.append(Point(coord=coord,
                                color=color,
                                id=point_index,
                                scalars=None))

        return points
def export_selected_cameras_and_vertices_of_meshes(op, odp):
    """Convert the selected Blender objects into Camera and Point objects.

    Selected cameras become Camera instances (with *odp* as image directory);
    the vertices of every other selected object that has mesh data become
    green Point instances in world coordinates. Returns (cameras, points).
    """
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: ...')
    cameras = []
    points = []

    point_index = 0
    camera_index = 0
    for selected_obj in bpy.context.selected_objects:
        if selected_obj.type == 'CAMERA':
            op.report({'INFO'}, 'obj.name: ' + str(selected_obj.name))
            calibration_mat = get_calibration_mat(op, selected_obj)
            world_mat = get_computer_vision_camera_matrix(op, selected_obj)

            cam = Camera()
            cam.id = camera_index
            cam.set_relative_fp(str(selected_obj.name), Camera.IMAGE_FP_TYPE_NAME)
            cam.image_dp = odp
            # Use the render resolution as image size of the virtual camera.
            cam.width = bpy.context.scene.render.resolution_x
            cam.height = bpy.context.scene.render.resolution_y

            cam.set_calibration(calibration_mat, radial_distortion=0)
            cam.set_4x4_cam_to_world_mat(world_mat)
            cameras.append(cam)
            camera_index += 1

        elif selected_obj.data is not None:
            for vert in selected_obj.data.vertices:
                coord_world = selected_obj.matrix_world @ vert.co
                points.append(
                    Point(coord=coord_world,
                          color=[0, 255, 0],
                          id=point_index,
                          scalars=[]))
                point_index += 1
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: Done')
    return cameras, points
예제 #11
0
    def parse_points(json_data, image_index_to_camera_index, op):
        """Parse the 'structure' section of an SfM JSON file into Point objects."""
        points = []
        if 'structure' not in json_data:
            # Without reconstruction results there is nothing to parse.
            op.report(
                {'ERROR'},
                'FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the SfM reconstruction results: structure.')
            return points

        for json_point in json_data['structure']:
            points.append(
                Point(coord=np.array(json_point['X'], dtype=float),
                      color=np.array(json_point['color'], dtype=int),
                      id=int(json_point['landmarkId']),
                      scalars=[]))
        return points
def export_selected_cameras_and_vertices_of_meshes(op):
    """Convert the selected Blender objects into Camera and Point objects.

    Selected cameras become Camera instances; the vertices of every other
    selected object that has mesh data become white Point instances in world
    coordinates. Returns (cameras, points).
    """
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: ...')
    cameras = []
    points = []

    point_index = 0
    camera_index = 0
    for obj in bpy.context.selected_objects:
        if obj.type == 'CAMERA':
            op.report({'INFO'}, 'obj.name: ' + str(obj.name))
            calibration_mat = get_calibration_mat(op, obj)

            camera_matrix_computer_vision = get_computer_vision_camera_matrix(
                op, obj)

            cam = Camera()
            # BUGFIX: was str('camera_index'), which assigned the literal
            # string 'camera_index' to every camera instead of its index.
            cam.file_name = str(camera_index)
            cam.set_calibration(calibration_mat, radial_distortion=0)
            cam.set_4x4_cam_to_world_mat(camera_matrix_computer_vision)
            cameras.append(cam)
            camera_index += 1

        else:
            if obj.data is not None:
                obj_points = []
                for vert in obj.data.vertices:
                    # NOTE(review): uses the pre-Blender-2.80 '*' operator for
                    # matrix-vector multiplication (2.80+ uses '@') — confirm
                    # the targeted Blender version.
                    coord_world = obj.matrix_world * vert.co
                    obj_points.append(
                        Point(coord=coord_world,
                              color=[255, 255, 255],
                              measurements=[],
                              id=point_index,
                              scalars=[]))

                    point_index += 1
                points += obj_points
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: Done')
    return cameras, points
def create_particle_color_nodes(node_tree,
                                points,
                                set_particle_color_flag,
                                particle_overwrite_color=None):
    """Return a shader node providing per-particle color.

    With *set_particle_color_flag* a single RGB node with
    *particle_overwrite_color* is used; otherwise a texture built from the
    point colors is sampled by particle index.
    """
    nodes = node_tree.nodes
    if set_particle_color_flag:
        assert particle_overwrite_color is not None
        if 'RGB' in nodes:
            color_node = nodes['RGB']
        else:
            color_node = nodes.new("ShaderNodeRGB")

        # to_4d() also creates a copy of the vector.
        color_node.outputs['Color'].default_value = \
            Vector(particle_overwrite_color).to_4d()

    else:
        if 'Image Texture' in nodes:
            color_node = nodes['Image Texture']
        else:
            color_node = nodes.new("ShaderNodeTexImage")

        point_coords, point_colors = Point.split_points(points)
        color_node.image = compute_particle_color_texture(point_colors)

        # Map each particle to a texture coordinate: u = particle_index / num_points.
        info_node = nodes.new('ShaderNodeParticleInfo')
        divide_node = nodes.new('ShaderNodeMath')
        divide_node.operation = 'DIVIDE'
        node_tree.links.new(info_node.outputs['Index'],
                            divide_node.inputs[0])
        divide_node.inputs[1].default_value = len(points)
        combine_node = nodes.new('ShaderNodeCombineXYZ')
        node_tree.links.new(divide_node.outputs['Value'],
                            combine_node.inputs['X'])
        node_tree.links.new(combine_node.outputs['Vector'],
                            color_node.inputs['Vector'])

    return color_node
예제 #14
0
    def parse_points(json_data, op, view_index_to_absolute_fp=None):
        """Parse the 'structure' entries of an SfM JSON file into Point objects.

        If Pillow is installed, *view_index_to_absolute_fp* is provided and
        every referenced image exists on disk, each point's color is averaged
        over the pixel values of its observations; otherwise all points are
        black and a warning is logged.
        """
        compute_color = True
        try:
            from PIL import Image
        except ImportError:
            log_report(
                'WARNING',
                'Can not compute point cloud color information, since Pillow is not installed.',
                op)
            compute_color = False

        if view_index_to_absolute_fp is None:
            log_report(
                'WARNING',
                'Can not compute point cloud color information, since path to images is not correctly set.',
                op)
            compute_color = False

        if compute_color:
            log_report(
                'INFO',
                'Try to collect color information from files (this might take a while)',
                op)
            view_index_to_image = {}
            for view_index, absolute_fp in view_index_to_absolute_fp.items():
                if os.path.isfile(absolute_fp):
                    pil_image = Image.open(absolute_fp)
                    view_index_to_image[view_index] = pil_image
                else:
                    # A single missing image disables color computation entirely.
                    log_report(
                        'WARNING',
                        'Can not compute point cloud color information, since image file path is incorrect.',
                        op)
                    compute_color = False
                    break

        if compute_color:
            log_report(
                'INFO',
                'Compute color information from files (this might take a while)',
                op)

        points = []
        structure = json_data['structure']
        for json_point in structure:

            r = g = b = 0

            # color information can only be computed if input files are provided
            if compute_color:
                for observation in json_point['value']['observations']:
                    view_index = int(observation['key'])

                    # REMARK: ndarray.shape is (height, width) while PIL's
                    # image.size is (width, height). That means
                    # x_in_openmvg_file == x_image == y_ndarray
                    # and y_in_openmvg_file == y_image == x_ndarray
                    x_in_json_file = float(
                        observation['value']['x'][0])  # x has index 0
                    y_in_json_file = float(
                        observation['value']['x'][1])  # y has index 1

                    current_image = view_index_to_image[view_index]
                    current_r, current_g, current_b = current_image.getpixel(
                        (x_in_json_file, y_in_json_file))
                    r += current_r
                    g += current_g
                    b += current_b

                # Average the accumulated rgb values; guard against points
                # without observations to avoid a ZeroDivisionError.
                amount_observations = len(json_point['value']['observations'])
                if amount_observations > 0:
                    r /= amount_observations
                    g /= amount_observations
                    b /= amount_observations

            custom_point = Point(coord=np.array(json_point['value']['X'],
                                                dtype=float),
                                 color=np.array([r, g, b], dtype=int),
                                 id=int(json_point['key']),
                                 scalars=[])

            points.append(custom_point)
        return points
예제 #15
0
 def write_ply_file_from_vertex_mat(output_path_to_file, vertex_mat):
     """Write each row of *vertex_mat* as a vertex of a PLY file."""
     vertices = [Point(coord=row) for row in vertex_mat]
     PLYFileHandler.write_ply_file(output_path_to_file, vertices)
예제 #16
0
    def parse_points(json_data, image_index_to_camera_index, op, path_to_input_files=None, view_index_to_file_name=None):
        """Parse the 'structure' section of an SfM JSON file into Point objects.

        If *path_to_input_files* and *view_index_to_file_name* are provided and
        every referenced image exists on disk, each point's color is averaged
        over the pixel values of its observations; otherwise all points are
        black.
        """
        points = []
        is_valid_file = 'structure' in json_data

        if not is_valid_file:
            op.report(
                {'ERROR'},
                'FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the SfM reconstruction results: structure.')
            return points

        structure = json_data['structure']

        # Color can only be computed when both the image directory and the
        # view-index-to-file-name mapping were provided.
        compute_color = (path_to_input_files is not None
                         and view_index_to_file_name is not None)

        if compute_color:
            op.report({'INFO'},'Computing color information from files: ...')
            view_index_to_image = {}
            for view_index, file_name in view_index_to_file_name.items():
                image_path = os.path.join(path_to_input_files, file_name)
                if not os.path.isfile(image_path):
                    # A single missing image disables color computation entirely.
                    compute_color = False
                    break
                # NOTE(review): assumes PIL's Image is imported at module
                # level — confirm against the file header.
                pil_image = Image.open(image_path)
                view_index_to_image[view_index] = pil_image

            op.report({'INFO'},'Computing color information from files: Done')


        for json_point in structure:

            r = g = b = 0

            # color information can only be computed if input files are provided
            if compute_color:
                for observation in json_point['observations']:
                    view_index = int(observation['observationId'])

                    # REMARK: pil/pillow: image.size == (width, height)
                    x_in_json_file = float(observation['x'][0])    # x has index 0
                    y_in_json_file = float(observation['x'][1])    # y has index 1

                    current_image = view_index_to_image[view_index]
                    current_r, current_g, current_b = current_image.getpixel((x_in_json_file, y_in_json_file))
                    r += current_r
                    g += current_g
                    b += current_b

                # Average the accumulated rgb values; guard against points
                # without observations to avoid a ZeroDivisionError.
                amount_observations = len(json_point['observations'])
                if amount_observations > 0:
                    r /= amount_observations
                    g /= amount_observations
                    b /= amount_observations

            custom_point = Point(
                coord=np.array(json_point['X'], dtype=float),
                color=np.array([r, g, b], dtype=int),
                id=int(json_point['landmarkId']),
                scalars=[])

            points.append(custom_point)
        return points