def save_triangles_in_ply_file(output_path_to_file, triangles):
        logger.info('save_triangles_in_ply_file: ...')
        points = []
        faces = []
        for triangle_index, triangle in enumerate(triangles):
            assert isinstance(triangle.vertex_0, np.ndarray) \
                   and isinstance(triangle.vertex_1, np.ndarray) \
                   and isinstance(triangle.vertex_2, np.ndarray)

            # FIXME this will result in duplicated points (in the ply file)
            points += [
                Point(triangle.vertex_0),
                Point(triangle.vertex_1),
                Point(triangle.vertex_2)
            ]
            # FIXME these face indices refer to duplicated points
            faces.append(
                Face(initial_indices=[
                    3 * triangle_index,
                    3 * triangle_index + 1,
                    3 * triangle_index + 2
                ]))

        PLYFileHandler.write_ply_file(ofp=output_path_to_file,
                                      vertices=points,
                                      faces=faces)

        logger.info('save_triangles_in_ply_file: Done')
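
# A minimal usage sketch (assuming a simple triangle container with
# vertex_0/vertex_1/vertex_2 attributes; the actual triangle class may differ):
#
#     import collections
#     import numpy as np
#     Triangle = collections.namedtuple('Triangle',
#                                       ['vertex_0', 'vertex_1', 'vertex_2'])
#     tri = Triangle(np.array([0.0, 0.0, 0.0]),
#                    np.array([1.0, 0.0, 0.0]),
#                    np.array([0.0, 1.0, 0.0]))
#     save_triangles_in_ply_file('triangle.ply', [tri])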
    def _parse_nvm_points(input_file, num_3D_points):

        points = []

        for point_index in range(num_3D_points):

            # From the docs:
            # <Point>  = <XYZ> <RGB> <number of measurements> <List of Measurements>
            point_line = input_file.readline()

            point_line_elements = (point_line.rstrip()).split()
            xyz_vec = list(map(float, point_line_elements[0:3]))
            rgb_vec = list(map(int, point_line_elements[3:6]))

            number_measurements = int(point_line_elements[6])
            measurements = point_line_elements[7:]
            class_ids = defaultdict(int)

            current_point_measurement = []

            for measurement_index in range(0, number_measurements):

                # From the docs:
                # <Measurement> = <Image index> <Feature Index> <xy>
                current_measurement = measurements[
                    measurement_index * 4:(measurement_index + 1) * 4]
                image_index = int(current_measurement[0])
                feature_index = int(current_measurement[1])

                # REMARK: The order of ndarray.shape (first height, then width)
                # is complementary to PIL's image.size (first width, then height).
                # That means
                # height, width = segmentation_as_matrix.shape
                # width, height = image.size

                # Therefore
                #   x_in_nvm_file == x_image == y_ndarray and
                #   y_in_nvm_file == y_image == x_ndarray
                x_in_nvm_file = float(current_measurement[2])
                y_in_nvm_file = float(current_measurement[3])

                current_point_measurement.append(
                    Measurement(image_index, feature_index, x_in_nvm_file,
                                y_in_nvm_file))

                # REMARK: x_in_nvm_file and y_in_nvm_file are relative to the image center

            current_point = Point(coord=xyz_vec, color=rgb_vec)
            current_point.measurements = current_point_measurement
            current_point.id = point_index

            points.append(current_point)

        return points
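
    # Worked example of a single point line in the NVM format parsed above
    # (values are illustrative):
    #
    #     1.0 2.0 3.0  255 128 0  2  0 17 -10.5 4.25  1 42 3.0 -7.5
    #     \---XYZ---/  \--RGB--/  |  \-- meas. 0 --/  \-- meas. 1 /
    #                             number of measurements
    #
    # Each measurement is <image index> <feature index> <x> <y>, with x and y
    # relative to the image center (see the remark above).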
    def save_single_triangle_in_ply_file(output_path_to_file, corner_0,
                                         corner_1, corner_2):
        assert isinstance(corner_0, np.ndarray) and \
               isinstance(corner_1, np.ndarray) and \
               isinstance(corner_2, np.ndarray)
        points = [Point(corner_0), Point(corner_1), Point(corner_2)]

        # the first 3 points build the triangle
        faces = [Face(initial_indices=[0, 1, 2])]

        PLYFileHandler.write_ply_file(ofp=output_path_to_file,
                                      vertices=points,
                                      faces=faces)
    def convert_colmap_points_to_points(id_to_col_points3D):
        # From photogrammetry_importer\ext\read_write_model.py
        #   Point3D = collections.namedtuple(
        #       "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])

        col_points3D = id_to_col_points3D.values()
        points3D = []
        for col_point3D in col_points3D:
            current_point = Point(coord=col_point3D.xyz, color=col_point3D.rgb)

            current_point.id = col_point3D.id
            points3D.append(current_point)

        return points3D
    def compute_ray_triangles_closest_intersection_point(ray,
                                                         triangles,
                                                         color_intersection_point=[0, 255, 0]):
        """
        Returns the parameter of the closest intersection of ray with any triangle in triangles and the corresponding
        intersection point.
        :param ray:
        :param triangles:
        :param color_intersection_point:
        :return:
        """

        assert ray is not None

        # the closest intersection is the intersection with the smallest t value
        t = None
        for triangle in triangles:
            t_temp, u_temp, v_temp = GeometryCollection.compute_ray_triangle_intersection_intersecting_parameter(
                ray, triangle)
            # keep the smallest valid t value
            if t_temp is not None and (t is None or t_temp < t):
                t = t_temp

        if t is not None:
            return Point(coord=ray.pos_vec + t * ray.dir_vec, color=color_intersection_point), t
        else:
            return None, None
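
# The helper compute_ray_triangle_intersection_intersecting_parameter() is not
# shown in this snippet. A minimal sketch of what such a helper could look
# like, based on the Moller-Trumbore algorithm (an assumption; the actual
# implementation may differ), where triangle.vertex_0/1/2 and
# ray.pos_vec/dir_vec are numpy arrays:
import numpy as np


def ray_triangle_intersection_sketch(ray, triangle, eps=1e-9):
    edge_1 = triangle.vertex_1 - triangle.vertex_0
    edge_2 = triangle.vertex_2 - triangle.vertex_0
    p_vec = np.cross(ray.dir_vec, edge_2)
    det = edge_1.dot(p_vec)
    if abs(det) < eps:  # the ray is parallel to the triangle plane
        return None, None, None
    inv_det = 1.0 / det
    t_vec = ray.pos_vec - triangle.vertex_0
    u = t_vec.dot(p_vec) * inv_det
    if u < 0.0 or u > 1.0:  # intersection outside the triangle
        return None, None, None
    q_vec = np.cross(t_vec, edge_1)
    v = ray.dir_vec.dot(q_vec) * inv_det
    if v < 0.0 or u + v > 1.0:  # intersection outside the triangle
        return None, None, None
    t = edge_2.dot(q_vec) * inv_det
    if t < eps:  # intersection behind the ray origin
        return None, None, None
    return t, u, v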
    def _readPoints(self, pointCount):
        points = []
        for index in range(pointCount):
            # Add points:
            position = np.array(self._readFloatItems())
            color = np.array(self._readIntItems())
            points.append(Point(coord=position, color=color))
            self._readLine()  # Skip mapped image features
        return points
    def parse_asc_or_pts_or_csv(ifp, delimiter, only_data):

        points = []
        with open(ifp, 'r') as ifc:

            data_semantics = None
            if not only_data:
                line = ifc.readline()
                if line.startswith('//'):
                    data_semantics = PointDataFileHandler.parse_header(line)
                    line = ifc.readline()
                num_points = int(line.strip())

            lines_as_tuples = PointDataFileHandler.read_lines_as_tuples(
                ifc, delimiter=delimiter)

            if data_semantics is None:
                # Determine the semantics of the data
                data_tuple = lines_as_tuples[0]
                data_semantics = PointDataFileHandler.guess_data_semantics(
                    data_tuple)

            if data_semantics.pseudo_color:
                factor = 255
            else:
                factor = 1

            for data_tuple in lines_as_tuples:
                point = Point()

                point.coord = [
                    float(data_tuple[data_semantics.x_idx]),
                    float(data_tuple[data_semantics.y_idx]),
                    float(data_tuple[data_semantics.z_idx])
                ]
                point.color = [
                    int(factor * float(data_tuple[data_semantics.r_idx])),
                    int(factor * float(data_tuple[data_semantics.g_idx])),
                    int(factor * float(data_tuple[data_semantics.b_idx]))
                ]
                points.append(point)
        return points
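
    # Example of the expected input format (values are illustrative). An
    # optional header comment describes the column semantics, followed by the
    # number of points and one point per line:
    #
    #     //X Y Z R G B
    #     2
    #     0.5 1.0 2.0 255 0 0
    #     1.5 0.0 3.0 0 255 0
    #
    # With only_data=True the file is assumed to contain data lines only.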
    def __ply_data_vertices_to_vetex_list(ply_data):

        vertex_data_type_names = ply_data['vertex'].data.dtype.names
        use_color = ('red' in vertex_data_type_names
                     and 'green' in vertex_data_type_names
                     and 'blue' in vertex_data_type_names)

        vertices = []
        value_keys = [
            x for x, y in sorted(ply_data['vertex'].data.dtype.fields.items(),
                                 key=lambda k: k[1])
        ]
        non_scalar_value_keys = [
            'x', 'y', 'z', 'red', 'green', 'blue', 'nx', 'ny', 'nz',
            'measurements'
        ]
        scalar_value_keys = [
            value_key for value_key in value_keys
            if value_key not in non_scalar_value_keys
        ]
        logger.info('Found the following vertex properties: ' +
                    str(value_keys))


        logger.info('Found ' + str(len(ply_data['vertex'].data)) + ' vertices')
        # print(type(line)) == numpy.void
        # print(line.dtype) == [('x', '<f4'), ('y', '<f4'), ('z', '<f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ('measurements', 'O')]
        for point_index, line in enumerate(ply_data['vertex'].data):
            current_point = Point()
            current_point.coord = np.array([line['x'], line['y'], line['z']])
            if use_color:
                current_point.color = np.array(
                    [line['red'], line['green'], line['blue']])
            current_point.id = point_index

            for scalar_value_key in scalar_value_keys:
                current_point.scalars[scalar_value_key] = line[
                    scalar_value_key]

            if 'measurements' in line.dtype.names:
                elements_per_measurement = 4
                current_point.measurements = []
                for measurement_idx in range(
                        len(line['measurements']) //
                        elements_per_measurement):
                    array_idx = measurement_idx * elements_per_measurement
                    # avoid shadowing the built-in name "slice"
                    measurement_slice = line['measurements'][
                        array_idx:array_idx + elements_per_measurement]
                    current_point.measurements.append(
                        Measurement.init_from_list(measurement_slice))

            vertices.append(current_point)

        ply_data_vertex_dtype = ply_data['vertex'].dtype
        ply_data_vertex_data_dtype = ply_data['vertex'].data.dtype

        return vertices, ply_data_vertex_dtype, ply_data_vertex_data_dtype
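
    # The ply_data argument is the structure returned by the plyfile package,
    # e.g. (a minimal sketch, assuming 'points.ply' exists):
    #
    #     from plyfile import PlyData
    #     ply_data = PlyData.read('points.ply')
    #     # ply_data['vertex'].data behaves like a numpy structured array, so
    #     # the properties above are accessed as line['x'], line['red'], ...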
    def save_rectangle_in_ply_file(output_path_to_file, corner_0, corner_1,
                                   corner_2, corner_3):
        assert isinstance(corner_0, np.ndarray) and \
               isinstance(corner_1, np.ndarray) and \
               isinstance(corner_2, np.ndarray) and \
               isinstance(corner_3, np.ndarray)
        points = [
            Point(corner_0),
            Point(corner_1),
            Point(corner_2),
            Point(corner_3)
        ]

        # represent the rectangle by 2 triangles
        face_1 = Face(initial_indices=[0, 1, 2])
        face_2 = Face(initial_indices=[3, 2, 1])
        faces = [face_1, face_2]

        PLYFileHandler.write_ply_file(ofp=output_path_to_file,
                                      vertices=points,
                                      faces=faces)
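
    # The corner order above yields the following triangulation, with the
    # shared diagonal between corner_1 and corner_2 (a sketch):
    #
    #     corner_0 ---- corner_1
    #        | face_1  /    |
    #        |       /      |
    #        |     /        |
    #        |   /   face_2 |
    #     corner_2 ---- corner_3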
    def generate_points_along_ray(ray,
                                  color=np.array([255, 0, 0]),
                                  amount_steps=100,
                                  scale=1.0):
        points_along_ray = []

        # the length of the direction vector is the distance over which we need to draw points
        length = np.linalg.norm(ray.dir_vec)
        step_width = length / float(amount_steps)

        ray_direction_vector_normalized = ray.dir_vec / length

        # we need amount_steps + 1 steps to include both the start and the end point
        for step in range(amount_steps+1):
            coords = ray.pos_vec + float(step_width) * float(step) * ray_direction_vector_normalized * float(scale)
            point = Point(coord=coords, color=color)
            points_along_ray.append(point)
        return points_along_ray
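
    # A minimal usage sketch (assuming a simple Ray container with pos_vec and
    # dir_vec attributes; the actual ray class may differ):
    #
    #     import collections
    #     import numpy as np
    #     Ray = collections.namedtuple('Ray', ['pos_vec', 'dir_vec'])
    #     ray = Ray(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 10.0]))
    #     # 101 red points from (0, 0, 0) to (0, 0, 10)
    #     points = generate_points_along_ray(ray)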
def write_camera_centers_as_ply(camera_object_trajectory,
                                camera_trajectory_ply_file_path):
    camera_centers = []
    for frame_name in camera_object_trajectory.get_frame_names_sorted():
        cam = camera_object_trajectory.get_camera(frame_name)

        if cam.is_monocular_cam():
            camera_centers.append(cam.get_camera_center())
        else:
            camera_centers.append(cam.get_left_camera().get_camera_center())
            camera_centers.append(cam.get_right_camera().get_camera_center())

    camera_center_points = []
    for center in camera_centers:
        camera_center_points.append(Point(coord=center, color=(0, 255, 0)))

    PLYFileHandler.write_ply_file(
        ofp=camera_trajectory_ply_file_path,
        vertices=camera_center_points,
        plain_text_output=True)  # binary output does not work reliably here
    def compute_ray_plane_intersection(ray, plane, color_intersection_point=[0, 255, 0]):
        """
        Note: A plane has no spatial margin.
        If a spatial margin is required use "compute_rays_triangles_closest_intersection_points()"
        :param ray:
        :param plane:
        :param color_intersection_point:
        :return: Point, t
        """

        # https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection
        assert ray is not None

        # t is the ray parameter of the intersection (None if the ray is parallel to the plane)
        t = None

        ray_dir_dot_plane_normal = (ray.dir_vec).dot(plane.normal_vec)
        if ray_dir_dot_plane_normal != 0:   # check parallel case
            t = (plane.pos_vec - ray.pos_vec).dot(plane.normal_vec) / float(ray_dir_dot_plane_normal)

        if t is not None:
            return Point(coord=ray.pos_vec + t * ray.dir_vec, color=color_intersection_point), t
        else:
            return None, None
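
    # The computation above is the standard line-plane intersection formula:
    # for a ray l0 + t * l and a plane through p0 with normal n,
    #
    #     t = ((p0 - l0) . n) / (l . n)
    #
    # Worked example: a ray from (0, 0, 0) along (0, 0, 1) and a plane through
    # (0, 0, 5) with normal (0, 0, 1) give t = 5 and intersection point (0, 0, 5).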
def post_process_depth_output(model_idp,
                              animation_transformations_file_suffix,
                              input_depth_image_folder_name,
                              output_nvm_folder_name,
                              lazy=True):

    logger.info('post_process_depth_output: ...')

    # Input path
    input_depth_image_folder_path = os.path.join(
        model_idp, input_depth_image_folder_name)
    animation_transformations_file = os.path.join(
        model_idp, animation_transformations_file_suffix)

    if os.path.isdir(input_depth_image_folder_path):

        # Output path
        nvm_folder_path = os.path.join(model_idp, output_nvm_folder_name)
        if not os.path.isdir(nvm_folder_path):
            os.mkdir(nvm_folder_path)
        logger.vinfo('nvm_folder_path', nvm_folder_path)

        input_depth_list = get_image_paths_in_folder(
            input_depth_image_folder_path, ext='.exr')

        # Required to avoid "IOError: broken data stream when reading image file" error
        ImageFile.LOAD_TRUNCATED_IMAGES = True

        camera_object_trajectory = TrajectoryFileHandler.parse_camera_and_object_trajectory_file(
            animation_transformations_file)

        logger.vinfo('camera_object_trajectory', camera_object_trajectory)

        for input_depth_exr_path in input_depth_list:
            frame_exr_name = os.path.basename(input_depth_exr_path)
            frame_exr_name_stem = os.path.splitext(frame_exr_name)[0]
            frame_jpg_name = frame_exr_name_stem + '.jpg'
            output_nvm_path = os.path.join(nvm_folder_path,
                                           frame_exr_name_stem) + '.nvm'

            logger.vinfo('output_nvm_path', output_nvm_path)

            nvm_file_missing = not os.path.isfile(output_nvm_path)

            if nvm_file_missing or not lazy:

                exr_data_as_np = EXRFileHandler.parse_depth_exr_file(
                    input_depth_exr_path)
                height, width = exr_data_as_np.shape

                cam = camera_object_trajectory.get_camera(frame_jpg_name)
                cam.file_name = frame_jpg_name
                cam.height = height
                cam.width = width
                logger.vinfo('width', width)
                logger.vinfo('height', height)
                cameras = [cam]

                coords_world_coord = cam.convert_depth_buffer_to_world_coords(
                    exr_data_as_np, invert_y=True)

                points = []
                for coord in coords_world_coord:
                    points.append(Point(coord=coord))

                NVMFileHandler.write_nvm_file(output_nvm_path, cameras, points)

    else:
        logger.info('EXR FOLDER MISSING, SKIPPING ...')

    logger.info('post_process_depth_output: Done')
    def parse_points(json_data,
                     image_index_to_camera_index,
                     path_to_input_files=None,
                     view_index_to_file_name=None):

        compute_color = (path_to_input_files is not None
                         and view_index_to_file_name is not None)
        structure = json_data['structure']

        if compute_color:
            logger.info('Computing color information from files: ...')
            view_index_to_image = {}
            for view_index, file_name in view_index_to_file_name.items():
                image_path = os.path.join(path_to_input_files, file_name)
                pil_image = Image.open(image_path)
                view_index_to_image[view_index] = pil_image

            logger.info('Computing color information from files: Done')

            logger.vinfo('view_index_to_image.keys()', view_index_to_image.keys())

        points = []
        for json_point in structure:
            custom_point = Point()
            position = json_point['value']['X']
            custom_point.set_coord(np.array(position))
            custom_point.id = int(json_point['key'])
            custom_point.measurements = []

            r = g = b = 0

            # color information can only be computed if input files are provided
            if compute_color:
                for observation in json_point['value']['observations']:
                    view_index = int(observation['key'])

                    # REMARK: The order of ndarray.shape (first height, then width)
                    # is complementary to PIL's image.size (first width, then height).
                    # That means
                    # height, width = segmentation_as_matrix.shape
                    # width, height = image.size

                    # Therefore: x_in_openmvg_file == x_image == y_ndarray
                    # and y_in_openmvg_file == y_image == x_ndarray
                    x_in_json_file = float(observation['value']['x'][0])    # x has index 0
                    y_in_json_file = float(observation['value']['x'][1])    # y has index 1

                    current_image = view_index_to_image[view_index]
                    # getpixel expects integer pixel coordinates
                    current_r, current_g, current_b = current_image.getpixel(
                        (int(round(x_in_json_file)), int(round(y_in_json_file))))
                    r += current_r
                    g += current_g
                    b += current_b

                    # Measurements is a list of < Measurement >
                    # < Measurement > = < Image index > < Feature Index > < xy >
                    custom_point.measurements.append(
                        Measurement(
                            camera_index=image_index_to_camera_index[view_index],
                            feature_index=int(observation['value']['id_feat']),
                            x=x_in_json_file,
                            y=y_in_json_file,
                            x_y_are_image_coords=True))

                # average the rgb values over all observations
                amount_observations = len(json_point['value']['observations'])
                r /= amount_observations
                g /= amount_observations
                b /= amount_observations

            custom_point.set_color(np.array([r, g, b], dtype=int))
            points.append(custom_point)
        return points
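
    # A sketch of the json_data['structure'] fragment parsed above (an OpenMVG
    # sfm_data.json file; field values are illustrative):
    #
    #     "structure": [
    #         {
    #             "key": "0",
    #             "value": {
    #                 "X": [1.0, 2.0, 3.0],
    #                 "observations": [
    #                     {"key": "0",
    #                      "value": {"id_feat": 42, "x": [310.5, 205.25]}}
    #                 ]
    #             }
    #         }
    #     ]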
    def parse_bundler_file(input_visual_fsm_bundle_out_file_name):

        # =======
        # REMARK: The camera coordinate frame is transformed to
        #         the same camera coordinate frame used by VisualSfM
        #         i.e. a rotation around the x axis by 180 degree
        # =======

        input_file = open(input_visual_fsm_bundle_out_file_name, 'r')
        # the first line is just a comment, throw it away
        input_file.readline()

        # read the number of cameras and points
        current_line = (input_file.readline()).rstrip()
        amount_cams, amount_points = map(int, current_line.split(' '))

        cameras = []

        for camera_index in range(0, amount_cams):

            # Each camera entry <cameraI> contains the estimated camera intrinsics and extrinsics, and has the form:
            # <f> <k1> <k2>   [the focal length, followed by two radial distortion coeffs]
            # <R>             [a 3x3 matrix representing the camera rotation]
            # <t>             [a 3-vector describing the camera translation]

            current_line = (input_file.readline()).rstrip()     # read internal camera parameters
            # TODO Handle Radial Distortion
            focal_length = float(current_line.split(' ')[0])
            camera_calibration_matrix = np.asarray(
                [[focal_length, 0, 0], [0, focal_length, 0], [0, 0, 1]],
                dtype=float)

            # use map to convert all string numbers to floats
            # python 3 requires an explicit cast from 'map object' to list
            current_line = (input_file.readline()).rstrip()
            first_row = list(map(float, current_line.split(' ')))
            current_line = (input_file.readline()).rstrip()
            second_row = list(map(float, current_line.split(' ')))
            current_line = (input_file.readline()).rstrip()
            third_row = list(map(float, current_line.split(' ')))

            cam_rotation_matrix = np.asarray(
                [first_row, second_row, third_row],
                dtype=float)

            # this line contains the translation vector and not the camera center
            current_line = (input_file.readline()).rstrip()     # read translation vector
            translation_vector = np.asarray(
                list(map(float, current_line.split(' '))),
                dtype=float)

            # Transform the camera coordinate system from the computer vision
            # camera coordinate frame to the camera coordinate frame used by
            # VisualSfM (i.e. rotate the camera matrix around the x axis by 180 degree).
            # Inverting the y and the z axis transforms the points into the
            # camera coordinate frame used by VisualSfM.
            cam_rotation_matrix = TransformationCollection.invert_y_and_z_axis(cam_rotation_matrix)
            translation_vector = TransformationCollection.invert_y_and_z_axis(translation_vector)


            current_camera = Camera()
            current_camera.set_rotation_mat(cam_rotation_matrix)
            # set camera center AFTER rotation (since setting the center sets also the translation vector)
            #current_camera.set_camera_center_after_rotation(center_vec)
            current_camera.set_camera_translation_vector_after_rotation(translation_vector)

            # from cam coordinates to world coordinates
            cam_rotation_matrix_inv = np.linalg.inv(cam_rotation_matrix)
            current_camera.rotation_inv_mat = cam_rotation_matrix_inv
            current_camera.calibration_mat = camera_calibration_matrix
            current_camera.id = camera_index

            cameras.append(current_camera)

        points = []
        for point_index in range(0, amount_points):

            current_point = Point()

            # Each point entry has the form:
            # <position>      [a 3-vector describing the 3D position of the point]
            # <color>         [a 3-vector describing the RGB color of the point]
            # <view list>     [a list of views the point is visible in]

            current_line = (input_file.readline()).rstrip()         # read the 3d position
            point_center = np.array(list(map(float, current_line.split(' '))))
            current_point._coord = point_center

            current_line = (input_file.readline()).rstrip()         # read the color
            point_color = np.array(list(map(int, current_line.split(' '))))
            current_point._color = point_color

            current_line = (input_file.readline()).rstrip()         # read the view list
            measurement_values = current_line.split(' ')
            measurements = [measurement_values[1+i*4:1+i*4+4] for i in range(int(measurement_values[0]))]
            # Remark: The last value of each measurement (y) is negated, so the y axis points downwards (like in visualsfm)
            current_point.measurements = [
                (int(measurement[0]), int(measurement[1]), float(measurement[2]), -float(measurement[3]))
                for measurement in measurements]
            current_point.id = point_index

            points.append(current_point)

        return cameras, points
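
    # A sketch of a minimal bundle.out file matching the parsing above
    # (one camera, one point; values are illustrative):
    #
    #     # Bundle file v0.3
    #     1 1
    #     800.0 0.0 0.0
    #     1.0 0.0 0.0
    #     0.0 1.0 0.0
    #     0.0 0.0 1.0
    #     0.0 0.0 -5.0
    #     1.0 2.0 3.0
    #     255 128 0
    #     1 0 17 10.5 -4.25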

    @staticmethod
    def approximate_plane_by_corners(pos_vec, dir_vec_1, dir_vec_2,
                                     size_ratio=1):
        # build a rectangle around the position vector
        corner_0 = pos_vec + size_ratio * dir_vec_1 - size_ratio * dir_vec_2
        corner_1 = pos_vec + size_ratio * dir_vec_1 + size_ratio * dir_vec_2
        corner_2 = pos_vec - size_ratio * dir_vec_1 - size_ratio * dir_vec_2
        corner_3 = pos_vec - size_ratio * dir_vec_1 + size_ratio * dir_vec_2
        return corner_0, corner_1, corner_2, corner_3

    @staticmethod
    def save_plane_in_ply_file(output_path_to_file,
                               pos_vec,
                               dir_vec_1,
                               dir_vec_2,
                               size_ratio=1):

        corner_0, corner_1, corner_2, corner_3 = GeometryFileHandler.approximate_plane_by_corners(
            pos_vec, dir_vec_1, dir_vec_2, size_ratio)
        GeometryFileHandler.save_rectangle_in_ply_file(output_path_to_file,
                                                       corner_0, corner_1,
                                                       corner_2, corner_3)


if __name__ == '__main__':
    output_path_to_file = ''

    # save_rectangle_in_ply_file() expects np.ndarray corners, so the plane is
    # defined by plain numpy vectors here
    pos_vec = np.array([0, 0, 0])
    dir_vec_1 = np.array([0, 1, 0])
    dir_vec_2 = np.array([0, 0, 1])

    GeometryFileHandler.save_plane_in_ply_file(output_path_to_file, pos_vec,
                                               dir_vec_1, dir_vec_2)
    def write_ply_file_from_vertex_mat(output_path_to_file, vertex_mat):
        vertices = []
        for entry in vertex_mat:
            vertices.append(Point(coord=entry))
        PLYFileHandler.write_ply_file(output_path_to_file, vertices)
max_clipping_range = 100.0
render_interface.load_vtk_mesh_or_point_cloud(poly_ifp, texture_ifp)

calibration_np_mat = cam.get_calibration_mat()
cam_to_world_mat_computer_vision = cam.get_4x4_cam_to_world_mat()
render_interface.set_active_cam_from_computer_vision_cam_to_world_mat(
    cam_to_world_mat_computer_vision,
    calibration_np_mat,
    cam.width,
    cam.height,
    max_clipping_range=max_clipping_range)

principal_pt = [calibration_np_mat[0][2], calibration_np_mat[1][2]]
logger.vinfo('principal_pt', principal_pt)
render_interface.set_principal_point(principal_pt, cam.width, cam.height)

render_interface.render()

render_interface.write_z_buffer_to_disc(z_buffer_ofp)
render_interface.write_z_buffer_visualization_to_disc(z_buffer_viz_ofp)

world_coords_valid = render_interface.get_z_buffer_as_world_coords(
    n_th_result_point=1)
points_world_coord = Point.get_points_from_coords(world_coords_valid)
ColmapFileHandler.write_colmap_model(odp=colmap_result_model_odp,
                                     cameras=[cam],
                                     points=points_world_coord)

render_interface.add_point_cloud(world_coords_valid)
render_interface.render_and_start()
    def parse_MVS_colmap_file(mvs_colmap_ifp,
                              file_name_to_camera_id,
                              with_nxnynz=True,
                              n_th_point=1):
        """
        :param mvs_colmap_ifp:
        :return:
        """
        logger.info('parse_MVS_colmap_file: ...')
        logger.vinfo('mvs_colmap_ifp', mvs_colmap_ifp)
        logger.vinfo('n_th_point', n_th_point)

        camera_index_to_current_feature_index = defaultdict(int)
        points = []

        with open(mvs_colmap_ifp, 'r') as input_file:

            remaining_content = input_file.readlines()
            remaining_content = [x.strip() for x in remaining_content]

            # first line is a comment
            remaining_content = remaining_content[1:]  # skip one line

            dense_index_to_image_name = dict()

            num_dense_images = int(remaining_content[0])
            remaining_content = remaining_content[1:]  # skip one line
            for index in range(num_dense_images):
                dense_index_and_image_name = remaining_content[index].split()
                assert len(dense_index_and_image_name) == 2
                dense_index_to_image_name[int(dense_index_and_image_name[0])] = \
                    dense_index_and_image_name[1]

            remaining_content = remaining_content[num_dense_images:]  # skip lines

            # the next 3 lines are comments
            remaining_content = remaining_content[3:]

            dense_index_to_camera_index = {}
            if file_name_to_camera_id is not None:
                for dense_index, image_name in dense_index_to_image_name.items():
                    camera_id = file_name_to_camera_id[image_name]
                    dense_index_to_camera_index[dense_index] = camera_id

            # DENSE_IMAGE_ID != CAMERA_ID and DENSE_IMAGE_ID != IMAGE_ID
            # POINT3D_ID, X, Y, Z, NX, NY, NZ, R, G, B, TRACK[] as (DENSE_IMAGE_ID, DENSE_COL, DENSE_ROW)
            remaining_content = remaining_content[::n_th_point]
            num_points = len(remaining_content)
            for index, point_line in enumerate(remaining_content):

                if index % 100000 == 0:
                    logger.pinfo(index, num_points)

                point_line_elements = (point_line.rstrip()).split()
                current_point = Point()

                # Adjust the point_3d id w.r.t n_th_point
                point_3d_id = index  # int(point_line_elements[0])
                current_point.id = point_3d_id

                xyz_vec = list(map(float, point_line_elements[1:4]))
                current_point.set_coord(xyz_vec)

                if with_nxnynz:
                    nxnynz_vec = list(map(float, point_line_elements[4:7]))
                    current_point.set_normal(nxnynz_vec)

                    rgb_vec = list(map(int, point_line_elements[7:10]))
                    current_point.set_color(rgb_vec)

                    img_id_col_row_triples_as_str = point_line_elements[10:]

                else:
                    rgb_vec = list(map(int, point_line_elements[4:7]))
                    current_point.set_color(rgb_vec)

                    img_id_col_row_triples_as_str = point_line_elements[7:]

                assert len(img_id_col_row_triples_as_str) % 3 == 0

                dense_image_ids = list(
                    map(int, img_id_col_row_triples_as_str[0::3]))
                cols = list(map(float, img_id_col_row_triples_as_str[1::3]))
                rows = list(map(float, img_id_col_row_triples_as_str[2::3]))

                measurements = []
                for dense_image_id, col, row in zip(dense_image_ids, cols, rows):

                    if file_name_to_camera_id is not None:
                        camera_id = dense_index_to_camera_index[dense_image_id]
                    else:
                        camera_id = dense_image_id

                    measurement = Measurement(
                        camera_index=camera_id,
                        # Adding None here would create parsing problems
                        feature_index=camera_index_to_current_feature_index[
                            camera_id],
                        x=col,
                        y=row,
                        x_y_are_image_coords=True)
                    measurements.append(measurement)

                    camera_index_to_current_feature_index[camera_id] += 1

                current_point.measurements = measurements
                points.append(current_point)

            # Detect duplicated point ids
            point_ids = [point.id for point in points]
            assert len(set(point_ids)) == len(point_ids)

        # TODO Compute the real 2D image coordinates (the ones from colmap are probably not correct)
        logger.info('parse_MVS_colmap_file: Done')

        return points
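
    # A sketch of a single point line in the format parsed above
    # (with_nxnynz=True; values are illustrative):
    #
    #     7  1.0 2.0 3.0  0.0 0.0 1.0  255 128 0  0 310.5 205.25
    #     |  \---XYZ---/  \--NXNYNZ--/  \--RGB--/  \-- track entry --/
    #     POINT3D_ID          track entry = (DENSE_IMAGE_ID, DENSE_COL, DENSE_ROW)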