Example #1
    def __init__(self, data_path):
        """
        Create dataset instance. Empty directories (for EXIF, matches, etc.) will be created if they don't exist
        already.

        :param data_path: Path to directory containing dataset
        """
        self.data_path = data_path

        # Load configuration.
        config_file = os.path.join(self.data_path, 'config.yaml')
        self.config = config.load_config(config_file)

        # Load list of images.
        image_list_file = os.path.join(self.data_path, 'image_list.txt')
        if os.path.isfile(image_list_file):
            with open(image_list_file) as fin:
                lines = fin.read().splitlines()
            self.set_image_list(lines)
        else:
            self.set_image_path(os.path.join(self.data_path, 'images'))

        # Create output folders.
        for p in [self.__exif_path(),
                  self.__feature_path(),
                  self.__matches_path()]:
            io.mkdir_p(p)
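
A minimal usage sketch for the constructor above, assuming an OpenSfM-style project directory that contains a config.yaml and an images/ folder (the path and the config key are illustrative):

data = DataSet('path/to/project')                         # hypothetical project directory
feature_type = data.config.get('feature_type', 'HAHOG')   # config is the dict loaded from config.yaml
print(feature_type)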
Example #2
    def export(self, reconstruction, graph, data):
        exporter = csfm.OpenMVSExporter()
        for camera in reconstruction.cameras.values():
            if camera.projection_type == 'perspective':
                w, h = camera.width, camera.height
                K = np.array([
                    [camera.focal, 0, (w - 1.0) / 2 / max(w, h)],
                    [0, camera.focal, (h - 1.0) / 2 / max(w, h)],
                    [0, 0, 1],
                ])
                exporter.add_camera(str(camera.id), K)

        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                image_path = data._undistorted_image_file(shot.id)
                exporter.add_shot(
                    str(os.path.abspath(image_path)),
                    str(shot.id),
                    str(shot.camera.id),
                    shot.pose.get_rotation_matrix(),
                    shot.pose.get_origin())

        for point in reconstruction.points.values():
            shots = graph[point.id].keys()
            coordinates = np.array(point.coordinates, dtype=np.float64)
            exporter.add_point(coordinates, shots)

        io.mkdir_p(data.data_path + '/openmvs')
        exporter.export(data.data_path + '/openmvs/scene.mvs')
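
In the export above the intrinsic matrix K is built in normalized image coordinates: camera.focal in OpenSfM is already divided by the larger image dimension, and the principal point is divided by max(w, h) here to match. A stand-alone sketch of converting such a matrix back to pixel units (plain NumPy; the function name is illustrative):

import numpy as np

def normalized_to_pixel_K(focal, width, height):
    # Undo the max(w, h) normalization used in the K matrix above:
    # scale the focal length back to pixels and put the principal
    # point at the image center in pixel coordinates.
    size = max(width, height)
    return np.array([
        [focal * size, 0.0, (width - 1.0) / 2.0],
        [0.0, focal * size, (height - 1.0) / 2.0],
        [0.0, 0.0, 1.0],
    ])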
Example #3
    def create_submodels(self, clusters, no_symlinks=False):
        data = DataSet(self.data_path)
        for i, cluster in enumerate(clusters):
            # create sub model dirs
            submodel_path = self._submodel_path(i)
            submodel_images_path = self._submodel_images_path(i)
            io.mkdir_p(submodel_path)
            io.mkdir_p(submodel_images_path)

            # link images and create image list file
            image_list_path = os.path.join(submodel_path, 'image_list.txt')
            with io.open_wt(image_list_path) as txtfile:
                for image in cluster:
                    src = data.image_files[image]
                    dst = os.path.join(submodel_images_path, image)
                    if not os.path.isfile(dst):
                        os.symlink(src, dst)
                    dst_relpath = os.path.relpath(dst, submodel_path)
                    txtfile.write(dst_relpath + "\n")

            # copy config.yaml if it exists
            config_file_path = os.path.join(self.data_path, 'config.yaml')
            if os.path.exists(config_file_path):
                shutil.copyfile(config_file_path, os.path.join(submodel_path, 'config.yaml'))

            if no_symlinks:
                reference_file_path = os.path.join(self.data_path, 'reference_lla.json')
                if os.path.exists(reference_file_path):
                    shutil.copyfile(reference_file_path, os.path.join(submodel_path, 'reference_lla.json'))
            else:
                # create symlinks to metadata files
                for symlink_path in ['camera_models.json', 'reference_lla.json',
                                     'exif', 'features', 'matches']:
                    self._create_symlink(submodel_path, symlink_path)
Example #4
    def create_submodels(self, clusters):
        data = DataSet(self.data_path)
        for i, cluster in enumerate(clusters):
            # create sub model dirs
            submodel_path = self._submodel_path(i)
            submodel_images_path = self._submodel_images_path(i)
            io.mkdir_p(submodel_path)
            io.mkdir_p(submodel_images_path)

            # create image list file
            image_list_path = os.path.join(submodel_path, 'image_list.txt')
            with io.open_wt(image_list_path) as txtfile:
                for image in cluster:
                    src = data.image_files[image]
                    dst = os.path.join(submodel_images_path, image)
                    src_relpath = os.path.relpath(src, submodel_images_path)
                    if not os.path.isfile(dst):
                        os.symlink(src_relpath, dst)
                    dst_relpath = os.path.relpath(dst, submodel_path)
                    txtfile.write(dst_relpath + "\n")

            # copy config.yaml if it exists
            config_file_path = os.path.join(self.data_path, 'config.yaml')
            if os.path.exists(config_file_path):
                shutil.copyfile(config_file_path, os.path.join(submodel_path, 'config.yaml'))

            # create symlinks to additional files
            filenames = ['camera_models.json', 'reference_lla.json', 'exif',
                         'features', 'matches', 'masks', 'mask_list.txt',
                         'segmentations']
            for filename in filenames:
                self._create_symlink(submodel_path, filename)
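
Compared to Example #3, the version above links each image through a path made relative to the submodel's images directory, so the links keep resolving if the whole dataset tree is moved. A minimal stand-alone sketch of that pattern using only the standard library (the paths are illustrative):

import os

def link_relative(src, dst):
    # Point dst at src via a path relative to dst's directory,
    # mirroring the image links written in the loop above.
    rel = os.path.relpath(src, os.path.dirname(dst))
    if not os.path.isfile(dst):
        os.symlink(rel, dst)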
Example #5
def extract_keyframes_from_video(video, reconstruction):
    '''
    Reads video and extracts a frame for each shot in reconstruction
    '''
    image_path = 'images'
    mkdir_p(image_path)
    T = 0.1  # TODO(pau) get this from config
    cap = cv2.VideoCapture(video)
    video_idx = 0

    shot_ids = sorted(reconstruction['shots'].keys())
    for shot_id in shot_ids:
        shot = reconstruction['shots'][shot_id]
        timestamp = shot['created_at']
        keyframe_idx = int(round(timestamp / T))

        while video_idx <= keyframe_idx:
            for i in range(20):
                ret, frame = cap.read()
                if ret:
                    break
                else:
                    print('retrying')
            if not ret:
                raise RuntimeError(
                    'Could not find keyframe {} in video'.format(shot_id))
            if video_idx == keyframe_idx:
                cv2.imwrite(os.path.join(image_path, shot_id), frame)
            video_idx += 1

    cap.release()
Example #6
 def save_pruned_depthmap(self, image, points, normals, colors, labels, detections):
     io.mkdir_p(self._depthmap_path())
     filepath = self._depthmap_file(image, 'pruned.npz')
     np.savez_compressed(filepath,
                         points=points, normals=normals,
                         colors=colors, labels=labels,
                         detections=detections)
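
The archive written above can be read back with NumPy under the same keys that were passed to np.savez_compressed (the file path below is illustrative):

import numpy as np

npz = np.load('depthmaps/frame_0001.jpg.pruned.npz')   # hypothetical path
points = npz['points']
normals = npz['normals']
colors = npz['colors']
labels = npz['labels']
detections = npz['detections']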
Example #7
def import_video_with_gpx(video_file, gpx_file, output_path, dx, dt=None, start_time=None, visual=False, image_description=None):

    points = geotag_from_gpx.get_lat_lon_time(gpx_file)

    orientation = video_orientation(video_file)

    if start_time:
        video_start_time = dateutil.parser.parse(start_time)
    else:
        try:
            exifdate = Popen(['exiftool', '-CreateDate', '-b', video_file], stdout=PIPE).stdout.read()
            video_start_time = datetime.datetime.strptime(exifdate, '%Y:%m:%d %H:%M:%S')
        except:
            print('Video recording timestamp not found. Using first GPS point time.')
            video_start_time = points[0][0]
        try:
            duration = Popen(['exiftool', '-MediaDuration', '-b', video_file], stdout=PIPE).stdout.read()
            video_duration = float(duration)
            video_end_time = video_start_time + datetime.timedelta(seconds=video_duration)
        except:
            print('Video end time not found. Using last GPS point time.')
            video_end_time = points[-1][0]

    print('GPS track starts at: {}'.format(points[0][0]))
    print('Video starts at: {}'.format(video_start_time))

    # Extract video frames.
    io.mkdir_p(output_path)
    key_points = geotag_from_gpx.sample_gpx(points, dx, dt)

    cap = cv2.VideoCapture(video_file)
    image_files = []
    for p in key_points:
        dt = (p[0] - video_start_time).total_seconds()
        if dt > 0:
            CAP_PROP_POS_MSEC = cv2.CAP_PROP_POS_MSEC if context.OPENCV3 else cv2.cv.CV_CAP_PROP_POS_MSEC
            cap.set(CAP_PROP_POS_MSEC, int(dt * 1000))
            ret, frame = cap.read()
            if ret:
                print('Grabbing frame for time {}'.format(p[0]))
                filepath = os.path.join(output_path, p[0].strftime("%Y_%m_%d_%H_%M_%S_%f")[:-3] + '.jpg')
                cv2.imwrite(filepath, frame)
                geotag_from_gpx.add_exif_using_timestamp(filepath, points, timestamp=p[0], orientation=orientation)

                # Display the resulting frame
                if visual:
                    max_display_size = 800
                    resize_ratio = float(max_display_size) / max(frame.shape[0], frame.shape[1])
                    frame = cv2.resize(frame, dsize=(0, 0), fx=resize_ratio, fy=resize_ratio)
                    cv2.imshow('frame', frame)
                    if cv2.waitKey(1) & 0xFF == 27:
                        break
                image_files.append(filepath)
    # When everything is done, release the capture
    cap.release()
    if visual:
        cv2.destroyAllWindows()
    return image_files
Example #8
 def __save_features(self, filepath, image, points, descriptors, colors=None):
     io.mkdir_p(self.__feature_path())
     feature_type = self.config.get('feature_type')
     if ((feature_type == 'AKAZE' and self.config.get('akaze_descriptor') in ['MLDB_UPRIGHT', 'MLDB']) or
         (feature_type == 'HAHOG' and self.config.get('hahog_normalize_to_uchar', False))):
         feature_data_type = np.uint8
     else:
         feature_data_type = np.float32
     np.savez(filepath,
              points=points.astype(np.float32),
              descriptors=descriptors.astype(feature_data_type),
              colors=colors)
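
A counterpart sketch for reading the file back (the path is illustrative); the descriptor dtype depends on which branch was taken above, uint8 for MLDB or uchar-normalized HAHOG descriptors, float32 otherwise:

import numpy as np

npz = np.load('features/frame_0001.jpg.features.npz')  # hypothetical path
points = npz['points']            # saved as float32 above
descriptors = npz['descriptors']  # uint8 or float32, see __save_features
colors = npz['colors']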
Example #9
    def __init__(self, data_path):
        '''
        Create meta dataset instance for large scale reconstruction.

        :param data_path: Path to directory containing meta dataset
        '''
        self.data_path = data_path

        config_file = os.path.join(self.data_path, 'config.yaml')
        self.config = config.load_config(config_file)

        self._image_list_file_name = 'image_list_with_gps.tsv'
        self._clusters_file_name = 'clusters.npz'
        self._clusters_with_neighbors_file_name = 'clusters_with_neighbors.npz'
        self._clusters_with_neighbors_geojson_file_name = 'clusters_with_neighbors.geojson'

        io.mkdir_p(self._submodels_path())
Example #10
 def save_exif(self, image, data):
     io.mkdir_p(self._exif_path())
     with io.open_wt(self._exif_file(image)) as fout:
         io.json_dump(data, fout)
Example #11
 def save_epipolar(self, im1, im2, R, t, X=[], inliers=[]):
     io.mkdir_p(self.__epipolar_path())
     np.savez(self.__epipolar_file(im1, im2), R=R, t=t, X=X, inliers=inliers)
Example #12
 def save_undistorted_image(self, image, array):
     io.mkdir_p(self._undistorted_image_path())
     cv2.imwrite(self._undistorted_image_file(image), array[:, :, ::-1])
Example #13
 def save_report(self, report_str, path):
     """Save report string to a file."""
     filepath = os.path.join(self._report_path(), path)
     io.mkdir_p(os.path.dirname(filepath))
     with io.open_wt(filepath) as fout:
         return fout.write(report_str)
Example #14
 def save_raw_depthmap(self, image, depth, plane, score, nghbr, nghbrs):
     io.mkdir_p(self._depthmap_path())
     filepath = self._depthmap_file(image, 'raw.npz')
     np.savez_compressed(filepath, depth=depth, plane=plane, score=score, nghbr=nghbr, nghbrs=nghbrs)
Example #15
 def save_undistorted_segmentation(self, image, array):
     """Save the undistorted image segmentation."""
     io.mkdir_p(self._undistorted_segmentation_path())
     io.imwrite(self._undistorted_segmentation_file(image), array)
Example #16
 def save_clean_depthmap(self, image, depth, plane, score):
     io.mkdir_p(self._depthmap_path())
     filepath = self._depthmap_file(image, 'clean.npz')
     np.savez_compressed(filepath, depth=depth, plane=plane, score=score)
Example #17
 def save_raw_depthmap(self, image, depth, plane, score):
     io.mkdir_p(self._depthmap_path())
     filepath = self._depthmap_file(image, 'raw.npz')
     np.savez(filepath, depth=depth, plane=plane, score=score)
Example #18
 def save_undistorted_shot_ids(self, ushot_dict):
     filename = os.path.join(self.data_path, "undistorted_shot_ids.json")
     io.mkdir_p(self.data_path)
     with io.open_wt(filename) as fout:
         io.json_dump(ushot_dict, fout, minify=False)
Example #19
 def save_exif(self, image, data):
     io.mkdir_p(self.__exif_path())
     with open(self.__exif_file(image), 'w') as fout:
         fout.write(io.json_dumps(data))
Example #20
 def save_matches(self, image, matches):
     io.mkdir_p(self.__matches_path())
     with gzip.open(self.__matches_file(image), 'wb') as fout:
         pickle.dump(matches, fout)
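
Reading the matches back mirrors the save: a gzip stream containing a pickled object (the file path is illustrative):

import gzip
import pickle

with gzip.open('matches/frame_0001.jpg_matches.pkl.gz', 'rb') as fin:  # hypothetical path
    matches = pickle.load(fin)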
Example #21
 def save_undistorted_reconstruction(self, reconstruction):
     io.mkdir_p(self.data_path)
     return self.base.save_reconstruction(reconstruction,
                                          filename=os.path.join(
                                              self.subfolder,
                                              'reconstruction.json'))
Example #22
 def save_clean_depthmap(self, image, depth, plane, score):
     io.mkdir_p(self._depthmap_path())
     filepath = self._depthmap_file(image, 'clean.npz')
     np.savez_compressed(filepath, depth=depth, plane=plane, score=score)
Example #23
 def save_undistorted_segmentation(self, image, array):
     """Save the undistorted image segmentation."""
     io.mkdir_p(self._undistorted_segmentation_path())
     io.imwrite(self._undistorted_segmentation_file(image), array)
Example #24
 def save_undistorted_mask(self, image, array):
     """Save the undistorted image mask."""
     io.mkdir_p(self._undistorted_mask_path())
     io.imwrite(self._undistorted_mask_file(image), array)
Example #25
 def save_undistorted_image(self, image, array):
     """Save undistorted image pixels."""
     io.mkdir_p(self._undistorted_image_path())
     io.imwrite(self._undistorted_image_file(image), array)
Example #26
                             'submodels, images are grouped into clusters. '
                             'This value regulates the number of images that '
                             'each cluster should have on average.')

    parser.add_argument('--submodel-overlap',
                        type=float,
                        metavar='<positive float>',
                        default=150,
                        help='Radius of the overlap between submodels. '
                        'After grouping images into clusters, images '
                        'that are closer than this radius to a cluster '
                        'are added to the cluster. This is done to ensure '
                        'that neighboring submodels overlap.')

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_command_line()
    data_path = args.dataset

    resize_images(data_path, args)

    image_path = os.path.join(data_path, 'images')
    opensfm_path = os.path.join(data_path, 'opensfm')

    mkdir_p(opensfm_path)
    create_image_list(image_path, opensfm_path)
    create_config(opensfm_path, args)
    link_image_groups(data_path, opensfm_path)
Example #27
 def save_report(self, report_str, path):
     """Save report string to a file."""
     filepath = os.path.join(self._report_path(), path)
     io.mkdir_p(os.path.dirname(filepath))
     with io.open_wt(filepath) as fout:
         return fout.write(report_str)
Example #28
 def save_undistorted_mask(self, image, array):
     """Save the undistorted image mask."""
     io.mkdir_p(self._undistorted_mask_path())
     io.imwrite(self._undistorted_mask_file(image), array)
Example #29
 def save_epipolar(self, im1, im2, R, t, X=[], inliers=[]):
     io.mkdir_p(self.__epipolar_path())
     np.savez(self.__epipolar_file(im1, im2), R=R, t=t, X=X, inliers=inliers)
Example #30
 def save_raw_depthmap(self, image, depth, plane, score, nghbr, nghbrs):
     io.mkdir_p(self._depthmap_path())
     filepath = self._depthmap_file(image, 'raw.npz')
     np.savez_compressed(filepath, depth=depth, plane=plane, score=score, nghbr=nghbr, nghbrs=nghbrs)
Example #31
 def save_undistorted_image(self, image, array):
     io.mkdir_p(self._undistorted_image_path())
     cv2.imwrite(self._undistorted_image_file(image), array[:, :, ::-1])
Example #32
 def save_exif(self, image, data):
     io.mkdir_p(self._exif_path())
     with io.open_wt(self._exif_file(image)) as fout:
         io.json_dump(data, fout)
Example #33
 def _save_features(self, filepath, points, descriptors, colors=None):
     io.mkdir_p(self._feature_path())
     features.save_features(filepath, points, descriptors, colors,
                            self.config)
Example #34
 def save_matches(self, image, matches):
     io.mkdir_p(self._matches_path())
     with gzip.open(self._matches_file(image), 'wb') as fout:
         pickle.dump(matches, fout)
Example #35
 def save_exif(self, image, data):
     io.mkdir_p(self.__exif_path())
     with open(self.__exif_file(image), 'w') as fout:
         io.json_dump(data, fout)
Example #36
 def save_undistorted_image(self, image, array):
     """Save undistorted image pixels."""
     io.mkdir_p(self._undistorted_image_path())
     io.imwrite(self._undistorted_image_file(image), array)
Example #37
def import_video_with_gpx(video_file,
                          gpx_file,
                          output_path,
                          dx,
                          dt=None,
                          start_time=None,
                          visual=False,
                          image_description=None):

    points = geotag_from_gpx.get_lat_lon_time(gpx_file)

    orientation = video_orientation(video_file)

    if start_time:
        video_start_time = dateutil.parser.parse(start_time)
    else:
        try:
            exifdate = Popen(['exiftool', '-CreateDate', '-b', video_file],
                             stdout=PIPE).stdout.read()
            video_start_time = datetime.datetime.strptime(
                exifdate, '%Y:%m:%d %H:%M:%S')
        except:
            print(
                'Video recording timestamp not found. Using first GPS point time.'
            )
            video_start_time = points[0][0]
        try:
            duration = Popen(['exiftool', '-MediaDuration', '-b', video_file],
                             stdout=PIPE).stdout.read()
            video_duration = float(duration)
            video_end_time = video_start_time + datetime.timedelta(
                seconds=video_duration)
        except:
            print('Video end time not found. Using last GPS point time.')
            video_end_time = points[-1][0]

    print('GPS track starts at: {}'.format(points[0][0]))
    print('Video starts at: {}'.format(video_start_time))

    # Extract video frames.
    io.mkdir_p(output_path)
    key_points = geotag_from_gpx.sample_gpx(points, dx, dt)

    cap = cv2.VideoCapture(video_file)
    image_files = []
    for p in key_points:
        dt = (p[0] - video_start_time).total_seconds()
        if dt > 0:
            CAP_PROP_POS_MSEC = cv2.CAP_PROP_POS_MSEC if context.OPENCV3 else cv2.cv.CV_CAP_PROP_POS_MSEC
            cap.set(CAP_PROP_POS_MSEC, int(dt * 1000))
            ret, frame = cap.read()
            if ret:
                print('Grabbing frame for time {}'.format(p[0]))
                filepath = os.path.join(
                    output_path,
                    p[0].strftime("%Y_%m_%d_%H_%M_%S_%f")[:-3] + '.jpg')
                cv2.imwrite(filepath, frame)
                geotag_from_gpx.add_exif_using_timestamp(
                    filepath, points, timestamp=p[0], orientation=orientation)

                # Display the resulting frame
                if visual:
                    max_display_size = 800
                    resize_ratio = float(max_display_size) / max(
                        frame.shape[0], frame.shape[1])
                    frame = cv2.resize(frame,
                                       dsize=(0, 0),
                                       fx=resize_ratio,
                                       fy=resize_ratio)
                    cv2.imshow('frame', frame)
                    if cv2.waitKey(1) & 0xFF == 27:
                        break
                image_files.append(filepath)
    # When everything is done, release the capture
    cap.release()
    if visual:
        cv2.destroyAllWindows()
    return image_files
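
A hypothetical invocation of the function above, with all file names and the sampling distance chosen purely for illustration (the unit of dx depends on geotag_from_gpx.sample_gpx):

image_files = import_video_with_gpx('ride.mp4', 'ride.gpx', 'frames',
                                    dx=5.0, dt=None, visual=False)
print('Extracted {} geotagged frames'.format(len(image_files)))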