Example #1
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        if data.config.get('image_matcher_type', False) == 'VOCAB_TREE':
            pairs, preport = match_candidates_from_vocab_tree(images, exifs, data)
        elif data.config.get('image_matcher_type', False) == 'BRUTEFORCE':
            pairs, preport = match_candidates_bruteforce(images, exifs, data)
        else:
            pairs, preport = match_candidates_from_metadata(images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = list(match_arguments(pairs, ctx))

        start = timer()
        processes = ctx.data.config['processes']
        parallel_map(match, args, processes)
        end = timer()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
        self.write_report(data, preport, pairs, end - start)
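
All of these examples funnel per-item work through a parallel_map helper (in OpenSfM it lives in opensfm.context). The following is a minimal sketch of such a helper, assuming a multiprocessing.Pool-based implementation and the (func, args, num_proc, max_batch_size) signature the examples use; it is an illustration, not OpenSfM's exact code.

import multiprocessing

def parallel_map(func, args, num_proc, max_batch_size=1):
    """Run func over args using up to num_proc worker processes."""
    num_proc = min(num_proc, len(args))
    if num_proc <= 1:
        # Serial fallback: simpler to debug and avoids fork overhead.
        return list(map(func, args))
    with multiprocessing.Pool(num_proc) as pool:
        if max_batch_size:
            # A few batches per process, capped by max_batch_size.
            batch_size = min(max(1, len(args) // (num_proc * 2)), max_batch_size)
        else:
            batch_size = None
        return pool.map(func, args, batch_size)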
Example #2
    def undistort_images(self, graph, reconstruction, data):
        urec = types.Reconstruction()
        urec.points = reconstruction.points

        logger.debug('Undistorting the reconstruction')
        undistorted_shots = {}
        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                urec.add_camera(shot.camera)
                urec.add_shot(shot)
                undistorted_shots[shot.id] = [shot]
            elif shot.camera.projection_type == 'fisheye':
                shot.camera = perspective_camera_from_fisheye(shot.camera)
                urec.add_camera(shot.camera)
                urec.add_shot(shot)
                undistorted_shots[shot.id] = [shot]
            elif shot.camera.projection_type in [
                    'equirectangular', 'spherical'
            ]:
                subshot_width = int(data.config['depthmap_resolution'])
                subshots = perspective_views_of_a_panorama(shot, subshot_width)
                for subshot in subshots:
                    urec.add_camera(subshot.camera)
                    urec.add_shot(subshot)
                    add_subshot_tracks(graph, shot, subshot)
                undistorted_shots[shot.id] = subshots
        data.save_undistorted_reconstruction([urec])

        arguments = []
        for shot in reconstruction.shots.values():
            arguments.append((shot, undistorted_shots[shot.id], data))

        processes = data.config['processes']
        parallel_map(undistort_image, arguments, processes)
Example #3
def undistort_reconstruction_and_images(tracks_manager, reconstruction, data,
                                        udata):
    undistorted_shots = undistort_reconstruction(tracks_manager,
                                                 reconstruction, data, udata)

    arguments = []
    for shot in reconstruction.shots.values():
        arguments.append((shot, undistorted_shots[shot.id], data, udata))

    processes = data.config["processes"]
    parallel_map(undistort_image_and_masks, arguments, processes)
Example #4
def remove_banding(num_processes, mkv_search_paths=None, working_dir=None):
    # Avoid the mutable default argument pitfall.
    if mkv_search_paths is None:
        mkv_search_paths = []

    if working_dir is None:
        working_dir = os.getcwd()

    # find raw mkv files input

    valid_dir = None
    for mkv_path in mkv_search_paths:
        mkv_files = glob.glob(os.path.join(mkv_path, '*.mkv'))
        if mkv_files:
            valid_dir = mkv_path
            break
    
    if valid_dir is None:
        valid_dir = working_dir
        mkv_files = glob.glob(os.path.join(working_dir, '*.mkv'))

    if mkv_files and len(mkv_files) == 4:
        # Resolve to absolute paths so the chdir calls below don't invalidate them.
        valid_dir = os.path.abspath(valid_dir)
        mkv_files = [os.path.abspath(f) for f in mkv_files]
        mkv_dirs = []

        # use ffmpeg to extract frames from mkv file
        for mkv_file in mkv_files:
            os.chdir(valid_dir)
            mkv_dir = os.path.splitext(mkv_file)[0]
            mkv_dirs.append(mkv_dir)
            os.makedirs(mkv_dir, exist_ok=True)
            os.chdir(mkv_dir)

            #subprocess.call(['ffmpeg', '-i', mkv_file, 'img%04d.jpg', '-codec', 'copy'])
            ffmpeg.input(mkv_file).output('%04d.jpg').run()

        # call horizontal_banding_removal for each set of 4 frames
        args = [(idx, mkv_dirs) for idx in range(1, len(glob.glob(os.path.join(mkv_dirs[0], '*.jpg')))+1)]
        parallel_map(remove, args, num_processes)

        # save original mkv. use ffmpeg to encode processed frames to new mkv.
        for mkv_file in mkv_files:
            os.chdir(valid_dir)
            mkv_file_orig = mkv_file + ".orig"
            shutil.move(mkv_file, mkv_file_orig)

        for mkv_dir in mkv_dirs:
            os.chdir(mkv_dir)

            #subprocess.call(['ffmpeg', '-i', 'img%04d.jpg', '-r', '7', '-codec', 'copy', mkv_file])
            ffmpeg.input('%04d.jpg', framerate=7).output(mkv_dir + ".mkv").run()
            
            os.chdir(working_dir)
            
            shutil.rmtree(mkv_dir)

    os.chdir(working_dir)
Example #5
    def undistort_reconstruction(self, graph, reconstruction, data):
        urec = types.Reconstruction()
        urec.points = reconstruction.points

        logger.debug('Undistorting the reconstruction')
        undistorted_shots = {}
        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                urec.add_camera(shot.camera)
                urec.add_shot(shot)
                undistorted_shots[shot.id] = [shot]
            elif shot.camera.projection_type == 'brown':
                ushot = types.Shot()
                ushot.id = shot.id
                ushot.camera = perspective_camera_from_brown(shot.camera)
                ushot.pose = shot.pose
                ushot.metadata = shot.metadata
                urec.add_camera(ushot.camera)
                urec.add_shot(ushot)
                undistorted_shots[shot.id] = [ushot]
            elif shot.camera.projection_type == 'fisheye':
                ushot = types.Shot()
                ushot.id = shot.id
                ushot.camera = perspective_camera_from_fisheye(shot.camera)
                ushot.pose = shot.pose
                ushot.metadata = shot.metadata
                urec.add_camera(ushot.camera)
                urec.add_shot(ushot)
                undistorted_shots[shot.id] = [ushot]
            elif shot.camera.projection_type in [
                    'equirectangular', 'spherical'
            ]:
                subshot_width = int(data.config['depthmap_resolution'])
                subshots = perspective_views_of_a_panorama(shot, subshot_width)
                for subshot in subshots:
                    urec.add_camera(subshot.camera)
                    urec.add_shot(subshot)
                    add_subshot_tracks(graph, shot, subshot)
                undistorted_shots[shot.id] = subshots
        data.save_undistorted_reconstruction([urec])

        arguments = []
        for shot in reconstruction.shots.values():
            arguments.append(
                (shot, undistorted_shots[shot.id], data, 'image_as_array',
                 'save_undistorted_image', cv2.INTER_AREA))
            arguments.append(
                (shot, undistorted_shots[shot.id], data,
                 'segmentation_as_array', 'save_undistorted_segmentation',
                 cv2.INTER_NEAREST))

        processes = data.config['processes']
        parallel_map(undistort_image, arguments, processes)
Example #6
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()

        arguments = [(image, data) for image in images]

        start = time.time()
        processes = data.config.get('processes', 1)
        parallel_map(detect, arguments, processes)
        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('detect_features: {0}\n'.format(end - start))
Example #7
def run_dataset(data):
    """ Compute features for all images. """

    images = data.images()

    arguments = [(image, data) for image in images]

    start = timer()
    processes = data.config["processes"]
    parallel_map(detect, arguments, processes, 1)
    end = timer()
    write_report(data, end - start)
Example #8
def run_dataset(data: DataSetBase):
    """Compute features for all images."""

    start = timer()

    default_queue_size = 10
    max_queue_size = 200
    mem_available = log.memory_available()
    if mem_available:
        expected_mb = mem_available / 2
        expected_images = min(max_queue_size,
                              int(expected_mb / average_image_size(data)))
        logger.info(f"Capping memory usage to ~ {expected_mb} MB")
    else:
        expected_images = default_queue_size
    logger.info(f"Expecting to process {expected_images} images.")

    process_queue = queue.Queue(expected_images)
    arguments: List[Tuple[str, Any]] = []

    all_images = data.images()
    processes = data.config["processes"]

    if processes == 1:
        for image in all_images:
            counter = Counter()
            read_images(process_queue, data, [image], counter, 1)
            run_detection(process_queue)
            process_queue.get()
    else:
        counter = Counter()
        read_processes = data.config["read_processes"]
        if 1.5 * read_processes >= processes:
            read_processes = max(1, processes // 2)

        chunk_size = math.ceil(len(all_images) / read_processes)
        chunks_count = math.ceil(len(all_images) / chunk_size)
        read_processes = min(read_processes, chunks_count)

        expected: int = len(all_images)
        for i in range(read_processes):
            images_chunk = all_images[i * chunk_size:(i + 1) * chunk_size]
            arguments.append((
                "producer",
                (process_queue, data, images_chunk, counter, expected),
            ))
        for _ in range(processes):
            arguments.append(("consumer", (process_queue)))
        parallel_map(process, arguments, processes, 1)

    end = timer()
    write_report(data, end - start)
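
Examples #8 and #10 tag each argument tuple with a role ("producer" or "consumer") and hand everything to a single process callable that dispatches on the tag; note that the consumer payload (process_queue) is the bare queue, since the surrounding parentheses do not make a tuple. A sketch of the dispatcher these examples appear to assume, reusing the read_images and run_detection names from above (Example #10's variant also threads a force flag through the producer arguments):

def process(args):
    # Dispatch on the role tag packed into each argument tuple.
    process_type, real_args = args
    if process_type == "producer":
        queue, data, images, counter, expected = real_args
        read_images(queue, data, images, counter, expected)
    elif process_type == "consumer":
        queue = real_args  # bare queue, not a tuple
        run_detection(queue)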
Example #9
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()

        arguments = [(image, data) for image in images]

        start = timer()
        processes = data.config['processes']
        parallel_map(detect, arguments, processes)
        end = timer()
        with open(data.profile_log(), 'a') as fout:
            fout.write('detect_features: {0}\n'.format(end - start))

        self.write_report(data, end - start)
Example #10
def run_features_processing(data: DataSetBase, images: List[str], force: bool) -> None:
    """Main entry point for running features extraction on a list of images."""
    default_queue_size = 10
    max_queue_size = 200
    mem_available = log.memory_available()
    if mem_available:
        expected_mb = mem_available / 2
        expected_images = min(
            max_queue_size, int(expected_mb / average_image_size(data))
        )
        logger.info(f"Capping memory usage to ~ {expected_mb} MB")
    else:
        expected_images = default_queue_size
    logger.info(f"Expecting to process {expected_images} images.")

    process_queue = queue.Queue(expected_images)
    arguments: List[Tuple[str, Any]] = []

    processes = data.config["processes"]

    if processes == 1:
        for image in images:
            counter = Counter()
            read_images(process_queue, data, [image], counter, 1, force)
            run_detection(process_queue)
            process_queue.get()
    else:
        counter = Counter()
        read_processes = data.config["read_processes"]
        if 1.5 * read_processes >= processes:
            read_processes = max(1, processes // 2)

        chunk_size = math.ceil(len(images) / read_processes)
        chunks_count = math.ceil(len(images) / chunk_size)
        read_processes = min(read_processes, chunks_count)

        expected: int = len(images)
        for i in range(read_processes):
            images_chunk = images[i * chunk_size : (i + 1) * chunk_size]
            arguments.append(
                (
                    "producer",
                    (process_queue, data, images_chunk, counter, expected, force),
                )
            )
        for _ in range(processes):
            arguments.append(("consumer", (process_queue)))
        parallel_map(process, arguments, processes, 1)
Example #11
def compute_vlad_affinity(
    data: DataSetBase,
    images_ref,
    images_cand,
    exifs,
    reference,
    max_gps_distance,
    max_gps_neighbors,
):
    """Compute afinity scores between references and candidates
    images using VLAD-based distance.
    """
    preempted_candidates, need_load = preempt_candidates(
        images_ref, images_cand, exifs, reference, max_gps_neighbors,
        max_gps_distance)

    # construct VLAD histograms
    logger.info("Computing %d VLAD histograms" % len(need_load))
    histograms = vlad_histograms(need_load, data)

    # parallel VLAD neighbors computation
    args, processes, batch_size = create_parallel_matching_args(
        data, preempted_candidates, histograms)
    logger.info("Computing VLAD candidates with %d processes" % processes)
    return context.parallel_map(match_vlad_unwrap_args, args, processes,
                                batch_size)
Example #12
def match_images_with_pairs(data, exifs, ref_images, pairs):
    """ Perform pair matchings given pairs. """

    # Group candidate images per reference image for processing
    per_image = {im: [] for im in ref_images}
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    logger.info('Matching {} image pairs'.format(len(pairs)))
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.info('Matched {} pairs for {} ref_images in '
                '{} seconds.'.format(len(pairs), len(ref_images),
                                     timer() - start))

    # Index results per pair
    resulting_pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            resulting_pairs[im1, im2] = m

    return resulting_pairs
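
Several matching examples cap the worker count with context.processes_that_fit_in_memory so that processes times mem_per_process stays within available RAM. A plausible sketch of that helper, assuming a memory_available() probe that returns free memory in MB or None when it cannot be measured:

def processes_that_fit_in_memory(desired, per_process_mb):
    # Limit the desired process count by the memory each process needs.
    available_mb = memory_available()
    if available_mb is not None:
        fittable = max(1, int(available_mb / per_process_mb))
        return min(desired, fittable)
    return desired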
Example #13
def match_images_with_pairs(data: DataSetBase, config_override, exifs, pairs):
    """ Perform pair matchings given pairs. """
    cameras = data.load_camera_models()
    args = list(match_arguments(pairs, data, config_override, cameras, exifs))

    # Perform all pair matchings in parallel
    start = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config["processes"],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - start,
        (timer() - start) / len(pairs) if pairs else 0,
    ))

    # Index results per pair
    resulting_pairs = {}
    for im1, im2, m in matches:
        resulting_pairs[im1, im2] = m
    return resulting_pairs
Example #14
def compute_bow_affinity(
    data: DataSetBase,
    images_ref: List[str],
    images_cand: List[str],
    exifs: Dict[str, Any],
    reference: geo.TopocentricConverter,
    max_gps_distance: float,
    max_gps_neighbors: int,
) -> List[Tuple[str, List[float], List[str]]]:
    """Compute afinity scores between references and candidates
    images using BoW-based distance.
    """
    preempted_candidates, need_load = preempt_candidates(
        images_ref, images_cand, exifs, reference, max_gps_neighbors, max_gps_distance
    )

    # construct BoW histograms
    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)

    # parallel BoW neighbors computation
    args, processes, batch_size = create_parallel_matching_args(
        data, preempted_candidates, histograms
    )
    logger.info("Computing BoW candidates with %d processes" % processes)
    return context.parallel_map(match_bow_unwrap_args, args, processes, batch_size)
Example #15
def compute_vlad_affinity(
    data: DataSetBase,
    images_ref: List[str],
    images_cand: List[str],
    exifs: Dict[str, Any],
    reference: geo.TopocentricConverter,
    max_gps_distance: float,
    max_gps_neighbors: int,
    histograms: Dict[str, np.ndarray],
) -> List[Tuple[str, List[float], List[str]]]:
    """Compute afinity scores between references and candidates
    images using VLAD-based distance.
    """
    preempted_candidates, need_load = preempt_candidates(
        images_ref, images_cand, exifs, reference, max_gps_neighbors, max_gps_distance
    )

    if len(preempted_candidates) == 0:
        logger.warning(
            f"Couln't preempt any candidate with GPS, using ALL {len(images_cand)} as candidates"
        )
        preempted_candidates = {image: images_cand for image in images_ref}
        need_load = set(images_ref + images_cand)

    # construct VLAD histograms
    need_load = {im for im in need_load if im not in histograms}
    logger.info("Computing %d VLAD histograms" % len(need_load))
    histograms.update(vlad_histograms(need_load, data))

    # parallel VLAD neighbors computation
    args, processes, batch_size = create_parallel_matching_args(
        data, preempted_candidates, histograms
    )
    logger.info("Computing VLAD candidates with %d processes" % processes)
    return context.parallel_map(match_vlad_unwrap_args, args, processes, batch_size)
Example #16
def match_candidates_with_bow(data, images_ref, images_cand,
                              exifs, reference, max_neighbors,
                              max_gps_distance, max_gps_neighbors,
                              enforce_other_cameras):
    """Find candidate matching pairs using BoW-based distance.

    If max_gps_distance > 0, we first restrict the set of
    candidates to the max_gps_neighbors nearest neighbors
    selected by GPS distance.

    If enforce_other_cameras is True, we keep max_neighbors images
    with the same camera AND max_neighbors images from any other
    camera.
    """
    if max_neighbors <= 0:
        return set()

    preempted_candidates, need_load = preempt_candidates(
            images_ref, images_cand,
            exifs, reference,
            max_gps_neighbors, max_gps_distance)

    # construct BoW histograms
    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)

    # parallel BoW neighbors computation
    args, processes, batch_size = create_parallel_matching_args(
        data, preempted_candidates, histograms)
    logger.info("Computing BoW candidates with %d processes" % processes)
    results = context.parallel_map(match_bow_unwrap_args, args, processes, batch_size)

    return construct_pairs(results, max_neighbors, exifs, enforce_other_cameras)
Example #17
def match_images_with_pairs(
    data: DataSetBase,
    config_override: Dict[str, Any],
    exifs: Dict[str, Any],
    pairs: List[Tuple[str, str]],
    poses: Optional[Dict[str, pygeometry.Pose]] = None,
) -> Dict[Tuple[str, str], List[Tuple[int, int]]]:
    """Perform pair matchings given pairs."""
    cameras = data.load_camera_models()
    args = list(
        match_arguments(pairs, data, config_override, cameras, exifs, poses))

    # Perform all pair matchings in parallel
    start = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    processes = config_override.get("processes", data.config["processes"])
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(processes,
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - start,
        (timer() - start) / len(pairs) if pairs else 0,
    ))

    # Index results per pair
    resulting_pairs = {}
    for im1, im2, m in matches:
        resulting_pairs[im1, im2] = m
    return resulting_pairs
Example #18
def match_images(data, ref_images, cand_images):
    """Perform pair matching between two sets of images.

    Matches each pair (i, j) with i in ref_images and j in
    cand_images, under the assumption that
    matching(i, j) == matching(j, i). This does not hold for
    non-symmetric matching options like WORDS. Results are
    stored under i's matches only.
    """

    # Get EXIFs data
    all_images = list(set(ref_images+cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)
    logger.info('Matching {} image pairs'.format(len(pairs)))

    # Group candidate images per reference image for processing
    per_image = defaultdict(list)
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    for im in ref_images:
        match_against = []
        for im1, im2 in pairs:
            if im == im1:
                match_against.append(im2)
            elif im == im2:
                match_against.append(im1)
        logger.info("Matching {} to: {}".format(im, sorted(match_against)))

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    ctx.pdr_shots_dict = None
    if ctx.data.pdr_shots_exist():
        ctx.pdr_shots_dict = ctx.data.load_pdr_shots()
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'], mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes, jobs_per_process)
    logger.debug('Matched {} pairs in {} seconds.'.format(
        len(pairs), timer()-start))

    # Index results per pair
    pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            pairs[im1, im2] = m

    return pairs, preport
Example #19
def match_candidates_with_bow(data, images_ref, images_cand,
                              exifs, max_neighbors, order_neighbors,
                              max_pdr_distance, max_index_range,
                              enforce_other_cameras):
    """Find candidate matching pairs using BoW-based distance.

    If enforce_other_cameras is True, we keep max_neighbors images
    with the same camera AND max_neighbors images from any other
    camera.
    """
    if max_neighbors <= 0:
        return set()

    # restrict BoW search to a window of index neighbors around each image
    preempted_cand = defaultdict(list)
    n = (max_index_range + 1) // 2
    m = (order_neighbors + 1) // 2

    for i, image_ref in enumerate(images_ref):
        a = max(0, i - n)
        b = min(len(images_cand), i + n)
        c = max(0, i - m)
        d = min(len(images_cand), i + m)
        for j in list(range(a, c)) + list(range(d, b)):
            preempted_cand[image_ref].append(images_cand[j])

    # reduce sets of images from which to load words (RAM saver)
    need_load = set(preempted_cand.keys())
    for v in preempted_cand.values():
        need_load.update(v)

    # construct BoW histograms
    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)
    args = list(match_bow_arguments(preempted_cand, histograms))

    # parallel BoW neighbors computation
    per_process = 512
    processes = context.processes_that_fit_in_memory(data.config['processes'], per_process)
    batch_size = int(max(1, len(args)/(2*processes)))
    logger.info("Computing BoW candidates with %d processes" % processes)
    results = context.parallel_map(match_bow_unwrap_args, args, processes, batch_size)

    # construct final sets of pairs to match
    pairs = set()
    for im, order, other in results:
        if enforce_other_cameras:
            pairs = pairs.union(pairs_from_neighbors(im, exifs, order, other, max_neighbors))
        else:
            for i in order[:max_neighbors]:
                logger.debug("im={}, i={}, other={}".format(im, i, other[i]))
                dist = calc_pdr_distance(data, im, other[i])
                if dist < max_pdr_distance * 0.3048:
                    pairs.add(tuple(sorted((im, other[i]))))
                    logger.debug("adding pair {} - {}, pdr distance {} feet".format(im, other[i], dist/0.3048))
                else:
                    logger.debug("not adding pair {} - {}, pdr distance {} feet".format(im, other[i], dist/0.3048))
    return pairs
Example #20
    def undistort_images(self, graph, reconstruction, data):
        urec = types.Reconstruction()
        urec.points = reconstruction.points

        logger.debug('Undistorting the reconstruction')
        undistorted_shots = {}
        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                urec.add_camera(shot.camera)
                urec.add_shot(shot)
                undistorted_shots[shot.id] = [shot]
            elif shot.camera.projection_type == 'brown':
                ushot = types.Shot()
                ushot.id = shot.id
                ushot.camera = perspective_camera_from_brown(shot.camera)
                ushot.pose = shot.pose
                ushot.metadata = shot.metadata
                urec.add_camera(ushot.camera)
                urec.add_shot(ushot)
                undistorted_shots[shot.id] = [ushot]
            elif shot.camera.projection_type == 'fisheye':
                ushot = types.Shot()
                ushot.id = shot.id
                ushot.camera = perspective_camera_from_fisheye(shot.camera)
                ushot.pose = shot.pose
                ushot.metadata = shot.metadata
                urec.add_camera(ushot.camera)
                urec.add_shot(ushot)
                undistorted_shots[shot.id] = [ushot]
            elif shot.camera.projection_type in ['equirectangular', 'spherical']:
                subshot_width = int(data.config['depthmap_resolution'])
                subshots = perspective_views_of_a_panorama(shot, subshot_width)
                for subshot in subshots:
                    urec.add_camera(subshot.camera)
                    urec.add_shot(subshot)
                    add_subshot_tracks(graph, shot, subshot)
                undistorted_shots[shot.id] = subshots
        data.save_undistorted_reconstruction([urec])

        arguments = []
        for shot in reconstruction.shots.values():
            arguments.append((shot, undistorted_shots[shot.id], data))

        processes = data.config['processes']
        parallel_map(undistort_image, arguments, processes)
Example #21
def gen_ss(W, processes):
    size_w = W
    size_h = W // 2

    # This class helps load input images from different sources.
    vs = VideoStreamer(input_dir, size_h, size_w, skip, img_glob)

    print('==> Loading pre-trained network.')
    current_dir = os.path.dirname(__file__)
    weights_path = os.path.join(os.path.dirname(current_dir), weights_file)

    # This class runs the SuperPoint network and processes its outputs.
    fe = SuperPointFrontend(weights_path=weights_path,
                            nms_dist=nms_dist,
                            conf_thresh=conf_thresh,
                            nn_thresh=nn_thresh,
                            cuda=cuda)
    print('==> Successfully loaded pre-trained network.')

    # This class helps merge consecutive point matches into tracks.
    tracker = PointTracker(max_length, nn_thresh=fe.nn_thresh)

    # Create a window to display the demo.
    if display:
        win = 'SuperPoint Tracker'
        cv2.namedWindow(win)
    else:
        print('Skipping visualization, will not show a GUI.')

    # Create output directory if desired.

    if write:
        print('==> Will write outputs to %s' % write_dir)
        if not os.path.exists(write_dir):
            os.makedirs(write_dir)

    print('==> Running Demo.')
    arguments = [(idx, vs, fe, tracker) for idx in range(len(vs.listing))]
    parallel_map(detect, arguments, processes)

    # Close any remaining windows.
    if display:
        cv2.destroyAllWindows()

    print('.. finished Extracting Super Points.')
Example #22
    def run(self, args):

        data = dataset.DataSet(args.dataset)
        images = data.images()

        arguments = [(image, data) for image in images]

        start = timer()

        processes = data.config['processes']
        parallel_map(equi_to_unfolded_cube, arguments, processes)

        end = timer()

        with open(data.profile_log(), 'a') as fout:
            fout.write('create_unfolded_cube: {0}\n'.format(end - start))

        self.write_report(data, end - start)
Example #23
def match_candidates_with_bow(data, images_ref, images_cand, exifs, reference,
                              max_neighbors, max_gps_distance,
                              max_gps_neighbors, enforce_other_cameras):
    """Find candidate matching pairs using BoW-based distance.

    If max_gps_distance > 0, we first restrict the set of
    candidates to the max_gps_neighbors nearest neighbors
    selected by GPS distance.

    If enforce_other_cameras is True, we keep max_neighbors images
    with the same camera AND max_neighbors images from any other
    camera.
    """
    if max_neighbors <= 0:
        return set()

    # preempt candidates images using GPS
    preempted_cand = {im: images_cand for im in images_ref}
    if max_gps_distance > 0 or max_gps_neighbors > 0:
        gps_pairs = match_candidates_by_distance(images_ref, images_cand,
                                                 exifs, reference,
                                                 max_gps_neighbors,
                                                 max_gps_distance)
        preempted_cand = defaultdict(list)
        for p in gps_pairs:
            preempted_cand[p[0]].append(p[1])
            preempted_cand[p[1]].append(p[0])

    # reduce sets of images from which to load words (RAM saver)
    need_load = set(preempted_cand.keys())
    for v in preempted_cand.values():
        need_load.update(v)

    # construct BoW histograms
    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)
    args = list(match_bow_arguments(preempted_cand, histograms))

    # parallel BoW neighbors computation
    per_process = 512
    processes = context.processes_that_fit_in_memory(data.config['processes'],
                                                     per_process)
    batch_size = max(1, len(args) // (2 * processes))  # integer chunk size
    logger.info("Computing BoW candidates with %d processes" % processes)
    results = context.parallel_map(match_bow_unwrap_args, args, processes,
                                   batch_size)

    # construct final sets of pairs to match
    pairs = set()
    for im, order, other in results:
        if enforce_other_cameras:
            pairs = pairs.union(
                pairs_from_neighbors(im, exifs, order, other, max_neighbors))
        else:
            for i in order[:max_neighbors]:
                pairs.add(tuple(sorted((im, other[i]))))
    return pairs
Example #24
def undistort_reconstruction_with_images(
    tracks_manager: Optional[pymap.TracksManager],
    reconstruction: types.Reconstruction,
    data: DataSetBase,
    udata: UndistortedDataSet,
    skip_images: bool = False,
) -> Dict[pymap.Shot, List[pymap.Shot]]:
    undistorted_shots = undistort_reconstruction(
        tracks_manager, reconstruction, data, udata
    )
    if not skip_images:
        arguments = []
        for shot_id, subshots in undistorted_shots.items():
            arguments.append((reconstruction.shots[shot_id], subshots, data, udata))

        processes = data.config["processes"]
        parallel_map(undistort_image_and_masks, arguments, processes)
    return undistorted_shots
Example #25
    def undistort_reconstruction(self, graph, reconstruction, opensfm_config,
                                 udata, file_path, self_compute, self_path):
        urec = types.Reconstruction()
        urec.points = reconstruction.points
        ugraph = nx.Graph()

        logger.debug('Undistorting the reconstruction')
        undistorted_shots = {}
        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                camera = perspective_camera_from_perspective(shot.camera)
                subshots = [get_shot_with_different_camera(shot, camera)]
            elif shot.camera.projection_type == 'brown':
                camera = perspective_camera_from_brown(shot.camera)
                subshots = [get_shot_with_different_camera(shot, camera)]
            elif shot.camera.projection_type == 'fisheye':
                camera = perspective_camera_from_fisheye(shot.camera)
                subshots = [get_shot_with_different_camera(shot, camera)]
            elif shot.camera.projection_type in [
                    'equirectangular', 'spherical'
            ]:
                subshot_width = int(opensfm_config['depthmap_resolution'])
                subshots = perspective_views_of_a_panorama(shot, subshot_width)

            for subshot in subshots:
                urec.add_camera(subshot.camera)
                urec.add_shot(subshot)
                if graph:
                    add_subshot_tracks(graph, ugraph, shot, subshot)
            undistorted_shots[shot.id] = subshots

        udata.save_undistorted_reconstruction([urec])
        if graph:
            udata.save_undistorted_tracks_graph(ugraph)

        arguments = []
        for shot in reconstruction.shots.values():
            arguments.append(
                (shot, undistorted_shots[shot.id], opensfm_config, udata,
                 file_path, self.imageFilter, self_compute, self_path))

        processes = opensfm_config['processes']
        parallel_map(undistort_image_and_masks, arguments, processes)
Example #26
    def undistort_reconstruction(self, tracks_manager, reconstruction, data,
                                 udata):
        urec = types.Reconstruction()
        urec.points = reconstruction.points
        utracks_manager = pysfm.TracksManager()
        logger.debug('Undistorting the reconstruction')
        undistorted_shots = {}
        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                camera = perspective_camera_from_perspective(shot.camera)
                urec.add_camera(camera)
                subshots = [get_shot_with_different_camera(urec, shot, camera)]
            elif shot.camera.projection_type == 'brown':
                camera = perspective_camera_from_brown(shot.camera)
                urec.add_camera(camera)
                subshots = [get_shot_with_different_camera(urec, shot, camera)]
            elif shot.camera.projection_type in ['fisheye', 'fisheye_opencv']:
                camera = perspective_camera_from_fisheye(shot.camera)
                urec.add_camera(camera)
                subshots = [get_shot_with_different_camera(urec, shot, camera)]
            elif shot.camera.projection_type in [
                    'equirectangular', 'spherical'
            ]:
                subshot_width = int(data.config['depthmap_resolution'])
                subshots = perspective_views_of_a_panorama(
                    shot, subshot_width, urec)

            for subshot in subshots:
                if tracks_manager:
                    add_subshot_tracks(tracks_manager, utracks_manager, shot,
                                       subshot)
            undistorted_shots[shot.id] = subshots

        udata.save_undistorted_reconstruction([urec])
        if tracks_manager:
            udata.save_undistorted_tracks_manager(utracks_manager)

        arguments = []
        for shot in reconstruction.shots.values():
            arguments.append((shot, undistorted_shots[shot.id], data, udata))

        processes = data.config['processes']
        parallel_map(undistort_image_and_masks, arguments, processes)
Example #27
def compute_image_pairs(track_dict, data):
    """All matched image pairs sorted by reconstructability."""
    args = _pair_reconstructability_arguments(track_dict, data)
    processes = data.config['processes']
    result = parallel_map(_compute_pair_reconstructability, args, processes)
    result = list(result)
    pairs = [(im1, im2) for im1, im2, r in result if r > 0]
    score = [r for im1, im2, r in result if r > 0]
    order = np.argsort(-np.array(score))
    return [pairs[o] for o in order]
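
The last three lines keep only pairs with a positive reconstructability score and sort them best-first with a negated argsort. A standalone illustration of that idiom:

import numpy as np

pairs = [('a', 'b'), ('a', 'c'), ('b', 'c')]
score = [0.2, 0.9, 0.5]
order = np.argsort(-np.array(score))  # indices ordered by descending score
print([pairs[o] for o in order])      # [('a', 'c'), ('b', 'c'), ('a', 'b')]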
Example #28
File: matching.py  Project: mfzhang/OpenSfM
def match_images(data, ref_images, cand_images, overwrite):
    """ Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.

    If 'overwrite' is set to True, matches of a given images will be
    overwritten with the new ones, if False, they're going to be updated,
    keeping the previous ones.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)
    logger.info('Matching {} image pairs'.format(len(pairs)))

    # Group candidate images per reference image for processing
    per_image = {im: [] for im in ref_images}
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    ctx.overwrite = overwrite
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.debug('Matched {} pairs in {} seconds.'.format(
        len(pairs),
        timer() - start))

    # Index results per pair
    pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            pairs[im1, im2] = m

    return pairs, preport
Example #29
def undistort_reconstruction(tracks_manager, reconstruction, data, udata):
    urec = types.Reconstruction()
    urec.points = reconstruction.points
    utracks_manager = pysfm.TracksManager()
    logger.debug("Undistorting the reconstruction")
    undistorted_shots = {}
    for shot in reconstruction.shots.values():
        if shot.camera.projection_type == "perspective":
            camera = perspective_camera_from_perspective(shot.camera)
            urec.add_camera(camera)
            subshots = [get_shot_with_different_camera(urec, shot, camera)]
        elif shot.camera.projection_type == "brown":
            camera = perspective_camera_from_brown(shot.camera)
            urec.add_camera(camera)
            subshots = [get_shot_with_different_camera(urec, shot, camera)]
        elif shot.camera.projection_type in ["fisheye", "fisheye_opencv"]:
            camera = perspective_camera_from_fisheye(shot.camera)
            urec.add_camera(camera)
            subshots = [get_shot_with_different_camera(urec, shot, camera)]
        elif pygeometry.Camera.is_panorama(shot.camera.projection_type):
            subshot_width = int(data.config["depthmap_resolution"])
            subshots = perspective_views_of_a_panorama(shot, subshot_width,
                                                       urec)

        for subshot in subshots:
            if tracks_manager:
                add_subshot_tracks(tracks_manager, utracks_manager, shot,
                                   subshot)
        undistorted_shots[shot.id] = subshots

    udata.save_undistorted_reconstruction([urec])
    if tracks_manager:
        udata.save_undistorted_tracks_manager(utracks_manager)

    arguments = []
    for shot in reconstruction.shots.values():
        arguments.append((shot, undistorted_shots[shot.id], data, udata))

    processes = data.config["processes"]
    parallel_map(undistort_image_and_masks, arguments, processes)
Example #30
def vlad_histograms(images: Iterable[str], data: DataSetBase) -> Dict[str, np.ndarray]:
    """Construct VLAD histograms from the image features.

    Returns a dictionary of VLAD vectors for the images.
    """
    batch_size = 4
    vlads = context.parallel_map(
        vlad_histogram_unwrap_args,
        [(data, image) for image in images],
        data.config["processes"],
        batch_size,
    )
    return {v[0]: v[1] for v in vlads if v}
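
vlad_histogram_unwrap_args follows the *_unwrap_args convention used throughout these examples: pool workers receive a single tuple, so a thin wrapper unpacks it before doing the real work. The final dict comprehension filters falsy results, which suggests the wrapper returns None on failure. A hedged sketch, with vlad_histogram_for_image as a hypothetical helper returning an (image, vector) pair:

def vlad_histogram_unwrap_args(args):
    # parallel_map passes one tuple per task; unpack it here.
    data, image = args
    try:
        return vlad_histogram_for_image(data, image)  # hypothetical helper
    except Exception:
        # Returning None lets the caller's 'if v' filter drop failures.
        return None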
Example #31
def compute_image_pairs(track_dict: Dict[Tuple[str, str],
                                         tracking.TPairTracks],
                        data: DataSetBase) -> List[Tuple[str, str]]:
    """All matched image pairs sorted by reconstructability."""
    cameras = data.load_camera_models()
    args = _pair_reconstructability_arguments(track_dict, cameras, data)
    processes = data.config["processes"]
    result = parallel_map(_compute_pair_reconstructability, args, processes)
    result = list(result)
    pairs = [(im1, im2) for im1, im2, r in result if r > 0]
    score = [r for im1, im2, r in result if r > 0]
    order = np.argsort(-np.array(score))
    return [pairs[o] for o in order]
Example #32
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        pairs = match_candidates_from_metadata(images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = list(match_arguments(pairs, ctx))

        start = time.time()
        processes = ctx.data.config.get('processes', 1)
        parallel_map(match, args, processes)
        end = time.time()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
Example #33
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        pairs, preport = match_candidates_from_metadata(images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = list(match_arguments(pairs, ctx))

        start = timer()
        processes = ctx.data.config['processes']
        parallel_map(match, args, processes)
        end = timer()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
        self.write_report(data, preport, pairs, end - start)
Example #34
    def undistort_reconstruction(self, graph, reconstruction, data):
        urec = types.Reconstruction()
        urec.points = reconstruction.points
        ugraph = nx.Graph()

        logger.debug('Undistorting the reconstruction')
        undistorted_shots = {}
        for shot in reconstruction.shots.values():
            if shot.camera.projection_type == 'perspective':
                camera = perspective_camera_from_perspective(shot.camera)
                subshots = [get_shot_with_different_camera(shot, camera)]
            elif shot.camera.projection_type == 'brown':
                camera = perspective_camera_from_brown(shot.camera)
                subshots = [get_shot_with_different_camera(shot, camera)]
            elif shot.camera.projection_type == 'fisheye':
                camera = perspective_camera_from_fisheye(shot.camera)
                subshots = [get_shot_with_different_camera(shot, camera)]
            elif shot.camera.projection_type in ['equirectangular', 'spherical']:
                subshot_width = int(data.config['depthmap_resolution'])
                subshots = perspective_views_of_a_panorama(shot, subshot_width)

            for subshot in subshots:
                urec.add_camera(subshot.camera)
                urec.add_shot(subshot)
                add_subshot_tracks(graph, ugraph, shot, subshot)
            undistorted_shots[shot.id] = subshots

        data.save_undistorted_reconstruction([urec])
        data.save_undistorted_tracks_graph(ugraph)

        arguments = []
        for shot in reconstruction.shots.values():
            arguments.append((shot, undistorted_shots[shot.id], data))

        processes = data.config['processes']
        parallel_map(undistort_image_and_masks, arguments, processes)
Example #35
def compute_depthmaps(data, graph, reconstruction):
    """Compute and refine depthmaps for all shots."""
    logger.info('Computing neighbors')
    processes = data.config.get('processes', 1)
    num_neighbors = data.config['depthmap_num_neighbors']
    tracks, _ = matching.tracks_and_images(graph)
    common_tracks = matching.all_common_tracks(graph, tracks, include_features=False)

    neighbors = {}
    for shot in reconstruction.shots.values():
        neighbors[shot.id] = find_neighboring_images(
            shot, common_tracks, reconstruction, num_neighbors)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        min_depth, max_depth = compute_depth_range(graph, reconstruction, shot)
        arguments.append((data, neighbors[shot.id], min_depth, max_depth, shot))
    parallel_map(compute_depthmap_catched, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(clean_depthmap_catched, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(prune_depthmap_catched, arguments, processes)

    merge_depthmaps(data, graph, reconstruction, neighbors)
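
The *_catched wrappers suggest per-shot exception guards, so one failing depthmap does not abort the whole pool. A sketch of what such a wrapper might look like, with compute_depthmap as the assumed underlying worker:

def compute_depthmap_catched(arguments):
    # Trap per-shot failures; log them instead of killing the pool.
    try:
        compute_depthmap(arguments)
    except Exception:
        logger.exception('Exception while computing depthmap')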