Example #1
0
def match_images_with_pairs(data: DataSetBase, config_override, exifs, pairs):
    """Match features for every given image pair.

    Returns a dict keyed by ``(im1, im2)`` holding the matches of each pair.
    """
    cameras = data.load_camera_models()
    job_args = list(match_arguments(pairs, data, config_override, cameras, exifs))

    # Run every pair matching in parallel, capping workers by memory budget.
    begin = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(
        data.config["processes"], mem_per_process
    )
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(
        match_unwrap_args, job_args, processes, jobs_per_process
    )
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - begin,
        (timer() - begin) / len(pairs) if pairs else 0,
    ))

    # Index the matcher output by image pair.
    return {(im1, im2): m for im1, im2, m in matches}
Example #2
0
def match_images_with_pairs(
    data: DataSetBase,
    config_override: Dict[str, Any],
    exifs: Dict[str, Any],
    pairs: List[Tuple[str, str]],
    poses: Optional[Dict[str, pygeometry.Pose]] = None,
) -> Dict[Tuple[str, str], List[Tuple[int, int]]]:
    """Match features for every given image pair.

    Returns a dict keyed by ``(im1, im2)`` holding the matches of each pair.
    """
    cameras = data.load_camera_models()
    job_args = list(
        match_arguments(pairs, data, config_override, cameras, exifs, poses)
    )

    # Run every pair matching in parallel; the config override may supply
    # its own process count, capped afterwards by the memory budget.
    begin = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    num_processes = config_override.get("processes", data.config["processes"])
    mem_per_process = 512
    jobs_per_process = 2
    num_processes = context.processes_that_fit_in_memory(
        num_processes, mem_per_process
    )
    logger.info("Computing pair matching with %d processes" % num_processes)
    matches = context.parallel_map(
        match_unwrap_args, job_args, num_processes, jobs_per_process
    )
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - begin,
        (timer() - begin) / len(pairs) if pairs else 0,
    ))

    # Index the matcher output by image pair.
    return {(im1, im2): m for im1, im2, m in matches}
Example #3
0
def match_images_with_pairs(data, exifs, ref_images, pairs):
    """Match features for every given image pair.

    Returns a dict keyed by ``(im1, im2)`` holding the matches of each pair.
    """
    # Group candidate images under each reference image.
    per_image = {im: [] for im in ref_images}
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    job_args = list(match_arguments(per_image, ctx))

    # Run every pair matching in parallel, capping workers by memory budget.
    begin = timer()
    logger.info('Matching {} image pairs'.format(len(pairs)))
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(
        data.config['processes'], mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(
        match_unwrap_args, job_args, processes, jobs_per_process)
    logger.info('Matched {} pairs for {} ref_images in '
                '{} seconds.'.format(len(pairs), len(ref_images),
                                     timer() - begin))

    # Flatten the per-ref-image results into a per-pair index.
    return {
        (im1, im2): m
        for im1, im1_matches in matches
        for im2, m in im1_matches.items()
    }
Example #4
0
def match_images(data, ref_images, cand_images):
    """ Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.

    Returns a (resulting_pairs, preport) tuple: a dict keyed by
    (im1, im2) with the matches, and the pair-selection report.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)
    logger.info('Matching {} image pairs'.format(len(pairs)))

    # Store per each image in ref for processing
    per_image = defaultdict(list)
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    # Log what each ref image will be matched against.  Build the symmetric
    # neighbor lists in a single pass over the pairs instead of rescanning
    # all pairs once per ref image (was O(ref_images * pairs)).
    match_against = defaultdict(list)
    for im1, im2 in pairs:
        match_against[im1].append(im2)
        match_against[im2].append(im1)
    for im in ref_images:
        logger.info("Matching {} to: {}".format(im, sorted(match_against[im])))

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    ctx.pdr_shots_dict = None
    if ctx.data.pdr_shots_exist():
        ctx.pdr_shots_dict = ctx.data.load_pdr_shots()
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    mem_per_process = 512  # estimated MB per worker, caps the process count
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'], mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes, jobs_per_process)
    logger.debug('Matched {} pairs in {} seconds.'.format(
        len(pairs), timer() - start))

    # Index results per pair.  Use a fresh name instead of rebinding `pairs`
    # (the candidate list above) — the shadowing was confusing and
    # inconsistent with the `resulting_pairs` convention used elsewhere.
    resulting_pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            resulting_pairs[im1, im2] = m

    return resulting_pairs, preport
Example #5
0
def match_candidates_with_bow(data, images_ref, images_cand,
                              exifs, max_neighbors, order_neighbors,
                              max_pdr_distance, max_index_range,
                              enforce_other_cameras):
    """Find candidate matching pairs using BoW-based distance.

    Candidates for each reference image are pre-filtered to a window of
    roughly max_index_range positions around its own index in the
    sequence, excluding the order_neighbors closest positions.

    If enforce_other_cameras is True, we keep max_neighbors images
    with same cameras AND  max_neighbors images from any other different
    camera.  Otherwise pairs are kept only when their PDR distance is
    below max_pdr_distance (max_pdr_distance appears to be in feet —
    0.3048 converts to meters; confirm against callers).

    Returns a set of (im1, im2) tuples, each sorted lexicographically.
    """
    if max_neighbors <= 0:
        return set()

    # restrict bow searching to a window of index neighbors
    # NOTE(review): indexing images_cand by the position of image_ref in
    # images_ref assumes both lists are parallel / identically ordered —
    # confirm at the call sites.
    preempted_cand = defaultdict(list)
    n = (max_index_range + 1) // 2  # half-width of the outer window
    m = (order_neighbors + 1) // 2  # half-width of the excluded inner window

    for i, image_ref in enumerate(images_ref):
        # [a, b) is the outer candidate window, [c, d) the inner exclusion
        # zone (presumably already covered by sequential matching — TODO
        # confirm); candidates come from the set difference of the two.
        a = max(0, i - n)
        b = min(len(images_cand), i + n)
        c = max(0, i - m)
        d = min(len(images_cand), i + m)
        for j in list(range(a, c)) + list(range(d, b)):
            preempted_cand[image_ref].append(images_cand[j])

    # reduce sets of images from which to load words (RAM saver)
    need_load = set(preempted_cand.keys())
    for v in preempted_cand.values():
        need_load.update(v)

    # construct BoW histograms
    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)
    args = list(match_bow_arguments(preempted_cand, histograms))

    # parallel BoW neighbors computation
    per_process = 512  # estimated MB per worker, caps the process count
    processes = context.processes_that_fit_in_memory(data.config['processes'], per_process)
    batch_size = int(max(1, len(args)/(2*processes)))
    logger.info("Computing BoW candidates with %d processes" % processes)
    results = context.parallel_map(match_bow_unwrap_args, args, processes, batch_size)

    # construct final sets of pairs to match
    pairs = set()
    for im, order, other in results:
        if enforce_other_cameras:
            pairs = pairs.union(pairs_from_neighbors(im, exifs, order, other, max_neighbors))
        else:
            # keep only the max_neighbors closest BoW neighbors whose PDR
            # distance passes the threshold
            for i in order[:max_neighbors]:
                logger.debug("im={}, i={}, other={}".format(im, i, other[i]))
                dist = calc_pdr_distance(data, im, other[i])
                if dist < max_pdr_distance * 0.3048:
                    pairs.add(tuple(sorted((im, other[i]))))
                    logger.debug("adding pair {} - {}, pdr distance {} feet".format(im, other[i], dist/0.3048))
                else:
                    logger.debug("not adding pair {} - {}, pdr distance {} feet".format(im, other[i], dist/0.3048))
    return pairs
Example #6
0
def create_parallel_matching_args(data, preempted_cand, histograms):
    """Create arguments for the parallel histogram matching step.

    Returns (args, processes, batch_size): the per-image job arguments, a
    process count capped so the workers fit in memory, and a batch size
    spreading the work over roughly two batches per process.
    """
    args = list(match_histogram_arguments(preempted_cand, histograms))

    # parallel VLAD neighbors computation
    per_process = 512  # estimated MB per worker, caps the process count
    processes = context.processes_that_fit_in_memory(data.config['processes'], per_process)
    # BUG FIX: `/` yields a float in Python 3, but a batch size must be an
    # int — use integer division (as the typed variant of this helper does).
    batch_size = max(1, len(args) // (2 * processes))
    return args, processes, batch_size
Example #7
0
def match_candidates_with_bow(data, images_ref, images_cand, exifs, reference,
                              max_neighbors, max_gps_distance,
                              max_gps_neighbors, enforce_other_cameras):
    """Find candidate matching pairs using BoW-based distance.

    If max_gps_distance > 0 or max_gps_neighbors > 0, we first restrain
    the candidate set using max_gps_neighbors neighbors selected by GPS
    distance.

    If enforce_other_cameras is True, we keep max_neighbors images
    with same cameras AND max_neighbors images from any other different
    camera.

    Returns a set of (im1, im2) tuples, each sorted lexicographically.
    """
    if max_neighbors <= 0:
        return set()

    # preempt candidates images using GPS
    preempted_cand = {im: images_cand for im in images_ref}
    if max_gps_distance > 0 or max_gps_neighbors > 0:
        gps_pairs = match_candidates_by_distance(images_ref, images_cand,
                                                 exifs, reference,
                                                 max_gps_neighbors,
                                                 max_gps_distance)
        # GPS pairs are symmetric: register each image as the other's candidate
        preempted_cand = defaultdict(list)
        for p in gps_pairs:
            preempted_cand[p[0]].append(p[1])
            preempted_cand[p[1]].append(p[0])

    # reduce sets of images from which to load words (RAM saver)
    need_load = set(preempted_cand.keys())
    for v in preempted_cand.values():
        need_load.update(v)

    # construct BoW histograms
    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)
    args = list(match_bow_arguments(preempted_cand, histograms))

    # parallel BoW neighbors computation
    per_process = 512  # estimated MB per worker, caps the process count
    processes = context.processes_that_fit_in_memory(data.config['processes'],
                                                     per_process)
    # BUG FIX: `/` yields a float in Python 3, but a batch size must be an
    # int — use integer division.
    batch_size = max(1, len(args) // (2 * processes))
    logger.info("Computing BoW candidates with %d processes" % processes)
    results = context.parallel_map(match_bow_unwrap_args, args, processes,
                                   batch_size)

    # construct final sets of pairs to match
    pairs = set()
    for im, order, other in results:
        if enforce_other_cameras:
            pairs = pairs.union(
                pairs_from_neighbors(im, exifs, order, other, max_neighbors))
        else:
            # keep only the max_neighbors closest BoW neighbors
            for i in order[:max_neighbors]:
                pairs.add(tuple(sorted((im, other[i]))))
    return pairs
Example #8
0
def match_images(data, ref_images, cand_images, overwrite):
    """ Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.

    If 'overwrite' is set to True, matches of a given images will be
    overwritten with the new ones, if False, they're going to be updated,
    keeping the previous ones.

    Returns a (resulting_pairs, preport) tuple: a dict keyed by
    (im1, im2) with the matches, and the pair-selection report.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)
    logger.info('Matching {} image pairs'.format(len(pairs)))

    # Store per each image in ref for processing
    per_image = {im: [] for im in ref_images}
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    ctx.overwrite = overwrite
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    mem_per_process = 512  # estimated MB per worker, caps the process count
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.debug('Matched {} pairs in {} seconds.'.format(
        len(pairs),
        timer() - start))

    # Index results per pair.  Use a fresh name instead of rebinding `pairs`
    # (the candidate list above) — the shadowing was confusing and
    # inconsistent with the `resulting_pairs` convention used elsewhere.
    resulting_pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            resulting_pairs[im1, im2] = m

    return resulting_pairs, preport
Example #9
0
def create_parallel_matching_args(
    data: DataSetBase,
    preempted_cand: Dict[str, list],
    histograms: Dict[str, np.ndarray],
) -> Tuple[List[Tuple[str, list, Dict[str, np.ndarray]]], int, int]:
    """Build the job list and parallelism parameters for histogram matching.

    Returns (args, processes, batch_size): one (image, candidates,
    histograms) job per preempted image, a memory-capped process count,
    and a batch size of roughly two batches per process.
    """
    args = [
        (image, candidates, histograms)
        for image, candidates in preempted_cand.items()
    ]

    # parallel VLAD neighbors computation
    per_process = 512
    processes = context.processes_that_fit_in_memory(
        data.config["processes"], per_process
    )
    batch_size = max(1, len(args) // (2 * processes))
    return args, processes, batch_size
Example #10
0
def match_images_with_pairs(file_path, opensfm_config, exifs, ref_images, pairs):
    """Perform pair matchings given pairs.

    Returns a dict mapping (im1, im2) to their matches, or None when
    matching fails (the exception is logged; best-effort contract kept).
    """
    # BUG FIX: the original body mixed tabs and spaces (a SyntaxError or
    # TabError on Python 3) — re-indented consistently with 4 spaces.
    try:
        # Store candidates per reference image for processing.
        per_image = {im: [] for im in ref_images}
        for im1, im2 in pairs:
            per_image[im1].append(im2)

        ctx = Context()
        ctx.opensfm_config = opensfm_config
        ctx.feature_path = file_path + 'features'
        ctx.file_path = file_path
        ctx.cameras = opensfm_interface.load_camera_models(file_path)
        ctx.exifs = exifs
        args = list(match_arguments(per_image, ctx))

        # Perform all pair matchings in parallel
        start = timer()
        logger.info('Matching {} image pairs'.format(len(pairs)))
        mem_per_process = 1024  # estimated MB per worker, caps process count
        jobs_per_process = 2
        processes = context.processes_that_fit_in_memory(
            opensfm_config['processes'], mem_per_process)
        logger.info("Computing pair matching with %d processes" % processes)
        matches = context.parallel_map(match_unwrap_args, args, processes,
                                       jobs_per_process)
        logger.info(
            'Matched {} pairs for {} ref_images {} '
            'in {} seconds ({} seconds/pair).'.format(
                len(pairs),
                len(ref_images),
                log_projection_types(pairs, ctx.exifs, ctx.cameras),
                timer() - start,
                (timer() - start) / len(pairs) if pairs else 0))

        # Index results per pair
        resulting_pairs = {}
        for im1, im1_matches in matches:
            for im2, m in im1_matches.items():
                resulting_pairs[im1, im2] = m

        return resulting_pairs
    except Exception:
        # BUG FIX: the original handler called `print(e.message)` — Python 3
        # exceptions have no `.message` attribute, so the handler itself
        # raised AttributeError.  Log the full traceback via the module
        # logger instead, and keep returning None on failure.
        logger.exception('Exception while matching image pairs')
        return None