Example #1
def match_images(data: DataSetBase, config_override, ref_images, cand_images):
    """Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images,
        cand_images,
        exifs,
        data,
        config_override,
    )

    # Match them!
    return (
        match_images_with_pairs(data, config_override, exifs, ref_images, pairs),
        preport,
    )
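
A minimal calling sketch for the variant in Example #1; the dataset path is a placeholder and the empty config_override simply keeps the dataset's own settings.

from opensfm.dataset import DataSet  # assumption: the standard OpenSfM dataset class

data = DataSet("path/to/dataset")           # hypothetical dataset path
images = sorted(data.images())
ref, cand = images[:1], images[1:]          # match the first image against the rest
pairs, report = match_images(data, {}, ref, cand)
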
Example #2
def match_candidates_from_metadata(data,
                                   neighbors=NEIGHBORS,
                                   assert_count=NEIGHBORS):
    assert neighbors >= assert_count

    # Run metadata extraction and feature detection on the test dataset
    args = Args(data.data_path)
    commands.extract_metadata.Command().run(args)
    commands.detect_features.Command().run(args)

    # Use the first image as the reference and the rest as candidates
    ims = sorted(data.images())
    ims_ref = ims[:1]
    ims_cand = ims[1:]

    exifs = {im: data.load_exif(im) for im in ims}

    pairs, _ = pairs_selection.match_candidates_from_metadata(
        ims_ref, ims_cand, exifs, data)

    # Count how many of the expected nearest neighbors appear among the candidates
    matches = [p[1] for p in pairs]
    names = ['{}.jpg'.format(str(i).zfill(2)) for i in range(2, 2 + neighbors)]
    count = 0
    for name in names:
        if name in matches:
            count += 1

    assert count >= assert_count
Example #3
def match_candidates_from_metadata(data,
                                   neighbors=NEIGHBORS,
                                   assert_count=NEIGHBORS):
    assert neighbors >= assert_count

    ims = sorted(data.images())
    ims_ref = ims[:1]
    ims_cand = ims[1:]

    exifs = {im: data.load_exif(im) for im in ims}

    pairs, _ = pairs_selection.match_candidates_from_metadata(
        ims_ref,
        ims_cand,
        exifs,
        data,
        {},
    )

    matches = [p[1] for p in pairs]
    names = ["{}.jpg".format(str(i).zfill(2)) for i in range(2, 2 + neighbors)]
    count = 0
    for name in names:
        if name in matches:
            count += 1

    assert count >= assert_count
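
The neighbor check in Examples #2 and #3 can also be written with a set intersection. The helper below is just an equivalent rewrite of the loop-and-count assertion above, with a hypothetical name; it is not part of the original tests.

def enough_expected_neighbors(pairs, neighbors, assert_count):
    # Same check as the loop in Examples #2 and #3: at least assert_count of
    # the expected neighbor images must be selected as candidates.
    expected = {"{:02d}.jpg".format(i) for i in range(2, 2 + neighbors)}
    found = {p[1] for p in pairs}
    return len(expected & found) >= assert_count

assert enough_expected_neighbors(
    [("01.jpg", "02.jpg"), ("01.jpg", "03.jpg")], neighbors=2, assert_count=2)
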
Example #4
def match_images(data, ref_images, cand_images):
    """ Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)
    logger.info('Matching {} image pairs'.format(len(pairs)))

    # Store per each image in ref for processing
    per_image = defaultdict(list)
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    for im in ref_images:
        match_against = []
        for im1, im2 in pairs:
            if im == im1:
                match_against.append(im2)
            elif im == im2:
                match_against.append(im1)
        logger.info("Matching {} to: {}".format(im, sorted(match_against)))

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    ctx.pdr_shots_dict = None
    if ctx.data.pdr_shots_exist():
        ctx.pdr_shots_dict = ctx.data.load_pdr_shots()
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'], mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes, jobs_per_process)
    logger.debug('Matched {} pairs in {} seconds.'.format(
        len(pairs), timer()-start))

    # Index results per pair
    pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            pairs[im1, im2] = m

    return pairs, preport
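
Example #4 caps the worker count by memory through context.processes_that_fit_in_memory. The helper below is only a rough sketch of that idea, assuming psutil is available; it is not the actual OpenSfM implementation.

import psutil  # assumption: psutil is installed

def processes_that_fit_in_memory_sketch(desired, mem_per_process_mb):
    # Limit the requested process count so that the workers' combined
    # memory budget stays within the currently available memory.
    available_mb = psutil.virtual_memory().available / 1024 ** 2
    return max(1, min(desired, int(available_mb / mem_per_process_mb)))
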
Example #5
def match_images(data, ref_images, cand_images, overwrite):
    """ Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.

    If 'overwrite' is set to True, matches of a given images will be
    overwritten with the new ones, if False, they're going to be updated,
    keeping the previous ones.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)
    logger.info('Matching {} image pairs'.format(len(pairs)))

    # Store per each image in ref for processing
    per_image = {im: [] for im in ref_images}
    for im1, im2 in pairs:
        per_image[im1].append(im2)

    ctx = Context()
    ctx.data = data
    ctx.cameras = ctx.data.load_camera_models()
    ctx.exifs = exifs
    ctx.overwrite = overwrite
    args = list(match_arguments(per_image, ctx))

    # Perform all pair matchings in parallel
    start = timer()
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config['processes'],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.debug('Matched {} pairs in {} seconds.'.format(
        len(pairs),
        timer() - start))

    # Index results per pair
    pairs = {}
    for im1, im1_matches in matches:
        for im2, m in im1_matches.items():
            pairs[im1, im2] = m

    return pairs, preport
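
A small illustration of the overwrite semantics described in the docstring of Example #5. The helper below is hypothetical and only mimics the documented behaviour; the real code persists matches through the data object.

def store_matches_sketch(existing, new, overwrite):
    # overwrite=True: drop whatever was stored for the image before;
    # overwrite=False: keep previous matches and add/update with the new ones.
    if overwrite:
        return dict(new)
    merged = dict(existing)
    merged.update(new)
    return merged

old = {"02.jpg": [(0, 1)]}
new = {"03.jpg": [(2, 5)]}
assert store_matches_sketch(old, new, overwrite=True) == new
assert store_matches_sketch(old, new, overwrite=False) == {"02.jpg": [(0, 1)], "03.jpg": [(2, 5)]}
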
Example #6
def match_images(data, ref_images, cand_images):
    """ Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.
    """
    config = data.config
    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images, cand_images, exifs, data)

    # Match them!
    if config["feature_type"] == "SIFT_GPU":
        return match_images_with_pairs_gpu(data, exifs, ref_images,
                                           pairs), preport
    return match_images_with_pairs(data, exifs, ref_images, pairs), preport
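
A hedged calling sketch for the variant in Example #6: the GPU path is chosen purely from the dataset's feature_type setting, so the call site itself does not change. The dataset path is a placeholder.

from opensfm import dataset  # assumption: the same module as used in Example #7

data = dataset.DataSet("path/to/dataset")   # hypothetical path
imgs = sorted(data.images())
# With feature_type set to "SIFT_GPU" in the dataset config, this dispatches
# to match_images_with_pairs_gpu; otherwise to match_images_with_pairs.
pairs, report = match_images(data, imgs[:1], imgs[1:])
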
Example #7
    def run(self, args):
        # Load the dataset and select candidate pairs from image metadata
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        pairs, preport = pairs_selection.match_candidates_from_metadata(
            images, images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        args = list(match_arguments(pairs, ctx))

        # Match all selected pairs in parallel and time the run
        start = timer()
        processes = ctx.data.config['processes']
        parallel_map(match, args, processes)
        end = timer()

        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
        self.write_report(data, preport, pairs, end - start)
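
Example #7 appends a "name: seconds" line to the dataset's profile log. The reader below is a hedged sketch for parsing those timings back, assuming every line follows that format; the helper name is hypothetical.

def read_profile_log_sketch(path):
    # Parse lines such as "match_features: 12.34" into {step: seconds}.
    timings = {}
    with open(path) as fin:
        for line in fin:
            name, sep, value = line.partition(":")
            if sep and value.strip():
                timings[name.strip()] = float(value)
    return timings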