Example #1
def _compute_pair_reconstructability(args):
    log.setup()
    im1, im2, p1, p2, camera1, camera2, threshold = args
    R, inliers = two_view_reconstruction_rotation_only(
        p1, p2, camera1, camera2, threshold)
    r = pairwise_reconstructability(len(p1), len(inliers))
    return (im1, im2, r)
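These single-tuple wrappers exist so that a process pool, which passes exactly one argument per call, can map over them. A minimal driver sketch, assuming the wrapper above is importable; the pair list and pool size are illustrative, not from the source:

from multiprocessing import Pool

def compute_all_pairs(pairs, threshold, processes=4):
    # Each work item is the tuple _compute_pair_reconstructability expects.
    args = [(im1, im2, p1, p2, cam1, cam2, threshold)
            for im1, im2, p1, p2, cam1, cam2 in pairs]
    with Pool(processes) as pool:
        return pool.map(_compute_pair_reconstructability, args)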
Example #2
def prune_depthmap(arguments):
    """Prune depthmap to remove redundant points."""
    log.setup()

    data: UndistortedDataSet = arguments[0]
    neighbors = arguments[1]
    shot = arguments[2]

    if data.pruned_depthmap_exists(shot.id):
        logger.info("Using precomputed pruned depthmap {}".format(shot.id))
        return
    logger.info("Pruning depthmap for image {}".format(shot.id))

    dp = pydense.DepthmapPruner()
    dp.set_same_depth_threshold(data.config["depthmap_same_depth_threshold"])
    add_views_to_depth_pruner(data, neighbors, dp)
    points, normals, colors, labels, detections = dp.prune()

    # Save and display results
    data.save_pruned_depthmap(shot.id, points, normals, colors, labels,
                              detections)

    if data.config["depthmap_save_debug_files"]:
        data.save_point_cloud(points, normals, colors, labels, detections,
                              "pruned.npz.ply")
Example #3
def prune_depthmap(arguments):
    """Prune depthmap to remove redundant points."""
    log.setup()

    data, udata, neighbors, shot = arguments

    # if udata.pruned_depthmap_exists(shot.id):
    #     logger.info("Using precomputed pruned depthmap {}".format(shot.id))
    #     return
    logger.info("Pruning depthmap for image {}".format(shot.id))

    dp = pydense.DepthmapPruner()
    dp.set_same_depth_threshold(udata.config['depthmap_same_depth_threshold'])
    add_views_to_depth_pruner(data, udata, neighbors, dp)
    points, normals, colors, labels, detections = dp.prune()

    # Save and display results
    udata.save_pruned_depthmap(shot.id, points, normals, colors, labels,
                               detections)
    depthmap = {
        'points': points,
        'normals': normals,
        'colors': colors,
        'labels': labels,
        'detections': detections,
    }
    data.save_pruned_depthmap(shot.id, depthmap)

    if udata.config['depthmap_save_debug_files']:
        ply_line = pruned_point_cloud_to_ply(points, normals, colors, labels,
                                             detections)
        data.save_ply_line(shot.id, ply_line)
        with io.open_wt(udata._depthmap_file(shot.id, 'pruned.npz.ply')) as fp:
            point_cloud_to_ply(points, normals, colors, labels, detections, fp)
Example #4
def match_unwrap_args(args):
    """Wrapper for parallel processing of pair matching.

    Compute all pair matchings of a given image and save them.
    """
    log.setup()
    im1, candidates, ctx = args

    im1_matches = {}
    p1, f1, _ = feature_loader.instance.load_points_features_colors(
        ctx.data, im1)
    camera1 = ctx.cameras[ctx.exifs[im1]['camera']]

    for im2 in candidates:
        p2, f2, _ = feature_loader.instance.load_points_features_colors(
            ctx.data, im2)
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        im1_matches[im2] = match(im1, im2, camera1, camera2, ctx.data)

    num_matches = sum(1 for m in im1_matches.values() if len(m) > 0)
    logger.debug('Image {} matches: {} out of {}'.format(
        im1, num_matches, len(candidates)))

    return im1, im1_matches
Example #5
def match_unwrap_args(
    args: Tuple[
        str,
        str,
        Dict[str, pygeometry.Camera],
        Dict[str, Any],
        DataSetBase,
        Dict[str, Any],
        Optional[Dict[str, pygeometry.Pose]],
    ]
) -> Tuple[str, str, np.ndarray]:
    """Wrapper for parallel processing of pair matching.

    Compute all pair matchings of a given image and save them.
    """
    log.setup()
    im1 = args[0]
    im2 = args[1]
    cameras = args[2]
    exifs = args[3]
    data: DataSetBase = args[4]
    config_override = args[5]
    poses = args[6]
    if poses:
        pose1 = poses[im1]
        pose2 = poses[im2]
        pose = pose2.relative_to(pose1)
    else:
        pose = None
    camera1 = cameras[exifs[im1]["camera"]]
    camera2 = cameras[exifs[im2]["camera"]]
    matches = match(im1, im2, camera1, camera2, data, config_override, pose)
    return im1, im2, matches
Example #6
def clean_depthmap(arguments):
    """Clean depthmap by checking consistency with neighbors."""
    log.setup()

    data, neighbors, shot = arguments

    if data.clean_depthmap_exists(shot.id):
        logger.info("Using precomputed clean depthmap {}".format(shot.id))
        return
    logger.info("Cleaning depthmap for image {}".format(shot.id))

    dc = csfm.DepthmapCleaner()
    dc.set_same_depth_threshold(data.config['depthmap_same_depth_threshold'])
    dc.set_min_consistent_views(data.config['depthmap_min_consistent_views'])
    add_views_to_depth_cleaner(data, neighbors, dc)
    depth = dc.clean()

    # Save and display results
    raw_depth, raw_plane, raw_score, raw_nghbr, nghbrs = data.load_raw_depthmap(
        shot.id)
    data.save_clean_depthmap(shot.id, depth, raw_plane, raw_score)

    if data.config['depthmap_save_debug_files']:
        image = data.load_undistorted_image(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with io.open_wt(data._depthmap_file(shot.id, 'clean.npz.ply')) as fout:
            fout.write(ply)
Example #7
def undistort_image(arguments):
    log.setup()

    shot, undistorted_shots, data = arguments
    logger.debug('Undistorting image {}'.format(shot.id))

    if shot.camera.projection_type == 'perspective':
        image = data.image_as_array(shot.id)
        undistorted = undistort_perspective_image(image, shot.camera)
        data.save_undistorted_image(shot.id, undistorted)
    elif shot.camera.projection_type == 'brown':
        image = data.image_as_array(shot.id)
        new_camera = undistorted_shots[0].camera
        undistorted = undistort_brown_image(image, shot.camera, new_camera)
        data.save_undistorted_image(shot.id, undistorted)
    elif shot.camera.projection_type == 'fisheye':
        image = data.image_as_array(shot.id)
        undistorted = undistort_fisheye_image(image, shot.camera)
        data.save_undistorted_image(shot.id, undistorted)
    elif shot.camera.projection_type in ['equirectangular', 'spherical']:
        original = data.image_as_array(shot.id)
        subshot_width = int(data.config['depthmap_resolution'])
        width = 4 * subshot_width
        height = width // 2  # integer division: cv2.resize needs int sizes
        image = cv2.resize(original, (width, height),
                           interpolation=cv2.INTER_AREA)
        for subshot in undistorted_shots:
            undistorted = render_perspective_view_of_a_panorama(
                image, shot, subshot)
            data.save_undistorted_image(subshot.id, undistorted)
    else:
        raise NotImplementedError(
            'Undistort not implemented for projection type: {}'.format(
                shot.camera.projection_type))
Example #8
def undistort_image_and_masks(arguments) -> None:
    shot, undistorted_shots, data, udata = arguments
    log.setup()
    logger.debug("Undistorting image {}".format(shot.id))
    max_size = data.config["undistorted_image_max_size"]

    # Undistort image
    image = data.load_image(shot.id, unchanged=True, anydepth=True)
    if image is not None:
        undistorted = undistort_image(
            shot, undistorted_shots, image, cv2.INTER_AREA, max_size
        )
        for k, v in undistorted.items():
            udata.save_undistorted_image(k, v)

    # Undistort mask
    mask = data.load_mask(shot.id)
    if mask is not None:
        undistorted = undistort_image(
            shot, undistorted_shots, mask, cv2.INTER_NEAREST, max_size
        )
        for k, v in undistorted.items():
            udata.save_undistorted_mask(k, v)

    # Undistort segmentation
    segmentation = data.load_segmentation(shot.id)
    if segmentation is not None:
        undistorted = undistort_image(
            shot, undistorted_shots, segmentation, cv2.INTER_NEAREST, max_size
        )
        for k, v in undistorted.items():
            udata.save_undistorted_segmentation(k, v)
Example #9
def undistort_image(arguments):
    log.setup()

    shot, undistorted_shots, data = arguments
    logger.debug('Undistorting image {}'.format(shot.id))

    if shot.camera.projection_type == 'perspective':
        image = data.image_as_array(shot.id)
        undistorted = undistort_perspective_image(image, shot.camera)
        data.save_undistorted_image(shot.id, undistorted)
    elif shot.camera.projection_type == 'brown':
        image = data.image_as_array(shot.id)
        new_camera = undistorted_shots[0].camera
        undistorted = undistort_brown_image(image, shot.camera, new_camera)
        data.save_undistorted_image(shot.id, undistorted)
    elif shot.camera.projection_type == 'fisheye':
        image = data.image_as_array(shot.id)
        undistorted = undistort_fisheye_image(image, shot.camera)
        data.save_undistorted_image(shot.id, undistorted)
    elif shot.camera.projection_type in ['equirectangular', 'spherical']:
        original = data.image_as_array(shot.id)
        subshot_width = int(data.config['depthmap_resolution'])
        width = 4 * subshot_width
        height = width // 2
        image = cv2.resize(original, (width, height), interpolation=cv2.INTER_AREA)
        for subshot in undistorted_shots:
            undistorted = render_perspective_view_of_a_panorama(
                image, shot, subshot)
            data.save_undistorted_image(subshot.id, undistorted)
    else:
        raise NotImplementedError(
            'Undistort not implemented for projection type: {}'.format(
                shot.camera.projection_type))
Example #10
def command_runner(all_commands_types, dataset_factory):
    """ Main entry point for running the passed SfM commands types."""
    log.setup()

    # Create the top-level parser
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help="Command to run",
                                       dest="command",
                                       metavar="command")

    command_objects = [c.Command() for c in all_commands_types]

    for command in command_objects:
        subparser = subparsers.add_parser(command.name, help=command.help)
        command.add_arguments(subparser)

    # Parse arguments
    args = parser.parse_args()

    # Instantiate dataset
    data = dataset_factory(args.dataset)

    # Run the selected subcommand
    for command in command_objects:
        if args.command == command.name:
            command.run(data, args)
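
command_runner assumes each entry of all_commands_types exposes a Command class with name, help, add_arguments, and run; it also reads args.dataset, so every command must accept a dataset argument. A minimal sketch of such a command (name and behavior are illustrative, not from the source):

import argparse

class Command:
    name = "hello"
    help = "Toy command for command_runner"

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        # command_runner passes args.dataset to dataset_factory.
        parser.add_argument("dataset", help="dataset path")

    def run(self, data, args) -> None:
        print("Running", self.name, "on", args.dataset)

Example #11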
def compute_depthmap(arguments):
    """Compute depthmap for a single shot."""
    log.setup()

    data, neighbors, min_depth, max_depth, shot = arguments
    method = data.config['depthmap_method']

    if data.raw_depthmap_exists(shot.id):
        logger.info("Using precomputed raw depthmap {}".format(shot.id))
        return
    logger.info("Computing depthmap for image {0} with {1}".format(shot.id, method))

    de = csfm.DepthmapEstimator()
    de.set_depth_range(min_depth, max_depth, 100)
    de.set_patchmatch_iterations(data.config['depthmap_patchmatch_iterations'])
    de.set_min_patch_sd(data.config['depthmap_min_patch_sd'])
    add_views_to_depth_estimator(data, neighbors, de)

    if method == 'BRUTE_FORCE':
        depth, plane, score, nghbr = de.compute_brute_force()
    elif method == 'PATCH_MATCH':
        depth, plane, score, nghbr = de.compute_patch_match()
    elif method == 'PATCH_MATCH_SAMPLE':
        depth, plane, score, nghbr = de.compute_patch_match_sample()
    else:
        raise ValueError(
            'Unknown depthmap method type '
            '(must be BRUTE_FORCE, PATCH_MATCH or PATCH_MATCH_SAMPLE)')

    good_score = score > data.config['depthmap_min_correlation_score']
    depth = depth * (depth < max_depth) * good_score

    # Save and display results
    neighbor_ids = [i.id for i in neighbors[1:]]
    data.save_raw_depthmap(shot.id, depth, plane, score, nghbr, neighbor_ids)

    if data.config['depthmap_save_debug_files']:
        image = data.undistorted_image_as_array(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with open(data._depthmap_file(shot.id, 'raw.npz.ply'), 'w') as fout:
            fout.write(ply)

    if data.config.get('interactive'):
        import matplotlib.pyplot as plt
        plt.figure()
        plt.suptitle("Shot: " + shot.id + ", neighbors: " + ', '.join(neighbor_ids))
        plt.subplot(2, 3, 1)
        plt.imshow(image)
        plt.subplot(2, 3, 2)
        plt.imshow(color_plane_normals(plane))
        plt.subplot(2, 3, 3)
        plt.imshow(depth)
        plt.colorbar()
        plt.subplot(2, 3, 4)
        plt.imshow(score)
        plt.colorbar()
        plt.subplot(2, 3, 5)
        plt.imshow(nghbr)
        plt.colorbar()
        plt.show()
Example #12
def detect(args):
    log.setup()

    image, data = args
    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    if not data.feature_index_exists(image):
        mask = data.mask_as_array(image)
        if mask is not None:
            logger.info('Found mask to apply for image {}'.format(image))
        preemptive_max = data.config.get('preemptive_max', 200)
        p_unsorted, f_unsorted, c_unsorted = features.extract_features(
            data.image_as_array(image), data.config, mask)
        if len(p_unsorted) == 0:
            return

        size = p_unsorted[:, 2]
        order = np.argsort(size)
        p_sorted = p_unsorted[order, :]
        f_sorted = f_unsorted[order, :]
        c_sorted = c_unsorted[order, :]
        p_pre = p_sorted[-preemptive_max:]
        f_pre = f_sorted[-preemptive_max:]
        data.save_features(image, p_sorted, f_sorted, c_sorted)
        data.save_preemptive_features(image, p_pre, f_pre)

        if data.config.get('matcher_type', 'FLANN') == 'FLANN':
            index = features.build_flann_index(f_sorted, data.config)
            data.save_feature_index(image, index)
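
Note how the slice keeps the largest features: np.argsort orders by ascending scale (the third column of the point array), so p_sorted[-preemptive_max:] is the preemptive_max biggest ones. A toy illustration with made-up sizes:

import numpy as np

sizes = np.array([2.0, 5.0, 1.0, 4.0])
order = np.argsort(sizes)    # ascending -> [2, 0, 3, 1]
print(sizes[order][-2:])     # the two largest: [4. 5.]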
Example #13
def match_unwrap_args(args):
    """Wrapper for parallel processing of pair matching.

    Compute all pair matchings of a given image and save them.
    """
    log.setup()
    im1 = args[0]
    candidates = args[1]
    data: DataSetBase = args[2]
    config_override = args[3]
    cameras = args[4]
    exifs = args[5]

    im1_matches = {}
    camera1 = cameras[exifs[im1]["camera"]]

    for im2 in candidates:
        camera2 = cameras[exifs[im2]["camera"]]
        im1_matches[im2] = match(im1, im2, camera1, camera2, data, config_override)

    num_matches = sum(1 for m in im1_matches.values() if len(m) > 0)
    logger.debug(
        "Image {} matches: {} out of {}".format(im1, num_matches, len(candidates))
    )

    return im1, im1_matches
Example #14
def command_runner(all_commands_types: List[Any], dataset_factory: Callable,
                   dataset_choices: List[str]) -> None:
    """ Main entry point for running the passed SfM commands types."""
    log.setup()

    # Create the top-level parser
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help="Command to run",
                                       dest="command",
                                       metavar="command")

    command_objects = [c.Command() for c in all_commands_types]

    for command in command_objects:
        subparser = subparsers.add_parser(command.name, help=command.help)
        command.add_arguments(subparser)
        subparser.add_argument(
            "--dataset-type",
            type=str,
            required=False,
            default="opensfm",
            choices=dataset_choices,
        )

    # Parse arguments
    args = parser.parse_args()

    # Instantiate dataset
    with dataset_factory(args.dataset, args.dataset_type) as data:
        # Run the selected subcommand
        for command in command_objects:
            if args.command == command.name:
                command.run(data, args)
Example #15
def detect(args):
    image, data = args

    log.setup()

    need_words = (data.config['matcher_type'] == 'WORDS'
                  or data.config['matching_bow_neighbors'] > 0)
    need_flann = data.config['matcher_type'] == 'FLANN'
    has_words = not need_words or data.words_exist(image)
    has_flann = not need_flann or data.feature_index_exists(image)
    has_features = data.features_exist(image)

    if has_features and has_flann and has_words:
        logger.info('Skip recomputing {} features for image {}'.format(
            data.feature_type().upper(), image))
        return

    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    start = timer()

    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        data.load_image(image), data.config)

    fmask = data.load_features_mask(image, p_unmasked)

    p_unsorted = p_unmasked[fmask]
    f_unsorted = f_unmasked[fmask]
    c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning('No features found in image {}'.format(image))
        return

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    data.save_features(image, p_sorted, f_sorted, c_sorted)

    if need_flann:
        index = features.build_flann_index(f_sorted, data.config)
        data.save_feature_index(image, index)
    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config['bow_words_to_match']
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          data.config['bow_matcher_type'])
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), 'features/{}.json'.format(image))
Example #16
def _compute_pair_reconstructability(
        args: TPairArguments) -> Tuple[str, str, float]:
    log.setup()
    im1, im2, p1, p2, camera1, camera2, threshold = args
    R, inliers = two_view_reconstruction_rotation_only(p1, p2, camera1,
                                                       camera2, threshold)
    r = pairwise_reconstructability(len(p1), len(inliers))
    return (im1, im2, r)
Example #17
def undistort_image_and_masks(arguments):
    shot, undistorted_shots, opensfm_config, udata, file_path, imageFilter, self_compute, self_path = arguments
    log.setup()
    logger.debug('Undistorting image {}'.format(shot.id))

    # Undistort image
    base_path = self_path if self_compute else file_path
    image = opensfm_interface.load_image(os.path.join(base_path, 'images',
                                                      shot.id),
                                         unchanged=True,
                                         anydepth=True)
    if image is not None:
        if imageFilter is not None:
            image = imageFilter(shot.id, image)
        max_size = opensfm_config['undistorted_image_max_size']
        undistorted = undistort_image(shot, undistorted_shots, image,
                                      cv2.INTER_AREA, max_size)
        for k, v in undistorted.items():
            udata.save_undistorted_image(k, v)

    # Undistort mask
    mask_files = {}
    mask = opensfm_interface.load_mask(mask_files, shot.id)
    if mask is not None:
        undistorted = undistort_image(shot, undistorted_shots, mask,
                                      cv2.INTER_NEAREST, 1e9)
        for k, v in undistorted.items():
            udata.save_undistorted_mask(k, v)

    # Undistort segmentation
    segmentation = opensfm_interface.load_segmentation(file_path, shot.id)

    if segmentation is not None:
        undistorted = undistort_image(shot, undistorted_shots, segmentation,
                                      cv2.INTER_NEAREST, 1e9)
        for k, v in undistorted.items():
            udata.save_undistorted_segmentation(k, v)

    # Undistort detections
    detection = opensfm_interface.load_detection(
        self_path if self_compute else file_path, shot.id)

    if detection is not None:
        undistorted = undistort_image(shot, undistorted_shots, detection,
                                      cv2.INTER_NEAREST, 1e9)
        for k, v in undistorted.items():
            udata.save_undistorted_detection(k, v)
Example #18
def match(args):
    """Compute all matches for a single image"""
    log.setup()

    im1, candidates, i, n, ctx = args
    logger.info('Matching {}  -  {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    lowes_ratio = config['lowes_ratio']

    im1_all_robust_matches = {}
    im1_valid_rmatches = {}
    im1_T = {}
    im1_F = {}
    im1_valid_inliers = {}

    im1_fmr = ctx.data.load_feature_matching_results(im1)
    p1, f1, c1 = ctx.data.load_features(im1)

    for im2 in candidates:
        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        unthresholded_matches = im1_fmr[im2]
        # Stack match indices, distance ratios and scores as columns.
        classified_matches = np.column_stack((
            np.array(unthresholded_matches['indices1']),
            np.array(unthresholded_matches['indices2']),
            np.array(unthresholded_matches['distances1']),
            np.array(unthresholded_matches['distances2']),
            np.array(unthresholded_matches['scores']),
        ))

        lowes_matches_indices = np.where(
            (classified_matches[:, 2] <= lowes_ratio)
            & (classified_matches[:, 3] <= lowes_ratio))[0]
        relevant_feature_matching_indices = np.argsort(
            classified_matches[:, 4])[::-1][:len(lowes_matches_indices)]
        thresholded_matches = classified_matches[
            relevant_feature_matching_indices, :]
        p2, f2, c2 = ctx.data.load_features(im2)
        rmatches, T, F, validity = classifier.robust_match_fundamental_weighted(
            p1, p2, thresholded_matches, config)

        im1_all_robust_matches[im2] = thresholded_matches
        im1_valid_rmatches[im2] = 1
        im1_T[im2] = T.tolist()
        im1_F[im2] = F.tolist()
        im1_valid_inliers[im2] = validity

        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(thresholded_matches),
            timer() - t_robust_matching))

    ctx.data.save_weighted_matches(im1, im1_all_robust_matches)
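Columns 2 and 3 of classified_matches hold distances1 and distances2, which the code compares against lowes_ratio in both directions. A toy version of that ratio test with made-up rows:

import numpy as np

classified = np.array([[0., 1., 0.60, 0.70, 0.9],
                       [1., 2., 0.90, 0.95, 0.2]])
lowes_ratio = 0.8
keep = np.where((classified[:, 2] <= lowes_ratio)
                & (classified[:, 3] <= lowes_ratio))[0]
print(keep)  # [0] - only the first row passes both tests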
Example #19
def match(args):
    """Compute all matches for a single image"""
    log.setup()

    im1, candidates, i, n, ctx = args
    logger.info('Matching {}  -  {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    matcher_type = config['matcher_type']
    robust_matching_min_match = config['robust_matching_min_match']

    im1_matches = {}

    for im2 in candidates:
        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if matcher_type == 'WORDS':
            w1 = ctx.data.load_words(im1)
            w2 = ctx.data.load_words(im2)
            matches = matching.match_words_symmetric(f1, w1, f2, w2, config)
        elif matcher_type == 'FLANN':
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
            matches = matching.match_flann_symmetric(f1, i1, f2, i2, config)
        elif matcher_type == 'BRUTEFORCE':
            matches = matching.match_brute_force_symmetric(f1, f2, config)
        else:
            raise ValueError("Invalid matcher_type: {}".format(matcher_type))

        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(timer() -
                                                          t_robust_matching))

        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches),
            timer() - t))
    ctx.data.save_matches(im1, im1_matches)
Example #20
def detect(args):
    image, data = args

    log.setup()

    need_words = (data.config["matcher_type"] == "WORDS"
                  or data.config["matching_bow_neighbors"] > 0)
    has_words = not need_words or data.words_exist(image)
    has_features = data.features_exist(image)

    if has_features and has_words:
        logger.info("Skip recomputing {} features for image {}".format(
            data.feature_type().upper(), image))
        return

    logger.info("Extracting {} features for image {}".format(
        data.feature_type().upper(), image))

    start = timer()

    image_array = data.load_image(image)
    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        image_array, data.config, is_high_res_panorama(data, image,
                                                       image_array))

    fmask = data.load_features_mask(image, p_unmasked)

    p_unsorted = p_unmasked[fmask]
    f_unsorted = f_unmasked[fmask]
    c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning("No features found in image {}".format(image))

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    data.save_features(image, p_sorted, f_sorted, c_sorted)

    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config["bow_words_to_match"]
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          data.config["bow_matcher_type"])
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), "features/{}.json".format(image))
Example #21
def clean_depthmap(arguments):
    """Clean depthmap by checking consistency with neighbors."""
    log.setup()

    data, neighbors, shot, file_pathx, self_compute, self_path = arguments

    if data.clean_depthmap_exists(shot.id):
        logger.info("Using precomputed clean depthmap {}".format(shot.id))
        return
    logger.info("Cleaning depthmap for image {}".format(shot.id))

    dc = csfm.DepthmapCleaner()
    dc.set_same_depth_threshold(data.config['depthmap_same_depth_threshold'])
    dc.set_min_consistent_views(data.config['depthmap_min_consistent_views'])
    add_views_to_depth_cleaner(data, neighbors, dc, self_compute)
    depth = dc.clean()

    # Save and display results
    if self_compute:
        udata = opensfm_interface.UndistortedDataSet(data.path, data.config,
                                                     'undistorted')
        get_raw = udata.load_raw_depthmap(shot.id)
    else:
        get_raw = data.load_raw_depthmap(shot.id)

    raw_depth, raw_plane, raw_score, raw_nghbr, nghbrs = get_raw

    data.save_clean_depthmap(shot.id, depth, raw_plane, raw_score)

    if data.config['depthmap_save_debug_files']:
        image = data.load_undistorted_image(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with io.open_wt(data._depthmap_file(shot.id, 'clean.npz.ply')) as fout:
            fout.write(ply)

    if data.config.get('interactive'):
        import matplotlib.pyplot as plt
        plt.figure()
        plt.suptitle("Shot: " + shot.id)
        plt.subplot(2, 2, 1)
        plt.imshow(raw_depth)
        plt.colorbar()
        plt.subplot(2, 2, 2)
        plt.imshow(depth)
        plt.colorbar()
        plt.show()
Example #22
def formulate_graph(args):
    log.setup()

    data, images, scores, criteria, edge_threshold = args
    start = timer()
    G = nx.Graph()
    for i, img1 in enumerate(sorted(images)):
        for j, img2 in enumerate(sorted(images)):
            if j <= i:
                continue
            if img1 in scores and img2 in scores[img1]:
                if criteria == 'inlier-logp':
                    inlier_logp = -np.log(scores[img1][img2])
                    if inlier_logp < -np.log(edge_threshold):
                        G.add_edge(img1, img2, weight=inlier_logp)
                elif 'cost' in criteria:
                    if scores[img1][img2] < edge_threshold:
                        G.add_edge(img1, img2, weight=scores[img1][img2])
                else:
                    if scores[img1][img2] >= edge_threshold:
                        G.add_edge(img1, img2, weight=scores[img1][img2])

    try:
        pagerank = nx.pagerank(G, alpha=0.9)
    except Exception:
        # Fall back to uniform scores if PageRank fails (e.g. no convergence).
        pagerank = {n: 1.0 for n in G.nodes()}
    lcc = nx.clustering(G, nodes=G.nodes(), weight='weight')

    for n in G.nodes():
        G.nodes[n]['pagerank'] = pagerank[n]  # G.node was removed in NetworkX 2.4
        G.nodes[n]['lcc'] = lcc[n]

    end = timer()
    report = {
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), 'similarity-graphs.json')

    return G
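
For scores in (0, 1], the inlier-logp branch keeps an edge exactly when the raw score exceeds edge_threshold, because -log is decreasing: -log(score) < -log(edge_threshold) iff score > edge_threshold. A quick numeric check:

import numpy as np

score, edge_threshold = 0.8, 0.5
inlier_logp = -np.log(score)
assert (inlier_logp < -np.log(edge_threshold)) == (score > edge_threshold)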
Example #23
def match_unwrap_args(args):
    """Wrapper for parallel processing of pair matching.

    Compute all pair matchings of a given image and save them.
    """
    log.setup()
    im1 = args[0]
    im2 = args[1]
    cameras = args[2]
    exifs = args[3]
    data: DataSetBase = args[4]
    config_override = args[5]
    camera1 = cameras[exifs[im1]["camera"]]
    camera2 = cameras[exifs[im2]["camera"]]
    matches = match(im1, im2, camera1, camera2, data, config_override)
    return im1, im2, matches
Example #24
def remove(args):
    log.setup()

    idx, mkv_dirs = args

    img_set = []
    for mkv_dir in mkv_dirs:
        img_set.append(os.path.join(mkv_dir, _int_to_shot_id(idx)))

    banding_cnt = 0
    for img_file in img_set:
        freq = csfm.is_banding_present(img_file)
        if freq != -1:
            logger.info('{}: {}Hz banding'.format(idx, freq))
            banding_cnt += 1
        else:
            logger.info('{}: no banding'.format(idx))

    logger.info('{}: banding found in {}/4 images'.format(_int_to_shot_id(idx), banding_cnt))

    if banding_cnt >= 1:
        logger.info('{}: removing banding...'.format(idx))

        for img_file in img_set:
            img_original = cv.imread(img_file)
            #csfm.run_notch_filter()
            ##img_corrected = gamma_correction(img_original)
            #img_corrected = basic_linear_transform(img_original)
            img_corrected = horizontal_banding_removal(img_original)

            # high intensity pixels' color is distorted by the filtering process above.
            # so we replace them by their original values
            grayscaled = cv.cvtColor(img_original, cv.COLOR_BGR2GRAY)
            retval, mask = cv.threshold(grayscaled, 220, 255, cv.THRESH_BINARY)
            img_corrected[mask == 255] = img_original[mask == 255]

            # there is a 72x64 pixel area in each raw image that appears to be 2D barcode
            # used by the NC Tech Immersive Studio. don't know if the size of this area
            # changes when image resolution changes (currently 3968x3008), so need to keep
            # an eye on it if/when we switch camera resolution
            img_corrected[0:64, 0:72] = img_original[0:64, 0:72]

            cv.imwrite(img_file, img_corrected)
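
The threshold-and-restore step above is a general pattern for protecting saturated pixels from a global filter. A toy version with synthetic data (values are illustrative):

import cv2 as cv
import numpy as np

original = np.full((4, 4, 3), 230, dtype=np.uint8)  # uniformly bright image
filtered = np.zeros_like(original)                  # stand-in filter output
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
_, mask = cv.threshold(gray, 220, 255, cv.THRESH_BINARY)
filtered[mask == 255] = original[mask == 255]       # bright pixels restored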
Example #25
def detect(feature_path, image_path, image, opensfm_config):

    log.setup()

    need_words = (opensfm_config['matcher_type'] == 'WORDS'
                  or opensfm_config['matching_bow_neighbors'] > 0)
    #has_words = not need_words or data.words_exist(image)
    #has_features = data.features_exist(image)

    # if has_features and has_words:
    #     logger.info('Skip recomputing {} features for image {}'.format(
    #         data.feature_type().upper(), image))
    #     return

    #logger.info('Extracting {} features for image {}'.format(data.feature_type().upper(), image))

    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        load_image(image_path), opensfm_config)

    #p_unmasked is points
    mask_files = defaultdict(lambda: None)
    fmask = load_features_mask(feature_path, image, image_path, p_unmasked,
                               mask_files, opensfm_config)

    p_unsorted = p_unmasked[fmask]
    f_unsorted = f_unmasked[fmask]
    c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        #logger.warning('No features found in image {}'.format(image))
        return

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    save_features(feature_path, opensfm_config, image, p_sorted, f_sorted,
                  c_sorted)

    if need_words:
        bows = bow.load_bows(opensfm_config)
        n_closest = opensfm_config['bow_words_to_match']
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          opensfm_config['bow_matcher_type'])
        save_words(feature_path, image_path, closest_words)
Example #26
def vt_rankings(args):
    log.setup()

    images, data = args
    libvot = data.config['libvot']

    start = timer()
    subprocess.Popen("ls -d {}/images/* > {}/vt_image_list.txt".format(data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()
    subprocess.Popen("{}/build/bin/libvot_feature -thread_num 10 -output_folder {}/sift/ {}/vt_image_list.txt".format(libvot, data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()
    subprocess.Popen("ls -d {}/sift/*.sift > {}/vt_sift_list.txt".format(data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()
    subprocess.Popen("{}/build/bin/image_search {}/vt_sift_list.txt {}/vocab_out".format(libvot, data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()

    end = timer()
    report = {
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report),
                     'rankings.json')
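
Reading .stdout of a bare Popen only serializes the four calls; failures pass silently. A more defensive variant of the same step, as a sketch (not from the source):

import subprocess

def run_cmd(cmd: str) -> None:
    # check=True raises CalledProcessError if the tool exits non-zero.
    subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE)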
Example #27
def compute_depthmap(arguments):
    """Compute depthmap for a single shot."""
    log.setup()

    data, neighbors, min_depth, max_depth, shot = arguments
    method = data.config['depthmap_method']

    if data.raw_depthmap_exists(shot.id):
        logger.info("Using precomputed raw depthmap {}".format(shot.id))
        return
    logger.info("Computing depthmap for image {0} with {1}".format(
        shot.id, method))

    de = csfm.DepthmapEstimator()
    de.set_depth_range(min_depth, max_depth, 100)
    de.set_patchmatch_iterations(data.config['depthmap_patchmatch_iterations'])
    de.set_patch_size(data.config['depthmap_patch_size'])
    de.set_min_patch_sd(data.config['depthmap_min_patch_sd'])
    add_views_to_depth_estimator(data, neighbors, de)

    if method == 'BRUTE_FORCE':
        depth, plane, score, nghbr = de.compute_brute_force()
    elif method == 'PATCH_MATCH':
        depth, plane, score, nghbr = de.compute_patch_match()
    elif method == 'PATCH_MATCH_SAMPLE':
        depth, plane, score, nghbr = de.compute_patch_match_sample()
    else:
        raise ValueError(
            'Unknown depthmap method type '
            '(must be BRUTE_FORCE, PATCH_MATCH or PATCH_MATCH_SAMPLE)')

    good_score = score > data.config['depthmap_min_correlation_score']
    depth = depth * (depth < max_depth) * good_score

    # Save and display results
    neighbor_ids = [i.id for i in neighbors[1:]]
    data.save_raw_depthmap(shot.id, depth, plane, score, nghbr, neighbor_ids)

    if data.config['depthmap_save_debug_files']:
        image = data.load_undistorted_image(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with io.open_wt(data._depthmap_file(shot.id, 'raw.npz.ply')) as fout:
            fout.write(ply)
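
The masking line depth = depth * (depth < max_depth) * good_score zeroes out pixels that are out of range or weakly correlated, since multiplying by a boolean array keeps values only where it is True. A toy illustration:

import numpy as np

depth = np.array([1.0, 5.0, 2.0])
score = np.array([0.9, 0.9, 0.1])
max_depth, min_score = 4.0, 0.5
print(depth * (depth < max_depth) * (score > min_score))  # [1. 0. 0.]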
Example #28
def clean_depthmap(arguments):
    """Clean depthmap by checking consistency with neighbors."""
    log.setup()

    data: UndistortedDataSet = arguments[0]
    neighbors = arguments[1]
    shot = arguments[2]

    if data.clean_depthmap_exists(shot.id):
        logger.info("Using precomputed clean depthmap {}".format(shot.id))
        return
    logger.info("Cleaning depthmap for image {}".format(shot.id))

    dc = pydense.DepthmapCleaner()
    dc.set_same_depth_threshold(data.config["depthmap_same_depth_threshold"])
    dc.set_min_consistent_views(data.config["depthmap_min_consistent_views"])
    add_views_to_depth_cleaner(data, neighbors, dc)
    depth = dc.clean()

    # Save and display results
    raw_depth, raw_plane, raw_score, raw_nghbr, nghbrs = data.load_raw_depthmap(
        shot.id)
    data.save_clean_depthmap(shot.id, depth, raw_plane, raw_score)

    if data.config["depthmap_save_debug_files"]:
        image = data.load_undistorted_image(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with io.open_wt(data.depthmap_file(shot.id, "clean.npz.ply")) as fout:
            fout.write(ply)

    if data.config.get("interactive"):
        import matplotlib.pyplot as plt

        plt.figure()
        plt.suptitle("Shot: " + shot.id)
        plt.subplot(2, 2, 1)
        plt.imshow(raw_depth)
        plt.colorbar()
        plt.subplot(2, 2, 2)
        plt.imshow(depth)
        plt.colorbar()
        plt.show()
Example #29
def match_unwrap_args(args):
    """ Wrapper for parralel processing of pair matching

    Compute all pair matchings of a given image and save them.
    """
    log.setup()
    im1, candidates, ctx = args

    need_words = 'WORDS' in ctx.data.config['matcher_type']
    need_index = 'FLANN' in ctx.data.config['matcher_type']

    im1_matches = {}
    p1, f1, _ = feature_loader.load_points_features_colors(ctx.data, im1)
    w1 = feature_loader.load_words(ctx.data, im1) if need_words else None
    m1 = feature_loader.load_masks(ctx.data, im1)
    camera1 = ctx.cameras[ctx.exifs[im1]['camera']]

    f1_filtered = f1 if m1 is None else f1[m1]
    i1 = feature_loader.load_features_index(
        ctx.data, im1, f1_filtered) if need_index else None

    for im2 in candidates:
        p2, f2, _ = feature_loader.load_points_features_colors(ctx.data, im2)
        w2 = feature_loader.load_words(ctx.data, im2) if need_words else None
        m2 = feature_loader.load_masks(ctx.data, im2)
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        f2_filtered = f2 if m2 is None else f2[m2]
        i2 = feature_loader.load_features_index(
            ctx.data, im2, f2_filtered) if need_index else None

        im1_matches[im2] = match(im1, im2, camera1, camera2, p1, p2, f1, f2,
                                 w1, w2, i1, i2, m1, m2, ctx.data)

    num_matches = sum(1 for m in im1_matches.values() if len(m) > 0)
    logger.debug('Image {} matches: {} out of {}'.format(
        im1, num_matches, len(candidates)))

    all_im1_matches = {} if ctx.overwrite else ctx.data.load_matches(im1)
    all_im1_matches.update(im1_matches)
    ctx.data.save_matches(im1, all_im1_matches)
    return im1, im1_matches
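
The m1/m2 masks are boolean arrays over features; indexing with them keeps only unmasked rows before the FLANN index is built. A toy version of that filtering:

import numpy as np

f1 = np.arange(12, dtype=np.float32).reshape(4, 3)  # 4 descriptors
m1 = np.array([True, False, True, True])
f1_filtered = f1 if m1 is None else f1[m1]
print(f1_filtered.shape)  # (3, 3) - one descriptor masked out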
Example #30
def match_unwrap_args(args):
    """Wrapper for parallel processing of pair matching.

    Compute all pair matchings of a given image and save them.
    """
    log.setup()
    im1, candidates, ctx = args

    im1_matches = {}
    camera1 = ctx.cameras[ctx.exifs[im1]['camera']]

    for im2 in candidates:
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
        im1_matches[im2] = match(im1, im2, camera1, camera2, ctx.data)

    num_matches = sum(1 for m in im1_matches.values() if len(m) > 0)
    logger.debug('Image {} matches: {} out of {}'.format(
        im1, num_matches, len(candidates)))

    return im1, im1_matches
Example #31
def detect(args):
    log.setup()

    image, data = args
    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    if not data.feature_index_exists(image):
        start = timer()
        mask = data.mask_as_array(image)
        if mask is not None:
            logger.info('Found mask to apply for image {}'.format(image))
        preemptive_max = data.config['preemptive_max']
        p_unsorted, f_unsorted, c_unsorted = features.extract_features(
            data.image_as_array(image), data.config, mask)
        if len(p_unsorted) == 0:
            return

        size = p_unsorted[:, 2]
        order = np.argsort(size)
        p_sorted = p_unsorted[order, :]
        f_sorted = f_unsorted[order, :]
        c_sorted = c_unsorted[order, :]
        p_pre = p_sorted[-preemptive_max:]
        f_pre = f_sorted[-preemptive_max:]
        data.save_features(image, p_sorted, f_sorted, c_sorted)
        data.save_preemptive_features(image, p_pre, f_pre)

        if data.config['matcher_type'] == 'FLANN':
            index = features.build_flann_index(f_sorted, data.config)
            data.save_feature_index(image, index)

        end = timer()
        report = {
            "image": image,
            "num_features": len(p_sorted),
            "wall_time": end - start,
        }
        data.save_report(io.json_dumps(report),
                         'features/{}.json'.format(image))
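Example #32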
def detect(args):
    log.setup()

    image, data = args
    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    if not data.feature_index_exists(image):
        start = timer()
        mask = data.load_combined_mask(image)
        if mask is not None:
            logger.info('Found mask to apply for image {}'.format(image))
        preemptive_max = data.config['preemptive_max']
        p_unsorted, f_unsorted, c_unsorted = features.extract_features(
            data.load_image(image), data.config, mask)
        if len(p_unsorted) == 0:
            return

        size = p_unsorted[:, 2]
        order = np.argsort(size)
        p_sorted = p_unsorted[order, :]
        f_sorted = f_unsorted[order, :]
        c_sorted = c_unsorted[order, :]
        p_pre = p_sorted[-preemptive_max:]
        f_pre = f_sorted[-preemptive_max:]
        data.save_features(image, p_sorted, f_sorted, c_sorted)
        data.save_preemptive_features(image, p_pre, f_pre)

        if data.config['matcher_type'] == 'FLANN':
            index = features.build_flann_index(f_sorted, data.config)
            data.save_feature_index(image, index)

        end = timer()
        report = {
            "image": image,
            "num_features": len(p_sorted),
            "wall_time": end - start,
        }
        data.save_report(io.json_dumps(report),
                         'features/{}.json'.format(image))
Example #33
def clean_depthmap(arguments):
    """Clean depthmap by checking consistency with neighbors."""
    log.setup()

    data, neighbors, shot = arguments

    if data.clean_depthmap_exists(shot.id):
        logger.info("Using precomputed clean depthmap {}".format(shot.id))
        return
    logger.info("Cleaning depthmap for image {}".format(shot.id))

    dc = csfm.DepthmapCleaner()
    dc.set_same_depth_threshold(data.config['depthmap_same_depth_threshold'])
    dc.set_min_consistent_views(data.config['depthmap_min_consistent_views'])
    add_views_to_depth_cleaner(data, neighbors, dc)
    depth = dc.clean()

    # Save and display results
    raw_depth, raw_plane, raw_score, raw_nghbr, nghbrs = data.load_raw_depthmap(shot.id)
    data.save_clean_depthmap(shot.id, depth, raw_plane, raw_score)

    if data.config['depthmap_save_debug_files']:
        image = data.undistorted_image_as_array(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with io.open_wt(data._depthmap_file(shot.id, 'clean.npz.ply')) as fout:
            fout.write(ply)

    if data.config.get('interactive'):
        import matplotlib.pyplot as plt
        plt.figure()
        plt.suptitle("Shot: " + shot.id)
        plt.subplot(2, 2, 1)
        plt.imshow(raw_depth)
        plt.colorbar()
        plt.subplot(2, 2, 2)
        plt.imshow(depth)
        plt.colorbar()
        plt.show()
Example #34
def prune_depthmap(arguments):
    """Prune depthmap to remove redundant points."""
    log.setup()

    data, neighbors, shot = arguments

    if data.pruned_depthmap_exists(shot.id):
        logger.info("Using precomputed pruned depthmap {}".format(shot.id))
        return
    logger.info("Pruning depthmap for image {}".format(shot.id))

    dp = csfm.DepthmapPruner()
    dp.set_same_depth_threshold(data.config['depthmap_same_depth_threshold'])
    add_views_to_depth_pruner(data, neighbors, dp)
    points, normals, colors = dp.prune()

    # Save and display results
    data.save_pruned_depthmap(shot.id, points, normals, colors)

    if data.config['depthmap_save_debug_files']:
        ply = point_cloud_to_ply(points, normals, colors)
        with io.open_wt(data._depthmap_file(shot.id, 'pruned.npz.ply')) as fout:
            fout.write(ply)
Example #35
def undistort_image_and_masks(arguments):
    shot, undistorted_shots, data = arguments
    log.setup()
    logger.debug('Undistorting image {}'.format(shot.id))

    # Undistort image
    image = data.load_image(shot.id)
    if image is not None:
        max_size = data.config['undistorted_image_max_size']
        undistorted = undistort_image(shot, undistorted_shots, image,
                                      cv2.INTER_AREA, max_size)
        for k, v in undistorted.items():
            data.save_undistorted_image(k, v)

    # Undistort mask
    mask = data.load_mask(shot.id)
    if mask is not None:
        undistorted = undistort_image(shot, undistorted_shots, mask,
                                      cv2.INTER_NEAREST, 1e9)
        for k, v in undistorted.items():
            data.save_undistorted_mask(k, v)

    # Undistort segmentation
    segmentation = data.load_segmentation(shot.id)
    if segmentation is not None:
        undistorted = undistort_image(shot, undistorted_shots, segmentation,
                                      cv2.INTER_NEAREST, 1e9)
        for k, v in undistorted.items():
            data.save_undistorted_segmentation(k, v)

    # Undistort detections
    detection = data.load_detection(shot.id)
    if detection is not None:
        undistorted = undistort_image(shot, undistorted_shots, detection,
                                      cv2.INTER_NEAREST, 1e9)
        for k, v in undistorted.items():
            data.save_undistorted_detection(k, v)
Example #36
def compute_depthmap(arguments):
    """Compute depthmap for a single shot."""
    log.setup()

    data, neighbors, min_depth, max_depth, shot = arguments
    method = data.config['depthmap_method']

    if data.raw_depthmap_exists(shot.id):
        logger.info("Using precomputed raw depthmap {}".format(shot.id))
        return
    logger.info("Computing depthmap for image {0} with {1}".format(shot.id, method))

    de = csfm.DepthmapEstimator()
    de.set_depth_range(min_depth, max_depth, 100)
    de.set_patchmatch_iterations(data.config['depthmap_patchmatch_iterations'])
    de.set_min_patch_sd(data.config['depthmap_min_patch_sd'])
    add_views_to_depth_estimator(data, neighbors, de)

    if method == 'BRUTE_FORCE':
        depth, plane, score, nghbr = de.compute_brute_force()
    elif method == 'PATCH_MATCH':
        depth, plane, score, nghbr = de.compute_patch_match()
    elif method == 'PATCH_MATCH_SAMPLE':
        depth, plane, score, nghbr = de.compute_patch_match_sample()
    else:
        raise ValueError(
            'Unknown depthmap method type '
            '(must be BRUTE_FORCE, PATCH_MATCH or PATCH_MATCH_SAMPLE)')

    good_score = score > data.config['depthmap_min_correlation_score']
    depth = depth * (depth < max_depth) * good_score

    # Save and display results
    neighbor_ids = [i.id for i in neighbors[1:]]
    data.save_raw_depthmap(shot.id, depth, plane, score, nghbr, neighbor_ids)

    if data.config['depthmap_save_debug_files']:
        image = data.undistorted_image_as_array(shot.id)
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        with io.open_wt(data._depthmap_file(shot.id, 'raw.npz.ply')) as fout:
            fout.write(ply)

    if data.config.get('interactive'):
        import matplotlib.pyplot as plt
        plt.figure()
        plt.suptitle("Shot: " + shot.id + ", neighbors: " + ', '.join(neighbor_ids))
        plt.subplot(2, 3, 1)
        plt.imshow(image)
        plt.subplot(2, 3, 2)
        plt.imshow(color_plane_normals(plane))
        plt.subplot(2, 3, 3)
        plt.imshow(depth)
        plt.colorbar()
        plt.subplot(2, 3, 4)
        plt.imshow(score)
        plt.colorbar()
        plt.subplot(2, 3, 5)
        plt.imshow(nghbr)
        plt.colorbar()
        plt.show()
Example #37
def match(args):
    """Compute all matches for a single image"""
    log.setup()

    im1, candidates, i, n, ctx = args
    logger.info('Matching {}  -  {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    im1_matches = {}

    for im2 in candidates:
        # preemptive matching
        if preemptive_threshold > 0:
            t = timer()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(
                ctx.f_pre[im1], ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), timer() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if config['matcher_type'] == 'FLANN':
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
        else:
            i1 = None
            i2 = None

        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            timer() - t_robust_matching))

        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))
    ctx.data.save_matches(im1, im1_matches)
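
Example #37 temporarily overwrites config['lowes_ratio'] for the preemptive pass and restores it by hand two lines later. A more defensive version of that swap, as a sketch (the helper name is illustrative, not from the source):

from contextlib import contextmanager

@contextmanager
def override_key(config, key, value):
    # Temporarily replace one config entry, restoring it even on error.
    old = config[key]
    config[key] = value
    try:
        yield
    finally:
        config[key] = old

# Usage, mirroring the preemptive-matching block above:
# with override_key(config, 'lowes_ratio', preemptive_lowes_ratio):
#     matches_pre = matching.match_lowe_bf(ctx.f_pre[im1], ctx.f_pre[im2], config)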