Example 1
def run_dataset(data: DataSetBase):
    """ Link matches pair-wise matches into tracks. """

    start = timer()
    features, colors, segmentations, instances = tracking.load_features(
        data, data.images())
    features_end = timer()
    matches = tracking.load_matches(data, data.images())
    matches_end = timer()
    tracks_manager = tracking.create_tracks_manager(
        features,
        colors,
        segmentations,
        instances,
        matches,
        data.config,
    )
    tracks_end = timer()
    data.save_tracks_manager(tracks_manager)
    write_report(
        data,
        tracks_manager,
        features_end - start,
        matches_end - features_end,
        tracks_end - matches_end,
    )
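
The write_report called above is defined elsewhere; the code below is a minimal sketch, assuming it only bundles the three per-stage timings and basic track counts into a JSON report saved through data.save_report. The key names, the "tracks.json" filename and tracks_manager.num_shots() are assumptions (num_tracks() appears in Example 10; save_report and io.json_dumps appear in Example 4).

def write_report(
    data: DataSetBase,
    tracks_manager,
    features_time: float,
    matches_time: float,
    tracks_time: float,
):
    # Bundle the per-stage timings and a few basic counts into a JSON report.
    report = {
        "wall_times": {
            "load_features": features_time,
            "load_matches": matches_time,
            "compute_tracks": tracks_time,
        },
        "num_images": tracks_manager.num_shots(),
        "num_tracks": tracks_manager.num_tracks(),
    }
    data.save_report(io.json_dumps(report), "tracks.json")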
Example 2
def run_dataset(data: DataSetBase, input, output):
    """ Extend a base reconstruction with the not-yet-reconstructed images. """

    recs_base = data.load_reconstruction(input)
    if len(recs_base) == 0:
        return

    rec_base = recs_base[0]
    tracks_manager = data.load_tracks_manager()
    rec_base.add_correspondences_from_tracks_manager(tracks_manager)

    images = data.images()
    remaining_images = set(images) - set(rec_base.shots)
    gcp = data.load_ground_control_points()
    report = {}
    rec_report = {}
    report["extend_reconstruction"] = [rec_report]
    rec, rec_report["grow"] = reconstruction.grow_reconstruction(
        data,
        tracks_manager,
        rec_base,
        remaining_images,
        gcp,
    )
    rec_report["num_remaining_images"] = len(remaining_images)
    report["not_reconstructed_images"] = list(remaining_images)
    data.save_reconstruction([rec], output)
    data.save_report(io.json_dumps(report), "reconstruction.json")
Example 3
def run_dataset(data: DataSetBase):
    """ Match features between image pairs. """

    images = data.images()

    start = timer()
    pairs_matches, preport = matching.match_images(data, {}, images, images)
    matching.save_matches(data, images, pairs_matches)
    end = timer()

    write_report(data, preport, list(pairs_matches.keys()), end - start)
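
As in Example 1, the write_report used here is not shown; a minimal sketch, assuming it merges the matcher's preport with the pair list and the elapsed time (the key names and the "matches.json" filename are assumptions):

def write_report(data: DataSetBase, preport, pairs, wall_time: float):
    # Combine the matcher's own report with the matched pairs and the timing.
    report = {
        "wall_time": wall_time,
        "num_pairs": len(pairs),
        "pairs": pairs,
    }
    report.update(preport)
    data.save_report(io.json_dumps(report), "matches.json")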
Example 4
def write_report(data: DataSetBase, wall_time):
    image_reports = []
    for image in data.images():
        try:
            txt = data.load_report("features/{}.json".format(image))
            image_reports.append(io.json_loads(txt))
        except IOError:
            logger.warning("No feature report image {}".format(image))

    report = {"wall_time": wall_time, "image_reports": image_reports}
    data.save_report(io.json_dumps(report), "features.json")
Example 5
def run_dataset(data: DataSetBase):
    """ Compute features for all images. """

    images = data.images()

    arguments = [(image, data) for image in images]

    start = timer()
    processes = data.config["processes"]
    parallel_map(detect, arguments, processes, 1)
    end = timer()
    write_report(data, end - start)
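
parallel_map receives one (image, data) tuple per image, so the detect worker it dispatches must unpack that pair. A skeleton sketch of such a worker; the data.features_exist check is an assumed method name and the actual extraction call is omitted, so this only illustrates the calling convention:

def detect(args: Tuple[str, DataSetBase]):
    # parallel_map passes each (image, data) tuple from `arguments` unchanged.
    image, data = args

    # Skip images whose features were already extracted (method name assumed).
    if data.features_exist(image):
        logger.info("Features for {} already exist, skipping".format(image))
        return

    logger.info("Extracting features for {}".format(image))
    # The real pipeline would run the configured feature extractor here and
    # save the result through the dataset object.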
Example 6
def run_dataset(data: DataSetBase):
    """Compute features for all images."""

    start = timer()

    default_queue_size = 10
    max_queue_size = 200
    mem_available = log.memory_available()
    if mem_available:
        expected_mb = mem_available / 2
        expected_images = min(max_queue_size,
                              int(expected_mb / average_image_size(data)))
        logger.info(f"Capping memory usage to ~ {expected_mb} MB")
    else:
        expected_images = default_queue_size
    logger.info(f"Expecting to process {expected_images} images.")

    process_queue = queue.Queue(expected_images)
    arguments: List[Tuple[str, Any]] = []

    all_images = data.images()
    processes = data.config["processes"]

    if processes == 1:
        for image in all_images:
            counter = Counter()
            read_images(process_queue, data, [image], counter, 1)
            run_detection(process_queue)
            process_queue.get()
    else:
        counter = Counter()
        read_processes = data.config["read_processes"]
        if 1.5 * read_processes >= processes:
            read_processes = max(1, processes // 2)

        chunk_size = math.ceil(len(all_images) / read_processes)
        chunks_count = math.ceil(len(all_images) / chunk_size)
        read_processes = min(read_processes, chunks_count)

        expected: int = len(all_images)
        for i in range(read_processes):
            images_chunk = all_images[i * chunk_size:(i + 1) * chunk_size]
            arguments.append((
                "producer",
                (process_queue, data, images_chunk, counter, expected),
            ))
        for _ in range(processes):
            arguments.append(("consumer", (process_queue)))
        parallel_map(process, arguments, processes, 1)

    end = timer()
    write_report(data, end - start)
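
The producer/consumer tuples built above are handed to parallel_map through a process function that is not shown here; a minimal sketch of the dispatcher this argument layout implies, assuming read_images and run_detection have the signatures used in the single-process branch:

def process(args: Tuple[str, Any]):
    # Each entry is ("producer", (queue, data, images, counter, expected))
    # or ("consumer", queue); route it to the matching worker.
    process_type, real_args = args
    if process_type == "producer":
        process_queue, data, images, counter, expected = real_args
        # Load images and push them onto the shared queue for the consumers.
        read_images(process_queue, data, images, counter, expected)
    elif process_type == "consumer":
        process_queue = real_args
        # Pull queued images and run feature detection on them.
        run_detection(process_queue)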
Example 7
def features_statistics(data: DataSetBase, tracks_manager, reconstructions):
    stats = {}
    detected = []
    for im in data.images():
        features_data = data.load_features(im)
        if not features_data:
            continue
        detected.append(len(features_data.points))
    if len(detected) > 0:
        stats["detected_features"] = {
            "min": min(detected),
            "max": max(detected),
            "mean": int(np.mean(detected)),
            "median": int(np.median(detected)),
        }
    else:
        stats["detected_features"] = {
            "min": -1,
            "max": -1,
            "mean": -1,
            "median": -1
        }

    per_shots = defaultdict(int)
    for rec in reconstructions:
        all_points_keys = set(rec.points.keys())
        for shot_id in rec.shots:
            if shot_id not in tracks_manager.get_shot_ids():
                continue
            for point_id in tracks_manager.get_shot_observations(shot_id):
                if point_id not in all_points_keys:
                    continue
                per_shots[shot_id] += 1
    per_shots = list(per_shots.values())

    stats["reconstructed_features"] = {
        "min": int(min(per_shots)) if len(per_shots) > 0 else -1,
        "max": int(max(per_shots)) if len(per_shots) > 0 else -1,
        "mean": int(np.mean(per_shots)) if len(per_shots) > 0 else -1,
        "median": int(np.median(per_shots)) if len(per_shots) > 0 else -1,
    }
    return stats
Example 8
def run_dataset(data: DataSetBase):
    """ Extract metadata from images' EXIF tag. """

    start = time.time()

    exif_overrides = {}
    if data.exif_overrides_exists():
        exif_overrides = data.load_exif_overrides()

    camera_models = {}
    for image in data.images():
        if data.exif_exists(image):
            logging.info("Loading existing EXIF for {}".format(image))
            d = data.load_exif(image)
        else:
            logging.info("Extracting EXIF for {}".format(image))
            d = _extract_exif(image, data)

            if image in exif_overrides:
                d.update(exif_overrides[image])

            data.save_exif(image, d)

        if d["camera"] not in camera_models:
            camera = exif.camera_from_exif_metadata(d, data)
            camera_models[d["camera"]] = camera

    # Override any camera specified in the camera models overrides file.
    if data.camera_models_overrides_exists():
        overrides = data.load_camera_models_overrides()
        if "all" in overrides:
            for key in camera_models:
                camera_models[key] = copy.copy(overrides["all"])
                camera_models[key].id = key
        else:
            for key, value in overrides.items():
                camera_models[key] = value
    data.save_camera_models(camera_models)

    end = time.time()
    with data.io_handler.open(data.profile_log(), "a") as fout:
        fout.write("extract_metadata: {0}\n".format(end - start))
Example 9
def run_dataset(data: DataSetBase):
    """ Compute features for all images. """

    start = timer()
    process_queue = queue.Queue()
    arguments: List[Tuple[str, Any]] = []

    all_images = data.images()
    processes = data.config["processes"]

    if processes == 1:
        for image in all_images:
            counter = Counter()
            read_images(process_queue, data, [image], counter, 1)
            run_detection(process_queue)
            process_queue.get()
    else:
        counter = Counter()
        read_processes = data.config["read_processes"]
        if 1.5 * read_processes >= processes:
            read_processes = max(1, processes // 2)

        chunk_size = math.ceil(len(all_images) / read_processes)
        chunks_count = math.ceil(len(all_images) / chunk_size)
        read_processes = min(read_processes, chunks_count)

        expected: int = len(all_images)
        for i in range(read_processes):
            images_chunk = all_images[i * chunk_size:(i + 1) * chunk_size]
            arguments.append((
                "producer",
                (process_queue, data, images_chunk, counter, expected),
            ))
        for _ in range(processes):
            arguments.append(("consumer", (process_queue)))
        parallel_map(process, arguments, processes, 1)

    end = timer()
    write_report(data, end - start)
Example 10
def reconstruction_statistics(data: DataSetBase, tracks_manager,
                              reconstructions):
    stats = {}

    stats["components"] = len(reconstructions)
    gps_count = 0
    for rec in reconstructions:
        for shot in rec.shots.values():
            gps_count += shot.metadata.gps_position.has_value
    stats["has_gps"] = gps_count > 2
    stats["has_gcp"] = True if data.load_ground_control_points() else False

    stats["initial_points_count"] = tracks_manager.num_tracks()
    stats["initial_shots_count"] = len(data.images())

    stats["reconstructed_points_count"] = 0
    stats["reconstructed_shots_count"] = 0
    stats["observations_count"] = 0
    hist_agg = defaultdict(int)

    for rec in reconstructions:
        if len(rec.points) > 0:
            stats["reconstructed_points_count"] += len(rec.points)
        stats["reconstructed_shots_count"] += len(rec.shots)

        # get the track length distribution for the current reconstruction
        hist, values = _length_histogram(tracks_manager, rec.points)

        # update the aggregated histogram
        for length, count_tracks in zip(hist, values):
            hist_agg[length] += count_tracks

    # total observations and average track lengths
    hist_agg = sorted(hist_agg.items(), key=lambda x: x[0])
    lengths = np.array([int(x[0]) for x in hist_agg])
    counts = np.array([x[1] for x in hist_agg])

    points_count = stats["reconstructed_points_count"]
    points_count_over_two = sum(counts[1:])
    stats["observations_count"] = int(sum(lengths * counts))
    stats["average_track_length"] = ((stats["observations_count"] /
                                      points_count)
                                     if points_count > 0 else -1)
    stats["average_track_length_over_two"] = (
        (int(sum(lengths[1:] * counts[1:])) /
         points_count_over_two) if points_count_over_two > 0 else -1)
    stats["histogram_track_length"] = {k: v for k, v in hist_agg}

    (
        avg_normalized,
        avg_pixels,
        (hist_normalized, bins_normalized),
        (hist_pixels, bins_pixels),
    ) = _projection_error(tracks_manager, reconstructions)
    stats["reprojection_error_normalized"] = avg_normalized
    stats["reprojection_error_pixels"] = avg_pixels
    stats["reprojection_histogram_normalized"] = (
        list(map(float, hist_normalized)),
        list(map(float, bins_normalized)),
    )
    stats["reprojection_histogram_pixels"] = (
        list(map(float, hist_pixels)),
        list(map(float, bins_pixels)),
    )

    return stats
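
_length_histogram is referenced above but not defined in these examples; a sketch consistent with how its return values are consumed (first the distinct track lengths, then how many tracks have each length). The tracks_manager.get_track_observations accessor is an assumption: the code shown only uses get_shot_observations.

def _length_histogram(tracks_manager, points):
    # Count, for each reconstructed point, in how many shots it is observed.
    hist = defaultdict(int)
    for point_id in points.keys():
        length = len(tracks_manager.get_track_observations(point_id))
        hist[length] += 1
    # Return (lengths, counts) so callers can zip them into (length, count) pairs.
    lengths = sorted(hist)
    return lengths, [hist[length] for length in lengths]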