Code example #1
def main():
    init_logging()

    config = load_config()
    build_config = config.build
    db = TrackDatabase(os.path.join(config.tracks_folder, "dataset.hdf5"))
    dataset = Dataset(db, "dataset", config)
    tracks_loaded, total_tracks = dataset.load_tracks()
    print(
        "Loaded {}/{} tracks, found {:.1f}k segments".format(
            tracks_loaded, total_tracks, len(dataset.segments) / 1000
        )
    )
    for key, value in dataset.filtered_stats.items():
        if value != 0:
            print("  {} filtered {}".format(key, value))
    print()

    show_tracks_breakdown(dataset)
    print()
    show_segments_breakdown(dataset)
    print()
    show_cameras_breakdown(dataset)
    print()

    print("Splitting data set into train / validation")
    datasets = split_dataset_by_cameras(db, dataset, build_config)
    # if build_config.use_previous_split:
    #     split = get_previous_validation_bins(build_config.previous_split)
    #     datasets = split_dataset(db, dataset, build_config, split)
    # else:
    #     datasets = split_dataset(db, dataset, build_config)

    with open(dataset_db_path(config), "wb") as f:
        pickle.dump(datasets, f)
Code example #2
    def __init__(self, config, tracker_config):

        CPTVFileProcessor.__init__(self, config, tracker_config)

        self.hints = {}
        self.enable_track_output = True
        self.compression = (
            blosc_zstd if self.config.extract.enable_compression else None
        )

        self.previewer = Previewer.create_if_required(config, config.extract.preview)

        # normally poor-quality tracks are filtered out; enabling this lets them through.
        self.disable_track_filters = False
        # disables background subtraction
        self.disable_background_subtraction = False

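        # ensure the tracks folder exists before opening the database inside it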
        os.makedirs(self.config.tracks_folder, mode=0o775, exist_ok=True)
        self.database = TrackDatabase(
            os.path.join(self.config.tracks_folder, "dataset.hdf5")
        )

        # load hints.  Hints are a way to give extra information to the tracker when necessary.
        # if os.path.exists(config.extract.hints_file):
        if config.extract.hints_file:
            self.load_hints(config.extract.hints_file)
Code example #3
    def __init__(self, config):

        self.config = config
        os.makedirs(self.config.tracks_folder, mode=0o775, exist_ok=True)
        self.database = TrackDatabase(
            os.path.join(self.config.tracks_folder, "dataset.hdf5")
        )

        self.compression = (
            tools.gzip_compression if self.config.load.enable_compression else None
        )
        self.track_config = config.tracking
        # number of threads to use when processing jobs.
        self.workers_threads = config.worker_threads
        self.previewer = Previewer.create_if_required(config, config.load.preview)
        self.track_extractor = ClipTrackExtractor(
            self.config.tracking,
            self.config.use_opt_flow
            or config.load.preview == Previewer.PREVIEW_TRACKING,
            self.config.load.cache_to_disk,
        )
Code example #4
    def __init__(self, out_folder):

        CPTVFileProcessor.__init__(self)

        self.hints = {}
        self.colormap = plt.get_cmap('jet')
        self.verbose = False
        self.out_folder = out_folder
        self.overwrite_mode = CPTVTrackExtractor.OM_NONE
        self.enable_previews = False
        self.enable_track_output = True

        # normally poor-quality tracks are filtered out; enabling this lets them through.
        self.disable_track_filters = False
        # disables background subtraction
        self.disable_background_subtraction = False

        self.high_quality_optical_flow = False

        self.database = TrackDatabase(os.path.join(self.out_folder, 'dataset.hdf5'))

        self.worker_pool_init = init_workers
Code example #5
def main():

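    # module-level handles; the helper functions below read these globals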
    global dataset
    global db

    db = TrackDatabase(os.path.join(DATASET_FOLDER, 'dataset.hdf5'))
    dataset = Dataset(db, 'dataset')

    total_tracks = len(db.get_all_track_ids())

    tracks_loaded = dataset.load_tracks(track_filter)

    print("Loaded {}/{} tracks, found {:.1f}k segments".format(
        tracks_loaded, total_tracks,
        len(dataset.segments) / 1000))
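    # filtered_stats: module-level counters, presumably updated by track_filter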
    for key, value in filtered_stats.items():
        if value != 0:
            print("  {} filtered {}".format(key, value))
    print()

    labels = sorted(list(set(dataset.tracks_by_label.keys())))
    dataset.labels = labels

    show_tracks_breakdown()
    print()
    show_segments_breakdown()
    print()
    show_cameras_breakdown()
    print()

    print("Splitting data set into train / validation")
    if USE_PREVIOUS_SPLIT:
        split = get_bin_split('template.dat')
        datasets = split_dataset_days(split)
    else:
        datasets = split_dataset_days()

    with open(os.path.join(DATASET_FOLDER, 'datasets.dat'), 'wb') as f:
        pickle.dump(datasets, f)
Code example #6
def main():
    init_logging()
    args = parse_args()
    config = load_config(args.config_file)
    db = TrackDatabase(os.path.join(config.tracks_folder, "dataset.hdf5"))
    dataset = Dataset(
        db, "dataset", config, consecutive_segments=args.consecutive_segments
    )
    tracks_loaded, total_tracks = dataset.load_tracks(before_date=args.date)
    print(
        "Loaded {}/{} tracks, found {:.1f}k segments".format(
            tracks_loaded, total_tracks, len(dataset.segments) / 1000
        )
    )
    for key, value in dataset.filtered_stats.items():
        if value != 0:
            print("  {} filtered {}".format(key, value))
    print()
    show_tracks_breakdown(dataset)
    print()
    show_segments_breakdown(dataset)
    print()
    show_important_frames_breakdown(dataset)
    print()
    show_cameras_breakdown(dataset)
    print()

    print("Splitting data set into train / validation")
    datasets = split_dataset_by_cameras(db, dataset, config, args)
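    # if no cutoff date was given, default the held-out test window to the last seven days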
    if args.date is None:
        args.date = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=7)
    test = test_dataset(db, config, args.date)
    datasets = (*datasets, test)
    print_counts(dataset, *datasets)
    print_cameras(*datasets)
    with open(dataset_db_path(config), "wb") as f:
        pickle.dump(datasets, f)
Code example #7
    def load_db(self):
        self.db = TrackDatabase(self.db_file)
Code example #8
if args.model_file:  # assumed condition: prefer a model passed on the command line
    model_file = args.model_file
    weights = args.weights
else:
    model_file = config.classify.models[0].model_file
    weights = config.classify.models[0].model_weights

base_dir = config.tracks_folder

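# build the Keras model wrapper and load the trained weights for inference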
model = KerasModel(train_config=config.train)
model.load_model(model_file, training=False, weights=weights)

if args.track_id or args.clip_id:
    if args.clip_id is None:
        logging.error("Need clip id and track id")
        sys.exit(1)
    db = TrackDatabase(os.path.join(config.tracks_folder, "dataset.hdf5"))
    evaluate_db_clip(model, db, args.clip_id, args.track_id)
    sys.exit(0)

with open(os.path.join(base_dir, args.dataset), "rb") as f:
    dataset = pickle.load(f)
logging.info("running on %s", dataset.name)
dataset.recalculate_segments(segment_type=1)

model_dir = os.path.dirname(model_file)  # avoid shadowing the builtin dir()
with open(os.path.join(model_dir, "metadata.txt"), "r") as f:
    meta = json.load(f)
mapped_labels = meta.get("mapped_labels")
label_probabilities = meta.get("label_probabilities")
dataset.lbl_p = label_probabilities
if mapped_labels:
    dataset.regroup(mapped_labels)
print("dataset labels arenow ", dataset.labels)