def show_video_list(split, label, path):
    """
    Show the list of videos for the given split, class label and project.
    If the necessary files for annotation haven't been prepared yet, this is done now.
    """
    path = f'/{urllib.parse.unquote(path)}'  # Make path absolute
    split = urllib.parse.unquote(split)
    label = urllib.parse.unquote(label)
    frames_dir = join(path, f"frames_{split}", label)
    tags_dir = join(path, f"tags_{split}", label)
    logreg_dir = join(path, 'logreg', label)

    os.makedirs(logreg_dir, exist_ok=True)
    os.makedirs(tags_dir, exist_ok=True)

    # Load the feature extractor if needed
    _load_feature_extractor()

    # Compute any missing frames and features
    compute_frames_features(inference_engine, split, label, path)

    videos = os.listdir(frames_dir)
    videos.sort()

    # Reload the logistic-regression classifier for this label, if one was trained
    logreg_path = join(logreg_dir, 'logreg.joblib')
    if os.path.isfile(logreg_path):
        global logreg
        logreg = load(logreg_path)

    folder_id = zip(videos, list(range(len(videos))))

    return render_template('video_list.html', folders=folder_id, split=split, label=label, path=path)
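
# Hedged sketch of the module-level context the path-based view above assumes;
# the actual module header is not shown in this snippet, so the lines below are
# an illustration, not the project's real imports:
#
#   import os
#   import urllib.parse
#   from os.path import join
#
#   from flask import render_template
#   from joblib import load
#
#   inference_engine = None   # set up by _load_feature_extractor()
#   logreg = None             # per-label logistic-regression classifier
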
def prepare_annotation(project):
    """
    Prepare all files needed for annotating the videos in the given project.
    """
    project = urllib.parse.unquote(project)
    dataset_path = utils.lookup_project_path(project)

    # Load the feature extractor
    inference_engine, model_config = utils.load_feature_extractor(dataset_path)

    for split in SPLITS:
        print(f'\n\tPreparing videos in the {split}-set')

        for label in os.listdir(directories.get_videos_dir(dataset_path, split)):
            videos_dir = directories.get_videos_dir(dataset_path, split, label)
            frames_dir = directories.get_frames_dir(dataset_path, split, label)
            features_dir = directories.get_features_dir(dataset_path, split, model_config, label=label)

            compute_frames_features(inference_engine=inference_engine,
                                    videos_dir=videos_dir,
                                    frames_dir=frames_dir,
                                    features_dir=features_dir)

    return redirect(url_for("project_details", project=project))
def show_video_list(project, split, label):
    """
    Show the list of videos for the given split, class label and project.
    If the necessary files for annotation haven't been prepared yet, this is done now.
    """
    project = urllib.parse.unquote(project)
    path = utils.lookup_project_path(project)
    split = urllib.parse.unquote(split)
    label = urllib.parse.unquote(label)

    frames_dir = join(path, f"frames_{split}", label)
    tags_dir = join(path, f"tags_{split}", label)
    logreg_dir = join(path, 'logreg', label)

    os.makedirs(logreg_dir, exist_ok=True)
    os.makedirs(tags_dir, exist_ok=True)

    # Load the feature extractor
    inference_engine = utils.load_feature_extractor(path)

    # Compute any missing frames and features
    compute_frames_features(inference_engine, split, label, path)

    videos = os.listdir(frames_dir)
    videos = natsorted(videos, alg=ns.IC)

    # Mark videos that already have a tags file
    tagged_list = set(os.listdir(tags_dir))
    tagged = [f'{video}.json' in tagged_list for video in videos]

    video_list = zip(videos, tagged, list(range(len(videos))))

    return render_template('video_list.html', video_list=video_list, split=split, label=label,
                           path=path, project=project)
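
# Note on the sorting above: `natsorted(videos, alg=ns.IC)` comes from the
# `natsort` package and performs a case-insensitive natural sort, so numbered
# files are ordered by their numeric value. A minimal standalone check:
#
#   from natsort import natsorted, ns
#   natsorted(['Video_10.mp4', 'video_2.mp4'], alg=ns.IC)
#   # -> ['video_2.mp4', 'Video_10.mp4']
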
def prepare_annotation(path):
    """
    Prepare all files needed for annotating the videos in the given project.
    """
    path = f'/{urllib.parse.unquote(path)}'  # Make path absolute

    # Load the feature extractor if needed
    _load_feature_extractor()

    for split in ['train', 'valid']:
        print("\n" + "-" * 10 + f"Preparing videos in the {split}-set" + "-" * 10)
        for label in os.listdir(join(path, f'videos_{split}')):
            compute_frames_features(inference_engine, split, label, path)

    return redirect(url_for("project_details", path=path))
def prepare_annotation(project):
    """
    Prepare all files needed for annotating the videos in the given project.
    """
    project = urllib.parse.unquote(project)
    path = utils.lookup_project_path(project)

    # Load the feature extractor
    inference_engine = utils.load_feature_extractor(path)

    for split in utils.SPLITS:
        print("\n" + "-" * 10 + f"Preparing videos in the {split}-set" + "-" * 10)
        for label in os.listdir(join(path, f'videos_{split}')):
            compute_frames_features(inference_engine, split, label, path)

    return redirect(url_for("project_details", project=project))
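
# Hedged sketch (not taken from the source): the project-based views above call
# Flask's render_template/redirect/url_for, so they are presumably registered on
# a Flask app or blueprint. The app object and URL rules below are illustrative
# assumptions only:
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   app.add_url_rule('/show-video-list/<project>/<split>/<label>',
#                    view_func=show_video_list)
#   app.add_url_rule('/prepare-annotation/<project>', view_func=prepare_annotation)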