Code example #1
File: finetuning.py  Project: corneliusboehm/sense
def compute_frames_and_features(inference_engine: InferenceEngine,
                                project_path: str, videos_dir: str,
                                frames_dir: str, features_dir: str):
    """
    Split the videos in the given directory into frames and compute features on each frame.
    Results are stored in the given directories for frames and features.

    :param inference_engine:
        Initialized InferenceEngine that can be used for computing the features.
    :param project_path:
        The path of the current project.
    :param videos_dir:
        Directory where the videos are stored.
    :param frames_dir:
        Directory where frames should be stored. One sub-directory will be created per video, containing the
        extracted frames as numbered .jpg files.
    :param features_dir:
        Directory where computed features should be stored. One .npy file will be created per video.
    """
    assisted_tagging = utils.get_project_setting(project_path,
                                                 'assisted_tagging')

    # Create features and frames folders
    os.makedirs(features_dir, exist_ok=True)
    os.makedirs(frames_dir, exist_ok=True)

    # Loop through all videos for the given class-label
    videos = glob.glob(os.path.join(videos_dir, '*.mp4'))
    num_videos = len(videos)
    for idx, video_path in enumerate(videos):
        print(
            f'\r  {videos_dir}  -->  Processing video {idx + 1} / {num_videos}',
            end='' if idx < (num_videos - 1) else '\n')

        video_name = os.path.basename(video_path).replace('.mp4', '')
        path_frames = os.path.join(frames_dir, video_name)
        path_features = os.path.join(features_dir, f'{video_name}.npy')

        features_needed = (assisted_tagging
                           and not os.path.exists(path_features))

        frames = extract_frames(video_path=video_path,
                                inference_engine=inference_engine,
                                path_frames=path_frames,
                                return_frames=features_needed)

        if features_needed:
            compute_features(path_features=path_features,
                             inference_engine=inference_engine,
                             frames=frames,
                             batch_size=64,
                             num_timesteps=1)
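
A minimal usage sketch for this helper follows. The directory layout and the way the InferenceEngine is obtained are assumptions for illustration, not taken verbatim from the project:

import os

# Placeholder paths; the project derives these from its own layout.
project_path = '/path/to/my_project'
videos_dir = os.path.join(project_path, 'videos_train', 'my_label')
frames_dir = os.path.join(project_path, 'frames_train', 'my_label')
features_dir = os.path.join(project_path, 'features_train', 'my_label')

# Assumption: utils.load_feature_extractor() returns an initialized
# InferenceEngine as its first value (code example #4 below discards it as `_`).
inference_engine, _model_config = utils.load_feature_extractor(project_path)

compute_frames_and_features(inference_engine=inference_engine,
                            project_path=project_path,
                            videos_dir=videos_dir,
                            frames_dir=frames_dir,
                            features_dir=features_dir)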
Code example #2
def submit_annotation():
    """
    Submit annotated tags for all frames and save them to a json file.
    """
    data = request.form  # a multi-dict containing POST data
    idx = int(data['idx'])
    fps = float(data['fps'])
    path = data['path']
    project = data['project']
    split = data['split']
    label = data['label']
    video = data['video']
    # idx indexes the videos of this class label; advance to the next one
    next_frame_idx = idx + 1

    frames_dir = directories.get_frames_dir(path, split, label)
    tags_dir = directories.get_tags_dir(path, split, label)
    description = {'file': f'{video}.mp4', 'fps': fps}

    out_annotation = os.path.join(tags_dir, f'{video}.json')
    time_annotation = []

    for frame_idx in range(int(data['n_images'])):
        time_annotation.append(int(data[f'{frame_idx}_tag']))

    description['time_annotation'] = time_annotation

    with open(out_annotation, 'w') as f:
        json.dump(description, f, indent=2)

    # Automatic re-training of the logistic regression model
    if utils.get_project_setting(path, 'assisted_tagging'):
        train_logreg(path=path)

    # frames_dir contains one sub-directory per video, so this checks whether
    # the last video of this label has just been annotated
    if next_frame_idx >= len(os.listdir(frames_dir)):
        return redirect(
            url_for('.show_video_list',
                    project=project,
                    split=split,
                    label=label))

    return redirect(
        url_for('.annotate',
                split=split,
                label=label,
                project=project,
                idx=next_frame_idx))
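
For reference, the tag file this handler writes is small and flat: the video file name, the frame rate, and one integer tag per frame under 'time_annotation'. A sketch of its contents built in Python (the values are illustrative; tag 0 is the default "background" tag, per code example #4):

import json

description = {
    'file': 'my_clip.mp4',   # hypothetical video name
    'fps': 16.0,
    # One integer tag per frame, in frame order.
    'time_annotation': [0, 0, 1, 1, 2, 0],
}
print(json.dumps(description, indent=2))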
Code example #3
def inject_temporal_status(project):
    path = utils.lookup_project_path(project)
    temporal_status = utils.get_project_setting(path, 'temporal')
    return temporal_status
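
Judging by the inject_ prefix, this helper is presumably registered with Flask so that templates can see whether the project has temporal annotations enabled; on its own it can also be called directly. A trivial usage sketch (the project name is a placeholder, and treating the setting as a boolean flag is an assumption):

# 'demo_project' is a placeholder project name.
if inject_temporal_status('demo_project'):
    print('Temporal annotation is enabled for this project.')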
Code example #4
def annotate(project, split, label, idx):
    """
    For the given class label, show all frames for annotating the selected video.
    """
    project = urllib.parse.unquote(project)
    path = project_utils.lookup_project_path(project)
    label = urllib.parse.unquote(label)
    split = urllib.parse.unquote(split)

    _, model_config = utils.load_feature_extractor(path)

    frames_dir = directories.get_frames_dir(path, split, label)
    features_dir = directories.get_features_dir(path,
                                                split,
                                                model_config,
                                                label=label)
    tags_dir = directories.get_tags_dir(path, split, label)
    logreg_dir = directories.get_logreg_dir(path, model_config, label)

    videos = os.listdir(frames_dir)
    videos = natsorted(videos, alg=ns.IC)

    # The list of images in the folder
    images = [
        image
        for image in glob.glob(os.path.join(frames_dir, videos[idx], '*'))
        if utils.is_image_file(image)
    ]
    classes = [-1] * len(images)

    # Load logistic regression model if available and assisted tagging is enabled
    if utils.get_project_setting(path, 'assisted_tagging'):
        logreg_path = os.path.join(logreg_dir, 'logreg.joblib')
        features_path = os.path.join(features_dir, f'{videos[idx]}.npy')
        if os.path.isfile(logreg_path) and os.path.isfile(features_path):
            logreg = load(logreg_path)
            features = np.load(features_path).mean(axis=(2, 3))
            classes = list(logreg.predict(features))

    # Natural-sort the images so they are ordered by frame number
    images = natsorted(images, alg=ns.IC)
    # Extract image file name (without full path) and include class label
    images = [(os.path.basename(image), _class)
              for image, _class in zip(images, classes)]

    # Load existing annotations
    annotations_file = os.path.join(tags_dir, f'{videos[idx]}.json')
    if os.path.exists(annotations_file):
        with open(annotations_file, 'r') as f:
            data = json.load(f)
            annotations = data['time_annotation']
    else:
        # Use "background" label for all frames per default
        annotations = [0] * len(images)

    # Read tags from config
    config = project_utils.load_project_config(path)
    tags = config['classes'][label]

    return render_template('frame_annotation.html',
                           images=images,
                           annotations=annotations,
                           idx=idx,
                           fps=16,
                           n_images=len(images),
                           video_name=videos[idx],
                           project_config=config,
                           split=split,
                           label=label,
                           path=path,
                           tags=tags,
                           project=project,
                           n_videos=len(videos))
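
The assisted-tagging branch is the part worth dwelling on: the saved .npy features evidently have shape (num_timesteps, channels, height, width), so the mean over axes 2 and 3 collapses the spatial dimensions into one feature vector per timestep, and the logistic regression then predicts one tag per frame. A standalone sketch of that step with synthetic data (all shapes and the tiny stand-in classifier are assumptions for illustration):

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

# Synthetic stand-in for a saved feature file: 6 timesteps of
# 64-channel feature maps with a 4x4 spatial extent.
features = rng.normal(size=(6, 64, 4, 4)).astype(np.float32)

# Same reduction as in annotate(): average out the spatial dimensions,
# leaving one 64-dim vector per timestep.
feature_vectors = features.mean(axis=(2, 3))  # shape (6, 64)

# Stand-in for the model loaded from logreg.joblib.
logreg = LogisticRegression().fit(feature_vectors, [0, 0, 1, 1, 2, 0])

# One suggested tag per frame, as pre-filled in the annotation UI.
classes = list(logreg.predict(feature_vectors))
print(classes)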