Example 1
def extract_features(path_in, model_config, net, num_layers_finetune, use_gpu, num_timesteps=1, log_fn=print):
    """Run the feature-extraction backbone over every video of every split.

    For each split, locates the ``*.mp4`` files under the split's videos
    directory and writes one ``.npy`` feature file per video into the matching
    features directory. Videos whose feature file already exists are skipped.
    """
    # Wrap the network in an inference engine (optionally GPU-backed)
    inference_engine = engine.InferenceEngine(net, use_gpu=use_gpu)

    for split in SPLITS:
        src_dir = directories.get_videos_dir(path_in, split)
        dst_dir = directories.get_features_dir(path_in, split, model_config, num_layers_finetune)
        videos = glob.glob(os.path.join(src_dir, "*", "*.mp4"))

        total = len(videos)
        log_fn(f"\nFound {total} videos to process in the {split}-set")

        for idx, video_path in enumerate(videos):
            log_fn(f'\rExtract features from video {idx + 1} / {total}')
            # Feature file mirrors the video's relative path, with .npy extension
            path_features = video_path.replace(src_dir, dst_dir).replace(".mp4", ".npy")

            if not os.path.isfile(path_features):
                # Decode the whole clip, then push it through the backbone in batches
                frames = extract_frames(video_path=video_path,
                                        inference_engine=inference_engine)
                compute_features(path_features=path_features,
                                 inference_engine=inference_engine,
                                 frames=frames,
                                 batch_size=16,
                                 num_timesteps=num_timesteps)
            else:
                log_fn("\tSkipped - feature was already precomputed.")

        log_fn('\n')
Example 2
def extract_features(path_in, net, num_layers_finetune, use_gpu, num_timesteps=1):
    """Extract backbone features for every video in the train and valid splits.

    For each split, finds the ``*.mp4`` files under ``videos_<split>`` and
    writes one ``.npy`` feature file per video into the corresponding
    ``features_<split>_num_layers_to_finetune=<n>`` directory, mirroring the
    per-label sub-folder layout. Already-computed feature files are skipped.

    Args:
        path_in: Root of the dataset (contains the ``videos_*`` folders).
        net: Backbone network to run inference with.
        num_layers_finetune: Number of layers to fine-tune; only used to name
            the output features directory.
        use_gpu: Whether the inference engine should run on GPU.
        num_timesteps: Temporal batching parameter forwarded to
            ``compute_features``.
    """
    # Create inference engine
    inference_engine = engine.InferenceEngine(net, use_gpu=use_gpu)

    # extract features
    for dataset in ["train", "valid"]:
        videos_dir = os.path.join(path_in, f"videos_{dataset}")
        features_dir = os.path.join(path_in, f"features_{dataset}_num_layers_to_finetune={num_layers_finetune}")
        video_files = glob.glob(os.path.join(videos_dir, "*", "*.mp4"))

        # Hoist the count out of the loop; also fixes the message, which used
        # to read "trainset"/"validset" instead of "train-set"/"valid-set".
        num_videos = len(video_files)
        print(f"\nFound {num_videos} videos to process in the {dataset}-set")

        for video_index, video_path in enumerate(video_files):
            print(f"\rExtract features from video {video_index + 1} / {num_videos}",
                  end="")
            # Feature file mirrors the video path, swapping directory and extension
            path_out = video_path.replace(videos_dir, features_dir).replace(".mp4", ".npy")

            if os.path.isfile(path_out):
                print("\n\tSkipped - feature was already precomputed.")
            else:
                # compute_features reads the frames itself (path_frames=None)
                compute_features(video_path, path_out, inference_engine,
                                 num_timesteps=num_timesteps, path_frames=None, batch_size=16)

        print('\n')
Example 3
def load_feature_extractor(project_path):
    """Build the StridedInflatedEfficientNet backbone and wrap it in an engine.

    Loads the pretrained checkpoint, puts the network in eval mode, and
    returns an ``InferenceEngine`` configured from the project's ``use_gpu``
    setting.
    """
    backbone = backbone_networks.StridedInflatedEfficientNet()

    # Load pretrained backbone weights and switch to inference mode
    state_dict = torch.load('resources/backbone/strided_inflated_efficientnet.ckpt')
    backbone.load_state_dict(state_dict)
    backbone.eval()

    # GPU usage is a per-project setting
    use_gpu = get_project_setting(project_path, 'use_gpu')
    return engine.InferenceEngine(backbone, use_gpu=use_gpu)
Example 4
def _load_feature_extractor():
    """Lazily initialise the module-level ``inference_engine`` singleton.

    On first call, builds the StridedInflatedEfficientNet backbone, loads its
    pretrained checkpoint, switches it to eval mode and wraps it in a
    GPU-backed inference engine. Subsequent calls are no-ops.
    """
    global inference_engine
    import torch
    from sense import engine
    from sense import feature_extractors

    # Already initialised — nothing to do
    if inference_engine is not None:
        return

    backbone = feature_extractors.StridedInflatedEfficientNet()

    # Load pretrained weights and freeze the network for inference
    state_dict = torch.load('resources/backbone/strided_inflated_efficientnet.ckpt')
    backbone.load_state_dict(state_dict)
    backbone.eval()

    inference_engine = engine.InferenceEngine(backbone, use_gpu=True)
Example 5
if __name__ == "__main__":
    # Parse argument
    args = docopt(__doc__)
    dataset_path = join(os.getcwd(), args['--data_path'])

    # Load feature extractor
    feature_extractor = feature_extractors.StridedInflatedEfficientNet()

    # Remove internal padding for feature extraction and training
    checkpoint = torch.load(
        'resources/backbone/strided_inflated_efficientnet.ckpt')
    feature_extractor.load_state_dict(checkpoint)
    feature_extractor.eval()

    # Create Inference Engine
    inference_engine = engine.InferenceEngine(feature_extractor, use_gpu=True)

    for split in ['train', 'valid']:
        print("\n" + "-" * 10 + f"Preparing videos in the {split}-set" +
              "-" * 10)
        for label in os.listdir(join(dataset_path, f'videos_{split}')):
            # Get data-set from path, given split and label
            folder = join(dataset_path, f'videos_{split}', label)

            # Create features and frames folders for the given split and label
            features_folder = join(dataset_path, f'features_{split}', label)
            frames_folder = join(dataset_path, f'frames_{split}', label)
            os.makedirs(features_folder, exist_ok=True)
            os.makedirs(frames_folder, exist_ok=True)

            # Loop through all videos for the given class-label