Beispiel #1
0
def extract(model, filepath, vid):
    """Run inception_v3 over one video's frames and persist the features.

    Builds a frame loader for the single video at *filepath*, accumulates
    per-batch features on the CPU, saves them to the path derived from
    *vid*, then removes the original video file.

    Args:
        model: feature-extraction network (inception_v3 wrapper).
        filepath: path to the source video file (deleted on success).
        vid: video id used to format the output feature path.
    """
    # Loader over the frames of this single video.
    loader = get_dataloader(dataset="VideoFrame",
                            path=filepath,
                            num_frames=cfg.num_frames,
                            batch_size=cfg.batch_size)

    # Accumulate features batch by batch, keeping them on the CPU.
    features = None
    total = len(loader)
    for idx, batch in enumerate(loader, start=1):
        print("--> extract features [{}/{}]".format(idx, total))
        output = model(make_variable(batch))
        features = concat_feat_var(features, output.data.cpu())

    out_path = cfg.inception_v3_feats_path.format(vid)
    print("--> save feats to {}".format(out_path))
    torch.save(features, out_path)
    # The decoded video is no longer needed once features are on disk.
    print("--> delete original video file: {}".format(filepath))
    os.remove(filepath)
Beispiel #2
0
if __name__ == '__main__':
    # Build the Inception v3 feature extractor (eval mode: inference only)
    # and a PCA model for dimensionality reduction.
    model = make_cuda(
        inception_v3(pretrained=True, transform_input=True, extract_feat=True))
    pca = PCAWrapper(n_components=cfg.n_components)
    model.eval()

    # Loader over frames already decoded from several videos.
    # (A "VideoFrame" loader over cfg.video_file would instead serve frames
    # from a single video.)
    data_loader = get_dataloader(dataset="FrameImage",
                                 path=cfg.frame_root,
                                 batch_size=cfg.batch_size)

    # Accumulate inception_v3 features over all batches on the CPU.
    feats = None
    num_batches = len(data_loader)
    for idx, batch in enumerate(data_loader, start=1):
        print("extracting feature [{}/{}]".format(idx, num_batches))
        output = model(make_variable(batch))
        feats = concat_feat(feats, output.data.cpu())

    # Fit PCA on the full feature matrix and reduce its dimensionality.
    X = feats.numpy()
    pca.fit(X)
    X_ = pca.transform(X)
    print("reduce X {} to X_ {}".format(X.shape, X_.shape))
Beispiel #3
0
from models import PCAWrapper, inception_v3

if __name__ == '__main__':
    # Build the Inception v3 feature extractor (eval mode: inference only).
    model = make_cuda(
        inception_v3(pretrained=True, transform_input=True, extract_feat=True))
    model.eval()

    # Load a previously fitted PCA model for dimensionality reduction.
    pca = PCAWrapper(n_components=cfg.n_components,
                     batch_size=cfg.pca_batch_size)
    pca.load_params(filepath=cfg.pca_model)

    # Data loader over frames sampled from a single video file.
    data_loader = get_dataloader(dataset="VideoFrame",
                                 path=cfg.video_file,
                                 num_frames=cfg.num_frames,
                                 batch_size=cfg.batch_size)

    # TFRecord writer for frame-level features.
    # NOTE(review): `writer` is never used in the visible lines -- the script
    # presumably continues past this chunk; confirm against the full file.
    writer = RecordWriter(filepath="data/test.tfrecord", level="frame")

    # Extract features batch by batch and reduce each batch with PCA.
    feats = None
    for step, frames in enumerate(data_loader):
        print("extracting feature [{}/{}]".format(step + 1, len(data_loader)))
        feat = model(make_variable(frames))
        feat_np = feat.data.cpu().numpy()
        # reduce dimensions by PCA
        feat_ = pca.transform(feat_np)
        feats = concat_feat(feats, feat_)
Beispiel #4
0
                                   model_path=cfg.inception_v3_model,
                                   transform_input=True,
                                   extract_feat=True))
    # NOTE(review): the call these keyword arguments close starts before this
    # chunk (presumably make_cuda(inception_v3(...))) -- confirm full file.
    model.eval()

    # Load a previously fitted PCA model for dimensionality reduction.
    pca = PCAWrapper(n_components=cfg.n_components,
                     batch_size=cfg.pca_batch_size)
    pca.load_params(filepath=cfg.pca_model)

    # Each sub-folder of the dataset holds the decoded frames of one video
    # under <subfolder>/frames.
    subfolders = list_folders(cfg.dataset_path)
    for subfolder in subfolders:
        print("current folder: {}".format(subfolder))
        # data loader for frames in single video
        data_loader = get_dataloader(dataset="FrameImage",
                                     path=os.path.join(subfolder, 'frames'),
                                     frame_num =cfg.frame_num,
                                     batch_size=cfg.batch_size)

        # Extract inception_v3 features batch by batch, reducing each batch
        # with the pre-fitted PCA model.
        feats = None
        for step, frames in enumerate(data_loader):
            #print("extracting feature [{}/{}]".format(step + 1, len(data_loader)))
            feat = model(make_variable(frames))
            feat_np = feat.data.cpu().numpy()
            # reduce dimensions by PCA
            feat_ = pca.transform(feat_np)
            feats = concat_feat(feats, feat_)
        # Quantize the PCA-reduced features into the final video embedding.
        embedding = quantize(feats)

        # Persist the embedding, truncated to the first cfg.frame_num rows,
        # as <subfolder>/vfeat.npy.
        np.save('{}.npy'.format(os.path.join(subfolder, 'vfeat')), embedding[:cfg.frame_num,])