def extract(model, filepath, vid):
    """Extract features by inception_v3."""
    # data loader for frames in single video
    data_loader = get_dataloader(dataset="VideoFrame",
                                 path=filepath,
                                 num_frames=cfg.num_frames,
                                 batch_size=cfg.batch_size)
    # extract features by inception_v3
    feats = None
    for step, frames in enumerate(data_loader):
        print("--> extract features [{}/{}]".format(step + 1,
                                                    len(data_loader)))
        feat = model(make_variable(frames))
        feats = concat_feat_var(feats, feat.data.cpu())
    # save extracted features and remove the source video
    print("--> save feats to {}".format(
        cfg.inception_v3_feats_path.format(vid)))
    torch.save(feats, cfg.inception_v3_feats_path.format(vid))
    print("--> delete original video file: {}".format(filepath))
    os.remove(filepath)
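
# `concat_feat_var` is a project helper used above but not defined in this
# file; a minimal sketch of what it is assumed to do (accumulate feature
# batches along the frame dimension), not necessarily the repo's implementation:
def _concat_feat_var_sketch(feats, feat):
    """Append a new feature batch to the running feature tensor."""
    # first batch: nothing accumulated yet
    if feats is None:
        return feat
    # concatenate along dim 0 (one row per frame)
    return torch.cat((feats, feat), dim=0)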
model = make_cuda(
    inception_v3(pretrained=True,
                 transform_input=True,
                 extract_feat=True))
model.eval()

# get vid list
video_list = os.listdir(cfg.video_root)
video_list = [
    os.path.splitext(v)[0] for v in video_list
    if os.path.splitext(v)[1] in cfg.video_ext
]

# extract features by inception_v3
for idx, vid in enumerate(video_list):
    if os.path.exists(cfg.inception_v3_feats_path.format(vid)):
        print("skip {}".format(vid))
    else:
        print("extract feature from {} [{}/{}]".format(
            vid, idx + 1, len(video_list)))
        # data loader for frames decoded from several videos
        data_loader = get_dataloader(dataset="FrameImage",
                                     path=cfg.frame_root,
                                     batch_size=cfg.batch_size,
                                     vid=vid)
        feats = None
        for step, frames in enumerate(data_loader):
            print("--> step [{}/{}]".format(step + 1, len(data_loader)))
            feat = model(make_variable(frames))
            feats = concat_feat_var(feats, feat.data.cpu())
        torch.save(feats, cfg.inception_v3_feats_path.format(vid))
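
# `make_cuda` and `make_variable` are project helpers not shown in this file;
# a rough sketch under the assumption that the script targets the pre-0.4
# PyTorch Variable API (newer PyTorch would wrap the forward pass in
# `with torch.no_grad():` instead):
def _make_cuda_sketch(module):
    """Move a module to GPU when one is available."""
    return module.cuda() if torch.cuda.is_available() else module

def _make_variable_sketch(tensor, volatile=True):
    """Wrap a tensor as an inference-only Variable on the current device."""
    from torch.autograd import Variable
    if torch.cuda.is_available():
        tensor = tensor.cuda()
    # volatile=True skips autograd bookkeeping for pure feature extraction
    return Variable(tensor, volatile=volatile)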
if os.path.exists(cfg.inception_v3_feats_path.format("total")):
    # reuse previously merged feats if available
    inception_v3_feats = torch.load(
        cfg.inception_v3_feats_path.format("total"))
else:
    # get inception_v3 feats list
    feats_list = os.listdir(cfg.inception_v3_feats_root)
    feats_list = [
        v for v in feats_list
        if os.path.splitext(v)[1] in cfg.inception_v3_feats_ext
    ]
    # load inception_v3 feats
    inception_v3_feats = None
    for step, feat_file in enumerate(feats_list):
        print("loading inception_v3 feats from {} [{}/{}]".format(
            feat_file, step + 1, len(feats_list)))
        feat_path = os.path.join(cfg.inception_v3_feats_root, feat_file)
        feat = torch.load(feat_path)
        inception_v3_feats = concat_feat_var(inception_v3_feats, feat)
    # save all feats into single file
    torch.save(inception_v3_feats,
               cfg.inception_v3_feats_path.format("total"))

# train PCA
X = inception_v3_feats.numpy()
pca.fit(X)

# save PCA params
pca.save_params(filepath=cfg.pca_model)
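
# the `pca` object above exposes `fit` and `save_params`, but its class is not
# defined in this file. A hypothetical wrapper around scikit-learn PCA with a
# compatible interface (n_components=1024 is an assumed value, not from the repo):
import pickle
from sklearn.decomposition import PCA as _SklearnPCA

class _PCASketch(object):
    """Illustrative PCA helper with fit / save_params, not the repo's class."""

    def __init__(self, n_components=1024, whiten=True):
        self._pca = _SklearnPCA(n_components=n_components, whiten=whiten)

    def fit(self, X):
        # X: (num_frames, feat_dim) numpy array of inception_v3 features
        self._pca.fit(X)

    def transform(self, X):
        # project features into the reduced space
        return self._pca.transform(X)

    def save_params(self, filepath):
        # persist the learned projection so it can be reused at inference time
        with open(filepath, "wb") as f:
            pickle.dump({"mean": self._pca.mean_,
                         "components": self._pca.components_,
                         "explained_variance": self._pca.explained_variance_},
                        f)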