def main():
	"""Extract C3D features for every clip in the dataset and dump them to disk.

	Reads CLI options via ``get_args()``, iterates the dataset clip-by-clip
	with no gradient tracking, and writes per-video feature files under
	``args.save_dir`` through ``FeaturesWriter``.
	"""
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	args = get_args()
	register_logger(log_file=args.log_file)

	# Input sizes are fixed, so let cuDNN autotune its convolution kernels.
	cudnn.benchmark = True

	data_loader = VideoIter(dataset_path=args.dataset_path,
							clip_length=args.clip_length,
							frame_stride=args.frame_interval,
							video_transform=build_transforms(),
							return_label=False)

	data_iter = torch.utils.data.DataLoader(data_loader,
											batch_size=args.batch_size,
											shuffle=False,
											num_workers=args.num_workers,
											pin_memory=True)

	network = C3D(pretrained=args.pretrained_3d)
	if device.type != 'cpu':
		network = torch.nn.DataParallel(network)
	network = network.to(device)
	# Fix: switch to inference mode so BatchNorm/Dropout layers behave
	# deterministically — the original left the model in training mode.
	network.eval()

	# exist_ok avoids the check-then-create race of `path.exists` + `mkdir`.
	os.makedirs(args.save_dir, exist_ok=True)

	features_writer = FeaturesWriter(num_videos=data_loader.video_count)
	loop_i = 0
	with torch.no_grad():
		for data, clip_idxs, dirs, vid_names in data_iter:
			outputs = network(data.to(device)).detach().cpu().numpy()

			# `vid_dir` rather than `dir` — avoid shadowing the builtin.
			for i, (vid_dir, vid_name, clip_idx) in enumerate(zip(dirs, vid_names, clip_idxs)):
				# Log once every `args.log_every` clips.
				if loop_i == 0:
					logging.info(f"Video {features_writer.dump_count} / {features_writer.num_videos} : Writing clip {clip_idx} of video {vid_name}")

				loop_i += 1
				loop_i %= args.log_every

				vid_dir = path.join(args.save_dir, vid_dir)
				features_writer.write(feature=outputs[i],
									  video_name=vid_name,
									  idx=clip_idx,
									  dir=vid_dir)

	features_writer.dump()
# Ejemplo n.º 2
# 0
def main():
    """Extract features for every clip in the dataset and dump them to disk.

    Reads CLI options via ``get_args()``, builds the loader/model for the
    requested ``model_type``, iterates the dataset with no gradient tracking,
    and writes per-video feature files under ``args.save_dir`` through
    ``FeaturesWriter``.
    """
    device = get_torch_device()

    args = get_args()
    register_logger(log_file=args.log_file)

    # Input sizes are fixed, so let cuDNN autotune its convolution kernels.
    cudnn.benchmark = True

    data_loader, data_iter = get_features_loader(
        args.dataset_path, args.clip_length, args.frame_interval,
        args.batch_size, args.num_workers, args.model_type)

    # eval() keeps BatchNorm/Dropout deterministic during feature extraction.
    network = load_feature_extractor(args.model_type, args.pretrained_3d,
                                     device).eval()

    # exist_ok avoids the check-then-create race of `path.exists` + `mkdir`.
    os.makedirs(args.save_dir, exist_ok=True)

    features_writer = FeaturesWriter(num_videos=data_loader.video_count)
    loop_i = 0
    with torch.no_grad():
        for data, clip_idxs, dirs, vid_names in data_iter:
            outputs = network(data.to(device)).detach().cpu().numpy()

            # `vid_dir` rather than `dir` — avoid shadowing the builtin.
            for i, (vid_dir, vid_name,
                    clip_idx) in enumerate(zip(dirs, vid_names, clip_idxs)):
                # Log once every `args.log_every` clips.
                if loop_i == 0:
                    logging.info(
                        f"Video {features_writer.dump_count} / {features_writer.num_videos} : Writing clip {clip_idx} of video {vid_name}"
                    )

                loop_i += 1
                loop_i %= args.log_every

                vid_dir = path.join(args.save_dir, vid_dir)
                features_writer.write(
                    feature=outputs[i],
                    video_name=vid_name,
                    idx=clip_idx,
                    dir=vid_dir,
                )

    features_writer.dump()
                        type=float,
                        default=0.01,
                        help="learning rate")
    parser.add_argument('--end_epoch',
                        type=int,
                        default=20000,
                        help="maxmium number of training epoch")

    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()

    # Register directories
    register_logger(log_file=args.log_file)
    os.makedirs(args.exps_dir, exist_ok=True)
    models_dir = path.join(args.exps_dir, 'models')
    tb_dir = path.join(args.exps_dir, 'tensorboard')
    os.makedirs(models_dir, exist_ok=True)
    os.makedirs(tb_dir, exist_ok=True)

    # Optimizations
    device = get_torch_device()
    cudnn.benchmark = True  # enable cudnn tune

    # Data loader
    train_loader = FeaturesDatasetWrapper(features_path=args.features_path,
                                          annotation_path=args.annotation_path)
    train_iter = torch.utils.data.DataLoader(train_loader,
                                             batch_size=args.batch_size,