import os

import numpy as np
import torch
import tqdm

from dataset.dataset_class import VidDataSet


def main(args):
    dataset = VidDataSet(K=args.K, path_to_mp4=args.data_dir,
                         device=torch.device("cuda"), join_by_video=True)
    print("Dataset size", len(dataset))

    for idx in tqdm.tqdm(range(args.start_idx, args.end_idx + 1)):
        # create_filename is assumed to be a project helper that derives a file
        # name from the video metadata returned by dataset.get_video_info.
        filename = create_filename(*dataset.get_video_info(idx))
        output_path = os.path.join(args.output_dir, filename)
        frame_mark = dataset.get_frame_mark_numpy_array(idx)
        np.savez_compressed(output_path, frame_mark=frame_mark)
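# A minimal CLI entrypoint sketch for main() above. The option names mirror
# the attributes read from `args` (K, data_dir, output_dir, start_idx,
# end_idx); the defaults and help strings are illustrative assumptions, not
# taken from the original script.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Pre-extract frame marks and cache them as .npz files")
    parser.add_argument("--K", type=int, default=8)
    parser.add_argument("--data_dir", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--start_idx", type=int, default=0)
    parser.add_argument("--end_idx", type=int, required=True)
    main(parser.parse_args())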
import torch
import torch.optim as optim
from torch.utils.data import DataLoader

from dataset.dataset_class import VidDataSet
from network.blocks import *
from network.model import *
from tensorboard_logger import configure, log_value
import pdb

"""Create dataset and net"""
device = torch.device("cuda:0")
cpu = torch.device("cpu")

tensorboard_path = './experiment/tensorboard'
path_to_chkpt = './experiment/model_weights_self_train.tar'
path_to_backup = './experiment/backup_model_weights.tar'
path_to_mp4 = "/home/cxu-serve/p1/common/voxceleb/test/video/sample"
VGGFace_body_path = '/home/cxu-serve/p1/common/vggface/new/Pytorch_VGGFACE_IR.py'
VGGFace_weight_path = '/home/cxu-serve/p1/common/vggface/new/Pytorch_VGGFACE.pth'

dataset = VidDataSet(K=8, path_to_mp4=path_to_mp4, device=device)
dataLoader = DataLoader(dataset, batch_size=2, shuffle=True)

G = Generator(224).to(device)
E = Embedder(224).to(device)
# The projection discriminator keeps one embedding per training video,
# hence it is sized by the dataset length.
D = Discriminator(dataset.__len__()).to(device)
G.train()
E.train()
D.train()

optimizerG = optim.Adam(params=list(E.parameters()) + list(G.parameters()), lr=5e-5)
optimizerD = optim.Adam(params=D.parameters(), lr=2e-4)

"""Criterion"""
import torch
import torch.optim as optim
from torch.utils.data import DataLoader

from dataset.dataset_class import VidDataSet
from network.blocks import *
from network.model import *
from tensorboard_logger import configure, log_value
import pdb

"""Create dataset and net"""
device = torch.device("cuda:0")
cpu = torch.device("cpu")

tensorboard_path = './experiment/tensorboard'
path_to_chkpt = './experiment/model_weights_self_train.tar'
path_to_backup = './experiment/backup_model_weights.tar'
path_to_mp4 = "/home/cxu-serve/p1/common/voxceleb/test/video/sample_one"
VGGFace_body_path = '/home/cxu-serve/p1/common/vggface/new/Pytorch_VGGFACE_IR.py'
VGGFace_weight_path = '/home/cxu-serve/p1/common/vggface/new/Pytorch_VGGFACE.pth'

dataset = VidDataSet(K=8, path_to_mp4=path_to_mp4, device=device)
dataLoader = DataLoader(dataset, batch_size=1, shuffle=True)

G = Generator(224).to(device)
E = Embedder(224).to(device)
# The discriminator and its optimizer are disabled in this variant.
# D = Discriminator(dataset.__len__())
# D = Discriminator(dataset.__len__()).to(device)
G.train()
E.train()
# D.train()

optimizerG = optim.Adam(params=list(E.parameters()) + list(G.parameters()), lr=5e-5)
# optimizerD = optim.Adam(params=D.parameters(), lr=2e-4)
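# Sketch only: tensorboard_path is defined above but not yet used in this
# excerpt. With the tensorboard_logger package already imported, logging would
# typically be wired up as below; the 'lossG' tag and the `step` counter are
# illustrative assumptions, not names from the original training loop.
#
#     configure(tensorboard_path, flush_secs=5)
#     ...
#     log_value('lossG', lossG.item(), step)   # inside the training loop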
import os

import torch
import torch.optim as optim
import torchvision.utils as vutils
from torch.utils.data import DataLoader

from dataset.dataset_class import VidDataSet
from dataset.video_extraction_conversion import *
from loss.loss_discriminator import *
from loss.loss_generator import *
from network.blocks import *
from network.model import *

"""Create dataset and net"""
# Restrict training to two GPUs and let DataParallel split batches across them.
os.environ['CUDA_VISIBLE_DEVICES'] = '1,0'
device = torch.device("cuda")
cpu = torch.device("cpu")

path_to_chkpt = 'model_weights.tar'
path_to_backup = 'backup_model_weights.tar'

dataset = VidDataSet(K=8,
                     path_to_mp4='/data2/hao66/dataset/voxceleb1/unzippedFaces',
                     device=device)
print('# of videos: ', len(dataset))
dataLoader = DataLoader(dataset, batch_size=2, shuffle=True)

G = torch.nn.DataParallel(Generator(224)).to(device)
E = torch.nn.DataParallel(Embedder(224)).to(device)
D = torch.nn.DataParallel(Discriminator(dataset.__len__())).to(device)
G.train()
E.train()
D.train()

optimizerG = optim.Adam(params=list(E.parameters()) + list(G.parameters()), lr=5e-5)
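# Sketch only: with G, E and D wrapped in torch.nn.DataParallel, the learned
# parameters live under each wrapper's .module attribute, so checkpoints are
# commonly written from module.state_dict() to keep keys free of the
# "module." prefix. The dictionary keys here are illustrative assumptions,
# not necessarily the format used by the original script.
def save_checkpoint(path):
    torch.save({
        'G': G.module.state_dict(),
        'E': E.module.state_dict(),
        'D': D.module.state_dict(),
        'optimizerG': optimizerG.state_dict(),
    }, path)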