import os
import cv2
import torch
from torch import nn, optim
from torch.utils import data as data_utils

# SyncNet_color / SyncNet, Dataset, hparams and syncnet_T are assumed to come
# from the surrounding (Wav2Lip-style) project; checkpoint_path, checkpoint_dir
# and model_dir are assumed to be set earlier, e.g. from command-line args.
use_cuda = torch.cuda.is_available()

def load_model(path):
    model = SyncNet_color()
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    s = checkpoint["state_dict"]
    new_s = {}
    for k, v in s.items():
        # Strip the "module." prefix that nn.DataParallel adds to weight names.
        new_s[k.replace('module.', '')] = v
    model.load_state_dict(new_s)
    model = model.to(device)
    for p in model.parameters():
        p.requires_grad = False  # freeze: this copy is used for inference only
    return model.eval()
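# `_load` (used above) is not shown in this excerpt. A minimal sketch of such a
# helper, assuming `use_cuda` as defined above (hypothetical reconstruction,
# not the verbatim original):
def _load(checkpoint_path):
    if use_cuda:
        checkpoint = torch.load(checkpoint_path)
    else:
        # Remap CUDA-saved tensors onto the CPU when no GPU is available.
        checkpoint = torch.load(
            checkpoint_path, map_location=lambda storage, loc: storage)
    return checkpoint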
# Tail of a sample-image dumping helper; the start of the function was lost in
# this excerpt. The enclosing loops below are reconstructed from the indices
# used (hypothetical except for the cv2.imwrite line itself):
def save_sample_images(collage, folder):
    for batch_idx, c in enumerate(collage):
        for t in range(len(c)):
            cv2.imwrite('{}/{}_{}.jpg'.format(folder, batch_idx, t), c[t])

logloss = nn.BCELoss()

def cosine_loss(a, v, y):
    # BCE on the cosine similarity between audio and video embeddings.
    d = nn.functional.cosine_similarity(a, v)
    loss = logloss(d.unsqueeze(1), y)
    return loss

device = torch.device("cuda" if use_cuda else "cpu")

syncnet = SyncNet().to(device)
for p in syncnet.parameters():
    p.requires_grad = False  # the pretrained lip-sync expert stays frozen

recon_loss = nn.L1Loss()

def get_sync_loss(mel, g):
    # Keep only the lower half of each generated face (the mouth region).
    g = g[:, :, :, g.size(3) // 2:]
    # Fold the time axis into channels: B, 3 * T, H//2, W
    g = torch.cat([g[:, :, i] for i in range(syncnet_T)], dim=1)
    a, v = syncnet(mel, g)
    # Target label 1: the generated frames should be judged "in sync".
    y = torch.ones(g.size(0), 1).float().to(device)
    return cosine_loss(a, v, y)

def train(device, model, train_data_loader, test_data_loader, optimizer,
          checkpoint_dir=None, checkpoint_interval=None, nepochs=None):
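    # The original body of train() was cut off in this excerpt; the signature
    # above is reconstructed from the call at the bottom of the file. What
    # follows is a minimal sketch of a syncnet training loop, NOT the verbatim
    # original: the progress bar, the evaluation pass over test_data_loader and
    # the exact bookkeeping are omitted. `save_checkpoint` is a hypothetical
    # helper sketched just below.
    global_step = 0
    for epoch in range(nepochs):
        running_loss = 0.
        for step, (x, mel, y) in enumerate(train_data_loader):
            model.train()
            optimizer.zero_grad()
            x, mel, y = x.to(device), mel.to(device), y.to(device)
            # SyncNet returns an (audio, video) embedding pair.
            a, v = model(mel, x)
            loss = cosine_loss(a, v, y)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            global_step += 1
            if checkpoint_interval and global_step % checkpoint_interval == 0:
                save_checkpoint(model, optimizer, global_step,
                                checkpoint_dir, epoch)
        print('Epoch {}: average sync loss {:.4f}'.format(
            epoch, running_loss / (step + 1)))

# `save_checkpoint` and `load_checkpoint` (used further below) are not shown in
# this excerpt either. Minimal sketches, assuming the checkpoint layout that
# load_model() above expects (a dict with a "state_dict" entry); these are
# hypothetical reconstructions:
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch):
    checkpoint_path = os.path.join(
        checkpoint_dir, "checkpoint_step{:09d}.pth".format(step))
    torch.save({
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "global_step": step,
        "global_epoch": epoch,
    }, checkpoint_path)
    print("Saved checkpoint:", checkpoint_path)

def load_checkpoint(path, model, optimizer, reset_optimizer=False):
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    model.load_state_dict(checkpoint["state_dict"])
    if not reset_optimizer and checkpoint.get("optimizer") is not None:
        optimizer.load_state_dict(checkpoint["optimizer"])
    return model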
train_dataset = Dataset('train')
test_dataset = Dataset('val')

train_data_loader = data_utils.DataLoader(
    train_dataset, batch_size=hparams.syncnet_batch_size, shuffle=True,
    num_workers=hparams.num_workers)

test_data_loader = data_utils.DataLoader(
    test_dataset, batch_size=hparams.syncnet_batch_size,
    num_workers=8)

device = torch.device("cuda" if use_cuda else "cpu")

# Model
model = SyncNet().to(device)
print('total trainable params {}'.format(
    sum(p.numel() for p in model.parameters() if p.requires_grad)))

optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
                       lr=hparams.syncnet_lr)

if checkpoint_path is not None:
    load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer=False)

train(device, model, train_data_loader, test_data_loader, optimizer,
      checkpoint_dir=checkpoint_dir,
      checkpoint_interval=hparams.syncnet_checkpoint_interval,
      nepochs=hparams.nepochs)

# We add a save option: make sure the output directory exists before saving.
if not os.path.exists(model_dir):
    os.mkdir(model_dir)
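# The mkdir above implies a final save of the trained model into model_dir; the
# actual save call is not shown in this excerpt. A minimal, hypothetical version
# (the file name is an assumption):
torch.save({"state_dict": model.state_dict()},
           os.path.join(model_dir, "syncnet_final.pth"))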