def load_expert_discriminator(path):
    discriminator = SyncNet()
    print("Load sync net checkpoint from: {}".format(path))

    discriminator_checkpoint = _load(path)
    s = discriminator_checkpoint["state_dict"]
    new_s = {}
    for k, v in s.items():
        # Strip the 'module.' prefix that nn.DataParallel adds to parameter names.
        new_s[k.replace('module.', '')] = v
    discriminator.load_state_dict(new_s)

    discriminator = discriminator.to(device)
    return discriminator.eval()
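# A hypothetical usage sketch (the `syncnet_checkpoint_path` name is an
# assumption, not defined in this file): the expert is loaded once before
# training and its weights are frozen so only the generator is updated.
#
#   expert_disc = load_expert_discriminator(syncnet_checkpoint_path)
#   for p in expert_disc.parameters():
#       p.requires_grad = False  # score lip sync without training the expert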
        # Tail of the sample-dumping loop: write every frame of collage c to disk.
        for t in range(len(c)):
            cv2.imwrite('{}/{}_{}.jpg'.format(folder, batch_idx, t), c[t])

logloss = nn.BCELoss()

def cosine_loss(a, v, y):
    # Binary cross-entropy on the cosine similarity between the audio
    # embedding a and the video embedding v; y holds the in-sync labels.
    d = nn.functional.cosine_similarity(a, v)
    loss = logloss(d.unsqueeze(1), y)
    return loss

device = torch.device("cuda" if use_cuda else "cpu")

# Frozen expert SyncNet: it only scores lip sync, so gradients flow through
# the generated frames but never update the expert's weights.
syncnet = SyncNet().to(device)
for p in syncnet.parameters():
    p.requires_grad = False

recon_loss = nn.L1Loss()

def get_sync_loss(mel, g):
    # Keep only the lower half of each face, where the mouth is.
    g = g[:, :, :, g.size(3) // 2:]
    # Fold the T frames into the channel dimension: B, 3 * T, H//2, W
    g = torch.cat([g[:, :, i] for i in range(syncnet_T)], dim=1)
    a, v = syncnet(mel, g)
    # Every generated window is paired with its own mel chunk, so the
    # target label is always 1 ("in sync").
    y = torch.ones(g.size(0), 1).float().to(device)
    return cosine_loss(a, v, y)
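# Shape sketch for get_sync_loss, assuming the usual Wav2Lip configuration
# (syncnet_T = 5 frames, 96x96 face crops, 16-step mel chunks); the concrete
# sizes below are illustrative, not taken from this file:
#
#   mel : (B, 1, 80, 16)        mel-spectrogram chunk
#   g   : (B, 3, 5, 96, 96) --crop lower half--> (B, 3, 5, 48, 96)
#                           --cat over frames--> (B, 15, 48, 96)
#
#   loss = get_sync_loss(mel.to(device), g.to(device))  # scalar BCE loss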
test_dataset = Dataset("val")

# train_dataset is assumed to be constructed earlier, e.g. Dataset("train").
train_data_loader = data_utils.DataLoader(
    train_dataset, batch_size=hparams.syncnet_batch_size, shuffle=True,
    num_workers=hparams.num_workers)

test_data_loader = data_utils.DataLoader(
    test_dataset, batch_size=hparams.syncnet_batch_size,
    num_workers=8)

device = torch.device("cuda" if use_cuda else "cpu")

# Model
model = SyncNet().to(device)
print("total trainable params {}".format(
    sum(p.numel() for p in model.parameters() if p.requires_grad)))

optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
                       lr=hparams.syncnet_lr)

if checkpoint_path is not None:
    load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer=False)

train(device, model,