Example #1
def load_expert_discriminator(path):
    discriminator = SyncNet()
    print("Load sync net checkpoint from: {}".format(path))
    discriminator_checkpoint = _load(path)
    s = discriminator_checkpoint["state_dict"]
    new_s = {}
    # strip the 'module.' prefix that nn.DataParallel prepends to checkpoint keys
    for k, v in s.items():
        new_s[k.replace('module.', '')] = v
    discriminator.load_state_dict(new_s)

    discriminator = discriminator.to(device)
    return discriminator.eval()
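
A hypothetical call site for the loader above, assuming the imports from the surrounding file (torch, SyncNet, _load) and an existing checkpoint; the path is a placeholder, not taken from the snippet:

# Placeholder checkpoint path, for illustration only.
expert_disc = load_expert_discriminator("checkpoints/lipsync_expert.pth")
# The loader returns the model in eval mode but does not freeze it;
# Example #2 below additionally sets requires_grad = False on every parameter.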
Example #2
def load_model(path):
    model = SyncNet_color()
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    s = checkpoint["state_dict"]
    new_s = {}
    for k, v in s.items():
        new_s[k.replace('module.', '')] = v
    model.load_state_dict(new_s)

    model = model.to(device)
    # freeze the expert SyncNet so it acts as a fixed discriminator
    for p in model.parameters():
        p.requires_grad = False
    return model.eval()
Example #3
def load_expert(path):
    model = SyncNet_color()
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    s = checkpoint["state_dict"]
    new_s = {}
    for k, v in s.items():
        new_s[k.replace('module.', '')] = v
    model.load_state_dict(new_s)

    model = model.to(device)
    return model.eval()
Example #4
        for t in range(len(c)):
            cv2.imwrite('{}/{}_{}.jpg'.format(folder, batch_idx, t), c[t])


logloss = nn.BCELoss()


def cosine_loss(a, v, y):
    d = nn.functional.cosine_similarity(a, v)
    loss = logloss(d.unsqueeze(1), y)

    return loss


device = torch.device("cuda" if use_cuda else "cpu")
syncnet = SyncNet().to(device)
for p in syncnet.parameters():
    p.requires_grad = False

recon_loss = nn.L1Loss()


def get_sync_loss(mel, g):
    g = g[:, :, :, g.size(3) // 2:]  # keep only the lower half of each face (mouth region)
    g = torch.cat([g[:, :, i] for i in range(syncnet_T)], dim=1)
    # B, 3 * T, H//2, W
    a, v = syncnet(mel, g)
    y = torch.ones(g.size(0), 1).float().to(device)
    return cosine_loss(a, v, y)
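
A minimal smoke-test sketch for get_sync_loss, assuming the definitions above (syncnet, device, cosine_loss) plus syncnet_T == 5 and Wav2Lip-style shapes; the batch size, frame size, and mel chunk shape below are assumptions, not taken from the snippet:

# Hypothetical shapes: B samples, syncnet_T frames of 96x96 RGB faces,
# and one (1, 80, 16) mel-spectrogram chunk per sample.
B, T, H, W = 4, 5, 96, 96
mel = torch.randn(B, 1, 80, 16).to(device)
g = torch.rand(B, 3, T, H, W).to(device)   # generated frames scaled to [0, 1]
loss = get_sync_loss(mel, g)               # scalar: BCE on cosine similarity vs. target 1
print(loss.item())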

Example #5
    test_dataset = Dataset("val")

    train_data_loader = data_utils.DataLoader(
        train_dataset,
        batch_size=hparams.syncnet_batch_size,
        shuffle=True,
        num_workers=hparams.num_workers,
    )

    test_data_loader = data_utils.DataLoader(
        test_dataset, batch_size=hparams.syncnet_batch_size, num_workers=8)

    device = torch.device("cuda" if use_cuda else "cpu")

    # Model
    model = SyncNet().to(device)
    print("total trainable params {}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))

    optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
                           lr=hparams.syncnet_lr)

    if checkpoint_path is not None:
        load_checkpoint(checkpoint_path,
                        model,
                        optimizer,
                        reset_optimizer=False)

    train(
        device,
        model,
Example #6
    # Dataset and Dataloader setup
    train_dataset = Dataset('train')
    test_dataset = Dataset('val')

    train_data_loader = data_utils.DataLoader(
        train_dataset, batch_size=hparams.syncnet_batch_size, shuffle=True,
        num_workers=hparams.num_workers)

    test_data_loader = data_utils.DataLoader(
        test_dataset, batch_size=hparams.syncnet_batch_size,
        num_workers=8)

    device = torch.device("cuda" if use_cuda else "cpu")

    # Model
    model = SyncNet().to(device)
    print('total trainable params {}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

    optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
                           lr=hparams.syncnet_lr)

    if checkpoint_path is not None:
        load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer=False)

    train(device, model, train_data_loader, test_data_loader, optimizer,
          checkpoint_dir=checkpoint_dir,
          checkpoint_interval=hparams.syncnet_checkpoint_interval,
          nepochs=hparams.nepochs)
          
    # added option: also save the final model
    if not os.path.exists(model_dir):
Example #7
    for mode, loader in zip(["Training", "Validation"],
                            [train_data_loader, test_data_loader]):
        if not loader:
            continue
        print("{} Data Size: {}".format(mode, len(loader.dataset)))
        for batch in loader:
            for i, item in enumerate(batch):
                print("Item: {} Shape: {} Max: {} Min: {}".format(
                    i, item.shape, item.max(), item.min()))
            break

    device = torch.device("cuda" if use_cuda else "cpu")

    # Model
    model = SyncNet().to(device)
    print('total trainable params {}'.format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))

    wandb.watch(model)

    optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
                           lr=hparams.syncnet_lr)

    if checkpoint_path is not None:
        load_checkpoint(checkpoint_path,
                        model,
                        optimizer,
                        reset_optimizer=False)

    train(device,
Example #8
    test_dataset = Dataset('val')

    if args.finetune:
        train_dataset = Dataset('train')
        train_data_loader = data_utils.DataLoader(
            train_dataset, batch_size=hparams.syncnet_batch_size, shuffle=True,
            num_workers=hparams.num_workers)

    test_data_loader = data_utils.DataLoader(
        test_dataset, batch_size=hparams.syncnet_batch_size,
        num_workers=8)

    device = torch.device("cuda:{}".format(args.gpus[0]) if use_cuda else "cpu")

    # Model
    model = SyncNet().to(device)
    print('total trainable params {}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

    optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
                           lr=hparams.syncnet_lr)

    if checkpoint_path is not None:
        load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer=False)

    if args.finetune:
        finetune(device, model, train_data_loader, test_data_loader, optimizer,
                 checkpoint_dir=checkpoint_dir,
                 checkpoint_prefix=args.checkpoint_prefix,
                 checkpoint_interval=hparams.syncnet_checkpoint_interval,
                 nepochs=200)
    else: