Example #1
import torch
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader

# Data loader, model, optimizer, and a step-decay schedule (the learning rate
# is halved every 500 scheduler steps). `piano` (the dataset), `WaveNet`, and
# `recp_field` are defined elsewhere in the script.
training_data = DataLoader(piano, batch_size=2, shuffle=True)
model = WaveNet().cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.StepLR(optimizer, 500, gamma=0.5)

for epoch in range(20000):
    running_loss = 0.0
    for index, (data, target, _) in enumerate(training_data):
        data = data.float().cuda()  # Variable() is deprecated; plain tensors suffice
        logits = model(data)
        logits = logits[:, :, :-1]  # drop the last step so logits align with the shifted targets
        y = target[:, :, recp_field:].squeeze(1).cuda()  # targets beyond the receptive field

        loss = F.cross_entropy(logits, y)  # already on the GPU; no extra .cuda() needed
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        running_loss += loss.item()
    # print("[%d %.3f]" % (epoch + 1, running_loss / (index+1)))

    if epoch % 100 == 99:
        print("[%d %.3f]" % (epoch + 1, running_loss / (index + 1)))
        torch.save(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, 'checkpoint.pth')
print('finished')
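
To pick up training where this example leaves off, a minimal resume sketch (not part of the original snippet; it assumes the same `model` and `optimizer` objects and reads back the keys stored in 'checkpoint.pth' above):

checkpoint = torch.load('checkpoint.pth')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']  # continue the epoch counter from the saved value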
Example #2
        # This branch of the dataset dispatch trains on the Bach data; `train_data`,
        # `model`, `optimizer`, and `criterion` are set up earlier in the script.
        train_loader = DataLoader(train_data,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.workers)
        print("Number of training inputs:", len(train_data))
        print('Bach training')
        losses, predictions = train(model, optimizer, train_loader, criterion,
                                    args.epochs, False)
    else:
        # Unrecognized dataset name: nothing to train on.
        print("Invalid dataset.")
        quit()

    if args.save_path:
        # Move the model to the CPU before saving so the checkpoint loads on any device.
        model.to(torch.device('cpu'))
        state = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        pth = args.save_path + '/' + args.model_name + '.pt'
        print(pth)
        torch.save(state, pth)

    torch.cuda.empty_cache()  # release cached GPU memory before generation

    if args.save_path:
        # Generate audio with the trained model and write it next to the saved weights.
        save_wav = args.save_path + '/' + args.model_name
        D.generation(model,
                     device,
                     filename=save_wav,
                     seconds=args.gen_len,
                     dataset=args.dataset)
Example #3
            mel = mel.to(device)
            wav = wav.to(device)
            optimizer.zero_grad()
            # Teacher forcing: condition on the mel spectrogram and the waveform shifted by one step.
            output = model(mel, wav[:, :-1])
            output = output.transpose(-1, -2)

            # Flatten to (batch * time, 256) logits; the target is the next sample at every step.
            # .long() keeps the targets on `device` (no CPU round-trip via torch.LongTensor).
            loss = criterion(output.contiguous().view(-1, 256),
                             wav[:, 1:].contiguous().view(-1).long())
            running_loss += loss.item()

            loss.backward()
            optimizer.step()
            ctr += 1
            pbar.update(1)

        # Save a per-epoch snapshot of the weights.
        torch.save(model.state_dict(), 'weights' + str(epoch) + '.pt')

        # Validation pass: switch to eval mode once (call model.train() again before
        # the next training epoch) and disable gradient tracking.
        model.eval()
        for mel, wav in val_loader:
            mel = mel.to(device)
            wav = wav.to(device)
            with torch.no_grad():
                output = model(mel, wav[:, :-1])
                output = output.transpose(-1, -2)

                loss = criterion(output.contiguous().view(-1, 256),
                                 wav[:, 1:].contiguous().view(-1).long())
                val_loss += loss.item()

            val_ctr += 1