Code example #1
def initialize_model(n_latent_features, use_gpu=True):
    model = Autoencoder(n_latent_features)

    if use_gpu:
        model = model.cuda()

    return model
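Every example on this page assumes an Autoencoder module defined elsewhere in the host project. For orientation, here is a minimal sketch of a definition that fits the call sites below for 1x28x28 MNIST tensors, with forward() returning (code, reconstruction) to match the imgs_enc, _ = ae(imgs) and _, outputs = ae(imgs) patterns in examples #2 and #4. The layer shapes, names, and default latent size are illustrative assumptions, not any project's actual code.

import torch
import torch.nn as nn

class Autoencoder(nn.Module):
    # Minimal convolutional autoencoder for 1x28x28 MNIST tensors.
    # forward() returns (code, reconstruction), matching the call
    # patterns "imgs_enc, _ = ae(imgs)" and "_, outputs = ae(imgs)" below.
    def __init__(self, n_latent_features=64):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1),   # 28x28 -> 14x14
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, 3, stride=2, padding=1),  # 14x14 -> 7x7
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(32 * 7 * 7, n_latent_features),
        )
        self.decoder = nn.Sequential(
            nn.Linear(n_latent_features, 32 * 7 * 7),
            nn.ReLU(inplace=True),
            nn.Unflatten(1, (32, 7, 7)),
            nn.ConvTranspose2d(32, 16, 3, stride=2, padding=1,
                               output_padding=1),  # 7x7 -> 14x14
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1,
                               output_padding=1),  # 14x14 -> 28x28
            nn.Sigmoid(),  # reconstructions in [0, 1], matching ToTensor()
        )

    def forward(self, x):
        code = self.encoder(x)
        return code, self.decoder(code)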
Code example #2
File: train_ae.py Project: vwrs/IEGAN
def train():
    ae = Autoencoder()
    # load trained model
    # model_path = ''
    # g.load_state_dict(torch.load(model_path))

    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(ae.parameters(), lr=opt.lr, weight_decay=opt.decay)

    # load dataset
    # ==========================
    kwargs = dict(num_workers=1, pin_memory=True) if cuda else {}
    dataloader = DataLoader(
        datasets.MNIST('MNIST', download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor()
                       ])),
        batch_size=opt.batch_size, shuffle=True, **kwargs
    )
    N = len(dataloader)

    # get sample batch
    dataiter = iter(dataloader)
    samples, _ = next(dataiter)
    # cuda
    if cuda:
        ae.cuda()
        criterion.cuda()
        samples = samples.cuda()

    if opt.history:
        loss_history = np.empty(N*opt.epochs, dtype=np.float32)
    # train
    # ==========================
    for epoch in range(opt.epochs):
        loss_mean = 0.0
        for i, (imgs, _) in enumerate(dataloader):
            if cuda:
                imgs = imgs.cuda()

            # forward & backward & update params
            ae.zero_grad()
            _, outputs = ae(imgs)
            loss = criterion(outputs, imgs)
            loss.backward()
            optimizer.step()

            batch_loss = loss.item()  # scalar value of the 0-dim loss tensor
            loss_mean += batch_loss
            if opt.history:
                loss_history[N*epoch + i] = batch_loss
            show_progress(epoch+1, i+1, N, batch_loss)

        print('\ttotal loss (mean): %f' % (loss_mean/N))
        # generate fake images
        _, reconst = ae(samples)
        vutils.save_image(reconst.data,
                          os.path.join(IMAGE_PATH,'%d.png' % (epoch+1)),
                          normalize=False)
    # save models
    torch.save(ae.state_dict(), MODEL_FULLPATH)
    # save loss history
    if opt.history:
        np.save('history/'+opt.name, loss_history)
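Example #2 also leans on globals defined elsewhere in train_ae.py: opt, cuda, IMAGE_PATH, MODEL_FULLPATH, and show_progress. A hedged sketch of what that surrounding setup could look like, with option names inferred from their uses above and every default a guess:

import argparse
import os
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--name', default='ae')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--decay', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--epochs', type=int, default=25)
parser.add_argument('--history', action='store_true')
opt = parser.parse_args()

cuda = torch.cuda.is_available()
IMAGE_PATH = 'images'
MODEL_FULLPATH = os.path.join('models', opt.name + '.pth')

def show_progress(epoch, batch, total, loss):
    # single-line progress readout; the project's real helper may differ
    print('\repoch %d [%d/%d] loss: %.4f' % (epoch, batch, total, loss), end='')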
Code example #3
    print('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f}'.format(
        epoch, g_train_loss, c_train_loss))
    return (g_train_loss, c_train_loss)


train_loader, vocab = load(batch_size, seq_len)
autoencoder = Autoencoder(enc_hidden_dim, dec_hidden_dim, embedding_dim,
                          latent_dim, vocab.size(), dropout, seq_len)
autoencoder.load_state_dict(
    torch.load('autoencoder.th',
               map_location=lambda storage, loc: storage))  # load weights onto the CPU
generator = Generator(n_layers, block_dim)
critic = Critic(n_layers, block_dim)

g_optimizer = optim.Adam(generator.parameters(), lr=lr)
c_optimizer = optim.Adam(critic.parameters(), lr=lr)
if cuda:
    autoencoder = autoencoder.cuda()
    generator = generator.cuda()
    critic = critic.cuda()

best_loss = np.inf

for epoch in range(1, epochs + 1):
    g_loss, c_loss = train(epoch)
    loss = g_loss + c_loss
    if loss < best_loss:
        best_loss = loss
        print('Saved')
        torch.save(generator.state_dict(), 'generator.th')
        torch.save(critic.state_dict(), 'critic.th')
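The snippet shows only the tail of train() and the driver loop around it. As a rough guide, the body that tail implies is one epoch of latent-space adversarial training: the frozen autoencoder encodes real batches into codes, the generator maps noise to fake codes, and the critic learns to tell them apart. In the sketch below, the encode() method, the batch unpacking, the noise dimension, and the WGAN-style update schedule are all assumptions.

def train(epoch):
    autoencoder.eval()  # the pretrained autoencoder stays frozen
    g_train_loss = c_train_loss = 0.0
    for batch in train_loader:
        if cuda:
            batch = batch.cuda()
        with torch.no_grad():
            real_code = autoencoder.encode(batch)  # hypothetical encoder entry point
        noise = torch.randn(batch.size(0), latent_dim, device=real_code.device)

        # critic step: score real codes high and generated codes low
        c_optimizer.zero_grad()
        c_loss = critic(generator(noise).detach()).mean() - critic(real_code).mean()
        c_loss.backward()
        c_optimizer.step()
        c_train_loss += c_loss.item()

        # generator step: make generated codes score high under the critic
        g_optimizer.zero_grad()
        g_loss = -critic(generator(noise)).mean()
        g_loss.backward()
        g_optimizer.step()
        g_train_loss += g_loss.item()

    g_train_loss /= len(train_loader)
    c_train_loss /= len(train_loader)
    # ...followed by the print and return statements shown at the top of this example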
Code example #4
def train():
    g = Generator(Zdim)
    g.apply(weights_init)
    print(g)
    # load pretrained Autoencoder
    if opt.ae:
        ae = Autoencoder()
        ae.load_state_dict(torch.load(os.path.join(MODEL_PATH, opt.ae)))

    # custom loss function
    # ==========================
    criterion = MQSymKLLoss(th=opt.threshold)
    # setup optimizer
    # ==========================
    optimizer = optim.Adam(g.parameters(), lr=opt.lr, weight_decay=opt.decay)

    z = torch.FloatTensor(BS, Zdim, 1, 1).normal_(0, 1)
    z_pred = torch.FloatTensor(64, Zdim, 1, 1).normal_(0, 1)
    # cuda
    if cuda:
        g.cuda()
        criterion.cuda()
        z, z_pred = z.cuda(), z_pred.cuda()

    if opt.ae:
        if cuda:
            ae.cuda()
        ae.eval()
    # load dataset
    # ==========================
    kwargs = dict(num_workers=1, pin_memory=True) if cuda else {}
    dataloader = DataLoader(
        datasets.MNIST(
            'MNIST',
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor()
                # transforms.Normalize((0.1307,), (0.3081,))
            ])),
        batch_size=BS,
        shuffle=True,
        **kwargs)
    N = len(dataloader)
    if opt.history:
        loss_history = np.empty(N * opt.epochs, dtype=np.float32)
    # train
    # ==========================
    for epoch in range(opt.epochs):
        loss_mean = 0.0
        for i, (imgs, _) in enumerate(dataloader):
            if cuda:
                imgs = imgs.cuda()

            g.zero_grad()
            # forward & backward & update params
            z.normal_(0, 1)  # refresh the noise batch in place; shape was fixed at creation
            outputs = g(z)
            if opt.ae:
                imgs_enc, _ = ae(imgs)
                out_enc, _ = ae(outputs)
                loss = criterion(out_enc, imgs_enc)
            else:
                loss = criterion(outputs, imgs)
            loss.backward()
            optimizer.step()

            batch_loss = loss.item()  # scalar value of the 0-dim loss tensor
            loss_mean += batch_loss
            if opt.history:
                loss_history[N * epoch + i] = batch_loss
            show_progress(epoch + 1, i + 1, N, batch_loss)

        print('\ttotal loss (mean): %f' % (loss_mean / N))
        # generate fake images
        vutils.save_image(g(z_pred).data,
                          os.path.join(IMAGE_PATH, '%d.png' % (epoch + 1)),
                          normalize=False)
        # normalize=True)
    # save models
    torch.save(g.state_dict(), MODEL_FULLPATH)
    # save loss history
    if opt.history:
        np.save('history/' + opt.name, loss_history)
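Once training finishes, sampling from the saved generator takes only a few lines; a sketch reusing the names above (the standalone evaluation step itself is an assumption):

# reload the trained generator and render a grid of 64 samples
g = Generator(Zdim)
g.load_state_dict(torch.load(MODEL_FULLPATH, map_location='cpu'))
g.eval()
with torch.no_grad():
    z = torch.randn(64, Zdim, 1, 1)
    vutils.save_image(g(z), 'samples.png', normalize=False)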
Code example #5
if __name__ == "__main__":
    directory = sys.argv[1]
    filenames = [directory + "/%05d_batched.pkl.gz" % i for i in range(9000)]
    # print(filenames)
    train_count = int(len(filenames) * 0.9)
    train_filenames = filenames[:train_count]
    valid_filenames = filenames[train_count:]

    model = Autoencoder(0, 1)
    # valid_data = torch.from_numpy(signal_data_valid).cuda()[:, None, :]
    for p in model.parameters():
        if p.dim() > 1:
            torch.nn.init.xavier_uniform_(p)
    # model = torch.load('model.pt')
    model = model.cuda()

    parameters = model.parameters()
    optimizer = optim.Adam(parameters, lr=1e-3)  # , weight_decay=1e-6)
    # optimizer = optim.SGD(parameters, lr=0.05, momentum=0.999)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     factor=0.5,
                                                     patience=10,
                                                     verbose=True,
                                                     threshold=0.0001,
                                                     threshold_mode='rel',
                                                     cooldown=0,
                                                     min_lr=1e-6,
                                                     eps=1e-08)
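Unlike most schedulers, ReduceLROnPlateau must be handed the monitored quantity on every step() call, so the loop that follows this setup needs a validation loss each epoch. A minimal sketch, where run_epoch is a hypothetical helper that streams the pickled batch files through the model:

for epoch in range(100):
    train_loss = run_epoch(model, train_filenames, optimizer)       # update pass
    valid_loss = run_epoch(model, valid_filenames, optimizer=None)  # eval-only pass
    scheduler.step(valid_loss)  # halves the LR after 10 epochs without improvement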
Code example #6
img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # MNIST images have a single channel
])
train_dataset = MNIST(data_dir,
                      train=True,
                      download=True,
                      transform=img_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = MNIST(data_dir,
                     train=False,
                     download=True,
                     transform=img_transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

model = Autoencoder()
if use_gpu:
    model.cuda()

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=1e-5)

loss_list = []
test_loss_list = []

for epoch in range(num_epochs + 1):
    # train
    train_loss = 0
    num_iters = 0
    for img, _ in train_loader:
        # batch_size x 784