Example #1
data_loader = VOCSegDataSet(is_transform=True, img_size=(FCNConfig["IMAGE_SIZE"], FCNConfig["IMAGE_SIZE"]),
                            augmentations=Compose([RandomRotate(10),
                                                   RandomHorizontallyFlip()]), img_norm=True)

train_loader = DataLoader(data_loader, batch_size=FCNConfig["BATCH_SIZE"], shuffle=True)

'''
Train
'''
bar = ProgressBar(FCNConfig["EPOCHS"], len(train_loader), "Loss:%.3f")
for epoch in range(1, FCNConfig["EPOCHS"] + 1):
    model.train()
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images.cuda() if FCNConfig["GPU_NUMS"] > 0 else images)
        labels = Variable(labels.cuda() if FCNConfig["GPU_NUMS"] > 0 else labels)

        optimizer.zero_grad()
        outputs = model(images)

        loss = loss_fn(input=outputs, target=labels)

        loss.backward()
        optimizer.step()

        bar.show(epoch, loss.item())
    torch.save(model.state_dict(), "FCN32s_%03d.pth" % epoch)



Example #2
        D_fake = Net_D(image_fake, label_var)
        D_fake_loss = BCE_LOSS(D_fake, label_false_var)

        D_loss = D_real_loss + D_fake_loss
        D_loss.backward()
        D_optimizer.step()

        Net_G.zero_grad()
        noise = torch.randn(mini_batch, CONFIG["NOISE_DIM"])
        Noise_var = Variable(noise.cuda() if CONFIG["GPU_NUMS"] > 0 else noise)
        image_fake = Net_G(Noise_var, label_var)
        D_fake = Net_D(image_fake, label_var)

        G_loss = BCE_LOSS(D_fake, label_true_var)

        G_loss.backward()
        G_optimizer.step()

        bar.show(epoch, D_loss.item(), G_loss.item())

    test_images = Net_G(Predict_Noise_var, Predict_y)

    torchvision.utils.save_image(test_images.data[:100],
                                 'outputs/Face_%03d.png' % (epoch),
                                 nrow=10,
                                 normalize=True,
                                 range=(-1, 1),
                                 padding=0)
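
The `label_true_var` and `label_false_var` targets consumed above are created before the loop; a minimal sketch assuming a BCE discriminator head (the batch size is an assumption):

# Hedged sketch, not the repo's exact code: BCE targets for real and
# generated batches, named as in the snippet.
import torch
from torch.autograd import Variable

mini_batch = 64                                      # assumed batch size
label_true_var = Variable(torch.ones(mini_batch))    # target 1.0 for real images
label_false_var = Variable(torch.zeros(mini_batch))  # target 0.0 for fake images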
Example #3
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x


model = autoencoder().cuda() if torch.cuda.is_available() else autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                             weight_decay=1e-5)
proBar = ProgressBar(num_epochs, len(dataloader), "loss:%.3f")
for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = Variable(img).cuda() if torch.cuda.is_available() else Variable(img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(loss.item())

    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
        save_image(pic, './dc_img/image_{}.png'.format(epoch))
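
`to_img` is defined outside this excerpt; a minimal sketch assuming single-channel 28x28 images and tanh-style [-1, 1] outputs:

# Hedged sketch of the to_img helper used above (shape and normalization
# are assumptions).
def to_img(x):
    x = 0.5 * (x + 1)                  # map [-1, 1] back to [0, 1]
    x = x.clamp(0, 1)
    x = x.view(x.size(0), 1, 28, 28)   # restore N x C x H x W
    return x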

Example #4

            noises.data.copy_(
                t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))
            fake_img = netG(noises).detach()  # generate fake images from the noise
            output = netD(fake_img)
            error_d_fake = criterion(output, fake_labels)
            error_d_fake.backward()
            optimizer_discriminator.step()

            error_d = error_d_fake + error_d_real

        if ii % 1 == 0:
            # train the generator
            netG.zero_grad()
            noises.data.copy_(
                t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))
            fake_img = netG(noises)
            output = netD(fake_img)
            error_g = criterion(output, true_labels)
            error_g.backward()
            optimizer_generator.step()

        proBar.show(epoch, error_d.item(), error_g.item())

    # save the model and images
    fix_fake_imgs = netG(fix_noises)
    tv.utils.save_image(fix_fake_imgs.data[:64],
                        'outputs/Pytorch_AnimateFace_%03d.png' % epoch,
                        normalize=True,
                        range=(-1, 1))

t.save(netG.state_dict(), "outputs/DCGAN_AnimateFace_Pytorch_Generator.pth")
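
`fix_noises` above is sampled once before training so the grids saved each epoch are directly comparable; a minimal sketch, with the batch size and CONFIG values as assumptions:

# Hedged sketch, not the repo's exact setup: a fixed noise batch for
# periodic visual checks of the generator.
import torch as t

CONFIG = {"NOISE_DIM": 100}   # assumed noise dimensionality
fix_noises = t.randn(64, CONFIG["NOISE_DIM"], 1, 1)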
Example #5
for step in range(STEPS):
    out = net(x)  # input x and predict based on x
    loss = loss_func(
        out, y
    )  # must be (1. nn output, 2. target), the target label is NOT one-hotted

    optimizer.zero_grad()  # clear gradients for next train
    loss.backward()  # backpropagation, compute gradients
    optimizer.step()  # apply gradients

    _, prediction = torch.max(F.softmax(out, dim=1), 1)
    pred_y = prediction.data.numpy().squeeze()
    target_y = y.data.numpy()
    accuracy = sum(pred_y == target_y) / x.shape[0]

    bar.show(1, loss.item(), accuracy)

    if (step + 1) % DECAY_STEP == 0:
        out = net(Variable(torch.FloatTensor(x_show)))
        _, prediction = torch.max(F.softmax(out, dim=1), 1)
        pred_y = prediction.data.numpy().squeeze()
        predict.append(pred_y)
        myloss.append(loss.item())

fig, axes = plt.subplots()
plt.xlabel("X1", fontsize=15)
plt.ylabel("X2", fontsize=15)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.suptitle("Pytorch")
time_template = 'step = %d, train loss=%.9f'
Example #6
                               shuffle=True)

cnn = CNN().cuda() if GPU_NUMS > 0 else CNN()
optimizer = Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss().cuda() if GPU_NUMS > 0 else nn.CrossEntropyLoss()
proBar = ProgressBar(EPOCH, len(train_loader), "Loss: %.3f;Accuracy: %.3f")
for epoch in range(EPOCH):
    for step, (x,y) in enumerate(train_loader):
        b_x = Variable(x.cuda() if GPU_NUMS > 0 else x)
        b_y = Variable(y.type(torch.LongTensor).cuda() if GPU_NUMS > 0 else y.type(torch.LongTensor)).squeeze_()
        output = cnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        prediction = torch.max(F.softmax(output, dim=1), 1)[1]
        pred_y = prediction.cpu().data.numpy().squeeze()
        target_y = b_y.cpu().data.numpy()
        accuracy = sum(pred_y == target_y) / len(target_y)

        proBar.show(epoch, loss.item(), accuracy)

test_x = torch.unsqueeze(torch.FloatTensor(train_data.test_data), dim=1)
test_x = Variable(test_x.cuda() if GPU_NUMS > 0 else test_x)
test_y = torch.LongTensor(train_data.test_labels)
test_y = Variable(test_y.cuda() if GPU_NUMS > 0 else test_y).squeeze()
test_output = cnn(test_x)
pred_y = torch.max(F.softmax(test_output, dim=1), 1)[1].cpu().data.numpy().squeeze()
target_y = test_y.cpu().data.numpy()
accuracy = sum(pred_y == target_y) / len(target_y)
print("test accuracy is %.3f" % accuracy)
Example #7
        error_real.backward()

        D_x = output.data.mean()
        fake_pic = NetG(noise).detach()
        output2 = NetD(fake_pic)
        label.data.fill_(0)  # 0 for fake
        error_fake = criterion(output2.squeeze(), label)

        error_fake.backward()
        D_x2 = output2.data.mean()
        error_D = error_real + error_fake
        optimizerD.step()

        NetG.zero_grad()
        label.data.fill_(1)
        noise.data.normal_(0, 1)
        fake_pic = NetG(noise)
        output = NetD(fake_pic)
        error_G = criterion(output.squeeze(), label)
        error_G.backward()

        optimizerG.step()
        D_G_z2 = output.data.mean()
        bar.show(epoch, error_D.item(), error_G.item())

    fake_u = NetG(fix_noise)

    torchvision.utils.save_image(fake_u.data[:64],
                                 "outputs/MNIST_%03d.png" % epoch,
                                 normalize=True,
                                 range=(-1, 1))
Example #8
        x = self.encoder(x)
        x = self.decoder(x)
        return x


model = autoencoder().cuda() if GPU_NUMS > 0 else autoencoder()
criterion = MSELoss()
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

proBar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")

for epoch in range(1, EPOCH + 1):
    for data in train_loader:
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img).cuda() if GPU_NUMS > 0 else Variable(img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(epoch, loss.item())
    # ===================log========================

    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
        save_image(pic, 'output/image_{}.png'.format(epoch))

torch.save(model.state_dict(), 'sim_autoencoder.pth')
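
The `encoder` and `decoder` referenced in the truncated forward above are defined outside this excerpt; a minimal sketch consistent with the flattened 28x28 inputs (layer sizes are assumptions):

# Hedged sketch of a fully-connected autoencoder matching the
# img.view(img.size(0), -1) flattening in the loop above.
import torch.nn as nn

class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 128), nn.ReLU(True),
            nn.Linear(128, 32))
        self.decoder = nn.Sequential(
            nn.Linear(32, 128), nn.ReLU(True),
            nn.Linear(128, 28 * 28), nn.Tanh())

    def forward(self, x):
        return self.decoder(self.encoder(x))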
Example #9
dataset = ImageFolder(root='/input/face/64_crop',
                      transform=Compose([ToTensor()]))
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

bar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")

model.train()
train_loss = 0
for epoch in range(EPOCH):
    for ii, (image, label) in enumerate(train_loader):
        mini_batch = image.shape[0]
        data = Variable(image.cuda() if GPU_NUMS > 0 else image)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()

        bar.show(loss.item() / mini_batch)

    model.eval()
    z = torch.randn(BATCH_SIZE, model.latent_variable_size)
    z = Variable(z.cuda() if GPU_NUMS > 0 else z)
    with torch.no_grad():
        recon = model.decode(z)
    torchvision.utils.save_image(recon.data,
                                 'output/Face64_%02d.png' % (epoch + 1))

torch.save(model.state_dict(), "output/VAE_64_Face_Pytorch_Generator.pth")
Example #10

loss_func = MSELoss()

x_data, y_data = Variable(x_train), Variable(y_train)
bar = ProgressBar(1, STEPS, "train_loss:%.9f")

predict = []
myloss = []

for step in range(STEPS):
    prediction = Net(x_data)
    loss = loss_func(prediction, y_data)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    bar.show(1, loss.item())
    if (step + 1) % DECAY_STEP == 0:
        predict.append(prediction.data.numpy())
        myloss.append(loss.item())
fig, ax = plt.subplots()
t = np.arange(len(x_data))
ln, = ax.plot([], [], 'r-', animated=False)
plt.scatter(t, y_data)
plt.title('Pytorch', fontsize=18)
time_template = 'step = %d, train loss=%.9f'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
plt.grid(True)


def init():
Example #11
        d_loss_a = -torch.mean(torch.log(d_output_real[:,0]) + torch.log(1 - d_output_fake[:,0]))

        # Mutual Information Loss
        output_cc = d_output_fake[:, 1:1+CONFIG["CC_DIM"]]
        output_dc = d_output_fake[:, 1+CONFIG["CC_DIM"]:]
        d_loss_cc = torch.mean((((output_cc - 0.0) / 0.5) ** 2))
        # d_loss_dc = -(torch.mean(torch.sum(dc * output_dc, 1)) + torch.mean(torch.sum(dc * dc, 1)))
        d_loss_dc = -(torch.mean(torch.sum(dc_var * output_dc, 1)) + torch.mean(torch.sum(dc_var * dc_var, 1)))

        d_loss = d_loss_a + CONFIG["CONTINUOUS_WEIGHT"] * d_loss_cc + 1.0 * d_loss_dc

        # Optimization
        NetD.zero_grad()
        d_loss.backward(retain_graph=True)
        d_optimizer.step()

        # ===================== Train G =====================#
        # Fake -> Real
        g_loss_a = -torch.mean(torch.log(d_output_fake[:,0]))

        g_loss = g_loss_a + CONFIG["CONTINUOUS_WEIGHT"] * d_loss_cc + 1.0 * d_loss_dc

        # Optimization
        NetG.zero_grad()
        g_loss.backward()
        g_optimizer.step()

        bar.show(epoch, d_loss.item(), g_loss.item())

    fake_images = NetG(torch.cat((fixed_noise_var, fixed_cc_var, fixed_dc_var), 1))
    torchvision.utils.save_image(fake_images.data, "outputs/mnist_%03d.png" % epoch, nrow=10)
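
The continuous code `cc` and one-hot discrete code `dc` that the InfoGAN losses above consume are sampled before this excerpt; a minimal sketch with assumed dimensions:

# Hedged sketch, not the repo's exact code: InfoGAN latent codes.
import torch

batch, cc_dim, dc_dim = 64, 2, 10                    # assumed sizes
cc = torch.rand(batch, cc_dim) * 2 - 1               # continuous codes in [-1, 1]
dc = torch.zeros(batch, dc_dim)
dc[torch.arange(batch), torch.randint(0, dc_dim, (batch,))] = 1.0  # one-hot discrete codes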
Example #12

for step in range(STEPS):
    out = net(x)  # input x and predict based on x
    loss = loss_func(
        out, y
    )  # must be (1. nn output, 2. target), the target label is NOT one-hotted

    optimizer.zero_grad()  # clear gradients for next train
    loss.backward()  # backpropagation, compute gradients
    optimizer.step()  # apply gradients

    _, prediction = torch.max(F.softmax(out, dim=1), 1)
    pred_y = prediction.data.numpy().squeeze()
    target_y = y.data.numpy()
    accuracy = sum(pred_y == target_y) / x.shape[0]

    bar.show(step, loss.item(), accuracy)

    if (step + 1) % DECAY_STEP == 0:
        out = net(Variable(torch.FloatTensor(x_show)))
        _, prediction = torch.max(F.softmax(out, dim=1), 1)
        pred_y = prediction.data.numpy().squeeze()
        predict.append(pred_y)
        myloss.append(loss.item())

fig, axes = plt.subplots()
plt.xlabel(iris_feature[0], fontsize=15)
plt.ylabel(iris_feature[1], fontsize=15)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.suptitle("Pytorch")
time_template = 'step = %d, train loss=%.9f'
Example #13
# prepare the network
model = json["model"](json["pretrained"])

model = torch.nn.DataParallel(model)
if GPU_NUMS > 1:
    model = model.cuda()
optimizer = Adam(model.parameters(), lr=LR)
loss_func = CrossEntropyLoss().cuda() if GPU_NUMS > 0 else CrossEntropyLoss()

# training data
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f,acc:%.3f")
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        data = Variable(x.cuda() if GPU_NUMS > 0 else x)
        label = Variable(
            torch.squeeze(y, dim=1).type(torch.LongTensor).cuda() if
            GPU_NUMS > 0 else torch.squeeze(y, dim=1).type(torch.LongTensor))
        optimizer.zero_grad()
        output = model(data)

        loss = loss_func(output, label)
        loss.backward()
        optimizer.step()

        prediction = torch.max(softmax(output, dim=1), 1)[1]
        pred_label = prediction.data.cpu().numpy().squeeze()
        target_y = label.data.cpu().numpy()
        accuracy = sum(pred_label == target_y) / len(target_y)

        proBar.show(loss.item(), accuracy)
torch.save(model.state_dict(), "%s.pth" % MODEL)
Example #14
        D_train_loss = D_LOSS_REAL + D_LOSS_FAKE
        D_train_loss.backward()
        D_optimizer.step()

        NetG.zero_grad()
        img_fake = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)
        label_fake = (torch.rand(mini_batch, 1) * 10).type(
            torch.LongTensor).squeeze()
        img_fake_var = Variable(
            img_fake.cuda() if CONFIG["GPU_NUMS"] > 0 else img_fake)
        label_fake_G_var = Variable(
            onehot[label_fake].cuda() if CONFIG["GPU_NUMS"] > 0 else onehot[label_fake])
        label_fake_D_var = Variable(
            fill[label_fake].cuda() if CONFIG["GPU_NUMS"] > 0 else fill[label_fake])
        g_result = NetG(img_fake_var, label_fake_G_var)
        d_result = NetD(g_result, label_fake_D_var)
        d_result = d_result.squeeze()
        G_train_loss = BCELoss()(d_result, label_true_var)
        G_train_loss.backward()
        G_optimizer.step()

        bar.show(epoch, D_train_loss.item(), G_train_loss.item())

    test_images = NetG(fixed_z_, fixed_y_label_)

    torchvision.utils.save_image(test_images.data[:100],
                                 'outputs/mnist_%03d.png' % (epoch),
                                 nrow=10,
                                 normalize=True,
                                 range=(-1, 1),
                                 padding=0)
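
The `onehot` and `fill` tensors indexed above are the usual cDCGAN label lookups: `onehot` feeds the class to the generator as a 10x1x1 vector, `fill` feeds it to the discriminator as full-resolution channel maps. A minimal sketch, with the image size as an assumption:

# Hedged sketch of the lookup tensors; img_size is an assumption.
import torch

img_size = 32
onehot = torch.zeros(10, 10)
onehot = onehot.scatter_(1, torch.arange(10).view(10, 1), 1).view(10, 10, 1, 1)
fill = torch.zeros(10, 10, img_size, img_size)
for i in range(10):
    fill[i, i, :, :] = 1   # label i -> all-ones map in channel i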
Example #15
        y.data.resize_(current_batch_size).fill_(1)
        y_pred = NetD(x)
        errD_real = criterion(y_pred, y)
        errD_real.backward()
        D_real = y_pred.data.mean()

        z.data.resize_(current_batch_size, CONFIG["NOISE_DIM"], 1, 1).normal_(0, 1)
        x_fake = NetG(z)
        y.data.resize_(current_batch_size).fill_(0)
        y_pred_fake = NetD(x_fake.detach())
        errD_fake = criterion(y_pred_fake, y)
        errD_fake.backward()
        D_fake = y_pred_fake.data.mean()
        errD = errD_real + errD_fake
        optimizerD.step()

        for p in NetD.parameters():
            p.requires_grad = False

        NetG.zero_grad()
        y.data.resize_(current_batch_size).fill_(1)
        y_pred_fake = NetD(x_fake)
        errG = criterion(y_pred_fake, y)
        errG.backward(retain_graph=True)
        D_G = y_pred_fake.data.mean()
        optimizerG.step()

        bar.show(epoch, errD.item(), errG.item())
    fake_test = NetG(z_test)
    tv.utils.save_image(fake_test.data, 'outputs/Cat_%03d.png' %epoch, nrow=10, normalize=True)
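
One detail the excerpt leaves implicit: after the generator step, `NetD`'s parameters stay frozen, so the (truncated) top of the discriminator step presumably unfreezes them, along these lines:

# Hedged sketch: undo the freeze at the start of each discriminator step,
# otherwise D would never train again after the first generator update.
for p in NetD.parameters():
    p.requires_grad = True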
Example #16
        latent_samples = torch.randn(bs, z_dim)
        g_gen_input = Variable(latent_samples)
        if GPU_NUMS > 0:
            g_gen_input = g_gen_input.cuda()
        g_fake_data = generator(g_gen_input)
        g_fake_decision = discriminator(g_fake_data)
        labels = Variable(torch.ones(bs))
        if GPU_NUMS > 0:
            labels = labels.cuda()
        g_loss = criterion(g_fake_decision, labels)  # we want to fool, so pretend it's all genuine

        g_loss.backward()
        g_optimizer.step()  # Only optimizes G's parameters

    loss_d_real = d_real_loss.item()
    loss_d_fake = d_fake_loss.item()
    loss_g = g_loss.item()

    progBar.show(loss_d_real, loss_d_fake, loss_g)
    if train_iter == 1 or train_iter % 100 == 0:
        msg = 'Iteration {}: D_loss(real/fake): {:.6g}/{:.6g} G_loss: {:.6g}'.format(train_iter, loss_d_real, loss_d_fake, loss_g)

        gen_samples = g_fake_data.data.cpu().numpy()

        visualizer.draw(real_samples, gen_samples, msg, show=False)
        visualizer.savefig('outputs/Pytorch_Batman_%04d' % train_iter)

torch.save(generator.state_dict(), "outputs/GAN_Batman_Pytorch_Generator.pth")

Example #17

loss = tf.reduce_mean(tf.square(y - prediction))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

predict = []
myloss = []
bar = ProgressBar(1, STEPS, "train_loss:%.9f")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(STEPS):
    _, train_loss, prediction_value = sess.run([train_step, loss, prediction],
                                               feed_dict={
                                                   x: x_train,
                                                   y: y_train
                                               })

    bar.show(1, train_loss)
    if (step + 1) % DECAY_STEP == 0:
        predict.append(prediction_value)
        myloss.append(train_loss)

fig, ax = plt.subplots()
t = np.arange(len(x_train))
ln, = ax.plot([], [], 'r-', animated=False)
plt.scatter(t, y_train)
time_template = 'step = %d, train loss=%.9f'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
plt.title('Tensorflow', fontsize=18)
plt.grid(True)


def init():
Example #18
def loss_function(recon_x, x, mu, logvar):
    BCE = reconstruction_function(recon_x, x)  # reconstruction loss
    # KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.sum(KLD_element).mul_(-0.5)
    return BCE + KLD

optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(EPOCH):
    model.train()
    train_loss = 0
    for batch_idx, data in enumerate(train_loader):
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img)
        if torch.cuda.is_available():
            img = img.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(img)
        loss = loss_function(recon_batch, img, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        proBar.show(loss.item() / len(img))

    if epoch % 10 == 0:
        save = to_img(recon_batch.cpu().data)
        save_image(save, 'output/image_{}.png'.format(epoch))

torch.save(model.state_dict(), 'output/vae.pth')
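
`reconstruction_function` and `proBar` are created before this excerpt; a minimal sketch of a reconstruction term consistent with the summed KLD it is added to (whether the repo uses MSE or BCE here is an assumption):

# Hedged sketch: a summed reconstruction loss so it pairs with the
# summed KL divergence in loss_function above.
import torch.nn as nn

reconstruction_function = nn.MSELoss(reduction='sum')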