Example #1
Net_D = Discriminator()
Net_G = Generator()
if GPU_NUMS > 1:
    Net_G.cuda()
    Net_D.cuda()
    Net_D = DataParallel(Net_D)
    Net_G = DataParallel(Net_G)

G_optimizer = Adam(Net_G.parameters(), lr=LR, betas=(0.5, 0.999))
D_optimizer = Adam(Net_D.parameters(), lr=LR, betas=(0.5, 0.999))

label_true = torch.ones(BATCH_SIZE)
label_false = torch.zeros(BATCH_SIZE)
label_true_var = Variable(label_true.cuda() if GPU_NUMS > 1 else label_true)
label_false_var = Variable(label_false.cuda() if GPU_NUMS > 1 else label_false)
proBar = ProgressBar(EPOCHS, len(train_loader), "D loss:%.3f; G loss:%.3f")
for epoch in range(EPOCHS):
    for image, label in train_loader:
        label = one_hot(label.long().squeeze())
        image_var = Variable(image.cuda() if GPU_NUMS > 1 else image)
        label_var = Variable(label.cuda() if GPU_NUMS > 1 else label)

        Noise_var = Variable(torch.randn(BATCH_SIZE, NOISE_DIM))
        '''
        Train the discriminator
        '''
        Net_D.zero_grad()
        D_real = Net_D(image_var, label_var)
        D_real_loss = BCELoss()(D_real, label_true_var)

        image_fake = Net_G(Noise_var, label_var)
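
The snippet is cut off just after the fake images are produced. A minimal, self-contained sketch of how a conditional-GAN update of this shape typically continues; the tiny linear networks and random batch are stand-ins so it runs on its own, and it is written against current PyTorch (no Variable wrappers):

import torch
from torch import nn
from torch.optim import Adam

BATCH_SIZE, NOISE_DIM, NUM_CLASSES, IMG_DIM = 16, 62, 10, 784

Net_G = nn.Sequential(nn.Linear(NOISE_DIM + NUM_CLASSES, IMG_DIM), nn.Tanh())
Net_D = nn.Sequential(nn.Linear(IMG_DIM + NUM_CLASSES, 1), nn.Sigmoid())
G_optimizer = Adam(Net_G.parameters(), lr=2e-4, betas=(0.5, 0.999))
D_optimizer = Adam(Net_D.parameters(), lr=2e-4, betas=(0.5, 0.999))
bce = nn.BCELoss()

image = torch.rand(BATCH_SIZE, IMG_DIM)  # stand-in for a real batch
label = torch.eye(NUM_CLASSES)[torch.randint(0, NUM_CLASSES, (BATCH_SIZE,))]  # one-hot
ones, zeros = torch.ones(BATCH_SIZE, 1), torch.zeros(BATCH_SIZE, 1)

# discriminator step: real -> 1, fake (detached) -> 0
D_optimizer.zero_grad()
noise = torch.randn(BATCH_SIZE, NOISE_DIM)
image_fake = Net_G(torch.cat([noise, label], dim=1))
D_loss = bce(Net_D(torch.cat([image, label], dim=1)), ones) + \
         bce(Net_D(torch.cat([image_fake.detach(), label], dim=1)), zeros)
D_loss.backward()
D_optimizer.step()

# generator step: push D to score the fakes as real
G_optimizer.zero_grad()
G_loss = bce(Net_D(torch.cat([image_fake, label], dim=1)), ones)
G_loss.backward()
G_optimizer.step()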
Example #2
optimizerD = Adam(netd.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = Adam(netg.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

# criterion
criterion = nn.BCELoss()

fix_noise = Variable(t.FloatTensor(opt.batch_size, opt.nz, 1, 1).normal_(0, 1))
if opt.GPU_NUMS > 1:
    fix_noise = fix_noise.cuda()
    netd.cuda()
    netg.cuda()
    criterion.cuda() # it's a good habit

print('begin training, be patient')
bar = ProgressBar(opt.max_epoch, len(dataloader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(opt.max_epoch):
    for ii, data in enumerate(dataloader, 0):
        real, _ = data
        input = Variable(real)
        label = Variable(t.ones(input.size(0)))  # 1 for real
        noise = t.randn(input.size(0), opt.nz, 1, 1)
        noise = Variable(noise)

        if opt.GPU_NUMS > 1:
            noise = noise.cuda()
            input = input.cuda()
            label = label.cuda()

        # ----- train netd -----
        netd.zero_grad()
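
The excerpt stops right after netd.zero_grad(). A self-contained sketch of how the discriminator step in this DCGAN-style recipe usually proceeds; the toy modules and 2x2 "images" are assumptions kept small so the sketch runs standalone:

import torch as t
from torch import nn
from torch.optim import Adam

nz, bs = 100, 8
netg = nn.Sequential(nn.ConvTranspose2d(nz, 3, 4, 2, 1), nn.Tanh())   # (bs, nz, 1, 1) -> (bs, 3, 2, 2)
netd = nn.Sequential(nn.Conv2d(3, 1, 2), nn.Sigmoid(), nn.Flatten())  # (bs, 3, 2, 2) -> (bs, 1)
optimizerD = Adam(netd.parameters(), lr=2e-4, betas=(0.5, 0.999))
criterion = nn.BCELoss()

real = t.rand(bs, 3, 2, 2)
noise = t.randn(bs, nz, 1, 1)

netd.zero_grad()
# real images should score close to 1
error_real = criterion(netd(real), t.ones(bs, 1))
error_real.backward()
# detach() keeps generator weights out of this backward pass
fake = netg(noise).detach()
error_fake = criterion(netd(fake), t.zeros(bs, 1))
error_fake.backward()
optimizerD.step()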
Example #3
    fake_labels = Variable(t.zeros(config.BATCH_SIZE))
    fix_noises = Variable(t.randn(config.BATCH_SIZE, config.NOISE_Z, 1, 1))
    noises = Variable(t.randn(config.BATCH_SIZE, config.NOISE_Z, 1, 1))

    # errord_meter = AverageValueMeter()
    # errorg_meter = AverageValueMeter()

    if config.GPU_NUM > 1:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        true_labels, fake_labels = true_labels.cuda(), fake_labels.cuda()
        fix_noises, noises = fix_noises.cuda(), noises.cuda()

    proBar = ProgressBar(config.EPOCH_NUM, len(dataLoader),
                         "D Loss:%.3f;G Loss:%.3f")
    for epoch in range(config.EPOCH_NUM):
        for ii, (img, _) in enumerate(dataLoader):
            real_img = Variable(img)
            if config.GPU_NUM > 1:
                real_img = real_img.cuda()
            if ii % config.D_EVERY == 0:
                # Train the discriminator
                optimizer_discriminator.zero_grad()
                ## Push the discriminator to score real images as real
                output = netD(real_img)
                error_d_real = criterion(output, true_labels)
                error_d_real.backward()

                ## Push the discriminator to score fake images as fake
                noises.data.copy_(
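
The copy_ call is cut off mid-line. In the DCGAN tutorial this excerpt follows, the persistent noise buffer is refilled in place and, every few batches, a generator step runs; a self-contained sketch of that generator step with toy stand-in modules:

import torch as t
from torch import nn

nz, bs = 100, 8
netG = nn.Sequential(nn.ConvTranspose2d(nz, 1, 4, 2, 1), nn.Tanh())
netD = nn.Sequential(nn.Conv2d(1, 1, 2), nn.Sigmoid(), nn.Flatten())
optimizer_generator = t.optim.Adam(netG.parameters(), lr=2e-4, betas=(0.5, 0.999))
criterion = nn.BCELoss()
true_labels = t.ones(bs, 1)
noises = t.randn(bs, nz, 1, 1)

# refill the persistent noise buffer in place instead of reallocating
noises.data.copy_(t.randn(bs, nz, 1, 1))

# generator step: D is left alone, G is pushed toward the "real" label
optimizer_generator.zero_grad()
fake_img = netG(noises)
output = netD(fake_img)
error_g = criterion(output, true_labels)
error_g.backward()
optimizer_generator.step()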
Example #4
def sample_images(epoch):
    r, c = 10, 10

    fig, axs = plt.subplots(r, c)
    for i in range(c):
        z_dict = get_z(c1_len * 10, sequential = True)
        out_gen = Net_G(torch.cat([z_dict[k] for k in z_dict.keys()], dim = 1))

        for j in range(r):
            axs[j, i].imshow(np.round(out_gen[i * 10 + j, 0].cpu().data.numpy() * 255), cmap='gray')
            axs[j, i].axis('off')
    fig.savefig("output/mnist_%02d.png" % epoch)
    plt.close()

bar = ProgressBar(EPOCHS, len(train_loader), "D Loss:%.3f;G Loss:%.3f;Q Loss:%.3f")
for epoch in range(EPOCHS):
    for i, (data, targets) in enumerate(train_loader):
        ones = Variable(torch.ones(data.size(0), 1))
        zeros = Variable(torch.zeros(data.size(0), 1))
        if GPU_NUMS > 1:
            ones, zeros = ones.cuda(), zeros.cuda()

        z_dict = get_z(data.size()[0])
        z = torch.cat([z_dict[k] for k in z_dict.keys()], dim = 1)

        data = Variable(data.float().cuda(non_blocking=True) if GPU_NUMS > 1 else data.float()) / 255
        targets = Variable(targets.float().cuda(non_blocking=True) if GPU_NUMS > 1 else targets.float())

        # Forward pass on real MNIST
        out_dis, hid = Net_D(data)
        c1 = LogSoftmax()(Q_cat(hid))
        loss_dis = mse(out_dis, ones) - torch.sum(targets * c1) / (torch.sum(targets) + 1e-3) # Loss for real MNIST
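
The last line mixes a plain GAN term with a categorical term computed through Q_cat; that second term is the InfoGAN-style code-reconstruction penalty. A self-contained sketch of just that piece, with stand-in features and a stand-in Q head:

import torch
from torch import nn

c1_len, bs, hid_dim = 10, 16, 32
Q_cat = nn.Linear(hid_dim, c1_len)  # stand-in for the Q head on D's hidden features
hid = torch.randn(bs, hid_dim)      # stand-in for those features
targets = torch.eye(c1_len)[torch.randint(0, c1_len, (bs,))]  # one-hot codes fed to G

c1 = nn.LogSoftmax(dim=1)(Q_cat(hid))
# negative log-likelihood of the injected code, normalised by the number of labelled rows
q_loss = -torch.sum(targets * c1) / (torch.sum(targets) + 1e-3)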
Example #5
train_datagen = SegDataGenerator(zoom_range=[0.5, 2.0],
                                 zoom_maintain_shape=True,
                                 crop_mode='random',
                                 crop_size=(cfg.IMAGE_SIZE, cfg.IMAGE_SIZE),
                                 rotation_range=0,
                                 shear_range=0,
                                 horizontal_flip=True,
                                 channel_shift_range=20,
                                 fill_mode='constant',
                                 label_cval=cfg.LABEL_CVAL)
val_datagen = SegDataGenerator()
steps_per_epoch = int(
    np.ceil(get_file_len(cfg.TRAIN_FILE_PATH) / float(cfg.BATCH_SIZE)))

probar = ProgressBar()
model.fit_generator(generator=train_datagen.flow_from_directory(
    file_path=cfg.TRAIN_FILE_PATH,
    data_dir=cfg.DATA_DIR,
    data_suffix=".jpg",
    label_dir=cfg.LABEL_DIR,
    label_suffix=".png",
    classes=cfg.NUM_CLASS,
    target_size=(cfg.IMAGE_SIZE, cfg.IMAGE_SIZE),
    color_mode='rgb',
    batch_size=cfg.BATCH_SIZE,
    shuffle=True,
    loss_shape=None,
    ignore_label=255),
                    steps_per_epoch=steps_per_epoch,
                    epochs=cfg.EPOCH_NUM,
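
The fit_generator call above is cut off after epochs=. The contract it relies on is an endless generator plus an explicit steps_per_epoch; a minimal self-contained sketch of that contract with a toy generator and model (not the segmentation setup itself):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def toy_generator(batch_size=8):
    while True:  # Keras generators must yield forever; epochs are bounded by steps_per_epoch
        x = np.random.rand(batch_size, 4)
        y = (x.sum(axis=1, keepdims=True) > 2).astype("float32")
        yield x, y

model = Sequential([Dense(1, activation="sigmoid", input_shape=(4,))])
model.compile(loss="binary_crossentropy", optimizer="adam")
model.fit_generator(toy_generator(), steps_per_epoch=10, epochs=2)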
Example #6
    x = x.clamp(0, 1)
    x = x.view(x.size(0), 1, 28, 28)
    return x

EPOCH = 100
BATCH_SIZE = 128
learning_rate = 1e-3

img_transform = transforms.Compose([
    transforms.ToTensor()
    # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_data = MNISTDataSet(train=True, transform=img_transform)
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f")

class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()

        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)
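
The class is cut off after encode(). The canonical completion of such a VAE adds the reparameterisation trick, a decoder, and a BCE+KL loss; the sketch below is consistent with the layer sizes above but is still an assumed continuation:

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)  # eps ~ N(0, I)
        return mu + eps * std        # differentiable sample from N(mu, std^2)

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar

def vae_loss(recon_x, x, mu, logvar):
    # reconstruction term plus KL divergence to the unit Gaussian prior
    bce = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld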
Example #7
    def forward(self, x):
        x = leaky_relu(self.map1(x), 0.1)
        x = leaky_relu(self.map2(x), 0.1)
        return sigmoid(self.map3(x))


generator = SimpleMLP(input_size=z_dim, hidden_size=50, output_size=DIMENSION)
discriminator = SimpleMLP(input_size=DIMENSION, hidden_size=100, output_size=1)
if GPU_NUMS > 0:
    generator.cuda()
    discriminator.cuda()
criterion = BCELoss()

d_optimizer = Adadelta(discriminator.parameters(), lr=1)
g_optimizer = Adadelta(generator.parameters(), lr=1)
progBar = ProgressBar(1, iterations,
                      "D Loss:(real/fake) %.3f/%.3f,G Loss:%.3f")
for train_iter in range(1, iterations + 1):
    for d_index in range(3):
        # 1. Train D on real+fake
        discriminator.zero_grad()

        #  1A: Train D on real
        real_samples = sample_2d(lut_2d, bs)
        d_real_data = Variable(torch.Tensor(real_samples))
        if GPU_NUMS > 0:
            d_real_data = d_real_data.cuda()
        d_real_decision = discriminator(d_real_data)
        labels = Variable(torch.ones(bs))
        if GPU_NUMS > 0:
            labels = labels.cuda()
        d_real_loss = criterion(d_real_decision, labels)  # ones = true
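
The forward() at the top references map1/map2/map3, but SimpleMLP itself is not shown; a three-layer definition consistent with that forward would look like the following (this exact layout is an assumption):

import torch.nn as nn

class SimpleMLP(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(SimpleMLP, self).__init__()
        self.map1 = nn.Linear(input_size, hidden_size)
        self.map2 = nn.Linear(hidden_size, hidden_size)
        self.map3 = nn.Linear(hidden_size, output_size)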
Example #8
    gan = KModels.Model(inputs=ganInput, outputs=ganOutput)
    if GPU_NUMS > 1:
        gan = multi_gpu_model(gan, GPU_NUMS)
        # gan = kutils.multi_gpu_model(gan, 2)
    gan.compile(loss='binary_crossentropy',
                optimizer=adam,
                metrics=['accuracy'])

    dLosses = []
    gLosses = []

    batchCount = X_train.shape[0] // batchSize
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)
    progBar = ProgressBar(epochs, batchCount,
                          "D Loss:%.3f,D Acc:%.3f;G Loss:%.3f,G Acc:%.3f")
    samples_image = []
    for e in range(1, (epochs + 1)):
        # Get a random set of input noise and images

        for _ in range(batchCount):
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            imageBatch = X_train[np.random.randint(0,
                                                   X_train.shape[0],
                                                   size=batchSize)]

            imageBatch = np.reshape(imageBatch,
                                    newshape=(batchSize, 28, 28, 1))

            # Generate fake MNIST images
            generatedImages = generator.predict(noise)
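
The excerpt ends right after the fakes are generated. In the Keras MNIST GAN tutorial this resembles, the step usually finishes as below; the sketch reuses the excerpt's own names, and the label-smoothing value is an assumption:

            # train D on the stacked real+fake batch
            X = np.concatenate([imageBatch, generatedImages])
            yDis = np.zeros(2 * batchSize)
            yDis[:batchSize] = 0.9  # one-sided label smoothing on the real half

            discriminator.trainable = True
            dloss = discriminator.train_on_batch(X, yDis)

            # train G through the frozen D, asking for the "real" label
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)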
Example #9
# Prepare the data
json = next(cfg for cfg in MODEL_LIST if cfg["name"] == MODEL)  # pick the config whose name matches MODEL
train_data = Cifar10DataSet(train=True, transform=json["transform"])
train_loader = DataLoader(dataset=train_data,
                          batch_size=BATCH_SIZE,
                          shuffle=True)

# Prepare the network
model = json["model"](json["pretrained"])
model = torch.nn.DataParallel(model).cuda()
optimizer = Adam(model.parameters(), lr=LR)
loss_func = CrossEntropyLoss().cuda()

# Training loop
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f,acc:%.3f")
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        data = Variable(x.cuda())
        label = Variable(torch.squeeze(y, dim=1).type(torch.LongTensor).cuda())
        output = model(data)
        loss = loss_func(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        prediction = torch.max(softmax(output), 1)[1]
        pred_label = prediction.data.cpu().numpy().squeeze()
        target_y = label.data.cpu().numpy()
        accuracy = sum(pred_label == target_y) / len(target_y)

        proBar.show(loss.data[0], accuracy)
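
The lookup at the top of this example assumes MODEL_LIST is a list of per-model config dicts; a minimal sketch of that assumed layout (the resnet18 entry is purely illustrative):

from torchvision import models, transforms

MODEL = "resnet18"
MODEL_LIST = [
    {
        "name": "resnet18",
        "model": lambda pretrained: models.resnet18(pretrained=pretrained),
        "pretrained": False,
        "transform": transforms.ToTensor(),
    },
]
json = next(cfg for cfg in MODEL_LIST if cfg["name"] == MODEL)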
Example #10
    dloss = 0
    aloss = 0

    x_train = imageList
    y_train = labelList

    x_train = (x_train.reshape((x_train.shape[0], ) +
                               (config.IMAGE_SIZE, config.IMAGE_SIZE,
                                config.IMAGE_CHANNEL)).astype('float32')) / 255

    batchCount = x_train.shape[0] // batchSize
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)
    progBar = ProgressBar(epochs, batchCount, "D Loss:%.3f;G Loss:%.3f")
    samples_image = []
    start = 0

    for epoch in range(1, (epochs + 1)):
        for _ in range(batchCount):
            noise = np.random.normal(size=(batchSize, 100))
            generatedImages = generator.predict(noise)

            imageBatch = x_train[np.random.randint(0,
                                                   x_train.shape[0],
                                                   size=batchSize)]

            combined_images = np.concatenate([generatedImages, imageBatch])

            labels = np.concatenate(
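
The np.concatenate call is cut off. In the well-known Keras DCGAN walkthrough this excerpt resembles, the step plausibly continues as below; the label convention (fakes first, so they get the 1s) and the noise amount are assumptions:

            labels = np.concatenate([np.ones((batchSize, 1)),
                                     np.zeros((batchSize, 1))])
            labels += 0.05 * np.random.random(labels.shape)  # label noise keeps D from saturating

            dloss = discriminator.train_on_batch(combined_images, labels)

            # generator step through the frozen stack, with misleading targets
            noise = np.random.normal(size=(batchSize, 100))
            misleading_targets = np.zeros((batchSize, 1))
            aloss = gan.train_on_batch(noise, misleading_targets)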
Example #11
    'GAN 2D Example Visualization of {}'.format(input_path))

generator = build_generator()
discriminator = build_discriminator()
discriminator.trainable = False

ganInput = Input(shape=(z_dim, ))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
if GPU_NUMS > 1:
    gan = multi_gpu_model(gan, GPU_NUMS)
gan.compile(loss='binary_crossentropy',
            optimizer=RMSprop(lr=0.0008, clipvalue=1.0, decay=1e-8))
progBar = ProgressBar(1, iterations, "D Loss:%.3f,G Loss:%.3f")

for epoch_iter in range(1, iterations + 1):
    for index in range(20):
        real_samples = sample_2d(lut_2d, bs)
        # print(real_samples.shape)

        noise = np.random.normal(-1, 1, size=[bs, z_dim])
        generateImage = generator.predict(noise)

        discriminator.trainable = True
        yDis = np.zeros(2 * bs)
        yDis[:bs] = 1
        d_loss = discriminator.train_on_batch(
            np.concatenate((real_samples, generateImage)), yDis)
    for index in range(1):
Example #12
    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        (X_train, _), (_, _) = read_mnist()

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)

        half_batch = int(batch_size / 2)
        proBar = ProgressBar(1, epochs,
                             "d loss:%.3f,d acc:%.3f;g loss:%.3f,g acc:%.3f")

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            # Generate a half batch of embedded images
            latent_fake = self.encoder.predict(imgs)

            latent_real = np.random.normal(size=(half_batch, self.encoded_dim))

            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(latent_real, valid)
            d_loss_fake = self.discriminator.train_on_batch(latent_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Select a random full batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Generator wants the discriminator to label the generated representations as valid
            valid_y = np.ones((batch_size, 1))

            # Train the generator
            g_loss = self.adversarial_autoencoder.train_on_batch(
                imgs, [imgs, valid_y])

            # Plot the progress
            # print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
            proBar.show(d_loss[0], d_loss[1], g_loss[0], g_loss[1])

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                # Select 25 random images for the sample grid
                idx = np.random.randint(0, X_train.shape[0], 25)
                imgs = X_train[idx]
                self.save_imgs(epoch, imgs)
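
save_imgs is called above but not shown; a plausible sketch (an assumption, and it presumes matplotlib.pyplot is imported as plt) that tiles the 25 sampled images into a 5x5 grid:

    def save_imgs(self, epoch, imgs):
        r, c = 5, 5
        imgs = 0.5 * imgs + 0.5  # images were scaled to [-1, 1]; map back to [0, 1]
        fig, axs = plt.subplots(r, c)
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(imgs[i * c + j, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
        fig.savefig("output/aae_%05d.png" % epoch)
        plt.close()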
Example #13
    Scale(IMG_SIZE),
    ToTensor(),
    Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_loader = torch.utils.data.DataLoader(
    # MNIST('data', train=True, download=True, transform=transform),
    MNISTDataSet('../ganData/mnist.npz', train=True, transform=transform),
    batch_size=BATCH_SIZE,
    shuffle=True)
'''
Start training
'''
BCE_loss = BCELoss()
G_optimizer = Adam(Net_G.parameters(), lr=LR, betas=(0.5, 0.999))
D_optimizer = Adam(Net_D.parameters(), lr=LR, betas=(0.5, 0.999))
bar = ProgressBar(EPOCHS, len(train_loader), "D Loss:%.3f; G Loss:%.3f")

# label preprocess
onehot = torch.zeros(10, 10)
onehot = onehot.scatter_(
    1,
    torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1),
    1).view(10, 10, 1, 1)
fill = torch.zeros([10, 10, IMG_SIZE, IMG_SIZE])

for i in range(10):
    fill[i, i, :, :] = 1
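
# How these lookup tables get used further down this recipe (sketch; the
# _demo names are illustrative): index with the integer class labels to get
# per-sample condition tensors.
y_demo = torch.LongTensor([3, 7])
y_label_demo = onehot[y_demo]  # (2, 10, 1, 1): concatenated with the noise for Net_G
y_fill_demo = fill[y_demo]     # (2, 10, IMG_SIZE, IMG_SIZE): stacked with the image for Net_D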

for epoch in range(EPOCHS):
    # learning rate decay
Example #14
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, 2, stride=2, padding=1),  # b, 1, 28, 28
            nn.Tanh())

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x


model = autoencoder().cuda() if torch.cuda.is_available() else autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=1e-5)
proBar = ProgressBar(num_epochs, len(dataloader), "loss:%.3f")
for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = Variable(img).cuda() if torch.cuda.is_available() else Variable(
            img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(loss.data[0])

    if epoch % 10 == 0:
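        # likely continuation (sketch): save a grid of reconstructions; assumes
        # `from torchvision.utils import save_image` and the to_img clamp/reshape
        # helper whose tail appears in Example #6
        pic = to_img(output.cpu().data)
        save_image(pic, './output/image_%03d.png' % epoch)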
Example #15
test_input, test_target = next(iter(test_data_loader))

real_a = torch.FloatTensor(BATCH_SIZE, IMAGE_CHANNEL, IMAGE_SIZE, IMAGE_SIZE)
real_b = torch.FloatTensor(BATCH_SIZE, OUTPUT_CHANNEL, IMAGE_SIZE, IMAGE_SIZE)

if GPU_NUMS > 1:
    Net_G = Net_G.cuda()
    Net_D = Net_D.cuda()
    lossGAN = lossGAN.cuda()
    lossL1 = lossL1.cuda()
    lossMSE = lossMSE.cuda()

real_a = Variable(real_a.cuda() if GPU_NUMS > 1 else real_a)
real_b = Variable(real_b.cuda() if GPU_NUMS > 1 else real_b)

bar = ProgressBar(EPOCHS, len(train_data_loader), "D loss:%.3f;G loss:%.3f")
for epoch in range(EPOCHS):
    for iteration, batch in enumerate(train_data_loader, 1):
        real_a_cpu, real_b_cpu = batch[0], batch[1]
        real_a.data.resize_(real_a_cpu.size()).copy_(real_a_cpu)
        real_b.data.resize_(real_b_cpu.size()).copy_(real_b_cpu)
        fake_b = Net_G(real_a)

        optimizer_D.zero_grad()

        # train with fake
        fake_ab = torch.cat((real_a, fake_b), 1)
        pred_fake = Net_D(fake_ab.detach())
        loss_d_fake = lossGAN(pred_fake, False)

        # train with real
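        # sketch of the usual continuation in this pix2pix-style recipe:
        # score real pairs, average the two halves, and step D
        real_ab = torch.cat((real_a, real_b), 1)
        pred_real = Net_D(real_ab)
        loss_d_real = lossGAN(pred_real, True)

        loss_d = (loss_d_fake + loss_d_real) * 0.5
        loss_d.backward()
        optimizer_D.step()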
Example #16
Discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
noise_temp = Input(shape=(NOISE_DIM, ))
label_temp = Input(shape=(1, ))
img = Generator([noise_temp, label_temp])
Discriminator.trainable = False

valid = Discriminator([img, label_temp])
Gan = Model([noise_temp, label_temp], valid)
Gan.compile(loss='binary_crossentropy', optimizer=optimizer)
'''
Start training
'''
half_batch = int(BATCH_SIZE / 2)
proBar = ProgressBar(1, EPOCH, "D loss: %f, acc.: %.2f%%; G loss: %f")
for epoch in range(1, EPOCH + 1):
    idx = np.random.randint(0, x_train.shape[0], size=half_batch)
    image, label = x_train[idx], y_train[idx]

    noise = np.random.normal(0, 1, (half_batch, 100))
    generate_image = Generator.predict([noise, label])

    valid = np.ones((half_batch, 1))
    fake = np.zeros((half_batch, 1))

    d_loss_real = Discriminator.train_on_batch([image, label], valid)
    d_loss_fake = Discriminator.train_on_batch([generate_image, label], fake)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

    noise = np.random.normal(0, 1, (BATCH_SIZE, 100))
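    # sketch of the usual finish: sample labels, train the stacked model toward
    # "valid", then report progress (the sampled-label scheme is an assumption)
    sampled_labels = np.random.randint(0, 10, BATCH_SIZE).reshape(-1, 1)
    valid = np.ones((BATCH_SIZE, 1))
    g_loss = Gan.train_on_batch([noise, sampled_labels], valid)

    proBar.show(d_loss[0], 100 * d_loss[1], g_loss)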