Example #1
0
def train(X, generator, discriminator, GAN, size_latent, batch=256, epoch=25):
  """Adversarial training loop for a GAN (Keras-style models).

  Per batch: the discriminator is trained on a half-batch of real images
  stacked with a half-batch of generator fakes, then the combined GAN model
  is trained on fresh latent points labelled 1 ("real") so the gradient
  rewards the generator for fooling the discriminator.

  Args:
    X: array of real training images; only X.shape[0] is read directly here.
    generator: generator model; saved to "gan.h5" at the end.
    discriminator: discriminator model, trained via train_on_batch.
    GAN: stacked generator+discriminator model, trained via train_on_batch.
    size_latent: dimensionality of the latent noise vector.
    batch: total batch size (split half real / half fake for the
      discriminator).
    epoch: number of passes over the data.

  Side effects: prints progress, calls show_accuracy once per epoch, and
  saves the trained generator to "gan.h5".
  """
  # Floor division avoids the float round-trip of int(a / b).
  count_batch = X.shape[0] // batch
  half_batch = batch // 2  # hoisted: was recomputed twice per inner step
  for i in range(epoch):
    print(f"EPOCH: {i + 1}")
    for j in range(count_batch):
      real_images, real_image_labels = sample_real(X, half_batch)
      fake_images, fake_image_labels = sample_fake(generator, half_batch,
                                                   size_latent)
      # Latent points are labelled as real (1) for the generator update.
      latent_points = sample_latent_point(batch, size_latent)
      latent_points_labels = np.ones((batch, 1))

      discriminator.train_on_batch(
          np.vstack((real_images, fake_images)),
          np.vstack((real_image_labels, fake_image_labels)))

      GAN.train_on_batch(latent_points, latent_points_labels)
    show_accuracy(X, generator, discriminator, size_latent)
  generator.save("gan.h5")
Example #2
0
    # NOTE(review): snippet fragment — the guard preceding this call
    # (presumably `if cuda:`) is outside this view.
    auxiliary_loss.cuda()

# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Configure data loader
from datasets import G10
dataset = G10(img_size=opt.img_size, use_labels=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True)

# Save a batch of real samples for reference. NOTE(review): "labled" is the
# helper's own (misspelled) keyword name — do not "fix" it here without also
# changing utils.sample_real.
from utils import sample_real
sample_real(dataloader=dataloader,
            batch_size=opt.batch_size,
            save_path=DIR_PATH,
            labled=True)

# Losses
# Per-epoch loss histories, filled by the training loop (not shown here).
g_losses_per_epoch = []
d_losses_per_epoch = []

# Optimizers — separate Adam instances so G and D update independently.
optimizer_G = torch.optim.Adam(generator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))

# CUDA tensor type when the `cuda` flag (set elsewhere) is truthy.
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
beta1 = 0.5

# Number of GPUs available. Use 0 for CPU mode.
ngpu = 0

# Decide which device we want to run on
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and ngpu > 0) else "cpu")

# NOTE(review): `dataset`/`dataloader` are rebound here, discarding the loader
# built above — this looks like two separate example scripts concatenated by
# the scrape, not one coherent program.
dataset = G10(img_size=image_size, just_spirals=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=True)

# Plot some training images
sample_real(dataloader=dataloader, batch_size=batch_size, run_name=RUN_NAME)


def weights_init(m):
    """DCGAN-style weight initializer, intended for `model.apply(weights_init)`.

    Conv* layers get weights drawn from N(0, 0.02); BatchNorm* layers get
    weights from N(1, 0.02) and a zero bias. All other modules are left
    untouched.
    """
    layer_kind = m.__class__.__name__
    if "Conv" in layer_kind:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in layer_kind:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


class Generator(nn.Module):
    # NOTE(review): this definition is truncated by the scrape — __init__ only
    # stores `ngpu` and the network layers are missing. The commented lines
    # below belong to a different snippet (an MNIST dataloader call) and are
    # not part of this class.
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu  # number of GPUs this model may be replicated over
#         train=True,
#         download=True,
#         transform=transforms.Compose(
#             [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
#         ),
#     ),
#     batch_size=opt.batch_size,
#     shuffle=True,
# )

# Configure the data loader; no_classes=False keeps class labels in the
# batches (presumably for a class-conditional GAN — confirm against G10).
dataset = G10(img_size=opt.img_size, no_classes=False)
dataloader = torch.utils.data.DataLoader(
    dataset, batch_size=opt.batch_size, shuffle=True
)

# Save labelled real samples for reference. NOTE(review): "labled" is the
# helper's own (misspelled) keyword name — leave as-is unless utils changes.
sample_real(dataloader=dataloader, batch_size=opt.batch_size, run_name=DIR, labled=True)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

# GPU-or-CPU tensor aliases; LongTensor presumably carries integer class
# labels — verify against the training loop.
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
def sample_image(n_row, batches_done):
    """Saves a grid of generated digits ranging from 0 to n_classes"""
    # Sample noise
    z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))
    # Get labels ranging from 0 to n_classes for n rows
    labels = np.array([num for _ in range(n_row) for num in range(n_row)])
Example #5
0
)

# dataloader = torch.utils.data.DataLoader(
#     datasets.MNIST(
#         "../../data/mnist",
#         train=True,
#         download=True,
#         transform=transforms.Compose(
#             [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
#         ),
#     ),
#     batch_size=opt.batch_size,
#     shuffle=True,
# )

# Save a grid of real samples under the "wgangp" run name for later comparison.
sample_real(dataloader=dataloader, batch_size=opt.batch_size, run_name="wgangp")

# Optimizers — separate Adam instances for the generator and discriminator.
optimizer_G = torch.optim.Adam(
    generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)
)
optimizer_D = torch.optim.Adam(
    discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)
)

# Tensor type alias: CUDA floats when the `cuda` flag (set elsewhere) is
# truthy, CPU floats otherwise.
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# ----------
#  Training
# ----------
Example #6
0
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()

# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Configure data loader; just_spirals=True restricts the G10 dataset to a
# subset (presumably spiral galaxies — confirm against the datasets module).
dataset = G10(img_size=IMG_SIZE, just_spirals=True)

dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=True)

# Save a batch of real samples for later visual comparison.
sample_real(dataloader=dataloader, batch_size=BATCH_SIZE, run_name=DIR)

# Optimizers — Adam with hyperparameters from module-level constants.
optimizer_G = torch.optim.Adam(generator.parameters(), lr=LR, betas=BETAS)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=LR, betas=BETAS)

# Tensor type alias: CUDA floats when the CUDA flag is set, CPU otherwise.
Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor

# ----------
#  Training
# ----------

for epoch in range(N_EPOCHS):
    for i, imgs in enumerate(dataloader):
        # Adversarial ground truths
        # valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)