Example 1
    def __init__(self, z_size,
                 image_size, image_channel_size,
                 c_channel_size, g_channel_size):
        # configurations
        super().__init__()
        self.z_size = z_size
        self.image_size = image_size
        self.image_channel_size = image_channel_size
        self.c_channel_size = c_channel_size
        self.g_channel_size = g_channel_size

        # components
        self.critic = gan.Critic(
            image_size=self.image_size,
            image_channel_size=self.image_channel_size,
            channel_size=self.c_channel_size,
        )
        self.generator = gan.Generator(
            z_size=self.z_size,
            image_size=self.image_size,
            image_channel_size=self.image_channel_size,
            channel_size=self.g_channel_size,
        )

        # training-related components that should be set before training.
        self.generator_optimizer = None
        self.critic_optimizer = None
        self.critic_updates_per_generator_update = None
        self.lamda = None
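This `__init__` belongs to a WGAN-style wrapper (the snippet starts mid-class): it stores the configuration, builds a `gan.Critic` and a `gan.Generator`, and leaves the optimizers, the critic/generator update ratio, and `lamda` unset until training is configured. Below is a minimal sketch of how such a wrapper might be wired up before training; the class name `WGAN`, the constructor values, and the way the optimizers are attached are assumptions, not part of the original snippet.

# A sketch, assuming the __init__ above belongs to a torch.nn.Module
# subclass called WGAN in this project (hypothetical name).
import torch

model = WGAN(z_size=100, image_size=32, image_channel_size=3,
             c_channel_size=64, g_channel_size=64)

# the constructor leaves these as None; they must be set before training
model.generator_optimizer = torch.optim.Adam(model.generator.parameters(),
                                             lr=1e-4, betas=(0.5, 0.9))
model.critic_optimizer = torch.optim.Adam(model.critic.parameters(),
                                          lr=1e-4, betas=(0.5, 0.9))
model.critic_updates_per_generator_update = 5
model.lamda = 10  # presumably the WGAN-GP gradient-penalty weight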
Example 2
def create_gan():
    root = os.path.dirname(__file__)

    n = gan.Generator(4, 4)

    # load pretrained weights if a checkpoint sits next to this module
    gan_weights = os.path.join(root, 'gan.pth')
    if os.path.exists(gan_weights):
        n.load_state_dict(torch.load(gan_weights))
    n.eval()
    return n
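The helper returns the generator in eval mode, loading `gan.pth` only when it exists. A minimal usage sketch; the input shape is an assumption, since the meaning of the `(4, 4)` constructor arguments is not shown in the snippet.

net = create_gan()
with torch.no_grad():
    out = net(torch.randn(1, 4))  # assumed latent size of 4; adapt to the real forward() signature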
Example 3
def make_results():
    # rebuild the trained conditional generator and load its saved weights
    generator = gan.Generator(channels=3,
                              latent_size=128,
                              num_classes=2,
                              hidden_dim=64).cuda()
    generator.load_state_dict(torch.load('generator.pkl'))
    generator.eval()
    # collect the per-epoch sample grids saved by train() in numeric order
    # and stitch them into a training-progress GIF
    files = sorted(glob.glob('train_img/*.png'),
                   key=lambda n: int(n.split('\\')[-1].replace('.png', '')))
    images = [skimage.img_as_ubyte(plt.imread(name)) for name in files]
    imageio.mimsave('result/train.gif', images, fps=5)
    # random latent codes and binary attribute pairs for the interpolation GIF
    size = 64
    latent = []
    attr = []
    for _ in range(3):
        latent.append(torch.randn(size, generator.latent_size).cuda())
        attr.append(torch.randint(2, size=(size, 2)).float().cuda())
    interpolate('result/interpolation.gif', generator, latent, attr)
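`interpolate` is a project helper not shown here. Below is a minimal sketch of the linear latent interpolation such a helper typically performs, assuming the generator is called as `generator(latent, attr)` as in Example 5; the function name, step count, and frame conversion are illustrative assumptions.

import imageio
import torch
import torchvision

def interpolate_sketch(path, generator, latents, attrs, steps=10, fps=5):
    # blend linearly between consecutive latent/attribute pairs and render
    # a sample grid for every blend step, then write the frames as a GIF
    frames = []
    pairs = list(zip(latents, attrs))
    for (z0, a0), (z1, a1) in zip(pairs, pairs[1:]):
        for i in range(steps):
            t = i / (steps - 1)
            z = (1 - t) * z0 + t * z1
            a = (1 - t) * a0 + t * a1
            with torch.no_grad():
                grid = torchvision.utils.make_grid(generator(z, a).cpu(),
                                                   normalize=True)
            frames.append((grid.numpy().transpose(1, 2, 0) * 255).astype('uint8'))
    imageio.mimsave(path, frames, fps=fps)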
Example 4
    Y = T.ivector()
    y = Output(Y, shape=(args.batch_size, ), index_max=ny)
else:
    Y = None
    y = None

modules = []
nets = []

# generation
gen_kwargs = dict(args=args,
                  dist=dist,
                  nc=nc,
                  bnkwargs=bnkwargs,
                  gen_transform=gen_transform)
train_gen = gan.Generator(**gen_kwargs)
gX = train_gen.data
gXtest = gan.Generator(source=train_gen.net, mode='test', **gen_kwargs).data

lrt = sharedX(lr)


def get_updater(optimizer, **kwargs):
    opt_map = dict(adam='Adam', sgd='Momentum', rms='RMSprop')
    if optimizer not in opt_map:
        raise ValueError('Unknown optimizer: %s' % (optimizer, ))
    if optimizer == 'adam':
        kwargs.update(b1=b1)
    elif optimizer == 'sgd':
        kwargs.update(momentum=args.sgd_momentum)
    opt_func = getattr(updates, opt_map[optimizer])
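The snippet ends before `get_updater` uses `opt_func`. Presumably the function finishes by instantiating the chosen update rule with the shared learning rate; a one-line sketch, under the assumption that `Adam`, `Momentum`, and `RMSprop` in the `updates` module accept `lr` together with the keyword arguments prepared above:

    return opt_func(lr=lrt, **kwargs)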
Example 5
def train():
    n_epoch, lr, betas = 50, 0.0002, (0.5, 0.999)
    transform = transforms.Compose([
        transforms.Resize(64),
        transforms.CenterCrop(64),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_dataset = datasets.CelebA('',
                                    split='train',
                                    transform=transform,
                                    download=True)
    train_loader = data.DataLoader(train_dataset, batch_size=128, shuffle=True)
    generator = gan.Generator(channels=3,
                              latent_size=128,
                              num_classes=2,
                              hidden_dim=64).cuda()
    discriminator = gan.Discriminator(channels=3, hidden_dim=64).cuda()
    generator.apply(gan.weights_init_normal)
    discriminator.apply(gan.weights_init_normal)
    generator.train()
    discriminator.train()
    gen_optimizer = torch.optim.Adam(generator.parameters(),
                                     lr=lr,
                                     betas=betas)
    disc_optimizer = torch.optim.Adam(discriminator.parameters(),
                                      lr=lr,
                                      betas=betas)
    criterion = torch.nn.BCELoss()
    generator_train_loss, discriminator_train_loss = [], []
    example = torch.randn(64, generator.latent_size).cuda()
    example_attr = torch.randint(2, size=(64, 2)).float().cuda()
    try:
        for epoch in range(n_epoch):
            epoch_time = time.time()
            generator_train_loss.append(0)
            discriminator_train_loss.append(0)
            for image, attr in train_loader:
                batch_size = image.size(0)
                image = image.cuda()
                # keep two binary CelebA attributes (columns 15 and 20:
                # Eyeglasses and Male) as the conditioning vector
                attr = attr[:, [15, 20]].float().cuda()
                real_label = torch.ones(batch_size).cuda()
                fake_label = torch.zeros(batch_size).cuda()

                latent = torch.randn(batch_size, generator.latent_size).cuda()
                generated = generator(latent, attr)

                # discriminator step: real images toward label 1, detached fakes toward 0
                disc_optimizer.zero_grad()
                real_loss = criterion(discriminator(image), real_label)
                fake_loss = criterion(discriminator(generated.detach()),
                                      fake_label)
                disc_loss = real_loss + fake_loss
                disc_loss.backward()
                disc_optimizer.step()

                # generator step: push the discriminator to score the fakes as real
                gen_optimizer.zero_grad()
                gen_loss = criterion(discriminator(generated), real_label)
                gen_loss.backward()
                gen_optimizer.step()

                generator_train_loss[-1] += gen_loss.item()
                discriminator_train_loss[-1] += disc_loss.item()
            # average the accumulated per-batch losses over the epoch
            generator_train_loss[-1] /= len(train_loader)
            discriminator_train_loss[-1] /= len(train_loader)
            if epoch % 1 == 0:  # save a sample grid every epoch
                with torch.no_grad():
                    generated = generator(example, example_attr).detach().cpu()
                grid = torchvision.utils.make_grid(generated,
                                                   padding=2,
                                                   normalize=True)
                plt.imshow(grid.numpy().transpose(1, 2, 0))
                plt.axis('off')
                plt.grid()
                plt.savefig(f'train_img/{epoch}.png')
                plt.show()
            print(
                f'Epoch {epoch}/{n_epoch} done in {time.time() - epoch_time:.2f}s with loss '
                f'Generator({generator_train_loss[-1]:.3f}) Discriminator({discriminator_train_loss[-1]:.3f})'
            )
            torch.save(generator.state_dict(), f'model/generator_{epoch}.pkl')
    except KeyboardInterrupt:
        pass
    torch.save(generator.state_dict(), 'generator.pkl')
    plt.plot(generator_train_loss, label='Generator')
    plt.plot(discriminator_train_loss, label='Discriminator')
    plt.xlabel('Epoch')
    plt.title('BCE Loss')
    plt.legend()
    plt.savefig('loss.png')
    plt.show()
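train() relies on CUDA, on a project-local `gan` module providing `Generator`, `Discriminator`, and `weights_init_normal`, and on existing `train_img/` and `model/` directories. A minimal sketch of the imports and entry point the snippet assumes; the directory creation is added here for convenience and is not part of the original code.

import os
import time

import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils import data
from torchvision import datasets, transforms

import gan  # project module providing Generator, Discriminator, weights_init_normal

if __name__ == '__main__':
    os.makedirs('train_img', exist_ok=True)
    os.makedirs('model', exist_ok=True)
    train()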
Example 6
    y = None

modules = []
nets = []

# generation
gen_kwargs = dict(args=args,
                  dist=dist,
                  nc=nc,
                  bnkwargs=bnkwargs,
                  gen_transform=gen_transform)
train_gens = []
gXs = []
gXtests = []
for ng in range(args.num_generator):
    tg = gan.Generator(**gen_kwargs, name="Generator%d" % ng)
    gXs.append(tg.data)
    train_gens.append(tg)
    testg = gan.Generator(source=tg.net,
                          name="Generator%d" % ng,
                          mode='test',
                          **gen_kwargs).data
    gXtests.append(testg)

lrt = sharedX(args.learning_rate)


def get_updater(optimizer, **kwargs):
    opt_map = dict(adam='Adam', sgd='Momentum', rms='RMSprop')
    if optimizer not in opt_map:
        raise ValueError('Unknown optimizer: %s' % (optimizer, ))