Ejemplo n.º 1
0
    def __init__(self, device):
        """Set up a CycleGAN trainer: networks, criteria, optimizers, pools.

        Args:
            device: torch device onto which every network is placed.
        """
        self.device = device

        # Generators translate between the two domains; discriminators judge
        # realism in each domain.  All four are weight-initialised.
        self.netG_A = self.__init_weights(Generator(3, use_dropout=False).to(device))
        self.netG_B = self.__init_weights(Generator(3, use_dropout=False).to(device))
        self.netD_A = self.__init_weights(Discriminator(3).to(device))
        self.netD_B = self.__init_weights(Discriminator(3).to(device))

        # Adversarial (LSGAN-style MSE), cycle-consistency and identity criteria.
        self.criterion_gan = nn.MSELoss()
        self.criterion_cycle = nn.L1Loss()
        self.criterion_idt = nn.L1Loss()

        adam_kwargs = {"lr": 0.0002, "betas": (0.5, 0.999)}
        gen_params = itertools.chain(self.netG_A.parameters(), self.netG_B.parameters())
        dis_params = itertools.chain(self.netD_A.parameters(), self.netD_B.parameters())
        self.optimizer_G = torch.optim.Adam(gen_params, **adam_kwargs)
        self.optimizer_D = torch.optim.Adam(dis_params, **adam_kwargs)
        self.optimizers = [self.optimizer_G, self.optimizer_D]

        # History buffers of generated images used to stabilise the discriminators.
        self.fake_A_pool = ImagePool(50)
        self.fake_B_pool = ImagePool(50)

        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']

        # Loss weights for the cycle terms and the identity term.
        self.lambda_A = 10
        self.lambda_B = 10
        self.lambda_idt = 0.5

        self.save_dir = './models'
Ejemplo n.º 2
0
def main(args):
    """Run two trained colorization generators over the CIFAR-10 test set
    and save side-by-side comparison images.

    :param args: parsed command line arguments providing data_path,
                 batch_size, num_workers and save_path
    """
    # print args
    print_args(args)

    # download and extract dataset
    get_cifar10_data(args.data_path)
    data_dirs = extract_cifar10_images(args.data_path)

    dataset = Cifar10Dataset(root_dir=data_dirs["test"],
                             mirror=False,
                             random_seed=1)

    print("test dataset len: {}".format(len(dataset)))

    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    # NOTE(review): both generators are built with "batch" normalization even
    # though the second checkpoint comes from the spectral-norm run — confirm
    # that Generator handles this as intended.
    generator_bn = Generator("batch")
    generator_sn = Generator("batch")

    generator_bn.load_state_dict(
        torch.load("checkpoints/checkpoint_ep0_gen.pt", map_location="cpu"))
    generator_sn.load_state_dict(
        torch.load("checkpoints_spectral/checkpoint_ep10_gen.pt",
                   map_location="cpu"))

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    for idx, sample in enumerate(data_loader):
        # The L channel is the first plane; the full Lab image is the reference.
        img_l, real_img_lab = sample[:, 0:1, :, :], sample

        # Colorize with each generator; detach since no gradients are needed.
        fake_img_ab_bn = generator_bn(img_l).detach()
        fake_img_lab_bn = torch.cat([img_l, fake_img_ab_bn], dim=1)

        fake_img_ab_sn = generator_sn(img_l).detach()
        fake_img_lab_sn = torch.cat([img_l, fake_img_ab_sn], dim=1)

        # BUG FIX: the total number of batches is len(data_loader), not
        # len(data_loader) + 1 (the old print never reached "N/N").
        print("sample {}/{}".format(idx + 1, len(data_loader)))
        save_test_sample(real_img_lab,
                         fake_img_lab_bn,
                         fake_img_lab_sn,
                         osp.join(args.save_path,
                                  "test_sample_{}.png".format(idx)),
                         show=True)
Ejemplo n.º 3
0
def main():
    """Parse CLI options, build the GAN components and run training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning-rate', '-lr', type=float, default=1e-3)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--data-parallel', action='store_true')
    parser.add_argument('--num-d-iterations', type=int, default=1)
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    print(args)

    device = torch.device('cuda' if args.cuda else 'cpu')

    generator = Generator(ch=128).to(device)
    discriminator = Discriminator(ch=128).to(device)

    # Shared Adam settings for both players.
    adam_betas = (0.5, 0.999)
    optim_g = optim.Adam(generator.parameters(), lr=args.learning_rate,
                         betas=adam_betas)
    optim_d = optim.Adam(discriminator.parameters(), lr=args.learning_rate,
                         betas=adam_betas)

    dataloader = get_cat_dataloader()

    # Output directory for generated sample images.
    os.makedirs('samples', exist_ok=True)

    trainer = Trainer(generator, discriminator, optim_g, optim_d, dataloader,
                      device, args.num_d_iterations)
    trainer.train(args.epochs)
Ejemplo n.º 4
0
 def init_trainer(self):
     """Build networks, optimizers, augmentation pipeline, data loader and tickers.

     Reads hyper-parameters from attributes set elsewhere on the instance:
     nc, nz, size, device, device_ids, lr, batch_size, unit_epoch, num_aug.
     """
     # networks
     self.G = Generator(nc=self.nc, nz=self.nz, size=self.size)
     self.D = Discriminator(nc=self.nc, nz=self.nz, size=self.size)
     # Exponential-moving-average copy of G, used only for inference.
     self.G_EMA = copy.deepcopy(self.G)
     # move to GPU
     self.G = nn.DataParallel(self.G, device_ids=self.device_ids).to(self.device)
     self.D = nn.DataParallel(self.D, device_ids=self.device_ids).to(self.device)
     self.G_EMA = self.G_EMA.to('cpu') # keep this model on CPU to save GPU memory
     for param in self.G_EMA.parameters():
         param.requires_grad_(False) # turn off grad because G_EMA will only be used for inference
     # optimizers (PGGAN-style Adam settings: betas=(0, 0.99), eps=1e-8)
     self.opt_G = optim.Adam(self.G.parameters(), lr=self.lr, betas=(0,0.99), eps=1e-8, weight_decay=0.)
     self.opt_D = optim.Adam(self.D.parameters(), lr=self.lr, betas=(0,0.99), eps=1e-8, weight_decay=0.)
     # data loader
     # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (replaced by
     # Image.Resampling.LANCZOS); this line breaks on current Pillow — confirm
     # the pinned Pillow version before upgrading.
     self.transform = transforms.Compose([
         RatioCenterCrop(1.),
         transforms.Resize((300,300), Image.ANTIALIAS),
         transforms.RandomCrop((self.size,self.size)),
         RandomRotate(),
         transforms.RandomVerticalFlip(),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor()
     ])
     self.dataset = ISIC_GAN('train_gan.csv', transform=self.transform)
     # NOTE(review): worker_init_fn is *called* here rather than passed —
     # presumably _worker_init_fn_() is a factory returning the per-worker
     # callable; confirm against its definition.
     self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size,
         shuffle=True, num_workers=8, worker_init_fn=_worker_init_fn_(), drop_last=True)
     # tickers (used for fading in)
     self.tickers = self.unit_epoch * self.num_aug * len(self.dataloader)
Ejemplo n.º 5
0
def train():
    """Train the RDN super-resolution GAN (TensorFlow 1.x graph mode).

    Builds the graph (RDN generator, discriminator, hinge loss plus a
    LAMBDA-weighted MSE content term), then runs an endless loop alternating
    one discriminator step and one generator step per iteration.  Previews
    are saved every 10 iterations and a checkpoint every 500.  Relies on
    module-level constants: MAX_ITERATION, LAMBDA, TRAINING_SET_PATH,
    BATCH_SIZE, RESULTS, SAVE_MODEL.
    """
    RDN = Generator("RDN")
    D = Discriminator("discriminator")
    # HR patches are 96x96; LR inputs are the 4x-downscaled 24x24 patches.
    HR = tf.placeholder(tf.float32, [None, 96, 96, 3])
    LR = tf.placeholder(tf.float32, [None, 24, 24, 3])
    SR = RDN(LR)
    fake_logits = D(SR, LR)
    real_logits = D(HR, LR)
    D_loss, G_loss = Hinge_Loss(fake_logits, real_logits)
    # Content term: pixel-wise MSE between super-resolved and ground truth.
    G_loss += MSE(SR, HR) * LAMBDA
    # `itr` counts down from MAX_ITERATION; op_sub decrements it per G step.
    itr = tf.Variable(MAX_ITERATION, dtype=tf.int32, trainable=False)
    learning_rate = tf.Variable(2e-4, trainable=False)
    op_sub = tf.assign_sub(itr, 1)
    D_opt = tf.train.AdamOptimizer(learning_rate, beta1=0., beta2=0.9).minimize(D_loss, var_list=D.var_list())
    with tf.control_dependencies([op_sub]):
        G_opt = tf.train.AdamOptimizer(learning_rate, beta1=0., beta2=0.9).minimize(G_loss, var_list=RDN.var_list())
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    while True:
        HR_data, LR_data = read_crop_data(TRAINING_SET_PATH, BATCH_SIZE, [96, 96, 3], 4)
        sess.run(D_opt, feed_dict={HR: HR_data, LR: LR_data})
        [_, iteration] = sess.run([G_opt, itr], feed_dict={HR: HR_data, LR: LR_data})
        # Convert the countdown into an ascending iteration index.
        iteration = MAX_ITERATION - iteration
        if iteration < MAX_ITERATION // 2:
            # NOTE(review): this rebinds the Python name `learning_rate` to a
            # *new* graph tensor on every pass; the two AdamOptimizers keep
            # using the original tf.Variable, so the intended decay never
            # takes effect, and the graph grows without bound.  A tf.assign op
            # executed via sess.run would be needed — confirm the intended
            # schedule before fixing.
            learning_rate = learning_rate * (iteration * 2 / MAX_ITERATION)
        if iteration % 10 == 0:
            [D_LOSS, G_LOSS, LEARNING_RATE, img] = sess.run([D_loss, G_loss, learning_rate, SR], feed_dict={HR: HR_data, LR: LR_data})
            # Side-by-side HR | SR preview, mapped from [-1, 1] to [0, 255].
            output = (np.concatenate((HR_data[0, :, :, :], img[0, :, :, :]), axis=1) + 1) * 127.5
            Image.fromarray(np.uint8(output)).save(RESULTS+str(iteration)+".jpg")
            print("Iteration: %d, D_loss: %f, G_loss: %f, LearningRate: %f"%(iteration, D_LOSS, G_LOSS, LEARNING_RATE))
        if iteration % 500 == 0:
            saver.save(sess, SAVE_MODEL + "model.ckpt")
Ejemplo n.º 6
0
def main(args):
    """
    Entry point: restore a trained generator and sample images from it.
    :param args: parsed command line arguments
    :return: None
    """
    print("Creating generator object ...")
    # wrap in DataParallel to match how the checkpoint was saved
    gen = th.nn.DataParallel(
        Generator(depth=args.depth, latent_size=args.latent_size))

    print("Loading the generator weights from:", args.generator_file)
    gen.load_state_dict(th.load(args.generator_file, map_location=str(device)))

    save_path = args.out_dir

    print("Generating scale synchronized images ...")
    for img_num in tqdm(range(1, args.num_samples + 1)):
        with th.no_grad():
            # draw a latent vector and project it onto the hypersphere of
            # radius sqrt(latent_size)
            latent = th.randn(1, args.latent_size)
            latent = (latent / latent.norm()) * (args.latent_size ** 0.5)
            generated = gen(latent, depth=args.out_depth, alpha=1)
            # color adjust the generated image
            generated = adjust_dynamic_range(generated)

        # write the sample as <img_num>.png (HWC layout on the CPU)
        out_file = os.path.join(save_path, str(img_num) + ".png")
        imsave(out_file, generated.squeeze(0).permute(1, 2, 0).cpu())

    print("Generated %d images at %s" % (args.num_samples, save_path))
Ejemplo n.º 7
0
    def __init__(self, image_size, input_channels, hidden_channels, output_channels, latent_dimension, lr, device, clamp=0.01, gp_weight=10):
        """Build a WGAN-GP style critic/generator pair and their optimizers.

        Args:
            image_size: spatial size of the (square) images.
            input_channels: channels of the critic's input images.
            hidden_channels: base width of both networks.
            output_channels: channels produced by the generator.
            latent_dimension: size of the generator's noise vector.
            lr: learning rate for both optimizers.
            device: torch device the networks are placed on.
            clamp: weight-clipping bound (kept for WGAN-style training).
            gp_weight: gradient-penalty coefficient.
        """
        self.image_size = image_size
        self.input_channels = input_channels
        # The misspelled attribute is kept for backward compatibility; a
        # correctly spelled alias is provided alongside it.
        self.hidden_chanels = hidden_channels
        self.hidden_channels = hidden_channels
        self.output_channels = output_channels
        self.latent_dimension = latent_dimension
        self.device = device
        self.clamp = clamp
        self.gp_weight = gp_weight

        self.critic = Critic(image_size, hidden_channels,
                             input_channels).to(device)
        self.generator = Generator(
            image_size, latent_dimension, hidden_channels, output_channels).to(device)

        self.critic.apply(self.weights_init)
        self.generator.apply(self.weights_init)

        # BUG FIX: the original constructed RMSprop optimizers and then
        # immediately overwrote them with Adam — dead work removed; only the
        # effective Adam optimizers (betas=(0, 0.9), the WGAN-GP setting) remain.
        self.optimizer_critic = torch.optim.Adam(
            self.critic.parameters(), lr, betas=(0, 0.9))
        self.optimizer_gen = torch.optim.Adam(
            self.generator.parameters(), lr, betas=(0, 0.9))

        # Per-step loss histories.
        self.critic_losses = []
        self.gen_losses = []

        self.losses = []
Ejemplo n.º 8
0
    def create_generator(self):
        """Assemble the 512x512 encoder/decoder generator from its layer specs.

        Each spec tuple is (filters, stride, dropout); a stride of 2 halves
        the spatial resolution in the encoder and doubles it in the decoder.
        """
        encoder_specs = [
            (32, 1, 0),   # 512x512xch -> 512x512x32
            (32, 2, 0),   # 512x512x32 -> 256x256x32
            (64, 2, 0),   # 256x256x32 -> 128x128x64
            (128, 2, 0),  # 128x128x64 -> 64x64x128
            (256, 2, 0),  # 64x64x128  -> 32x32x256
            (512, 2, 0),  # 32x32x256  -> 16x16x512
            (512, 2, 0),  # 16x16x512  -> 8x8x512
            (512, 2, 0),  # 8x8x512    -> 4x4x512
            (512, 2, 0),  # 4x4x512    -> 2x2x512
        ]

        decoder_specs = [
            (512, 2, 0),  # 2x2x512    -> 4x4x512
            (512, 2, 0),  # 4x4x512    -> 8x8x512
            (512, 2, 0),  # 8x8x512    -> 16x16x512
            (256, 2, 0),  # 16x16x512  -> 32x32x256
            (128, 2, 0),  # 32x32x256  -> 64x64x128
            (64, 2, 0),   # 64x64x128  -> 128x128x64
            (32, 2, 0),   # 128x128x64 -> 256x256x64
            (32, 2, 0),   # 256x256x64 -> 512x512x32
        ]

        return Generator('gen',
                         encoder_specs,
                         decoder_specs,
                         training=self.options.training)
Ejemplo n.º 9
0
def train(path):
    """Train a SinGAN-style pyramid of generators/discriminators scale by scale.

    Each scale's networks are warm-started from the previous scale's weights
    when the shapes match.  The resulting pyramid (plus noise amplitudes and
    training images) is saved next to *path* with a .pth extension.

    Args:
        path: path to the training image/file; also determines the save path.
    """
    imgs = get_training_imgs(path)
    nums = len(imgs)
    Gs = []
    Ds = []
    fixed_Zs = []
    sigmas = []
    ch = 16
    for i in range(nums):
        # Double the channel width every 4 scales.
        if i % 4 == 0:
            ch = ch * 2
        G = Generator(ch)
        D = Discriminator(ch)
        G.to("cuda:0")
        D.to("cuda:0")
        if i > 0:
            # Warm-start from the previous scale.  Right after a channel
            # doubling the state dicts no longer match, in which case the new
            # scale simply starts from fresh weights.
            # BUG FIX: narrowed the bare `except:` (which swallowed every
            # error, including KeyboardInterrupt) to the RuntimeError that
            # load_state_dict raises on a shape mismatch.
            try:
                G.load_state_dict(G_.state_dict())
                D.load_state_dict(D_.state_dict())
                del G_, D_
            except RuntimeError:
                pass
        Gs.append(G)
        Ds.append(D)
        print(".............Total Scale: %d, current scale: %d............."%(nums, i+1))
        G_, D_ = train_single_scale(Gs, Ds, imgs[:i+1], sigmas, fixed_Zs)
    state_dict = {}
    state_dict["Gs"] = Gs
    state_dict["sigmas"] = sigmas
    state_dict["imgs"] = imgs
    torch.save(state_dict, path[:-3]+"pth")
Ejemplo n.º 10
0
def get_models(latent_dim, model_dim, device, output_dim, channels, init=True):
    """Instantiate a generator/critic pair on *device*.

    When *init* is true, both networks receive the custom normal weight
    initialisation before being returned.
    """
    generator = Generator(latent_dim, model_dim, channels).to(device)
    critic = Critic(model_dim, output_dim, channels).to(device)
    if init:
        for model in (generator, critic):
            model.apply(__weights_init_normal)
    return generator, critic
Ejemplo n.º 11
0
    def __init__(self, args):
        """Store hyper-parameters, build G/E/D on the GPU and their optimizers.

        Args:
            args: namespace providing z_dim, decay_rate, learning_rate,
                model_name and batch_size.
        """
        self.z_dim = args.z_dim
        self.decay_rate = args.decay_rate
        self.learning_rate = args.learning_rate
        self.model_name = args.model_name
        self.batch_size = args.batch_size

        # Networks (attribute names intentionally shadow the class names).
        self.Generator = Generator(self.z_dim).cuda()
        self.Encoder = Encoder(self.z_dim).cuda()
        self.Discriminator = Discriminator().cuda()

        adam_betas = (0.5, 0.999)
        # Generator and encoder share a single optimizer.
        self.optimizer_G_E = torch.optim.Adam(
            [*self.Generator.parameters(), *self.Encoder.parameters()],
            lr=self.learning_rate,
            betas=adam_betas)
        self.optimizer_D = torch.optim.Adam(self.Discriminator.parameters(),
                                            lr=self.learning_rate,
                                            betas=adam_betas)

        # Custom weight initialisation for every network.
        for net in (self.Generator, self.Encoder, self.Discriminator):
            net.apply(weights_init)
Ejemplo n.º 12
0
def main():
    """Run a trained conditional generator over the MNIST test set and save
    the generated and real images side by side on disk.
    """
    opt = parse.parse_args()
    print(opt)

    img_shape = (opt.channels, opt.img_size, opt.img_size)

    cuda = True if torch.cuda.is_available() else False

    generator = Generator(opt.n_classes, opt.latent_dim, img_shape)

    print(generator)

    if cuda:
        generator = generator.cuda()
        torch.cuda.set_device(opt.gpu_ids)

    os.makedirs('data/mnist', exist_ok=True)
    dataloader = DataLoader(datasets.MNIST('data/mnist',
                                           train=False,
                                           transform=transforms.Compose([
                                               transforms.Resize(opt.img_size),
                                               transforms.ToTensor(),
                                               transforms.Normalize(
                                                   mean=(0.5, 0.5, 0.5),
                                                   std=(0.5, 0.5, 0.5))
                                           ]),
                                           download=True),
                            batch_size=opt.batch_size,
                            shuffle=False)

    FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor

    # Load the trained weights first, then switch to inference mode.
    generator.load_state_dict(torch.load(opt.model_dir))
    generator.eval()
    for i, (image, labels) in enumerate(dataloader):
        # BUG FIX: size the noise by the actual batch — the last batch may be
        # smaller than opt.batch_size (the loader does not drop it), and the
        # original sampled opt.batch_size noise vectors, mismatching `labels`.
        batch_size = image.shape[0]
        z = Variable(
            FloatTensor(
                np.random.normal(loc=0.0,
                                 scale=1.0,
                                 size=(batch_size, opt.latent_dim))))
        real_image = Variable(image.type(FloatTensor))
        labels = Variable(labels.type(LongTensor))
        gen_image = generator(z, labels)

        print("[Batch %d/%d]" % (i, len(dataloader)))

        gen_image = gen_image.view(gen_image.size(0), *img_shape)
        real_images = real_image.view(real_image.size(0), *img_shape)
        save_image(
            gen_image.data,
            'images_4_nocat_MSE_ls_lc/test/fake/%d_generated_image.png' % (i),
            nrow=1,
            normalize=True)
        save_image(real_images.data,
                   'images_4_nocat_MSE_ls_lc/test/real/%d_real_image.png' % i,
                   nrow=1,
                   normalize=True)
Ejemplo n.º 13
0
def load_gan(model_path):
    """Restore a Generator from the 'gan' entry of a saved checkpoint."""
    checkpoint = torch.load(model_path)
    generator = Generator()
    generator.load_state_dict(checkpoint['gan'])
    return generator
Ejemplo n.º 14
0
Archivo: main.py Proyecto: TobiGee/src
def test(config):
    """
    This function tests a trained neuronal network. In this case the generator of the GAN.

    Args:
    config : A dictionary with the extracted configuration out of the config.ini file and the commandlineparser.
        Following dictionary-values are necessary:

        {'image_dim_x': intended image x-dimension,
         'image_dim_y': intended image y-dimension,
         'image_dim':   image_dim_x * image_dim_y,
         'batch_size':  size of the batch per iteration,
         'datatype':    datatype of the dataset (MNIST | CIFAR10 | ImageFolder),
         'device':      device which should execute the test (gpu | cpu),
         'z_dim':       Hyperparameter for the Generator,
         'logpathfake': logpath for the fake-image tensorboard SummaryWriter (--logdir),
         'logpathreal': logpath for the real-image tensorboard SummaryWriter (--logdir)}
    """

    #load Generator
    gen = Generator(config['z_dim'], config['image_dim']).to(config['device'])
    gen.load_model(config)

    #load Dataset
    print("Load dataset...")
    loader = load_Dataset(config)

    #initialize tensorboard summarywriter
    writer_fake = SummaryWriter(config['logpathfake'])
    writer_real = SummaryWriter(config['logpathreal'])
    trained_iterations = gen.training_iterations

    #Testing trained Generator
    print("Testing...")
    # BUG FIX: the original iterated the entire loader but only acted on
    # batch 0 — fetch just the first batch instead of looping over all of them.
    real, _ = next(iter(loader))
    real = real.view(-1, config['image_dim']).to(config['device'])

    with torch.no_grad():
        # Generate one batch of fakes and arrange both sets as image grids.
        noise = torch.randn(config['batch_size'],
                            config['z_dim']).to(config['device'])
        fake = gen(noise).reshape(-1, 1, config['image_dim_x'],
                                  config['image_dim_y'])
        data = real.reshape(-1, 1, config['image_dim_x'],
                            config['image_dim_y'])
        img_grid_fake = torchvision.utils.make_grid(fake, normalize=True)
        img_grid_real = torchvision.utils.make_grid(data, normalize=True)
        writer_fake.add_image(
            "Mnist generated fake images out of test",
            img_grid_fake,
            global_step=trained_iterations)
        writer_real.add_image("Mnist reference Images",
                              img_grid_real,
                              global_step=0)
Ejemplo n.º 15
0
def generate_process(opt):
    """Generate images from a trained generator in one of three modes.

    Modes:
      * "normal"     - sample fresh random latent vectors.
      * "use_latent" - reuse latents stored as .npy files in opt.latent_dir.
      * "inter"      - linearly interpolate between two saved latents.

    Raises:
        ValueError: if opt.mode is none of the modes above.
    """
    print("generate_process")
    # ----- Device Setting -----
    device = torch.device("cpu")

    # ----- Output Setting -----
    img_output_dir = opt.output_dir
    # exist_ok makes directory creation idempotent (and creates parents).
    os.makedirs(img_output_dir, exist_ok=True)
    os.makedirs(os.path.join(img_output_dir, "images"), exist_ok=True)
    if opt.save_latent is True:
        os.makedirs(os.path.join(img_output_dir, "latents"), exist_ok=True)

    print("Output :", img_output_dir)

    # ----- Model Loading -----
    print("Use model :", opt.model)
    model_g = Generator()
    model_g.load_state_dict(torch.load(opt.model, map_location="cpu"))
    model_g.to(device)

    model_g.eval()

    # ----- Latent Preparation -----
    if opt.mode == "normal":
        latents = [torch.randn(size=(opt.width * opt.height, opt.latent_size, 1, 1)) for i in range(opt.n_img)]

    elif opt.mode == "use_latent":
        assert opt.latent_dir != "None", "latent source directory is not set"
        latent_paths = [os.path.join(opt.latent_dir, name) for name in os.listdir(opt.latent_dir)]
        latents = [torch.from_numpy(np.load(path)) for path in latent_paths]

    elif opt.mode == "inter":
        latent_start = torch.from_numpy(np.load(opt.start_latent))
        latent_end = torch.from_numpy(np.load(opt.end_latent))
        alphas = [float(n / opt.latent_num) for n in range(opt.n_img)]
        latents = [alpha * latent_end + (1 - alpha) * latent_start for alpha in alphas]

    else:
        # BUG FIX: an unknown mode previously fell through and crashed later
        # with a NameError on `latents`; fail fast with a clear message.
        raise ValueError("unknown generation mode: {}".format(opt.mode))

    print("Generate image num :", len(latents))

    # ----- Generate Step -----
    print("Start Generate Process")
    for index, latent in tqdm(enumerate(latents)):
        img_path = os.path.join(img_output_dir, "images", str(index + 1) + ".png")
        generate(latent, model_g, opt.width, opt.height, img_path)

        if opt.save_latent:
            latent_path = os.path.join(img_output_dir, "latents", str(index + 1) + ".npy")
            np.save(latent_path, latent.numpy())

    print("Finish Generate Process")
Ejemplo n.º 16
0
def init_test(args):
    """Create the data loader and the generators for testing purposes.

    Downloads pretrained generator weights when missing, builds both
    generators on the detected device and returns the tuple
    (device, data_loader, generator_bn, generator_sn).
    """
    # create loader
    dataset = Cifar10Dataset.get_datasets_from_scratch(args.data_path)['test']
    print('Test dataset len: {}'.format(len(dataset)))
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    # check CUDA availability and set device
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Use GPU: {}'.format(str(device) != 'cpu'))

    # download the weights for the generators (skipped when already present)
    if not os.path.exists('batchnorm_ep200_weigths_gen.pt'):
        print('Downloading model weights for generator with BN...')
        os.system(
            'wget https://www.dropbox.com/s/r33ndl969q83gik/batchnorm_ep200_weigths_gen.pt'
        )

    if not os.path.exists('spectralnorm_ep100_weights_gen.pt'):
        print('Downloading model weights for generator with SN...')
        os.system(
            'wget https://www.dropbox.com/s/tccxduyqp3dj5dg/spectralnorm_ep100_weights_gen.pt'
        )

    # load generator that was trained with batch norm
    generator_bn = Generator(normalization_type='batch').to(device)
    # load generator that was trained with spectral norm
    # NOTE(review): this one is also constructed with 'batch' normalization
    # even though the spectral-norm checkpoint is loaded below — looks like a
    # copy-paste slip; confirm what normalization_type Generator expects here.
    generator_sn = Generator(normalization_type='batch').to(device)

    # load the weights
    generator_bn.load_state_dict(
        torch.load('batchnorm_ep200_weigths_gen.pt', map_location=device))
    generator_sn.load_state_dict(
        torch.load('spectralnorm_ep100_weights_gen.pt', map_location=device))

    # make save dir, if needed
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    return device, data_loader, generator_bn, generator_sn
Ejemplo n.º 17
0
def make_generated_dataset():
    """Sample `args.n_samples` images from the trained generator and save them.

    Reads `args` and `device` from module scope; writes uint8 images named
    1..n_samples into args.gen_save_dir with the args.file_type extension.
    """
    generator = Generator(args.dim, args.latent_dim, args.n_pixels, args.bn_g)
    generator.load_state_dict(torch.load(args.model_dir, map_location=device))
    generator.eval()

    with torch.no_grad():
        samples = generator(args.n_samples)
        # Map from [-1, 1] to [0, 255] uint8 pixel values.
        samples = ((samples + 1.0) * (255 / 2.0)).numpy().astype('uint8')
        for i in range(args.n_samples):
            # Channels-first -> channels-last for the image writer.
            img = np.transpose(samples[i], (1, 2, 0))
            out_name = '{}{}{}'.format(args.gen_save_dir, i + 1, args.file_type)
            imageio.imwrite(out_name, img)
Ejemplo n.º 18
0
def load_gen(model_load_path, use_bn=False, last_act='tanh',
             use_wscale=True, use_he_backward=True,
             resolution_list=(4, 8, 16, 32, 64, 128),
             channel_list=(512, 512, 256, 128, 64, 32)):
    """Load generator parameters and grow the generator to full resolution.

    Args:
        model_load_path: path to the saved nnabla parameter file.
        use_bn: enable batch normalization in the generator.
        last_act: activation of the generator's output layer.
        use_wscale: enable weight scaling.
        use_he_backward: use He initialisation for the backward pass.
        resolution_list: resolution of each growth stage, in order.
        channel_list: channel count per growth stage (parallel to
            resolution_list).

    Returns:
        The grown Generator instance.
    """
    with nn.parameter_scope("generator"):
        _ = nn.load_parameters(model_load_path)
    gen = Generator(use_bn=use_bn, last_act=last_act,
                    use_wscale=use_wscale, use_he_backward=use_he_backward)
    # BUG FIX: the mutable list defaults are replaced with tuples (the values
    # are only read, so callers are unaffected), and the index loop is
    # replaced with a pairwise zip over the two stage lists.
    for resolution, channel in zip(resolution_list, channel_list):
        gen.grow(resolution, channel)
    return gen
Ejemplo n.º 19
0
    def __init__(self, dataset_dir, generator_channels, discriminator_channels, nz, style_depth, lrs, betas, eps,
                 phase_iter, weights_halflife, batch_size, n_cpu, opt_level):
        """Build the StyleGAN trainer: data loader, networks and EMA copy.

        Args:
            dataset_dir: root directory of the training images.
            generator_channels / discriminator_channels: network widths.
            nz: latent dimensionality.
            style_depth: depth of the style mapping network.
            lrs, betas, eps: optimizer hyper-parameters (stored for later use).
            phase_iter: iterations per growth phase (loader serves 2x this).
            weights_halflife: EMA half-life for the generator weights.
            batch_size, n_cpu: data loading configuration.
            opt_level: mixed-precision optimisation level.
        """
        self.nz = nz
        self.dataloader = Dataloader(dataset_dir, batch_size, phase_iter * 2, n_cpu)

        self.generator = Generator(generator_channels, nz, style_depth).cuda()
        # The EMA generator starts as an exact clone of the live generator.
        self.generator_ema = Generator(generator_channels, nz, style_depth).cuda()
        self.generator_ema.load_state_dict(copy.deepcopy(self.generator.state_dict()))
        self.discriminator = Discriminator(discriminator_channels).cuda()

        self.tb = tensorboard.tf_recorder('StyleGAN')

        self.phase_iter = phase_iter
        self.lrs = lrs
        self.betas = betas
        self.weights_halflife = weights_halflife

        self.opt_level = opt_level

        self.ema = None

        # BUG FIX: the cuDNN autotuner flag lives on torch.backends.cudnn, not
        # torch.backends.cuda — the original assignment silently had no effect.
        torch.backends.cudnn.benchmark = True
Ejemplo n.º 20
0
def train():
    """Train a hinge-loss GAN on CIFAR-10 batches loaded from .mat files.

    Relies on module-level constants `batchsize` and `n_cri` (discriminator
    steps per generator step).  Saves a sample image every 100 iterations and
    the full models every 1000 iterations.
    """
    generator = Generator()
    discriminator = Discriminator()
    generator.to("cuda:0")
    discriminator.to("cuda:0")
    Opt_D = optim.Adam(discriminator.parameters(), lr=2e-4, betas=(0.5, 0.999))
    Opt_G = optim.Adam(generator.parameters(), lr=2e-4, betas=(0.5, 0.999))
    # All five CIFAR-10 training batches concatenated into one flat array.
    data = np.concatenate((sio.loadmat("D:/cifar10/data_batch_1.mat")["data"],
                           sio.loadmat("D:/cifar10/data_batch_2.mat")["data"],
                           sio.loadmat("D:/cifar10/data_batch_3.mat")["data"],
                           sio.loadmat("D:/cifar10/data_batch_4.mat")["data"],
                           sio.loadmat("D:/cifar10/data_batch_5.mat")["data"]))
    nums = data.shape[0]

    for i in range(100000):
        # Sample a random mini-batch and rescale pixels to [-1, 1].
        rand_idx = np.random.choice(range(nums), batchsize)
        batch = np.reshape(data[rand_idx], [batchsize, 3, 32, 32])
        batch = torch.tensor(batch / 127.5 - 1,
                             dtype=torch.float32).to("cuda:0")
        # n_cri discriminator updates per generator update.
        for j in range(n_cri):
            z = torch.randn(batchsize, 128).to("cuda:0")
            fake_img = generator(z).detach()
            fake_logits = discriminator(fake_img)
            real_logits = discriminator(batch)
            # Hinge loss: mean(max(0, 1 - real)) + mean(max(0, 1 + fake)).
            D_loss = torch.mean(
                torch.max(torch.zeros_like(real_logits),
                          1. - real_logits)) + torch.mean(
                              torch.max(torch.zeros_like(fake_logits),
                                        1. + fake_logits))
            Opt_D.zero_grad()
            D_loss.backward()
            Opt_D.step()
        # Generator step: maximise the discriminator's score on fresh fakes.
        z = torch.randn(batchsize, 128).to("cuda:0")
        fake_img = generator(z)
        fake_logits = discriminator(fake_img)
        G_loss = -torch.mean(fake_logits)
        Opt_G.zero_grad()
        G_loss.backward()
        Opt_G.step()
        if i % 100 == 0:
            # Save the batch's first fake image, mapped back to [0, 255].
            img = (fake_img[0] + 1) * 127.5
            Image.fromarray(
                np.uint8(
                    np.transpose(img.cpu().detach().numpy(),
                                 axes=[1, 2, 0]))).save("./results/" + str(i) +
                                                        ".jpg")
            print("Iteration: %d, D_loss: %f, G_loss: %f" %
                  (i, D_loss, G_loss))
        if i % 1000 == 0:
            torch.save(generator, "generator.pth")
            torch.save(discriminator, "discriminator.pth")
Ejemplo n.º 21
0
    def __init__(self, hyperparameters):
        """Build the STGAN generator/discriminator and their Adam optimizers.

        Args:
            hyperparameters: dict providing 'lr_dis', 'lr_gen' and 'lr_beta'.
        """
        super(STGANtrainer, self).__init__()
        self.hyperparameters = hyperparameters
        self.gen = Generator(5, 5, 4, 13)
        self.dis = Discriminator(5, 64, 13)

        self.dis_opt = Adam(self.dis.parameters(),
                            lr=self.hyperparameters['lr_dis'],
                            betas=self.hyperparameters['lr_beta'])
        self.gen_opt = Adam(self.gen.parameters(),
                            lr=self.hyperparameters['lr_gen'],
                            betas=self.hyperparameters['lr_beta'])
        # NOTE(review): this optimizer is named after the discriminator's
        # attribute head but is handed the *generator's* parameters — likely a
        # copy-paste slip (self.dis.parameters()?); confirm before relying on it.
        self.dis_attr_opt = Adam(self.gen.parameters(),
                                 lr=0.5 * self.hyperparameters['lr_gen'],
                                 betas=self.hyperparameters['lr_beta'])
Ejemplo n.º 22
0
def init():
    """Populate the global `networks` list and `network_dict` mapping from
    every saved discriminator/generator pair under `state_dict_path`."""
    for model_id in os.listdir(state_dict_path):
        base = state_dict_path + '/' + model_id
        discriminator = Discriminator()
        discriminator.load_state_dict(torch.load(base + '/discriminator'))
        generator = Generator()
        generator.load_state_dict(torch.load(base + '/generator'))
        entry = {
            'id': model_id,
            'discriminator': discriminator,
            'generator': generator,
        }
        networks.append(entry)
        network_dict[model_id] = entry
        print('Model #' + model_id + ' loaded.')
Ejemplo n.º 23
0
    def __init__(self, generator_path, discriminator_path):
        """Set up data loaders, networks, optimisers and loss for training.

        Args:
            generator_path: path used for loading/saving generator weights.
            discriminator_path: path used for loading/saving discriminator weights.
        """
        self.device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')

        # Paths for loading and saving model weights.
        self.generator_path = generator_path
        self.discriminator_path = discriminator_path

        # Same tensor-conversion pipeline for both dataset splits.
        to_tensor = torchvision.transforms.Compose(
            [torchvision.transforms.ToTensor()])
        train_data = PegasusDataset('data',
                                    train=True,
                                    download=True,
                                    transform=to_tensor)
        test_data = PegasusDataset('data',
                                   train=False,
                                   download=True,
                                   transform=to_tensor)

        self.train_loader = torch.utils.data.DataLoader(train_data,
                                                        shuffle=True,
                                                        batch_size=BATCH_SIZE,
                                                        drop_last=True)
        self.test_loader = torch.utils.data.DataLoader(test_data,
                                                       shuffle=True,
                                                       batch_size=BATCH_SIZE,
                                                       drop_last=True)

        # Create generator and discriminator on the chosen device.
        self.G = Generator().to(self.device)
        self.D = Discriminator().to(self.device)

        #self.loadModels(self.generator_path, self.discriminator_path)

        # Shared Adam settings for both optimisers.
        adam_kwargs = {'lr': 0.0002, 'betas': (0.5, 0.99)}
        self.optimiser_G = torch.optim.Adam(self.G.parameters(), **adam_kwargs)
        self.optimiser_D = torch.optim.Adam(self.D.parameters(), **adam_kwargs)
        self.bce_loss = nn.BCELoss()

        self.dgRatio = 0
Ejemplo n.º 24
0
    def __init__(self,
                 in_size: int,
                 ts_size: int = 100,
                 latent_dim: int = 20,
                 lr: float = 0.0005,
                 weight_decay: float = 1e-6,
                 iterations_critic: int = 5,
                 gamma: float = 10,
                 weighted: bool = True,
                 use_gru=False):
        """Assemble the TadGAN components: encoder, generator and two critics.

        Args:
            in_size: dimensionality of each time step of the input series.
            ts_size: window length fed to the encoder (not stored on self).
            latent_dim: size of the latent code produced by the encoder.
            lr: learning rate (recorded in hparams).
            weight_decay: optimizer weight decay (recorded in hparams).
            iterations_critic: critic updates per generator update.
            gamma: gradient-penalty / loss weighting coefficient.
            weighted: whether the weighted loss variant is used.
            use_gru: use GRU cells instead of the default recurrent cell.
        """
        super(TadGAN, self).__init__()
        self.in_size = in_size
        self.latent_dim = latent_dim
        self.lr = lr
        self.weight_decay = weight_decay
        self.iterations_critic = iterations_critic
        self.gamma = gamma
        self.weighted = weighted

        # Hyper-parameters logged to the experiment tracker below.
        self.hparams = {
            'lr': self.lr,
            'weight_decay': self.weight_decay,
            'iterations_critic': self.iterations_critic,
            'gamma': self.gamma
        }

        self.encoder = Encoder(in_size,
                               ts_size=ts_size,
                               out_size=self.latent_dim,
                               batch_first=True,
                               use_gru=use_gru)
        self.generator = Generator(use_gru=use_gru)
        self.critic_x = CriticX(in_size=in_size)
        self.critic_z = CriticZ()

        # Custom weight initialisation for every sub-network.
        self.encoder.apply(init_weights)
        self.generator.apply(init_weights)
        self.critic_x.apply(init_weights)
        self.critic_z.apply(init_weights)

        # NOTE(review): self.logger is not set in this method — presumably it
        # is provided by the superclass (e.g. a Lightning module property);
        # confirm against the base class.
        if self.logger is not None:
            self.logger.log_hyperparams(self.hparams)

        # Buffers filled during prediction/evaluation.
        self.y_hat = []
        self.index = []
        self.critic = []
    def create_generator(self):
        """Assemble the 32x32 encoder/decoder generator from its layer specs.

        Each spec tuple is (filters, stride, dropout); the decoder applies
        0.5 dropout on its two lowest-resolution layers.
        """
        encoder_specs = [
            (64, 1, 0),   # 32x32xch  -> 32x32x64
            (128, 2, 0),  # 32x32x64  -> 16x16x128
            (256, 2, 0),  # 16x16x128 -> 8x8x256
            (512, 2, 0),  # 8x8x256   -> 4x4x512
            (512, 2, 0),  # 4x4x512   -> 2x2x512
        ]

        decoder_specs = [
            (512, 2, 0.5),  # 2x2x512   -> 4x4x512
            (256, 2, 0.5),  # 4x4x512   -> 8x8x256
            (128, 2, 0),    # 8x8x256   -> 16x16x128
            (64, 2, 0),     # 16x16x128 -> 32x32x64
        ]

        return Generator('gen', encoder_specs, decoder_specs)
def generate():
    """Restore the trained generator and write NUMS_GEN sample images.

    Samples NUMS_GEN latent vectors from a standard normal, runs them
    through the restored generator in inference mode, and saves each
    output as ./generate/<i>.jpg.
    """
    train_phase = tf.placeholder(tf.bool)
    z = tf.placeholder(tf.float32, [None, 128])
    G = Generator("generator")
    fake_img = G(z, train_phase)
    # Run the session as a context manager so it is always closed, even
    # if restore/run raises (the original leaked the session).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Restore only the generator's variables from the checkpoint.
        saver = tf.train.Saver(
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "generator"))
        saver.restore(sess, "./save_para/.\\model.ckpt")
        Z = np.random.standard_normal([NUMS_GEN, 128])
        FAKE_IMG = sess.run(fake_img, feed_dict={z: Z, train_phase: False})
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists / os.mkdir pair.
    os.makedirs("./generate", exist_ok=True)
    for i in range(NUMS_GEN):
        # Generator output is in [-1, 1]; rescale to [0, 255] uint8.
        Image.fromarray(np.uint8(
            (FAKE_IMG[i] + 1) * 127.5)).save("./generate/" + str(i) + ".jpg")
Ejemplo n.º 27
0
def main():
    """Export a trained DCGAN generator as a WebAssembly descriptor.

    Restores the epoch-200 snapshot into a CPU model, traces one noise
    batch through it, converts the traced graph with WebDNN's
    ChainerConverter, and saves the descriptor to ./model.
    """
    z_dim = 100
    device = -1  # CPU
    batch_size = 1
    model = Generator(z_dim)

    # Load directly on the CPU. The original called model.to_gpu() before
    # loading and to_cpu() after, which serves no purpose for
    # serializers.load_npz and crashes on machines without CUDA/CuPy —
    # contradicting the explicit device = -1 (CPU) above.
    chainer.serializers.load_npz('result-dcgan/gen_snapshot_epoch-200.npz',
                                 model)

    x, _ = model.generate_noise(device, batch_size)
    y = model(x)

    graph = ChainerConverter().convert([x], [y])
    exec_info = generate_descriptor("webassembly", graph)
    exec_info.save("./model")
Ejemplo n.º 28
0
    def __init__(self, dataset_dir, log_dir, generator_channels,
                 discriminator_channels, nz, style_depth, lrs, betas, eps,
                 phase_iter, batch_size, n_cpu, opt_level):
        """Set up the StyleGAN trainer: hyper-parameters, data pipeline,
        networks and the TensorBoard recorder.
        """
        # Plain hyper-parameter attributes (no side effects).
        self.nz = nz
        self.phase_iter = phase_iter
        self.lrs = lrs
        self.betas = betas
        self.opt_level = opt_level

        # Data pipeline; the loader is sized to phase_iter * 2 samples
        # (presumably one fade-in plus one stabilize pass — confirm).
        self.dataloader = Dataloader(dataset_dir, batch_size, phase_iter * 2,
                                     n_cpu)

        # Networks, wrapped for multi-GPU and moved onto the device.
        self.generator = cuda(
            DataParallel(Generator(generator_channels, nz, style_depth)))
        self.discriminator = cuda(
            DataParallel(Discriminator(discriminator_channels)))

        # TensorBoard recorder for training curves.
        self.tb = tensorboard.tf_recorder('StyleGAN', log_dir)
Ejemplo n.º 29
0
Archivo: model.py Proyecto: xlnwel/cv
    def _build_graph(self):
        """Build the full GAN graph: input pipeline, generator, a pair of
        discriminators (one fed real images, one fed generated images,
        built with reuse=True), the two losses, and their optimizer ops.
        """
        # Pin the data-loading ops to the CPU.
        # NOTE(review): '/CPU: 0' contains a stray space — the usual
        # spelling is '/CPU:0'; confirm TF's device parser tolerates it.
        with tf.device('/CPU: 0'):
            self.image = self._prepare_data()
        # The generator needs the batch size to shape its noise input.
        gen_args = self.args['generator']
        gen_args['batch_size'] = self.batch_size
        self.generator = Generator('Generator', 
                                    gen_args, 
                                    self.graph, 
                                    self.training,
                                    scope_prefix= self.name, 
                                    log_tensorboard=self.log_tensorboard,
                                    log_params=self.log_params)
        self.gen_image = self.generator.image
        dis_args = self.args['discriminator']
        # Discriminator over real images — built first, without reuse.
        self.real_discriminator = Discriminator('Discriminator', 
                                                dis_args, 
                                                self.graph, 
                                                self.image,
                                                False,
                                                self.training,
                                                scope_prefix= self.name,
                                                log_tensorboard=self.log_tensorboard,
                                                log_params=self.log_params)
        # Discriminator over generated images. reuse=True presumably
        # shares variables with real_discriminator (TF variable-scope
        # convention — confirm in Discriminator); logging is disabled
        # here, avoiding duplicate summaries for the same variables.
        self.fake_discriminator = Discriminator('Discriminator',
                                                dis_args,
                                                self.graph,
                                                self.gen_image,
                                                False,
                                                self.training,
                                                scope_prefix=self.name,
                                                log_tensorboard=False,
                                                log_params=False,
                                                reuse=True)
        
        # Adversarial losses, defined by subclass/helper methods.
        self.gen_loss = self._generator_loss()
        self.dis_loss = self._discriminator_loss()

        # One optimization op per network; the optimizer op is applied
        # to real_discriminator, the variable owner. Extra returns unused.
        self.gen_opt_op, _, _ = self.generator._optimization_op(self.gen_loss)
        self.dis_opt_op, _, _ = self.real_discriminator._optimization_op(self.dis_loss)
        
        # Training-info logging also kept on the CPU (same caveat as above).
        with tf.device('/CPU: 0'):
            self._log_train_info()
Ejemplo n.º 30
0
 def __init__(self, arg, device):
     """Rebuild a fully-grown progressive GAN generator for inference.

     Grows the network stage by stage up to the target resolution,
     freezes all parameters, restores the EMA weights from
     'checkpoint.tar', and puts the model in eval mode on `device`.
     """
     self.device = device
     # network
     self.nc = arg.nc
     self.nz = arg.nz
     self.init_size = arg.init_size
     self.size = arg.size
     self.G = Generator(nc=self.nc, nz=self.nz, size=self.size)
     # pre-growing: one stage per doubling from init_size up to size
     total_stages = int(math.log2(self.size / self.init_size)) + 1
     for i in range(total_stages - 1):
         self.G.grow_network()
         self.G.flush_network()
     # inference only — no gradients needed
     for param in self.G.parameters():
         param.requires_grad_(False)
     # load checkpoint; map_location='cpu' lets a GPU-saved checkpoint
     # load on a CPU-only machine (the original torch.load had no
     # map_location and would fail there); .to(device) below still
     # moves the model wherever the caller asked
     checkpoint = torch.load('checkpoint.tar', map_location='cpu')
     self.G.load_state_dict(checkpoint['G_EMA_state_dict'])
     self.G = self.G.to(self.device)
     self.G.eval()