Example #1
    def __init__(self, dataset, n_blocks, final_shape):
        GAN.__init__(self, dataset, self.LATENT_DIM)

        self.n_blocks = n_blocks
        self.final_shape = final_shape
        # Works only for 3 dimensional images (HxWxC)
        self.image_size = final_shape[0:2]  # Allows for rectangular images
        self.channels = final_shape[2]
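A minimal sketch of how the shape handling above resolves for a concrete, purely illustrative final_shape value; LATENT_DIM is a class constant defined elsewhere in that project and is not shown here.

# Illustrative only; the final_shape value is a placeholder.
final_shape = (128, 96, 3)      # HxWxC
image_size = final_shape[0:2]   # (128, 96) -- rectangular images allowed
channels = final_shape[2]       # 3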
Example #2
File: progan.py Project: Recoan0/PokeGan
    def __init__(self, dataset, n_blocks, start_shape, latent_dim):
        GAN.__init__(self, dataset, latent_dim)

        self.start_shape = start_shape
        self.discriminator_models = Discriminator.define_discriminator(n_blocks, start_shape)
        self.generator_models = Generator.define_generator(latent_dim, n_blocks, start_shape)

        self.gan_models = ProGAN._define_composite(self.discriminator_models, self.generator_models)

        self.constant_latent_vector = ProGAN._generate_latent_points(self.latent_dim, 1)
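A hedged usage sketch for this constructor. The dataset helper and all argument values below are placeholders chosen for illustration, not taken from the Recoan0/PokeGan project, and assume the ProGAN class above is importable.

# Hypothetical instantiation; argument values are illustrative only.
dataset = load_training_images()        # assumed helper returning image data
progan = ProGAN(dataset,
                n_blocks=6,             # number of progressive growth stages
                start_shape=(4, 4, 3),  # resolution of the first block (HxWxC)
                latent_dim=128)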
Example #3
    def __init__(self,
                 name,
                 learning_rate,
                 n_output,
                 noise_dim,
                 discriminator,
                 generator,
                 beta=0.9,
                 gen_kwargs={},
                 disc_kwargs={},
                 graph=None):

        self.noise_dim = noise_dim
        self.n_output = n_output
        out_shape = [None] + self.n_output
        self.discriminator = discriminator
        self.generator = generator

        GAN.__init__(self, name, graph)

        with tf.variable_scope(name):

            self.noise = tf.placeholder(tf.float32,
                                        shape=[None,
                                               noise_dim])  # Noise vector.
            self.real_pc = tf.placeholder(tf.float32,
                                          shape=out_shape)  # Ground-truth.

            with tf.variable_scope('generator'):
                self.generator_out = self.generator(self.noise,
                                                    self.n_output[0],
                                                    **gen_kwargs)

            with tf.variable_scope('discriminator') as scope:
                self.real_prob, self.real_logit = self.discriminator(
                    self.real_pc, scope=scope, **disc_kwargs)
                self.synthetic_prob, self.synthetic_logit = self.discriminator(
                    self.generator_out, reuse=True, scope=scope, **disc_kwargs)

            self.loss_d = tf.reduce_mean(-safe_log(self.real_prob) -
                                         safe_log(1 - self.synthetic_prob))
            self.loss_g = tf.reduce_mean(-safe_log(self.synthetic_prob))

            train_vars = tf.trainable_variables()

            d_params = [
                v for v in train_vars
                if v.name.startswith(name + '/discriminator/')
            ]
            g_params = [
                v for v in train_vars
                if v.name.startswith(name + '/generator/')
            ]

            self.opt_d = self.optimizer(learning_rate, beta, self.loss_d,
                                        d_params)
            self.opt_g = self.optimizer(learning_rate, beta, self.loss_g,
                                        g_params)
            self.saver = tf.train.Saver(tf.global_variables(),
                                        max_to_keep=None)
            self.init = tf.global_variables_initializer()

            # Launch the session
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            self.sess.run(self.init)
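A sketch of how this constructor might be wired up. It assumes TensorFlow 1.x (the snippet uses tf.placeholder and tf.variable_scope); the class name PointCloudGAN, the builder functions, and the argument values are all hypothetical, since the original snippet does not name its class or networks.

# Hypothetical builders matching the call signatures used in the constructor above.
def my_generator(noise, n_points, **kwargs):
    ...  # build and return a [batch, n_points, 3] tensor

def my_discriminator(x, reuse=False, scope=None, **kwargs):
    ...  # return (probability, logit)

gan = PointCloudGAN('pc_gan',             # variable-scope name
                    learning_rate=1e-4,
                    n_output=[2048, 3],   # 2048 points with 3 coordinates each
                    noise_dim=128,
                    discriminator=my_discriminator,
                    generator=my_generator)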
Example #4
 def __init__(self,
              dset_name,
              imsize,
              nc,
              data_root='./data',
              results_root='./results',
              noise_dim=100,
              dout_dim=1,
              batch_size=64,
              max_giters=50000,
              lr=1e-4,
              clip_disc=False,
              disc_size=64,
              gp_lambda=10.,
              ecfd_type='gaussian_ecfd',
              sigmas=[1.0],
              num_freqs=8,
              optimize_sigma=False,
              disc_net='flexible-dcgan',
              gen_net='flexible-dcgan'):
     """Intializer for a CFGANGP model.
     
     Arguments:
         dset_name {str} -- Name of the dataset.
         imsize {int} -- Size of the image.
         nc {int} -- Number of channels.
     
     Keyword Arguments:
         data_root {str} -- Directory where datasets are stored (default: {'./data'}).
         results_root {str} -- Directory where results will be saved (default: {'./results'}).
         noise_dim {int} -- Dimension of noise input to generator (default: {100}).
         dout_dim {int} -- Dimension of output from discriminator (default: {1}).
         batch_size {int} -- Batch size (default: {64}).
         max_giters {int} -- Maximum number of generator iterations (default: {50000}).
         lr {float} -- Learning rate (default: {1e-4}).
         clip_disc {bool} -- Whether to clip the discriminator's parameters to [-0.01, 0.01].
                             This should be True when gradient penalty is not used (default: {False}).
         disc_size {int} -- Number of filters in the first Conv layer of critic. (default: {64}).
         gp_lambda {float} -- Trade-off for gradient penalty (default: {10.0}).
         ecfd_type {str} -- Weighting distribution for ECFD (default: {'gaussian_ecfd'}).
         sigmas {list} -- A list of sigmas (default: {[1.0]}).
         num_freqs {int} -- Number of random frequencies for ECFD (default: {8}).
         optimize_sigma {bool} -- Whether to optimize sigma (default: {False}).
         disc_net {str} -- Discriminator network type (default: {'flexible-dcgan'}).
         gen_net {str} -- Generator network type (default: {'flexible-dcgan'}).
     """
     GAN.__init__(self,
                  dset_name,
                  imsize,
                  nc,
                  data_root=data_root,
                  results_root=results_root,
                  noise_dim=noise_dim,
                  dout_dim=dout_dim,
                  batch_size=batch_size,
                  clip_disc=gp_lambda == 0.,
                  max_giters=max_giters,
                  lr=lr,
                  disc_size=disc_size,
                  batch_norm=False,
                  gen_net=gen_net,
                  disc_net=disc_net)
     self.ecfd_fn = getattr(ecfd, ecfd_type)
     self.optimize_sigma = optimize_sigma
     self.num_freqs = num_freqs
     self.reg_lambda = 16.0
     cls_name = self.__class__.__name__.lower()
     if optimize_sigma:
         cls_name = 'o' + cls_name
     self.results_root = os.path.join(results_root, dset_name, cls_name)
     if optimize_sigma:
         self.lg_sigmas = torch.zeros((1, dout_dim)).cuda()
         self.lg_sigmas.requires_grad = True
         self.d_optim = torch.optim.RMSprop(
             list(self.discriminator.parameters()) + [self.lg_sigmas],
             lr=lr)
         self.results_root += '_{:s}'.format(ecfd_type)
     else:
         self.sigmas = sigmas
         self.results_root += '_{:s}_{:s}'.format(
             ecfd_type, '_'.join(map(str, sigmas)))
     self.gp_lambda = gp_lambda
     self.results_root = os.path.join(self.results_root, self.gen_net)
     self.ensure_dirs()
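A hedged instantiation sketch for the CFGANGP constructor using the arguments documented above; the dataset name and hyperparameter values are illustrative and assume the class is importable from the project.

# Hypothetical usage; all values are illustrative only.
model = CFGANGP('cifar10',              # dset_name
                imsize=32,
                nc=3,
                noise_dim=100,
                batch_size=64,
                gp_lambda=10.,          # 0. would enable weight clipping instead
                ecfd_type='gaussian_ecfd',
                sigmas=[1.0],
                num_freqs=8,
                optimize_sigma=False)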
Example #5
 def __init__(self, *args, **kwargs):
     GAN.__init__(self, *args, **kwargs)