Example #1
    def __init__(
        self,
        logdir,  # directory of stored models
        imgdir,  # directory of images for FeedDict
        learning_rate=0.001,  # Adam optimizer learning rate
        beta1=0,  # Adam optimizer beta1
        beta2=0.99,  # Adam optimizer beta2
        w_lambda=10.0,  # WGAN-GP/LP lambda
        w_gamma=1.0,  # WGAN-GP/LP gamma
        epsilon=0.001,  # WGAN-GP/LP epsilon
        z_length=512,  # latent variable size
        n_imgs=800000,  # number of images to show in each growth step
        batch_repeats=1,  # number of times to repeat minibatch
        n_examples=24,  # number of example images to generate
        lipschitz_penalty=True,  # if True, use WGAN-LP instead of WGAN-GP
        big_image=True,  # generate a single large preview image; only works if n_examples == 24
        reset_optimizer=True,  # reset optimizer variables with each new layer
        batch_sizes=None,  # per-resolution minibatch sizes (defaults below if None)
        channels=None,  # per-resolution channel counts (defaults below if None)
    ):

        # Per-resolution channel counts and batch sizes (defaults used if not provided)
        self.channels = channels if channels else [
            512, 512, 512, 512, 256, 128, 64, 32, 16, 16
        ]
        self.batch_sizes = batch_sizes if batch_sizes else [
            16, 16, 16, 16, 16, 16, 12, 4, 3
        ]

        self.z_length = z_length
        self.n_examples = n_examples
        self.batch_repeats = batch_repeats if batch_repeats else 1
        self.n_imgs = n_imgs
        self.logdir = logdir
        self.big_image = big_image
        self.w_lambda = w_lambda
        self.w_gamma = w_gamma
        self.epsilon = epsilon
        self.reset_optimizer = reset_optimizer
        self.lipschitz_penalty = lipschitz_penalty

        # Initialize FeedDict
        self.feed = FeedDict.load(logdir,
                                  imgdir=imgdir,
                                  z_length=z_length,
                                  n_examples=n_examples)
        self.n_layers = self.feed.n_sizes
        self.max_imgs = (self.n_layers - 0.5) * self.n_imgs * 2  # total images over all stages; the first stage has no fade-in phase

        # Initialize placeholders
        self.x_placeholder = tf.placeholder(tf.uint8, [None, 3, None, None])
        self.z_placeholder = tf.placeholder(tf.float32, [None, self.z_length])

        # Global step
        with tf.variable_scope('global_step'):
            self.global_step = tf.Variable(0,
                                           name='global_step',
                                           trainable=False,
                                           dtype=tf.int32)

        # Non-trainable variables for counting images toward the next layer and for the fade-in value alpha
        with tf.variable_scope('image_count'):
            self.total_imgs = tf.Variable(0,
                                          name='total_images',
                                          trainable=False,
                                          dtype=tf.int32)

            img_offset = tf.add(self.total_imgs, self.n_imgs)
            imgs_per_layer = self.n_imgs * 2

            self.img_step = tf.mod(img_offset, imgs_per_layer)
            self.layer = tf.minimum(tf.floor_div(img_offset, imgs_per_layer),
                                    self.n_layers - 1)

            fade_in = tf.to_float(self.img_step) / float(self.n_imgs)
            self.alpha = tf.minimum(1.0, tf.maximum(0.0, fade_in))

        # Initialize the optimizers as member variables if not reset_optimizer; otherwise store the
        # hyperparameters and build a new optimizer for each layer
        if self.reset_optimizer:
            self.lr = learning_rate
            self.beta1 = beta1
            self.beta2 = beta2
        else:
            self.g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1,
                                                      beta2)
            self.d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1,
                                                      beta2)
        self.networks = [
            self.create_network(i + 1) for i in range(self.n_layers)
        ]

        # Initialize Session, FileWriter and Saver
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.writer = tf.summary.FileWriter(self.logdir, graph=self.sess.graph)
        self.saver = tf.train.Saver()

        # Look in logdir to see if a saved model already exists. If so, load it
        try:
            self.saver.restore(self.sess,
                               tf.train.latest_checkpoint(self.logdir))
            print('Restored model -----------\n')
        except Exception:
            pass
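
The image_count block in Example #1 encodes the progressive-growing schedule: each resolution stage gets n_imgs images of linear fade-in (alpha rising from 0 to 1) followed by n_imgs images at alpha = 1 before the next layer starts. A plain-Python sketch of the same arithmetic is shown below; n_imgs matches the default above, while n_layers = 9 is only an illustrative value (the example actually reads it from FeedDict).

    def growth_schedule(total_imgs, n_imgs=800000, n_layers=9):
        # Plain-Python mirror of the tf.mod / tf.floor_div / clamp ops above.
        img_offset = total_imgs + n_imgs        # shift so layer 0 starts fully blended in
        imgs_per_layer = n_imgs * 2             # fade-in phase + stabilization phase
        img_step = img_offset % imgs_per_layer  # progress within the current stage
        layer = min(img_offset // imgs_per_layer, n_layers - 1)
        alpha = min(1.0, max(0.0, img_step / n_imgs))
        return layer, alpha

    # Layer 0 trains at alpha = 1; every later layer fades in over n_imgs images.
    for imgs in (0, 800000, 1200000, 2400000):
        print(growth_schedule(imgs))  # (0, 1.0), (1, 0.0), (1, 0.5), (2, 0.0)
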
Example #2
    def __init__(
            self,
            logdir,  # directory of stored models
            imgdir,  # directory of images for FeedDict
            learning_rate=0.001,  # Adam optimizer learning rate
            beta1=0,  # Adam optimizer beta1
            beta2=0.99,  # Adam optimizer beta2
            w_lambda=10.0,  # WGAN-GP/LP lambda
            w_gamma=1.0,  # WGAN-GP/LP gamma
            epsilon=0.001,  # WGAN-GP/LP epsilon
            z_length=512,  # latent variable size
            n_imgs=800000,  # number of images to show in each growth step
            batch_repeats=1,  # number of times to repeat minibatch
            n_examples=24,  # number of example images to generate
            lipschitz_penalty=True,  # if True, use WGAN-LP instead of WGAN-GP
            big_image=True,  # generate a single large preview image; only works if n_examples == 24
            scaling_factor=None,  # factor to scale down number of trainable parameters
            reset_optimizer=False,  # reset optimizer variables with each new layer
    ):

        # Scale down the number of channels if scaling_factor is provided
        self.channels = [512, 512, 512, 512, 256, 128, 64, 32, 16, 8]
        if scaling_factor:
            assert scaling_factor > 1
            self.channels = [
                max(4, c // scaling_factor) for c in self.channels
            ]

        self.batch_size = [16, 16, 16, 16, 16, 16, 8, 4, 3]
        self.z_length = z_length
        self.n_examples = n_examples
        self.batch_repeats = batch_repeats if batch_repeats else 1
        self.n_imgs = n_imgs
        self.logdir = logdir
        self.big_image = big_image
        self.w_lambda = w_lambda
        self.w_gamma = w_gamma
        self.epsilon = epsilon
        self.reset_optimizer = reset_optimizer
        self.lipschitz_penalty = lipschitz_penalty
        self.start = True

        # Generate fixed latent variables for image previews
        np.random.seed(0)
        self.z_fixed = np.random.normal(size=[self.n_examples, self.z_length])

        # Initialize placeholders
        self.x_placeholder = tf.placeholder(tf.float32, [None, None, None, 3])
        self.z_placeholder = tf.placeholder(tf.float32, [None, self.z_length])

        # Global step
        with tf.variable_scope('global_step'):
            self.global_step = tf.Variable(0,
                                           name='global_step',
                                           trainable=False)
            self.global_step_op = tf.assign(self.global_step,
                                            tf.add(self.global_step, 1))

        # Non-trainable variables for counting images toward the next layer and for the fade-in value alpha
        with tf.variable_scope('image_count'):
            self.total_imgs = tf.Variable(0.0,
                                          name='image_step',
                                          trainable=False)
            self.img_count_placeholder = tf.placeholder(tf.float32)
            self.img_step_op = tf.assign(
                self.total_imgs,
                tf.add(self.total_imgs, self.img_count_placeholder))

            self.img_step = tf.mod(tf.add(self.total_imgs, self.n_imgs),
                                   self.n_imgs * 2)
            self.alpha = tf.minimum(1.0, tf.div(self.img_step, self.n_imgs))
            self.layer = tf.floor_div(tf.add(self.total_imgs, self.n_imgs),
                                      self.n_imgs * 2)

        # Initialize the optimizers as member variables if not reset_optimizer; otherwise store the
        # hyperparameters and build a new optimizer for each layer
        if self.reset_optimizer:
            self.lr = learning_rate
            self.beta1 = beta1
            self.beta2 = beta2
        else:
            self.g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1,
                                                      beta2)
            self.d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1,
                                                      beta2)

        # Initialize FeedDict
        self.feed = FeedDict.load(imgdir, logdir)
        self.n_layers = int(np.log2(1024)) - 1  # 9 resolution stages: 4x4 up to 1024x1024
        self.networks = [
            self._create_network(i + 1) for i in range(self.n_layers)
        ]

        # Initialize Session, FileWriter and Saver
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.writer = tf.summary.FileWriter(self.logdir, graph=self.sess.graph)
        self.saver = tf.train.Saver()

        # Look in logdir to see if a saved model already exists. If so, load it
        try:
            self.saver.restore(self.sess,
                               tf.train.latest_checkpoint(self.logdir))
            print('Restored ----------------\n')
        except Exception:
            pass
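
Both excerpts are only the constructor of a larger progressive-GAN training class; the class definition, FeedDict, and the training loop are not shown. A hedged instantiation sketch follows, in which the class name ProgressiveGAN, the module name model, and the directory paths are all placeholder assumptions rather than names taken from the source.

    from model import ProgressiveGAN  # hypothetical module and class name

    gan = ProgressiveGAN(
        logdir='logs/pgan',      # placeholder: checkpoints and TensorBoard summaries go here
        imgdir='data/images',    # placeholder: training images consumed through FeedDict
        learning_rate=0.001,
        lipschitz_penalty=True,  # WGAN-LP penalty instead of WGAN-GP
    )
    # Training would then call whichever step/train method the full class exposes,
    # which is not part of these excerpts.
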