Example no. 1
def generator(z_seed, is_training, init=False, reuse=False):
    with tf.variable_scope('generator_model', reuse=reuse):
        counter = {}
        x = z_seed
        with tf.variable_scope('dense_1'):
            x = tf.layers.dense(x, units=4 * 4 * 512, kernel_initializer=init_kernel)  # project noise to 4*4*512
            x = tf.layers.batch_normalization(x, training=is_training, name='batchnorm_1')
            x = tf.nn.relu(x)

        # reshape the flat projection into a 4x4 spatial map with 512 channels
        x = tf.reshape(x, [-1, 4, 4, 512])

        with tf.variable_scope('deconv_1'):
            x = tf.layers.conv2d_transpose(x, 256, [5, 5], strides=[2, 2], padding='SAME', kernel_initializer=init_kernel)  # 4x4 -> 8x8
            x = tf.layers.batch_normalization(x, training=is_training, name='batchnorm_2')
            x = tf.nn.relu(x)

        with tf.variable_scope('deconv_2'):
            x = tf.layers.conv2d_transpose(x, 128, [5, 5], strides=[2, 2], padding='SAME', kernel_initializer=init_kernel)  # 8x8 -> 16x16
            x = tf.layers.batch_normalization(x, training=is_training, name='batchnorm_3')
            x = tf.nn.relu(x)

        with tf.variable_scope('deconv_3'):
            # weight-normalized output layer (external nn helper): 16x16 -> 32x32, 3 channels, tanh
            output = nn.deconv2d(x, num_filters=3, filter_size=[5, 5], stride=[2, 2], nonlinearity=tf.tanh, init=init,
                                 counters=counter, init_scale=0.1)
        return output
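
A minimal sketch of how this generator might be wired into a TF1 graph, assuming `init_kernel` is a module-level kernel initializer and `nn` is the weight-norm helper module used in the last layer; the noise dimension of 100 and the placeholder names are illustrative:

import numpy as np
import tensorflow as tf

# hypothetical module-level initializer assumed by the snippet above
init_kernel = tf.random_normal_initializer(stddev=0.05)

z = tf.placeholder(tf.float32, shape=[None, 100], name='z_seed')
is_training_pl = tf.placeholder(tf.bool, name='is_training')

fake = generator(z, is_training_pl)                     # first call creates the variables
fake_reused = generator(z, is_training_pl, reuse=True)  # later calls must pass reuse=True

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    seed = np.random.uniform(-1, 1, (16, 100)).astype(np.float32)
    imgs = sess.run(fake, feed_dict={z: seed, is_training_pl: False})
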
Example no. 2
    def bad_sampler(self, z_seed, is_training=False, init=False):
        with tf.variable_scope('bad_generator', reuse=tf.AUTO_REUSE):
            counter = {}
            x = z_seed
            with tf.variable_scope('dense_1'):
                # x = tf.layers.dense(x, units=4 * 4 * 512, kernel_initializer = init_kernel)
                # x = tf.layers.batch_normalization(x, training=is_training, name='batchnorm_1')
                x = nn._linear_fc(x, 4 * 4 * 512, 'bg_h0_lin')
                x = nn.batch_norm_contrib(x,
                                          name='batchnorm_1',
                                          train=is_training)
                x = tf.nn.relu(x)

            x = tf.reshape(x, [-1, 4, 4, 512])

            with tf.variable_scope('deconv_1'):
                # x = tf.layers.conv2d_transpose(x, 256, [5, 5], strides=[2, 2], padding='SAME',
                #                                kernel_initializer=init_kernel)
                # x = tf.layers.batch_normalization(x, training=is_training, name='batchnorm_2')
                x = nn._deconv2d(x,
                                 256,
                                 k_w=5,
                                 k_h=5,
                                 d_w=2,
                                 d_h=2,
                                 name='bg_dconv0')  # [8, 8]
                x = nn.batch_norm_contrib(x,
                                          name='batchnorm_2',
                                          train=is_training)
                x = tf.nn.relu(x)

            with tf.variable_scope('deconv_2'):
                # x = tf.layers.conv2d_transpose(x, 128, [5, 5], strides=[2, 2], padding='SAME',
                #                                kernel_initializer=init_kernel)
                # x = tf.layers.batch_normalization(x, training=is_training, name='batchnormn_3')
                x = nn._deconv2d(x,
                                 128,
                                 k_w=5,
                                 k_h=5,
                                 d_w=2,
                                 d_h=2,
                                 name='bg_dconv1')  # [16, 16]
                x = nn.batch_norm_contrib(x,
                                          name='batchnorm_3',
                                          train=is_training)
                x = tf.nn.relu(x)

            with tf.variable_scope('deconv_3'):
                # weight-normalized output layer: [16, 16] -> [32, 32], 3 channels, tanh
                output = nn.deconv2d(x,
                                     num_filters=3,
                                     filter_size=[5, 5],
                                     stride=[2, 2],
                                     nonlinearity=tf.tanh,
                                     init=init,
                                     counters=counter,
                                     init_scale=0.1)
        return output
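
The `init` flag and the `counters` dict follow the weight-normalization convention of the PixelCNN++-style `nn` helpers, where the output layer is initialized data-dependently on the pass that sets `init=True`. A sketch of the usual two-pass usage, assuming `model` is an instance of the class this method belongs to (names here are illustrative):

import numpy as np
import tensorflow as tf

z = tf.placeholder(tf.float32, shape=[None, 100], name='z')

# one pass with init=True performs the data-dependent initialization of the
# weight-normalized output layer; tf.AUTO_REUSE lets the second call share
# the same variables
init_pass = model.bad_sampler(z, is_training=True, init=True)
samples = model.bad_sampler(z, is_training=False, init=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    seed = np.random.uniform(-1, 1, (16, 100)).astype(np.float32)
    sess.run(init_pass, feed_dict={z: seed})  # run the init pass once
    imgs = sess.run(samples, feed_dict={z: seed})
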
Example no. 3
    def _build_generator(self,
                         tensor=None,
                         label=None,
                         training=False,
                         batch_norm=None,
                         init=False):

        assert self.n_pixel % 16 == 0, "n_pixel has to be a multiple of 16"
        nfilt = 512
        csize = 4
        if label is None:
            if self.input_y_g is None:
                # TO BE EDITED
                # sample random labels for the generated images
                batch_size = self.input_z_g.shape[0]
                #label = get_one_hot(np.repeat(np.tile(self.n_class), (batch_size // self.n_class) + 1), depth=self.n_class)
                #label = label[0:batch_size, :]
                label = get_one_hot(
                    np.random.randint(self.n_class, size=batch_size),
                    self.n_class)
            else:
                # get label from input
                label = self.input_y_g
        if tensor is None:
            # add label to noise
            tensor = tf.concat([self.input_z_g, label], 1)
        else:
            # assuming tensor is a specific noise
            tensor = tf.concat([tensor, label], 1)
            # tensor = tf.concat([tensor, tf.one_hot(label, self.n_class)], 1)
        if batch_norm is None:
            batch_norm = self.batch_norm_G
        if batch_norm:

            def bn(x, name=None):
                # forward the name so each call gets its own variable scope
                return tf.contrib.layers.batch_norm(x,
                                                    is_training=training,
                                                    renorm=BATCH_RENORM,
                                                    decay=BATCH_NORM_DECAY,
                                                    scope=name)
        else:
            # return the input unchanged when batch norm is not activated
            bn = tf.identity
        with tf.variable_scope('generator', reuse=tf.AUTO_REUSE) as scope:
            # tf.AUTO_REUSE creates the variables on the first call and
            # reuses them on every subsequent call, so no manual reuse
            # check is needed:
            #if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope.name):
            #    scope.reuse_variables()

            # initial layer
            with tf.variable_scope('initial.{0}-{1}'.format(
                    self.n_noise + self.n_class, nfilt)):
                tensor = tf.reshape(
                    tensor, [-1, 1, 1, self.n_noise + self.n_class])
                tensor = tf.layers.dense(tensor,
                                         units=4 * 4 * 512,
                                         kernel_initializer=init_normal(0.05),
                                         name='dense')
                tensor = tf.nn.relu(bn(tensor, name='bn'))
                tensor = tf.reshape(tensor, shape=[-1, 4, 4, 512])

            # upscaling layers
            while csize < self.n_pixel / 2:
                with tf.variable_scope('pyramid.{0}-{1}'.format(
                        nfilt, nfilt // 2)):
                    tensor = convconcatlayer(tensor, label)
                    tensor = tf.layers.conv2d_transpose(
                        tensor,
                        nfilt // 2,
                        5,
                        2,
                        'same',
                        use_bias=not batch_norm,
                        kernel_initializer=init_normal(0.05),
                        name='conv')
                    tensor = tf.nn.relu(bn(tensor, name='bn'))
                csize *= 2
                nfilt //= 2

            # final layer
            with tf.variable_scope('final.{0}-{1}'.format(
                    nfilt, self.n_channel)):
                tensor = weightnorm.deconv2d(tensor,
                                             self.n_channel, [5, 5], [2, 2],
                                             'SAME',
                                             init=init,
                                             nonlinearity=tf.tanh)

                # removed mask layer
        return tensor, label
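
Two helpers referenced above are not shown. Presumably `get_one_hot` is a numpy one-hot encoder and `convconcatlayer` is the usual conditional-GAN trick of tiling the one-hot label over the spatial grid and concatenating it to the feature maps along the channel axis. A sketch of both under those assumptions (the real signatures may differ):

import numpy as np
import tensorflow as tf

def get_one_hot(targets, depth):
    # numpy one-hot encoding of integer class ids
    return np.eye(depth, dtype=np.float32)[targets]

def convconcatlayer(x, label):
    # broadcast the one-hot label over H x W and append it as extra
    # constant channels: [B, H, W, C] -> [B, H, W, C + n_class]
    label = tf.convert_to_tensor(label, dtype=x.dtype)
    n_class = label.get_shape().as_list()[-1]
    label_map = tf.reshape(label, [-1, 1, 1, n_class])
    h, w = x.get_shape().as_list()[1:3]
    label_map = tf.tile(label_map, [1, h, w, 1])
    return tf.concat([x, label_map], axis=3)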