Example 1
    def generator(self, x_data, name='g_'):
        with tf.variable_scope(name):
            x_data, output = flatten(x_data), None

            if self.flags.dataset == 'mnist':
                g0 = tf.nn.relu(tf_utils.linear(x_data,
                                                self.num_hiddens[0],
                                                name='fc1'),
                                name='relu1')
                output = tf_utils.linear(
                    g0, self.image_size[0] * self.image_size[1] *
                    self.image_size[2])
            elif self.flags.dataset == 'cifar10':
                g0 = tf.nn.relu(tf_utils.linear(x_data,
                                                self.num_hiddens[0],
                                                name='fc1'),
                                name='relu1')
                g1 = tf.nn.relu(tf_utils.linear(g0,
                                                self.num_hiddens[1],
                                                name='fc2'),
                                name='relu2')
                output = tf_utils.linear(
                    g1, self.image_size[0] * self.image_size[1] *
                    self.image_size[2])
            else:
                raise NotImplementedError

        return self.out_func(output)
Example 2
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # 64 -> 32 or 32 -> 16
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16 or 16 -> 8
            h1_conv = tf_utils.conv2d(h0_lrelu, self.dis_c[1], name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8 or 8 -> 4
            h2_conv = tf_utils.conv2d(h1_lrelu, self.dis_c[2], name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            if (self.flags.dataset == 'mnist') or (self.flags.dataset == 'cifar10'):
                h2_flatten = flatten(h2_lrelu)
                h3_linear = tf_utils.linear(h2_flatten, 1, name='h3_linear')

                return tf.nn.sigmoid(h3_linear), h3_linear
            else:
                # 8 -> 4
                h3_conv = tf_utils.conv2d(h2_lrelu, self.dis_c[3], name='h3_conv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
                h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

                h3_flatten = flatten(h3_lrelu)
                h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

                return tf.nn.sigmoid(h4_linear), h4_linear
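Returning both the sigmoid activation and the raw logits is deliberate: GAN losses are normally built on the logits, which is numerically more stable than taking the log of a sigmoid. A minimal sketch of that wiring, where model, real_images and g_samples are hypothetical stand-ins that are not part of the example above:

import tensorflow as tf

# The second call passes is_reuse=True so both branches share one set of weights.
_, d_logit_real = model.discriminator(real_images, name='d_')
_, d_logit_fake = model.discriminator(g_samples, name='d_', is_reuse=True)

d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logit_real, labels=tf.ones_like(d_logit_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logit_fake, labels=tf.zeros_like(d_logit_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logit_fake, labels=tf.ones_like(d_logit_fake)))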
Example 3
    def decoder(self, z, name='decoder', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()
            tf_utils.print_activations(z)

            # 1st hidden layer
            h0_linear = tf_utils.linear(z, self.n_hidden, name='h0_linear')
            h0_tanh = tf_utils.tanh(h0_linear, name='h0_tanh')
            h0_drop = tf.nn.dropout(h0_tanh,
                                    keep_prob=self.keep_prob_tfph,
                                    name='h0_drop')
            tf_utils.print_activations(h0_drop)

            # 2nd hidden layer
            h1_linear = tf_utils.linear(h0_drop,
                                        self.n_hidden,
                                        name='h1_linear')
            h1_elu = tf_utils.elu(h1_linear, name='h1_elu')
            h1_drop = tf.nn.dropout(h1_elu,
                                    keep_prob=self.keep_prob_tfph,
                                    name='h1_drop')
            tf_utils.print_activations(h1_drop)

            # 3rd hidden layer
            h2_linear = tf_utils.linear(h1_drop,
                                        self.output_dim,
                                        name='h2_linear')
            h2_sigmoid = tf_utils.sigmoid(h2_linear, name='h2_sigmoid')
            tf_utils.print_activations(h2_sigmoid)

            output = tf.reshape(h2_sigmoid, [-1, *self.image_size])
            tf_utils.print_activations(output)

        return output
Example 4
    def forward_network(self, input_img, reuse=False):
        with tf.compat.v1.variable_scope(self.name, reuse=reuse):
            tf_utils.print_activations(input_img, logger=None)
            inputs = self.conv2d_fixed_padding(inputs=input_img, filters=64, kernel_size=7, strides=2, name='conv1')
            inputs = tf_utils.max_pool(inputs, name='3x3_maxpool', ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                                       logger=None)

            inputs = self.block_layer(inputs=inputs, filters=64, block_fn=self.bottleneck_block, blocks=self.layers[0],
                                      strides=1, train_mode=False, name='block_layer1')
            inputs = self.block_layer(inputs=inputs, filters=128, block_fn=self.bottleneck_block, blocks=self.layers[1],
                                      strides=2, train_mode=False, name='block_layer2')
            inputs = self.block_layer(inputs=inputs, filters=256, block_fn=self.bottleneck_block, blocks=self.layers[2],
                                      strides=2, train_mode=False, name='block_layer3')
            inputs = self.block_layer(inputs=inputs, filters=512, block_fn=self.bottleneck_block, blocks=self.layers[3],
                                      strides=2, train_mode=False, name='block_layer4')

            inputs = tf_utils.relu(inputs, name='before_flatten_relu', logger=None)

            # _, h, w, _ = inputs.get_shape().as_list()
            # inputs = tf_utils.avg_pool(inputs, name='gap', ksize=[1, h, w, 1], strides=[1, 1, 1, 1], logger=self.logger)

            # Flatten & FC1
            inputs = tf_utils.flatten(inputs, name='flatten', logger=None)
            inputs = tf_utils.linear(inputs, 512, name='FC1')
            inputs = tf_utils.relu(inputs, name='FC1_relu', logger=None)

            inputs = tf_utils.linear(inputs, 256, name='FC2')
            inputs = tf_utils.relu(inputs, name='FC2_relu', logger=None)

            logits = tf_utils.linear(inputs, self.num_attribute, name='Out')

            return logits
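The conv2d_fixed_padding helper is not shown in this example. For reference, here is a minimal sketch of what such a helper typically does, modeled on the public TensorFlow ResNet reference code (an assumption about this repository, not its verified implementation):

import tensorflow as tf

def fixed_padding(inputs, kernel_size):
    # Pad height and width by kernel_size - 1 in total, independent of the
    # input size, so strided convolutions downsample deterministically.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    return tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])

def conv2d_fixed_padding(inputs, filters, kernel_size, strides, name):
    # Explicit padding is only needed when striding; otherwise 'SAME' matches.
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size)
    return tf.layers.conv2d(inputs, filters=filters, kernel_size=kernel_size,
                            strides=strides,
                            padding=('VALID' if strides > 1 else 'SAME'),
                            use_bias=False, name=name)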
Example 5
    def discriminator(self, y_data, name='d_', is_reuse=False):
        with tf.variable_scope(name, reuse=is_reuse):
            y_data = flatten(y_data)
            d0 = tf.nn.relu(
                tf_utils.linear(y_data, self.num_hidden, name='fc1'))
            d1 = tf_utils.linear(d0, 1, name='fc2')

        return tf.nn.sigmoid(d1), d1
Example 6
    def generator(self, x_data, name='g_'):
        with tf.variable_scope(name):
            x_data = flatten(x_data)
            g0 = tf.nn.relu(tf_utils.linear(x_data,
                                            self.num_hidden,
                                            name='fc1'),
                            name='relu1')
            g1 = tf_utils.linear(
                g0,
                self.image_size[0] * self.image_size[1] * self.image_size[2])

        return tf.nn.tanh(g1)
Example 7
def Discriminator(inputs, is_reuse=True, name='disc'):
    with tf.variable_scope(name, reuse=is_reuse):
        print('is_reuse: {}'.format(is_reuse))
        output01 = tf_utils.linear(inputs, DIM, name='fc-1')
        output01 = tf_utils.relu(output01, name='relu-1')

        output02 = tf_utils.linear(output01, DIM, name='fc-2')
        output02 = tf_utils.relu(output02, name='relu-2')

        output03 = tf_utils.linear(output02, DIM, name='fc-3')
        output03 = tf_utils.relu(output03, name='relu-3')

        output04 = tf_utils.linear(output03, DIS_DIM, name='fc-4')
        
        return output04
Example 8
    def generator(self, data, name='g_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            data_flatten = flatten(data)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten, 4*4*self.gen_c[0], name='h0_linear')
            h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu, self.gen_c[1], name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu, self.gen_c[2], name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 32 x 32
            h3_deconv = tf_utils.deconv2d(h2_relu, self.gen_c[3], name='h3_deconv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # 64 x 64
            output = tf_utils.deconv2d(h3_relu, self.image_size[2], name='h4_deconv2d')
            return tf.nn.tanh(output)
Example 9
    def input_to_feature(self, z):
        with tf.variable_scope("g_", reuse=True):
            data_flatten = flatten(z)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten,
                                        4 * 4 * self.gen_c[0],
                                        name='h0_linear')
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape,
                                               name='h0_batchnorm',
                                               _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu,
                                          self.gen_c[1],
                                          name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv,
                                               name='h1_batchnorm',
                                               _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          self.gen_c[2],
                                          name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv,
                                               name='h2_batchnorm',
                                               _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            return h2_relu
Example 10
    def encoder(self, data, name='enc_'):
        with tf.variable_scope(name):
            # 64 -> 32 or 32 -> 16
            h0_conv = tf_utils.conv3d(data, self.dis_c[0], name='h0_conv3d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16 or 16 -> 8
            h1_conv = tf_utils.conv3d(h0_lrelu, self.dis_c[1], name='h1_conv3d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8 or 8 -> 4
            h2_conv = tf_utils.conv3d(h1_lrelu, self.dis_c[2], name='h2_conv3d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            h3_conv = tf_utils.conv3d(h2_lrelu, self.dis_c[3], name='h3_conv3d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            h4_conv = tf_utils.conv3d(h3_lrelu, self.dis_c[3], k_d=1, k_h=2, k_w=2, name='h4_conv3d')
            h4_batchnorm = tf_utils.batch_norm(h4_conv, name='h4_batchnorm', _ops=self._dis_train_ops)
            h4_lrelu = tf_utils.lrelu(h4_batchnorm, name='h4_lrelu')

            h4_flatten = flatten(h4_lrelu)
            h5_linear = tf_utils.linear(h4_flatten, 1024, name='h5_linear')
            return tf.nn.sigmoid(h5_linear), h5_linear
Example 11
    def decoder(self, data, name='dec_'):
        with tf.variable_scope(name):
            # (1, 5, 8)
            h0_linear = tf_utils.linear(data, 5 * 8 * self.dis_c[3], input_size=1024, name='h0_linear')
            h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 1, 5, 8, self.dis_c[3]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # (10, 15)
            h1_deconv = tf_utils.deconv3d(h0_relu, self.dis_c[3], output_size=[10, 15, 256], k_t=4, d_t=5, d_h=2,
                                          d_w=2, padding_='SAME', name='h1_deconv3d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # (20, 30)
            h2_deconv = tf_utils.deconv3d(h1_relu, self.dis_c[2], output_size=[20, 30, 256], stepup_out=1,
                                          name='h2_deconv3d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # (40, 60)
            h3_deconv = tf_utils.deconv3d(h2_relu, self.dis_c[1], output_size=[40, 60, 128], stepup_out=2,
                                          name='h3_deconv3d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # (79, 119)
            h4_deconv = tf_utils.deconv3d(h3_relu, self.dis_c[0], output_size=[79, 119, 64], stepup_out=2, d_t=1,
                                          d_h=2, d_w=2, padding_='SAME', name='h4_deconv3d')
            h4_batchnorm = tf_utils.batch_norm(h4_deconv, name='h4_batchnorm', _ops=self._gen_train_ops)
            h4_relu = tf.nn.relu(h4_batchnorm, name='h4_relu')

            # image_size
            output = tf_utils.deconv3d(h4_relu, self.image_size[2], output_size=self.image_size, d_t=1, d_h=2, d_w=2,
                                       padding_='SAME', name='h5_deconv3d')
            return tf.nn.tanh(output)
Example 12
    def generator(self, data, name='g_'):
        with tf.variable_scope(name):
            data_flatten = flatten(data)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten, 4*4*self.gen_c[0], name='h0_linear')
            h0_reshape = tf.reshape(h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape, name='h0_batchnorm', _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu, self.gen_c[1], name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv, name='h1_batchnorm', _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu, self.gen_c[2], name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv, name='h2_batchnorm', _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 32 x 32
            if (self.flags.dataset == 'mnist') or (self.flags.dataset == 'cifar10'):
                output = tf_utils.deconv2d(h2_relu, self.image_size[2], name='h3_deconv2d')
                return tf.nn.tanh(output)
            else:
                h3_deconv = tf_utils.deconv2d(h2_relu, self.gen_c[3], name='h3_deconv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_deconv, name='h3_batchnorm', _ops=self._gen_train_ops)
                h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

                # 64 x 64
                output = tf_utils.deconv2d(h3_relu, self.image_size[2], name='h4_deconv2d')
                return tf.nn.tanh(output)
Example 13
    def basicDiscriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()
            tf_utils.print_activations(data)

            # from (N, 32, 32, 1) to (N, 16, 16, 64)
            h0_conv = tf_utils.conv2d(data,
                                      self.dis_c[0],
                                      k_h=5,
                                      k_w=5,
                                      name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # from (N, 16, 16, 64) to (N, 8, 8, 128)
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      k_h=5,
                                      k_w=5,
                                      name='h1_conv2d')
            h1_lrelu = tf_utils.lrelu(h1_conv, name='h1_lrelu')

            # from (N, 8, 8, 128) to (N, 4, 4, 256)
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      k_h=5,
                                      k_w=5,
                                      name='h2_conv2d')
            h2_lrelu = tf_utils.lrelu(h2_conv, name='h2_lrelu')

            # from (N, 4, 4, 256) to (N, 4096) and to (N, 1)
            h2_flatten = flatten(h2_lrelu)
            h3_linear = tf_utils.linear(h2_flatten, 1, name='h3_linear')

            return tf.nn.sigmoid(h3_linear), h3_linear
Example 14
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # (128, 256) -> (64, 128)
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # (64, 128) -> (32, 64)
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # (32, 64) -> (16, 32)
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # (16, 32) -> (8, 16)
            h3_conv = tf_utils.conv2d(h2_lrelu,
                                      self.dis_c[3],
                                      name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv,
                                               name='h3_batchnorm',
                                               _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # (8, 16) -> (4, 8)
            h4_conv = tf_utils.conv2d(h3_lrelu,
                                      self.dis_c[4],
                                      name='h4_conv2d')
            h4_batchnorm = tf_utils.batch_norm(h4_conv,
                                               name='h4_batchnorm',
                                               _ops=self._dis_train_ops)
            h4_lrelu = tf_utils.lrelu(h4_batchnorm, name='h4_lrelu')

            # (4, 8) -> (2, 4)
            h5_conv = tf_utils.conv2d(h4_lrelu,
                                      self.dis_c[5],
                                      name='h5_conv2d')
            h5_batchnorm = tf_utils.batch_norm(h5_conv,
                                               name='h5_batchnorm',
                                               _ops=self._dis_train_ops)
            h5_lrelu = tf_utils.lrelu(h5_batchnorm, name='h5_lrelu')

            h5_flatten = flatten(h5_lrelu)
            h6_linear = tf_utils.linear(h5_flatten, 1, name='h6_linear')

            return tf.nn.sigmoid(h6_linear), h6_linear
Example 15
    def basicGenerator(self, data, name='g_'):
        with tf.variable_scope(name):
            data_flatten = flatten(data)
            tf_utils.print_activations(data_flatten)

            # from (N, 128) to (N, 4, 4, 256)
            h0_linear = tf_utils.linear(data_flatten,
                                        self.gen_c[0],
                                        name='h0_linear')
            if self.flags.dataset == 'cifar10':
                h0_linear = tf.reshape(h0_linear, [
                    tf.shape(h0_linear)[0], 4, 4,
                    int(self.gen_c[0] / (4 * 4))
                ])
                h0_linear = tf_utils.norm(h0_linear,
                                          _type='batch',
                                          _ops=self.gen_train_ops,
                                          name='h0_norm')
            h0_relu = tf.nn.relu(h0_linear, name='h0_relu')
            h0_reshape = tf.reshape(
                h0_relu,
                [tf.shape(h0_relu)[0], 4, 4,
                 int(self.gen_c[0] / (4 * 4))])

            # from (N, 4, 4, 256) to (N, 8, 8, 128)
            h1_deconv = tf_utils.deconv2d(h0_reshape,
                                          self.gen_c[1],
                                          k_h=5,
                                          k_w=5,
                                          name='h1_deconv2d')
            if self.flags.dataset == 'cifar10':
                h1_deconv = tf_utils.norm(h1_deconv,
                                          _type='batch',
                                          _ops=self.gen_train_ops,
                                          name='h1_norm')
            h1_relu = tf.nn.relu(h1_deconv, name='h1_relu')

            # from (N, 8, 8, 128) to (N, 16, 16, 64)
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          self.gen_c[2],
                                          k_h=5,
                                          k_w=5,
                                          name='h2_deconv2d')
            if self.flags.dataset == 'cifar10':
                h2_deconv = tf_utils.norm(h2_deconv,
                                          _type='batch',
                                          _ops=self.gen_train_ops,
                                          name='h2_norm')
            h2_relu = tf.nn.relu(h2_deconv, name='h2_relu')

            # from (N, 16, 16, 64) to (N, 32, 32, 1)
            output = tf_utils.deconv2d(h2_relu,
                                       self.image_size[2],
                                       k_h=5,
                                       k_w=5,
                                       name='h3_deconv2d')

            return tf_utils.tanh(output)
Example 16
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()
            h2_lrelu, _ = self.encoder(data)

            h4_linear = tf_utils.linear(h2_lrelu, 1, name='h4_linear')
            return tf.nn.sigmoid(h4_linear), h4_linear
Example 17
def Generator(n_samples, real_data_, name='gen'):
    if FIXED_GENERATOR:
        return real_data_ + (1. * tf.random_normal(tf.shape(real_data_)))
    else:
        with tf.variable_scope(name):
            noise = tf.random_normal([n_samples, 100])
            output01 = tf_utils.linear(noise, DIM, name='fc-1')
            output01 = tf_utils.relu(output01, name='relu-1')
            
            output02 = tf_utils.linear(output01, DIM, name='fc-2')
            output02 = tf_utils.relu(output02, name='relu-2')
            
            output03 = tf_utils.linear(output02, DIM, name='fc-3')
            output03 = tf_utils.relu(output03, name='relu-3')
            
            output04 = tf_utils.linear(output03, GEN_DIM, name='fc-4')
            
            return output04
Example 18
    def discriminator(self, y_data, name='d_', is_reuse=False):
        with tf.variable_scope(name, reuse=is_reuse):
            y_data, output = flatten(y_data), None

            if self.flags.dataset == 'mnist':
                d0 = tf.nn.relu(
                    tf_utils.linear(y_data, self.num_hiddens[0], name='fc1'))
                output = tf_utils.linear(d0, 1, name='fc2')
            elif self.flags.dataset == 'cifar10':
                d0 = tf.nn.relu(
                    tf_utils.linear(y_data, self.num_hiddens[0], name='fc1'))
                d1 = tf.nn.relu(
                    tf_utils.linear(d0, self.num_hiddens[1], name='fc2'))
                output = tf_utils.linear(d1, 1, name='fc3')
            else:
                raise NotImplementedError

        return tf.nn.sigmoid(output), output
Example 19
    def Generator_Softmax(n_samples, name='gen'):
        with tf.variable_scope(name):
            noise = tf.random_normal([n_samples, GEN_DIM])
            output01 = tf_utils.linear(noise, 2*DIM, name='fc-1')
            output01 = tf_utils.relu(output01, name='relu-1')
            
            output02 = tf_utils.linear(output01, 2*DIM, name='fc-2')
            output02 = tf_utils.relu(output02, name='relu-2')
            
            output03 = tf_utils.linear(output02, 2*DIM, name='fc-3')
            output03 = tf_utils.relu(output03, name='relu-3')

            output04 = tf_utils.linear(output03, GEN_DIM, name='fc-4')

            # Reminder: a logit can be modeled as a linear function of the predictors
            output05 = tf.nn.softmax(output04, name='softmax-1')

            return output05
Example 20
    def encoder(self, data, name='encoder'):
        with tf.variable_scope(name):
            data_flatten = flatten(data)
            tf_utils.print_activations(data_flatten)

            # 1st hidden layer
            h0_linear = tf_utils.linear(data_flatten,
                                        self.n_hidden,
                                        name='h0_linear')
            h0_elu = tf_utils.elu(h0_linear, name='h0_elu')
            h0_drop = tf.nn.dropout(h0_elu,
                                    keep_prob=self.keep_prob_tfph,
                                    name='h0_drop')
            tf_utils.print_activations(h0_drop)

            # 2nd hidden layer
            h1_linear = tf_utils.linear(h0_drop,
                                        self.n_hidden,
                                        name='h1_linear')
            h1_tanh = tf_utils.tanh(h1_linear, name='h1_tanh')
            h1_drop = tf.nn.dropout(h1_tanh,
                                    keep_prob=self.keep_prob_tfph,
                                    name='h1_drop')
            tf_utils.print_activations(h1_drop)

            # 3rd hidden layer
            h2_linear = tf_utils.linear(h1_drop,
                                        2 * self.flags.z_dim,
                                        name='h2_linear')
            tf_utils.print_activations(h2_linear)

            # The mean parameter is unconstrained
            mean = h2_linear[:, :self.flags.z_dim]
            # The standard deviation must be positive.
            # Parameterize with a softplus and add a small epsilon for numerical stability
            stddev = 1e-6 + tf.nn.softplus(h2_linear[:, self.flags.z_dim:])

            tf_utils.print_activations(mean)
            tf_utils.print_activations(stddev)

        return mean, stddev
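The encoder parameterizes a diagonal Gaussian, so downstream code typically samples the latent code with the reparameterization trick and penalizes the KL divergence from the prior. A minimal sketch, where model and x are hypothetical stand-ins:

import tensorflow as tf

mean, stddev = model.encoder(x)

# z = mu + sigma * eps with eps ~ N(0, I); the sample stays differentiable
# with respect to mean and stddev.
eps = tf.random_normal(tf.shape(mean))
z = mean + stddev * eps

# KL divergence of N(mean, stddev^2) from the standard normal prior.
kl = 0.5 * tf.reduce_sum(
    tf.square(mean) + tf.square(stddev)
    - tf.log(1e-8 + tf.square(stddev)) - 1.0, axis=1)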
Example 21
def Generator(n_samples, real_data_, name='gen'):
    if FIXED_GENERATOR:
        return real_data_ + (1. * tf.random_normal(tf.shape(real_data_)))
    else:
        with tf.variable_scope(name):
            noise = tf.random_normal([n_samples, GEN_DIM])
            output01 = tf_utils.linear(noise, DIM, name='fc-1')
            output01 = tf_utils.relu(output01, name='relu-1')

            output02 = tf_utils.linear(output01, DIM, name='fc-2')
            output02 = tf_utils.relu(output02, name='relu-2')

            output03 = tf_utils.linear(output02, DIM, name='fc-3')
            output03 = tf_utils.relu(output03, name='relu-3')

            output04 = tf_utils.linear(output03, GEN_DIM, name='fc-4')

            # Reminder: a logit can be modeled as a linear function of the predictors
            output05 = tf.nn.softmax(output04, name='softmax-1')

            return output05
Example 22
    def resnetDiscriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()
            tf_utils.print_activations(data)

            # (N, 64, 64, 64)
            conv_0 = tf_utils.conv2d(data,
                                     output_dim=self.dis_c[0],
                                     k_h=3,
                                     k_w=3,
                                     d_h=1,
                                     d_w=1,
                                     name='conv_0')
            # (N, 32, 32, 128)
            resblock_1 = tf_utils.res_block_v2(conv_0,
                                               self.dis_c[1],
                                               filter_size=3,
                                               _ops=self.dis_train_ops,
                                               norm_='layer',
                                               resample='down',
                                               name='res_block_1')
            # (N, 16, 16, 256)
            resblock_2 = tf_utils.res_block_v2(resblock_1,
                                               self.dis_c[2],
                                               filter_size=3,
                                               _ops=self.dis_train_ops,
                                               norm_='layer',
                                               resample='down',
                                               name='res_block_2')
            # (N, 8, 8, 512)
            resblock_3 = tf_utils.res_block_v2(resblock_2,
                                               self.dis_c[3],
                                               filter_size=3,
                                               _ops=self.dis_train_ops,
                                               norm_='layer',
                                               resample='down',
                                               name='res_block_3')
            # (N, 4, 4, 512)
            resblock_4 = tf_utils.res_block_v2(resblock_3,
                                               self.dis_c[4],
                                               filter_size=3,
                                               _ops=self.dis_train_ops,
                                               norm_='layer',
                                               resample='down',
                                               name='res_block_4')
            # (N, 4*4*512)
            flatten_5 = flatten(resblock_4)
            output = tf_utils.linear(flatten_5, 1, name='output')

            return tf.nn.sigmoid(output), output
Example 23
    def __init__(self, input_dim, output_dim=1, optimizer=None, use_dropout=True, lr=0.001, random_seed=123,
                 is_train=True, log_dir=None, name=None):
        self.name = name
        self.is_train = is_train
        self.log_dir = log_dir
        self.cur_lr = None
        self.logger, self.file_handler, self.stream_handler = utils.init_logger(log_dir=self.log_dir,
                                                                                name=self.name,
                                                                                is_train=self.is_train)
        with tf.variable_scope(self.name):
            # Placeholders for inputs
            self.X = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='X')
            self.y = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name='y')
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            tf_utils.print_activations(self.X, logger=self.logger if self.is_train else None)

            # Placeholders for TensorBoard
            self.train_acc = tf.placeholder(tf.float32, name='train_acc')
            self.val_acc = tf.placeholder(tf.float32, name='val_acc')

            net = self.X
            if use_dropout:
                net = tf_utils.dropout(x=net,
                                       keep_prob=self.keep_prob,
                                       seed=random_seed,
                                       name='dropout',
                                       logger=self.logger if self.is_train else None)

            # Network, loss, and optimizer
            self.y_pred = tf_utils.linear(net, output_size=output_dim)
            tf_utils.print_activations(self.y_pred, logger=self.logger if self.is_train else None)
            self.loss = tf.math.reduce_mean(tf.nn.l2_loss(self.y_pred - self.y))
            self.train_op, self.cur_lr = optimizer_fn(optimizer, lr=lr, loss=self.loss, name=self.name)

            # Accuracy etc
            self.y_pred_round = tf.math.round(x=self.y_pred, name='rounded_pred')
            accuracy = tf.equal(tf.cast(x=self.y_pred_round, dtype=tf.int32), tf.cast(x=self.y, dtype=tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(x=accuracy, dtype=tf.float32)) * 100.

        self._tensorboard()
        tf_utils.show_all_variables(logger=self.logger if self.is_train else None)
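A hypothetical training step for the model above, where sess, x_batch and y_batch are stand-ins for the session and a mini-batch; keep_prob must be fed because dropout is part of the graph:

_, batch_loss = sess.run([model.train_op, model.loss],
                         feed_dict={model.X: x_batch,
                                    model.y: y_batch,
                                    model.keep_prob: 0.5})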
Example 24
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name, reuse=is_reuse) as scope:
            # 64 -> 32
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 8 -> 4
            h3_conv = tf_utils.conv2d(h2_lrelu,
                                      self.dis_c[3],
                                      name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv,
                                               name='h3_batchnorm',
                                               _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            h3_flatten = flatten(h3_lrelu)
            h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

            return tf.nn.sigmoid(h4_linear), h4_linear
Example 25
    def __call__(self, x, is_train=True):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, 120, 160, 1) -> (N, 60, 80, 64)
            h0_conv = tf_utils.conv2d(
                x,
                output_dim=self.dims[0],
                initializer='he',
                name='h0_conv',
                logger=self.logger if is_train is True else None)
            h0_lrelu = tf_utils.lrelu(
                h0_conv,
                name='h0_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 60, 80, 64) -> (N, 30, 40, 128)
            h1_conv = tf_utils.conv2d(
                h0_lrelu,
                output_dim=self.dims[1],
                initializer='he',
                name='h1_conv',
                logger=self.logger if is_train is True else None)
            h1_norm = tf_utils.norm(
                h1_conv,
                name='h1_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h1_lrelu = tf_utils.lrelu(
                h1_norm,
                name='h1_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 30, 40, 128) -> (N, 15, 20, 256)
            h2_conv = tf_utils.conv2d(
                h1_lrelu,
                output_dim=self.dims[2],
                initializer='he',
                name='h2_conv',
                logger=self.logger if is_train is True else None)
            h2_norm = tf_utils.norm(
                h2_conv,
                name='h2_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h2_lrelu = tf_utils.lrelu(
                h2_norm,
                name='h2_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 15, 20, 256) -> (N, 8, 10, 512)
            h3_conv = tf_utils.conv2d(
                h2_lrelu,
                output_dim=self.dims[3],
                initializer='he',
                name='h3_conv',
                logger=self.logger if is_train is True else None)
            h3_norm = tf_utils.norm(
                h3_conv,
                name='h3_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h3_lrelu = tf_utils.lrelu(
                h3_norm,
                name='h3_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 8, 10, 512) -> (N, 4, 5, 1024)
            h4_conv = tf_utils.conv2d(
                h3_lrelu,
                output_dim=self.dims[4],
                initializer='he',
                name='h4_conv',
                logger=self.logger if is_train is True else None)
            h4_norm = tf_utils.norm(
                h4_conv,
                name='h4_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h4_lrelu = tf_utils.lrelu(
                h4_norm,
                name='h4_lrelu',
                logger=self.logger if is_train is True else None)
            # (N, 4, 5, 1024) -> (N, 4*5*1024)
            h4_flatten = tf_utils.flatten(
                h4_lrelu,
                name='h4_flatten',
                logger=self.logger if is_train is True else None)

            # (N, 4*5*1024) -> (N, 1)
            output = tf_utils.linear(
                h4_flatten,
                output_size=self.dims[5],
                initializer='he',
                name='output',
                logger=self.logger if is_train is True else None)

            # Set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
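Because the object flips self.reuse to True after the first call, it can score several batches while sharing a single set of weights. A hypothetical usage, where disc is an instance of this class:

d_real_logit = disc(real_images)   # first call creates the variables
d_fake_logit = disc(fake_images)   # later calls reuse them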
Example 26
    def __call__(self, x, is_train=True):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, 100) -> (N, 4, 5, 512)
            h0_linear = tf_utils.linear(
                x,
                4 * 5 * self.dims[0],
                name='h0_linear',
                initializer='He',
                logger=self.logger if is_train is True else None)
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 4, 5, self.dims[0]])

            # (N, 4, 5, 512) -> (N, 8, 10, 512)
            resblock_1 = tf_utils.res_block_v2(
                x=h0_reshape,
                k=self.dims[1],
                filter_size=3,
                _ops=self._ops,
                norm_='batch',
                resample='up',
                name='res_block_1',
                logger=self.logger if is_train is True else None)

            # (N, 8, 10, 512) -> (N, 16, 20, 256)
            resblock_2 = tf_utils.res_block_v2(
                x=resblock_1,
                k=self.dims[2],
                filter_size=3,
                _ops=self._ops,
                norm_='batch',
                resample='up',
                name='res_block_2',
                logger=self.logger if is_train is True else None)

            # (N, 16, 20, 256) -> (N, 15, 20, 256): crop one row so the three
            # remaining 2x upsamplings reach a height of 15 * 2^3 = 120
            resblock_2_split, _ = tf.split(resblock_2, [15, 1],
                                           axis=1,
                                           name='resblock_2_split')
            tf_utils.print_activations(
                resblock_2_split,
                logger=self.logger if is_train is True else None)

            # (N, 15, 20, 256) -> (N, 30, 40, 128)
            resblock_3 = tf_utils.res_block_v2(
                x=resblock_2_split,
                k=self.dims[3],
                filter_size=3,
                _ops=self._ops,
                norm_='batch',
                resample='up',
                name='res_block_3',
                logger=self.logger if is_train is True else None)

            # (N, 30, 40, 128) -> (N, 60, 80, 64)
            resblock_4 = tf_utils.res_block_v2(
                x=resblock_3,
                k=self.dims[4],
                filter_size=3,
                _ops=self._ops,
                norm_='batch',
                resample='up',
                name='res_block_4',
                logger=self.logger if is_train is True else None)

            # (N, 60, 80, 64) -> (N, 120, 160, 64)
            resblock_5 = tf_utils.res_block_v2(
                x=resblock_4,
                k=self.dims[5],
                filter_size=3,
                _ops=self._ops,
                norm_='batch',
                resample='up',
                name='res_block_5',
                logger=self.logger if is_train is True else None)

            norm_5 = tf_utils.norm(
                resblock_5,
                name='norm_5',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)

            relu_5 = tf_utils.relu(
                norm_5,
                name='relu_5',
                logger=self.logger if is_train is True else None)

            # (N, 120, 160, 64) -> (N, 120, 160, 3)
            conv_6 = tf_utils.conv2d(
                relu_5,
                output_dim=self.dims[6],
                k_h=3,
                k_w=3,
                d_h=1,
                d_w=1,
                name='conv_6',
                logger=self.logger if is_train is True else None)

            output = tf_utils.tanh(
                conv_6,
                name='output',
                logger=self.logger if is_train is True else None)

        # Set reuse=True for next call
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=self.name)

        return output
Example 27
    def generator(self, data, name='g_'):
        with tf.variable_scope(name):
            data_flatten = flatten(data)
            tf_utils.print_activations(data_flatten)

            # from (N, 128) to (N, 2, 4, 512)
            h0_linear = tf_utils.linear(data_flatten,
                                        self.gen_c[0],
                                        name='h0_linear')
            h0_reshape = tf.reshape(
                h0_linear,
                [tf.shape(h0_linear)[0], 2, 4,
                 int(self.gen_c[0] / (2 * 4))])

            # (N, 4, 8, 512)
            resblock_1 = tf_utils.res_block_v2(h0_reshape,
                                               self.gen_c[1],
                                               filter_size=3,
                                               _ops=self.gen_train_ops,
                                               norm_='batch',
                                               resample='up',
                                               name='res_block_1')
            # (N, 8, 16, 256)
            resblock_2 = tf_utils.res_block_v2(resblock_1,
                                               self.gen_c[2],
                                               filter_size=3,
                                               _ops=self.gen_train_ops,
                                               norm_='batch',
                                               resample='up',
                                               name='res_block_2')
            # (N, 16, 32, 128)
            resblock_3 = tf_utils.res_block_v2(resblock_2,
                                               self.gen_c[3],
                                               filter_size=3,
                                               _ops=self.gen_train_ops,
                                               norm_='batch',
                                               resample='up',
                                               name='res_block_3')
            # (N, 32, 64, 64)
            resblock_4 = tf_utils.res_block_v2(resblock_3,
                                               self.gen_c[4],
                                               filter_size=3,
                                               _ops=self.gen_train_ops,
                                               norm_='batch',
                                               resample='up',
                                               name='res_block_4')
            # (N, 64, 128, 32)
            resblock_5 = tf_utils.res_block_v2(resblock_4,
                                               self.gen_c[5],
                                               filter_size=3,
                                               _ops=self.gen_train_ops,
                                               norm_='batch',
                                               resample='up',
                                               name='res_block_5')
            # (N, 128, 256, 32)
            resblock_6 = tf_utils.res_block_v2(resblock_5,
                                               self.gen_c[6],
                                               filter_size=3,
                                               _ops=self.gen_train_ops,
                                               norm_='batch',
                                               resample='up',
                                               name='res_block_6')

            norm_7 = tf_utils.norm(resblock_6,
                                   _type='batch',
                                   _ops=self.gen_train_ops,
                                   name='norm_7')
            relu_7 = tf_utils.relu(norm_7, name='relu_7')

            # (N, 128, 256, 3)
            output = tf_utils.conv2d(relu_7,
                                     output_dim=self.image_size[2],
                                     k_w=3,
                                     k_h=3,
                                     d_h=1,
                                     d_w=1,
                                     name='output')

            return tf_utils.tanh(output)
Example 28
    def __call__(self, x, is_train=True):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, 100) -> (N, 4, 5, 1024)
            h0_linear = tf_utils.linear(
                x,
                4 * 5 * self.dims[0],
                name='h0_linear',
                initializer='He',
                logger=self.logger if is_train is True else None)
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 4, 5, self.dims[0]])
            h0_norm = tf_utils.norm(
                h0_reshape,
                name='h0_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h0_relu = tf_utils.relu(
                h0_norm,
                name='h0_relu',
                logger=self.logger if is_train is True else None)

            # (N, 4, 5, 1024) -> (N, 8, 10, 512)
            h1_deconv = tf_utils.deconv2d(
                h0_relu,
                output_dim=self.dims[1],
                name='h1_deconv2d',
                initializer='He',
                logger=self.logger if is_train is True else None)
            h1_norm = tf_utils.norm(
                h1_deconv,
                name='h1_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h1_relu = tf_utils.relu(
                h1_norm,
                name='h1_relu',
                logger=self.logger if is_train is True else None)

            # (N, 8, 10, 512) -> (N, 16, 20, 256)
            h2_deconv = tf_utils.deconv2d(
                h1_relu,
                output_dim=self.dims[2],
                name='h2_deconv2d',
                initializer='He',
                logger=self.logger if is_train is True else None)
            h2_norm = tf_utils.norm(
                h2_deconv,
                name='h2_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h2_relu = tf_utils.relu(
                h2_norm,
                name='h2_relu',
                logger=self.logger if is_train is True else None)
            # (N, 16, 20, 256) -> (N, 15, 20, 256): crop one row so the three
            # remaining 2x deconvolutions reach a height of 15 * 2^3 = 120
            h2_split, _ = tf.split(h2_relu, [15, 1], axis=1, name='h2_split')
            tf_utils.print_activations(
                h2_split, logger=self.logger if is_train is True else None)

            # (N, 15, 20, 256) -> (N, 30, 40, 128)
            h3_deconv = tf_utils.deconv2d(
                h2_split,
                output_dim=self.dims[3],
                name='h3_deconv2d',
                initializer='He',
                logger=self.logger if is_train is True else None)
            h3_norm = tf_utils.norm(
                h3_deconv,
                name='h3_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h3_relu = tf_utils.relu(
                h3_norm,
                name='h3_relu',
                logger=self.logger if is_train is True else None)

            # (N, 30, 40, 128) -> (N, 60, 80, 64)
            h4_deconv = tf_utils.deconv2d(
                h3_relu,
                output_dim=self.dims[4],
                name='h4_deconv2d',
                initializer='He',
                logger=self.logger if is_train is True else None)
            h4_norm = tf_utils.norm(
                h4_deconv,
                name='h4_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h4_relu = tf_utils.relu(
                h4_norm,
                name='h4_relu',
                logger=self.logger if is_train is True else None)

            # (N, 60, 80, 64) -> (N, 120, 160, 1)
            h5_deconv = tf_utils.deconv2d(
                h4_relu,
                output_dim=self.dims[5],
                name='h5_deconv',
                initializer='He',
                logger=self.logger if is_train is True else None)
            output = tf_utils.tanh(
                h5_deconv,
                name='output',
                logger=self.logger if is_train is True else None)

            # Set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Example 29
    def generator(self, data, name='g_'):
        with tf.variable_scope(name):
            data_flatten = flatten(data)

            # 2 x 4
            h0_linear = tf_utils.linear(data_flatten,
                                        2 * 4 * self.gen_c[0],
                                        name='h0_linear')
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 2, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape,
                                               name='h0_batchnorm',
                                               _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 4 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu,
                                          self.gen_c[1],
                                          name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv,
                                               name='h1_batchnorm',
                                               _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 8 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          self.gen_c[2],
                                          name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv,
                                               name='h2_batchnorm',
                                               _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 16 x 32
            h3_deconv = tf_utils.deconv2d(h2_relu,
                                          self.gen_c[3],
                                          name='h3_deconv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_deconv,
                                               name='h3_batchnorm',
                                               _ops=self._gen_train_ops)
            h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

            # 32 x 64
            h4_deconv = tf_utils.deconv2d(h3_relu,
                                          self.gen_c[4],
                                          name='h4_deconv2d')
            h4_batchnorm = tf_utils.batch_norm(h4_deconv,
                                               name='h4_batchnorm',
                                               _ops=self._gen_train_ops)
            h4_relu = tf.nn.relu(h4_batchnorm, name='h4_relu')

            # 64 x 128
            h5_deconv = tf_utils.deconv2d(h4_relu,
                                          self.gen_c[5],
                                          name='h5_deconv2d')
            h5_batchnorm = tf_utils.batch_norm(h5_deconv,
                                               name='h5_batchnorm',
                                               _ops=self._gen_train_ops)
            h5_relu = tf.nn.relu(h5_batchnorm, name='h5_relu')

            # 128 x 256
            output = tf_utils.deconv2d(h5_relu,
                                       self.image_size[2],
                                       name='h6_deconv2d')
            return tf.nn.tanh(output)
Example 30
    def KGenerator(self, data, reuse=False, name='Kg_'):
        with tf.variable_scope(name, reuse=reuse):
            data_flatten = flatten(data)
            #tf_utils.print_activations(data_flatten)

            # from (N, 64) to (N, 4, 4, 128)
            h0_linear = tf_utils.linear(data_flatten,
                                        4 * 4 * 128,
                                        name='h0_linear')

            h0_linear = tf.reshape(h0_linear,
                                   [tf.shape(h0_linear)[0], 4, 4, 128])
            h0_linear = tf_utils.norm(h0_linear,
                                      _type='instance',
                                      name='h0_norm')
            h0_relu = tf.nn.relu(h0_linear, name='h0_relu')
            h0_reshape = tf.reshape(h0_relu, [tf.shape(h0_relu)[0], 4, 4, 128])

            # from (N, 4, 4, 128) to (N, 8, 8, 128)
            h1_deconv = tf_utils.deconv2d(h0_reshape,
                                          128,
                                          k_h=5,
                                          k_w=5,
                                          name='h1_deconv2d',
                                          is_print=False)

            h1_deconv = tf_utils.norm(h1_deconv,
                                      _type='instance',
                                      name='h1_norm')
            h1_relu = tf.nn.relu(h1_deconv, name='h1_relu')

            # from (N, 8, 8, 128) to (N, 16, 16, 64)
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          64,
                                          k_h=5,
                                          k_w=5,
                                          name='h2_deconv2d',
                                          is_print=False)

            h2_deconv = tf_utils.norm(h2_deconv,
                                      _type='instance',
                                      name='h2_norm')
            h2_relu = tf.nn.relu(h2_deconv, name='h2_relu')

            # from (N, 16, 16, 64) to (N, 32, 32, 32)
            h3_deconv = tf_utils.deconv2d(h2_relu,
                                          32,
                                          k_h=5,
                                          k_w=5,
                                          name='h3_deconv2d',
                                          is_print=False)
            h3_deconv = tf_utils.norm(h3_deconv,
                                      _type='instance',
                                      name='h3_norm')
            h3_relu = tf.nn.relu(h3_deconv, name='h3_relu')

            # from (N, 32, 32, 32) to (N, 15, 15, 1)
            h4 = tf_utils.conv2d(h3_relu,
                                 k=1,
                                 k_h=4,
                                 k_w=4,
                                 d_h=2,
                                 d_w=2,
                                 stddev=0.02,
                                 padding='VALID',
                                 name='h4_conv2d',
                                 is_print=False)
            # Min-max scale h4 to [0, 1], then normalise so the whole batch
            # tensor sums to 1 (a spatial probability map).
            x = (h4 - tf.reduce_min(h4)) / (tf.reduce_max(h4) -
                                            tf.reduce_min(h4))
            x = x / tf.reduce_sum(x)
            return x