import tensorflow as tf  # TF 1.x-style graph API via tf.compat.v1

# tf_utils, DIM and DIS_DIM come from the surrounding project and are not
# shown on this page.
def Discriminator(inputs, is_reuse=True, name='disc'):
    # Four-layer MLP discriminator. Note the default is_reuse=True: the
    # first call, which actually creates the variables, must pass
    # is_reuse=False.
    with tf.compat.v1.variable_scope(name, reuse=is_reuse):
        print('is_reuse: {}'.format(is_reuse))
        output01 = tf_utils.linear(inputs, DIM, name='fc-1')
        output01 = tf_utils.relu(output01, name='relu-1')

        output02 = tf_utils.linear(output01, DIM, name='fc-2')
        output02 = tf_utils.relu(output02, name='relu-2')

        output03 = tf_utils.linear(output02, DIM, name='fc-3')
        output03 = tf_utils.relu(output03, name='relu-3')

        # Final linear projection to DIS_DIM outputs, no activation.
        output04 = tf_utils.linear(output03, DIS_DIM, name='fc-4')

        return output04
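The scope above is opened with reuse=is_reuse, so two calls under the same name share one set of weights. A minimal runnable sketch of that sharing pattern, reduced to a single dense layer (the disc helper below is illustrative, not from the source):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # TF 1.x graph-mode sketch

def disc(x, name='disc', is_reuse=False):
    # Same reuse mechanism as Discriminator above, one layer only.
    with tf.compat.v1.variable_scope(name, reuse=is_reuse):
        return tf.compat.v1.layers.dense(x, 1, name='out')

real = tf.compat.v1.placeholder(tf.float32, [None, 4])
fake = tf.compat.v1.placeholder(tf.float32, [None, 4])
d_real = disc(real)                 # first call creates disc/out variables
d_fake = disc(fake, is_reuse=True)  # second call reuses the same weights
assert len(tf.compat.v1.trainable_variables('disc')) == 2  # one W, one b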
Example #2
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.compat.v1.variable_scope(name) as scope:
            if is_reuse:
                scope.reuse_variables()

            # 64 -> 32 or 32 -> 16
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16 or 16 -> 8
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      name='h1_conv2d')
            # batch_norm presumably appends its moving-average update ops to
            # the _ops list so the training step can run them (project
            # convention inferred from the call site).
            h1_batchnorm = tf_utils.batch_norm(h1_conv,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8 or 8 -> 4
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            if self.flags.dataset == 'mnist':
                h2_flatten = tf.compat.v1.layers.flatten(h2_lrelu)
                h3_linear = tf_utils.linear(h2_flatten, 1, name='h3_linear')

                return tf.nn.sigmoid(h3_linear), h3_linear
            else:
                # 8 -> 4
                h3_conv = tf_utils.conv2d(h2_lrelu,
                                          self.dis_c[3],
                                          name='h3_conv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_conv,
                                                   name='h3_batchnorm',
                                                   _ops=self._dis_train_ops)
                h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

                h3_flatten = tf.compat.v1.layers.flatten(h3_lrelu)
                h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

                return tf.nn.sigmoid(h4_linear), h4_linear
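Both branches return the sigmoid output alongside the raw h3_linear/h4_linear logits; the usual reason for keeping the logits is so the loss can use the numerically stable cross-entropy op. A hedged sketch of that standard pairing (the discriminator_loss helper is illustrative, not from the source):

import tensorflow as tf

def discriminator_loss(d_logits_real, d_logits_fake):
    # Standard GAN discriminator loss computed on raw logits rather than
    # the sigmoid output, which avoids log(sigmoid(x)) underflow.
    loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(d_logits_real), logits=d_logits_real))
    loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_logits_fake), logits=d_logits_fake))
    return loss_real + loss_fake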
Example #3
    def generator(self, data, name='g_'):
        with tf.compat.v1.variable_scope(name):
            data_flatten = tf.compat.v1.layers.flatten(data)

            # 4 x 4
            h0_linear = tf_utils.linear(data_flatten,
                                        4 * 4 * self.gen_c[0],
                                        name='h0_linear')
            h0_reshape = tf.reshape(
                h0_linear, [tf.shape(h0_linear)[0], 4, 4, self.gen_c[0]])
            h0_batchnorm = tf_utils.batch_norm(h0_reshape,
                                               name='h0_batchnorm',
                                               _ops=self._gen_train_ops)
            h0_relu = tf.nn.relu(h0_batchnorm, name='h0_relu')

            # 8 x 8
            h1_deconv = tf_utils.deconv2d(h0_relu,
                                          self.gen_c[1],
                                          name='h1_deconv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_deconv,
                                               name='h1_batchnorm',
                                               _ops=self._gen_train_ops)
            h1_relu = tf.nn.relu(h1_batchnorm, name='h1_relu')

            # 16 x 16
            h2_deconv = tf_utils.deconv2d(h1_relu,
                                          self.gen_c[2],
                                          name='h2_deconv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_deconv,
                                               name='h2_batchnorm',
                                               _ops=self._gen_train_ops)
            h2_relu = tf.nn.relu(h2_batchnorm, name='h2_relu')

            # 32 x 32
            if self.flags.dataset in ('mnist', 'nasdaq'):
                output = tf_utils.deconv2d(h2_relu,
                                           self.image_size[2],
                                           name='h3_deconv2d')
                return tf.nn.tanh(output)
            else:
                h3_deconv = tf_utils.deconv2d(h2_relu,
                                              self.gen_c[3],
                                              name='h3_deconv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_deconv,
                                                   name='h3_batchnorm',
                                                   _ops=self._gen_train_ops)
                h3_relu = tf.nn.relu(h3_batchnorm, name='h3_relu')

                # 64 x 64
                output = tf_utils.deconv2d(h3_relu,
                                           self.image_size[2],
                                           name='h4_deconv2d')
                return tf.nn.tanh(output)
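Both generator branches end in tf.nn.tanh, so samples live in [-1, 1] and the training images must be scaled to the same range. A small NumPy sketch of the matching pre- and post-scaling (helper names are illustrative, not from the source):

import numpy as np

def scale_for_training(images_uint8):
    # Map uint8 pixels [0, 255] to the generator's tanh range [-1, 1].
    return images_uint8.astype(np.float32) / 127.5 - 1.0

def scale_for_display(samples):
    # Map generator output [-1, 1] back to displayable uint8 pixels.
    return np.clip((samples + 1.0) * 127.5, 0, 255).astype(np.uint8)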