Code Example #1
def discriminator(self, input, reuse=False):
    # Two-layer MLP discriminator: linear -> ReLU -> linear, returning
    # one unbounded logit per example. reuse=True shares variables
    # between the real-data and fake-data passes.
    n_hidden = self.config.n_hidden_disc
    with tf.variable_scope('Discriminator') as scope:
        if reuse:
            scope.reuse_variables()
        output = layers.linear(input, n_hidden, name='LN1', stdev=0.2)
        output = tf.nn.relu(output)
        output = layers.linear(output, 1, name='LN2', stdev=0.2)
        #output = slim.fully_connected(input, n_hidden,
        #                              activation_fn=tf.nn.relu)
        #output = slim.fully_connected(output, 1, activation_fn=None)
        return tf.reshape(output, [-1])
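The snippet above depends on a project-specific `layers` module, so it will not run on its own. For reference, here is a minimal self-contained sketch of the same two-layer pattern using stock TensorFlow 1.x APIs; the placeholder shapes and `n_hidden=128` are illustrative assumptions, not values from the original code.

import tensorflow as tf  # assumes TensorFlow 1.x

def mlp_discriminator(x, n_hidden, reuse=False):
    # Same shape as above: linear -> ReLU -> linear, one logit per row.
    with tf.variable_scope('Discriminator', reuse=reuse):
        h = tf.layers.dense(x, n_hidden, activation=tf.nn.relu, name='LN1')
        logit = tf.layers.dense(h, 1, name='LN2')
        return tf.reshape(logit, [-1])

# Typical GAN wiring: score the real batch first, then reuse the same
# variables for the generated batch.
x_real = tf.placeholder(tf.float32, [None, 784])
x_fake = tf.placeholder(tf.float32, [None, 784])
d_real = mlp_discriminator(x_real, n_hidden=128)
d_fake = mlp_discriminator(x_fake, n_hidden=128, reuse=True)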
Code Example #2
def discriminator(self, inputs, reuse=False):
    # Convolutional discriminator for 28x28 single-channel images.
    # Architecture hyperparameters come from self.arch when a search
    # configuration is supplied, with fixed defaults otherwise.
    if self.arch:
        arch = self.arch
        dim_c = arch['dim_c_D']
        if arch['activation_D'] == 'relu':
            activation_fn = tf.nn.relu
        elif arch['activation_D'] == 'leakyRelu':
            activation_fn = tf.nn.leaky_relu
        else:
            activation_fn = tf.nn.tanh
        batchnorm = arch['batchnorm_D']
    else:
        dim_c = self.config.dim_c
        activation_fn = tf.nn.leaky_relu
        batchnorm = False

    with tf.variable_scope('Discriminator') as scope:
        if reuse:
            scope.reuse_variables()

        output = tf.reshape(inputs, [-1, 28, 28, 1])

        output = layers.conv2d(output, dim_c, name='Conv1')
        output = activation_fn(output)

        output = layers.conv2d(output, 2*dim_c, name='Conv2')
        if batchnorm:
            output = layers.batchnorm(output, is_training=self.is_training,
                                      name='BN2')
        output = activation_fn(output)

        # The 7*7 factor implies the two convolutions each halve 28x28.
        output = tf.reshape(output, [-1, 7*7*2*dim_c])
        output = layers.linear(output, 1, name='LN3')

        return tf.reshape(output, [-1])
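As in example #1, `layers.conv2d` is project-specific and hides its stride and kernel size; the reshape to 7*7*2*dim_c implies stride-2 downsampling. A self-contained sketch of the same structure with stock TensorFlow 1.x ops follows; the 5x5 kernels and the `dim_c=64` default are assumptions.

import tensorflow as tf  # assumes TensorFlow 1.x

def conv_discriminator(x, dim_c=64, reuse=False, training=True,
                       use_bn=False, act=tf.nn.leaky_relu):
    # Two stride-2 convolutions take 28x28 down to 7x7, then a single
    # linear layer produces one logit per image.
    with tf.variable_scope('Discriminator', reuse=reuse):
        h = tf.reshape(x, [-1, 28, 28, 1])
        h = tf.layers.conv2d(h, dim_c, 5, strides=2, padding='same',
                             name='Conv1')
        h = act(h)
        h = tf.layers.conv2d(h, 2 * dim_c, 5, strides=2, padding='same',
                             name='Conv2')
        if use_bn:
            h = tf.layers.batch_normalization(h, training=training,
                                              name='BN2')
        h = act(h)
        h = tf.reshape(h, [-1, 7 * 7 * 2 * dim_c])
        return tf.reshape(tf.layers.dense(h, 1, name='LN3'), [-1])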
Code Example #3
def mlp(self, features, name):
    # Two-layer MLP over node features: each block is linear ->
    # layer norm -> graph norm -> ReLU, widening to 2*dim and then
    # projecting back to dim.
    h = features
    dim = features.shape[-1]
    dim_list = [dim * 2, dim]
    for i in range(2):
        h = GNNlayers.linear(h, dim_list[i], "%s_%s" % (name, i))
        h = GNNlayers.layer_norm(h, "norm_%s_%s" % (name, i))
        h = pgl.layers.graph_norm(self.gw, h)
        h = L.relu(h)
    return h
Code Example #4
def generator(self, input):
    # MLP generator: two linear+batchnorm+ReLU blocks followed by a
    # final linear projection to the data dimension dim_x.
    dim_x = self.config.dim_x
    n_hidden = self.config.n_hidden_gen
    with tf.variable_scope('Generator'):
        output = layers.linear(input, n_hidden, name='LN1', stdev=0.2)
        output = layers.batchnorm(output,
                                  is_training=self.is_training,
                                  name='BN1')
        output = tf.nn.relu(output)
        output = layers.linear(output, n_hidden, name='LN2', stdev=0.2)
        output = layers.batchnorm(output,
                                  is_training=self.is_training,
                                  name='BN2')
        output = tf.nn.relu(output)
        output = layers.linear(output, dim_x, name='LN3', stdev=0.2)
        #output = slim.fully_connected(input, n_hidden,
        #                              activation_fn=tf.nn.relu)
        #output = slim.fully_connected(output, n_hidden,
        #                              activation_fn=tf.nn.relu)
        #output = slim.fully_connected(output, dim_x, activation_fn=None)
        return output
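For comparison, the same generator written against stock TensorFlow 1.x layers. This is a sketch; note that `tf.layers.batch_normalization` also requires running its UPDATE_OPS at train time, which the caller must handle.

import tensorflow as tf  # assumes TensorFlow 1.x

def mlp_generator(z, n_hidden, dim_x, training=True):
    # Two linear+batchnorm+ReLU blocks, then a linear map to dim_x.
    with tf.variable_scope('Generator'):
        h = tf.layers.dense(z, n_hidden, name='LN1')
        h = tf.layers.batch_normalization(h, training=training, name='BN1')
        h = tf.nn.relu(h)
        h = tf.layers.dense(h, n_hidden, name='LN2')
        h = tf.layers.batch_normalization(h, training=training, name='BN2')
        h = tf.nn.relu(h)
        return tf.layers.dense(h, dim_x, name='LN3')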
Code Example #5
def auxiliary_classifier(layer_dict,
                         n_class,
                         keep_prob=1.,
                         inputs=None,
                         pretrained_dict=None,
                         is_training=True,
                         bn=False,
                         init_w=None,
                         trainable=True,
                         wd=0):
    # Auxiliary classification head: global average pooling, a 1x1
    # convolution, two dropout-regularized FC layers, then class logits.
    # The running activation is threaded through layer_dict['cur_input'].
    if inputs is not None:
        layer_dict['cur_input'] = inputs

    layer_dict['cur_input'] = L.global_avg_pool(layer_dict['cur_input'],
                                                keepdims=True)

    arg_scope = tf.contrib.framework.arg_scope
    with arg_scope([L.conv, L.linear],
                   layer_dict=layer_dict,
                   bn=bn,
                   init_w=init_w,
                   trainable=trainable,
                   is_training=is_training,
                   wd=wd,
                   add_summary=False):
        L.conv(1, 128, name='conv', stride=1, nl=tf.nn.relu)
        L.linear(out_dim=512, name='fc_1', nl=tf.nn.relu)
        L.drop_out(layer_dict, is_training, keep_prob=keep_prob)
        L.linear(out_dim=512, name='fc_2', nl=tf.nn.relu)
        L.drop_out(layer_dict, is_training, keep_prob=keep_prob)
        L.linear(out_dim=n_class, name='classifier', bn=False)

    return layer_dict['cur_input']
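This resembles the auxiliary classifier heads used in Inception/GoogLeNet-style networks. A minimal stock-TensorFlow 1.x equivalent of the same sequence of ops follows; it is a sketch that omits the `layer_dict`/`arg_scope` plumbing, pretrained-weight loading, and weight decay of the original.

import tensorflow as tf  # assumes TensorFlow 1.x

def aux_classifier(feat, n_class, keep_prob=1., training=True):
    # Global average pooling -> 1x1 conv -> two dropout-regularized
    # fully connected layers -> class logits.
    h = tf.reduce_mean(feat, axis=[1, 2], keepdims=True)
    h = tf.layers.conv2d(h, 128, 1, activation=tf.nn.relu, name='conv')
    h = tf.layers.flatten(h)
    h = tf.layers.dense(h, 512, activation=tf.nn.relu, name='fc_1')
    h = tf.layers.dropout(h, rate=1. - keep_prob, training=training)
    h = tf.layers.dense(h, 512, activation=tf.nn.relu, name='fc_2')
    h = tf.layers.dropout(h, rate=1. - keep_prob, training=training)
    return tf.layers.dense(h, n_class, name='classifier')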
Code Example #6
def generator(self, input):
    # DCGAN-style generator: project the latent vector to a 4x4 feature
    # map, then upsample to 32x32x3 with three transposed convolutions.
    if self.arch:
        arch = self.arch
        dim_z = arch['dim_z']
        dim_c = arch['dim_c_G']
        if arch['activation_G'] == 'relu':
            activation_fn = tf.nn.relu
        elif arch['activation_G'] == 'leakyRelu':
            activation_fn = tf.nn.leaky_relu
        else:
            activation_fn = tf.nn.tanh
        batchnorm = arch['batchnorm_G']
    else:
        dim_z = self.config.dim_z
        dim_c = self.config.dim_c
        activation_fn = tf.nn.relu
        batchnorm = True

    with tf.variable_scope('Generator'):
        output = layers.linear(input, 4 * 4 * 4 * dim_c, name='LN1')
        if batchnorm:
            output = layers.batchnorm(output,
                                      is_training=self.is_training,
                                      name='BN1')
        output = activation_fn(output)
        output = tf.reshape(output, [-1, 4, 4, 4 * dim_c])

        output_shape = [-1, 8, 8, 2 * dim_c]
        output = layers.deconv2d(output, output_shape, name='Deconv2')
        if batchnorm:
            output = layers.batchnorm(output,
                                      is_training=self.is_training,
                                      name='BN2')
        output = activation_fn(output)

        output_shape = [-1, 16, 16, dim_c]
        output = layers.deconv2d(output, output_shape, name='Deconv3')
        if batchnorm:
            output = layers.batchnorm(output,
                                      is_training=self.is_training,
                                      name='BN3')
        output = activation_fn(output)

        output_shape = [-1, 32, 32, 3]
        output = layers.deconv2d(output, output_shape, name='Deconv4')
        output = tf.nn.tanh(output)

        return tf.reshape(output, [-1, 32 * 32 * 3])
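This is the standard DCGAN project-and-reshape generator. A self-contained sketch with `tf.layers.conv2d_transpose` follows (TensorFlow 1.x; the 5x5 kernels, stride 2, and `dim_c=64` default are assumptions, since the custom `layers.deconv2d` hides them).

import tensorflow as tf  # assumes TensorFlow 1.x

def dcgan_generator(z, dim_c=64, training=True, use_bn=True,
                    act=tf.nn.relu):
    # Project z to a 4x4 map, then upsample 4 -> 8 -> 16 -> 32 with
    # three stride-2 transposed convolutions; tanh bounds pixels to
    # [-1, 1] to match tanh-normalized training data.
    with tf.variable_scope('Generator'):
        h = tf.layers.dense(z, 4 * 4 * 4 * dim_c, name='LN1')
        if use_bn:
            h = tf.layers.batch_normalization(h, training=training,
                                              name='BN1')
        h = act(h)
        h = tf.reshape(h, [-1, 4, 4, 4 * dim_c])
        for i, ch in enumerate([2 * dim_c, dim_c]):
            h = tf.layers.conv2d_transpose(h, ch, 5, strides=2,
                                           padding='same',
                                           name='Deconv%d' % (i + 2))
            if use_bn:
                h = tf.layers.batch_normalization(h, training=training,
                                                  name='BN%d' % (i + 2))
            h = act(h)
        h = tf.layers.conv2d_transpose(h, 3, 5, strides=2, padding='same',
                                       name='Deconv4')
        return tf.reshape(tf.nn.tanh(h), [-1, 32 * 32 * 3])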
Code Example #7
def get_maccs_repr(self):
    # Project node features to the 167 MACCS keys, softmax per node,
    # then sum-pool over each graph to get one vector per molecule.
    feature = GNNlayers.linear(self.feature, 167, 'maccs_fc')
    feature = L.softmax(feature)  # for every node
    feature = pgl.layers.graph_pooling(self.gw, feature, "sum")
    return feature
Code Example #8
def get_mgf_repr(self):
    # Same pattern as get_maccs_repr, but projecting to 2048
    # fingerprint slots before the per-node softmax and sum pooling.
    feature = GNNlayers.linear(self.feature, 2048, 'mgf_fc')
    feature = L.softmax(feature)
    feature = pgl.layers.graph_pooling(self.gw, feature, "sum")
    return feature
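Examples #7 and #8 share one pattern: project each node's features to the fingerprint width (167 MACCS keys, or 2048 slots for what is presumably a Morgan-type fingerprint, given the 'mgf' name), apply a per-node softmax so each node distributes one unit of mass across the bits, then sum-pool over each graph. The result is a soft, differentiable count vector per molecule. A sketch of the shared pattern with stock paddle.fluid ops, assuming PGL 1.x, where `gw` is a pgl.graph_wrapper.GraphWrapper as in the snippets above:

import paddle.fluid.layers as L
import pgl

def fingerprint_head(gw, node_feat, width, name):
    # Per-node projection to `width` fingerprint slots, softmax over
    # the slots, then a per-graph sum: a soft bit-count per molecule.
    h = L.fc(node_feat, size=width, name=name)
    h = L.softmax(h)
    return pgl.layers.graph_pooling(gw, h, "sum")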