Example #1
# TF 1.x imports assumed by these snippets (slim lives in tf.contrib)
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.python.ops import init_ops


def mlp(inputs,
        dims,
        activation_fn=tf.nn.relu,
        dropout=None,
        training=False,
        normalizer_fn=None,
        normalizer_params=None,
        weights_initializer=initializers.xavier_initializer(),
        weights_regularizer=None,
        biases_initializer=init_ops.zeros_initializer(),
        biases_regularizer=None,
        reuse=None,
        variables_collections=None,
        outputs_collections=None,
        trainable=True,
        scope=None):
    with tf.variable_scope(scope, 'mlp', [inputs], reuse=reuse):
        for i in range(len(dims) - 1):
            inputs = slim.fully_connected(
                inputs,
                dims[i],
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params,
                weights_initializer=weights_initializer,
                weights_regularizer=weights_regularizer,
                biases_initializer=biases_initializer,
                biases_regularizer=biases_regularizer,
                variables_collections=variables_collections,
                outputs_collections=outputs_collections,
                trainable=trainable,
                reuse=reuse,
                scope='fc_%d' % i)
            if dropout and dropout > 0.:
                inputs = tf.layers.dropout(inputs, dropout, training=training)

        return slim.linear(inputs,
                           dims[-1],
                           weights_initializer=weights_initializer,
                           weights_regularizer=weights_regularizer,
                           biases_initializer=biases_initializer,
                           biases_regularizer=biases_regularizer,
                           reuse=reuse,
                           scope='linear')


def forward(inputs,
            num_outputs,
            input_dim=None,
            hiddens=[200],
            activation_fn=tf.nn.relu,
            weights_initializer=initializers.xavier_initializer(),
            weights_regularizer=None,
            biases_initializer=init_ops.zeros_initializer(),
            biases_regularizer=None,
            reuse=None,
            scope=None):
    """
  similary as melt.slim.layers.mlp but the first step(from input to first hidden adjusted so input can be sparse)
  """

    assert len(hiddens) >= 1, "must at least contain one hidden layer"

    scope = 'mlp' if scope is None else scope
    with tf.variable_scope(scope):
        # melt.layers.fully_connected is the surrounding project's helper that
        # also accepts sparse inputs; the first layer maps to hiddens[0]
        outputs = melt.layers.fully_connected(
            inputs,
            hiddens[0],
            input_dim=input_dim,
            activation_fn=activation_fn,
            weights_initializer=weights_initializer,
            weights_regularizer=weights_regularizer,
            biases_initializer=biases_initializer,
            biases_regularizer=biases_regularizer,
            reuse=reuse,
            scope='fc_0')

        # -------- remaining hidden layers
        outputs = slim.stack(outputs,
                             slim.fully_connected,
                             hiddens[1:],
                             activation_fn=activation_fn,
                             weights_initializer=weights_initializer,
                             weights_regularizer=weights_regularizer,
                             biases_initializer=biases_initializer,
                             biases_regularizer=biases_regularizer,
                             scope='fc')

        return slim.linear(outputs,
                           num_outputs,
                           weights_initializer=weights_initializer,
                           weights_regularizer=weights_regularizer,
                           biases_initializer=biases_initializer,
                           biases_regularizer=biases_regularizer,
                           scope='linear')
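
A minimal usage sketch for the helpers above, assuming the TF 1.x imports at the top of this example; the batch size, feature width, and layer sizes are made-up values, not from the original project:

# Hypothetical input: 32 examples with 128 dense features each
x = tf.placeholder(tf.float32, shape=[32, 128], name='x')

# dims lists the hidden sizes followed by the output size, so this builds
# two hidden layers (200 and 100 units) and a 10-way linear output
logits = mlp(x, dims=[200, 100, 10], dropout=0.5, training=True)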
Example #3
    def _stochastic_dense_generator_net(self, inputs, noise):
        x = inputs
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.tanh):
            if self.params.without_generator_skip_connections:
                x = tf.concat([noise, x], axis=1)
                for i, units in enumerate(self.params.generator_units):
                    x = slim.fully_connected(x,
                                             units,
                                             scope='dense{}'.format(i))
            else:
                xs = tf.split(x, len(self.params.generator_units), axis=1)
                for i, units in enumerate(self.params.generator_units):
                    if i == 0:
                        x = tf.concat([noise, xs[i]], axis=1)
                    else:
                        x = tf.concat([x, xs[i]], axis=1)
                    x = slim.fully_connected(x,
                                             units,
                                             scope='dense{}'.format(i))

            max_nodes = self.params.max_nodes
            with tf.variable_scope('logits_adj', values=[x]):
                # logits_adj : batch_size x (num_edge_types + 1) x max_nodes x max_nodes
                logits_adj = slim.linear(
                    x,
                    num_outputs=(self.params.num_edge_types + 1) * max_nodes *
                    max_nodes)
                logits_adj = tf.reshape(
                    logits_adj,
                    [-1, self.params.num_edge_types + 1, max_nodes, max_nodes])
                # make adjacency matrix symmetric
                logits_adj = (logits_adj +
                              tf.matrix_transpose(logits_adj)) / 2.0

            with tf.variable_scope('logits_features', values=[x]):
                logits_features = slim.linear(x,
                                              num_outputs=max_nodes *
                                              (self.params.num_node_types + 1))
                # logits_features : batch_size x max_nodes x (num_node_types + 1)
                logits_features = tf.reshape(
                    logits_features,
                    [-1, max_nodes, self.params.num_node_types + 1])

        return logits_adj, logits_features
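
The symmetrization step above is the standard trick for predicting an undirected graph: averaging the logits with their transpose guarantees logits_adj[b, t, i, j] == logits_adj[b, t, j, i]. A small self-contained check of the same arithmetic, with made-up shapes (batch of 2, 3 edge types, 4 nodes):

import numpy as np

logits = np.random.randn(2, 3, 4, 4).astype(np.float32)
sym = (logits + np.transpose(logits, (0, 1, 3, 2))) / 2.0
assert np.allclose(sym, np.transpose(sym, (0, 1, 3, 2)))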
Example #4
def dis(seq_img, bn_scope, reuse=False, cond_vec=None):
    # lrelu is assumed to be a leaky-ReLU helper defined elsewhere in the project
    with tf.variable_scope('discriminator', reuse=reuse):
        # branch 1: convolutional features of the sequence self-covariance
        fs = 32
        covariance = tf.matmul(seq_img, seq_img, transpose_b=True)
        x = tf.expand_dims(covariance, -1)
        x = lrelu(slim.conv2d(x, fs * 1, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        x = lrelu(slim.conv2d(x, fs * 2, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        x = lrelu(slim.conv2d(x, fs * 4, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        x = lrelu(slim.conv2d(x, fs * 4, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        covariance_feat = slim.flatten(x)

        # x = tf.nn.embedding_lookup(embeddings, seq)
        # x = ResNetBuilder(dis_train,
        #                   bn_scopes=['fake', 'real'],
        #                   bn_scope=bn_scope).\
        #     resnet(x, structure=[2, 2, 2, 2], filters=8, nb_class=1)

        # branch 2: convolutional features of the raw sequence image
        # (note the added channel axis)
        fs = 32
        x = tf.expand_dims(seq_img, -1)
        x = lrelu(slim.conv2d(x, fs * 1, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        x = lrelu(slim.conv2d(x, fs * 2, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        x = lrelu(slim.conv2d(x, fs * 4, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        x = lrelu(slim.conv2d(x, fs * 4, [5, 5]))
        x = slim.max_pool2d(x, (2, 2))
        seq_feat = slim.flatten(x)

        feat = tf.concat([covariance_feat, seq_feat], axis=1)
        if cond_vec is not None:
            feat = tf.concat([feat, cond_vec], axis=-1)
        feat = lrelu(slim.linear(feat, 200))

        x = slim.linear(feat, 1)
        # x = tf.nn.sigmoid(x)
    return x
Example #5
def simple_cnn(input_tensor, kernel_size=5, filter_sizes=(20, 50)):
    filters1, filters2 = filter_sizes

    # the ReLU is applied after batch norm, so disable conv2d's default ReLU
    h = slim.conv2d(input_tensor,
                    num_outputs=filters1,
                    kernel_size=kernel_size,
                    stride=1,
                    activation_fn=None)
    h = slim.batch_norm(h, decay=0.9)
    h = tf.nn.relu(h)
    h = slim.max_pool2d(h, kernel_size=(2, 2), stride=2, padding='SAME')
    h = slim.conv2d(h, num_outputs=filters2, kernel_size=kernel_size,
                    stride=1, activation_fn=None)
    h = slim.batch_norm(h, decay=0.9)
    h = tf.nn.relu(h)
    h = slim.max_pool2d(h, kernel_size=(2, 2), stride=2, padding='SAME')

    h = slim.flatten(h)
    h = slim.linear(h, 500)
    h = slim.linear(h, 10)

    return h
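
Note that slim.linear is just slim.fully_connected with activation_fn=None, so the final 500-unit and 10-unit layers compose into a single affine map; in practice a nonlinearity usually sits between them. A quick invocation sketch, assuming MNIST-like 28x28 grayscale inputs (the shapes are illustrative, not from the original code):

images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='images')
logits = simple_cnn(images)  # -> Tensor of shape [None, 10]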
Example #6
    def cycle_discriminator_fn(self, inputs_1: TensorDict,
                               inputs_2: TensorDict, mode: str) -> tf.Tensor:
        with tf.variable_scope('Stack_1'):
            net_1 = self._discriminator_net(inputs_1, mode=mode)
        with tf.variable_scope('Stack_2'):
            net_2 = self._discriminator_net(inputs_2, mode=mode)

        net = tf.multiply(net_1, net_2)
        net = slim.fully_connected(net,
                                   self.params.discriminator_dense_units[-1],
                                   activation_fn=tf.nn.tanh)
        net = slim.linear(net, 1)
        return net
Example #7
def dis(seq_img, bn_scope, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse):
        print('dis')
        slices = tf.unstack(seq_img, axis=1)
        # hparam.basic_cell is assumed to build an RNN cell elsewhere in the project
        fw_cell = hparam.basic_cell(32)
        # bw_cell = hparam.basic_cell(64)
        x, state = tf.nn.static_rnn(
            fw_cell,  # bw_cell,
            slices,
            dtype=tf.float32,
        )
        x = tf.stack(x, axis=1)
        print(x)
        # x = tf.concat(x, 2)
        x = slim.linear(x, 1)
        print(x)
        x = slim.flatten(x)
        print(x)
        x = slim.linear(x, 1)
        print(x)
        # x = tf.nn.sigmoid(x)
    return x
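
For reference, the shapes the print calls trace: static_rnn returns one [batch, 32] output per time step, tf.stack turns that list into [batch, time, 32], slim.linear maps the last axis down to give [batch, time, 1], slim.flatten collapses it to [batch, time], and the final slim.linear produces the [batch, 1] discriminator score.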
Example #8
File: molgan.py  Project: ai-med/almgig
    def discriminator_fn(self, inputs, mode):
        x = self._discriminator_net(inputs, mode)
        # no bias needed; it cancels out in the Wasserstein loss
        outputs = slim.linear(x, 1, biases_initializer=None)
        return outputs
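
The bias can be dropped because the Wasserstein critic only enters the loss through the difference E[f(x_real)] - E[f(x_fake)]: a constant bias added to every critic output appears in both expectations and cancels exactly.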
Example #9
    def encoder_fn(self, inputs: TensorDict, noise: tf.Tensor,
                   mode: str) -> tf.Tensor:
        net = self._graph_conv_net(inputs, noise, mode)
        outputs = slim.linear(net, self.params.num_latent)
        return outputs
Example #10
    def discriminator_fn(self, inputs: TensorDict, embedding: tf.Tensor,
                         mode: str) -> tf.Tensor:
        net = self._graph_conv_net(inputs, embedding, mode)
        # no bias needed; it cancels out in the Wasserstein loss
        outputs = slim.linear(net, 1, biases_initializer=None)
        return outputs
Example #11
def build_pixel_rnn_basic_model(B=50, H=32, W=32, C=32, n_units=100,
                                n_layers=2):
    """Summary

    Parameters
    ----------
    B : int, optional
        Description
    H : int, optional
        Description
    W : int, optional
        Description
    C : int, optional
        Description
    n_units : int, optional
        Description
    n_layers : int, optional
        Description

    Returns
    -------
    TYPE
        Description
    """
    # Input to the network, a batch of images
    X = tf.placeholder(tf.float32, shape=[B, H, W, C], name='X')
    # Scalar dropout keep probability (must be rank-0 for tf.nn.dropout)
    keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')

    # Flatten to 2 dimensions
    X_2d = tf.reshape(X, [-1, H * W * C])

    # Turn each pixel value into a vector of one-hot values
    X_onehot = tf.one_hot(tf.cast(X_2d, tf.uint8), depth=256, axis=2)

    # Split each pixel into its own tensor resulting in H * W * C number of
    # Tensors each shaped as B x 256
    pixels = [
        tf.squeeze(p, axis=1) for p in tf.split(X_onehot, H * W * C, axis=1)
    ]

    # Create the recurrent layers; build a fresh GRU cell per layer, since
    # reusing one cell object across layers would share its weights
    if n_layers > 1:
        cells = tf.contrib.rnn.MultiRNNCell(
            [tf.contrib.rnn.GRUCell(n_units) for _ in range(n_layers)],
            state_is_tuple=True)
    else:
        cells = tf.contrib.rnn.GRUCell(n_units)
    initial_state = cells.zero_state(
        batch_size=tf.shape(X)[0], dtype=tf.float32)
    cells = tf.contrib.rnn.DropoutWrapper(cells, output_keep_prob=keep_prob)

    # Connect our pixel distributions (onehots) to an rnn, this will return us a
    # list of tensors, one for each of our pixels.
    hs, final_state = tf.contrib.rnn.static_rnn(
        cells, pixels, initial_state=initial_state)

    # Concat N pixels result back into a Tensor, B x N x n_units
    stacked = tf.concat([tf.expand_dims(h_i, axis=1) for h_i in hs], axis=1)

    # And now to 2d so we can connect to FC layer
    stacked = tf.reshape(stacked, [-1, n_units])

    # And now connect to FC layer
    prediction = slim.linear(stacked, 256, scope='linear')
    if B * H * W * C > 1:
        prediction = tf.slice(prediction, [0, 0],
                              [int(prediction.shape[0] - 1), -1])
        X_onehot_flat = tf.slice(
            tf.reshape(X_onehot, [-1, 256]), [1, 0], [-1, -1])
        loss = tf.nn.softmax_cross_entropy_with_logits(
            labels=X_onehot_flat, logits=prediction)

        cost = tf.reduce_mean(loss)
    else:
        cost = None

    return {
        'X': X,
        'recon': prediction,
        'cost': cost,
        'initial_state': initial_state,
        'final_state': final_state
    }
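
A sketch of how this graph might be driven, assuming TF 1.x and numpy; the optimizer choice and feed values are illustrative, not from the original code:

import numpy as np

model = build_pixel_rnn_basic_model(B=50, H=32, W=32, C=32)
train_op = tf.train.AdamOptimizer(1e-3).minimize(model['cost'])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros((50, 32, 32, 32), dtype=np.float32)  # stand-in images
    # keep_prob is not in the returned dict, so feed it by tensor name
    _, cost = sess.run([train_op, model['cost']],
                       feed_dict={model['X']: batch, 'keep_prob:0': 0.8})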