Example #1
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def weight_variable(shape, stddev=0.05, name='weight'):
    # `stddev` is only used by the commented-out truncated-normal alternative:
    # initial = tf.truncated_normal(shape=shape, dtype=tf.float32, stddev=stddev)
    # return tf.Variable(initial, dtype=tf.float32, name=name)
    initial = variance_scaling_initializer(factor=1.0,
                                           mode='FAN_AVG',
                                           uniform=True)
    return tf.Variable(initial(shape=shape), name=name)
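A minimal usage sketch (assuming TensorFlow 1.x graph mode; the kernel shape and variable name below are illustrative, not from the original source):

# Hypothetical usage: a 5x5 kernel with 3 input and 64 output channels.
w = weight_variable([5, 5, 3, 64], name='conv1_weight')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).shape)  # (5, 5, 3, 64)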
Example #2
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def conv2d(input_,
           output_dim,
           k_h=3,
           k_w=3,
           d_h=2,
           d_w=2,
           padding='SAME',
           name="conv2d",
           with_w=False):
    with tf.variable_scope(name):
        w = tf.get_variable(
            'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
            initializer=variance_scaling_initializer())
        if padding == 'Other':
            # Custom mode: pad the input by 3 on each spatial side and run a
            # VALID convolution ('SAME' and 'VALID' pass through unchanged).
            padding = 'VALID'
            input_ = tf.pad(input_, [[0, 0], [3, 3], [3, 3], [0, 0]],
                            "CONSTANT")

        conv = tf.nn.conv2d(input_,
                            w,
                            strides=[1, d_h, d_w, 1],
                            padding=padding)
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())

        if with_w:
            return conv, w, biases
        else:
            return conv
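A minimal usage sketch (TF 1.x assumed; the placeholder shape is illustrative):

# Hypothetical usage: downsample a batch of 64x64 RGB images.
images = tf.placeholder(tf.float32, [16, 64, 64, 3])
feats, w, b = conv2d(images, output_dim=128, name='conv1', with_w=True)
print(feats.get_shape())  # (16, 32, 32, 128) with the default stride of 2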
Example #3
    @classmethod
    def create_cond_critic_proj(cls, xinput, input_clusters, var_scope,
                                critic_layers, clusters_no, reuse=None):
        """
        Class method that instantiates a Critic and creates a conditional
         critic with the original projection conditioning method.

        Parameters
        ----------
        xinput : Tensor
            Tensor containing the input cells.
        input_clusters : Tensor
            Tensor containing the corresponding cluster indexes of the input cells.
        var_scope : str
            Variable scope used for the created tensors.
        critic_layers : list
            List of integers corresponding to the number of neurons of each
             layer of the critic.
        clusters_no : int
            Number of clusters.
        reuse : Boolean
            Whether to reuse the already existing Tensors.
            Default is None.

        Returns
        -------
        A Critic object with the defined architecture.

        """

        with tf.variable_scope(var_scope, reuse=reuse):
            for i_lay, output_size in enumerate(critic_layers):
                with tf.variable_scope("layers_" + str(i_lay + 1)):
                    xinput = layers.relu(
                        xinput,
                        output_size,
                        weights_initializer=layers.variance_scaling_initializer(mode="FAN_AVG"),
                        biases_initializer=tf.zeros_initializer())

            with tf.variable_scope("layers_" + 'proj'):
                proj_weights_m = tf.get_variable(
                    "proj_weights_m",
                    [clusters_no, critic_layers[-1], 1],
                    dtype=tf.float32, initializer=layers.xavier_initializer())

                proj_weights = tf.nn.embedding_lookup(proj_weights_m,
                                                      input_clusters)

                output_proj = tf.einsum('ij,ijk->ik', xinput, proj_weights)

            with tf.variable_scope("layers_" + 'output'):
                output = layers.linear(
                    xinput, 1,
                    weights_initializer=layers.xavier_initializer(),
                    biases_initializer=tf.zeros_initializer())

                dist = tf.add(output_proj, output)

        return cls(xinput, dist, var_scope, critic_layers,
                   input_clusters=input_clusters,
                   clusters_no=clusters_no, reuse=reuse)
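Per sample, the projection step above is just a dot product between the feature vector and the embedding row selected by its cluster index. A minimal standalone sketch of that einsum (shapes and values are illustrative, not from the original source):

import numpy as np
import tensorflow as tf

feats = tf.constant(np.random.rand(4, 8), dtype=tf.float32)     # [batch, features]
table = tf.constant(np.random.rand(3, 8, 1), dtype=tf.float32)  # [clusters, features, 1]
idx = tf.constant([0, 2, 1, 0])                                 # cluster index per sample
proj = tf.einsum('ij,ijk->ik', feats, tf.nn.embedding_lookup(table, idx))
print(proj.shape)  # (4, 1): one projection score per sample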
Example #4
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def de_conv(input_, output_shape,
            k_h=3, k_w=3, d_h=2, d_w=2, stddev=0.02,
            name="deconv2d", with_w=False):

    with tf.variable_scope(name):
        # filter: [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=variance_scaling_initializer())
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])
        # Support for versions of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())

        if with_w:
            return deconv, w, biases
        else:
            return deconv
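A minimal usage sketch (TF 1.x assumed; shapes are illustrative). Note that conv2d_transpose requires the full output shape up front:

# Hypothetical usage: upsample 8x8x128 feature maps to 16x16x64.
feats = tf.placeholder(tf.float32, [16, 8, 8, 128])
up = de_conv(feats, output_shape=[16, 16, 16, 64], name='deconv1')
print(up.get_shape())  # (16, 16, 16, 64)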
Example #5
import tensorflow
from tensorflow.contrib.layers import variance_scaling_initializer


def de_conv2d(input_,
              output_shape,
              k_h=3,
              k_w=3,
              d_h=2,
              d_w=2,
              stddev=2e-2,
              name='deconv2d',
              with_w=False,
              initializer=variance_scaling_initializer()):
    with tensorflow.variable_scope(name):
        w = tensorflow.get_variable(
            'w', [k_h, k_w, output_shape[-1],
                  input_.get_shape()[-1]],
            initializer=initializer)
        deconv = tensorflow.nn.conv2d_transpose(input_,
                                                w,
                                                output_shape=output_shape,
                                                strides=[1, d_h, d_w, 1])
        b = tensorflow.get_variable(
            'b', [output_shape[-1]],
            initializer=tensorflow.constant_initializer(0.0))
        deconv = tensorflow.reshape(tensorflow.nn.bias_add(deconv, b),
                                    deconv.get_shape())
        if with_w:
            return deconv, w, b
        else:
            return deconv
Example #6
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def fully_connect(input_, output_size, scope=None, with_w=False):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 variance_scaling_initializer())
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(0.0))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
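A minimal usage sketch (TF 1.x assumed; the dimensions are illustrative):

# Hypothetical usage: project 100-d latent vectors to 256 units.
z = tf.placeholder(tf.float32, [32, 100])
h, w, b = fully_connect(z, 256, scope='fc1', with_w=True)
print(h.get_shape())  # (32, 256)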
Example #7
import tensorflow
from tensorflow.contrib.layers import variance_scaling_initializer


def conv2d(input_, output_dim, k_h=3, k_w=3, d_h=2, d_w=2, name='conv2d'):
    with tensorflow.variable_scope(name):
        w = tensorflow.get_variable(
            'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
            initializer=variance_scaling_initializer())
        conv = tensorflow.nn.conv2d(input_,
                                    w,
                                    strides=[1, d_h, d_w, 1],
                                    padding='SAME')
        b = tensorflow.get_variable(
            'b', [output_dim],
            initializer=tensorflow.constant_initializer(0.0))
        conv = tensorflow.reshape(tensorflow.nn.bias_add(conv, b),
                                  conv.get_shape())
        return conv, w
Example #8
import tensorflow
from tensorflow.contrib.layers import variance_scaling_initializer


def fully_connected(input_,
                    output_size,
                    scope=None,
                    with_w=False,
                    initializer=variance_scaling_initializer()):
    shape = input_.get_shape().as_list()
    with tensorflow.variable_scope(scope or 'Linear'):
        matrix = tensorflow.get_variable('Matrix', [shape[1], output_size],
                                         tensorflow.float32,
                                         initializer=initializer)
        b = tensorflow.get_variable(
            'b', [output_size],
            initializer=tensorflow.constant_initializer(0.0))
        if with_w:
            return tensorflow.matmul(input_, matrix) + b, matrix, b
        else:
            return tensorflow.matmul(input_, matrix) + b
Example #9
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def conv2d(input_, output_dim, k_h=3, k_w=3, d_h=2, d_w=2, name="conv2d"):
    with tf.variable_scope(name):

        # Variance scaling initialization; kernel shape [3, 3, in_channel, out_channel].
        w = tf.get_variable(
            'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
            initializer=variance_scaling_initializer())
        conv = tf.nn.conv2d(input_,
                            w,
                            strides=[1, d_h, d_w, 1],
                            padding='SAME')

        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())

        return conv, w
Example #10
    @classmethod
    def create_critic(cls, xinput, var_scope,
                      critic_layers, reuse=None):
        """
        Class method that instantiates a Critic and creates a
         non-conditional critic.

        Parameters
        ----------
        xinput : Tensor
            Tensor containing the input cells.
        var_scope : str
            Variable scope used for the created tensors.
        critic_layers : list
            List of integers corresponding to the number of neurons of each
             layer of the critic.
        reuse : Boolean
            Whether to reuse the already existing Tensors.
            Default is None.

        Returns
        -------
        A Critic object with the defined architecture.

        """

        with tf.variable_scope(var_scope, reuse=reuse):
            for i_lay, output_size in enumerate(critic_layers):
                with tf.variable_scope("layers_" + str(i_lay + 1)):
                    xinput = layers.relu(
                        xinput, output_size,
                        weights_initializer=layers.variance_scaling_initializer(mode="FAN_AVG"),
                        biases_initializer=tf.zeros_initializer())

            with tf.variable_scope("layers_" + 'output'):
                output = layers.linear(
                    xinput, 1,
                    weights_initializer=layers.xavier_initializer(),
                    biases_initializer=tf.zeros_initializer())

        return cls(xinput, output, var_scope, critic_layers, reuse=reuse)
Example #11
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def de_conv(input_,
            output_shape,
            k_h=3,
            k_w=3,
            d_h=2,
            d_w=2,
            stddev=0.02,
            name="deconv2d",
            with_w=False,
            initializer=variance_scaling_initializer()):

    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable(
            'w', [k_h, k_w, output_shape[-1],
                  input_.get_shape()[-1]],
            initializer=initializer)  # e.g. [3, 3, 128, 138]
        try:
            deconv = tf.nn.conv2d_transpose(input_,
                                            w,
                                            output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])
        # Support for versions of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_,
                                    w,
                                    output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])
        # The second argument (filter) is the convolution kernel: a Tensor of
        # shape [filter_height, filter_width, out_channels, in_channels], i.e.
        # [kernel height, kernel width, number of kernels, number of image channels].
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())

        if with_w:
            return deconv, w, biases
        else:
            return deconv
Example #12
    @classmethod
    def create_cond_generator(cls, z_input, batch_size, latent_dim,
                              output_cells_dim, var_scope, gen_layers,
                              output_lsn, gen_cond_type, clusters_ratios,
                              is_training, clusters_no=None,
                              input_clusters=None, reuse=None):
        """
        Class method that instantiates a Generator and creates a
        conditional generator.

        Parameters
        ----------
        z_input : Tensor
            Tensor containing the noise used as input by the generator.
        batch_size : int
            Batch size used during the training.
        latent_dim : int
            Dimension of the latent space used from which the input noise
            of the generator is sampled.
        output_cells_dim : int
            Dimension of the output cells (i.e. the number of genes).
        var_scope : str
            Variable scope used for the created tensors.
        gen_layers : list
            List of integers corresponding to the number of neurons of
            each layer of the generator.
        output_lsn : int, None
            Parameter of the LSN layer at the output of the generator
            (i.e. the total number of counts per generated cell).
        gen_cond_type : str
            Conditional normalization used in the generator; either
            "batchnorm" or "layernorm". Any other value disables the
            conditional normalization layers (i.e. no conditional generation).
        clusters_ratios : Tensor
            Placeholder containing the list of cluster ratios of the input data.
        is_training : Tensor
            Boolean placeholder encoding for whether we're in training or
            inference mode (for the batch normalization).
        clusters_no : int
            Number of clusters.
            Default is None.
        input_clusters : Tensor
            Placeholders for the cluster indexes that should be used for
            conditional generation.
            Default is None.
        reuse : Boolean
            Whether to reuse the already existing Tensors.
            Default is None.

        Returns
        -------
        A conditional Generator object with the defined architecture.
        """

        with tf.variable_scope(var_scope, reuse=reuse):

            for i_lay, size in enumerate(gen_layers):
                with tf.variable_scope("generator_layers_" + str(i_lay + 1)):
                    z_input = layers.linear(
                        z_input,
                        size,
                        weights_initializer=layers.xavier_initializer(),
                        biases_initializer=None)

                    # NOTE: this test is always true (enumerate indices start
                    # at 0), so the conditional normalization below is applied
                    # after every layer.
                    if i_lay != -1:
                        if gen_cond_type == "batchnorm":
                            z_input = batchnorm(
                                [0], z_input,
                                is_training=is_training,
                                labels=input_clusters,
                                n_labels=clusters_no)

                        elif gen_cond_type == "layernorm":
                            z_input = layernorm([1],
                                                z_input,
                                                labels=input_clusters,
                                                n_labels=clusters_no)

                    z_input = tf.nn.relu(z_input)

            with tf.variable_scope("generator_layers_" + 'output'):
                fake_outputs = layers.relu(
                    z_input, output_cells_dim,
                    weights_initializer=layers.variance_scaling_initializer(mode="FAN_AVG"),
                    biases_initializer=tf.zeros_initializer())

                if output_lsn is not None:
                    gammas_output = tf.Variable(
                        np.ones(z_input.shape.as_list()[0]) * output_lsn,
                        trainable=False)
                    sigmas = tf.reduce_sum(fake_outputs, axis=1)
                    scale_ls = tf.cast(gammas_output, dtype=tf.float32) / \
                        (sigmas + sys.float_info.epsilon)

                    fake_outputs = tf.transpose(tf.transpose(fake_outputs) *
                                                scale_ls)

            return cls(fake_outputs, batch_size, latent_dim, output_cells_dim,
                       var_scope, gen_layers, output_lsn,
                       gen_cond_type=gen_cond_type, is_training=is_training,
                       clusters_ratios=clusters_ratios, clusters_no=clusters_no,
                       input_clusters=input_clusters, reuse=reuse)
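The LSN block above rescales each generated row so its entries sum to output_lsn. A minimal numeric sketch of that scaling in NumPy (values are illustrative, not from the original source):

import sys
import numpy as np

fake = np.array([[1.0, 3.0], [2.0, 2.0]])   # two generated "cells"
output_lsn = 100.0
scale = output_lsn / (fake.sum(axis=1) + sys.float_info.epsilon)
print((fake * scale[:, None]).sum(axis=1))  # [100. 100.]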
Example #13
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def weight_variable(shape, stddev=0.05, name='weight'):
    # `stddev` is only used by the commented-out truncated-normal alternative:
    # initial = tf.truncated_normal(shape=shape, dtype=tf.float32, stddev=stddev)
    # return tf.Variable(initial, dtype=tf.float32, name=name)
    initial = variance_scaling_initializer()
    return tf.Variable(initial(shape=shape), name=name)
Example #14
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def conv3d(input_, output_dim, k_h=3, k_w=3, k_d=3, d_h=2, d_w=2, d_d=3,
           padding='SAME', name="conv3d", with_w=False):

    with tf.variable_scope(name):

        w = tf.get_variable('w', [k_h, k_w, k_d, input_.get_shape()[-1], output_dim],
                            initializer=variance_scaling_initializer())

        if padding == 'Other':
            # Not sure what's up with this right now --andrew
            # Something about going from the latent space to the first conv.
            padding = 'VALID'
            input_ = tf.pad(input_, [[0, 0], [3, 3], [3, 3], [3, 3], [0, 0]],
                            "CONSTANT")

        conv = tf.nn.conv3d(input_, w, strides=[1, d_h, d_w, d_d, 1], padding=padding)
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())

        if with_w:
            return conv, w, biases
        else:
            return conv
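A minimal usage sketch (TF 1.x assumed; the volumetric shape is illustrative):

# Hypothetical usage: downsample a batch of 32x32x9 single-channel volumes.
vol = tf.placeholder(tf.float32, [8, 32, 32, 9, 1])
out = conv3d(vol, output_dim=64, name='conv3d_1')
print(out.get_shape())  # (8, 16, 16, 3, 64) with default strides (2, 2, 3)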
Example #15
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def conv2d(input, output_dim, ksize=[3, 3, 2, 2], padding='SAME', name="conv2d", with_w=False):
    # ksize packs [kernel_height, kernel_width, stride_height, stride_width].
    k_h, k_w, d_h, d_w = ksize[0], ksize[1], ksize[2], ksize[3]
    with tf.variable_scope(name):
        w = tf.get_variable('weights', [k_h, k_w, input.get_shape()[-1], output_dim],
                            initializer=variance_scaling_initializer())
        b = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        if padding == 'Other':
            padding = 'VALID'
            input = tf.pad(input, [[0, 0], [3, 3], [3, 3], [0, 0]], "CONSTANT")
        conv = tf.nn.conv2d(input, w, strides=[1, d_h, d_w, 1], padding=padding)
        shape = conv.get_shape().as_list()
        conv = tf.reshape(tf.nn.bias_add(conv, b), (-1, shape[1], shape[2], shape[3]))
        if with_w:
            return conv, w, b
        else:
            return conv
Example #16
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer


def fully_connect(input, output_size, stddev=0.02, scope=None, with_w=False):
    with tf.variable_scope(scope or "Linear"):
        w = tf.get_variable("weights", [input.get_shape().as_list()[1], output_size],
                            tf.float32, variance_scaling_initializer())
        b = tf.get_variable("biases", [output_size], initializer=tf.constant_initializer(0.0))
        output = tf.matmul(input, w) + b
        if with_w:
            return output, w, b
        else:
            return output
Example #17
    def get_weights_initializer(self):
        # return tf.truncated_normal_initializer(stddev=0.02)
        return variance_scaling_initializer()