Example #1
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        input_features = in_layers[
            0].out_tensor  # batch_size * None * n_channel

        i = tf.shape(input_features)[0]
        j = tf.shape(input_features)[1]
        embedded_features = tf.reshape(
            tf.matmul(
                tf.reshape(input_features[:, :, self.pos_start:self.pos_end],
                           [i * j, self.pos_end - self.pos_start]),
                self.embedding), [i, j, self.embedding_length])
        out_tensor = tf.concat(
            [embedded_features, input_features[:, :, self.pos_end:]], axis=2)

        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
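
For reference, a minimal numpy sketch of the flatten-matmul-reshape pattern above, with made-up sizes (batch 2, sequence length 3, one-hot width 4, embedding length 5); for one-hot inputs the matrix product reduces to a row lookup into the embedding table.

    import numpy as np

    batch, seq, width, emb_len = 2, 3, 4, 5
    one_hot = np.eye(width)[np.random.randint(0, width, size=(batch, seq))]
    embedding = np.random.randn(width, emb_len)

    # Collapse (batch, seq) into one axis, multiply, and restore the shape,
    # mirroring the tf.reshape / tf.matmul / tf.reshape sequence above.
    flat = one_hot.reshape(batch * seq, width)
    embedded = flat.dot(embedding).reshape(batch, seq, emb_len)

    # For one-hot rows this is exactly an embedding lookup.
    assert np.allclose(embedded, embedding[one_hot.argmax(axis=-1)])
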
Example #2
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: fx, x
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        fx = in_layers[0].out_tensor
        x = in_layers[1].out_tensor

        pad_dimension = len(x.get_shape()) - 1
        if self.fx_in_channels is None:
            self.fx_in_channels = fx.get_shape().as_list()[-1]
        if self.x_in_channels is None:
            self.x_in_channels = x.get_shape().as_list()[-1]

        pad_length = self.fx_in_channels - self.x_in_channels
        assert pad_length >= 0

        pad = [[0, 0]] * pad_dimension + [[0, pad_length]]
        out_tensor = fx + tf.pad(x, pad, "CONSTANT")
        if set_tensors:
            self.variables = None
            self.out_tensor = out_tensor
        return out_tensor
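
A small numpy sketch of the zero-padded skip connection above, assuming a rank-3 (batch, length, channels) input and hypothetical channel counts; the skip tensor x is padded with zero channels so it can be added to fx.

    import numpy as np

    fx = np.random.randn(2, 7, 32)   # output of the wrapped block, 32 channels
    x = np.random.randn(2, 7, 24)    # skip input, 24 channels

    pad_length = fx.shape[-1] - x.shape[-1]
    # Pad only the last (channel) axis, leaving every other axis untouched,
    # mirroring [[0, 0]] * pad_dimension + [[0, pad_length]] above.
    pad = [(0, 0)] * (x.ndim - 1) + [(0, pad_length)]
    out = fx + np.pad(x, pad, mode="constant")
    assert out.shape == fx.shape
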
Example #3
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, output_flag
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()

        input_features = in_layers[0].out_tensor

        out_tensor = tf.nn.atrous_conv2d(input_features,
                                         self.W,
                                         rate=self.rate,
                                         padding='SAME')
        out_tensor = tf.nn.bias_add(out_tensor, self.b)

        if len(in_layers) > 2:
            flag = tf.expand_dims(in_layers[1].out_tensor, axis=3)
            train_flag = in_layers[2].out_tensor
            out_tensor = tf.layers.batch_normalization(out_tensor,
                                                       training=train_flag)
            out_tensor = out_tensor * tf.to_float(flag)
        elif len(in_layers) > 1:
            flag = tf.expand_dims(in_layers[1].out_tensor, axis=3)
            out_tensor = out_tensor * tf.to_float(flag)

        out_tensor = self.activation(out_tensor)
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
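
A hedged sketch of the dilated (atrous) convolution at the heart of this layer, with made-up shapes; the rate argument dilates the kernel so a 3x3 filter covers a 5x5 window at rate 2, while 'SAME' padding preserves the spatial size.

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.randn(1, 16, 16, 8), dtype=tf.float32)  # NHWC input
    W = tf.constant(np.random.randn(3, 3, 8, 4), dtype=tf.float32)    # HWIO filter
    b = tf.constant(np.zeros(4), dtype=tf.float32)

    y = tf.nn.atrous_conv2d(x, W, rate=2, padding='SAME')
    y = tf.nn.bias_add(y, b)
    print(y.shape)  # (1, 16, 16, 4)
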
Example #4
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, input_flag
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()

        input_features = in_layers[0].out_tensor
        if self.activation_first:
            input_features = self.activation(input_features)
        out_tensor = tf.nn.conv2d(input_features,
                                  self.W,
                                  strides=[1, 1, 1, 1],
                                  padding='SAME')
        out_tensor = tf.nn.bias_add(out_tensor, self.b)
        if len(in_layers) > 1:
            flag = tf.expand_dims(in_layers[1].out_tensor, axis=3)
            out_tensor = out_tensor * tf.to_float(flag)
        if not self.activation_first:
            out_tensor = self.activation(out_tensor)
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
Example #5
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        input_features = in_layers[0].out_tensor
        out_tensor = tf.nn.max_pool(input_features,
                                    [1, self.n_size, self.n_size, 1],
                                    strides=[1, self.n_size, self.n_size, 1],
                                    padding='SAME')
        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
Example #6
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        parent_tensor = in_layers[0].out_tensor
        if len(in_layers) > 1:
            train_flag = in_layers[1].out_tensor
        else:
            train_flag = False
        out_tensor = tf.layers.batch_normalization(parent_tensor,
                                                   training=train_flag)

        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
Example #7
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        input_features = in_layers[0].out_tensor
        max_n_res = tf.reduce_max(in_layers[1].out_tensor)

        indices1 = tf.reshape(
            tf.tile(tf.expand_dims(tf.range(max_n_res), 1), (1, max_n_res)),
            (-1, ))
        indices2 = tf.reshape(tf.tile(tf.range(max_n_res), (max_n_res, )),
                              (-1, ))
        indices3 = tf.to_int32(tf.to_float(indices1 + indices2) / 2)
        indices4 = tf.to_int32(tf.ceil(tf.to_float(indices1 + indices2) / 2))

        tensor1 = tf.gather(input_features,
                            indices=tf.reshape(indices1,
                                               (max_n_res, max_n_res)),
                            axis=1)
        tensor2 = tf.gather(input_features,
                            indices=tf.reshape(indices2,
                                               (max_n_res, max_n_res)),
                            axis=1)
        tensor3 = tf.gather(input_features,
                            indices=tf.reshape(indices3,
                                               (max_n_res, max_n_res)),
                            axis=1)
        tensor4 = tf.gather(input_features,
                            indices=tf.reshape(indices4,
                                               (max_n_res, max_n_res)),
                            axis=1)

        out_tensor = tf.concat([tensor1, (tensor3 + tensor4) / 2, tensor2],
                               axis=3)
        if self.features_2D:
            features_2D = in_layers[3].out_tensor
            out_tensor = tf.concat([out_tensor, features_2D], axis=3)
        if len(in_layers) > 2:
            flag = tf.expand_dims(in_layers[2].out_tensor, axis=3)
            out_tensor = out_tensor * tf.to_float(flag)
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
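
The index construction above turns per-residue (1D) features into pairwise (2D) features: position (i, j) of the output gets the features of residue i, of residue j, and the average of the two residues roughly halfway between them. A worked numpy check for a hypothetical max_n_res of 4:

    import numpy as np

    n = 4
    indices1 = np.repeat(np.arange(n), n)        # row index i:    0 0 0 0 1 1 1 1 ...
    indices2 = np.tile(np.arange(n), n)          # column index j: 0 1 2 3 0 1 2 3 ...
    indices3 = (indices1 + indices2) // 2        # floor of the midpoint (i + j) / 2
    indices4 = (indices1 + indices2 + 1) // 2    # ceil of the midpoint

    print(indices1.reshape(n, n))  # row r is filled with r
    print(indices3.reshape(n, n))  # residue roughly halfway between i and j
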
Example #8
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, output_flag
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()

        input_features = in_layers[0].out_tensor

        n_channels = self.n_output_feat // len(self.rate)
        output_feats = [0]
        for i in range(1, len(self.rate)):
            output_feats.append(n_channels * i)
        output_feats.append(self.n_output_feat)

        out_tensors = [
            tf.nn.bias_add(
                tf.nn.atrous_conv2d(
                    input_features,
                    self.W[:, :, :, output_feats[i]:output_feats[i + 1]],
                    rate=rate,
                    padding='SAME'),
                self.b[output_feats[i]:output_feats[i + 1]])
            for i, rate in enumerate(self.rate)
        ]
        out_tensor = tf.concat(out_tensors, 3)

        if len(in_layers) > 2:
            flag = tf.expand_dims(in_layers[1].out_tensor, axis=3)
            train_flag = in_layers[2].out_tensor
            out_tensor = tf.layers.batch_normalization(out_tensor,
                                                       training=train_flag)
            out_tensor = out_tensor * tf.to_float(flag)
        elif len(in_layers) > 1:
            flag = tf.expand_dims(in_layers[1].out_tensor, axis=3)
            out_tensor = out_tensor * tf.to_float(flag)

        out_tensor = self.activation(out_tensor)
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
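
A small illustration of how the output channels are split across dilation rates above, with hypothetical values n_output_feat = 64 and rate = [1, 2, 4]; each rate gets its own slice of W and b (the last slice absorbs the remainder), and the per-rate outputs are concatenated back along the channel axis.

    n_output_feat, rates = 64, [1, 2, 4]
    n_channels = n_output_feat // len(rates)
    output_feats = [0] + [n_channels * i for i in range(1, len(rates))] + [n_output_feat]
    print(output_feats)  # [0, 21, 42, 64] -> channel slice boundaries, one block per rate
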
Example #9
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, input_flag_2D
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)
        self.build()

        input_features = in_layers[0].out_tensor
        input_features = (input_features +
                          tf.transpose(input_features, perm=[0, 2, 1, 3])) / 2

        flag = tf.cast(in_layers[1].out_tensor, dtype=tf.bool)
        out_tensor = tf.boolean_mask(input_features, flag)

        out_tensor = tf.nn.xw_plus_b(out_tensor, self.W, self.b)
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
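
A numpy sketch of the symmetrize-then-mask step, with hypothetical shapes: the (batch, n, n, channels) map is averaged with its transpose so that entries (i, j) and (j, i) agree, and tf.boolean_mask then keeps only the positions marked by the 2D flag before the final dense projection.

    import numpy as np

    feats = np.random.randn(1, 4, 4, 3)
    sym = (feats + feats.transpose(0, 2, 1, 3)) / 2    # symmetric in (i, j)
    flag = np.random.rand(1, 4, 4) > 0.5               # boolean output flag
    rows = sym[flag]                                   # same selection as tf.boolean_mask
    print(rows.shape)                                  # (n_selected, 3)
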
Example #10
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: distance, n_residues
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        print("TriangleInequality only supports batch_size of 1")
        dist_map = tf.exp(in_layers[0].out_tensor)
        n_residues = in_layers[1].out_tensor

        # Only test neighbouring residues
        test_indice = tf.range(n_residues[0] - 2)
        test_indice = tf.stack(
            [
                test_indice,      # i
                test_indice + 1,  # j
                test_indice + 1,  # j
                test_indice + 2,  # k
                test_indice,      # i
                test_indice + 2   # k
            ],
            axis=1)
        test_indice = tf.reshape(test_indice, [-1, 3, 2])

        # indice for distance(i,j) is n*i - i(i+1)/2 + j
        dist_map_indice = n_residues[0]*test_indice[:, :, 0] - \
            (test_indice[:, :, 0] + 1)*test_indice[:, :, 0]//2 + \
            test_indice[:, :, 1]

        dist = tf.gather(dist_map, dist_map_indice)

        penalty = tf.nn.relu(2 * tf.reduce_max(dist, axis=1) -
                             tf.reduce_sum(dist, axis=1))
        out_tensor = tf.reduce_sum(penalty * self.rate * 10. /
                                   tf.reduce_max(dist_map))

        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
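
Two details above are worth spelling out. First, the flattened distance list appears to store the upper triangle row by row including the diagonal, so entry (i, j) with j >= i sits at n*i - i*(i+1)//2 + j; a quick check for a hypothetical n = 5 is below. Second, for each (i, j, k) triple the penalty relu(2*max - sum) over the three gathered distances is positive exactly when the longest side exceeds the sum of the other two, i.e. when the triangle inequality is violated.

    n = 5
    pairs = [(i, j) for i in range(n) for j in range(i, n)]  # row-major upper triangle
    for i, j in [(0, 3), (1, 1), (2, 4)]:
        assert pairs[n * i - i * (i + 1) // 2 + j] == (i, j)
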
Example #11
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, input_flag
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)
        self.build()

        input_features = in_layers[0].out_tensor
        flag = tf.expand_dims(in_layers[1].out_tensor, axis=2)
        train_flag = in_layers[2].out_tensor
        input_features = tf.layers.batch_normalization(input_features,
                                                       training=train_flag)
        out_tensor = self.activation(input_features)
        out_tensor = out_tensor * tf.to_float(flag)

        out_tensor = tf.nn.conv1d(out_tensor, self.W, stride=1, padding='SAME')
        out_tensor = tf.nn.bias_add(out_tensor, self.b)

        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
Example #12
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, output_shape, output_flag
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        input_features = in_layers[0].out_tensor
        if self.uppertri:
            diag_elems = tf.transpose(tf.matrix_band_part(
                tf.transpose(input_features, perm=[0, 3, 1, 2]), 0, 0),
                                      perm=[0, 2, 3, 1])
            input_features = input_features + tf.transpose(
                input_features, perm=[0, 2, 1, 3]) - diag_elems

        out_shape = in_layers[1].out_tensor
        out_tensor = tf.image.resize_bilinear(input_features, out_shape[1:3])
        if len(in_layers) > 2:
            flag = tf.expand_dims(in_layers[2].out_tensor, axis=3)
            out_tensor = out_tensor * tf.to_float(flag)

        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
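
A numpy sketch of the upper-triangle symmetrization above, with made-up shapes: when each (n, n) channel holds only its upper triangle (diagonal included), adding the transpose and subtracting the diagonal once, which is what the tf.matrix_band_part(..., 0, 0) call extracts, yields a symmetric map without double-counting the diagonal.

    import numpy as np

    n = 4
    x = np.triu(np.random.randn(1, n, n, 2).transpose(0, 3, 1, 2)).transpose(0, 2, 3, 1)
    diag = x * np.eye(n)[None, :, :, None]   # diagonal band only, like matrix_band_part(.., 0, 0)
    sym = x + x.transpose(0, 2, 1, 3) - diag
    assert np.allclose(sym, sym.transpose(0, 2, 1, 3))
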
Example #13
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ parent layers: input_features, output_shape, output_flag
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()

        input_features = in_layers[0].out_tensor
        out_shape = in_layers[1].out_tensor

        out_tensor = tf.nn.conv2d_transpose(
            input_features,
            self.W,
            out_shape,
            strides=[1, self.n_size, self.n_size, 1],
            padding='SAME')
        out_tensor = tf.nn.bias_add(out_tensor, self.b)

        if len(in_layers) > 3:
            flag = tf.expand_dims(in_layers[2].out_tensor, axis=3)
            train_flag = in_layers[3].out_tensor
            out_tensor = tf.layers.batch_normalization(out_tensor,
                                                       training=train_flag)
            out_tensor = out_tensor * tf.to_float(flag)
        elif len(in_layers) > 2:
            flag = tf.expand_dims(in_layers[2].out_tensor, axis=3)
            out_tensor = out_tensor * tf.to_float(flag)

        if not self.activation_first:
            out_tensor = self.activation(out_tensor)
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
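
A hedged sketch of the transposed convolution above, with made-up sizes; note that the filter is laid out as [height, width, output_channels, in_channels] and that output_shape fixes the upsampled spatial size explicitly, which is why the layer reads it from a parent tensor.

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.randn(1, 8, 8, 16), dtype=tf.float32)   # NHWC input
    W = tf.constant(np.random.randn(2, 2, 4, 16), dtype=tf.float32)   # [h, w, out, in]
    y = tf.nn.conv2d_transpose(x, W, output_shape=[1, 16, 16, 4],
                               strides=[1, 2, 2, 1], padding='SAME')
    print(y.shape)  # (1, 16, 16, 4)
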