Example #1
    def call(self, x_batch):

        ### GAT with embedding:
        target_features, feature_dist_graph, rij_dist_pairs, b_scope, start_end_env, l_scope, scope_update, scope_update_lig = x_batch
        target_features = self.embedding(target_features)
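        # Combine the graph features with the reshaped pairwise distances and
        # embed them as the initial edge messages.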
        rij_dist_pairs = tf.reshape(rij_dist_pairs,
                                    (tf.shape(rij_dist_pairs)[0], 1))
        message = tf.concat([feature_dist_graph, rij_dist_pairs], axis=1)
        message = self.dist_embedding(message)

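        # Prepend a zero feature row; index 0 then acts as a padding entry for
        # the gather-based neighbourhood lookups.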
        target_features = tf.concat(
            [tf.zeros((1, tf.shape(target_features)[1]),
                      dtype=target_features.dtype), target_features],
            axis=0)
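        # Message passing: gather each node's connected features, apply the
        # attention layer, and rebuild the messages for the next iteration.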
        for i in range(0, self.num_iters):
            multible_entry_f_lig = getConnectedFeatures(
                target_features, start_end_env)
            target_features = self.GATLayer(multible_entry_f_lig, message,
                                            b_scope)
            message = updateConnectedDict(target_features, scope_update_lig,
                                          scope_update)

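        # Readout: gather the per-ligand node encodings and sum-pool them into
        # molecule vectors.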
        cmp_enc = tf.gather(target_features, indices=l_scope, axis=0)
        mol_vecs = tf.reduce_sum(cmp_enc, 1)

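        # Optional output head; flatten its result to a 1-D prediction vector.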
        if self.output_nn:
            return tf.reshape(self.output_nn(mol_vecs), [-1])
        return mol_vecs
Example #2
    def call(self, x_batch):

        ### GAT with embedding:
        target_features_orig, feature_dist_graph, rij_dist_pairs, b_scope, start_end_env, l_scope, scope_update, scope_update_lig = x_batch
        target_features_orig = self.embedding(target_features_orig)
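        # Combine the graph features with the reshaped pairwise distances and
        # embed them as the initial edge messages.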
        rij_dist_pairs = tf.reshape(rij_dist_pairs,
                                    (tf.shape(rij_dist_pairs)[0], 1))
        message = tf.concat([feature_dist_graph, rij_dist_pairs], axis=1)
        message = self.dist_embedding(message)

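        # Prepend a zero padding row to both the working features and the
        # preserved copy of the original node features.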
        zero_row = tf.zeros((1, tf.shape(target_features_orig)[1]),
                            dtype=target_features_orig.dtype)
        target_features = tf.concat([zero_row, target_features_orig], axis=0)
        target_features_orig = tf.concat([zero_row, target_features_orig],
                                         axis=0)
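        # Apply the attention heads sequentially; after each head the features
        # are updated against the originals and the messages are rebuilt.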
        for i in range(0, self.num_heads - 1):
            multible_entry_f_lig = getConnectedFeatures(
                target_features, start_end_env)
            target_features = self.heads[i](multible_entry_f_lig, message,
                                            b_scope)
            target_features = self._update_target_features(
                target_features, target_features_orig)
            message = updateConnectedDict(target_features, scope_update_lig,
                                          scope_update)

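        # Readout: gather the per-ligand node encodings and sum-pool them into
        # molecule vectors.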
        cmp_enc = tf.gather(target_features, indices=l_scope, axis=0)
        mol_vecs = tf.reduce_sum(cmp_enc, 1)

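        # Optional output head: flatten to a 1-D vector for regression targets,
        # otherwise return the network output as-is.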
        try:
            if self.output_nn and self.regression:
                return tf.reshape(self.output_nn(mol_vecs), [-1])
            elif self.output_nn:
                return self.output_nn(mol_vecs)
            return mol_vecs
        except Exception as e:
            log.error(
                'There seems to be an issue with the input dimensions. '
                'Please check the input dimension of the output_nn you '
                'defined outside. Currently the NN output dimension is: ' +
                str(self.num_heads * self.emb_dim), e)
Example #3
    def call(self, x_batch):
        target_features, feature_dist_graph, rij_dist_pairs, b_scope, start_end_env, l_scope, scope_update, scope_update_lig = x_batch
        target_features = createVar(target_features)
        feature_dist_graph = createVar(feature_dist_graph)
        rij_dist_pairs = createVar(rij_dist_pairs)

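        # Project the node features and the distance-graph features into the
        # hidden dimension.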
        target_features = self.W_i_a(target_features)
        feature_dist_graph = self.W_i_a(feature_dist_graph)

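        # Embed the pairwise distances and combine them with the projected
        # graph features to form the edge messages.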
        rij_dist_pairs = tf.reshape(rij_dist_pairs,
                                    (tf.shape(rij_dist_pairs)[0], 1))
        rij_dist_pairs = self.W_i_b(rij_dist_pairs)
        rij_dist_pairs = tf.keras.activations.relu(rij_dist_pairs)
        message = tf.concat([feature_dist_graph, rij_dist_pairs], axis=1)
        message = self.W_h(message)

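        # Prepend a zero padding row, then aggregate the incoming messages for
        # each node.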
        message = tf.concat([tf.zeros((1, 32), dtype=message.dtype), message],
                            axis=0)
        message = tf.gather(message, indices=b_scope)
        message = tf.reduce_sum(message, 1)

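        # GRU message passing: update the node states, then rebuild and
        # re-aggregate the edge messages for the next iteration.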
        for i in range(self.depth):
            target_features = self.gru(message, target_features)
            message = tf.convert_to_tensor(
                updateConnectedDict(target_features, scope_update_lig,
                                    scope_update))
            message = tf.concat([message, rij_dist_pairs], axis=1)
            message = self.W_h(message)
            message = tf.concat(
                [tf.zeros((1, 32), dtype=message.dtype), message], axis=0)
            message = tf.gather(message, indices=b_scope)
            message = tf.reduce_sum(message, 1)

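        # Readout: gather the per-ligand hidden states and sum-pool them into
        # molecule vectors.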
        feature_hiddens = tf.concat(
            [tf.zeros((1, 32), dtype=target_features.dtype), target_features],
            axis=0)
        mol_vecs = tf.gather(feature_hiddens, indices=l_scope, axis=0)
        mol_vecs = tf.reduce_sum(mol_vecs, 1)

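        # Optional output head; flatten its result to a 1-D prediction vector.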
        if self.output_nn:
            return tf.reshape(self.output_nn(mol_vecs), [-1])
        return mol_vecs