def _build_graph(self): """Build the model.""" smiles_seqs = Input(dtype=tf.int32, shape=(self.max_seq_len, ), name='Input') rnn_input = tf.keras.layers.Embedding( input_dim=len(self.char_to_idx), output_dim=self.embedding_dim)(smiles_seqs) if self.use_conv: rnn_input = Conv1D(filters=self.filters, kernel_size=self.kernel_size, strides=self.strides, activation=tf.nn.relu, name='Conv1D')(rnn_input) rnn_embeddings = rnn_input for idx, rnn_type in enumerate(self.rnn_types[:-1]): rnn_layer = RNN_DICT[rnn_type] layer = rnn_layer(units=self.rnn_sizes[idx], return_sequences=True) if self.use_bidir: layer = Bidirectional(layer) rnn_embeddings = layer(rnn_embeddings) # Last layer sequences not returned. layer = RNN_DICT[self.rnn_types[-1]](units=self.rnn_sizes[-1]) if self.use_bidir: layer = Bidirectional(layer) rnn_embeddings = layer(rnn_embeddings) if self.mode == "classification": logits = Dense(self.n_tasks * self.n_classes)(rnn_embeddings) logits = Reshape((self.n_tasks, self.n_classes))(logits) if self.n_classes == 2: output = Activation(activation='sigmoid')(logits) loss = SigmoidCrossEntropy() else: output = Softmax()(logits) loss = SoftmaxCrossEntropy() outputs = [output, logits] output_types = ['prediction', 'loss'] else: output = Dense(self.n_tasks * 1, name='Dense')(rnn_embeddings) output = Reshape((self.n_tasks, 1), name='Reshape')(output) outputs = [output] output_types = ['prediction'] loss = L2Loss() model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs) return model, loss, output_types
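# --- Illustrative sketch (not part of the model above) -----------------------
# A minimal, self-contained example of the classification head used in
# _build_graph: a Dense layer producing n_tasks * n_classes logits, reshaped to
# (batch, n_tasks, n_classes), followed by a per-task Softmax over the last
# axis. All sizes and names below are made up for illustration only.
import numpy as np
import tensorflow as tf

n_tasks, n_classes, embedding_size = 3, 4, 16

features = tf.keras.Input(shape=(embedding_size,))
logits = tf.keras.layers.Dense(n_tasks * n_classes)(features)
logits = tf.keras.layers.Reshape((n_tasks, n_classes))(logits)
probs = tf.keras.layers.Softmax()(logits)  # softmax over the class axis
head = tf.keras.Model(inputs=features, outputs=[probs, logits])

p, l = head(np.random.rand(2, embedding_size).astype(np.float32))
print(p.shape, l.shape)  # (2, 3, 4) (2, 3, 4); probabilities sum to 1 per task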
def __init__(self, n_tasks, K=10, penalty=0.0, mode="classification", **kwargs):
  """Initialize MultitaskIRVClassifier.

  Parameters
  ----------
  n_tasks: int
    Number of tasks.
  K: int
    Number of nearest neighbours used in classification.
  penalty: float
    Amount of penalty (L1 or L2) applied.
  """
  self.n_tasks = n_tasks
  self.K = K
  self.n_features = 2 * self.K * self.n_tasks
  self.penalty = penalty

  mol_features = Input(shape=(self.n_features,))
  predictions = IRVLayer(self.n_tasks, self.K, self.penalty)(mol_features)

  # Slice out each task's prediction and turn it into a probability.
  logits = []
  outputs = []
  for task in range(self.n_tasks):
    task_output = Slice(task, 1)(predictions)
    sigmoid = Activation(tf.sigmoid)(task_output)
    logits.append(task_output)
    outputs.append(sigmoid)
  outputs = layers.Stack(axis=1)(outputs)

  # Pair each per-task probability with its complement: [P(class 0), P(class 1)].
  outputs2 = Lambda(lambda x: 1 - x)(outputs)
  outputs = [
      Concatenate(axis=2)([outputs2, outputs]),
      logits[0] if len(logits) == 1 else Concatenate(axis=1)(logits)
  ]
  model = tf.keras.Model(inputs=[mol_features], outputs=outputs)
  super(MultitaskIRVClassifier, self).__init__(model,
                                               SigmoidCrossEntropy(),
                                               output_types=['prediction', 'loss'],
                                               **kwargs)
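# --- Illustrative sketch (not part of the class above) -----------------------
# A hedged, self-contained rewrite of the output assembly in __init__ using
# plain TensorFlow ops instead of the custom Slice/Stack layers: each task's
# logit is sliced out, passed through a sigmoid, the per-task probabilities are
# stacked to (batch, n_tasks, 1), and concatenated with their complement to
# give per-task [P(class 0), P(class 1)] pairs. Shapes are illustrative only.
import numpy as np
import tensorflow as tf

n_tasks, batch = 3, 2
predictions = tf.constant(np.random.randn(batch, n_tasks), dtype=tf.float32)

per_task = [tf.sigmoid(predictions[:, task:task + 1]) for task in range(n_tasks)]
probs = tf.stack(per_task, axis=1)                   # (batch, n_tasks, 1)
two_class = tf.concat([1.0 - probs, probs], axis=2)  # (batch, n_tasks, 2)
print(two_class.shape)                               # (2, 3, 2)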
def _build_graph(self):
  smile_images = Input(shape=self.input_shape)
  stem = chemnet_layers.Stem(self.base_filters)(smile_images)

  inceptionA_out = self.build_inception_module(inputs=stem, type="A")
  reductionA_out = chemnet_layers.ReductionA(self.base_filters)(inceptionA_out)

  inceptionB_out = self.build_inception_module(inputs=reductionA_out, type="B")
  reductionB_out = chemnet_layers.ReductionB(self.base_filters)(inceptionB_out)

  inceptionC_out = self.build_inception_module(inputs=reductionB_out, type="C")
  avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)

  if self.mode == "classification":
    logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
    logits = Reshape((self.n_tasks, self.n_classes))(logits)
    if self.n_classes == 2:
      output = Activation(activation='sigmoid')(logits)
      loss = SigmoidCrossEntropy()
    else:
      output = Softmax()(logits)
      loss = SoftmaxCrossEntropy()
    outputs = [output, logits]
    output_types = ['prediction', 'loss']
  else:
    output = Dense(self.n_tasks * 1)(avg_pooling_out)
    output = Reshape((self.n_tasks, 1))(output)
    outputs = [output]
    output_types = ['prediction']
    loss = L2Loss()

  model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
  return model, loss, output_types
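# --- Illustrative sketch (not part of the model above) -----------------------
# A minimal, self-contained example of the regression branch in _build_graph:
# global average pooling over an (H, W, C) feature map, followed by a Dense
# layer with n_tasks outputs reshaped to (batch, n_tasks, 1). The feature-map
# shape below is a stand-in, not the model's actual inceptionC_out shape.
import numpy as np
import tensorflow as tf

n_tasks = 5
feature_map = tf.keras.Input(shape=(8, 8, 32))
pooled = tf.keras.layers.GlobalAveragePooling2D()(feature_map)  # (batch, 32)
output = tf.keras.layers.Dense(n_tasks * 1)(pooled)
output = tf.keras.layers.Reshape((n_tasks, 1))(output)
head = tf.keras.Model(inputs=feature_map, outputs=output)

print(head(np.random.rand(2, 8, 8, 32).astype(np.float32)).shape)  # (2, 5, 1)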