def test_DTNNEmbedding_pickle():
  """Smoke test: a TensorGraph containing a DTNNEmbedding layer can be
  built and saved (exercises the layer's pickling path)."""
  graph = TensorGraph()
  atom_numbers = Feature(shape=(None, 23), dtype=tf.int32)
  embedding = DTNNEmbedding(in_layers=[atom_numbers])
  graph.add_output(embedding)
  graph.set_loss(embedding)
  graph.build()
  graph.save()
def build_graph(self):
  """Build the graph: character embedding => parallel Conv1D stacks =>
  max-over-time pooling => dense => highway => per-task output heads.

  Creates Feature/Label/Weights placeholders as attributes
  (self.smiles_seqs, self.labels_fd, self.weights), registers one output
  per task, and sets a WeightedError loss over the stacked per-task costs.

  Raises
  ------
  ValueError
    If self.mode is neither "classification" nor "regression". The
    original code silently built an empty cost list for unknown modes and
    then crashed with NameError on `all_cost`; fail fast instead.
  """
  if self.mode not in ("classification", "regression"):
    raise ValueError(
        "mode must be 'classification' or 'regression', got %s" % self.mode)
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding; +1 leaves index 0 free for padding/unknown chars
  # (presumably — confirm against the featurizer's char_dict construction).
  self.Embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  self.pooled_outputs = []
  self.conv_layers = []
  for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
    # Multiple convolutional layers with different filter widths
    self.conv_layers.append(
        Conv1D(
            kernel_size=filter_size,
            filters=num_filter,
            padding='valid',
            in_layers=[self.Embedding]))
    # Max-over-time pooling
    self.pooled_outputs.append(
        ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
  # Concat features from all filters (one feature per filter)
  concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
  dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  dense = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  self.gather = Highway(in_layers=[dense])
  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    if self.mode == "classification":
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[self.gather])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)
      label = Label(shape=(None, 2))
      self.labels_fd.append(label)
      costs.append(SoftMaxCrossEntropy(in_layers=[label, classification]))
    else:  # regression (validated above)
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[self.gather])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      costs.append(L2Loss(in_layers=[label, regression]))
  # Both modes stacked the per-task costs identically; the original code
  # duplicated this Stack in two equal branches.
  all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
def test_dtnn_embedding(self):
  """Test that DTNNEmbedding can be invoked."""
  embed_dim = 10
  table_len = 20
  atom_ids = np.random.permutation(
      np.arange(0, table_len // 2, dtype=np.int32))
  with self.session() as sess:
    ids_tensor = tf.convert_to_tensor(atom_ids, dtype=tf.int32)
    layer = DTNNEmbedding(
        n_embedding=embed_dim, periodic_table_length=table_len)
    layer.create_tensor(in_layers=[ids_tensor])
    # Layer is a thin wrapper around embedding lookup; compare against a
    # direct index into the initialized embedding table.
    sess.run(tf.global_variables_initializer())
    result = layer.out_tensor.eval()
    table = layer.trainable_variables[0].eval()
    expected = table[atom_ids]
    self.assertAllClose(result, expected)
    self.assertAllClose(result.shape, (table_len // 2, embed_dim))
def test_dtnn_embedding(self):
  """Test that DTNNEmbedding can be invoked."""
  n_embed = 10
  n_elements = 20
  lookup_indices = np.random.permutation(
      np.arange(0, n_elements // 2, dtype=np.int32))
  with self.session() as sess:
    index_tensor = tf.convert_to_tensor(lookup_indices, dtype=tf.int32)
    embed_layer = DTNNEmbedding(
        n_embedding=n_embed, periodic_table_length=n_elements)
    embed_layer.create_tensor(in_layers=[index_tensor])
    # Layer wraps an embedding lookup; verify output equals a direct
    # index into the layer's embedding_list variable.
    sess.run(tf.global_variables_initializer())
    actual = embed_layer.out_tensor.eval()
    weights = embed_layer.embedding_list.eval()
    self.assertAllClose(actual, weights[lookup_indices])
    self.assertAllClose(actual.shape, (n_elements // 2, n_embed))
def _build_graph(self):
  """Assemble the TextCNN graph: character embedding => parallel Conv1D
  stacks of varying width => max-over-time pooling => dense => highway =>
  a single multi-task head (softmax or regression, per self.mode)."""
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding over the SMILES alphabet
  char_embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  # One convolution per (filter width, filter count) pair
  conv_layers = [
      Conv1D(
          kernel_size=width,
          filters=count,
          padding='valid',
          in_layers=[char_embedding])
      for width, count in zip(self.kernel_sizes, self.num_filters)
  ]
  # Max-over-time pooling: one feature per filter
  pooled_outputs = [
      ReduceMax(axis=1, in_layers=[conv]) for conv in conv_layers
  ]
  # Concatenate the pooled features from every filter width
  concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
  dropped = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  hidden = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropped])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  highway = Highway(in_layers=[hidden])
  if self.mode == "classification":
    logits = Dense(
        out_channels=self.n_tasks * 2,
        activation_fn=None,
        in_layers=[highway])
    logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
    output = SoftMax(in_layers=[logits])
    self.add_output(output)
    labels = Label(shape=(None, self.n_tasks, 2))
    loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
  else:
    vals = Dense(
        out_channels=self.n_tasks * 1,
        activation_fn=None,
        in_layers=[highway])
    vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
    self.add_output(vals)
    labels = Label(shape=(None, self.n_tasks, 1))
    loss = ReduceSum(L2Loss(in_layers=[labels, vals]))
  weights = Weights(shape=(None, self.n_tasks))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
def build_graph(self):
  """Building graph structures:
  Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression
  """
  self.atom_number = Feature(shape=(None,), dtype=tf.int32)
  self.distance = Feature(shape=(None, self.n_distance))
  self.atom_membership = Feature(shape=(None,), dtype=tf.int32)
  self.distance_membership_i = Feature(shape=(None,), dtype=tf.int32)
  self.distance_membership_j = Feature(shape=(None,), dtype=tf.int32)

  def maybe_dropout(layer):
    # Wrap a layer in Dropout only when a nonzero rate is configured.
    if self.dropout > 0.0:
      return Dropout(self.dropout, in_layers=layer)
    return layer

  embedding = maybe_dropout(
      DTNNEmbedding(
          n_embedding=self.n_embedding, in_layers=[self.atom_number]))
  # Distance inputs shared by both DTNN interaction steps
  step_inputs = [
      self.distance, self.distance_membership_i, self.distance_membership_j
  ]
  step1 = maybe_dropout(
      DTNNStep(
          n_embedding=self.n_embedding,
          n_distance=self.n_distance,
          in_layers=[embedding] + step_inputs))
  step2 = maybe_dropout(
      DTNNStep(
          n_embedding=self.n_embedding,
          n_distance=self.n_distance,
          in_layers=[step1] + step_inputs))
  gathered = maybe_dropout(
      DTNNGather(
          n_embedding=self.n_embedding,
          layer_sizes=[self.n_hidden],
          n_outputs=self.n_tasks,
          output_activation=self.output_activation,
          in_layers=[step2, self.atom_membership]))
  n_tasks = self.n_tasks
  weights = Weights(shape=(None, n_tasks))
  labels = Label(shape=(None, n_tasks))
  output = Reshape(
      shape=(None, n_tasks),
      in_layers=[Dense(in_layers=gathered, out_channels=n_tasks)])
  self.add_output(output)
  weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
  self.set_loss(weighted_loss)
def build_graph(self):
  """Building graph structures:
  Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression
  """
  self.atom_number = Feature(shape=(None,), dtype=tf.int32)
  self.distance = Feature(shape=(None, self.n_distance))
  self.atom_membership = Feature(shape=(None,), dtype=tf.int32)
  self.distance_membership_i = Feature(shape=(None,), dtype=tf.int32)
  self.distance_membership_j = Feature(shape=(None,), dtype=tf.int32)
  embedding = DTNNEmbedding(
      n_embedding=self.n_embedding, in_layers=[self.atom_number])
  # Distance inputs shared by both DTNN interaction steps
  shared_inputs = [
      self.distance, self.distance_membership_i, self.distance_membership_j
  ]
  step_one = DTNNStep(
      n_embedding=self.n_embedding,
      n_distance=self.n_distance,
      in_layers=[embedding] + shared_inputs)
  step_two = DTNNStep(
      n_embedding=self.n_embedding,
      n_distance=self.n_distance,
      in_layers=[step_one] + shared_inputs)
  gathered = DTNNGather(
      n_embedding=self.n_embedding,
      layer_sizes=[self.n_hidden],
      n_outputs=self.n_tasks,
      output_activation=self.output_activation,
      in_layers=[gathered_inputs] if False else [step_two, self.atom_membership])
  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    # One scalar regression head per task, extracted from the gather output
    head = DTNNExtract(task, in_layers=[gathered])
    self.add_output(head)
    task_label = Label(shape=(None, 1))
    self.labels_fd.append(task_label)
    costs.append(L2Loss(in_layers=[task_label, head]))
  stacked_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  self.set_loss(WeightedError(in_layers=[stacked_cost, self.weights]))