def build_graph(self): """ Building graph structures: """ self.m1_features = Feature(shape=(None, self.n_features)) self.m2_features = Feature(shape=(None, self.n_features)) prev_layer1 = self.m1_features prev_layer2 = self.m2_features for layer_size in self.layer_sizes: prev_layer1 = Dense( out_channels=layer_size, in_layers=[prev_layer1], activation_fn=tf.nn.relu) prev_layer2 = prev_layer1.shared([prev_layer2]) if self.dropout > 0.0: prev_layer1 = Dropout(self.dropout, in_layers=prev_layer1) prev_layer2 = Dropout(self.dropout, in_layers=prev_layer2) readout_m1 = Dense( out_channels=1, in_layers=[prev_layer1], activation_fn=None) readout_m2 = readout_m1.shared([prev_layer2]) self.add_output(Sigmoid(readout_m1) * 4 + 1) self.add_output(Sigmoid(readout_m2) * 4 + 1) self.difference = readout_m1 - readout_m2 label = Label(shape=(None, 1)) loss = HingeLoss(in_layers=[label, self.difference]) self.my_task_weights = Weights(shape=(None, 1)) loss = WeightedError(in_layers=[loss, self.my_task_weights]) self.set_loss(loss)
def test_dense(self):
    """Test that Dense can be invoked."""
    in_dim = 2
    out_dim = 3
    batch_size = 10
    in_tensor = np.random.rand(batch_size, in_dim)
    with self.session() as sess:
      in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
      out_tensor = Dense(out_dim)(in_tensor)
      sess.run(tf.global_variables_initializer())
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, out_dim)
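# For contrast with the direct call above, a minimal sketch of the declarative
# style used in the rest of these snippets: Dense is wired through in_layers
# and resolved when the TensorGraph is built, rather than invoked on a tensor.
# This assumes the DeepChem TensorGraph API used throughout these tests; the
# 2-in / 3-out shapes simply mirror test_dense.
import deepchem as dc
from deepchem.models.tensorgraph.layers import Dense, Feature, Label, ReduceSquareDifference

features = Feature(shape=(None, 2))
dense = Dense(out_channels=3, in_layers=[features])  # declarative wiring
label = Label(shape=(None, 3))
loss = ReduceSquareDifference(in_layers=[dense, label])

tg = dc.models.TensorGraph(learning_rate=0.01)
tg.add_output(dense)
tg.set_loss(loss)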
def test_shared_layer(self):
    n_data_points = 20
    n_features = 2

    X = np.random.rand(n_data_points, n_features)
    y1 = np.array([[0, 1] for x in range(n_data_points)])
    X = NumpyDataset(X)
    ys = [NumpyDataset(y1)]

    databag = Databag()

    features = Feature(shape=(None, n_features))
    databag.add_dataset(features, X)

    outputs = []

    label = Label(shape=(None, 2))
    dense1 = Dense(out_channels=2, in_layers=[features])
    dense2 = dense1.shared(in_layers=[features])
    output1 = SoftMax(in_layers=[dense1])
    output2 = SoftMax(in_layers=[dense2])
    smce = SoftMaxCrossEntropy(in_layers=[label, dense1])

    outputs.append(output1)
    outputs.append(output2)

    databag.add_dataset(label, ys[0])

    total_loss = ReduceMean(in_layers=[smce])

    tg = dc.models.TensorGraph(learning_rate=0.01)
    for output in outputs:
      tg.add_output(output)
    tg.set_loss(total_loss)

    tg.fit_generator(
        databag.iterbatches(
            epochs=1, batch_size=tg.batch_size, pad_batches=True))
    prediction = tg.predict_on_generator(databag.iterbatches())
    assert_true(np.all(np.isclose(prediction[0], prediction[1], atol=0.01)))
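# The point of the test above is that Layer.shared() returns a copy that
# reuses the original layer's variables. A stripped-down sketch of the same
# pattern, using only the layer classes already imported here (names are
# illustrative):
features_s = Feature(shape=(None, 2))
dense_a = Dense(out_channels=2, in_layers=[features_s])
dense_b = dense_a.shared(in_layers=[features_s])  # weight-tied to dense_a
head_a = SoftMax(in_layers=[dense_a])
head_b = SoftMax(in_layers=[dense_b])  # stays numerically identical to head_a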
def test_multi_task_classifier(self): n_data_points = 20 n_features = 2 X = np.random.rand(n_data_points, n_features) y1 = np.array([[0, 1] for x in range(n_data_points)]) y2 = np.array([[1, 0] for x in range(n_data_points)]) X = NumpyDataset(X) ys = [NumpyDataset(y1), NumpyDataset(y2)] databag = Databag() features = Feature(shape=(None, n_features)) databag.add_dataset(features, X) outputs = [] entropies = [] for i in range(2): label = Label(shape=(None, 2)) dense = Dense(out_channels=2, in_layers=[features]) output = SoftMax(in_layers=[dense]) smce = SoftMaxCrossEntropy(in_layers=[label, dense]) entropies.append(smce) outputs.append(output) databag.add_dataset(label, ys[i]) total_loss = ReduceMean(in_layers=entropies) tg = dc.models.TensorGraph(learning_rate=0.01) for output in outputs: tg.add_output(output) tg.set_loss(total_loss) tg.fit_generator( databag.iterbatches(epochs=1000, batch_size=tg.batch_size, pad_batches=True)) predictions = tg.predict_on_generator(databag.iterbatches()) for i in range(2): y_real = ys[i].X y_pred = predictions[i] assert_true(np.all(np.isclose(y_pred, y_real, atol=0.6)))
def build_graph(self): self.atom_numbers = Feature(shape=(None, self.max_atoms), dtype=tf.int32) self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms)) self.atom_feats = Feature(shape=(None, self.max_atoms, 4)) previous_layer = ANIFeat(in_layers=self.atom_feats, max_atoms=self.max_atoms) self.featurized = previous_layer Hiddens = [] for n_hidden in self.layer_structures: Hidden = AtomicDifferentiatedDense( self.max_atoms, n_hidden, self.atom_number_cases, activation='tanh', in_layers=[previous_layer, self.atom_numbers]) Hiddens.append(Hidden) previous_layer = Hiddens[-1] costs = [] self.labels_fd = [] for task in range(self.n_tasks): regression = Dense(out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]]) output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags]) self.add_output(output) label = Label(shape=(None, 1)) self.labels_fd.append(label) cost = L2Loss(in_layers=[label, output]) costs.append(cost) all_cost = Stack(in_layers=costs, axis=1) self.weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[all_cost, self.weights]) self.set_loss(loss)
def test_get_layer_variable_values_eager(self):
    """Tests to get variable values associated with a layer in eager mode"""
    with context.eager_mode():
      # Test for correct value return (eager mode)
      tg = dc.models.TensorGraph()
      var = Variable([10.0, 12.0])
      tg.add_output(var)
      expected = [10.0, 12.0]
      obtained = tg.get_layer_variable_values(var)[0]
      np.testing.assert_array_equal(expected, obtained)

      # Test for shape (eager mode)
      tg = dc.models.TensorGraph()
      input_tensor = Input(shape=(10, 100))
      output = Dense(out_channels=20, in_layers=[input_tensor])
      tg.add_output(output)
      expected_shape = (100, 20)
      obtained_shape = tg.get_layer_variable_values(output)[0].shape
      assert expected_shape == obtained_shape
def test_multi_task_regressor(self): n_data_points = 20 n_features = 2 X = np.random.rand(n_data_points, n_features) y1 = np.expand_dims(np.array([0.5 for x in range(n_data_points)]), axis=-1) y2 = np.expand_dims(np.array([-0.5 for x in range(n_data_points)]), axis=-1) X = NumpyDataset(X) ys = [NumpyDataset(y1), NumpyDataset(y2)] databag = Databag() features = Feature(shape=(None, n_features)) databag.add_dataset(features, X) outputs = [] losses = [] for i in range(2): label = Label(shape=(None, 1)) dense = Dense(out_channels=1, in_layers=[features]) loss = ReduceSquareDifference(in_layers=[dense, label]) outputs.append(dense) losses.append(loss) databag.add_dataset(label, ys[i]) total_loss = ReduceMean(in_layers=losses) tg = dc.models.TensorGraph(learning_rate=0.01) for output in outputs: tg.add_output(output) tg.set_loss(total_loss) tg.fit_generator( databag.iterbatches( epochs=1000, batch_size=tg.batch_size, pad_batches=True)) predictions = tg.predict_on_generator(databag.iterbatches()) for i in range(2): y_real = ys[i].X y_pred = predictions[i] assert_true(np.all(np.isclose(y_pred, y_real, atol=1.5)))
def test_compute_model_performance_multitask_classifier(self): n_data_points = 20 n_features = 1 n_tasks = 2 n_classes = 2 X = np.ones(shape=(n_data_points // 2, n_features)) * -1 X1 = np.ones(shape=(n_data_points // 2, n_features)) X = np.concatenate((X, X1)) class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))]) class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))]) y1 = np.concatenate((class_0, class_1)) y2 = np.concatenate((class_1, class_0)) y = np.stack([y1, y2], axis=1) dataset = NumpyDataset(X, y) features = Feature(shape=(None, n_features)) label = Label(shape=(None, n_tasks, n_classes)) dense = Dense(out_channels=n_tasks * n_classes, in_layers=[features]) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=dense) output = SoftMax(in_layers=[logits]) smce = SoftMaxCrossEntropy(in_layers=[label, logits]) total_loss = ReduceMean(in_layers=smce) tg = dc.models.TensorGraph(learning_rate=0.01, batch_size=n_data_points) tg.add_output(output) tg.set_loss(total_loss) tg.fit(dataset, nb_epoch=1000) metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification") scores = tg.evaluate_generator(tg.default_generator(dataset), [metric], labels=[label], per_task_metrics=True) scores = list(scores[1].values()) # Loosening atol to see if tests stop failing sporadically assert_true(np.all(np.isclose(scores, [1.0, 1.0], atol=0.50)))
def test_copy_layers(self):
    """Test copying layers."""
    tg = dc.models.TensorGraph()
    features = Feature(shape=(None, 10))
    dense = Dense(
        10, in_layers=features, biases_initializer=tf.random_normal_initializer)
    constant = Constant(10.0)
    output = dense + constant
    tg.add_output(output)
    tg.set_loss(output)
    tg.fit_generator([])
    replacements = {constant: Constant(20.0)}
    copy = output.copy(replacements, tg)
    assert isinstance(copy, Add)
    assert isinstance(copy.in_layers[0], Dense)
    assert isinstance(copy.in_layers[0].in_layers[0], Feature)
    assert copy.in_layers[1] == replacements[constant]
    variables = tg.get_layer_variables(dense)
    with tg._get_tf("Graph").as_default():
      values = tg.session.run(variables)
    for v1, v2 in zip(values, copy.in_layers[0].variable_values):
      assert np.array_equal(v1, v2)
def test_save_load(self):
    n_data_points = 20
    n_features = 2
    X = np.random.rand(n_data_points, n_features)
    y = [[0, 1] for x in range(n_data_points)]
    dataset = NumpyDataset(X, y)
    features = Feature(shape=(None, n_features))
    dense = Dense(out_channels=2, in_layers=[features])
    output = SoftMax(in_layers=[dense])
    label = Label(shape=(None, 2))
    smce = SoftMaxCrossEntropy(in_layers=[label, dense])
    loss = ReduceMean(in_layers=[smce])
    tg = dc.models.TensorGraph(learning_rate=0.01)
    tg.add_output(output)
    tg.set_loss(loss)
    tg.fit(dataset, nb_epoch=1)
    prediction = np.squeeze(tg.predict_on_batch(X))
    tg.save()

    tg1 = TensorGraph.load_from_dir(tg.model_dir)
    prediction2 = np.squeeze(tg1.predict_on_batch(X))
    assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))
def test_sequential(self):
    """Test creating an Estimator from a Sequential model."""
    n_samples = 20
    n_features = 2

    # Create a dataset and an input function for processing it.
    X = np.random.rand(n_samples, n_features)
    y = [0.5 for x in range(n_samples)]
    dataset = dc.data.NumpyDataset(X, y)

    def input_fn(epochs):
      x, y, weights = dataset.make_iterator(
          batch_size=n_samples, epochs=epochs).get_next()
      return {'x': x}, y

    # Create the model.
    model = dc.models.Sequential(loss="mse", learning_rate=0.01)
    model.add(Dense(out_channels=1))

    # Create an estimator from it.
    x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
    metrics = {'error': tf.metrics.mean_absolute_error}
    estimator = model.make_estimator(feature_columns=[x_col], metrics=metrics)

    # Train the model.
    estimator.train(input_fn=lambda: input_fn(1000))

    # Evaluate the model.
    results = estimator.evaluate(input_fn=lambda: input_fn(1))
    assert results['loss'] < 1e-2
    assert results['error'] < 0.1
def test_copy_layers_shared(self):
    """Test copying layers with shared variables."""
    tg = dc.models.TensorGraph()
    features = Feature(shape=(None, 10))
    dense = Dense(
        10, in_layers=features, biases_initializer=tf.random_normal_initializer)
    constant = Constant(10.0)
    output = dense + constant
    tg.add_output(output)
    tg.set_loss(output)
    replacements = {features: features, constant: Constant(20.0)}
    copy = output.copy(replacements, shared=True)
    tg.add_output(copy)
    assert isinstance(copy, Add)
    assert isinstance(copy.in_layers[0], Dense)
    assert isinstance(copy.in_layers[0].in_layers[0], Feature)
    assert copy.in_layers[1] == replacements[constant]
    variables1 = tg.get_layer_variables(dense)
    variables2 = tg.get_layer_variables(copy.in_layers[0])
    for v1, v2 in zip(variables1, variables2):
      assert v1 == v2
    feed_dict = {features: np.random.random((5, 10))}
    v1, v2 = tg.predict_on_generator([feed_dict], outputs=[output, copy])
    assert_true(np.all(np.isclose(v1 + 10, v2)))
def build_graph(self): """Building graph structures: Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.pair_features = Feature(shape=(None, self.n_pair_feat)) self.pair_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32) weave_layer1A, weave_layer1P = WeaveLayerFactory( n_atom_input_feat=self.n_atom_feat, n_pair_input_feat=self.n_pair_feat, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, in_layers=[ self.atom_features, self.pair_features, self.pair_split, self.atom_to_pair ]) weave_layer2A, weave_layer2P = WeaveLayerFactory( n_atom_input_feat=self.n_hidden, n_pair_input_feat=self.n_hidden, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, update_pair=False, in_layers=[ weave_layer1A, weave_layer1P, self.pair_split, self.atom_to_pair ]) dense1 = Dense(out_channels=self.n_graph_feat, activation_fn=tf.nn.tanh, in_layers=weave_layer2A) batch_norm1 = BatchNorm(epsilon=1e-5, in_layers=[dense1]) weave_gather = WeaveGather(self.batch_size, n_input=self.n_graph_feat, gaussian_expand=True, in_layers=[batch_norm1, self.atom_split]) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=weave_gather, out_channels=n_tasks * n_classes) ]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape(shape=(None, n_tasks), in_layers=[ Dense(in_layers=weave_gather, out_channels=n_tasks) ]) self.add_output(output) weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def __init__(self, n_tasks, n_features, layer_sizes=[1000], weight_init_stddevs=[0.02], bias_init_consts=[1.0], weight_decay_penalty=0.0, weight_decay_penalty_type="l2", dropouts=[0.5], n_classes=2, **kwargs): """Create a TensorGraphMultiTaskClassifier. In addition to the following arguments, this class also accepts all the keywork arguments from TensorGraph. Parameters ---------- n_tasks: int number of tasks n_features: int number of features layer_sizes: list the size of each dense layer in the network. The length of this list determines the number of layers. weight_init_stddevs: list the standard deviation of the distribution to use for weight initialization of each layer. The length of this list should equal len(layer_sizes). bias_init_consts: list the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes). weight_decay_penalty: float the magnitude of the weight decay penalty to use weight_decay_penalty_type: str the type of penalty to use for weight decay, either 'l1' or 'l2' dropouts: list the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes). n_classes: int the number of classes """ super(TensorGraphMultiTaskClassifier, self).__init__(mode='classification', **kwargs) self.n_tasks = n_tasks self.n_features = n_features self.n_classes = n_classes # Add the input features. mol_features = Feature(shape=(None, n_features)) prev_layer = mol_features # Add the dense layers for size, weight_stddev, bias_const, dropout in zip( layer_sizes, weight_init_stddevs, bias_init_consts, dropouts): layer = Dense(in_layers=[prev_layer], out_channels=size, activation_fn=tf.nn.relu, weights_initializer=TFWrapper( tf.truncated_normal_initializer, stddev=weight_stddev), biases_initializer=TFWrapper(tf.constant_initializer, value=bias_const)) if dropout > 0.0: layer = Dropout(dropout, in_layers=[layer]) prev_layer = layer # Compute the loss function for each label. output = Reshape(shape=(-1, n_tasks, n_classes), in_layers=[ Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes) ]) self.add_output(output) labels = Label(shape=(None, n_tasks, n_classes)) weights = Weights(shape=(None, n_tasks)) loss = SoftMaxCrossEntropy(in_layers=[labels, output]) weighted_loss = WeightedError(in_layers=[loss, weights]) if weight_decay_penalty != 0.0: weighted_loss = WeightDecay(weight_decay_penalty, weight_decay_penalty_type, in_layers=[weighted_loss]) self.set_loss(weighted_loss)
def build_graph(self): """ Building graph structures: """ self.atom_features = Feature(shape=(None, self.number_atom_features)) self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32) self.membership = Feature(shape=(None, ), dtype=tf.int32) self.deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) self.deg_adjs.append(deg_adj) in_layer = self.atom_features for layer_size, dropout in zip(self.graph_conv_layers, self.dropout): gc1_in = [in_layer, self.degree_slice, self.membership ] + self.deg_adjs gc1 = GraphConv(layer_size, activation_fn=tf.nn.relu, in_layers=gc1_in) batch_norm1 = BatchNorm(in_layers=[gc1]) if dropout > 0.0: batch_norm1 = Dropout(dropout, in_layers=batch_norm1) gp_in = [batch_norm1, self.degree_slice, self.membership ] + self.deg_adjs in_layer = GraphPool(in_layers=gp_in) dense = Dense(out_channels=self.dense_layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer]) batch_norm3 = BatchNorm(in_layers=[dense]) if self.dropout[-1] > 0.0: batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3) readout = GraphGather( batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, self.degree_slice, self.membership] + self.deg_adjs) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=readout, out_channels=n_tasks * n_classes) ]) logits = TrimGraphOutput([logits, weights]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=readout, out_channels=n_tasks)]) output = TrimGraphOutput([output, weights]) self.add_output(output) if self.uncertainty: log_var = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=readout, out_channels=n_tasks)]) log_var = TrimGraphOutput([log_var, weights]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def build_graph(self): # Build placeholders self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.pair_features = Feature(shape=(None, self.n_pair_feat)) self.atom_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32) message_passing = MessagePassing(self.T, message_fn='enn', update_fn='gru', n_hidden=self.n_hidden, in_layers=[ self.atom_features, self.pair_features, self.atom_to_pair ]) atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing]) mol_embeddings = SetGather( self.M, self.batch_size, n_hidden=self.n_hidden, in_layers=[atom_embeddings, self.atom_split]) dense1 = Dense(out_channels=2 * self.n_hidden, activation_fn=tf.nn.relu, in_layers=[mol_embeddings]) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=dense1, out_channels=n_tasks * n_classes) ]) logits = TrimGraphOutput([logits, weights]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)]) output = TrimGraphOutput([output, weights]) self.add_output(output) if self.uncertainty: log_var = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)]) log_var = TrimGraphOutput([log_var, weights]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding='SAME')
    return self.out_tensor

conv2d_1 = Conv2d(num_outputs=32)
tg.add_layer(conv2d_1, parents=[make_image])

conv2d_2 = Conv2d(num_outputs=64)
tg.add_layer(conv2d_2, parents=[conv2d_1])

flatten = Flatten()
tg.add_layer(flatten, parents=[conv2d_2])

dense1 = Dense(out_channels=1024, activation_fn=tf.nn.relu)
tg.add_layer(dense1, parents=[flatten])

dense2 = Dense(out_channels=10)
tg.add_layer(dense2, parents=[dense1])

label = Input(shape=(None, 10))
tg.add_label(label)

smce = SoftMaxCrossEntropy()
tg.add_layer(smce, parents=[label, dense2])

loss = ReduceMean()
tg.add_layer(loss, parents=[smce])
tg.set_loss(loss)
def build_graph(self): """Building graph structures: Features => DAGLayer => DAGGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms), dtype=tf.int32) self.calculation_orders = Feature(shape=(None, self.max_atoms), dtype=tf.int32) self.calculation_masks = Feature(shape=(None, self.max_atoms), dtype=tf.bool) self.membership = Feature(shape=(None, ), dtype=tf.int32) self.n_atoms = Feature(shape=(), dtype=tf.int32) dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat, n_atom_feat=self.n_atom_feat, max_atoms=self.max_atoms, layer_sizes=self.layer_sizes, dropout=self.dropout, batch_size=self.batch_size, in_layers=[ self.atom_features, self.parents, self.calculation_orders, self.calculation_masks, self.n_atoms ]) dag_gather = DAGGather(n_graph_feat=self.n_graph_feat, n_outputs=self.n_outputs, max_atoms=self.max_atoms, layer_sizes=self.layer_sizes_gather, dropout=self.dropout, in_layers=[dag_layer1, self.membership]) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=dag_gather, out_channels=n_tasks * n_classes) ]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=dag_gather, out_channels=n_tasks)]) self.add_output(output) if self.uncertainty: log_var = Reshape(shape=(None, n_tasks), in_layers=[ Dense(in_layers=dag_gather, out_channels=n_tasks) ]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def __init__(self, n_tasks, n_features, layer_sizes=[1000], weight_init_stddevs=0.02, bias_init_consts=1.0, weight_decay_penalty=0.0, weight_decay_penalty_type="l2", dropouts=0.5, activation_fns=tf.nn.relu, uncertainty=False, **kwargs): """Create a MultitaskRegressor. In addition to the following arguments, this class also accepts all the keywork arguments from TensorGraph. Parameters ---------- n_tasks: int number of tasks n_features: int number of features layer_sizes: list the size of each dense layer in the network. The length of this list determines the number of layers. weight_init_stddevs: list or float the standard deviation of the distribution to use for weight initialization of each layer. The length of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer. Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. bias_init_consts: list or float the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer. Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. weight_decay_penalty: float the magnitude of the weight decay penalty to use weight_decay_penalty_type: str the type of penalty to use for weight decay, either 'l1' or 'l2' dropouts: list or float the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. activation_fns: list or object the Tensorflow activation function to apply to each layer. The length of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. uncertainty: bool if True, include extra outputs and loss terms to enable the uncertainty in outputs to be predicted """ super(MultitaskRegressor, self).__init__(**kwargs) self.n_tasks = n_tasks self.n_features = n_features n_layers = len(layer_sizes) if not isinstance(weight_init_stddevs, collections.Sequence): weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1) if not isinstance(bias_init_consts, collections.Sequence): bias_init_consts = [bias_init_consts] * (n_layers + 1) if not isinstance(dropouts, collections.Sequence): dropouts = [dropouts] * n_layers if not isinstance(activation_fns, collections.Sequence): activation_fns = [activation_fns] * n_layers if uncertainty: if any(d == 0.0 for d in dropouts): raise ValueError( 'Dropout must be included in every layer to predict uncertainty' ) # Add the input features. mol_features = Feature(shape=(None, n_features)) prev_layer = mol_features # Add the dense layers for size, weight_stddev, bias_const, dropout, activation_fn in zip( layer_sizes, weight_init_stddevs, bias_init_consts, dropouts, activation_fns): layer = Dense(in_layers=[prev_layer], out_channels=size, activation_fn=activation_fn, weights_initializer=TFWrapper( tf.truncated_normal_initializer, stddev=weight_stddev), biases_initializer=TFWrapper(tf.constant_initializer, value=bias_const)) if dropout > 0.0: layer = Dropout(dropout, in_layers=[layer]) prev_layer = layer self.neural_fingerprint = prev_layer # Compute the loss function for each label. 
output = Reshape(shape=(-1, n_tasks, 1), in_layers=[ Dense(in_layers=[prev_layer], out_channels=n_tasks, weights_initializer=TFWrapper( tf.truncated_normal_initializer, stddev=weight_init_stddevs[-1]), biases_initializer=TFWrapper( tf.constant_initializer, value=bias_init_consts[-1])) ]) self.add_output(output) labels = Label(shape=(None, n_tasks, 1)) weights = Weights(shape=(None, n_tasks, 1)) if uncertainty: log_var = Reshape( shape=(-1, n_tasks, 1), in_layers=[ Dense(in_layers=[prev_layer], out_channels=n_tasks, weights_initializer=TFWrapper( tf.truncated_normal_initializer, stddev=weight_init_stddevs[-1]), biases_initializer=TFWrapper(tf.constant_initializer, value=0.0)) ]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1, 2])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) if weight_decay_penalty != 0.0: weighted_loss = WeightDecay(weight_decay_penalty, weight_decay_penalty_type, in_layers=[weighted_loss]) self.set_loss(weighted_loss)
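# For reference, the uncertainty branch above is the heteroscedastic Gaussian
# negative log-likelihood (up to an additive constant): each entry contributes
# w * ((y - mu)**2 / var + log(var)), averaged over tasks/outputs and summed
# over the batch. A NumPy sketch of the same reduction; the function name and
# arguments are illustrative, not part of the model.
import numpy as np

def heteroscedastic_loss(y, mu, log_var, w):
  """Mirrors weights * (diff * diff / var + log_var) reduced as above."""
  var = np.exp(log_var)
  per_example = np.mean(w * ((y - mu) ** 2 / var + log_var), axis=(1, 2))
  return np.sum(per_example)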
deg_adjs = []
for i in range(0, 10 + 1):
  deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
  deg_adjs.append(deg_adj)

gc1 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[atom_features, degree_slice, membership] + deg_adjs)
batch_norm1 = BatchNorm(in_layers=[gc1])
gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)

gc2 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[gp1, degree_slice, membership] + deg_adjs)
batch_norm2 = BatchNorm(in_layers=[gc2])
gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)

dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
batch_norm3 = BatchNorm(in_layers=[dense])
readout = GraphGather(
    batch_size=batch_size,
    activation_fn=tf.nn.tanh,
    in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)

costs = []
labels = []
for task in range(len(current_tasks)):
  classification = Dense(
      out_channels=2, activation_fn=None, in_layers=[readout])
  softmax = SoftMax(in_layers=[classification])
  tg.add_output(softmax)
gc2 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[dp1, degree_slice, membership] + deg_adjs)
bn2 = BatchNorm(in_layers=[gc2])
gp2 = GraphPool(in_layers=[bn2, degree_slice, membership] + deg_adjs)
dp2 = Dropout(0.5, in_layers=gp2)

gc3 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[dp2, degree_slice, membership] + deg_adjs)
bn3 = BatchNorm(in_layers=[gc3])
gp3 = GraphPool(in_layers=[bn3, degree_slice, membership] + deg_adjs)
dp3 = Dropout(0.5, in_layers=gp3)

dense1 = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[dp3])
out1 = GraphGather(
    batch_size=n_batch,
    activation_fn=tf.nn.tanh,
    in_layers=[dense1, degree_slice, membership] + deg_adjs)

# In this model the multilabel output (15 precursors) is classified
# using the learned feature vector.
cost15 = []
for ts in range(ntask):
  label_t = label15[ts]
  classification_t = Dense(out_channels=2, in_layers=[out1])
  softmax_t = SoftMax(in_layers=[classification_t])
  tg.add_output(softmax_t)
  cost_t = SoftMaxCrossEntropy(in_layers=[label_t, classification_t])
  cost15.append(cost_t)
def build_graph(self): self.vertex_features = Feature(shape=(None, self.max_atoms, 75)) self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms)) self.mask = Feature(shape=(None, self.max_atoms, 1)) gcnn1 = BatchNorm( GraphCNN( num_filters=64, in_layers=[self.vertex_features, self.adj_matrix, self.mask])) gcnn1 = Dropout(self.dropout, in_layers=gcnn1) gcnn2 = BatchNorm( GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask])) gcnn2 = Dropout(self.dropout, in_layers=gcnn2) gc_pool, adj_matrix = GraphCNNPool( num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask]) gc_pool = BatchNorm(gc_pool) gc_pool = Dropout(self.dropout, in_layers=gc_pool) gcnn3 = BatchNorm( GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix])) gcnn3 = Dropout(self.dropout, in_layers=gcnn3) gc_pool2, adj_matrix2 = GraphCNNPool(num_vertices=8, in_layers=[gcnn3, adj_matrix]) gc_pool2 = BatchNorm(gc_pool2) gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2) flattened = Flatten(in_layers=gc_pool2) readout = Dense(out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened) costs = [] self.my_labels = [] for task in range(self.n_tasks): if self.mode == 'classification': classification = Dense(out_channels=2, activation_fn=None, in_layers=[readout]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.my_labels.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == 'regression': regression = Dense(out_channels=1, activation_fn=None, in_layers=[readout]) self.add_output(regression) label = Label(shape=(None, 1)) self.my_labels.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": entropy = Stack(in_layers=costs, axis=-1) elif self.mode == "regression": entropy = Stack(in_layers=costs, axis=1) self.my_task_weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[entropy, self.my_task_weights]) self.set_loss(loss)
def build_graph(self): self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.pair_features = Feature(shape=(None, self.n_pair_feat)) self.pair_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32) weave_layer1A, weave_layer1P = WeaveLayerFactory( n_atom_input_feat=self.n_atom_feat, n_pair_input_feat=self.n_pair_feat, n_atom_output_feat=self.n_hidden[0], n_pair_output_feat=self.n_hidden[0], in_layers=[ self.atom_features, self.pair_features, self.pair_split, self.atom_to_pair ]) for myind in range(1, len(self.n_hidden) - 1): weave_layer1A, weave_layer1P = WeaveLayerFactory( n_atom_input_feat=self.n_hidden[myind - 1], n_pair_input_feat=self.n_hidden[myind - 1], n_atom_output_feat=self.n_hidden[myind], n_pair_output_feat=self.n_hidden[myind], update_pair=True, in_layers=[ weave_layer1A, weave_layer1P, self.pair_split, self.atom_to_pair ]) if len(self.n_hidden) > 1.5: myind = len(self.n_hidden) - 1 weave_layer1A, weave_layer1P = WeaveLayerFactory( n_atom_input_feat=self.n_hidden[myind - 1], n_pair_input_feat=self.n_hidden[myind - 1], n_atom_output_feat=self.n_hidden[myind], n_pair_output_feat=self.n_hidden[myind], update_pair=False, in_layers=[ weave_layer1A, weave_layer1P, self.pair_split, self.atom_to_pair ]) dense1 = Dense(out_channels=self.n_graph_feat[0], activation_fn=tf.nn.tanh, in_layers=weave_layer1A) #batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1]) batch_norm1 = MyBatchNorm(in_layers=[dense1]) weave_gather = WeaveGather(self.batch_size, n_input=self.n_graph_feat[0], gaussian_expand=False, in_layers=[batch_norm1, self.atom_split]) weave_gatherBatchNorm2 = MyBatchNorm(in_layers=[weave_gather]) curLayer = weave_gatherBatchNorm2 for myind in range(1, len(self.n_graph_feat) - 1): curLayer = Dense(out_channels=self.n_graph_feat[myind], activation_fn=tf.nn.relu, in_layers=[curLayer]) curLayer = Dropout(self.dropout, in_layers=[curLayer]) classification = Dense(out_channels=self.n_tasks, activation_fn=None, in_layers=[curLayer]) sigmoid = MySigmoid(in_layers=[classification]) self.add_output(sigmoid) self.label = Label(shape=(None, self.n_tasks)) all_cost = MySigmoidCrossEntropy( in_layers=[self.label, classification]) self.weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[all_cost, self.weights]) self.set_loss(loss) self.mydense1 = dense1 self.mybatch_norm1 = batch_norm1 self.myweave_gather = weave_gather self.myclassification = classification self.mysigmoid = sigmoid self.myall_cost = all_cost self.myloss = loss
def __init__(self, n_tasks, n_features, alpha_init_stddevs=0.02, layer_sizes=[1000], weight_init_stddevs=0.02, bias_init_consts=1.0, weight_decay_penalty=0.0, weight_decay_penalty_type="l2", dropouts=0.5, activation_fns=tf.nn.relu, **kwargs): """Creates a progressive network. Only listing parameters specific to progressive networks here. Parameters ---------- n_tasks: int Number of tasks n_features: int Number of input features alpha_init_stddevs: list List of standard-deviations for alpha in adapter layers. layer_sizes: list the size of each dense layer in the network. The length of this list determines the number of layers. weight_init_stddevs: list or float the standard deviation of the distribution to use for weight initialization of each layer. The length of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer. Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. bias_init_consts: list or float the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer. Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. weight_decay_penalty: float the magnitude of the weight decay penalty to use weight_decay_penalty_type: str the type of penalty to use for weight decay, either 'l1' or 'l2' dropouts: list or float the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. activation_fns: list or object the Tensorflow activation function to apply to each layer. The length of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the same value is used for every layer. """ super(ProgressiveMultitaskRegressor, self).__init__(**kwargs) self.n_tasks = n_tasks self.n_features = n_features self.layer_sizes = layer_sizes self.alpha_init_stddevs = alpha_init_stddevs self.weight_init_stddevs = weight_init_stddevs self.bias_init_consts = bias_init_consts self.dropouts = dropouts self.activation_fns = activation_fns n_layers = len(layer_sizes) if not isinstance(weight_init_stddevs, collections.Sequence): self.weight_init_stddevs = [weight_init_stddevs] * n_layers if not isinstance(alpha_init_stddevs, collections.Sequence): self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers if not isinstance(bias_init_consts, collections.Sequence): self.bias_init_consts = [bias_init_consts] * n_layers if not isinstance(dropouts, collections.Sequence): self.dropouts = [dropouts] * n_layers if not isinstance(activation_fns, collections.Sequence): self.activation_fns = [activation_fns] * n_layers # Add the input features. 
self.mol_features = Feature(shape=(None, n_features)) all_layers = {} outputs = [] for task in range(self.n_tasks): task_layers = [] for i in range(n_layers): if i == 0: prev_layer = self.mol_features else: prev_layer = all_layers[(i - 1, task)] if task > 0: lateral_contrib, trainables = self.add_adapter( all_layers, task, i) task_layers.extend(trainables) layer = Dense(in_layers=[prev_layer], out_channels=layer_sizes[i], activation_fn=None, weights_initializer=TFWrapper( tf.truncated_normal_initializer, stddev=self.weight_init_stddevs[i]), biases_initializer=TFWrapper( tf.constant_initializer, value=self.bias_init_consts[i])) task_layers.append(layer) if i > 0 and task > 0: layer = layer + lateral_contrib assert self.activation_fns[ i] is tf.nn.relu, "Only ReLU is supported" layer = ReLU(in_layers=[layer]) if self.dropouts[i] > 0.0: layer = Dropout(self.dropouts[i], in_layers=[layer]) all_layers[(i, task)] = layer prev_layer = all_layers[(n_layers - 1, task)] layer = Dense(in_layers=[prev_layer], out_channels=1, weights_initializer=TFWrapper( tf.truncated_normal_initializer, stddev=self.weight_init_stddevs[-1]), biases_initializer=TFWrapper( tf.constant_initializer, value=self.bias_init_consts[-1])) task_layers.append(layer) if task > 0: lateral_contrib, trainables = self.add_adapter( all_layers, task, n_layers) task_layers.extend(trainables) layer = layer + lateral_contrib outputs.append(layer) self.add_output(layer) task_label = Label(shape=(None, 1)) task_weight = Weights(shape=(None, 1)) weighted_loss = ReduceSum( L2Loss(in_layers=[task_label, layer, task_weight])) self.create_submodel(layers=task_layers, loss=weighted_loss, optimizer=None) # Weight decay not activated """
def build_graph(self): """ Building graph structures: """ self.atom_features = Feature(shape=(None, 75)) self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32) self.membership = Feature(shape=(None,), dtype=tf.int32) self.deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) self.deg_adjs.append(deg_adj) gc1 = GraphConv( 64, activation_fn=tf.nn.relu, in_layers=[self.atom_features, self.degree_slice, self.membership] + self.deg_adjs) batch_norm1 = BatchNorm(in_layers=[gc1]) gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership] + self.deg_adjs) gc2 = GraphConv( 64, activation_fn=tf.nn.relu, in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs) batch_norm2 = BatchNorm(in_layers=[gc2]) gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership] + self.deg_adjs) dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2]) batch_norm3 = BatchNorm(in_layers=[dense]) readout = GraphGather( batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, self.degree_slice, self.membership] + self.deg_adjs) if self.error_bars == True: readout = Dropout(in_layers=[readout], dropout_prob=0.2) costs = [] self.my_labels = [] for task in range(self.n_tasks): if self.mode == 'classification': classification = Dense( out_channels=2, activation_fn=None, in_layers=[readout]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.my_labels.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == 'regression': regression = Dense( out_channels=1, activation_fn=None, in_layers=[readout]) self.add_output(regression) label = Label(shape=(None, 1)) self.my_labels.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": entropy = Concat(in_layers=costs, axis=-1) elif self.mode == "regression": entropy = Stack(in_layers=costs, axis=1) self.my_task_weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[entropy, self.my_task_weights]) self.set_loss(loss)
def graph_conv_model(batch_size, tasks):
  model = TensorGraph(
      model_dir=model_dir, batch_size=batch_size, use_queue=False)
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)

  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)

  gc1 = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  batch_norm1 = BatchNorm(in_layers=[gc1])
  gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)
  gc2 = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[gp1, degree_slice, membership] + deg_adjs)
  batch_norm2 = BatchNorm(in_layers=[gc2])
  gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)
  dense = Dense(out_channels=128, activation_fn=None, in_layers=[gp2])
  batch_norm3 = BatchNorm(in_layers=[dense])
  gg1 = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)

  costs = []
  labels = []
  for task in tasks:
    classification = Dense(out_channels=2, activation_fn=None, in_layers=[gg1])
    softmax = SoftMax(in_layers=[classification])
    model.add_output(softmax)
    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)
  entropy = Concat(in_layers=costs)
  task_weights = Weights(shape=(None, len(tasks)))
  loss = WeightedError(in_layers=[entropy, task_weights])
  model.set_loss(loss)

  def feed_dict_generator(dataset, batch_size, epochs=1):
    for epoch in range(epochs):
      for ind, (X_b, y_b, w_b, ids_b) in enumerate(
          dataset.iterbatches(batch_size, pad_batches=True)):
        d = {}
        for index, label in enumerate(labels):
          d[label] = to_one_hot(y_b[:, index])
        d[task_weights] = w_b
        multiConvMol = ConvMol.agglomerate_mols(X_b)
        d[atom_features] = multiConvMol.get_atom_features()
        d[degree_slice] = multiConvMol.deg_slice
        d[membership] = multiConvMol.membership
        for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
          d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
        yield d

  return model, feed_dict_generator, labels, task_weights
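# A hedged usage sketch of the tuple returned by graph_conv_model, following
# the fit_generator / predict_on_generator calls used elsewhere in these
# snippets. train_dataset, test_dataset, tasks, batch_size, and the model_dir
# global referenced above are assumed to be defined by the caller.
model, generator, labels, task_weights = graph_conv_model(batch_size, tasks)
model.fit_generator(generator(train_dataset, batch_size, epochs=10))
predictions = model.predict_on_generator(generator(test_dataset, batch_size))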
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             n_classes=2,
             **kwargs):
    """Create a MultitaskClassifier.

    In addition to the following arguments, this class also accepts all the
    keyword arguments from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list
      determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight
      initialization of each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a
      list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this
      list should equal len(layer_sizes).  Alternatively this may be a single
      value instead of a list, in which case the same value is used for every
      layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list
      should equal len(layer_sizes).  Alternatively this may be a single value
      instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the TensorFlow activation function to apply to each layer.  The length
      of this list should equal len(layer_sizes).  Alternatively this may be a
      single value instead of a list, in which case the same value is used for
      every layer.
    n_classes: int
      the number of classes
    """
    super(MultitaskClassifier, self).__init__(**kwargs)
    self.n_tasks = n_tasks
    self.n_features = n_features
    self.n_classes = n_classes
    n_layers = len(layer_sizes)
    if not isinstance(weight_init_stddevs, collections.Sequence):
      weight_init_stddevs = [weight_init_stddevs] * n_layers
    if not isinstance(bias_init_consts, collections.Sequence):
      bias_init_consts = [bias_init_consts] * n_layers
    if not isinstance(dropouts, collections.Sequence):
      dropouts = [dropouts] * n_layers
    if not isinstance(activation_fns, collections.Sequence):
      activation_fns = [activation_fns] * n_layers

    # Add the input features.
    mol_features = Feature(shape=(None, n_features))
    prev_layer = mol_features

    # Add the dense layers
    for size, weight_stddev, bias_const, dropout, activation_fn in zip(
        layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
        activation_fns):
      layer = Dense(
          in_layers=[prev_layer],
          out_channels=size,
          activation_fn=activation_fn,
          weights_initializer=TFWrapper(
              tf.truncated_normal_initializer, stddev=weight_stddev),
          biases_initializer=TFWrapper(
              tf.constant_initializer, value=bias_const))
      if dropout > 0.0:
        layer = Dropout(dropout, in_layers=[layer])
      prev_layer = layer

    # Compute the loss function for each label.
    self.neural_fingerprint = prev_layer
    logits = Reshape(
        shape=(-1, n_tasks, n_classes),
        in_layers=[
            Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
        ])
    output = SoftMax(logits)
    self.add_output(output)
    labels = Label(shape=(None, n_tasks, n_classes))
    weights = Weights(shape=(None, n_tasks))
    loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
    weighted_loss = WeightedError(in_layers=[loss, weights])
    if weight_decay_penalty != 0.0:
      weighted_loss = WeightDecay(
          weight_decay_penalty,
          weight_decay_penalty_type,
          in_layers=[weighted_loss])
    self.set_loss(weighted_loss)
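# A hedged end-to-end sketch of using the classifier defined above: random
# data with one binary task, constructor arguments mirroring the docstring,
# and fit/predict calls matching the tests in this collection. Shapes and
# hyperparameters are illustrative only.
import numpy as np
import deepchem as dc

X = np.random.rand(20, 10)
y = np.random.randint(2, size=(20, 1))
dataset = dc.data.NumpyDataset(X, y)

model = MultitaskClassifier(n_tasks=1, n_features=10, layer_sizes=[100])
model.fit(dataset, nb_epoch=10)
probabilities = model.predict(dataset)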
def graph_conv_net(batch_size, prior, num_task): """ Build a tensorgraph for multilabel classification task Return: features and labels layers """ tg = TensorGraph(use_queue=False) if prior == True: add_on = num_task else: add_on = 0 atom_features = Feature(shape=(None, 75 + 2 * add_on)) circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32) degree_slice = Feature(shape=(None, 2), dtype=tf.int32) membership = Feature(shape=(None, ), dtype=tf.int32) deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) deg_adjs.append(deg_adj) gc1 = GraphConv(64 + add_on, activation_fn=tf.nn.elu, in_layers=[atom_features, degree_slice, membership] + deg_adjs) batch_norm1 = BatchNorm(in_layers=[gc1]) gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs) gc2 = GraphConv(64 + add_on, activation_fn=tf.nn.elu, in_layers=[gc1, degree_slice, membership] + deg_adjs) batch_norm2 = BatchNorm(in_layers=[gc2]) gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs) add = Concat(in_layers=[gp1, gp2]) add = Dropout(0.5, in_layers=[add]) dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add]) batch_norm3 = BatchNorm(in_layers=[dense]) readout = GraphGather(batch_size=batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, degree_slice, membership] + deg_adjs) batch_norm4 = BatchNorm(in_layers=[readout]) dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features]) dense1 = BatchNorm(in_layers=[dense1]) dense1 = Dropout(0.5, in_layers=[dense1]) dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features]) dense1 = BatchNorm(in_layers=[dense1]) dense1 = Dropout(0.5, in_layers=[dense1]) merge_feat = Concat(in_layers=[dense1, batch_norm4]) merge = Dense(out_channels=256, activation_fn=tf.nn.elu, in_layers=[merge_feat]) costs = [] labels = [] for task in range(num_task): classification = Dense(out_channels=2, activation_fn=None, in_layers=[merge]) softmax = SoftMax(in_layers=[classification]) tg.add_output(softmax) label = Label(shape=(None, 2)) labels.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) all_cost = Stack(in_layers=costs, axis=1) weights = Weights(shape=(None, num_task)) loss = WeightedError(in_layers=[all_cost, weights]) tg.set_loss(loss) #if prior == True: # return tg, atom_features,circular_features, degree_slice, membership, deg_adjs, labels, weights#, prior_layer return tg, atom_features, circular_features, degree_slice, membership, deg_adjs, labels, weights
def build_graph(self): # inputs placeholder self.inputs = Feature(shape=(None, self.image_size, self.image_size, 3), dtype=tf.float32) # data preprocessing and augmentation in_layer = DRAugment(self.augment, self.batch_size, size=(self.image_size, self.image_size), in_layers=[self.inputs]) # first conv layer in_layer = Conv2D(int(self.n_init_kernel), kernel_size=7, activation_fn=None, in_layers=[in_layer]) in_layer = BatchNorm(in_layers=[in_layer]) in_layer = ReLU(in_layers=[in_layer]) # downsample by max pooling res_in = MaxPool2D(ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], in_layers=[in_layer]) for ct_module in range(self.n_downsample - 1): # each module is a residual convolutional block # followed by a convolutional downsample layer in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)), kernel_size=1, activation_fn=None, in_layers=[res_in]) in_layer = BatchNorm(in_layers=[in_layer]) in_layer = ReLU(in_layers=[in_layer]) in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)), kernel_size=3, activation_fn=None, in_layers=[in_layer]) in_layer = BatchNorm(in_layers=[in_layer]) in_layer = ReLU(in_layers=[in_layer]) in_layer = Conv2D(int(self.n_init_kernel * 2**ct_module), kernel_size=1, activation_fn=None, in_layers=[in_layer]) res_a = BatchNorm(in_layers=[in_layer]) res_out = res_in + res_a res_in = Conv2D(int(self.n_init_kernel * 2**(ct_module + 1)), kernel_size=3, stride=2, in_layers=[res_out]) res_in = BatchNorm(in_layers=[res_in]) # max pooling over the final outcome in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in]) for layer_size in self.n_fully_connected: # fully connected layers in_layer = Dense(layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer]) # dropout for dense layers #in_layer = Dropout(0.25, in_layers=[in_layer]) logit_pred = Dense(self.n_tasks * self.n_classes, activation_fn=None, in_layers=[in_layer]) logit_pred = Reshape(shape=(None, self.n_tasks, self.n_classes), in_layers=[logit_pred]) weights = Weights(shape=(None, self.n_tasks)) labels = Label(shape=(None, self.n_tasks), dtype=tf.int32) output = SoftMax(logit_pred) self.add_output(output) loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred]) weighted_loss = WeightedError(in_layers=[loss, weights]) # weight decay regularizer # weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss]) self.set_loss(weighted_loss)
def build_graph(self): """Building graph structures: Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.pair_features = Feature(shape=(None, self.n_pair_feat)) combined = Combine_AP(in_layers=[self.atom_features, self.pair_features]) self.pair_split = Feature(shape=(None,), dtype=tf.int32) self.atom_split = Feature(shape=(None,), dtype=tf.int32) self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32) weave_layer1 = WeaveLayer( n_atom_input_feat=self.n_atom_feat, n_pair_input_feat=self.n_pair_feat, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, in_layers=[combined, self.pair_split, self.atom_to_pair]) weave_layer2 = WeaveLayer( n_atom_input_feat=self.n_hidden, n_pair_input_feat=self.n_hidden, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, update_pair=False, in_layers=[weave_layer1, self.pair_split, self.atom_to_pair]) separated = Separate_AP(in_layers=[weave_layer2]) dense1 = Dense( out_channels=self.n_graph_feat, activation_fn=tf.nn.tanh, in_layers=[separated]) batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1]) weave_gather = WeaveGather( self.batch_size, n_input=self.n_graph_feat, gaussian_expand=True, in_layers=[batch_norm1, self.atom_split]) costs = [] self.labels_fd = [] for task in range(self.n_tasks): if self.mode == "classification": classification = Dense( out_channels=2, activation_fn=None, in_layers=[weave_gather]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.labels_fd.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == "regression": regression = Dense( out_channels=1, activation_fn=None, in_layers=[weave_gather]) self.add_output(regression) label = Label(shape=(None, 1)) self.labels_fd.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": all_cost = Concat(in_layers=costs, axis=1) elif self.mode == "regression": all_cost = Stack(in_layers=costs, axis=1) self.weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[all_cost, self.weights]) self.set_loss(loss)
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             bypass_layer_sizes=[100],
             bypass_weight_init_stddevs=[.02],
             bypass_bias_init_consts=[1.],
             bypass_dropouts=[.5],
             **kwargs):
  """Create a RobustMultitaskRegressor.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes).  Alternatively this may be a single value instead of a
    list, in which case the same value is used for every layer.
  bias_init_consts: list or float
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes).  Alternatively this may be a single
    value instead of a list, in which case the same value is used for every
    layer.
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list or float
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).  Alternatively this may be a single value
    instead of a list, in which case the same value is used for every layer.
  activation_fns: list or object
    the TensorFlow activation function to apply to each layer.  The length
    of this list should equal len(layer_sizes).  Alternatively this may be a
    single value instead of a list, in which case the same value is used for
    every layer.
  bypass_layer_sizes: list
    the size of each dense layer in the bypass network.  The length of this
    list determines the number of bypass layers.
  bypass_weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of bypass layers.  Same requirements as
    weight_init_stddevs.
  bypass_bias_init_consts: list or float
    the value to initialize the biases in bypass layers to.  Same
    requirements as bias_init_consts.
  bypass_dropouts: list or float
    the dropout probability to use for bypass layers.  Same requirements as
    dropouts.
  """
  super(RobustMultitaskRegressor, self).__init__(**kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  n_layers = len(layer_sizes)
  if not isinstance(weight_init_stddevs, collections.Sequence):
    weight_init_stddevs = [weight_init_stddevs] * n_layers
  if not isinstance(bias_init_consts, collections.Sequence):
    bias_init_consts = [bias_init_consts] * n_layers
  if not isinstance(dropouts, collections.Sequence):
    dropouts = [dropouts] * n_layers
  if not isinstance(activation_fns, collections.Sequence):
    activation_fns = [activation_fns] * n_layers
  n_bypass_layers = len(bypass_layer_sizes)
  if not isinstance(bypass_weight_init_stddevs, collections.Sequence):
    bypass_weight_init_stddevs = [bypass_weight_init_stddevs] * n_bypass_layers
  if not isinstance(bypass_bias_init_consts, collections.Sequence):
    bypass_bias_init_consts = [bypass_bias_init_consts] * n_bypass_layers
  if not isinstance(bypass_dropouts, collections.Sequence):
    bypass_dropouts = [bypass_dropouts] * n_bypass_layers
  bypass_activation_fns = [activation_fns[0]] * n_bypass_layers

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the shared dense layers
  for size, weight_stddev, bias_const, dropout, activation_fn in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
      activation_fns):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=activation_fn,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer
  top_multitask_layer = prev_layer

  task_outputs = []
  for i in range(self.n_tasks):
    prev_layer = mol_features
    # Add task-specific bypass layers
    for size, weight_stddev, bias_const, dropout, activation_fn in zip(
        bypass_layer_sizes, bypass_weight_init_stddevs,
        bypass_bias_init_consts, bypass_dropouts, bypass_activation_fns):
      layer = Dense(
          in_layers=[prev_layer],
          out_channels=size,
          activation_fn=activation_fn,
          weights_initializer=TFWrapper(
              tf.truncated_normal_initializer, stddev=weight_stddev),
          biases_initializer=TFWrapper(
              tf.constant_initializer, value=bias_const))
      if dropout > 0.0:
        layer = Dropout(dropout, in_layers=[layer])
      prev_layer = layer
    top_bypass_layer = prev_layer
    if n_bypass_layers > 0:
      task_layer = Concat(
          axis=1, in_layers=[top_multitask_layer, top_bypass_layer])
    else:
      task_layer = top_multitask_layer
    task_out = Dense(in_layers=[task_layer], out_channels=1)
    task_outputs.append(task_out)

  output = Concat(axis=1, in_layers=task_outputs)
  self.add_output(output)
  labels = Label(shape=(None, n_tasks))
  weights = Weights(shape=(None, n_tasks))
  weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
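
# Minimal, hedged usage sketch for the constructor above: fit a
# RobustMultitaskRegressor on small random data. The sample count, layer
# widths, and epoch count are illustrative assumptions.
import numpy as np
import deepchem as dc

n_samples, n_features, n_tasks = 30, 10, 2
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
dataset = dc.data.NumpyDataset(X, y)

model = dc.models.RobustMultitaskRegressor(
    n_tasks, n_features, layer_sizes=[50], bypass_layer_sizes=[10])
model.fit(dataset, nb_epoch=1)
print(model.predict(dataset).shape)  # one column of predictions per task
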
def sluice_model(batch_size, tasks):
  model = TensorGraph(
      model_dir=model_dir,
      batch_size=batch_size,
      use_queue=False,
      tensorboard=True)
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)

  sluice_loss = []
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)

  gc1 = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)

  as1 = AlphaShare(in_layers=[gc1, gc1])
  sluice_loss.append(gc1)

  batch_norm1a = BatchNorm(in_layers=[as1[0]])
  batch_norm1b = BatchNorm(in_layers=[as1[1]])

  gp1a = GraphPool(
      in_layers=[batch_norm1a, degree_slice, membership] + deg_adjs)
  gp1b = GraphPool(
      in_layers=[batch_norm1b, degree_slice, membership] + deg_adjs)

  gc2a = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[gp1a, degree_slice, membership] + deg_adjs)
  gc2b = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[gp1b, degree_slice, membership] + deg_adjs)

  as2 = AlphaShare(in_layers=[gc2a, gc2b])
  sluice_loss.append(gc2a)
  sluice_loss.append(gc2b)

  batch_norm2a = BatchNorm(in_layers=[as2[0]])
  batch_norm2b = BatchNorm(in_layers=[as2[1]])

  gp2a = GraphPool(
      in_layers=[batch_norm2a, degree_slice, membership] + deg_adjs)
  gp2b = GraphPool(
      in_layers=[batch_norm2b, degree_slice, membership] + deg_adjs)

  densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
  denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])

  batch_norm3a = BatchNorm(in_layers=[densea])
  batch_norm3b = BatchNorm(in_layers=[denseb])

  as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
  sluice_loss.append(batch_norm3a)
  sluice_loss.append(batch_norm3b)

  gg1a = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[as3[0], degree_slice, membership] + deg_adjs)
  gg1b = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[as3[1], degree_slice, membership] + deg_adjs)

  costs = []
  labels = []
  count = 0
  for task in tasks:
    if count < len(tasks) / 2:
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[gg1a])
      print("first half:")
      print(task)
    else:
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[gg1b])
      print("second half")
      print(task)
    count += 1

    softmax = SoftMax(in_layers=[classification])
    model.add_output(softmax)

    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)

  entropy = Concat(in_layers=costs)
  task_weights = Weights(shape=(None, len(tasks)))
  task_loss = WeightedError(in_layers=[entropy, task_weights])

  s_cost = SluiceLoss(in_layers=sluice_loss)

  total_loss = Add(in_layers=[task_loss, s_cost])
  model.set_loss(total_loss)

  def feed_dict_generator(dataset, batch_size, epochs=1):
    for epoch in range(epochs):
      for ind, (X_b, y_b, w_b, ids_b) in enumerate(
          dataset.iterbatches(batch_size, pad_batches=True)):
        d = {}
        for index, label in enumerate(labels):
          d[label] = to_one_hot(y_b[:, index])
        d[task_weights] = w_b
        multiConvMol = ConvMol.agglomerate_mols(X_b)
        d[atom_features] = multiConvMol.get_atom_features()
        d[degree_slice] = multiConvMol.deg_slice
        d[membership] = multiConvMol.membership
        for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
          d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
        yield d

  return model, feed_dict_generator, labels, task_weights
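
# Hedged sketch of driving the sluice factory above. sluice_model reads
# model_dir from module scope, so define it first; the dataset must be
# ConvMol-featurized (e.g. the GraphConv featurization of Tox21) so that
# feed_dict_generator can agglomerate the molecules. Batch size and epoch
# count are illustrative assumptions.
import deepchem as dc

model_dir = "/tmp/sluice_model"  # consumed inside sluice_model
tox21_tasks, (train, valid, test), transformers = dc.molnet.load_tox21(
    featurizer='GraphConv')
model, generator, labels, task_weights = sluice_model(
    batch_size=64, tasks=tox21_tasks)
model.fit_generator(generator(train, 64, epochs=10))
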
def build_graph(self):
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding
  self.Embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  self.pooled_outputs = []
  self.conv_layers = []
  for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
    # Multiple convolutional layers with different filter widths
    self.conv_layers.append(
        Conv1D(
            kernel_size=filter_size,
            filters=num_filter,
            padding='valid',
            in_layers=[self.Embedding]))
    # Max-over-time pooling
    self.pooled_outputs.append(
        ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
  # Concat features from all filters (one feature per filter)
  concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
  dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  dense = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  self.gather = Highway(in_layers=[dense])

  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    if self.mode == "classification":
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[self.gather])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)

      label = Label(shape=(None, 2))
      self.labels_fd.append(label)
      cost = SoftMaxCrossEntropy(in_layers=[label, classification])
      costs.append(cost)
    if self.mode == "regression":
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[self.gather])
      self.add_output(regression)

      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)
  if self.mode == "classification":
    all_cost = Stack(in_layers=costs, axis=1)
  elif self.mode == "regression":
    all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
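
# Hedged usage sketch, assuming the graph above is built by DeepChem's
# TensorGraph-era TextCNN model over SMILES strings. build_char_dict is the
# helper that model shipped with for deriving the character vocabulary and
# maximum sequence length from a dataset; treat the exact class name,
# featurizer string, and defaults below as assumptions.
import deepchem as dc

tasks, (train, valid, test), transformers = dc.molnet.load_tox21(
    featurizer='Raw')
char_dict, seq_length = dc.models.TextCNNModel.build_char_dict(train)
model = dc.models.TextCNNModel(
    len(tasks),
    char_dict=char_dict,
    seq_length=seq_length,
    mode="classification",
    batch_size=64)
model.fit(train, nb_epoch=1)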