import numpy as np

from deepchem.models import MPNNModel
from deepchem.models import TensorGraph  # DeepChem 2.x TensorGraph API


def test_mpnn_regression_uncertainty():
    tasks, dataset, transformers, metric = get_dataset('regression', 'Weave')
    batch_size = 10
    model = MPNNModel(
        len(tasks),
        mode='regression',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1,
        dropout=0.1,
        batch_size=batch_size,
        uncertainty=True)
    model.fit(dataset, nb_epoch=40)

    # Predict the output and uncertainty.
    pred, std = model.predict_uncertainty(dataset)
    mean_error = np.mean(np.abs(dataset.y - pred))
    mean_value = np.mean(np.abs(dataset.y))
    mean_std = np.mean(std)
    # The error should be reasonably small, and the predicted uncertainty
    # should be on the same scale as the observed error.
    assert mean_error < 0.5 * mean_value
    assert mean_std > 0.5 * mean_error
    assert mean_std < mean_value
def test_mpnn_regression_uncertainty(self):
    tasks, dataset, transformers, metric = self.get_dataset('regression', 'Weave')
    model = MPNNModel(
        len(tasks),
        mode='regression',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1,
        dropout=0.1,
        uncertainty=True)
    model.fit(dataset, nb_epoch=40)

    # Predict the output and uncertainty.
    pred, std = model.predict_uncertainty(dataset)
    mean_error = np.mean(np.abs(dataset.y - pred))
    mean_value = np.mean(np.abs(dataset.y))
    mean_std = np.mean(std)
    assert mean_error < 0.5 * mean_value
    assert mean_std > 0.5 * mean_error
    assert mean_std < mean_value
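These tests rely on a get_dataset helper (module-level in the first variant, a TestCase method in the others) that is not shown here. Below is a minimal sketch of what it might look like, assuming DeepChem's MolNet loaders; the test suite's actual helper may subsample or transform the data differently, so treat the names and dataset choices as illustrative.

import numpy as np
import deepchem as dc


def get_dataset(mode, featurizer='Weave'):
    # Hypothetical helper: pick a small MolNet benchmark matching the mode.
    if mode == 'classification':
        tasks, (train, _, _), transformers = dc.molnet.load_bace_classification(
            featurizer=featurizer)
        # Produces the 'mean-roc_auc_score' key asserted on in the tests.
        metric = dc.metrics.Metric(
            dc.metrics.roc_auc_score, np.mean, mode='classification')
    else:
        tasks, (train, _, _), transformers = dc.molnet.load_delaney(
            featurizer=featurizer)
        # Produces the 'mean_absolute_error' key asserted on in the tests.
        metric = dc.metrics.Metric(
            dc.metrics.mean_absolute_error, mode='regression')
    return tasks, train, transformers, metric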
def test_mpnn_regression_model(self):
    tasks, dataset, transformers, metric = self.get_dataset('regression', 'Weave')
    model = MPNNModel(
        len(tasks),
        mode='regression',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1)
    model.fit(dataset, nb_epoch=50)
    scores = model.evaluate(dataset, [metric], transformers)
    assert all(s < 0.1 for s in scores['mean_absolute_error'])
def test_mpnn_model(self):
    tasks, dataset, transformers, metric = self.get_dataset('classification', 'Weave')
    model = MPNNModel(
        len(tasks),
        mode='classification',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1)
    model.fit(dataset, nb_epoch=20)
    scores = model.evaluate(dataset, [metric], transformers)
    assert scores['mean-roc_auc_score'] >= 0.9
def test_mpnn_regression_model():
    tasks, dataset, transformers, metric = get_dataset('regression', 'Weave')
    batch_size = 10
    model = MPNNModel(
        len(tasks),
        mode='regression',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1,
        batch_size=batch_size)
    model.fit(dataset, nb_epoch=60)
    scores = model.evaluate(dataset, [metric], transformers)
    assert scores['mean_absolute_error'] < 0.1
def test_mpnn_regression_model(self):
    tasks, dataset, transformers, metric = self.get_dataset('regression', 'Weave')
    model = MPNNModel(
        len(tasks),
        mode='regression',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1)
    model.fit(dataset, nb_epoch=50)
    scores = model.evaluate(dataset, [metric], transformers)
    assert all(s < 0.1 for s in scores['mean_absolute_error'])

    # Save, reload from disk, and check that the restored model
    # reproduces the original scores.
    model.save()
    model = TensorGraph.load_from_dir(model.model_dir)
    scores2 = model.evaluate(dataset, [metric], transformers)
    assert np.allclose(scores['mean_absolute_error'],
                       scores2['mean_absolute_error'])
def test_mpnn_model(self):
    tasks, dataset, transformers, metric = self.get_dataset('classification', 'Weave')
    model = MPNNModel(
        len(tasks),
        mode='classification',
        n_hidden=75,
        n_atom_feat=75,
        n_pair_feat=14,
        T=1,
        M=1)
    model.fit(dataset, nb_epoch=20)
    scores = model.evaluate(dataset, [metric], transformers)
    assert scores['mean-roc_auc_score'] >= 0.9

    # Save, reload from disk, and check that the restored model
    # reproduces the original scores.
    model.save()
    model = TensorGraph.load_from_dir(model.model_dir)
    scores2 = model.evaluate(dataset, [metric], transformers)
    assert np.allclose(scores['mean-roc_auc_score'],
                       scores2['mean-roc_auc_score'])
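The standalone fitting script below references train_dataset and valid_dataset without defining them. Here is a minimal, self-contained sketch of one way they might be produced, assuming MolNet's Delaney (ESOL) aqueous-solubility loader with the Weave featurizer, whose atom (75) and pair (14) feature sizes match the model arguments used in the script; this setup is an assumption, not part of the original.

import numpy as np
import deepchem as dc
from deepchem.models import MPNNModel

# Hypothetical data setup: Delaney solubility benchmark, Weave-featurized
# so the MPNN's atom/pair feature inputs line up with n_atom_feat/n_pair_feat.
tasks, datasets, transformers = dc.molnet.load_delaney(
    featurizer='Weave', split='random')
train_dataset, valid_dataset, test_dataset = datasets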
# Fit
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)

# Do setup required for tf/keras models
n_tasks = 1  # Only solubility to predict
n_atom_feat = 75
n_pair_feat = 14
batch_size = 64
n_hidden = 100
T = 3
M = 5
nb_epoch = 10

model = MPNNModel(
    n_tasks=n_tasks,
    n_atom_feat=n_atom_feat,
    n_pair_feat=n_pair_feat,
    n_hidden=n_hidden,
    T=T,
    M=M,
    mode="regression",
    batch_size=batch_size,
    learning_rate=0.0001,  # Small learning rate to prevent overfitting
    model_dir="/home/rod/Dropbox/Quimica/Analysis/ANalisis/Borradores/MPNNModel/")

# Fit trained model
print("Fitting model")
model.fit(train_dataset, nb_epoch=nb_epoch)
model.save()

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric])
valid_scores = model.evaluate(valid_dataset, [metric])

print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
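As a short follow-on usage sketch, the fitted model can also produce raw predictions; assuming the transformers returned by the hypothetical loader above, passing them to predict maps the outputs back to the original solubility units.

# Predict validation-set solubilities; the transformers undo the
# normalization applied when the dataset was loaded.
valid_preds = model.predict(valid_dataset, transformers)
print(valid_preds[:5])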