Example #1
    def test_mpnn_regression_model(self):
        tasks, dataset, transformers, metric = self.get_dataset(
            'regression', 'Weave')

        model = MPNNModel(len(tasks),
                          mode='regression',
                          n_hidden=75,
                          n_atom_feat=75,
                          n_pair_feat=14,
                          T=1,
                          M=1)

        model.fit(dataset, nb_epoch=50)
        scores = model.evaluate(dataset, [metric], transformers)
        assert all(s < 0.1 for s in scores['mean_absolute_error'])

        model.save()
        model = TensorGraph.load_from_dir(model.model_dir)
        scores2 = model.evaluate(dataset, [metric], transformers)
        assert np.allclose(scores['mean_absolute_error'],
                           scores2['mean_absolute_error'])
Example #2
    def test_mpnn_model(self):
        tasks, dataset, transformers, metric = self.get_dataset(
            'classification', 'Weave')

        model = MPNNModel(len(tasks),
                          mode='classification',
                          n_hidden=75,
                          n_atom_feat=75,
                          n_pair_feat=14,
                          T=1,
                          M=1)

        model.fit(dataset, nb_epoch=20)
        scores = model.evaluate(dataset, [metric], transformers)
        assert scores['mean-roc_auc_score'] >= 0.9

        model.save()
        model = TensorGraph.load_from_dir(model.model_dir)
        scores2 = model.evaluate(dataset, [metric], transformers)
        assert np.allclose(scores['mean-roc_auc_score'],
                           scores2['mean-roc_auc_score'])
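
Examples #1 and #2 are methods from DeepChem's graph-model test suite, so they rely on module-level imports (numpy, MPNNModel, TensorGraph) and on a get_dataset helper defined elsewhere in that suite. The standalone sketch below shows roughly what such a helper could look like; the SMILES list, task name, and metric choices are illustrative assumptions, not the actual test fixture.

import numpy as np
import deepchem as dc
from rdkit import Chem

def get_dataset(mode, featurizer_name='Weave'):
    # Featurize a handful of molecules with WeaveFeaturizer, which produces the
    # per-atom (75) and per-pair (14) features that MPNNModel expects.
    smiles = ['CCO', 'CC(=O)O', 'c1ccccc1', 'CCN', 'CCCC']
    mols = [Chem.MolFromSmiles(s) for s in smiles]
    X = dc.feat.WeaveFeaturizer().featurize(mols)
    tasks = ['task0']
    if mode == 'regression':
        y = np.random.rand(len(smiles), len(tasks))
        metric = dc.metrics.Metric(dc.metrics.mean_absolute_error, mode='regression')
    else:
        y = np.random.randint(0, 2, size=(len(smiles), len(tasks)))
        # task_averager np.mean gives the 'mean-roc_auc_score' key used above
        metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode='classification')
    dataset = dc.data.NumpyDataset(X, y)
    transformers = []  # no normalization needed for this toy dataset
    return tasks, dataset, transformers, metric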
Example #5
# Hyperparameters (n_tasks, n_atom_feat, train_dataset, valid_dataset and
# metric are assumed to be defined earlier in the script, not shown here)
n_pair_feat = 14
batch_size = 64
n_hidden = 100
T = 3
M = 5
nb_epoch = 10
model = MPNNModel(n_tasks=n_tasks, n_atom_feat=n_atom_feat, n_pair_feat=n_pair_feat,
                  n_hidden=n_hidden, T=T, M=M,
                  mode="regression",
                  batch_size=batch_size,
                  learning_rate=0.0001,  # small learning rate to prevent overfitting
                  model_dir="/home/rod/Dropbox/Quimica/Analysis/ANalisis/Borradores/MPNNModel/")

# Fit the model
print("Fitting model")
model.fit(train_dataset, nb_epoch=nb_epoch)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric])
valid_scores = model.evaluate(valid_dataset, [metric])

print("Train scores")
print(train_scores)

print("Validation scores")
print(valid_scores)

"""  
With featurizer = dc.feat.ConvMolFeaturizer()
----------------------------------------
Train scores
{'mean-pearson_r2_score': 0.9637847589740351}