Example #1
import os
import numpy as np
import deepchem as dc
from deepchem.molnet import load_pdbbind_grid
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_grid(
    split='random', subset='full')
train_data, valid_data, test_data = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
# Build a multitask DNN regressor on the flattened grid features
model = dc.models.MultitaskRegressor(len(pdbbind_tasks),
                                     train_data.X.shape[1],
                                     dropouts=[.25],
                                     learning_rate=0.0003,
                                     weight_init_stddevs=[.1],
                                     batch_size=64,
                                     model_dir='pdbbind_test')

model.fit(train_data, nb_epoch=100)
train_score = model.evaluate(train_data, [metric], transformers)
valid_score = model.evaluate(valid_data, [metric], transformers)
print("Train scores: %s" % str(train_score))
print("Validation scores: %s" % str(valid_score))

# Featurize a single protein-ligand complex with the same grid representation
featurizer = dc.feat.RdkitGridFeaturizer(
    voxel_width=16.0,
    feature_types=["ecfp", "splif", "hbond", "salt_bridge"],
    ecfp_power=9,
    splif_power=9,
    flatten=True)

grid = featurizer.featurize_complexes(['ligand2.sdf'], ['3lpt.pdb'])
print(grid)

# To reload the trained model from its checkpoints later:
# reload = dc.models.MultitaskRegressor(len(pdbbind_tasks), train_data.X.shape[1], dropouts=[.25], learning_rate=0.0003, weight_init_stddevs=[.1], batch_size=64, model_dir='pdbbind_test')
# print(reload.get_checkpoints())
# reload.restore()
# reload.evaluate(train_data, [metric], transformers)
# reload.evaluate(valid_data, [metric], transformers)
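
# A possible next step (not part of the original example): score the newly
# featurized complex with the trained model. Depending on the DeepChem
# version, featurize_complexes may return a bare array or an
# (array, failure-indices) pair, and the flattened grid width must match
# train_data.X.shape[1] for this to work, so treat it as a sketch.
grid_features = grid[0] if isinstance(grid, tuple) else grid
grid_features = np.asarray(grid_features)
if grid_features.ndim == 2 and grid_features.shape[1] == train_data.X.shape[1]:
    print("Predicted affinity (transformed units):")
    print(model.predict_on_batch(grid_features))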
Example #2
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"

import os
import numpy as np
import tensorflow as tf
# For stable runs
np.random.seed(123)
tf.set_random_seed(123)

import deepchem as dc
from deepchem.molnet import load_pdbbind_grid

split = "random"
subset = "full"
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_grid(
    split=split, subset=subset)
train_dataset, valid_dataset, test_dataset = pdbbind_datasets

metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)

current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, "%s_%s_DNN" % (split, subset))

n_features = train_dataset.X.shape[1]
# Build a multitask DNN regressor on the grid features
model = dc.models.MultitaskRegressor(len(pdbbind_tasks),
                                     n_features,
                                     model_dir=model_dir,
                                     dropouts=[.25],
                                     learning_rate=0.0003,
                                     weight_init_stddevs=[.1],
                                     batch_size=64)
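
# The original snippet stops after constructing the model. A minimal sketch of
# training and evaluation, mirroring Example #1 (the epoch count is illustrative):
model.fit(train_dataset, nb_epoch=100)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)

print("Train scores")
print(train_scores)

print("Validation scores")
print(valid_scores)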
Example #3
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"

import os
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from deepchem.molnet import load_pdbbind_grid

# For stable runs
np.random.seed(123)

split = "random"
subset = "full"
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_grid(
    split=split, subset=subset)
train_dataset, valid_dataset, test_dataset = pdbbind_datasets

metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)

current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, "%s_%s_RF" % (split, subset))

# Random forest baseline on the same grid features
sklearn_model = RandomForestRegressor(n_estimators=500)
model = dc.models.SklearnModel(sklearn_model, model_dir=model_dir)

# Fit the model on the training set and persist it to model_dir
print("Fitting model on train dataset")
model.fit(train_dataset)
model.save()
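
# A minimal follow-up sketch (not in the original snippet): evaluate the saved
# random forest with the same Pearson R^2 metric and transformers used above.
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)

print("Train scores")
print(train_scores)

print("Validation scores")
print(valid_scores)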