# Train a graph-convolution regressor for LogD7.4 from CSV train/test sets
# and print its predictions on the test set.
import warnings
warnings.filterwarnings('ignore')

import deepchem as dc
# from deepchem.models.tensorgraph.models.graph_models import MPNNTensorGraph
from deepchem.models.tensorgraph.models.graph_models import GraphConvModel
# from deepchem.feat import WeaveFeaturizer
from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.graph_features import WeaveFeaturizer
from deepchem.data.data_loader import CSVLoader
import pandas as pd
import numpy as np

# Conv-mol featurization; the Weave alternative is kept for reference.
featurizer = ConvMolFeaturizer()
# featurizer = WeaveFeaturizer(graph_distance=True, explicit_H=False)

train_loader = CSVLoader(tasks=['LogD7.4'], smiles_field='smiles',
                         featurizer=featurizer)
test_loader = CSVLoader(tasks=['LogD7.4'], smiles_field='smiles',
                        featurizer=featurizer)

X_train = train_loader.featurize('../demo_data/reg/training_set.csv')
X_test = test_loader.featurize('../demo_data/reg/testing_set.csv')

model = GraphConvModel(n_tasks=1, mode='regression')
model.fit(X_train)
print(model.predict(X_test))
# One few-shot trial: fit on the support set, score the held-out task
# examples, then aggregate per-task mean/std across accumulated trials.
print("Starting trial %d" % trial_num)

n_feat = 75      # per-atom feature count on conv-mols (legacy constant)
batch_size = 50  # model batch size

# graph_model = dc.nn.SequentialGraph(n_feat)
model = GraphConvModel(
    1,
    graph_conv_layers=[64, 128, 64],
    batch_size=batch_size)

# Fit on the support set only.
model.fit(support, nb_epoch=10)

# Evaluate on the test examples for this task, minus the support molecules.
task_dataset = dc.data.get_task_dataset_minus_support(
    test_dataset, support, task)
y_pred = model.predict(task_dataset)
score = metric.compute_metric(task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)

# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
    scores_arr = np.array(task_scores[task])
    mean_task_scores[task] = np.mean(scores_arr)
    std_task_scores[task] = np.std(scores_arr)

print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
# Fit a graph-conv model on the support set and score this task, then
# summarize all accumulated task scores (mean and standard deviation).
n_feat = 75      # conv-mol atom feature count (kept for the legacy model)
batch_size = 50  # model batch size

# graph_model = dc.nn.SequentialGraph(n_feat)
model = GraphConvModel(1,
                       graph_conv_layers=[64, 128, 64],
                       batch_size=batch_size)
model.fit(support, nb_epoch=10)

# Score on the task's test molecules, excluding the support set.
task_dataset = dc.data.get_task_dataset_minus_support(
    test_dataset, support, task)
y_pred = model.predict(task_dataset)
score = metric.compute_metric(task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)

# Summarize results across every task seen so far.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
    per_task = np.array(task_scores[task])
    mean_task_scores[task] = np.mean(per_task)
    std_task_scores[task] = np.std(per_task)

print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
def graph_conv_training():
    """Train a graph-convolution regressor from the GUI's settings.

    Reads the task name, SMILES/ID columns, CSV path, and epoch count from
    the Tk variables (``t_task``, ``t_smiles``, ``t_id``, ``t_csv``,
    ``t_epochs``), trains a regression model, reports R2 scores back to the
    GUI, renders a measured-vs-predicted scatter plot onto the canvas, and
    saves train predictions to ``pred_and_meas_train.csv``.
    """
    graph_featurizer = dc.feat.graph_features.ConvMolFeaturizer()
    loader = dc.data.data_loader.CSVLoader(
        tasks=[t_task.get()],
        smiles_field=t_smiles.get(),
        id_field=t_id.get(),
        featurizer=graph_featurizer)
    dataset = loader.featurize(t_csv.get())

    splitter = dc.splits.splitters.RandomSplitter()
    trainset, testset = splitter.train_test_split(dataset)

    hp = dc.molnet.preset_hyper_parameters
    param = hp.hps['graphconvreg']
    print(param)
    batch_size = 48

    from deepchem.models.tensorgraph.models.graph_models import GraphConvModel
    # BUG FIX: the original constructed a GraphConvModel here and then
    # immediately overwrote it with the GraphConvTensorGraph below, wasting
    # a full graph build; the dead construction has been removed.
    model = dc.models.GraphConvTensorGraph(
        1,
        batch_size=batch_size,
        learning_rate=1e-3,
        use_queue=False,
        mode='regression',
        model_dir=t_savename.get())

    np.random.seed(1)
    random.seed(1)
    # NOTE(review): fitting on the full dataset means testset overlaps the
    # training data, so test_score below is optimistic. The commented line
    # trains on the split only — confirm which behavior is intended.
    model.fit(dataset, nb_epoch=max(1, int(t_epochs.get())))
    # model.fit(trainset, nb_epoch=max(1, int(t_epochs.get())))

    metric = dc.metrics.Metric(dc.metrics.r2_score)
    print('epoch: ', t_epochs.get())
    print("Evaluating model")
    train_score = model.evaluate(trainset, [metric])
    test_score = model.evaluate(testset, [metric])
    model.save()

    pred_train = model.predict(trainset)
    pred_test = model.predict(testset)
    y_train = np.array(trainset.y, dtype=np.float32)
    y_test = np.array(testset.y, dtype=np.float32)

    import matplotlib.pyplot as plt
    # BUG FIX: removed the extra bare plt.figure() that created an unused
    # empty figure immediately before the sized one.
    plt.figure(figsize=(5, 5))
    plt.scatter(y_train, pred_train, label='Train', c='blue')
    plt.title('Graph Convolution')
    plt.xlabel('Measured value')
    plt.ylabel('Predicted value')
    plt.scatter(y_test, pred_test, c='lightgreen', label='Test', alpha=0.8)
    plt.legend(loc=4)
    # plt.show()
    plt.savefig('score-tmp.png')

    # Resize the saved plot to fit the 400x400 canvas area.
    from PIL import Image
    img = Image.open('score-tmp.png')
    img_resize = img.resize((400, 400), Image.LANCZOS)
    img_resize.save('score-tmp.png')

    # Keep a module-level reference so Tk does not garbage-collect the image.
    global image_score
    image_score_open = Image.open('score-tmp.png')
    image_score = ImageTk.PhotoImage(image_score_open, master=frame1)
    canvas.create_image(200, 200, image=image_score)

    # Calculate R2 score and push it to the GUI fields.
    print("Train score")
    print(train_score)
    t_train_r2.set(train_score)
    print("Test scores")
    print(test_score)
    t_test_r2.set(test_score)

    # Calculate RMSE — currently stubbed with placeholder values.
    train_rmse = 1
    test_rmse = 1
    '''
    print("Train RMSE")
    print(train_rmse)
    t_train_rmse.set(train_rmse)
    print("Test RMSE")
    print(test_rmse)
    t_test_rmse.set(test_rmse)
    '''

    # NOTE(review): model.predict typically returns a 2-D (n, 1) array;
    # confirm pred_train's shape is DataFrame-compatible for this DeepChem
    # version, else flatten before building the frame.
    df_save = pd.DataFrame({'pred_train': pred_train, 'meas_train': y_train})
    df_save.to_csv('pred_and_meas_train.csv')
    print('finish!')
trainset, testset = splitter.train_test_split(dataset) X_oversampled, y_oversampled = ros.fit_resample( np.atleast_2d(X_embeddings[0]).T, labels) test_classifier = GraphConvModel(1, graph_conv_layers=[64, 64], dense_layer_size=128, dropout=0.5, model_dir='models', mode='classification', number_atom_features=75, n_classes=2, uncertainty=False, use_queue=False, tensorboard=True) test_classifier.fit(trainset, nb_epoch=10) dnn_preds = test_classifier.predict(testset) break # hp = dc.molnet.preset_hyper_parameters # param = hp.hps[ 'graphconvreg' ] # print(param['batch_size']) # g = tf.Graph() # graph_model = dc.nn.SequentialGraph( 75 ) # graph_model.add( dc.nn.GraphConv( int(param['n_filters']), 75, activation='relu' )) # graph_model.add( dc.nn.BatchNormalization( epsilon=1e-5, mode=1 )) # graph_model.add( dc.nn.GraphPool() ) # graph_model.add( dc.nn.GraphConv( int(param['n_filters']), int(param['n_filters']), activation='relu' )) # graph_model.add( dc.nn.BatchNormalization( epsilon=1e-5, mode=1 )) # graph_model.add( dc.nn.GraphPool() ) # graph_model.add( dc.nn.Dense( int(param['n_fully_connected_nodes']), int(param['n_filters']), activation='relu' )) # graph_model.add( dc.nn.BatchNormalization( epsilon=1e-5, mode=1 )) # #graph_model.add( dc.nn.GraphGather(param['batch_size'], activation='tanh'))
# Train a graph-conv regressor with dropout, report Pearson R^2 on both
# splits, and stage the test predictions for Excel export.
model = GraphConvModel(n_tasks=1, mode='regression', dropout=0.2)
model.fit(dataset_train, nb_epoch=1000)

# In[10]: evaluate on train and test splits
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
print(model.evaluate(dataset_train, [metric]))
print(model.evaluate(dataset_test, [metric]))

# In[11]:
test_preds = model.predict(dataset_test)

# In[12]:
import pandas as pd

# In[13]: wrap predictions in a single-column DataFrame and open the writer
print(test_preds)
test_preds = pd.DataFrame(test_preds)
print(test_preds)
test_preds.columns = ['MGC_out']
writer = pd.ExcelWriter('test_preds_MGC.xlsx', engine='xlsxwriter')