def test_graph_conv_atom_features(self):
  """Custom per-atom features survive a save/reload round trip."""
  tasks, dataset, transformers, metric = self.get_dataset(
      'regression', 'Raw', num_tasks=1)

  # Attach a random value to every atom as a molecule-level property and use
  # the sum of those stored values as the regression target.
  atom_feature_name = 'feature'
  y = []
  for mol in dataset.X:
    atom_features = []
    for atom in mol.GetAtoms():
      val = np.random.normal()
      mol.SetProp(
          "atom %08d %s" % (atom.GetIdx(), atom_feature_name), str(val))
      atom_features.append(val)
    y.append(np.sum(atom_features))

  featurizer = ConvMolFeaturizer(atom_properties=[atom_feature_name])
  X = featurizer.featurize(dataset.X)
  dataset = deepchem.data.NumpyDataset(X, np.array(y))

  batch_size = 50
  model = GraphConvTensorGraph(
      len(tasks),
      number_atom_features=featurizer.feature_length(),
      batch_size=batch_size,
      mode='regression')

  model.fit(dataset, nb_epoch=1)
  y_pred1 = model.predict(dataset)
  model.save()

  # Reloading the saved model must reproduce the original predictions exactly.
  model2 = TensorGraph.load_from_dir(model.model_dir)
  y_pred2 = model2.predict(dataset)
  self.assertTrue(np.all(y_pred1 == y_pred2))

def test_graph_conv_regression_model(self):
  """Train, evaluate, save, and reload a regression GraphConv model."""
  tasks, dataset, transformers, metric = self.get_dataset(
      'regression', 'GraphConv')

  batch_size = 50
  model = GraphConvTensorGraph(
      len(tasks), batch_size=batch_size, mode='regression')

  model.fit(dataset, nb_epoch=1)
  scores = model.evaluate(dataset, [metric], transformers)

  # Evaluating the reloaded model exercises the save/restore path; after only
  # one epoch of training the scores themselves are not asserted on.
  model.save()
  model = TensorGraph.load_from_dir(model.model_dir)
  scores = model.evaluate(dataset, [metric], transformers)

def test_change_loss_function(self):
  """A restored model can be fine-tuned through a submodel with a new loss."""
  tasks, dataset, transformers, metric = self.get_dataset(
      'regression', 'GraphConv', num_tasks=1)

  batch_size = 50
  model = GraphConvTensorGraph(
      len(tasks), batch_size=batch_size, mode='regression')

  model.fit(dataset, nb_epoch=1)
  model.save()

  # Reload the graph without restoring weights, attach an L2 loss as a new
  # submodel, then restore the saved weights and train only that submodel.
  model2 = TensorGraph.load_from_dir(model.model_dir, restore=False)
  dummy_label = model2.labels[-1]
  dummy_output = model2.outputs[-1]
  loss = ReduceSum(L2Loss(in_layers=[dummy_label, dummy_output]))
  submodel = model2.create_submodel(loss=loss)
  model2.restore()
  model2.fit(dataset, nb_epoch=1, submodel=submodel)
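
# A minimal sketch (not part of the original tests, assumed usage): the same
# submodel mechanism should also allow swapping in a different optimizer while
# fine-tuning with the new loss. The Adam import path and learning rate below
# are illustrative assumptions, so the sketch is left commented out.
#
#   from deepchem.models.tensorgraph.optimizers import Adam
#
#   submodel = model2.create_submodel(
#       loss=loss, optimizer=Adam(learning_rate=0.0002))
#   model2.restore()
#   model2.fit(dataset, nb_epoch=1, submodel=submodel)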