def test_weave_model(self):
  tasks, dataset, transformers, metric = self.get_dataset(
      'classification', 'Weave')
  model = WeaveModel(len(tasks), mode='classification')
  model.fit(dataset, nb_epoch=50)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean-roc_auc_score'] >= 0.9
  model.save()
  model = TensorGraph.load_from_dir(model.model_dir)
  scores2 = model.evaluate(dataset, [metric], transformers)
  assert np.allclose(scores['mean-roc_auc_score'],
                     scores2['mean-roc_auc_score'])
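# The tests here depend on a get_dataset helper defined elsewhere in the
# test class. A minimal sketch of what such a helper might look like,
# assuming DeepChem's WeaveFeaturizer, NumpyDataset, and Metric APIs; the
# SMILES strings and random labels are illustrative, not from the original
# suite.
import numpy as np
import deepchem as dc
from rdkit import Chem


def get_dataset(mode, featurizer_name, num_tasks=2):
  assert featurizer_name == 'Weave'
  # Featurize a handful of molecules with the Weave featurizer.
  smiles = ['CCO', 'CCC', 'c1ccccc1', 'CC(=O)O']
  mols = [Chem.MolFromSmiles(s) for s in smiles]
  X = dc.feat.WeaveFeaturizer().featurize(mols)
  if mode == 'classification':
    y = np.random.randint(0, 2, size=(len(smiles), num_tasks))
    # Task-averaged ROC AUC; its metric name becomes 'mean-roc_auc_score'.
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode='classification')
  else:
    y = np.random.uniform(size=(len(smiles), num_tasks))
    metric = dc.metrics.Metric(
        dc.metrics.mean_absolute_error, mode='regression')
  dataset = dc.data.NumpyDataset(X, y)
  tasks = ['task%d' % i for i in range(num_tasks)]
  transformers = []
  return tasks, dataset, transformers, metric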
def test_weave_regression_model(self):
  tasks, dataset, transformers, metric = self.get_dataset(
      'regression', 'Weave')
  model = WeaveModel(len(tasks), mode='regression')
  model.fit(dataset, nb_epoch=80)
  scores = model.evaluate(dataset, [metric], transformers)
  # evaluate() returns one scalar per metric, so compare directly rather
  # than iterating over the value.
  assert scores['mean_absolute_error'] < 0.1
  model.save()
  model = TensorGraph.load_from_dir(model.model_dir)
  scores2 = model.evaluate(dataset, [metric], transformers)
  assert np.allclose(scores['mean_absolute_error'],
                     scores2['mean_absolute_error'])
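# The same save/reload round-trip can be checked on raw predictions rather
# than aggregate scores; a short sketch, assuming the fitted model and
# dataset from the test above (the tolerance is an assumption):
pred_before = model.predict(dataset)
model.save()
restored = TensorGraph.load_from_dir(model.model_dir)
pred_after = restored.predict(dataset)
# load_from_dir restores the trained weights, so predictions should match.
assert np.allclose(pred_before, pred_after, atol=1e-5)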
def test_change_loss_function_weave(self):
  tasks, dataset, transformers, metric = self.get_dataset(
      'regression', 'Weave', num_tasks=1)
  batch_size = 50
  model = WeaveModel(
      len(tasks), batch_size=batch_size, mode='regression', use_queue=False)
  model.fit(dataset, nb_epoch=1)
  model.save()
  # Reload without restoring weights, attach a new L2 loss as a submodel,
  # then restore the trained weights and fine-tune against the new loss.
  model2 = TensorGraph.load_from_dir(model.model_dir, restore=False)
  dummy_label = model2.labels[-1]
  dummy_output = model2.outputs[-1]
  loss = ReduceSum(L2Loss(in_layers=[dummy_label, dummy_output]))
  submodel = model2.create_submodel(loss=loss)
  model2.restore()
  model2.fit(dataset, nb_epoch=1, submodel=submodel)
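# Besides swapping the loss, create_submodel can also restrict which layers
# are trained; a minimal sketch, assuming the model2 and loss from the test
# above. The choice of the last two layers is purely illustrative, not from
# the original test.
finetune = model2.create_submodel(
    layers=list(model2.layers.values())[-2:], loss=loss)
model2.fit(dataset, nb_epoch=1, submodel=finetune)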
n_hidden = 10
batch_size = 64
n_graph_feat = 10
nb_epoch = 10  # kept small to prevent overfitting

model = WeaveModel(
    n_tasks=n_tasks,
    n_atom_feat=n_atom_feat,
    n_pair_feat=n_pair_feat,
    n_hidden=n_hidden,
    n_graph_feat=n_graph_feat,
    mode="regression",
    batch_size=batch_size,
    model_dir="/home/rod/Dropbox/Quimica/Analysis/ANalisis/Borradores/WeaveModel/")

# Fit the model
print("Fitting model")
model.fit(train_dataset, nb_epoch=nb_epoch)
model.save()

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)

print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
# save_dataset_to_disk("./", train_dataset, valid_dataset, test_dataset,
#                      transformers)
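# Once saved, the model can be reloaded in a later session for inference; a
# short sketch, assuming the same model_dir and the featurized test_dataset
# from the script above:
from deepchem.models import TensorGraph

model = TensorGraph.load_from_dir(
    "/home/rod/Dropbox/Quimica/Analysis/ANalisis/Borradores/WeaveModel/")
predictions = model.predict(test_dataset)
print(predictions.shape)  # one row of task predictions per molecule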