def calculate_metrics(Y, P, dataset='davis'):
    """Compute regression metrics for predictions *P* against labels *Y*,
    print them, and persist a one-line summary to
    ``results/result_<model_st>_<dataset>.txt``.

    Relies on module-level metric helpers (``get_cindex``, ``get_ci``,
    ``get_rm2``, ``get_mse``, ``get_pearson``, ``get_spearman``,
    ``get_rmse``) and the module-level ``model_st`` name — TODO confirm
    both are defined elsewhere in this module.

    Parameters
    ----------
    Y : array-like
        Ground-truth affinity labels.
    P : array-like
        Predicted affinity scores.
    dataset : str
        Dataset tag used in console output and the result filename.
    """
    # aupr = get_aupr(Y, P)
    cindex = get_cindex(Y, P)   # DeepDTA variant of the concordance index
    cindex2 = get_ci(Y, P)      # GraphDTA variant
    rm2 = get_rm2(Y, P)         # DeepDTA r_m^2 metric
    mse = get_mse(Y, P)
    pearson = get_pearson(Y, P)
    spearman = get_spearman(Y, P)
    rmse = get_rmse(Y, P)

    print('metrics for ', dataset)
    # print('aupr:', aupr)
    print('cindex:', cindex)
    print('cindex2', cindex2)
    print('rm2:', rm2)
    print('mse:', mse)
    print('pearson', pearson)

    result_file_name = 'results/result_' + model_st + '_' + dataset + '.txt'
    result_str = ''
    result_str += dataset + '\r\n'
    result_str += 'rmse:' + str(rmse) + ' ' + ' mse:' + str(
        mse) + ' ' + ' pearson:' + str(pearson) + ' ' + 'spearman:' + str(
        spearman) + ' ' + 'ci:' + str(cindex) + ' ' + 'rm2:' + str(rm2)
    print(result_str)
    # BUG FIX: the original `open(...).writelines(result_str)` leaked the
    # file handle and iterated the string character-by-character; use a
    # context manager and a single write() instead.
    with open(result_file_name, 'w') as result_file:
        result_file.write(result_str)
def get_scores_full(labels, predictions, validation_test, total_training_loss,
                    total_validation_test_loss, epoch, comp_tar_pair_dataset,
                    fold_epoch_results):
    """Compute the full set of evaluation scores for one epoch, print them,
    and append them (in a fixed order) to ``fold_epoch_results``.

    Relies on module-level metric helpers (``get_rm2``, ``get_cindex``,
    ``mse``, ``rmse``, ``pearson``, ``spearman``, ``ci``, ``f1``,
    ``average_AUC``) — TODO confirm they are defined elsewhere in this module.

    Parameters
    ----------
    labels, predictions : array-like
        Ground-truth values and model outputs for the epoch.
    validation_test : str
        Label ("Validation"/"Test") used only in the printed output.
    total_training_loss, total_validation_test_loss : float
        Accumulated losses reported for this epoch.
    epoch : int
        Epoch index reported in the printed output.
    comp_tar_pair_dataset : object
        Unused here; kept for interface compatibility with callers.
    fold_epoch_results : list
        Mutated in place: one row of scores is appended per call.
        Note: ``rmse_score`` is printed but intentionally not appended,
        matching the original row layout.
    """
    # PERF FIX: convert once instead of re-running np.asarray for every
    # metric call (the original converted both arrays ~10 times each).
    y_true = np.asarray(labels)
    y_pred = np.asarray(predictions)

    deep_dta_rm2 = get_rm2(y_true, y_pred)
    # deep_dta_aupr = get_aupr(y_true, y_pred)
    deep_dta_cindex = get_cindex(y_true, y_pred)
    deep_dta_mse = mse(y_true, y_pred)
    rmse_score = rmse(y_true, y_pred)
    pearson_score = pearson(y_true, y_pred)
    spearman_score = spearman(y_true, y_pred)
    ci_score = ci(y_true, y_pred)
    f1_score = f1(y_true, y_pred)
    ave_auc_score = average_AUC(y_true, y_pred)

    fold_epoch_results.append([
        deep_dta_rm2, deep_dta_cindex, deep_dta_mse, pearson_score,
        spearman_score, ci_score, f1_score, ave_auc_score
    ])

    print("Epoch:{}\tTraining Loss:{}\t{} Loss:{}".format(
        epoch, total_training_loss, validation_test,
        total_validation_test_loss))
    print("{} DeepDTA RM2:\t{}".format(validation_test, deep_dta_rm2))
    print("{} DeepDTA MSE\t{}".format(validation_test, deep_dta_mse))
    print("{} RMSE\t{}".format(validation_test, rmse_score))
    print("{} DeepDTA c-index\t{}".format(validation_test, deep_dta_cindex))
    print("{} Pearson:\t{}".format(validation_test, pearson_score))
    print("{} Spearman:\t{}".format(validation_test, spearman_score))
    print("{} Ci:\t{}".format(validation_test, ci_score))
    print("{} F1-Score:\t{}".format(validation_test, f1_score))
    print("{} Average_AUC:\t{}".format(validation_test, ave_auc_score))
def test(self, dataset):
    """Run ``self.model`` in evaluation mode over *dataset* and score it.

    Parameters
    ----------
    dataset : iterable
        Items accepted by ``self.model(data, train=False)``, which must
        return a ``(correct_labels, predicted_scores)`` pair per item.

    Returns
    -------
    tuple
        ``(MSE, cindex, rm2, AUPR, T, S)`` where ``T`` is the list of
        true labels and ``S`` the list of predicted scores.
    """
    # NOTE: removed the original's unused local `N = len(dataset)`.
    T, S = [], []  # true labels / predicted scores, accumulated per item
    for data in dataset:
        (correct_labels, predicted_scores) = self.model(data, train=False)
        # print(correct_labels, predicted_scores)
        T.append(correct_labels)
        S.append(predicted_scores)
    MSE = mean_squared_error(T, S)
    cindex = get_cindex(T, S)
    rm2 = get_rm2(T, S)
    AUPR = get_aupr(T, S)
    return MSE, cindex, rm2, AUPR, T, S