def test_single(turn):
    # Evaluate model_single on the held-out graphs. Relies on module-level state:
    # model_single, test_adj_list, test_feature_list, test_label_list, fo, phase3.
    output_list = []
    model_single.eval()
    for i in range(len(test_adj_list)):
        adj = torch.Tensor(test_adj_list[i])
        feature = torch.Tensor(test_feature_list[i])
        output = model_single(feature, adj)
        output_list.append(output)
    # Each output is expected to be a single score per graph.
    output_list = torch.Tensor(output_list)
    # loss_test = F.binary_cross_entropy_with_logits(output_list, torch.Tensor(test_label_list))
    # print("single_test_loss:", loss_test)
    labels = torch.Tensor(test_label_list)
    labels.unsqueeze_(0)
    output_list.unsqueeze_(0)
    test_accuracy = accuracy(output_list.t(), labels.t())
    print()
    print("test_accuracy:", test_accuracy)
    fo.write(str(test_accuracy))
    fo.write("\n")
    print()
    if phase3:
        save_pred(turn, output_list.t())
    return test_accuracy
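# accuracy() is used above but not defined in this file. A minimal sketch, assuming
# predictions and labels arrive as column tensors of shape [N, 1], predictions are raw
# scores thresholded at zero, and labels are encoded as 0/1 (or +1/-1); the actual
# helper in this codebase may differ.
def accuracy(preds, labels):
    pred_cls = (preds.view(-1) > 0).float()   # score > 0 -> positive class
    true_cls = (labels.view(-1) > 0).float()  # maps both 0/1 and -1/+1 encodings onto {0, 1}
    return (pred_cls == true_cls).float().mean().item()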
def test_single(model, turn):
    # CUDA variant: evaluate the given model on the test graphs, report accuracy/AUC,
    # and save the predictions for the current phase.
    output_list = []
    model.eval()
    for i in range(len(test_adj_list)):
        adj = torch.Tensor(test_adj_list[i]).cuda()
        feature = torch.Tensor(test_feature_list[i]).cuda()
        output = model(feature, adj)
        output_list.append(output)
    output_list = torch.Tensor(output_list)
    labels = torch.Tensor(test_label_list)
    labels.unsqueeze_(0)
    output_list.unsqueeze_(0)
    print("accuracy:", accuracy(output_list.t(), labels.t()))
    print("auc:", auc(output_list.t(), labels.t()))
    # loss_test = F.binary_cross_entropy_with_logits(output_list, torch.Tensor(test_label_list))
    # print("single_test_loss:", loss_test)
    if phase1:
        save_pred("res_cheby/", turn, 1, output_list.t())
    elif phase3:
        save_pred("res_cheby/", turn, 3, output_list.t())
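# auc() is another external helper. A minimal sketch, assuming scikit-learn is
# available and that labels/scores are column tensors; the original implementation
# may compute the ROC curve differently.
from sklearn.metrics import roc_auc_score

def auc(preds, labels):
    # roc_auc_score accepts {0, 1} or {-1, +1} labels and arbitrary real-valued scores.
    return roc_auc_score(labels.view(-1).cpu().numpy(), preds.view(-1).cpu().numpy())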
def test_hinge():
    # Pairwise evaluation: embed every test graph with model_hinge, then score pairs
    # by cosine similarity. Relies on module-level state (model_hinge, test_adj_list,
    # test_feature_list, test_label_list, pair_num, turn).
    output_list = []
    test_labels = []  # ground-truth labels for the cosine pairs in the test set
    model_hinge.eval()
    for i in range(len(test_adj_list)):
        adj = torch.Tensor(test_adj_list[i]).cuda()  # moved to GPU to match the feature tensor
        feature = torch.Tensor(test_feature_list[i]).cuda()
        output = model_hinge(feature, adj)
        output.squeeze_(0)
        output_list.append(output)
    # Pair graph i with the next pair_num graphs (cyclically): +1 if they share a label, -1 otherwise.
    for i in range(len(test_adj_list)):
        for j in range(i + 1, i + 1 + pair_num):
            j = j % len(test_adj_list)
            if test_label_list[i] == test_label_list[j]:
                test_labels.append([1])
            else:
                test_labels.append([-1])
    cos_list = get_cos_list(output_list, 0)
    test_labels = torch.Tensor(test_labels)
    # lossF = torch.nn.MarginRankingLoss(margin=0)
    # loss_test = lossF(cos_list, torch.Tensor([0]), test_labels)
    # print("test_loss:", loss_test)
    print("hinge_accuracy:", accuracy(cos_list, test_labels))
    print("hinge_auc:", auc(cos_list, test_labels))
    save_pred("res_cheby/", turn, 2, cos_list)
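# get_cos_list() is not defined in this file. A sketch consistent with the pairing
# loop in test_hinge(): graph i is compared with the next pair_num graphs (cyclically)
# and each pair gets a cosine similarity between the two embeddings. Interpreting the
# second argument as the starting index is an assumption.
import torch.nn.functional as F

def get_cos_list(embeddings, start):
    cos_list = []
    n = len(embeddings)
    for i in range(start, n):
        for j in range(i + 1, i + 1 + pair_num):  # pair_num: same module-level constant as above
            j = j % n
            cos = F.cosine_similarity(embeddings[i], embeddings[j], dim=0)
            cos_list.append([cos.item()])
    return torch.Tensor(cos_list)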
def save_pred_if_needed(y_pred, dataset, epoch, config, is_best, force_save=False):
    if config.save_pred:
        prefix = get_pred_prefix(dataset, config)
        if force_save or (config.save_step is not None and (epoch + 1) % config.save_step == 0):
            save_pred(y_pred, prefix + f'epoch:{epoch}_pred.csv')
        if config.save_last:
            save_pred(y_pred, prefix + f'epoch:last_pred.csv')
        if config.save_best and is_best:
            save_pred(y_pred, prefix + f'epoch:best_pred.csv')
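# Note that save_pred() is called here with (predictions, path), unlike the
# four-argument calls above. A minimal sketch matching this two-argument usage,
# assuming a plain one-column CSV is enough; the real helper may also write ids
# or a header row.
import numpy as np

def save_pred(y_pred, path):
    arr = y_pred.detach().cpu().numpy() if hasattr(y_pred, "detach") else np.asarray(y_pred)
    np.savetxt(path, arr.reshape(-1, 1), delimiter=",")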
def eval_run(C, p_model=None):
    # Train an ensemble on the full training fold, keep the best checkpoint,
    # then predict on the test set and dump the predictions for upload.
    if C.finger or C.mol2vec:
        finger_dict = load_fingers(C, C.data)
    else:
        finger_dict = None
    device = 0

    (trainset, devset, testset), lab_num = get_data(C, fold="test")

    # Build the ensemble: each member gets its own optimizer.
    models = []
    optimers = []
    for k in range(C.ensemble):
        model = get_model(C, lab_num)
        if p_model is not None:
            copy_param(model, p_model)
        model = model.to(device)
        optimer, loss_func = get_others(C, model)
        models.append(model)
        optimers.append(optimer)
    ens_eval_m = EnsembleModel(models)

    best_epoch = -1
    best_metric = -1
    for epoch_id in range(C.num_epoch):
        train_loss = 0.
        for ens_id in range(C.ensemble):
            model, _train_loss = train(C, models[ens_id], trainset, loss_func, optimers[ens_id],
                                       epoch_id, "{0}-{1}".format(0, ens_id), device, finger_dict)
            train_loss += (_train_loss / C.ensemble)
        E.log("Epoch %d ended." % (epoch_id))
        E.log()

        # No validation set here: model selection falls back to the (negated) training loss.
        if C.train_loss_val:
            metric_val = -train_loss
        else:
            assert False
        if (best_epoch < 0 or metric_val > best_metric) or C.no_valid:
            best_epoch = epoch_id
            best_metric = metric_val
            save_model(ens_eval_m, C.save_path, E.core.id, "eval")

    E.log("run ends. best epoch = %d" % (best_epoch))
    E.log("Best metric = %.4f" % (best_metric))
    E.log()
    E.log("model saved.")
    E.log("--------------------------------------------------------------")

    best_model = load_model(C.save_path, E.core.id, "eval")
    tot_pos_ps = evaluate(C, best_model, testset, loss_func, epoch_id, 0, device, "Dev",
                          finger_dict, ret_preds=True)
    save_pred(tot_pos_ps, C.data, "to_upload.csv")
    E.log("All run end!")
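# EnsembleModel is defined elsewhere in the codebase. A minimal sketch, assuming it
# simply averages the member models' outputs at inference time; the original may
# instead vote or weight the members.
import torch
import torch.nn as nn

class EnsembleModel(nn.Module):
    def __init__(self, models):
        super().__init__()
        self.models = nn.ModuleList(models)

    def forward(self, *inputs, **kwargs):
        outs = [m(*inputs, **kwargs) for m in self.models]
        return torch.stack(outs, dim=0).mean(dim=0)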