# --- Compatibility AUC evaluation (image-tensor model) -----------------------
# Loads trained weights, scores every outfit in the test set, and reports the
# mean BCE loss plus the ROC-AUC of the compatibility prediction.
# map_location keeps the load working on CPU-only machines when the checkpoint
# was saved from a GPU.
model.load_state_dict(
    torch.load("./model_train_relation_vse_type_cond_scales.pth",
               map_location=device))
criterion = nn.BCELoss()

# Compatibility AUC test
model.eval()
total_loss = 0
outputs = []
targets = []
for batch_num, batch in enumerate(test_loader, 1):
    print("\r#{}".format(batch_num), end="", flush=True)
    lengths, images, names, offsets, set_ids, labels, is_compat = batch
    images = images.to(device)
    target = is_compat.float().to(device)
    with torch.no_grad():
        # _compute_score returns (score, ...aux outputs); only the score is
        # needed here.
        output, _, _, _ = model._compute_score(images)
        output = output.squeeze(dim=1)
        loss = criterion(output, target)
    total_loss += loss.item()
    outputs.append(output)
    targets.append(target)
print()
# After the loop, batch_num is the count of batches processed.
print("Test Loss: {:.4f}".format(total_loss / batch_num))
# .numpy() directly after .cpu(); Tensor.data is a deprecated idiom.
outputs = torch.cat(outputs).cpu().numpy()
targets = torch.cat(targets).cpu().numpy()
print("AUC: {:.4f}".format(metrics.roc_auc_score(targets, outputs)))

# Fill in the blank evaluation
# NOTE(review): the loop body is truncated at this chunk boundary — only the
# progress print is visible here; the remainder lies outside this view.
is_correct = []
for i in range(len(test_dataset)):
    print("\r#{}".format(i), end="", flush=True)
need_rep=True, vocabulary=len(train_dataset.vocabulary)).to(device) model.load_state_dict(torch.load("./model_train.pth")) print("Successfully load model weight...") model.eval() criterion = nn.BCELoss() # Compatibility AUC test total_loss = 0 outputs = [] targets = [] for batch in tqdm(test_loader): lengths, batch_g, names, offsets, set_ids, labels, is_compat = batch target = is_compat.float().to(device) with torch.no_grad(): output, _, _ = model._compute_score(batch_g) output = output.squeeze(dim=1) loss = criterion(output, target) total_loss += loss.item() outputs.append(output) targets.append(target) print() print("Test Loss: {:.4f}".format(total_loss / len(test_loader))) outputs = torch.cat(outputs).cpu().data.numpy() targets = torch.cat(targets).cpu().data.numpy() print("AUC: {:.4f}".format(metrics.roc_auc_score(targets, outputs))) def img2graph(items): l = items.shape[0] g = dgl.DGLGraph()