Example #1
def update_LR_auc2(self, X_test, Y_test, LR_prev=None):
    test_size = len(X_test)
    all_edge_feat = []
    for i in range(test_size):
        start_node_emb = np.array(self.embeddings[X_test[i][0]])
        end_node_emb = np.array(self.embeddings[X_test[i][1]])
        edge_feat = np.abs(start_node_emb - end_node_emb)  # weighted-L1 edge feature
        edge_feat = edge_feat * edge_feat  # squared -> weighted-L2 edge feature
        all_edge_feat.append(edge_feat)
    lr_clf = LR_prev  # previously fitted classifier; must not be None when there is test data
    if len(Y_test) == 0:
        print('------- NOTE: the two graphs have no changes -> no testing data -> setting AUC to 1.0')
        auc = 1.0
    else:
        # predict first; the second column gives the probability of a positive edge
        Y_score = lr_clf.predict_proba(all_edge_feat)[:, 1]
        auc = auc_score(y_true=Y_test, y_score=Y_score)
        # then update the model parameters on the new snapshot's labels
        lr_clf.fit(all_edge_feat, Y_test)
    print("weighted-L2; auc=", "{:.9f}".format(auc))
    return lr_clf
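This method leans on names defined elsewhere in its module. Below is a minimal sketch of what it appears to assume, with hypothetical setup code: `auc_score` is taken to be a thin wrapper over `sklearn.metrics.roc_auc_score`, and `LR_prev` a `LogisticRegression` already fitted on the previous snapshot. Note that `LogisticRegression.fit` refits from scratch; the "incremental" update only reuses the previous coefficients if the classifier was built with `warm_start=True` (for truly incremental updates, `SGDClassifier.partial_fit` is the usual choice).

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

def auc_score(y_true, y_score):
    # assumed helper: plain ROC AUC
    return roc_auc_score(y_true, y_score)

# hypothetical bootstrap: fit an initial classifier on the first snapshot,
# then thread it through successive calls as LR_prev
rng = np.random.default_rng(0)
edge_feats = rng.random((100, 16))      # |edges| x embedding_dim
labels = rng.integers(0, 2, size=100)   # 1 = edge exists, 0 = non-edge
lr_clf = LogisticRegression(warm_start=True).fit(edge_feats, labels)
# next snapshots: lr_clf = model.update_LR_auc2(X_test, Y_test, LR_prev=lr_clf)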
Example #2
def evaluate_auc(self, X_test, Y_test):
    test_size = len(X_test)
    Y_true = [int(i) for i in Y_test]
    Y_score = []
    for i in range(test_size):
        # reshape to (1, dim) row vectors so that cosine_similarity returns a
        # single (1, 1) similarity rather than a (dim, dim) pairwise matrix
        start_node_emb = np.array(self.embeddings[X_test[i][0]]).reshape(1, -1)
        end_node_emb = np.array(self.embeddings[X_test[i][1]]).reshape(1, -1)
        score = cosine_similarity(start_node_emb, end_node_emb)[0][0]  # in [-1, +1]
        Y_score.append(score)
    if len(Y_true) == 0:
        print('------- NOTE: the two graphs have no changes -> no testing data -> setting AUC to 1.0')
        auc = 1.0
    else:
        auc = auc_score(y_true=Y_true, y_score=Y_score)
    print("cos sim; auc=", "{:.9f}".format(auc))
Example #3
def evaluate_auc(self, X_test, Y_test):
    test_size = len(X_test)
    Y_true = [int(i) for i in Y_test]
    Y_probs = []
    for i in range(test_size):
        # reshape to (1, dim) row vectors so that cosine_similarity returns a (1, 1) result
        start_node_emb = np.array(self.embeddings[X_test[i][0]]).reshape(1, -1)
        end_node_emb = np.array(self.embeddings[X_test[i][1]]).reshape(1, -1)
        score = cosine_similarity(start_node_emb, end_node_emb)[0][0]  # in [-1, +1]
        # rescale to [0, 1] as a pseudo-probability; passing the raw score instead
        # yields the same result, since sklearn's ROC AUC is invariant under
        # monotonic transformations of the scores
        Y_probs.append((score + 1) / 2.0)
    if len(Y_true) == 0:  # no testing data (the dynamic network did not change) -> set AUC to 1
        print('------- NOTE: the two graphs have no changes -> no testing data -> setting AUC to 1.0')
        auc = 1.0
    else:
        auc = auc_score(y_true=Y_true, y_score=Y_probs)
    print("auc=", "{:.9f}".format(auc))
Example #4
        imgs_right[:, j, :, :] -= img_mean[j]
        imgs_right[:, j, :, :] /= (img_std[j] + 1e-5)

    imgs_left = Variable(torch.Tensor(imgs_left))
    imgs_right = Variable(torch.Tensor(imgs_right))
    if opts.cuda:
        imgs_left = imgs_left.cuda()
        imgs_right = imgs_right.cuda()

    pred = net(imgs_left, imgs_right).cpu().data.numpy()  # calling the module runs forward plus any hooks
    match_predictions[start:end] = pred[:, 0]

    start += curr_batch_size

auc, tpr, fpr, thresh = auc_score(match_predictions,
                                  match_labels,
                                  get_roc=True)
"""
# Get embeddings and write to tensorboard
writer = SummaryWriter(log_dir=opts.result_path)

images_embedding = []
images_raw = []

num_samples = test_data_h5['pose_labels'].shape[0]

if num_samples % opts.batch_size == 0:
    num_batches = num_samples // opts.batch_size
else:
    num_batches = num_samples // opts.batch_size + 1
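The fragment above is truncated, but the `start`/`end` bookkeeping and the `num_batches` if/else imply a standard ceiling-division batching loop. A hypothetical sketch of that pattern (the helper name is ours, not the original's):

import math

def iter_batches(num_samples, batch_size):
    # math.ceil(a / b) is equivalent to the if/else ceiling division above
    num_batches = math.ceil(num_samples / batch_size)
    for b in range(num_batches):
        start = b * batch_size
        end = min(start + batch_size, num_samples)  # last batch may be short
        yield start, end

for start, end in iter_batches(num_samples=1005, batch_size=250):
    curr_batch_size = end - start  # 250, 250, 250, 250, then 5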
Example #5
def evaluate_model(model, X, y):
    preds = predict(model, X)  # predict: helper defined elsewhere, returns scores for X
    auc = auc_score(y, preds)
    return auc
def evaluate(net, loader, task, opts):
    net.eval()
    isExhausted = False
    mean_loss = 0
    total_size = 0
    level_specific_loss = [0.0] * loader.nLevels  # one accumulator per level (5 by default)
    # NOTE: Making use of the fact that the batches do not contain samples from more than
    # one level. This is true only if the batch_size is a factor of the number of
    # valid samples per level. Default batch_size: 250, Default samples per level: 1000
    samples_per_level = loader.nPosValid // loader.nLevels

    curr_idx = 0
    true_labels = []
    predicted_probs = []
    if task == 'match':
        while not isExhausted:
            imgs_left, imgs_right, labels, isExhausted = loader.batch_match_valid(
                isPositive=True)
            total_size += labels.size()[0]

            imgs_left = Variable(imgs_left)
            imgs_right = Variable(imgs_right)
            labels = Variable(labels)
            labels = labels.float()

            if opts.cuda:
                imgs_left = imgs_left.cuda()
                imgs_right = imgs_right.cuda()
                labels = labels.cuda()

            pred = net.forward_match(imgs_left, imgs_right)
            loss_curr = criterion_match_valid(pred, labels)
            mean_loss += loss_curr.data[0]
            # Level specific loss computed only over the positive samples
            # since there are no levels in negative samples
            level_specific_loss[curr_idx] += loss_curr.data[0]
            # If current level samples are exhausted
            if total_size % samples_per_level == 0:
                curr_idx += 1
            true_labels.append(labels.data.cpu().numpy()[:, 0])
            predicted_probs.append(pred.data.cpu().numpy()[:, 0])

        isExhausted = False
        while not isExhausted:
            imgs_left, imgs_right, labels, isExhausted = loader.batch_match_valid(
                isPositive=False)
            total_size += labels.size()[0]
            if opts.cuda:
                imgs_left = imgs_left.cuda()
                imgs_right = imgs_right.cuda()
                labels = labels.cuda()
            imgs_left = Variable(imgs_left)
            imgs_right = Variable(imgs_right)
            labels = Variable(labels)
            labels = labels.float()

            pred = net.forward_match(imgs_left, imgs_right)
            loss_curr = criterion_match_valid(pred, labels)
            mean_loss += loss_curr.data[0]
            true_labels.append(labels.data.cpu().numpy()[:, 0])
            predicted_probs.append(pred.data.cpu().numpy()[:, 0])

    for i in range(len(level_specific_loss)):
        level_specific_loss[i] /= samples_per_level

    true_labels = np.hstack(true_labels)
    predicted_probs = np.hstack(predicted_probs)
    mean_loss /= total_size
    auc_value = auc_score(predicted_probs, true_labels)
    # Set the network back to train mode
    net.train()
    return mean_loss, level_specific_loss, auc_value
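Note that Example #4 and the `evaluate` function above call `auc_score(predictions, labels, ...)` with the scores first, the reverse of Examples #1-#3 (and of `evaluate_model`). A minimal sketch of a wrapper with that signature, assuming it delegates to sklearn; the `get_roc` flag returning ROC curve points is inferred from the call in Example #4:

from sklearn.metrics import roc_auc_score, roc_curve

def auc_score(predictions, labels, get_roc=False):
    # assumed signature: scores first, labels second (see Examples #4 and #5)
    auc = roc_auc_score(labels, predictions)
    if get_roc:
        fpr, tpr, thresh = roc_curve(labels, predictions)
        return auc, tpr, fpr, thresh
    return auc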