Example No. 1
import torch
from sklearn import metrics

def hungarian_evaluate(subhead_index,
                       all_predictions,
                       class_names=None,
                       compute_purity=True,
                       compute_confusion_matrix=True,
                       confusion_matrix_file=None):
    # Evaluate the model via Hungarian matching between predicted cluster
    # assignments and the ground-truth classes. Only the given subhead
    # index is evaluated. (_hungarian_match and confusion_matrix are
    # repo-local helpers.)

    # Hungarian matching
    head = all_predictions[subhead_index]
    targets = head['targets'].cuda()
    predictions = head['predictions'].cuda()
    probs = head['probabilities'].cuda()
    num_classes = torch.unique(targets).numel()
    num_elems = targets.size(0)

    match = _hungarian_match(predictions,
                             targets,
                             preds_k=num_classes,
                             targets_k=num_classes)
    reordered_preds = torch.zeros(num_elems, dtype=predictions.dtype).cuda()
    for pred_i, target_i in match:
        reordered_preds[predictions == int(pred_i)] = int(target_i)

    # Gather performance metrics
    acc = int((reordered_preds == targets).sum()) / float(num_elems)
    nmi = metrics.normalized_mutual_info_score(targets.cpu().numpy(),
                                               predictions.cpu().numpy())
    ari = metrics.adjusted_rand_score(targets.cpu().numpy(),
                                      predictions.cpu().numpy())

    report = metrics.classification_report(targets.cpu().numpy(),
                                           reordered_preds.cpu().numpy(),
                                           target_names=class_names)
    print(report)

    # Guard against datasets with fewer than five classes.
    _, preds_top5 = probs.topk(min(5, num_classes), 1, largest=True)
    reordered_preds_top5 = torch.zeros_like(preds_top5)
    for pred_i, target_i in match:
        reordered_preds_top5[preds_top5 == int(pred_i)] = int(target_i)
    correct_top5_binary = reordered_preds_top5.eq(
        targets.view(-1, 1).expand_as(reordered_preds_top5))
    top5 = float(correct_top5_binary.sum()) / float(num_elems)

    # Compute confusion matrix
    if compute_confusion_matrix:
        confusion_matrix(reordered_preds.cpu().numpy(),
                         targets.cpu().numpy(), class_names,
                         confusion_matrix_file)

    return {
        'ACC': acc,
        'ARI': ari,
        'NMI': nmi,
        'ACC Top-5': top5,
        'hungarian_match': match
    }
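
All of the hungarian_evaluate variants in this collection call a repo-local
_hungarian_match helper that is not shown. A minimal sketch of what such a
helper could look like, built on SciPy's assignment solver (the body below is
an assumption, not the original implementation):

import torch
from scipy.optimize import linear_sum_assignment

def _hungarian_match(preds, targets, preds_k, targets_k):
    # Count how often predicted cluster c coincides with ground-truth class t.
    votes = torch.zeros((preds_k, targets_k), dtype=torch.long)
    for c in range(preds_k):
        for t in range(targets_k):
            votes[c, t] = int(((preds == c) & (targets == t)).sum().item())
    # Pick the one-to-one cluster-to-class assignment that maximizes agreement.
    row_ind, col_ind = linear_sum_assignment(votes.numpy(), maximize=True)
    return list(zip(row_ind, col_ind))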
Example No. 2
import numpy as np
import torch
from torch.utils import data

def validate(model, cfg):
    # Enable cuDNN autotuning for the fixed-size inputs used here.
    torch.backends.cudnn.benchmark = True

    val_dataset = Seg_dataset(cfg)
    val_loader = data.DataLoader(val_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=4,
                                 pin_memory=True)

    model.eval()  # disable dropout and use running BatchNorm statistics
    total_batch = len(val_loader)  # batch_size=1, so one batch per image
    hist = np.zeros((cfg.class_num, cfg.class_num))

    with torch.no_grad():
        for i, (img, label) in enumerate(val_loader):
            image = img.cuda().detach()
            output = model(image)
            pred = torch.max(output, 1)[1].cpu().numpy().astype('int32')
            label = label.numpy().astype('int32')

            hist += confusion_matrix(pred.flatten(), label.flatten(),
                                     cfg.class_num)
            ious = per_class_iou(hist) * 100
            miou = np.nanmean(ious)
            print(f'\rBatch: {i + 1}/{total_batch}, mIOU: {miou:.2f}', end='')

    print('\nPer class iou:')
    for i, iou in enumerate(ious):
        print(f'{i}: {iou:.2f}')

    return miou
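
Example No. 2 depends on two repo-local helpers, confusion_matrix(pred, label,
class_num) and per_class_iou(hist). A minimal sketch matching the signatures
used in validate() above (the helper bodies are assumptions):

import numpy as np

def confusion_matrix(pred, label, class_num):
    # class_num x class_num histogram via the bincount trick;
    # rows index predictions, columns index labels.
    mask = (label >= 0) & (label < class_num)
    return np.bincount(class_num * pred[mask] + label[mask],
                       minlength=class_num ** 2).reshape(class_num, class_num)

def per_class_iou(hist):
    # IoU per class: diagonal / (row sum + column sum - diagonal).
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-10)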
Example No. 3
import numpy as np
import torch
import torch.nn as nn

def train_(train_iter, net, opt, loss_function, loss_type, ind_ignore,
           n_classes):
    net.train()
    train_loss = 0
    total = 0
    # Create the confusion matrix
    cm = np.zeros((n_classes, n_classes))
    nTrain = train_iter.nbatches
    for batch_idx in range(nTrain):
        all_data = train_iter.next()
        data, target = all_data[0], all_data[1]

        # NHWC -> NCHW, convert to tensors, move to GPU. The deprecated
        # Variable wrapper is unnecessary on PyTorch >= 0.4.
        data = torch.from_numpy(data.transpose((0, 3, 1, 2))).cuda()
        target = torch.from_numpy(target.transpose((0, 3, 1, 2))).cuda()
        opt.zero_grad()

        output = net(data)
        target = target.float()  # keep the one-hot target as float for cce_soft

        _, target_indices = torch.max(target, 1)
        _, output_indices = torch.max(output, 1)
        flattened_output = output_indices.view(-1)
        flattened_target = target_indices.view(-1)

        if loss_type == 'cce_soft':
            loss = cce_soft(output, target, ignore_label=ind_ignore)
        else:
            loss = loss_function(output, target_indices)

        cm = confusion_matrix(cm,
                              flattened_output.data.cpu().numpy(),
                              flattened_target.data.cpu().numpy(), n_classes)
        loss.backward()
        nn.utils.clip_grad_norm_(net.parameters(), max_norm=4)
        opt.step()

        train_loss += loss.item()
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)

        progress_bar(batch_idx, nTrain,
                     'Loss: %.3f' % (train_loss / (batch_idx + 1)))

        del output, loss, flattened_output, output_indices

    jaccard_per_class, jaccard, accuracy = compute_metrics(cm)
    metrics_string = print_metrics(train_loss, nTrain, n_classes,
                                   jaccard_per_class, jaccard, accuracy)
    print(metrics_string)
    return jaccard, jaccard_per_class, accuracy, train_loss / nTrain
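
Examples No. 3, 6 and 7 accumulate a confusion matrix (note that their
confusion_matrix helper takes the running matrix as its first argument and
returns the updated one, unlike the two-argument variant in Example No. 2) and
then summarize it with a repo-local compute_metrics(cm). A minimal sketch of
the summary step (the body is an assumption consistent with the names it
returns):

import numpy as np

def compute_metrics(cm):
    # Per-class Jaccard (IoU), mean Jaccard, and overall pixel accuracy
    # from an accumulated confusion matrix.
    tp = np.diag(cm)
    jaccard_per_class = tp / (cm.sum(axis=0) + cm.sum(axis=1) - tp + 1e-10)
    jaccard = jaccard_per_class.mean()
    accuracy = tp.sum() / (cm.sum() + 1e-10)
    return jaccard_per_class, jaccard, accuracy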
Example No. 4
import torch
from sklearn import metrics

def hungarian_evaluate2(subhead_index,
                        all_predictions,
                        class_names=None,
                        compute_purity=True,
                        compute_confusion_matrix=True,
                        confusion_matrix_file=None):
    # Evaluate the model via Hungarian matching between predicted cluster
    # assignments and the ground-truth classes. Only the given subhead
    # index is evaluated.

    # Hungarian matching
    head = all_predictions[subhead_index]
    targets = head['targets'].cuda()
    predictions = head['predictions'].cuda()
    num_classes = torch.unique(targets).numel()
    num_elems = targets.size(0)

    match = _hungarian_match(predictions,
                             targets,
                             preds_k=num_classes,
                             targets_k=num_classes)
    reordered_preds = torch.zeros(num_elems, dtype=predictions.dtype).cuda()
    for pred_i, target_i in match:
        reordered_preds[predictions == int(pred_i)] = int(target_i)

    # Gather performance metrics
    acc = int((reordered_preds == targets).sum()) / float(num_elems)
    nmi = metrics.normalized_mutual_info_score(targets.cpu().numpy(),
                                               predictions.cpu().numpy())
    ari = metrics.adjusted_rand_score(targets.cpu().numpy(),
                                      predictions.cpu().numpy())

    # Compute confusion matrix
    if compute_confusion_matrix:
        confusion_matrix(reordered_preds.cpu().numpy(),
                         targets.cpu().numpy(), class_names,
                         confusion_matrix_file)

    return {'ACC': acc, 'ARI': ari, 'NMI': nmi, 'hungarian_match': match}
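
All of the hungarian_evaluate variants accept a compute_purity flag but never
use it. A purity helper one could wire in looks like this (hypothetical, not
part of the source):

import numpy as np

def cluster_purity(predictions, targets):
    # For each predicted cluster, credit its most frequent ground-truth
    # class, then normalize by the total number of samples.
    correct = 0
    for c in np.unique(predictions):
        _, counts = np.unique(targets[predictions == c], return_counts=True)
        correct += counts.max()
    return correct / len(targets)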
Example No. 5
import torch
from sklearn import metrics

def hungarian_evaluate(subhead_index,
                       all_predictions,
                       class_names=None,
                       compute_purity=True,
                       compute_confusion_matrix=True,
                       confusion_matrix_file=None,
                       tf_writer=None,
                       epoch=0):
    # Evaluate the model via Hungarian matching between predicted cluster
    # assignments and the ground-truth classes. Only the given subhead
    # index is evaluated.

    # Hungarian matching
    head = all_predictions[subhead_index]
    targets = head['targets'].cuda()
    predictions = head['predictions'].cuda()
    probs = head['probabilities'].cuda()
    num_classes = torch.unique(targets).numel()
    num_elems = targets.size(0)

    match = _hungarian_match(predictions,
                             targets,
                             preds_k=num_classes,
                             targets_k=num_classes)
    reordered_preds = torch.zeros(num_elems, dtype=predictions.dtype).cuda()
    for pred_i, target_i in match:
        reordered_preds[predictions == int(pred_i)] = int(target_i)

    # Gather performance metrics
    acc = int((reordered_preds == targets).sum()) / float(num_elems)
    nmi = metrics.normalized_mutual_info_score(targets.cpu().numpy(),
                                               predictions.cpu().numpy())
    ari = metrics.adjusted_rand_score(targets.cpu().numpy(),
                                      predictions.cpu().numpy())

    _, preds_top5 = probs.topk(min(5, num_classes), 1, largest=True)
    reordered_preds_top5 = torch.zeros_like(preds_top5)
    for pred_i, target_i in match:
        reordered_preds_top5[preds_top5 == int(pred_i)] = int(target_i)
    correct_top5_binary = reordered_preds_top5.eq(
        targets.view(-1, 1).expand_as(reordered_preds_top5))
    top5 = float(correct_top5_binary.sum()) / float(num_elems)

    reordered_preds = reordered_preds.cpu().numpy()
    targets = targets.cpu().numpy()

    if tf_writer is not None:
        from sklearn.metrics import precision_score, recall_score, f1_score
        precision = precision_score(targets,
                                    reordered_preds,
                                    average=None,
                                    zero_division=0)
        recall = recall_score(targets,
                              reordered_preds,
                              average=None,
                              zero_division=0)
        f1 = f1_score(targets, reordered_preds, average=None, zero_division=0)

        tf_writer.add_scalar('Evaluate/ACC', acc, epoch)
        tf_writer.add_scalar('Evaluate/NMI', nmi, epoch)
        tf_writer.add_scalar('Evaluate/ARI', ari, epoch)

        for i in range(len(f1)):
            tf_writer.add_scalar(f'Evaluate/f1_{i}', f1[i], epoch)
            tf_writer.add_scalar(f'Evaluate/precision_{i}', precision[i],
                                 epoch)
            tf_writer.add_scalar(f'Evaluate/recall_{i}', recall[i], epoch)

        # if epoch % cfg.embedding_freq == 0:
        #     tf_writer.add_embedding(intermediates, labels, images, epoch, cfg.session)

    # Visualize confusion matrix with matplotlib
    if compute_confusion_matrix:
        confusion_matrix(reordered_preds, targets, class_names,
                         confusion_matrix_file)

    return {
        'ACC': acc,
        'ARI': ari,
        'NMI': nmi,
        'ACC Top-5': top5,
        'hungarian_match': match
    }
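
A hypothetical smoke test for the TensorBoard-enabled variant, using synthetic
predictions for three classes. It assumes a CUDA device and a
_hungarian_match helper such as the sketch after Example No. 1; the log
directory is arbitrary:

import torch
from torch.utils.tensorboard import SummaryWriter

n, k = 300, 3
logits = torch.rand(n, k)
head = {'targets': torch.randint(0, k, (n,)),
        'predictions': logits.argmax(1),
        'probabilities': logits.softmax(1)}

writer = SummaryWriter(log_dir='runs/eval')
stats = hungarian_evaluate(0, [head], compute_confusion_matrix=False,
                           tf_writer=writer, epoch=0)
print(stats['ACC'], stats['NMI'], stats['ARI'], stats['ACC Top-5'])
writer.close()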
Example No. 6
import os
import numpy as np
import torch

def test_(test_iter, net, experiment_dir_final, loss_function, loss_type,
          void_labels, save_test_images, n_classes):
    ckt_names = ['best_jaccard.t7']

    for ckt_name in ckt_names:
        print('Testing checkpoint ' + ckt_name)
        checkpoint = torch.load(
            os.path.join(experiment_dir_final, 'checkpoint', ckt_name))
        print('Checkpoint loaded for testing...')
        net.load_state_dict(checkpoint['net'])

        net.eval()
        test_loss = 0
        total = 0
        # Create the confusion matrix
        cm = np.zeros((n_classes, n_classes))
        nTest = test_iter.nbatches
        for batch_idx in range(nTest):
            all_data = test_iter.next()
            data_, target_ = all_data[0], all_data[1]

            # NHWC -> NCHW, convert to tensors, move to GPU (Variable is
            # obsolete since PyTorch 0.4); no gradients are needed at test
            # time.
            data = torch.from_numpy(data_.transpose((0, 3, 1, 2))).cuda()
            target = torch.from_numpy(target_.transpose((0, 3, 1, 2))).cuda()
            with torch.no_grad():
                output = net(data)

            target = target.long()  # integer class map for the loss
            _, target_indices = torch.max(target, 1)
            _, output_indices = torch.max(output, 1)
            flattened_output = output_indices.view(-1)
            flattened_target = target_indices.view(-1)

            loss = loss_function(output, target_indices)

            cm = confusion_matrix(cm,
                                  flattened_output.data.cpu().numpy(),
                                  flattened_target.data.cpu().numpy(),
                                  n_classes)

            test_loss += loss.item()
            _, predicted = torch.max(output.data, 1)
            total += target.size(0)

            progress_bar(batch_idx, test_iter.nbatches,
                         'Test loss: %.3f' % (test_loss / (batch_idx + 1)))

            if save_test_images:
                save_images(data_, target_, output, experiment_dir_final,
                            batch_idx, void_labels)

            del output, loss, flattened_output, output_indices

        jaccard_per_class, jaccard, accuracy = compute_metrics(cm)
        metrics_string = print_metrics(test_loss, nTest, n_classes,
                                       jaccard_per_class, jaccard, accuracy)
        print(metrics_string)
Example No. 7
import numpy as np
import torch

def val_(val_iter, net, opt, loss_function, loss_type, epoch, es_step,
         ind_ignore, experiment_dir, max_patience, best_jacc, n_classes):
    code = 0
    net.eval()
    test_loss = 0
    total = 0
    # Create the confusion matrix
    cm = np.zeros((n_classes, n_classes))
    nVal = val_iter.nbatches
    for batch_idx in range(nVal):
        all_data = val_iter.next()
        data, target = all_data[0], all_data[1]

        # NHWC -> NCHW, convert to tensors, move to GPU (Variable is
        # obsolete since PyTorch 0.4); no gradients are needed during
        # validation.
        data = torch.from_numpy(data.transpose((0, 3, 1, 2))).cuda()
        target = torch.from_numpy(target.transpose((0, 3, 1, 2))).cuda()

        with torch.no_grad():
            output = net(data)

        target = target.float()  # keep the one-hot target as float for cce_soft
        _, target_indices = torch.max(target, 1)
        _, output_indices = torch.max(output, 1)
        flattened_output = output_indices.view(-1)
        flattened_target = target_indices.view(-1)

        if loss_type == 'cce_soft':
            loss = cce_soft(output, target, ignore_label=ind_ignore)
        else:
            loss = loss_function(output, target_indices)

        cm = confusion_matrix(cm,
                              flattened_output.data.cpu().numpy(),
                              flattened_target.data.cpu().numpy(), n_classes)
        test_loss += loss.item()
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)

        progress_bar(batch_idx, val_iter.nbatches,
                     'Val loss: %.3f' % (test_loss / (batch_idx + 1)))

        del output, loss, flattened_output, output_indices

    jaccard_per_class, jaccard, accuracy = compute_metrics(cm)

    metrics_string = print_metrics(test_loss, nVal, n_classes,
                                   jaccard_per_class, jaccard, accuracy)
    print(metrics_string)

    es_step, best_jacc = save_checkpoints(jaccard, net, epoch, opt,
                                          experiment_dir, best_jacc, es_step)

    # Early stopping
    if es_step >= max_patience:
        print('Early stopping! Max mean jaccard: ' + str(best_jacc))
        code = 1
    return (es_step, best_jacc, code, jaccard, jaccard_per_class, accuracy,
            test_loss / nVal)
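
The val_() routine delegates checkpointing and patience tracking to a
repo-local save_checkpoints helper. A minimal sketch consistent with its call
site here and with the 'best_jaccard.t7' checkpoint loaded in Example No. 6
(the body is an assumption):

import os
import torch

def save_checkpoints(jaccard, net, epoch, opt, experiment_dir, best_jacc,
                     es_step):
    # Reset the early-stopping counter and save when the mean Jaccard
    # improves; otherwise let the patience counter grow.
    if jaccard > best_jacc:
        state = {'net': net.state_dict(),
                 'optimizer': opt.state_dict(),
                 'epoch': epoch,
                 'jaccard': jaccard}
        ckpt_dir = os.path.join(experiment_dir, 'checkpoint')
        os.makedirs(ckpt_dir, exist_ok=True)
        torch.save(state, os.path.join(ckpt_dir, 'best_jaccard.t7'))
        return 0, jaccard
    return es_step + 1, best_jacc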