Example no. 1
0
def norm_auc(fpr, tpr, fpr_threshold=1):
    """Compute the partial AUC restricted to FPR <= fpr_threshold.

    Returns a tuple ``(scaled_pauc, normalized_pauc)`` where the first value
    is the ROI area rescaled to [0, 1] by the ROI width, and the second is a
    McClish-style normalization against the area a random classifier would
    accumulate inside the ROI.
    """
    total_area = calc_auc(fpr, tpr)
    # ROI = Region Of Interest, i.e. FPR in [0, fpr_threshold].
    # Approximate the area inside the ROI by subtracting the tail rectangle
    # (height tpr[-1]) that lies to the right of the threshold.
    area_in_roi = total_area - tpr[-1] * (1 - fpr_threshold)
    # Area the chance diagonal accumulates inside the ROI (triangle).
    chance_area = (fpr_threshold ** 2) / 2
    normalized = (area_in_roi - chance_area) / (fpr_threshold - chance_area)
    return area_in_roi / fpr_threshold, normalized
Example no. 2
0
def summary(model, loader, n_classes):
    """Run inference over ``loader`` and collect per-slide results.

    Assumes one slide per batch: each batch fills one row of the probability
    array and ``label.item()`` is called on a scalar label tensor.

    Returns:
        (patient_results, test_error, auc, acc_logger) where patient_results
        maps slide_id -> {'slide_id', 'prob', 'label'}.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    model.eval()
    test_loss = 0.
    test_error = 0.

    num_slides = len(loader)
    all_probs = np.zeros((num_slides, n_classes))
    all_labels = np.zeros(num_slides)

    slide_ids = loader.dataset.slide_data['slide_id']
    patient_results = {}

    for batch_idx, (data, label) in enumerate(loader):
        data, label = data.to(device), label.to(device)
        slide_id = slide_ids.iloc[batch_idx]
        with torch.no_grad():
            logits, Y_prob, Y_hat, _, _ = model(data)

        acc_logger.log(Y_hat, label)
        probs = Y_prob.cpu().numpy()
        all_probs[batch_idx] = probs
        all_labels[batch_idx] = label.item()

        patient_results[slide_id] = {
            'slide_id': np.array(slide_id),
            'prob': probs,
            'label': label.item()
        }
        test_error += calculate_error(Y_hat, label)

    test_error /= len(loader)

    if n_classes == 2:
        aucs = []
        auc = roc_auc_score(all_labels, all_probs[:, 1])
    else:
        # Macro-average one-vs-rest AUC; classes absent from the labels
        # contribute NaN and are ignored by nanmean.
        aucs = []
        binary_labels = label_binarize(all_labels,
                                       classes=list(range(n_classes)))
        for class_idx in range(n_classes):
            if class_idx not in all_labels:
                aucs.append(float('nan'))
                continue
            fpr, tpr, _ = roc_curve(binary_labels[:, class_idx],
                                    all_probs[:, class_idx])
            aucs.append(calc_auc(fpr, tpr))

        auc = np.nanmean(np.array(aucs))

    return patient_results, test_error, auc, acc_logger
Example no. 3
0
def validate(cur,
             epoch,
             model,
             loader,
             n_classes,
             early_stopping=None,
             writer=None,
             loss_fn=None,
             results_dir=None):
    """Run one validation epoch for a joint class/site prediction model.

    Assumes one sample per batch: each batch fills one row of the
    pre-allocated probability arrays and ``.item()`` is called on the scalar
    label/site tensors -- TODO confirm batch size is 1 at the call site.

    Args:
        cur: Fold/run index; only used to name the early-stopping checkpoint.
        epoch: Current epoch (TensorBoard x-axis and early-stopping input).
        model: Callable returning a dict with 'logits', 'Y_prob', 'Y_hat',
            'site_logits', 'site_prob' and 'site_hat' entries.
        loader: DataLoader yielding (data, label, site, sex) tuples.
        n_classes: Number of classes of the main classification task.
        early_stopping: Optional object called with the class validation
            loss; expected to expose an ``early_stop`` flag afterwards.
        writer: Optional TensorBoard SummaryWriter.
        loss_fn: Loss applied to both the class and the site logits.
        results_dir: Checkpoint directory; required if early_stopping is set.

    Returns:
        bool: True if early stopping triggered this epoch, else False.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    cls_logger = Accuracy_Logger(n_classes=n_classes)
    site_logger = Accuracy_Logger(n_classes=2)  # site prediction is binary
    cls_val_error = 0.
    cls_val_loss = 0.
    site_val_error = 0.
    site_val_loss = 0.

    # One row per batch; filled in order for the AUC computations below.
    cls_probs = np.zeros((len(loader), n_classes))
    cls_labels = np.zeros(len(loader))
    site_probs = np.zeros((len(loader), 2))
    site_labels = np.zeros(len(loader))

    with torch.no_grad():
        for batch_idx, (data, label, site, sex) in enumerate(loader):
            data = data.to(device)
            label = label.to(device)
            site = site.to(device)
            sex = sex.float().to(device)

            results_dict = model(data, sex)
            logits, Y_prob, Y_hat = results_dict['logits'], results_dict[
                'Y_prob'], results_dict['Y_hat']
            site_logits, site_prob, site_hat = results_dict[
                'site_logits'], results_dict['site_prob'], results_dict[
                    'site_hat']
            del results_dict

            cls_logger.log(Y_hat, label)
            site_logger.log(site_hat, site)

            cls_loss = loss_fn(logits, label)
            site_loss = loss_fn(site_logits, site)
            loss = cls_loss * 0.75 + site_loss * 0.25  # fixed 3:1 task weighting
            cls_loss_value = cls_loss.item()
            site_loss_value = site_loss.item()

            cls_probs[batch_idx] = Y_prob.cpu().numpy()
            cls_labels[batch_idx] = label.item()

            site_probs[batch_idx] = site_prob.cpu().numpy()
            site_labels[batch_idx] = site.item()

            cls_val_loss += cls_loss_value
            site_val_loss += site_loss_value
            cls_error = calculate_error(Y_hat, label)
            cls_val_error += cls_error
            site_error = calculate_error(site_hat, site)
            site_val_error += site_error

    # Average the accumulated per-batch losses/errors over the epoch.
    cls_val_error /= len(loader)
    cls_val_loss /= len(loader)
    site_val_error /= len(loader)
    site_val_loss /= len(loader)

    if n_classes == 2:
        cls_auc = roc_auc_score(cls_labels, cls_probs[:, 1])
        cls_aucs = []
    else:
        # Macro-average one-vs-rest AUC; classes absent from the validation
        # labels contribute NaN and are ignored by nanmean.
        cls_aucs = []
        binary_labels = label_binarize(cls_labels,
                                       classes=[i for i in range(n_classes)])
        for class_idx in range(n_classes):
            if class_idx in cls_labels:
                fpr, tpr, _ = roc_curve(binary_labels[:, class_idx],
                                        cls_probs[:, class_idx])
                cls_aucs.append(calc_auc(fpr, tpr))
            else:
                cls_aucs.append(float('nan'))

        cls_auc = np.nanmean(np.array(cls_aucs))

    site_auc = roc_auc_score(site_labels, site_probs[:, 1])

    if writer:
        writer.add_scalar('val/cls_loss', cls_val_loss, epoch)
        writer.add_scalar('val/cls_auc', cls_auc, epoch)
        writer.add_scalar('val/cls_error', cls_val_error, epoch)
        writer.add_scalar('val/site_loss', site_val_loss, epoch)
        writer.add_scalar('val/site_auc', site_auc, epoch)
        writer.add_scalar('val/site_error', site_val_error, epoch)

    print(
        '\nVal Set, cls val_loss: {:.4f}, cls val_error: {:.4f}, cls auc: {:.4f}'
        .format(cls_val_loss, cls_val_error, cls_auc) +
        ' site val_loss: {:.4f}, site val_error: {:.4f}, site auc: {:.4f}'.
        format(site_val_loss, site_val_error, site_auc))
    # Per-class breakdown of the main task.
    for i in range(n_classes):
        acc, correct, count = cls_logger.get_summary(i)
        print('class {}: tpr {}, correct {}/{}'.format(i, acc, correct, count))
        if writer:
            writer.add_scalar('val/class_{}_tpr'.format(i), acc, epoch)

    # Per-class breakdown of the (binary) site task.
    for i in range(2):
        acc, correct, count = site_logger.get_summary(i)
        print('site {}: tpr {}, correct {}/{}'.format(i, acc, correct, count))
        if writer:
            writer.add_scalar('val/site_{}_acc'.format(i), acc, epoch)

    if early_stopping:
        assert results_dir
        # Early stopping tracks only the class-task validation loss.
        early_stopping(epoch,
                       cls_val_loss,
                       model,
                       ckpt_name=os.path.join(
                           results_dir, "s_{}_checkpoint.pt".format(cur)))

        if early_stopping.early_stop:
            print("Early stopping")
            return True

    return False
Example no. 4
0
def validate_clam(cur, epoch, model, loader, n_classes, early_stopping=None,
                  writer=None, loss_fn=None, results_dir=None):
    """Run one validation epoch for a CLAM model with instance-level eval.

    Assumes one slide per batch: each batch fills one row of the probability
    array and ``label.item()`` is called on a scalar label tensor.

    Fixes over the previous revision: removed the dead locals
    ``val_inst_acc`` and ``sample_size`` (both assigned but never read).

    Args:
        cur: Fold/run index; only used to name the early-stopping checkpoint.
        epoch: Current epoch (TensorBoard x-axis and early-stopping input).
        model: CLAM model; with ``instance_eval=True`` it also returns an
            instance_dict with 'instance_loss', 'inst_preds', 'inst_labels'.
        loader: DataLoader yielding (data, label) pairs.
        n_classes: Number of slide-level classes.
        early_stopping: Optional object called with the validation loss;
            expected to expose an ``early_stop`` flag afterwards.
        writer: Optional TensorBoard SummaryWriter.
        loss_fn: Slide-level classification loss.
        results_dir: Checkpoint directory; required if early_stopping is set.

    Returns:
        bool: True if early stopping triggered this epoch, else False.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    inst_logger = Accuracy_Logger(n_classes=n_classes)
    val_loss = 0.
    val_error = 0.

    val_inst_loss = 0.
    inst_count = 0

    # One row per slide; filled in order for the AUC computation below.
    prob = np.zeros((len(loader), n_classes))
    labels = np.zeros(len(loader))
    with torch.no_grad():
        for batch_idx, (data, label) in enumerate(loader):
            data, label = data.to(device), label.to(device)
            # instance_eval=True makes the model additionally return
            # instance-level (clustering) predictions and loss.
            logits, Y_prob, Y_hat, _, instance_dict = model(data, label=label, instance_eval=True)
            acc_logger.log(Y_hat, label)

            loss = loss_fn(logits, label)
            val_loss += loss.item()

            instance_loss = instance_dict['instance_loss']
            inst_count += 1
            val_inst_loss += instance_loss.item()

            inst_preds = instance_dict['inst_preds']
            inst_labels = instance_dict['inst_labels']
            inst_logger.log_batch(inst_preds, inst_labels)

            prob[batch_idx] = Y_prob.cpu().numpy()
            labels[batch_idx] = label.item()

            error = calculate_error(Y_hat, label)
            val_error += error

    val_error /= len(loader)
    val_loss /= len(loader)

    if n_classes == 2:
        auc = roc_auc_score(labels, prob[:, 1])
        aucs = []
    else:
        # Macro-average one-vs-rest AUC; classes absent from the validation
        # labels contribute NaN and are ignored by nanmean.
        aucs = []
        binary_labels = label_binarize(labels, classes=[i for i in range(n_classes)])
        for class_idx in range(n_classes):
            if class_idx in labels:
                fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], prob[:, class_idx])
                aucs.append(calc_auc(fpr, tpr))
            else:
                aucs.append(float('nan'))

        auc = np.nanmean(np.array(aucs))

    print('\nVal Set, val_loss: {:.4f}, val_error: {:.4f}, auc: {:.4f}'.format(val_loss, val_error, auc))
    if inst_count > 0:
        val_inst_loss /= inst_count
        # NOTE(review): the instance summary is hard-coded to 2 clustering
        # classes (in/out of the target class) even though inst_logger was
        # built with n_classes -- confirm against Accuracy_Logger usage.
        for i in range(2):
            acc, correct, count = inst_logger.get_summary(i)
            print('class {} clustering acc {}: correct {}/{}'.format(i, acc, correct, count))

    if writer:
        writer.add_scalar('val/loss', val_loss, epoch)
        writer.add_scalar('val/auc', auc, epoch)
        writer.add_scalar('val/error', val_error, epoch)
        writer.add_scalar('val/inst_loss', val_inst_loss, epoch)

    for i in range(n_classes):
        acc, correct, count = acc_logger.get_summary(i)
        print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))

        if writer and acc is not None:
            writer.add_scalar('val/class_{}_acc'.format(i), acc, epoch)

    if early_stopping:
        assert results_dir
        early_stopping(epoch, val_loss, model,
                       ckpt_name=os.path.join(results_dir, "s_{}_checkpoint.pt".format(cur)))

        if early_stopping.early_stop:
            print("Early stopping")
            return True

    return False