Example #1
    def test_value(self, input_data, expected_value):
        # Compute the metric from the parameterized inputs and compare it
        # against the expected value.
        result = compute_confusion_metric(**input_data)
        self.assertEqual(expected_value, result)
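The input_data/expected_value parameters indicate this method is driven by a parameterization decorator. A minimal sketch of how such a case might be wired up, assuming the parameterized package and that compute_confusion_metric behaves like the functional metric shipped in older MONAI releases; TEST_CASE_1 is a hypothetical fixture:

import unittest

import torch
from parameterized import parameterized
from monai.metrics import compute_confusion_metric  # older MONAI releases

# Hypothetical fixture: the thresholded predictions match the labels exactly,
# so the expected sensitivity is 1.0.
TEST_CASE_1 = [
    {
        "y_pred": torch.tensor([[0.9], [0.1]]),
        "y": torch.tensor([[1.0], [0.0]]),
        "metric_name": "sensitivity",
    },
    1.0,
]


class TestComputeConfusionMetric(unittest.TestCase):

    @parameterized.expand([TEST_CASE_1])
    def test_value(self, input_data, expected_value):
        result = compute_confusion_metric(**input_data)
        self.assertEqual(expected_value, result)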
Example #2
import time
from typing import Any, Dict

import numpy as np
import torch
from sklearn.metrics import r2_score

# compute_roc_auc / compute_confusion_metric match MONAI's functional metrics
# (available in older releases); regression_accuracy is assumed to be a
# project-local helper.
from monai.metrics import compute_confusion_metric, compute_roc_auc


def engine(loader: Any, checkpoint: Dict[str, Any], batchsize: int,
           classes: int, reg_args: Any, is_train: bool):

    overall_loss = []
    all_preds = torch.zeros((0, classes)).cuda()
    all_labels = torch.zeros((0, classes)).cuda()
    start = time.time()
    sigmoid = torch.nn.Sigmoid()

    # Track gradients only while training.
    with torch.set_grad_enabled(is_train):
        for iter_num, data in enumerate(loader):
            imgs = data[0].cuda().float()
            labels = data[1].cuda().float()

            predicted = checkpoint['model'](imgs)
            loss = checkpoint['criterion'](predicted, labels)

            if is_train:
                loss.backward()
                checkpoint['optimizer'].step()
                checkpoint['optimizer'].zero_grad()

            overall_loss.append(float(loss.item()))
            # Accumulate detached predictions and labels for epoch-level metrics.
            all_preds = torch.cat((predicted.detach(), all_preds))
            all_labels = torch.cat((labels.detach(), all_labels))

            speed = batchsize * iter_num // (time.time() - start)
            print('Epoch:',
                  checkpoint['epoch'],
                  'Iter:',
                  iter_num,
                  'Running loss:',
                  round(np.mean(overall_loss), 3),
                  'Speed:',
                  int(speed),
                  'img/s',
                  end='\r',
                  flush=True)

    loss = np.mean(overall_loss)
    if reg_args is None:
        # Classification branch: ROC AUC plus sensitivity/specificity.
        rmetric = compute_roc_auc(all_preds, all_labels, other_act=sigmoid)
        sens = compute_confusion_metric(all_preds,
                                        all_labels,
                                        activation=sigmoid,
                                        metric_name='sensitivity')
        spec = compute_confusion_metric(all_preds,
                                        all_labels,
                                        activation=sigmoid,
                                        metric_name='specificity')
        summary = (
            f'Epoch Summary- Loss:{round(loss, 3)}  ROC:{round(rmetric * 100, 1)} '
            +
            f'Sensitivity:{round(100 * sens, 1)}  Specificity: {round(100 * spec, 1)}'
        )
    else:
        error_range = reg_args['error_range']
        # Map normalised values back to the original scale before scoring.
        all_labels = [((x * reg_args['max']) + reg_args['min']).item()
                      for x in all_labels]
        all_preds = [((x * reg_args['max']) + reg_args['min']).item()
                     for x in all_preds]
        rmetric = r2_score(all_labels, all_preds)
        a1 = regression_accuracy(all_labels, all_preds, error_range)
        a2 = regression_accuracy(all_labels, all_preds, error_range * 2)
        summary = (
            f'Epoch Summary- Loss:{round(loss, 3)}  R2:{round(rmetric, 1)} ' +
            f'Accuracy at {error_range}:{round(100 * a1, 1)} ' +
            f'Accuracy at {(error_range * 2)}:{round(100 * a2, 1)}')

    print(summary)
    # Persist the branch-appropriate epoch summary; sens/spec only exist on
    # the classification branch, so the summary string is written instead.
    with open('/mnt/out/summary.txt', 'w') as f:
        f.write(summary)

    return loss, rmetric, summary
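The checkpoint dict is expected to carry the model, criterion, optimizer, and current epoch. A minimal sketch of a training call, with a hypothetical linear model and random tensors standing in for the real data pipeline (the function itself assumes a CUDA device and an existing /mnt/out directory):

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical stand-ins for the real model and data.
model = torch.nn.Linear(16, 3).cuda()
dataset = TensorDataset(torch.randn(64, 16),
                        torch.randint(0, 2, (64, 3)).float())
loader = DataLoader(dataset, batch_size=8)

checkpoint = {
    'model': model,
    'criterion': torch.nn.BCEWithLogitsLoss(),
    'optimizer': torch.optim.Adam(model.parameters()),
    'epoch': 0,
}

# reg_args=None selects the classification branch (ROC AUC, sensitivity,
# specificity); a dict with 'min', 'max', and 'error_range' keys selects
# the regression branch instead.
loss, rmetric, summary = engine(loader, checkpoint, batchsize=8,
                                classes=3, reg_args=None, is_train=True)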
Example #3
    def test_value(self, input_data, expected_value):
        # Compare against the expected value with a relative tolerance, which
        # is more robust than exact equality for floating-point metrics.
        result = compute_confusion_metric(**input_data)
        np.testing.assert_allclose(expected_value, result, rtol=1e-7)
Example #4
import time
from typing import Any, Dict

import numpy as np
import torch
from scipy.stats import spearmanr
from sklearn.metrics import r2_score

# As above, compute_roc_auc / compute_confusion_metric match MONAI's
# functional metrics (older releases); regression_accuracy is assumed to be
# a project-local helper.
from monai.metrics import compute_confusion_metric, compute_roc_auc


def engine(loader: Any, checkpoint: Dict[str, Any], batchsize: int,
           classes: int, variable_type: str, error_range: int, is_train: bool):

    overall_loss = []
    all_preds = torch.zeros((0, classes)).cuda()
    all_labels = torch.zeros((0, classes)).cuda()
    labels_onehot = torch.FloatTensor(batchsize, classes).cuda()
    start = time.time()
    sigmoid = torch.nn.Sigmoid()

    with torch.set_grad_enabled(is_train):
        for iter_num, data in enumerate(loader):
            # name = data[0]
            imgs = data[1].cuda().float()
            labels = data[2].cuda()

            predicted = checkpoint['model'](imgs)
            loss = checkpoint['criterion'](predicted, labels)
            predicted, labels = predicted.detach(), labels.detach()

            if is_train:
                loss.backward()
                checkpoint['optimizer'].step()
                checkpoint['optimizer'].zero_grad()

            overall_loss.append(float(loss.item()))
            all_preds = torch.cat((predicted, all_preds))

            if variable_type == 'categorical':
                # Reuse (or resize) the one-hot buffer, then scatter 1s at
                # the integer class indices.
                if labels_onehot.shape[0] != labels.shape[0]:
                    labels_onehot = torch.FloatTensor(labels.shape[0],
                                                      classes).cuda()
                labels_onehot.zero_()
                labels_onehot.scatter_(1, labels.unsqueeze(dim=1), 1)
                all_labels = torch.cat((labels_onehot.float(), all_labels))
                predicted = predicted.max(dim=1)[1]  # for correct printing
            else:
                all_labels = torch.cat((labels, all_labels))

            speed = batchsize * iter_num // (time.time() - start)
            print('Epoch:',
                  checkpoint['epoch'],
                  'Iter:',
                  iter_num,
                  'Pred:',
                  round(predicted.float().mean().item(), 3),
                  'Label:',
                  round(labels.float().mean().item(), 3),
                  'Loss:',
                  round(np.mean(overall_loss), 3),
                  'Speed:',
                  int(speed),
                  'img/s',
                  end='\r',
                  flush=True)

    loss = np.mean(overall_loss)
    if variable_type == 'continuous':
        all_labels, all_preds = all_labels.cpu(), all_preds.cpu()
        rmetric = r2_score(all_labels, all_preds)
        acc = regression_accuracy(all_labels, all_preds, error_range)
        spear, pvalue = spearmanr(all_preds, all_labels)
        summary = (
            f'Epoch Summary - Loss:{round(loss, 3)} Spearman:{round(spear, 2)} PValue:{round(pvalue, 3)} '
            +
            f'R2:{round(rmetric, 1)} Accuracy(at {error_range}):{round(100 * acc, 1)}'
        )

    else:
        rmetric = compute_roc_auc(all_preds, all_labels, other_act=sigmoid)
        sens = compute_confusion_metric(all_preds,
                                        all_labels,
                                        activation=sigmoid,
                                        metric_name='sensitivity')
        spec = compute_confusion_metric(all_preds,
                                        all_labels,
                                        activation=sigmoid,
                                        metric_name='specificity')
        summary = (
            f'Epoch Summary- Loss:{round(loss, 3)}  ROC:{round(rmetric * 100, 1)} '
            +
            f'Sensitivity:{round(100 * sens, 1)}  Specificity: {round(100 * spec, 1)}'
        )

    print(summary)
    return loss, rmetric, summary
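The categorical branch one-hot encodes integer class labels in place with scatter_, reusing a preallocated buffer so no new tensor is allocated per batch. A standalone sketch of that encoding step, on CPU for illustration:

import torch

labels = torch.tensor([2, 0, 1])   # integer class indices, shape (batch,)
classes = 3

# Zero the reusable buffer, then write a 1 at each row's class index.
labels_onehot = torch.zeros(labels.shape[0], classes)
labels_onehot.zero_()
labels_onehot.scatter_(1, labels.unsqueeze(dim=1), 1)
# labels_onehot is now:
# tensor([[0., 0., 1.],
#         [1., 0., 0.],
#         [0., 1., 0.]])

# torch.nn.functional.one_hot gives the same result without buffer reuse.
assert torch.equal(labels_onehot,
                   torch.nn.functional.one_hot(labels, classes).float())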