def do_train_dirac_one_image(verbose=True):
    def dirac(cur_batch, nb_of_batch):
        # KL weighting used by BBBLoss: put the whole weight on the special batch
        # of index -1 (the single-image pass) and none on the regular batches.
        if cur_batch == -1:
            return 1
        return 0

    bay_net = GaussianClassifier(rho, number_of_classes=10)
    bay_net.to(device)
    loss_bbb = BBBLoss(bay_net, criterion, dirac)
    optimizer = optim.Adam(bay_net.parameters())
    observables = AccuracyAndUncertainty()
    train_bayesian_modular_with_one_different(
        bay_net,
        optimizer,
        loss_bbb,
        observables,
        number_of_epochs=nb_of_epochs,
        trainloader=trainloader,
        device=device,
        verbose=verbose,
    )

    return eval_bayesian(bay_net,
                         evalloader,
                         nb_of_tests,
                         device=device,
                         verbose=verbose)
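
# For comparison, a sketch (not part of the example above) of the geometric KL
# weighting from Blundell et al. (2015), written for the same (cur_batch, nb_of_batch)
# signature that BBBLoss expects here. It assumes cur_batch is 0-indexed, as in the
# training loop, and gives the special index -1 no KL weight.
def blundell_weighting(cur_batch, nb_of_batch):
    if cur_batch == -1:
        return 0
    return 2 ** (nb_of_batch - 1 - cur_batch) / (2 ** nb_of_batch - 1)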
Example #2
    def compute_val(self, model, valloader, device, **kwargs):
        """
        Logs we want to keep on the validation set
        Args:
            val_accuracy (float): accuracy on the validation set
            val_outputs (torch.Tensor): size = (number_of_tests, batch_size, number_of_classes):
            output of the evaluation on the validation set
        """
        if not self.validation_logging:
            self.validation_logging = True
        assert 'number_of_tests' in kwargs, 'give number of tests for bayesian evaluation'
        val_accuracy, val_outputs = eval_bayesian(
            model,
            valloader,
            number_of_tests=kwargs['number_of_tests'],
            device=device,
            verbose=False)

        if val_accuracy > self.max_val_acc:
            self.max_val_acc = val_accuracy
            self.max_weights = model.state_dict()

        self.logs['val_accuracy'] = val_accuracy
        (
            self.logs['val_uncertainty_vr'],
            self.logs['val_uncertainty_pe'],
            self.logs['val_uncertainty_mi'],
        ) = get_all_uncertainty_measures_bayesian(val_outputs)
        self.add_to_history([
            'val_accuracy',
            'val_uncertainty_vr',
            'val_uncertainty_pe',
            'val_uncertainty_mi',
        ])

        for key in [
                'val_uncertainty_vr', 'val_uncertainty_pe',
                'val_uncertainty_mi'
        ]:
            self.logs[key] = self.logs[key].mean()
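
# A minimal sketch (an assumption, not the project's implementation) of the three
# Bayesian uncertainty measures logged above, computed from val_outputs of shape
# (number_of_tests, batch_size, number_of_classes) holding softmax probabilities:
# variation-ratio, predictive entropy and mutual information.
import torch

def uncertainty_measures_sketch(outputs):
    number_of_tests = outputs.size(0)
    votes = outputs.argmax(dim=2)                                      # (number_of_tests, batch_size)
    modal_counts = torch.stack([votes[:, b].bincount().max() for b in range(votes.size(1))])
    variation_ratio = 1. - modal_counts.float() / number_of_tests      # disagreement between sampled nets

    mean_probs = outputs.mean(dim=0)                                   # (batch_size, number_of_classes)
    predictive_entropy = -(mean_probs * (mean_probs + 1e-12).log()).sum(dim=1)
    expected_entropy = -(outputs * (outputs + 1e-12).log()).sum(dim=2).mean(dim=0)
    mutual_information = predictive_entropy - expected_entropy
    return variation_ratio, predictive_entropy, mutual_information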
def do_train_ce(verbose=True):
    bay_net = GaussianClassifier(rho, number_of_classes=10)
    bay_net.to(device)
    criterion = nn.CrossEntropyLoss()
    loss_bbb = BaseLoss(criterion)
    optimizer = optim.Adam(bay_net.parameters())
    observables = AccuracyAndUncertainty()
    train_bayesian_modular(
        bay_net,
        optimizer,
        loss_bbb,
        observables,
        number_of_epochs=nb_of_epochs,
        trainloader=trainloader,
        device=device,
        verbose=verbose,
    )

    return eval_bayesian(bay_net,
                         evalloader,
                         nb_of_tests,
                         device=device,
                         verbose=verbose)
Example #4
def get_selection_threshold_one_unc(
        bay_net,
        trainloader,
        risk,
        delta,
        uncertainty_function,
        number_of_tests,
        verbose=False,
        device='cpu',
):
    """

    Args:
        bay_net (torch.nn.Module child):
        arguments (dict): must contain keys:
        risk (float): highest error we accept
        uncertainty_function (function): function to get uncertainty. Must be in src.uncertainty_measures

    Returns:
        float: threshold
    """

    true_labels, all_outputs_train = eval_bayesian(
        bay_net,
        trainloader,
        number_of_tests=number_of_tests,
        return_accuracy=False,
        device=device,
        verbose=verbose,
    )

    unc = uncertainty_function(all_outputs_train)
    labels_predicted = get_predictions_from_multiple_tests(all_outputs_train).int()

    correct_preds = (labels_predicted == true_labels)
    threshold, _ = bound(risk, delta, unc, correct_preds,)
    return threshold
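
# Hypothetical usage sketch: the numbers and the variation-ratio lambda are
# illustrative assumptions, not part of the example above.
threshold = get_selection_threshold_one_unc(
    bay_net,
    trainloader,
    risk=0.05,           # accept at most 5% selective error
    delta=0.01,          # the bound should hold with probability 1 - delta
    uncertainty_function=lambda outputs: get_all_uncertainty_measures_bayesian(outputs)[0],
    number_of_tests=20,
    device='cpu',
)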
Example #5
            random_idx = random_idx[:number_of_tests]
            all_outputs_seen = all_outputs_seen[random_idx]
            all_outputs_unseen = all_outputs_unseen[random_idx]
        elif os.path.exists(path_to_outputs / f'{number_of_tests}' / f'{exp}/true_labels_seen.pt'):
            true_labels_seen = torch.load(path_to_outputs / f'{number_of_tests}' / f'{exp}/true_labels_seen.pt')
            all_outputs_seen = torch.load(path_to_outputs / f'{number_of_tests}' / f'{exp}/all_outputs_seen.pt')
            all_outputs_unseen = torch.load(path_to_outputs / f'{number_of_tests}' / f'{exp}/all_outputs_unseen.pt')
        else:
            (path_to_outputs / f'{number_of_tests}' / f'{exp}').mkdir(exist_ok=True, parents=True)
            evalloader_seen = su.get_evalloader_seen(arguments)
            # BE CAREFUL: in the paper, the process is tested on the entirety of the unseen classes
            evalloader_unseen = su.get_evalloader_unseen(arguments)
            true_labels_seen, all_outputs_seen = e.eval_bayesian(
                model=bay_net_trained,
                evalloader=evalloader_seen,
                number_of_tests=number_of_tests,
                return_accuracy=False,
                device=device,
                verbose=True,
            )

            _, all_outputs_unseen = e.eval_bayesian(
                model=bay_net_trained,
                evalloader=evalloader_unseen,
                number_of_tests=number_of_tests,
                device=device,
                verbose=True,
            )

            torch.save(true_labels_seen, path_to_outputs / f'{number_of_tests}' / f'{exp}/true_labels_seen.pt')
            torch.save(all_outputs_seen, path_to_outputs / f'{number_of_tests}' / f'{exp}/all_outputs_seen.pt')
            torch.save(all_outputs_unseen, path_to_outputs / f'{number_of_tests}' / f'{exp}/all_outputs_unseen.pt')
Example #6
def main(
        exp_nbs=exp_nbs,
        path_to_exps=path_to_exps,
        path_to_results=save_path,
        nb_of_runs=nb_of_runs,
        nb_of_tests=nb_of_tests,
        device='cpu',
        **kwargs,
):
    """
    Evaluates the accuracy for the experiments given. Writes it in a csv.
    Args:
        exp_nbs (list): list of int or str. Experiments to evaluate
        path_to_exps (str): path to the experiments
        path_to_results (str): path to the directory to save the results
        nb_of_runs (int): number of times to run the same experiment for confidence interval
        nb_of_tests (int): number of tests for inference for each prediction
        device (torch.device): gpu or cpu, device to compute on
        **kwargs: args to be able to put any arguments in our functions and not raise an error.

    """

    save_path = pathlib.Path(path_to_results)

    if save_output:
        save_path.mkdir(exist_ok=True, parents=True)

    if not os.path.exists(save_path / 'all_accs_uncs_mean.pkl'):
        all_accs_uncs_mean = pd.DataFrame(columns=['exp_nb', 'group_nb', 'rho', 'std_prior', 'loss_type', 'number_of_tests'])
    else:
        all_accs_uncs_mean = load_from_file(save_path / 'all_accs_uncs_mean.pkl', )
        all_accs_uncs_mean.to_csv(save_path / 'all_uncs_mean_backup.csv')

    for run_idx in range(nb_of_runs):
        for exp in exp_nbs:
            print(f'Run {run_idx + 1}, Exp {exp}, computing accuracy and uncertainty...')
            bay_net_trained, arguments, group_nb = get_trained_model_and_args_and_groupnb(exp, path_to_exps)
            evalloader_seen = get_evalloader_seen(arguments)
            evalloader_unseen = get_evalloader_unseen(arguments)

            # A deterministic network returns the same output on every forward pass,
            # so one test is enough; Bayesian networks are sampled nb_of_tests times.
            eval_acc, all_outputs_seen = eval_bayesian(
                bay_net_trained,
                evalloader_seen,
                number_of_tests=nb_of_tests if arguments.get('rho', 'determinist') != 'determinist' else 1,
                return_accuracy=True,
                device=device,
                verbose=True,
            )

            _, all_outputs_unseen = eval_bayesian(
                bay_net_trained,
                evalloader_unseen,
                number_of_tests=nb_of_tests if arguments.get('rho', 'determinist') != 'determinist' else 1,
                return_accuracy=True,
                device=device,
                verbose=True,
            )

            determinist = (
                arguments.get('determinist', False) or
                arguments.get('rho', 'determinist') == 'determinist'
            )

            if determinist:
                get_unc_func = get_all_uncertainty_measures_not_bayesian
                unc_names = ['us', 'pe']
            else:
                get_unc_func = get_all_uncertainty_measures_bayesian
                unc_names = ['vr', 'pe', 'mi']

            all_uncs_mean_seen = get_unc_func(all_outputs_seen)
            all_uncs_mean_unseen = get_unc_func(all_outputs_unseen)

            for unc_name, unc_seen, unc_unseen in zip(unc_names, all_uncs_mean_seen, all_uncs_mean_unseen):

                all_accs_uncs_mean = all_accs_uncs_mean.append(pd.DataFrame.from_dict({
                    'exp_nb': [exp],
                    'group_nb': [group_nb],
                    'split_labels': [arguments.get('split_labels', 10)],
                    'trainset': [arguments.get('trainset', 'mnist')],
                    'rho': [arguments.get('rho', 'determinist')],
                    'std_prior': [arguments.get('std_prior', -1)],
                    'epoch': [arguments['epoch']],
                    'loss_type': [arguments.get('loss_type', 'criterion')],
                    'number_of_tests': [nb_of_tests],
                    'eval_acc': [eval_acc],
                    'unc_name': [unc_name],
                    'unc_seen': [unc_seen.mean().item()],
                    'unc_unseen': [unc_unseen.mean().item()],
                    'ratio': [unc_unseen.mean().item()/unc_seen.mean().item()],
                }))

            all_accs_uncs_mean.exp_nb = all_accs_uncs_mean.exp_nb.astype('int')

            if save_output:
                all_accs_uncs_mean.to_csv(save_path / 'all_accs_uncs_mean.csv')
                all_accs_uncs_mean.to_pickle(save_path / 'all_accs_uncs_mean.pkl')
    optimizer,
    loss,
    observables,
    number_of_tests=number_of_tests,
    number_of_epochs=epoch,
    trainloader=trainloader,
    # valloader=valloader,
    # output_dir_tensorboard='./output',
    device=device,
    verbose=True,
)

true_train_labels, all_outputs_train = eval_bayesian(
    bay_net,
    trainloader,
    return_accuracy=False,
    number_of_tests=number_of_tests,
    device=device,
)

train_vr, train_pe, train_mi = get_all_uncertainty_measures_bayesian(
    all_outputs_train)

true_eval_labels, all_outputs_eval = eval_bayesian(
    bay_net,
    evalloader,
    return_accuracy=False,
    number_of_tests=number_of_tests,
    device=device,
)
eval_vr, eval_pe, eval_mi = get_all_uncertainty_measures_bayesian(all_outputs_eval)
Example #8
    loss,
    observables,
    number_of_tests=number_of_tests,
    number_of_epochs=epoch,
    trainloader=trainloader_seen,
    # valloader=valloader,
    # output_dir_tensorboard='./output',
    device=device,
    verbose=True,
)

# Get uncertainty on train
true_labels_train, all_outputs_train = eval_bayesian(
    bay_net,
    trainloader_seen,
    return_accuracy=False,
    number_of_tests=number_of_tests,
    device=device,
)
train_vr, train_pe, train_mi = get_all_uncertainty_measures_bayesian(all_outputs_train)

# Get uncertainty on seen
true_seen_labels, all_outputs_seen = eval_bayesian(
    bay_net,
    evalloader_seen,
    return_accuracy=False,
    number_of_tests=number_of_tests,
    device=device,
)
# eval_vr_seen, eval_pe_seen, eval_mi_seen = get_all_uncertainty_measures(all_outputs_seen)
    observables,
    number_of_tests=number_of_tests,
    number_of_epochs=epoch,
    trainloader=trainloader_seen,
    valloader=valloader_seen,
    output_dir_tensorboard='./output',
    device=device,
    verbose=True,
)

bay_net.load_state_dict(observables.max_weights)

# Evaluation on seen test set
eval_acc, all_outputs_eval = eval_bayesian(
    bay_net,
    evalloader_seen,
    number_of_tests=number_of_tests,
    device=device,
)

# Evaluation on unseen test set
_, all_outputs_unseen = eval_bayesian(bay_net,
                                      evalloader_unseen,
                                      number_of_tests=number_of_tests,
                                      device=device)

res = pd.concat(
    (res,
     pd.DataFrame.from_dict({
         'trainset': [trainset],
         'split_labels': [split_labels],
         'type_of_unseen': [type_of_unseen],
Example #10
def main(
        exp_nbs=None,
        path_to_exps=path_to_exps,
        path_to_results=save_csv_path,
        nb_of_runs=nb_of_runs,
        number_of_tests=number_of_tests,
        rstars=rstars,
        delta=delta,
        recompute_outputs=recompute_outputs,
        verbose=verbose,
        save_csv=save_csv,
        do_save_animation=do_save_animation,
        device='cpu',
):
    """
    Performs selective classification given a trained network and testset. Computes different threshold depending on
    different accepted risks.
    Args:
        exp_nbs (list): list of int or str, numbers of the experiments to evaluate
        path_to_exps (str): path to the experiment groups
        path_to_results (str): path to save the results
        nb_of_runs (int): number of times to perform the same operation to get a confidence interval
        number_of_tests (int): number of inferences for each prediction
        rstars (list): list of float of accepted risks
        delta (float): probability of being higher than the upper bound
        recompute_outputs (Bool): whether or not to compute the outputs on the train / test sets. Put False if they
                                  are already computed and you don't want to lose time.
        verbose (Bool): whether to show the progress bar
        save_csv (Bool): whether to save the results as csv
        do_save_animation (Bool): whether to save the animation of finding the threshold.
        device (torch.device): gpu or cpu

    """
    if exp_nbs is None:
        exp_nbs = these_exp_nbs
    save_csv_path = pathlib.Path(path_to_results)
    save_fig_path = pathlib.Path(path_to_results)

    if not os.path.exists(save_csv_path / 'results_train.csv'):
        results_train = pd.DataFrame(
            columns=['exp', 'unc', 'threshold', 'risk', 'acc', 'coverage', 'time', 'number_of_tests'])
        results_eval = pd.DataFrame(
            columns=['exp', 'unc', 'threshold', 'risk', 'acc', 'coverage', 'time', 'number_of_tests'])
        if save_csv:
            save_csv_path.mkdir(exist_ok=True, parents=True)
            results_train.to_csv(save_csv_path / 'results_train.csv')
            results_eval.to_csv(save_csv_path / 'results_eval.csv')
    else:
        results_train = pd.read_csv(save_csv_path / 'results_train.csv', )
        results_train = results_train.filter(regex=r'^(?!Unnamed)')
        results_train.to_csv(save_csv_path / 'results_train_backup.csv')
        results_eval = pd.read_csv(save_csv_path / 'results_eval.csv', )
        results_eval = results_eval.filter(regex=r'^(?!Unnamed)')
        results_eval.to_csv(save_csv_path / 'results_eval_backup.csv')

    global_start = time.time()
    for _ in range(nb_of_runs):
        for exp_nb in exp_nbs:
            print(exp_nb)
            bay_net, arguments, _ = get_trained_model_and_args_and_groupnb(exp_nb, exp_path=path_to_exps)
            if recompute_outputs:

                split_labels = arguments.get('split_labels', 10)
                if arguments.get('trainset', 'mnist') == 'mnist':
                    get_trainset = get_mnist
                elif arguments.get('trainset', 'mnist') == 'cifar10':
                    get_trainset = get_cifar10
                else:
                    raise ValueError('trainset not recognized')

                trainloader_seen, _, evalloader_seen = get_trainset(
                    train_labels=range(split_labels),
                    eval_labels=range(split_labels),
                    batch_size=128,
                )

                bay_net.to(device)

                true_labels_train, all_outputs_train = eval_bayesian(
                    bay_net,
                    trainloader_seen,
                    number_of_tests=number_of_tests,
                    return_accuracy=False,
                    device=device,
                    verbose=True,
                )
                labels_predicted_train = get_predictions_from_multiple_tests(all_outputs_train).float()

                true_labels_eval, all_outputs_eval = eval_bayesian(
                    bay_net,
                    evalloader_seen,
                    number_of_tests=number_of_tests,
                    return_accuracy=False,
                    device=device,
                    verbose=True,
                )
                labels_predicted_eval = get_predictions_from_multiple_tests(all_outputs_eval).float()

            correct_preds_train = (labels_predicted_train == true_labels_train).float()
            residuals = 1 - correct_preds_train
            correct_preds_eval = (labels_predicted_eval == true_labels_eval).float()

            uncs_train = get_all_uncertainty_measures_bayesian(all_outputs_train)
            uncs_eval = get_all_uncertainty_measures_bayesian(all_outputs_eval)
            for idx_risk, rstar in enumerate(tqdm(rstars)):
                for unc_train, unc_eval, unc_name in zip(uncs_train, uncs_eval, ['vr', 'pe', 'mi']):
                    start = time.time()
                    thetas, bounds, risks, coverages = bound_animate(rstar, delta, -unc_train, residuals,
                                                                     verbose=verbose,
                                                                     max_iter=10,
                                                                     precision=1e-5, )
                    threshold = thetas[-1]
                    acc_train = correct_preds_train[
                        -unc_train > threshold].mean()  # .sum() / correct_preds_train.size(0)
                    coverage_train = (-unc_train >= threshold).sum().float() / unc_train.size(0)
                    new_res_train = pd.DataFrame.from_dict({
                        'exp': [exp_nb],
                        'unc': [unc_name],
                        'delta': [delta],
                        'threshold': [threshold],
                        'risk': [rstar],
                        'acc': [acc_train],
                        'coverage': [coverage_train],
                        'time': [time.time() - start],
                        'number_of_tests': [number_of_tests],
                        'loss_type': [arguments.get('loss_type', 'criterion')],
                    })
                    convert_tensor_to_float(new_res_train)
                    results_train = results_train.append(new_res_train, sort=True)

                    acc_eval = correct_preds_eval[-unc_eval > threshold].mean()
                    coverage_eval = (-unc_eval >= threshold).sum().float() / unc_eval.size(0)
                    new_res_eval = pd.DataFrame.from_dict({
                        'exp': [exp_nb],
                        'unc': [unc_name],
                        'delta': [delta],
                        'threshold': [threshold],
                        'risk': [rstar],
                        'acc': [acc_eval],
                        'coverage': [coverage_eval],
                        'time': [time.time() - start],
                        'number_of_tests': [number_of_tests],
                        'loss_type': [arguments.get('loss_type', 'criterion')],
                    })
                    convert_tensor_to_float(new_res_eval)
                    results_eval = results_eval.append(new_res_eval, sort=True)

                    if do_save_animation:
                        save_animation_path = save_fig_path / 'animation'
                        save_animation_path.mkdir(exist_ok=True, parents=True)
                        save_animation_path = save_animation_path / f'{exp_nb}_{unc_name}_{idx_risk}_' \
                            f'finding_threshold.gif'
                        save_animation(arguments, rstar, unc_train, correct_preds_train, risks, bounds, coverages,
                                       thetas, figsize,
                                       save_animation_path)

                if save_csv:
                    results_train.to_csv(save_csv_path / 'results_train.csv')
                    results_eval.to_csv(save_csv_path / 'results_eval.csv')
            print(f'Time since start: {time.time() - global_start}')
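
# A follow-up sketch (assumed helper name, not part of main above) of the selective
# risk / coverage quantities that the loop computes at each threshold: samples whose
# negated uncertainty clears the threshold are kept, and the risk is the error rate
# on the kept samples.
def selective_risk_and_coverage(unc, correct_preds, threshold):
    kept = -unc >= threshold                        # keep the most certain samples
    coverage = kept.float().mean()
    risk = 1. - correct_preds[kept].float().mean()  # error rate on the kept samples
    return risk, coverage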
def train_bayesian_modular_with_one_different(
    model,
    optimizer,
    loss,
    observables,
    number_of_epochs,
    trainloader,
    valloader=None,
    number_of_tests=10,
    output_dir_tensorboard=None,
    output_dir_results=None,
    device='cpu',
    verbose=False,
):
    """
    Train Bayesian with modular arguments
    Args:
        model (torch.nn.Module child): model we want to train
        optimizer (torch.optim optimizer): how do we update the weights
        loss (src.loggers.losses.base_loss.BaseLoss child): loss object
        observables (src.loggers.observables.Observables): observable object
        number_of_epochs (int): how long do we train our model
        trainloader (torch.utils.data.dataloader.DataLoader): dataloader of train set
        valloader (torch.utils.data.dataloader.DataLoader): dataloader of validation set
        number_of_tests (int): number of tests to perform during validation evaluation
        output_dir_results (str): output directory in which to save the results (NOT IMPLEMENTED)
        output_dir_tensorboard (str): output directory in which to save the tensorboard
        device (torch.device || str): cpu or gpu
        verbose (Bool): print training steps or not
    Returns:
        tuple: results of the loss logger and of the observables logger
    """
    start_time = time()
    number_of_batch = len(trainloader)
    interval = max(number_of_batch // 10, 1)

    for logger in [loss, observables]:
        logger.set_number_of_epoch(number_of_epochs)
        logger.set_number_of_batch(number_of_batch)
        logger.init_tensorboard_writer(output_dir_tensorboard)
        logger.init_results_writer(output_dir_results)

    model.train()
    for epoch in range(number_of_epochs):
        loss.set_current_epoch(epoch)
        observables.set_current_epoch(epoch)

        # "One different" pass: before the regular batches, draw one random training
        # image, tag it with batch index -1 and take a gradient step on it alone, so
        # a weighting function such as dirac() can give it a specific KL weight.
        loss.set_current_batch_idx(-1)

        idx_of_first_img = np.random.choice(len(trainloader.dataset))
        first_img = trainloader.dataset.data[idx_of_first_img].to(device)
        first_label = trainloader.dataset.targets[idx_of_first_img].to(device)
        optimizer.zero_grad()
        output = model(first_img.unsqueeze(0).unsqueeze(0).float())
        loss.compute(output, first_label.unsqueeze(0))
        loss.backward()
        optimizer.step()

        for batch_idx, data in enumerate(trainloader):
            loss.set_current_batch_idx(batch_idx)
            observables.set_current_batch_idx(batch_idx)

            inputs, labels = [x.to(device) for x in data]
            optimizer.zero_grad()
            outputs = model(inputs)

            observables.compute_train_on_batch(outputs, labels)
            loss.compute(outputs, labels)
            loss.backward()
            optimizer.step()

            if batch_idx % interval == interval - 1:
                if valloader is not None:
                    val_acc, val_outputs = eval_bayesian(
                        model,
                        valloader,
                        number_of_tests=number_of_tests,
                        device=device,
                        verbose=verbose)
                    observables.compute_val(val_acc, val_outputs)

                if verbose:
                    print('======================================')
                    print(
                        f'Epoch [{epoch + 1}/{number_of_epochs}]. Batch [{batch_idx}/{number_of_batch}].'
                    )
                    loss.show()
                    observables.show()
                    print(f'Time Elapsed: {round(time() - start_time)} s')

                loss.write_tensorboard()
                observables.write_tensorboard()
                if output_dir_results is not None:
                    loss.write_results()
                    observables.write_results()

        observables.compute_train_on_epoch(model, trainloader, device)

    loss.close_writer()
    observables.close_writer()
    if verbose:
        print('Finished Training')

    return loss.results(), observables.results()
    def compute_tps_fps(exp_nbs, get_uncs, unc_names, number_of_tests):
        all_xs = dict({k: [] for k in unc_names})
        all_ys = dict({k: [] for k in unc_names})
        all_xs_pr = dict({k: [] for k in unc_names})
        all_ys_pr = dict({k: [] for k in unc_names})
        all_uncs = {}
        for exp_nb in exp_nbs:

            bay_net_trained, arguments, _ = get_trained_model_and_args_and_groupnb(
                exp_nb, path_to_res)

            # Output caching: if 100 stochastic forward passes were already saved for
            # this experiment, subsample them rather than re-running the network; else
            # reuse the cache for this exact number_of_tests; else recompute and save.
            if number_of_tests < 100 and os.path.exists(
                    path_to_outputs / '100' / f'{exp_nb}/true_labels_seen.pt'):

                true_labels_seen = torch.load(path_to_outputs / f'100' /
                                              f'{exp_nb}/true_labels_seen.pt')
                all_outputs_seen = torch.load(path_to_outputs / f'100' /
                                              f'{exp_nb}/all_outputs_seen.pt')
                all_outputs_unseen = torch.load(
                    path_to_outputs / f'100' /
                    f'{exp_nb}/all_outputs_unseen.pt')

                random_idx = np.arange(100)
                np.random.shuffle(random_idx)
                random_idx = random_idx[:number_of_tests]
                all_outputs_seen = all_outputs_seen[random_idx]
                all_outputs_unseen = all_outputs_unseen[random_idx]
            elif os.path.exists(path_to_outputs / f'{number_of_tests}' /
                                f'{exp_nb}/true_labels_seen.pt'):
                true_labels_seen = torch.load(path_to_outputs /
                                              f'{number_of_tests}' /
                                              f'{exp_nb}/true_labels_seen.pt')
                all_outputs_seen = torch.load(path_to_outputs /
                                              f'{number_of_tests}' /
                                              f'{exp_nb}/all_outputs_seen.pt')
                all_outputs_unseen = torch.load(
                    path_to_outputs / f'{number_of_tests}' /
                    f'{exp_nb}/all_outputs_unseen.pt')
            else:
                (path_to_outputs / f'{number_of_tests}' / f'{exp_nb}').mkdir(
                    exist_ok=True, parents=True)
                evalloader_seen = get_evalloader_seen(arguments)
                # BE CAREFUL: in the paper, the process is tested on the entirety of the unseen classes
                evalloader_unseen = get_evalloader_unseen(arguments)
                true_labels_seen, all_outputs_seen = eval_bayesian(
                    model=bay_net_trained,
                    evalloader=evalloader_seen,
                    number_of_tests=number_of_tests,
                    return_accuracy=False,
                    device=device,
                    verbose=True,
                )

                _, all_outputs_unseen = eval_bayesian(
                    model=bay_net_trained,
                    evalloader=evalloader_unseen,
                    number_of_tests=number_of_tests,
                    device=device,
                    verbose=True,
                )

                torch.save(
                    true_labels_seen, path_to_outputs / f'{number_of_tests}' /
                    f'{exp_nb}/true_labels_seen.pt')
                torch.save(
                    all_outputs_seen, path_to_outputs / f'{number_of_tests}' /
                    f'{exp_nb}/all_outputs_seen.pt')
                torch.save(
                    all_outputs_unseen, path_to_outputs /
                    f'{number_of_tests}' / f'{exp_nb}/all_outputs_unseen.pt')

            preds = get_predictions_from_multiple_tests(
                all_outputs_seen).float()

            if typ == 'seen_unseen':
                all_outputs_true = all_outputs_seen
                all_outputs_false = all_outputs_unseen

            elif typ == 'true_false':
                all_outputs_true = all_outputs_seen[:, preds == true_labels_seen, :]
                all_outputs_false = all_outputs_seen[:, preds != true_labels_seen, :]
            else:
                all_outputs_true = all_outputs_seen[:, preds == true_labels_seen, :]
                all_outputs_false = torch.cat(
                    (all_outputs_seen[:, preds != true_labels_seen, :],
                     all_outputs_unseen), 1)

            uncs_true = get_uncs(all_outputs_true)
            uncs_false = get_uncs(all_outputs_false)

            all_uncs[exp_nb] = pd.DataFrame()

            for idx, unc_name in enumerate(unc_names):
                all_uncs[exp_nb] = all_uncs[exp_nb].append(
                    pd.DataFrame()
                    .assign(unc=torch.cat((uncs_true[idx], uncs_false[idx])))
                    .assign(is_ood=torch.cat((torch.zeros_like(uncs_true[idx]),
                                              torch.ones_like(uncs_false[idx]))))
                    .assign(unc_name=unc_name)
                    .assign(number_of_tests=number_of_tests))

                this_df = all_uncs[exp_nb].loc[all_uncs[exp_nb].unc_name == unc_name]
                positives = this_df.is_ood.sum()
                negatives = (1 - this_df.is_ood).sum()
                grouped_unc = this_df.groupby('unc')

                to_plot = (
                    grouped_unc.sum()
                    .assign(n=grouped_unc.apply(lambda df: len(df)))
                    .assign(tp=lambda df: df.iloc[::-1].is_ood.cumsum())
                    .assign(fp=lambda df: df.iloc[::-1].n.cumsum() - df.tp)
                    .assign(fn=lambda df: df.is_ood.cumsum())
                    .assign(precision=lambda df: df.tp / (df.tp + df.fp))
                    .assign(recall=lambda df: df.tp / (df.tp + df.fn))
                ).reset_index()

                all_xs[unc_name].append(to_plot.fp / negatives)
                all_ys[unc_name].append(to_plot.tp / positives)
                all_xs_pr[unc_name].append(to_plot.recall)
                all_ys_pr[unc_name].append(to_plot.precision)
        return all_xs, all_ys, all_xs_pr, all_ys_pr
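
# A minimal follow-up sketch (numpy usage and the helper name are assumptions, not
# part of the original function): approximate the area under one of the ROC curves
# returned by compute_tps_fps, for a given uncertainty measure and curve index.
import numpy as np

def auroc_from_curve(all_xs, all_ys, unc_name, curve_idx=0):
    false_positive_rates = np.asarray(all_xs[unc_name][curve_idx], dtype=float)
    true_positive_rates = np.asarray(all_ys[unc_name][curve_idx], dtype=float)
    order = np.argsort(false_positive_rates)            # np.trapz expects increasing x
    return float(np.trapz(true_positive_rates[order], false_positive_rates[order]))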