Example #1
    def test_validation_split(data_folder_teardown):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        dir_path = os.path.join(dir_path, 'data')

        trainloader, valloader, evalloader = get_mnist(root=dir_path,
                                                       train_labels=range(6),
                                                       eval_labels=range(6),
                                                       split_val=0)
        size_full_test = len(evalloader.dataset)
        trainloader, valloader, evalloader = get_mnist(root=dir_path,
                                                       train_labels=range(6),
                                                       eval_labels=range(6),
                                                       split_val=0.5)
        assert size_full_test == len(valloader.dataset) + len(
            evalloader.dataset)
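The assertion above relies on a size invariant: with split_val=0.5 the validation and evaluation loaders are assumed to partition the full MNIST test split, so their sizes add up to the size obtained with split_val=0. A minimal sketch of such a split, using torchvision and torch.utils.data.random_split as stand-ins for whatever get_mnist does internally (the helper name split_eval_set is hypothetical):

from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms

def split_eval_set(root, split_val=0.5, batch_size=32):
    # Hypothetical helper: carve a validation subset out of MNIST's test
    # split so that len(val) + len(eval) == len(full test set).
    testset = datasets.MNIST(root=root, train=False, download=True,
                             transform=transforms.ToTensor())
    number_of_val = int(split_val * len(testset))
    valset, evalset = random_split(
        testset, [number_of_val, len(testset) - number_of_val])
    return (DataLoader(valset, batch_size=batch_size),
            DataLoader(evalset, batch_size=batch_size))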
def get_evalloader_unseen(arguments):
    type_of_unseen = arguments['type_of_unseen']
    dim_channels = arguments['dim_channels']
    dim_input = arguments['dim_input']
    split_labels = arguments['split_labels']
    if type_of_unseen == 'random':
        _, _, evalloader_unseen = get_random(number_of_channels=dim_channels,
                                             img_dim=dim_input,
                                             number_of_classes=10)
    if type_of_unseen == 'unseen_classes':
        # Same convention as Example #6: classes >= split_labels are the
        # held-out, "unseen" classes used for evaluation.
        _, _, evalloader_unseen = get_trainset(
            train_labels=(),
            eval_labels=range(split_labels, 10),
        )
    if type_of_unseen == 'unseen_dataset':
        # 'res', 'trainset' and 'unseen_evalset' are not defined in this
        # snippet; in the original code they come from the enclosing scope.
        res['unseen_dataset'] = unseen_evalset
        assert trainset != unseen_evalset, 'Train Set must be different from Unseen Test Set'
        transform = transforms.Compose([
            transforms.Grayscale(num_output_channels=dim_channels),
            transforms.Resize(dim_input),
            transforms.ToTensor(),
        ])
        if unseen_evalset == 'cifar10':
            _, _, evalloader_unseen = get_cifar10(transform=transform)
        if unseen_evalset == 'mnist':
            _, _, evalloader_unseen = get_mnist(transform=transform)
        if unseen_evalset == 'omniglot':
            _, _, evalloader_unseen = get_omniglot(transform=transform,
                                                   download=False)

    return evalloader_unseen
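A call to get_evalloader_unseen might look as follows; the dictionary keys are the ones read from arguments above, and the concrete values are only illustrative:

arguments = {
    'type_of_unseen': 'unseen_classes',
    'dim_channels': 1,
    'dim_input': 28,
    'split_labels': 6,  # classes 6-9 would then act as the unseen classes
}
evalloader_unseen = get_evalloader_unseen(arguments)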
Example #3
    def test_number_of_train(data_folder_teardown):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        dir_path = os.path.join(dir_path, 'data')

        trainloader, valloader, evalloader = get_mnist(root=dir_path,
                                                       split_train=5)
        assert len(trainloader.dataset) == 5
Example #4
    def test_label_specification(data_folder_teardown):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        dir_path = os.path.join(dir_path, 'data')
        train_labels = np.random.randint(0, 10, 5)
        eval_labels = np.random.randint(0, 10, 5)

        trainloader, valloader, evalloader = get_mnist(
            root=dir_path,
            train_labels=train_labels,
            split_val=0,
            eval_labels=eval_labels)

        assert np.isin(trainloader.dataset.targets, train_labels).all()
        assert np.isin(evalloader.dataset.targets, eval_labels).all()
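Both assertions only pass if get_mnist filters the underlying dataset by label. A rough sketch of what such filtering could look like on a torchvision MNIST dataset (filter_by_labels is a hypothetical helper, not the repository's implementation):

import numpy as np
import torch

def filter_by_labels(dataset, labels):
    # Keep only the samples whose target is in `labels`; this is exactly
    # the property the np.isin assertions above check.
    mask = torch.as_tensor(np.isin(dataset.targets.numpy(), np.asarray(labels)))
    dataset.data = dataset.data[mask]
    dataset.targets = dataset.targets[mask]
    return dataset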
number_of_tests = args.number_of_tests
loss_type = args.loss_type
std_prior = args.std_prior
stds_prior = (std_prior, std_prior)
delta = args.delta
risks = np.linspace(0.01, 0.5, 50)

if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'
device = torch.device(device)

if trainset == 'mnist':
    trainloader, valloader, evalloader = get_mnist(train_labels=range(10),
                                                   eval_labels=range(10),
                                                   batch_size=batch_size)
    dim_input = 28
    dim_channels = 1
if trainset == 'cifar10':
    trainloader, valloader, evalloader = get_cifar10(batch_size=batch_size)
    dim_input = 32
    dim_channels = 3

seed_model = set_and_print_random_seed()
bay_net = GaussianClassifier(rho=rho,
                             stds_prior=stds_prior,
                             dim_input=dim_input,
                             number_of_classes=10,
                             dim_channels=dim_channels)
bay_net.to(device)
Example #6
if type_of_unseen == 'random':
    _, _, evalloader_unseen = get_random(number_of_channels=dim_channels, img_dim=dim_input, number_of_classes=10)
if type_of_unseen == 'unseen_classes':
    _, _, evalloader_unseen = get_trainset(train_labels=(), eval_labels=range(split_labels, 10))
    res['split_labels'] = split_labels
if type_of_unseen == 'unseen_dataset':
    res['unseen_dataset'] = unseen_evalset
    assert trainset != unseen_evalset, 'Train Set must be different from Unseen Test Set'
    transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=dim_channels),
        transforms.Resize(dim_input),
        transforms.ToTensor(),
    ])
    if unseen_evalset == 'cifar10':
        _, _, evalloader_unseen = get_cifar10(transform=transform)
    if unseen_evalset == 'mnist':
        _, _, evalloader_unseen = get_mnist(transform=transform)
    if unseen_evalset == 'omniglot':
        _, _, evalloader_unseen = get_omniglot(transform=transform, download=False)


seed_model = set_and_print_random_seed()
bay_net = GaussianClassifier(rho=rho, stds_prior=stds_prior, dim_input=dim_input, number_of_classes=10, dim_channels=dim_channels)
bay_net.to(device)
criterion = CrossEntropyLoss()
if loss_type == 'uniform':
    step_function = uniform
    loss = BBBLoss(bay_net, criterion, step_function)
elif loss_type == 'exp':
    def step_function(batch_idx, number_of_batches):
        # Geometric KL weighting from Blundell et al. (2015),
        # "Weight Uncertainty in Neural Networks": pi_i = 2**(M - i) / (2**M - 1).
        return 2 ** (number_of_batches - batch_idx) / (2 ** number_of_batches - 1)
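As a side note on the 'exp' branch: assuming batch_idx runs from 1 to number_of_batches (the convention in Bayes by Backprop), these weights sum to 1 over one pass through the data. A quick check, not part of the original script:

number_of_batches = 10
weights = [step_function(batch_idx, number_of_batches)
           for batch_idx in range(1, number_of_batches + 1)]
# sum over i = 1..M of 2**(M - i) equals 2**M - 1, so the weights sum to 1
assert abs(sum(weights) - 1.0) < 1e-9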
##### TO CHANGE ######
nb_of_runs = args.nb_of_runs
nb_of_epochs = args.nb_of_epochs
nb_of_tests = args.nb_of_tests
rho = args.rho
######################

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
device = torch.device(device)
print(device)

trainloader, valloader, evalloader = get_mnist(batch_size=32)
criterion = nn.CrossEntropyLoss()


def train_bayesian_modular_with_one_different(
    model,
    optimizer,
    loss,
    observables,
    number_of_epochs,
    trainloader,
    valloader=None,
    number_of_tests=10,
    output_dir_tensorboard=None,
    output_dir_results=None,
    device='cpu',