def nkl():
    """Returns a Pytorchlearner"""
    model = get_mock_model()
    dl = get_mock_dataloader()
    opt = get_mock_optimiser()
    crit = get_mock_criterion()
    nkl = PytorchLearner(model=model,
                         train_loader=dl,
                         optimizer=opt,
                         criterion=crit,
                         num_train_batches=1,
                         num_test_batches=1)
    return nkl
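
A minimal usage sketch for the fixture above, assuming nkl is registered as a
pytest fixture and that PytorchLearner implements colearn's
MachineLearningInterface (both are assumptions, not shown in the snippet):

def test_propose_weights(nkl):
    # hypothetical test: propose new weights after one pass over the mock data
    proposed = nkl.mli_propose_weights()
    assert proposed is not None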
Example #2
def prepare_learner(
    data_loaders: Tuple[DataLoader, DataLoader, DataLoader],
    learning_rate: float = 0.001,
    steps_per_epoch: int = 40,
    vote_batches: int = 10,
    no_cuda: bool = False,
    vote_on_accuracy: bool = True,
) -> PytorchLearner:
    """
    Creates a new instance of PytorchLearner
    :param data_loaders: Tuple of train_loader, vote_loader and test_loader
    :param learning_rate: Learning rate for optimiser
    :param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches to get vote_score
    :param no_cuda: True = disable GPU computing
    :param vote_on_accuracy: True = vote on accuracy metric, False = vote on loss
    :return: New instance of PytorchLearner
    """

    cuda = not no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")

    model = TorchXrayConv2DModel()

    if vote_on_accuracy:
        learner_vote_kwargs = dict(vote_criterion=auc_from_logits,
                                   minimise_criterion=False)
    else:
        learner_vote_kwargs = {}

    # Make a PytorchLearner instance from the model and torch dataloaders
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    learner = PytorchLearner(
        model=model,
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        device=device,
        optimizer=opt,
        criterion=nn.BCEWithLogitsLoss(
            # pos_weight=pos_weight,
            reduction='mean'),
        num_train_batches=steps_per_epoch,
        num_test_batches=vote_batches,
        **learner_vote_kwargs  # type: ignore[arg-type]
    )

    return learner
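
A hedged usage sketch for prepare_learner; the random TensorDataset below is a
stand-in for the real X-ray data, and every shape is a placeholder assumption:

import torch
from torch.utils.data import DataLoader, TensorDataset

# fake single-channel 128x128 "x-rays" with binary labels (shapes assumed)
images = torch.randn(64, 1, 128, 128)
labels = torch.randint(0, 2, (64, 1)).float()
dataset = TensorDataset(images, labels)

loaders = (DataLoader(dataset, batch_size=8, shuffle=True),  # train
           DataLoader(dataset, batch_size=8),                # vote
           DataLoader(dataset, batch_size=8))                # test
learner = prepare_learner(loaders, learning_rate=0.001, no_cuda=True)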
Example #3
def prepare_learner(data_loaders: Tuple[DataLoader, DataLoader],
                    learning_rate: float = 0.001,
                    steps_per_epoch: int = 40,
                    vote_batches: int = 10,
                    no_cuda: bool = False,
                    vote_on_accuracy: bool = True,
                    **_kwargs) -> PytorchLearner:
    """
    Creates a new instance of PytorchLearner
    :param data_loaders: Tuple of train_loader and test_loader
    :param learning_rate: Learning rate for optimiser
    :param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches to get vote_score
    :param no_cuda: True = disable GPU computing
    :param vote_on_accuracy: True = vote on accuracy metric, False = vote on loss
    :param _kwargs: Residual parameters not used by this function
    :return: New instance of PytorchLearner
    """
    cuda = not no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")

    model = TorchCovidXrayPerceptronModel()

    if vote_on_accuracy:
        learner_vote_kwargs = dict(vote_criterion=categorical_accuracy,
                                   minimise_criterion=False)
    else:
        learner_vote_kwargs = {}

    # Make a PytorchLearner instance from the model and torch dataloaders
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    learner = PytorchLearner(model=model,
                             train_loader=data_loaders[0],
                             test_loader=data_loaders[1],
                             device=device,
                             optimizer=opt,
                             criterion=torch.nn.NLLLoss(),
                             num_train_batches=steps_per_epoch,
                             num_test_batches=vote_batches,
                             **learner_vote_kwargs)  # type: ignore[arg-type]

    return learner
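
categorical_accuracy, used as the vote criterion above, is a library helper; a
plausible sketch of such a function (its exact signature is an assumption):

import torch

def categorical_accuracy_sketch(outputs: torch.Tensor,
                                labels: torch.Tensor) -> float:
    # outputs: (batch, n_classes) log-probabilities, labels: (batch,) class ids
    predictions = outputs.argmax(dim=1)
    return (predictions == labels).float().mean().item()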
Example #4
if vote_on_accuracy:
    learner_vote_kwargs = dict(vote_criterion=categorical_accuracy,
                               minimise_criterion=False)
    score_name = "Categorical accuracy"
else:
    learner_vote_kwargs = {}
    score_name = "loss"

# Make n instances of PytorchLearner with model and torch dataloaders
all_learner_models = []
for i in range(n_learners):
    model = Net()
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    learner = PytorchLearner(
        model=model,
        train_loader=learner_train_dataloaders[i],
        test_loader=learner_test_dataloaders[i],
        device=device,
        optimizer=opt,
        criterion=torch.nn.NLLLoss(),
        num_test_batches=vote_batches,
        **learner_vote_kwargs  # type: ignore[arg-type]
    )

    all_learner_models.append(learner)

# Ensure all learners start with exactly the same weights
set_equal_weights(all_learner_models)

# print a summary of the model architecture
summary(all_learner_models[0].model,
        input_size=(channels, width, height),
        device=str(device))
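
set_equal_weights, called above, synchronises the learners before the first
round. A minimal sketch of what such a helper might do, assuming the learners
expose colearn's mli_get_current_weights / mli_accept_weights interface:

def set_equal_weights_sketch(learners):
    # copy the first learner's weights to all others so every model starts
    # collective learning from the same point
    weights = learners[0].mli_get_current_weights()
    for learner in learners[1:]:
        learner.mli_accept_weights(weights)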
Example #5
        # tail of Net.forward: flatten the conv features, apply two fully
        # connected layers, and return log-probabilities
        x = x.view(-1, 4 * 4 * 50)
        x = nn_func.relu(self.fc1(x))
        x = self.fc2(x)
        return nn_func.log_softmax(x, dim=1)


# Make n instances of PytorchLearner with model and torch dataloaders
all_learner_models = []
for i in range(n_learners):
    model = Net().to(device)
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    learner = PytorchLearner(model=model,
                             train_loader=learner_train_dataloaders[i],
                             vote_loader=learner_vote_dataloaders[i],
                             test_loader=learner_test_dataloaders[i],
                             device=device,
                             optimizer=opt,
                             criterion=torch.nn.NLLLoss(),
                             num_test_batches=vote_batches,
                             vote_criterion=categorical_accuracy,
                             minimise_criterion=False)

    all_learner_models.append(learner)

# Ensure all learners start with exactly the same weights
set_equal_weights(all_learner_models)

summary(all_learner_models[0].model,
        input_size=(width, height),
        device=str(device))

# Train the model using Collective Learning
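The training loop the comment announces is cut off here. A hedged sketch,
assuming colearn's usual helpers (Results, initial_result,
collective_learning_round) and placeholder values for n_rounds and
vote_threshold:

n_rounds = 15         # placeholder
vote_threshold = 0.5  # placeholder: fraction of positive votes needed to accept

results = Results()
results.data.append(initial_result(all_learner_models))

for round_index in range(n_rounds):
    # one round: a learner proposes weights, the others vote, and the proposal
    # is accepted or rejected according to the vote threshold
    results.data.append(
        collective_learning_round(all_learner_models, vote_threshold, round_index))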
Example #6
    score_name = "auc"
else:
    learner_vote_kwargs = {}
    score_name = "loss"

# Make n instances of PytorchLearner with model and torch dataloaders
all_learner_models = []
for i in range(n_learners):
    model = Net()
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    learner = PytorchLearner(
        model=model,
        train_loader=learner_train_dataloaders[i],
        test_loader=learner_test_dataloaders[i],
        device=device,
        optimizer=opt,
        criterion=nn.BCEWithLogitsLoss(
            reduction='mean'),
        num_train_batches=steps_per_epoch,
        num_test_batches=vote_batches,
        **learner_vote_kwargs  # type: ignore[arg-type]
    )

    all_learner_models.append(learner)

set_equal_weights(all_learner_models)

# print a summary of the model architecture
summary(all_learner_models[0].model, input_size=(1, width, height), device=str(device))

# Now we're ready to start collective learning
# Get initial accuracy

Example #7

# Make n instances of PytorchLearner with model and torch dataloaders,
# attaching an opacus PrivacyEngine to each optimizer for differential privacy
all_learner_models = []
for i in range(n_learners):
    model = Net().to(device)
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    privacy_engine = PrivacyEngine(model,
                                   batch_size=batch_size,
                                   sample_size=sample_size,
                                   alphas=alphas,
                                   noise_multiplier=noise_multiplier,
                                   max_grad_norm=max_grad_norm)
    privacy_engine.attach(opt)
    learner = PytorchLearner(model=model,
                             train_loader=learner_train_dataloaders[i],
                             test_loader=learner_test_dataloaders[i],
                             device=device,
                             optimizer=opt,
                             criterion=torch.nn.NLLLoss(),
                             num_test_batches=vote_batches)

    all_learner_models.append(learner)

# print a summary of the model architecture
summary(all_learner_models[0].model,
        input_size=(width, height),
        device=str(device))

# Now we're ready to start collective learning
# Get initial accuracy
results = Results()
results.data.append(initial_result(all_learner_models))
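
The snippet ends after recording the initial result; a hedged continuation,
reusing the assumed collective_learning_round helper and the placeholder
n_rounds / vote_threshold from the sketch after Example #5:

for round_index in range(n_rounds):
    results.data.append(
        collective_learning_round(all_learner_models, vote_threshold, round_index))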