Example No. 1
def main():
    # Parse CLI arguments and choose the training device.
    in_arg = get_input_args()
    device = "cuda" if in_arg.gpu else "cpu"

    # Build data loaders, set up the model, and train it.
    trainloader, testloader, validloader, train_data = utils.create_loaders(in_arg.data_dir)
    model, device, criterion, optimizer = utils.set_up_model_params(in_arg.arch, in_arg.learning_rate, in_arg.hidden_units, device)
    utils.train_the_model(model, trainloader, validloader, criterion, optimizer, device, in_arg.epochs)

    # Optionally evaluate on the test set, then save a checkpoint.
    if in_arg.validate:
        utils.validate_model(model, testloader, device)
    utils.save_model(model, optimizer, in_arg.save_dir, in_arg.arch, train_data)
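The `get_input_args()` helper is not shown in this example. A minimal sketch of what it might look like, assuming an argparse-based CLI exposing exactly the attributes the snippet reads (`data_dir`, `gpu`, `arch`, `learning_rate`, `hidden_units`, `epochs`, `validate`, `save_dir`); the defaults here are illustrative, not the author's:

import argparse

# Hypothetical reconstruction -- only the attribute names used above are known.
def get_input_args():
    parser = argparse.ArgumentParser(description='Train an image classifier')
    parser.add_argument('data_dir', type=str, help='Dataset directory')
    parser.add_argument('--save_dir', type=str, default='./', help='Checkpoint directory')
    parser.add_argument('--arch', type=str, default='vgg16', help='Network architecture')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--hidden_units', type=int, default=512, help='Hidden units')
    parser.add_argument('--epochs', type=int, default=5, help='Training epochs')
    parser.add_argument('--gpu', action='store_true', help='Use GPU if available')
    parser.add_argument('--validate', action='store_true', help='Run test-set validation')
    return parser.parse_args()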
Example No. 2
def main():
    parser = argparse.ArgumentParser(
        description='This program predicts a flower name from an image')
    parser.add_argument('data_dir', type=str, help='Dataset directory')
    parser.add_argument('--save_dir',
                        type=str,
                        default='./',
                        help='Saved checkpoint directory')
    parser.add_argument('--arch',
                        type=str,
                        default='vgg16',
                        help='Network architecture')
    parser.add_argument('--hidden_units',
                        type=int,
                        default=256,
                        help='Hidden units')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.2,
                        help='Dropout for the hidden layers')
    parser.add_argument('--num_classes',
                        type=int,
                        default=256,
                        help='Number of classes for classification')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.005,
                        help='Learning rate')
    parser.add_argument('--epochs',
                        type=int,
                        default=20,
                        help='Number of epochs')
    parser.add_argument('--gpu',
                        action='store_true',
                        help='Use GPU')

    args = parser.parse_args()
    device = torch.device(
        "cuda:0" if args.gpu and torch.cuda.is_available() else "cpu")

    model, criterion, optimizer, scheduler = initialize_model(
        args.arch, args.hidden_units, args.dropout, args.num_classes, device,
        args.learning_rate)

    dataloaders, image_datasets = create_loaders(args.data_dir)

    train_model(model, dataloaders, criterion, optimizer, scheduler,
                image_datasets, args.epochs, device)
    test_model(model, dataloaders, image_datasets, device)
    save_checkpoint(model, optimizer, scheduler, args.epochs,
                    args.learning_rate, f'{args.arch}_checkpoint.pth')
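An aside on the defaults corrected above: argparse runs string defaults through `type`, so `default='20'` with `type=int` still yields an int, but passing the native literal is the clearer idiom and skips the extra conversion. A self-contained demonstration:

import argparse

# argparse applies `type` to string defaults, so default='20' and default=20
# both produce an int; the native literal (used above) is simply clearer.
p = argparse.ArgumentParser()
p.add_argument('--epochs', type=int, default=20)
print(p.parse_args([]).epochs + 1)  # 21 -- epochs is already an int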
Example No. 3
    def main(self):
        if self.uncertainty_sampling_method == 'mc_dropout':
            uncertainty_sampler = UncertaintySamplingMCDropout()
            self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
        elif self.uncertainty_sampling_method == 'augmentations_based':
            uncertainty_sampler = UncertaintySamplingAugmentationBased()
            self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
        elif self.uncertainty_sampling_method == 'entropy_based':
            uncertainty_sampler = UncertaintySamplingEntropyBased(
                verbose=True, uncertainty_sampling_method='entropy_based')
            self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
        else:
            uncertainty_sampler = None
            self.args.weak_supervision_strategy = "random_sampling"

        dataset_cls = self.datasets[self.args.dataset](
            root=self.args.root,
            add_labeled=self.args.add_labeled,
            advanced_transforms=True,
            merged=self.args.merged,
            remove_classes=self.args.remove_classes,
            oversampling=self.args.oversampling,
            unlabeled_subset_ratio=self.args.unlabeled_subset,
            expand_labeled=self.args.fixmatch_k_img,
            expand_unlabeled=self.args.fixmatch_k_img * self.args.fixmatch_mu,
            unlabeled_augmentations=(
                self.uncertainty_sampling_method == 'augmentations_based'),
            seed=self.args.seed,
            start_labeled=self.args.start_labeled)

        base_dataset, labeled_dataset, unlabeled_dataset, labeled_indices, unlabeled_indices, test_dataset = \
            dataset_cls.get_dataset()

        train_loader, unlabeled_loader, val_loader = create_loaders(
            self.args, labeled_dataset, unlabeled_dataset, test_dataset,
            labeled_indices, unlabeled_indices, self.kwargs,
            dataset_cls.unlabeled_subset_num)

        labeled_dataset_fix, unlabeled_dataset_fix = dataset_cls.get_datasets_fixmatch(
            base_dataset, labeled_indices, unlabeled_indices)

        self.args.lr = 0.0003  # hard-coded learning-rate override for this pipeline
        model, optimizer, _ = create_model_optimizer_scheduler(
            self.args, dataset_cls)

        if self.init == 'pretrained':
            model = load_pretrained(model)
        elif self.init == 'autoencoder':
            model, optimizer, _ = create_model_optimizer_autoencoder(
                self.args, dataset_cls)
        elif self.init == 'simclr':
            model, optimizer, _, _ = create_model_optimizer_simclr(
                self.args, dataset_cls)

        labeled_loader_fix = DataLoader(dataset=labeled_dataset_fix,
                                        batch_size=self.args.batch_size,
                                        shuffle=True,
                                        **self.kwargs)
        unlabeled_loader_fix = DataLoader(dataset=unlabeled_dataset_fix,
                                          batch_size=self.args.batch_size,
                                          shuffle=True,
                                          **self.kwargs)

        criterion_labeled = get_loss(self.args,
                                     dataset_cls.labeled_class_samples,
                                     reduction='none')
        criterion_unlabeled = get_loss(self.args,
                                       dataset_cls.labeled_class_samples,
                                       reduction='none')

        criterions = {
            'labeled': criterion_labeled,
            'unlabeled': criterion_unlabeled
        }

        model.zero_grad()

        best_recall, best_report, last_best_epochs = 0, None, 0
        best_model = deepcopy(model)

        metrics_per_cycle = pd.DataFrame([])
        metrics_per_epoch = pd.DataFrame([])
        num_class_per_cycle = pd.DataFrame([])

        self.args.start_epoch = 0
        current_labeled = dataset_cls.start_labeled

        for epoch in range(self.args.start_epoch, self.args.fixmatch_epochs):
            # zip pairs labeled/unlabeled batches and stops with the shorter loader
            train_loader_fix = zip(labeled_loader_fix, unlabeled_loader_fix)
            train_loss = self.train(train_loader_fix, model, optimizer, epoch,
                                    len(labeled_loader_fix), criterions,
                                    base_dataset.classes, last_best_epochs)
            val_loss, val_report = self.validate(val_loader, model,
                                                 last_best_epochs, criterions)

            is_best = val_report['macro avg']['recall'] > best_recall
            last_best_epochs = 0 if is_best else last_best_epochs + 1

            val_report = pd.concat([val_report, train_loss, val_loss], axis=1)
            metrics_per_epoch = pd.concat([metrics_per_epoch, val_report])

            if epoch > self.args.labeled_warmup_epochs and last_best_epochs > self.args.add_labeled_epochs:
                metrics_per_cycle = pd.concat([metrics_per_cycle, best_report])

                train_loader, unlabeled_loader, val_loader, labeled_indices, unlabeled_indices = \
                    perform_sampling(self.args, uncertainty_sampler, None,
                                     epoch, model, train_loader, unlabeled_loader,
                                     dataset_cls, labeled_indices,
                                     unlabeled_indices, labeled_dataset,
                                     unlabeled_dataset,
                                     test_dataset, self.kwargs, current_labeled,
                                     model)

                labeled_dataset_fix, unlabeled_dataset_fix = dataset_cls.get_datasets_fixmatch(
                    base_dataset, labeled_indices, unlabeled_indices)

                labeled_loader_fix = DataLoader(
                    dataset=labeled_dataset_fix,
                    batch_size=self.args.batch_size,
                    shuffle=True,
                    **self.kwargs)
                unlabeled_loader_fix = DataLoader(
                    dataset=unlabeled_dataset_fix,
                    batch_size=self.args.batch_size,
                    shuffle=True,
                    **self.kwargs)

                current_labeled += self.args.add_labeled
                last_best_epochs = 0

                if self.args.reset_model:
                    if self.init == 'pretrained':
                        model = load_pretrained(model)
                    elif self.init == 'autoencoder':
                        model, optimizer, _ = create_model_optimizer_autoencoder(
                            self.args, dataset_cls)
                    elif self.init == 'simclr':
                        model, optimizer, _, self.args = create_model_optimizer_simclr(
                            self.args, dataset_cls)

                if self.args.novel_class_detection:
                    num_classes = [
                        np.sum(
                            np.array(base_dataset.targets)[labeled_indices] ==
                            i) for i in range(len(base_dataset.classes))
                    ]
                    num_class_per_cycle = pd.concat([
                        num_class_per_cycle,
                        pd.DataFrame.from_dict(
                            {
                                cls: num_classes[i]
                                for i, cls in enumerate(base_dataset.classes)
                            },
                            orient='index').T
                    ])

                criterion_labeled = get_loss(self.args,
                                             dataset_cls.labeled_class_samples,
                                             reduction='none')
                criterion_unlabeled = get_loss(
                    self.args,
                    dataset_cls.labeled_class_samples,
                    reduction='none')
                criterions = {
                    'labeled': criterion_labeled,
                    'unlabeled': criterion_unlabeled
                }
            else:
                best_recall = val_report['macro avg'][
                    'recall'] if is_best else best_recall
                best_report = val_report if is_best else best_report
                best_model = deepcopy(model) if is_best else best_model

            save_checkpoint(
                self.args, {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_prec1': best_recall,
                }, is_best)

            if current_labeled > self.args.stop_labeled:
                break

        if self.args.store_logs:
            store_logs(self.args, metrics_per_cycle)
            store_logs(self.args, metrics_per_epoch, log_type='epoch_wise')
            store_logs(self.args, num_class_per_cycle, log_type='novel_class')

        return best_recall
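One detail worth noting in the training loop above: `zip` yields pairs only until the shorter of the two loaders is exhausted, which is consistent with passing `len(labeled_loader_fix)` as the epoch length when the labeled loader is the shorter one. A minimal sketch of that behavior:

# zip pairs items until the shorter iterable runs out, which is why the epoch
# length above is len(labeled_loader_fix) rather than the unlabeled size.
labeled_batches = ['L0', 'L1', 'L2']
unlabeled_batches = ['U0', 'U1', 'U2', 'U3', 'U4']
for lab, unlab in zip(labeled_batches, unlabeled_batches):
    print(lab, unlab)  # pairs L0/U0 .. L2/U2; U3 and U4 are never seen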
Example No. 4
    model_dim = config.hidden_size
elif args.model_type in ['gpt2']:
    model_dim = config.n_embd

mlp = Context_MLP(in_size=model_dim)
mlp = mlp.to(device)
#-----------------------------------------

#-----------------------------------------
# The loss function:
criterion = nn.MarginRankingLoss(margin=args.loss_margin, reduction='none')
#-----------------------------------------

#-----------------------------------------
# Creating the data loaders:
train_dataloader, test_dataloader = create_loaders(args, Ranking_Dataset,
                                                   tokenizer)
#-----------------------------------------

#-----------------------------------------
# Tensorboard writer:
tb_writer = SummaryWriter(
    log_dir=f'{logs_path}/{datetime.now().strftime("%d%m%Y-%H_%M_%S")}/')
#-----------------------------------------

#-----------------------------------------
# Creating the optimiser:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{
    'params': mlp.parameters(),
    'weight_decay': args.weight_decay
}, {
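The snippet cuts off mid-way through the parameter groups, so the original continuation is unknown; the unused `no_decay` list suggests the common transformer grouping. A hypothetical completion under that assumption (`model` stands in for the transformer whose parameters are being grouped; the real identifiers are not shown):

# Hypothetical completion of the truncated groups above, following the common
# no_decay convention: bias and LayerNorm weights are exempt from weight decay.
optimizer_grouped_parameters = [
    {'params': mlp.parameters(), 'weight_decay': args.weight_decay},
    {'params': [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)],
     'weight_decay': args.weight_decay},
    {'params': [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]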
Example No. 5
    def main(self):
        dataset_class = self.datasets[self.args.dataset](
            root=self.args.root,
            add_labeled=self.args.add_labeled,
            advanced_transforms=True,
            merged=self.args.merged,
            remove_classes=self.args.remove_classes,
            oversampling=self.args.oversampling,
            unlabeled_subset_ratio=self.args.unlabeled_subset,
            seed=self.args.seed,
            start_labeled=self.args.start_labeled)

        _, labeled_dataset, unlabeled_dataset, labeled_indices, unlabeled_indices, test_dataset = \
            dataset_class.get_dataset()

        labeled_loader, unlabeled_loader, val_loader = create_loaders(
            self.args, labeled_dataset, unlabeled_dataset, test_dataset,
            labeled_indices, unlabeled_indices, self.kwargs,
            dataset_class.unlabeled_subset_num)

        base_dataset = dataset_class.get_base_dataset_autoencoder()

        base_loader = create_base_loader(base_dataset, self.kwargs,
                                         self.args.batch_size)

        reconstruction_loss_log = []

        bce_loss = nn.BCELoss().cuda()  # note: only the BCE criterion is moved to the GPU
        l1_loss = nn.L1Loss()
        l2_loss = nn.MSELoss()
        ssim_loss = SSIM(size_average=True,
                         data_range=1.0,
                         nonnegative_ssim=True)

        criterions_reconstruction = {
            'bce': bce_loss,
            'l1': l1_loss,
            'l2': l2_loss,
            'ssim': ssim_loss
        }
        criterion_cl = get_loss(self.args,
                                dataset_class.labeled_class_samples,
                                reduction='none')

        model, optimizer, self.args = create_model_optimizer_autoencoder(
            self.args, dataset_class)

        best_loss = np.inf

        metrics_per_cycle = pd.DataFrame([])
        metrics_per_epoch = pd.DataFrame([])
        num_class_per_cycle = pd.DataFrame([])

        best_recall, best_report, last_best_epochs = 0, None, 0
        best_model = deepcopy(model)

        self.args.start_epoch = 0
        self.args.weak_supervision_strategy = "random_sampling"
        current_labeled = dataset_class.start_labeled

        for epoch in range(self.args.start_epoch, self.args.epochs):
            cl_train_loss, losses_avg_reconstruction, losses_reconstruction = \
                self.train(labeled_loader, model, criterion_cl, optimizer, last_best_epochs, epoch,
                           criterions_reconstruction, base_loader)
            val_loss, val_report = self.validate(val_loader, model,
                                                 last_best_epochs,
                                                 criterion_cl)

            reconstruction_loss_log.append(losses_avg_reconstruction.tolist())
            best_loss = min(best_loss, losses_reconstruction.avg)

            is_best = val_report['macro avg']['recall'] > best_recall
            last_best_epochs = 0 if is_best else last_best_epochs + 1

            val_report = pd.concat([val_report, cl_train_loss, val_loss],
                                   axis=1)
            metrics_per_epoch = pd.concat([metrics_per_epoch, val_report])

            if epoch > self.args.labeled_warmup_epochs and last_best_epochs > self.args.add_labeled_epochs:
                metrics_per_cycle = pd.concat([metrics_per_cycle, best_report])

                labeled_loader, unlabeled_loader, val_loader, labeled_indices, unlabeled_indices = \
                    perform_sampling(self.args, None, None,
                                     epoch, model, labeled_loader, unlabeled_loader,
                                     dataset_class, labeled_indices,
                                     unlabeled_indices, labeled_dataset,
                                     unlabeled_dataset,
                                     test_dataset, self.kwargs, current_labeled,
                                     model)

                current_labeled += self.args.add_labeled
                last_best_epochs = 0

                if self.args.reset_model:
                    model, optimizer, self.args = create_model_optimizer_autoencoder(
                        self.args, dataset_class)

                if self.args.novel_class_detection:
                    num_classes = [
                        np.sum(
                            np.array(base_dataset.targets)[labeled_indices] ==
                            i) for i in range(len(base_dataset.classes))
                    ]
                    num_class_per_cycle = pd.concat([
                        num_class_per_cycle,
                        pd.DataFrame.from_dict(
                            {
                                cls: num_classes[i]
                                for i, cls in enumerate(base_dataset.classes)
                            },
                            orient='index').T
                    ])

                criterion_cl = get_loss(self.args,
                                        dataset_class.labeled_class_samples,
                                        reduction='none')
            else:
                best_recall = val_report['macro avg'][
                    'recall'] if is_best else best_recall
                best_report = val_report if is_best else best_report
                best_model = deepcopy(model) if is_best else best_model

            if current_labeled > self.args.stop_labeled:
                break

            save_checkpoint(
                self.args, {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_recall,
                }, is_best)

        if self.args.store_logs:
            store_logs(self.args,
                       pd.DataFrame(reconstruction_loss_log,
                                    columns=['bce', 'l1', 'l2', 'ssim']),
                       log_type='ae_loss')
            store_logs(self.args, metrics_per_cycle)
            store_logs(self.args, metrics_per_epoch, log_type='epoch_wise')
            store_logs(self.args, num_class_per_cycle, log_type='novel_class')

        self.model = model
        return model
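The `is_best` / `last_best_epochs` bookkeeping that recurs in these examples is a standard patience counter: reset it on improvement, count up otherwise, and trigger the next active-learning cycle once it exceeds the patience threshold. A stripped-down, standalone sketch of the pattern (names and toy values hypothetical):

from copy import deepcopy

# Patience pattern: reset the counter on improvement, otherwise count up.
best_recall, last_best_epochs, best_model = 0.0, 0, None
for epoch, recall in enumerate([0.4, 0.5, 0.48, 0.47]):
    model = {'epoch': epoch}          # stand-in for the real model
    is_best = recall > best_recall
    last_best_epochs = 0 if is_best else last_best_epochs + 1
    if is_best:
        best_recall, best_model = recall, deepcopy(model)
    if last_best_epochs > 1:          # patience exceeded -> grow the labeled set
        print('trigger active-learning cycle at epoch', epoch)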
Example No. 6
    def train_validate_classifier(self):
        if self.uncertainty_sampling_method == 'mc_dropout':
            uncertainty_sampler = UncertaintySamplingMCDropout()
            self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
        elif self.uncertainty_sampling_method == 'augmentations_based':
            uncertainty_sampler = UncertaintySamplingAugmentationBased()
            self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
        elif self.uncertainty_sampling_method == 'entropy_based':
            uncertainty_sampler = UncertaintySamplingEntropyBased(
                verbose=True, uncertainty_sampling_method='entropy_based')
            self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
        else:
            uncertainty_sampler = None
            self.args.weak_supervision_strategy = "random_sampling"

        dataset_class = self.datasets[self.args.dataset](
            root=self.args.root,
            add_labeled=self.args.add_labeled,
            advanced_transforms=True,
            merged=self.args.merged,
            remove_classes=self.args.remove_classes,
            oversampling=self.args.oversampling,
            unlabeled_subset_ratio=self.args.unlabeled_subset,
            unlabeled_augmentations=(
                self.uncertainty_sampling_method == 'augmentations_based'),
            seed=self.args.seed,
            k_medoids=self.args.k_medoids,
            k_medoids_model=self.model,
            k_medoids_n_clusters=self.args.k_medoids_n_clusters,
            start_labeled=self.args.start_labeled)

        base_dataset, labeled_dataset, unlabeled_dataset, labeled_indices, unlabeled_indices, test_dataset = \
            dataset_class.get_dataset()

        train_loader, unlabeled_loader, val_loader = create_loaders(
            self.args, labeled_dataset, unlabeled_dataset, test_dataset,
            labeled_indices, unlabeled_indices, self.kwargs,
            dataset_class.unlabeled_subset_num)

        model = self.model

        criterion = get_loss(self.args,
                             dataset_class.labeled_class_samples,
                             reduction='none')

        optimizer = torch.optim.Adam(model.parameters())

        metrics_per_cycle = pd.DataFrame([])
        metrics_per_epoch = pd.DataFrame([])
        num_class_per_cycle = pd.DataFrame([])

        best_recall, best_report, last_best_epochs = 0, None, 0
        best_model = deepcopy(model)

        self.args.start_epoch = 0
        current_labeled = dataset_class.start_labeled

        for epoch in range(self.args.start_epoch, self.args.epochs):
            train_loss = self.train_classifier(train_loader, model, criterion,
                                               optimizer, last_best_epochs,
                                               epoch)
            val_loss, val_report = self.validate_classifier(
                val_loader, model, last_best_epochs, criterion)

            is_best = val_report['macro avg']['recall'] > best_recall
            last_best_epochs = 0 if is_best else last_best_epochs + 1

            val_report = pd.concat([val_report, train_loss, val_loss], axis=1)
            metrics_per_epoch = pd.concat([metrics_per_epoch, val_report])

            if epoch > self.args.labeled_warmup_epochs and last_best_epochs > self.args.add_labeled_epochs:
                metrics_per_cycle = pd.concat([metrics_per_cycle, best_report])

                train_loader, unlabeled_loader, val_loader, labeled_indices, unlabeled_indices = \
                    perform_sampling(self.args, uncertainty_sampler, None,
                                     epoch, model, train_loader, unlabeled_loader,
                                     dataset_class, labeled_indices,
                                     unlabeled_indices, labeled_dataset,
                                     unlabeled_dataset,
                                     test_dataset, self.kwargs, current_labeled,
                                     model)

                current_labeled += self.args.add_labeled
                last_best_epochs = 0

                if self.args.reset_model:
                    model, optimizer, _, self.args = create_model_optimizer_simclr(
                        self.args, dataset_class)
                    # a fresh Adam optimizer immediately replaces the one returned above
                    optimizer = torch.optim.Adam(model.parameters())

                if self.args.novel_class_detection:
                    num_classes = [
                        np.sum(
                            np.array(base_dataset.targets)[labeled_indices] ==
                            i) for i in range(len(base_dataset.classes))
                    ]
                    num_class_per_cycle = pd.concat([
                        num_class_per_cycle,
                        pd.DataFrame.from_dict(
                            {
                                cls: num_classes[i]
                                for i, cls in enumerate(base_dataset.classes)
                            },
                            orient='index').T
                    ])

                criterion = get_loss(self.args,
                                     dataset_class.labeled_class_samples,
                                     reduction='none')
            else:
                best_recall = val_report['macro avg'][
                    'recall'] if is_best else best_recall
                best_report = val_report if is_best else best_report
                best_model = deepcopy(model) if is_best else best_model

            if current_labeled > self.args.stop_labeled:
                break

        if self.args.store_logs:
            store_logs(self.args, metrics_per_cycle)
            store_logs(self.args, metrics_per_epoch, log_type='epoch_wise')
            store_logs(self.args, num_class_per_cycle, log_type='novel_class')

        return best_recall
Example No. 7
        valid_loss /= len(valid_loader)

    return valid_loss


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--first_time', action='store_true')
    parser.add_argument('--dataset', type=str, default='CIFAR')
    args = vars(parser.parse_args())

    if args['dataset'] == 'CIFAR':
        model = CIFAR_Model()
        train_loader, valid_loader, _, _ = create_loaders(is_train=True,
                                                          is_valid=True,
                                                          is_test=False)
    else:
        model = MNIST_Model()
        train_loader, valid_loader, _, _ = create_loaders(
            which_dataset='MNIST', is_train=True, is_valid=True, is_test=False)

    mask = create_mask(model)

    if args['first_time']:
        torch.save(model.state_dict(), 'environment\\initial_model.pth')
        torch.save(mask.state_dict(), 'environment\\initial_mask.pth')
        save_as_csv(model)
    else:
        model.load_state_dict(torch.load('environment\\model.pth'))
        mask.load_state_dict(torch.load('environment\\mask.pth'))
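The hard-coded `environment\\...` separators make this snippet Windows-only; a portable variant could build the same paths with `pathlib` (a sketch, assuming the same file layout):

from pathlib import Path

# Portable construction of the checkpoint paths used above; pathlib picks the
# right separator per OS, so the snippet is no longer Windows-specific.
env = Path('environment')
model_path, mask_path = env / 'initial_model.pth', env / 'initial_mask.pth'
print(model_path)  # environment/initial_model.pth (backslashes on Windows)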
Example No. 8
    def main(self):
        dataset_cl = self.datasets[self.args.dataset](root=self.args.root,
                                                      add_labeled=self.args.add_labeled,
                                                      advanced_transforms=True,
                                                      merged=self.args.merged,
                                                      remove_classes=self.args.remove_classes,
                                                      oversampling=self.args.oversampling,
                                                      unlabeled_subset_ratio=self.args.unlabeled_subset,
                                                      seed=self.args.seed, start_labeled=self.args.start_labeled)

        base_dataset, labeled_dataset, unlabeled_dataset, labeled_indices, unlabeled_indices, test_dataset = \
            dataset_cl.get_dataset()

        train_loader, unlabeled_loader, val_loader = create_loaders(self.args, labeled_dataset, unlabeled_dataset,
                                                                    test_dataset, labeled_indices, unlabeled_indices,
                                                                    self.kwargs, dataset_cl.unlabeled_subset_num)

        model_backbone, optimizer_backbone, _ = create_model_optimizer_scheduler(self.args, dataset_cl)
        model_module = LossNet().cuda()
        optimizer_module = torch.optim.Adam(model_module.parameters())

        models = {'backbone': model_backbone, 'module': model_module}
        optimizers = {'backbone': optimizer_backbone, 'module': optimizer_module}

        criterion_backbone = get_loss(self.args, dataset_cl.labeled_class_samples, reduction='none')

        criterions = {'backbone': criterion_backbone, 'module': loss_module_objective_func}

        uncertainty_sampler = UncertaintySamplingEntropyBased(
            verbose=True,
            uncertainty_sampling_method=self.args.uncertainty_sampling_method)

        current_labeled = dataset_cl.start_labeled
        metrics_per_cycle = pd.DataFrame([])
        metrics_per_epoch = pd.DataFrame([])
        num_class_per_cycle = pd.DataFrame([])

        print_args(self.args)

        best_recall, best_report, last_best_epochs = 0, None, 0
        best_model = deepcopy(models['backbone'])

        for epoch in range(self.args.start_epoch, self.args.epochs):
            train_loss = self.train(train_loader, models, optimizers, criterions, epoch, last_best_epochs)
            val_loss, val_report = self.validate(val_loader, models, criterions, last_best_epochs)

            is_best = val_report['macro avg']['recall'] > best_recall
            last_best_epochs = 0 if is_best else last_best_epochs + 1

            val_report = pd.concat([val_report, train_loss, val_loss], axis=1)
            metrics_per_epoch = pd.concat([metrics_per_epoch, val_report])

            if epoch > self.args.labeled_warmup_epochs and last_best_epochs > self.args.add_labeled_epochs:
                metrics_per_cycle = pd.concat([metrics_per_cycle, best_report])

                train_loader, unlabeled_loader, val_loader, labeled_indices, unlabeled_indices = \
                    perform_sampling(self.args, uncertainty_sampler, None,
                                     epoch, models, train_loader, unlabeled_loader,
                                     dataset_cl, labeled_indices,
                                     unlabeled_indices, labeled_dataset,
                                     unlabeled_dataset,
                                     test_dataset, self.kwargs, current_labeled,
                                     None)

                current_labeled += self.args.add_labeled
                last_best_epochs = 0

                if self.args.reset_model:
                    model_backbone, optimizer_backbone, scheduler_backbone = \
                        create_model_optimizer_scheduler(self.args, dataset_cl)
                    model_module, optimizer_module = create_model_optimizer_loss_net()
                    models = {'backbone': model_backbone, 'module': model_module}
                    optimizers = {'backbone': optimizer_backbone, 'module': optimizer_module}

                if self.args.novel_class_detection:
                    num_classes = [np.sum(np.array(base_dataset.targets)[labeled_indices] == i)
                                   for i in range(len(base_dataset.classes))]
                    num_class_per_cycle = pd.concat([num_class_per_cycle,
                                                     pd.DataFrame.from_dict({cls: num_classes[i] for i, cls in
                                                                             enumerate(base_dataset.classes)},
                                                                            orient='index').T])

                criterion_backbone = get_loss(self.args, dataset_cl.labeled_class_samples, reduction='none')
                criterions = {'backbone': criterion_backbone, 'module': loss_module_objective_func}
            else:
                best_recall = val_report['macro avg']['recall'] if is_best else best_recall
                best_report = val_report if is_best else best_report
                best_model = deepcopy(models['backbone']) if is_best else best_model

            if current_labeled > self.args.stop_labeled:
                break

            save_checkpoint(self.args, {
                'epoch': epoch + 1,
                'state_dict': model_backbone.state_dict(),
                'best_prec1': best_recall,
            }, is_best)

        if self.args.store_logs:
            store_logs(self.args, metrics_per_cycle)
            store_logs(self.args, metrics_per_epoch, log_type='epoch_wise')
            store_logs(self.args, num_class_per_cycle, log_type='novel_class')

        return best_recall
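The per-class counting comprehension in the novel-class-detection branch can also be written with `np.bincount`; a small equivalent sketch on toy data (`targets` and `labeled_indices` stand in for `base_dataset.targets` and the labeled subset):

import numpy as np

# Count how many labeled samples fall into each class, two equivalent ways.
targets = np.array([0, 2, 1, 0, 2, 2])   # stand-in for base_dataset.targets
labeled_indices = [0, 1, 3, 4]           # stand-in for the labeled subset
labeled_targets = targets[labeled_indices]
num_classes = [int(np.sum(labeled_targets == i)) for i in range(3)]
assert num_classes == list(np.bincount(labeled_targets, minlength=3))
print(num_classes)  # [2, 0, 2]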