from copy import deepcopy
from os.path import join as pjoin
from typing import Optional

import h5py
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset
from torchvision import models

from baal.active import get_heuristic
from baal.active.active_loop import ActiveLearningLoop
from baal.bayesian.dropout import patch_module
from baal.modelwrapper import ModelWrapper
from baal.utils.metrics import Accuracy, ClassificationReport, ECE, ECE_PerCLs

# NOTE: `get_dataset` is assumed to be a project-local helper that returns the
# requested split of the configured dataset; it is not part of baal.


class ActiveLearning(torch.nn.Module):
    def __init__(self, exp_dict):
        super().__init__()
        self.backbone = models.vgg16(
            pretrained=exp_dict["imagenet_pretraining"], progress=True)
        num_ftrs = self.backbone.classifier[-1].in_features
        self.backbone.classifier[-1] = torch.nn.Linear(num_ftrs,
                                                       exp_dict["num_classes"])
        self.backbone = patch_module(self.backbone)
        self.initial_weights = deepcopy(self.backbone.state_dict())
        self.backbone.cuda()

        self.batch_size = exp_dict['batch_size']
        self.calibrate = exp_dict.get('calibrate', False)
        self.learning_epoch = exp_dict['learning_epoch']
        self.optimizer = torch.optim.SGD(self.backbone.parameters(),
                                         lr=exp_dict['lr'],
                                         weight_decay=5e-4,
                                         momentum=0.9,
                                         nesterov=True)

        self.criterion = CrossEntropyLoss()
        shuffle_prop = exp_dict.get('shuffle_prop', 0.0)
        max_sample = -1
        self.heuristic = get_heuristic(exp_dict['heuristic'],
                                       shuffle_prop=shuffle_prop)
        self.wrapper = ModelWrapper(self.backbone, criterion=self.criterion)
        self.wrapper.add_metric(
            'cls_report',
            lambda: ClassificationReport(exp_dict["num_classes"]))
        self.wrapper.add_metric('accuracy', lambda: Accuracy())
        self.loop = ActiveLearningLoop(None,
                                       self.wrapper.predict_on_dataset,
                                       heuristic=self.heuristic,
                                       ndata_to_label=exp_dict['query_size'],
                                       batch_size=self.batch_size,
                                       iterations=exp_dict['iterations'],
                                       use_cuda=True,
                                       max_sample=max_sample)

        self.calib_set = get_dataset('calib', exp_dict['dataset'])
        self.valid_set = get_dataset('val', exp_dict['dataset'])
        self.calibrator = DirichletCalibrator(
            self.wrapper,
            exp_dict["num_classes"],
            lr=0.001,
            reg_factor=exp_dict['reg_factor'],
            mu=exp_dict['mu'])

        self.active_dataset = None
        self.active_dataset_settings = None

    def train_on_loader(self, loader: DataLoader):
        # Reset to the initial weights so every active-learning cycle retrains
        # from scratch on the enlarged labelled set.
        self.wrapper.load_state_dict(self.initial_weights)
        if self.active_dataset is None:
            self.active_dataset = loader.dataset
            if self.active_dataset_settings is not None:
                self.active_dataset.load_state_dict(
                    self.active_dataset_settings)
            self.loop.dataset = self.active_dataset
        self.criterion.train()
        self.wrapper.train_on_dataset(self.active_dataset,
                                      self.optimizer,
                                      self.batch_size,
                                      epoch=self.learning_epoch,
                                      use_cuda=True)

        metrics = self.wrapper.metrics
        return self._format_metrics(metrics, 'train')

    def val_on_loader(self, loader, savedir=None):
        val_data = loader.dataset
        # Query the heuristic and label new samples before evaluating.
        self.loop.step()
        self.criterion.eval()
        self.wrapper.test_on_dataset(val_data,
                                     batch_size=self.batch_size,
                                     use_cuda=True,
                                     average_predictions=20)
        metrics = self.wrapper.metrics
        mets = self._format_metrics(metrics, 'test')
        mets.update({'num_samples': len(self.active_dataset)})
        return mets

    def on_train_end(self, savedir, epoch):
        h5_path = pjoin(savedir, 'ckpt.h5')
        labelled = self.active_dataset.state_dict()['labelled']
        with h5py.File(h5_path, 'a') as f:
            if f'epoch_{epoch}' not in f:
                g = f.create_group(f'epoch_{epoch}')
                g.create_dataset('labelled', data=labelled.astype(bool))

    def _format_metrics(self, metrics, step):
        mets = {k: v.value for k, v in metrics.items() if step in k}
        mets_unpacked = {}
        for k, v in mets.items():
            if isinstance(v, float):
                mets_unpacked[k] = v
            elif isinstance(v, np.ndarray):
                mets_unpacked[k] = v.mean()
            else:
                mets_unpacked.update(
                    {f"{k}_{ki}": np.mean(vi)
                     for ki, vi in v.items()})
        return mets_unpacked

    def get_state_dict(self):
        state = {}
        state["model"] = self.backbone.state_dict()
        state["optimizer"] = self.optimizer.state_dict()
        if self.active_dataset is None:
            state['dataset'] = None
        else:
            state["dataset"] = self.active_dataset.state_dict()
        return state

    def set_state_dict(self, state_dict):
        self.backbone.load_state_dict(state_dict["model"])
        self.optimizer.load_state_dict(state_dict["optimizer"])
        self.active_dataset_settings = state_dict["dataset"]
        if self.active_dataset is not None:
            self.active_dataset.load_state_dict(self.active_dataset_settings)
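

# Minimal usage sketch for ActiveLearning above. Everything here is
# illustrative: the exp_dict keys mirror the ones read in __init__,
# `get_dataset` is the project-local helper assumed at the top of the file,
# and the train split is assumed to be a baal ActiveLearningDataset so that
# the label bookkeeping and `state_dict` calls work.
def _run_active_learning_example():
    exp_dict = {
        "imagenet_pretraining": True,
        "num_classes": 10,
        "batch_size": 32,
        "lr": 0.001,
        "learning_epoch": 10,
        "heuristic": "bald",
        "query_size": 100,
        "iterations": 20,
        "dataset": "cifar10",  # hypothetical dataset name
        "reg_factor": 1e-3,
        "mu": 1e-3,
    }
    model = ActiveLearning(exp_dict)
    train_set = get_dataset("train", exp_dict["dataset"])
    train_loader = DataLoader(train_set, batch_size=exp_dict["batch_size"],
                              shuffle=True)
    val_loader = DataLoader(model.valid_set,
                            batch_size=exp_dict["batch_size"])
    # Each cycle fits on the current labelled pool; val_on_loader then labels
    # `query_size` new samples and evaluates.
    for _ in range(5):
        print(model.train_on_loader(train_loader))
        print(model.val_on_loader(val_loader))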


# Example #2

class DirichletCalibrator:
    """
    Adding a linear layer to a classifier model after the model is
    trained and train this new layer until convergence.
    Together with the linear layer, the model is now calibrated.
    Source: https://arxiv.org/abs/1910.12656
    Code inspired from: https://github.com/dirichletcal/experiments_neurips

    References:
        @article{kullbeyond,
                title={Beyond temperature scaling: Obtaining well-calibrated multi-class
                 probabilities with Dirichlet calibration Supplementary material},
                author={Kull, Meelis and Perello-Nieto,
                 Miquel and K{\"a}ngsepp, Markus and Silva Filho,
                  Telmo and Song, Hao and Flach, Peter}
                }

    Args:

        wrapper (ModelWrapper): Provides training and testing methods.
        num_classes (int): Number of classes in classification task.
        lr (float): Learning rate.
        reg_factor (float): Regularization factor for the linear layer weights.
        mu (float): Regularization factor for the linear layer biases.
            If not given, will be initialized by "l".

    """
    def __init__(
        self,
        wrapper: ModelWrapper,
        num_classes: int,
        lr: float,
        reg_factor: float,
        mu: Optional[float] = None,
    ):
        self.num_classes = num_classes
        self.criterion = nn.CrossEntropyLoss()
        self.lr = lr
        self.reg_factor = reg_factor
        self.mu = mu or reg_factor
        self.dirichlet_linear = nn.Linear(self.num_classes, self.num_classes)
        self.model = nn.Sequential(wrapper.model, self.dirichlet_linear)
        self.wrapper = ModelWrapper(self.model, self.criterion)

        self.wrapper.add_metric("ece", lambda: ECE())
        self.wrapper.add_metric("ece", lambda: ECE_PerCLs(num_classes))

    def l2_reg(self):
        """Compute the L2 regularization term from the calibration layer's
        trainable parameters.

        Returns:
            The regularization term for the linear layer.
        """
        weight_p, bias_p = self.dirichlet_linear.parameters()
        w_l2_factor = weight_p.norm(2)
        b_l2_factor = bias_p.norm(2)
        return self.reg_factor * w_l2_factor + self.mu * b_l2_factor

    def calibrate(self,
                  train_set: Dataset,
                  test_set: Dataset,
                  batch_size: int,
                  epoch: int,
                  use_cuda: bool,
                  double_fit: bool = False,
                  **kwargs):
        """
        Training the linear layer given a training set and a validation set.
        The training set should be different from what model is trained on.

        Args:
            train_set (Dataset): The training set.
            test_set (Dataset): The validation set.
            batch_size (int): Batch size used.
            epoch (int): Number of epochs to train the linear layer for.
            use_cuda (bool): If "True", will use GPU.
            double_fit (bool): If "True" would fit twice on the train set.
            kwargs (dict): Rest of parameters for baal.ModelWrapper.train_and_test_on_dataset().

        Returns:
            loss_history (list[float]): List of loss values for each epoch.
            model.state_dict (dict): Model weights.

        """

        # Reinitialize the calibration layer to identity weights and zero bias.
        self.dirichlet_linear.weight.data.copy_(
            torch.eye(self.dirichlet_linear.weight.shape[0]))
        self.dirichlet_linear.bias.data.copy_(
            torch.zeros(*self.dirichlet_linear.bias.shape))
        if use_cuda:
            self.dirichlet_linear.cuda()

        optimizer = Adam(self.dirichlet_linear.parameters(), lr=self.lr)

        loss_history, weights = self.wrapper.train_and_test_on_datasets(
            train_set,
            test_set,
            optimizer,
            batch_size,
            epoch,
            use_cuda,
            regularizer=self.l2_reg,
            return_best_weights=True,
            patience=None,
            **kwargs)
        self.model.load_state_dict(weights)

        if double_fit:
            lr = self.lr / 10
            optimizer = Adam(self.dirichlet_linear.parameters(), lr=lr)
            loss_history, weights = self.wrapper.train_and_test_on_datasets(
                train_set,
                test_set,
                optimizer,
                batch_size,
                epoch,
                use_cuda,
                regularizer=self.l2_reg,
                return_best_weights=True,
                patience=None,
                **kwargs)
            self.model.load_state_dict(weights)

        return loss_history, self.model.state_dict()

    @property
    def calibrated_model(self):
        return self.model

    @property
    def metrics(self):
        return self.wrapper.metrics
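

# Minimal calibration sketch (illustrative): `trained_wrapper` is a baal
# ModelWrapper whose model has already been trained, and `calib_set` /
# `valid_set` are held-out Datasets distinct from the training data. All
# three names are placeholders.
def _run_calibration_example(trained_wrapper: ModelWrapper,
                             calib_set: Dataset,
                             valid_set: Dataset):
    calibrator = DirichletCalibrator(trained_wrapper, num_classes=10,
                                     lr=0.001, reg_factor=1e-2)
    loss_history, state = calibrator.calibrate(
        calib_set, valid_set,
        batch_size=32, epoch=10,
        use_cuda=torch.cuda.is_available(),
        double_fit=True)
    # The calibrated model is the original backbone followed by the Dirichlet
    # linear layer.
    return calibrator.calibrated_model, loss_history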