    def train(
        self,
        experiences: Union[
            ClassificationExperience, Sequence[ClassificationExperience]
        ],
        eval_streams: Optional[
            Sequence[
                Union[
                    ClassificationExperience, Sequence[ClassificationExperience]
                ]
            ]
        ] = None,
        **kwargs
    ):
        """Training loop over one or more experiences.

        If `eval_streams` is None, the training experiences themselves are
        used for evaluation. Returns the last recorded value of each metric.
        """
        self.is_training = True
        self._stop_training = False

        self.model.train()
        self.model.to(self.device)

        # Normalize training and eval data.
        if not isinstance(experiences, Sequence):
            experiences = [experiences]
        if eval_streams is None:
            eval_streams = [experiences]
        self._eval_streams = eval_streams

        trigger_plugins(self, "before_training")
        for self.experience in experiences:
            self.train_exp(self.experience, eval_streams, **kwargs)
        trigger_plugins(self, "after_training")
        res = self.evaluator.get_last_metrics()
        return res
    def eval_epoch(self, **kwargs):
        """Evaluation loop over the current `self.dataloader`."""
        for self.mbatch in self.dataloader:
            self._unpack_minibatch()
            trigger_plugins(self, 'before_eval_iteration')

            trigger_plugins(self, 'before_eval_forward')
            self.mb_output = self.forward()
            trigger_plugins(self, 'after_eval_forward')
            self.loss = self.criterion()
            trigger_plugins(self, 'after_eval_iteration')
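In the evaluation loop above, plugins can read `strategy.mb_output` after the forward pass to accumulate metrics. A minimal sketch of such a metric-style plugin follows; it assumes the targets are available as `strategy.mbatch[1]` after `_unpack_minibatch`, which is an assumption about the minibatch layout rather than something guaranteed by this snippet.

import torch

class AccuracyTracker:
    """Sketch of a metric plugin for the eval loop above (illustrative,
    not the library's accuracy metric)."""

    def __init__(self):
        self.correct = 0
        self.total = 0

    def before_eval(self, strategy, **kwargs):
        # Reset counters at the start of every evaluation run.
        self.correct = 0
        self.total = 0

    def after_eval_iteration(self, strategy, **kwargs):
        preds = torch.argmax(strategy.mb_output, dim=1)
        targets = strategy.mbatch[1]  # assumed (inputs, targets, ...) layout
        self.correct += (preds == targets).sum().item()
        self.total += targets.shape[0]

    def result(self):
        return self.correct / self.total if self.total else 0.0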
Example #3
 def _before_backward(self, **kwargs):
     trigger_plugins(self, "before_backward", **kwargs)
Example #4
 def _before_training_iteration(self, **kwargs):
     trigger_plugins(self, "before_training_iteration", **kwargs)
Example #5
 def _after_training_epoch(self, **kwargs):
     trigger_plugins(self, "after_training_epoch", **kwargs)
    def train_exp(self, experience: Experience, eval_streams=None, **kwargs):
        """Training loop over a single experience: adapt the data and the
        model, then run up to `train_epochs` training epochs."""
        self.experience = experience
        self.model.train()

        if eval_streams is None:
            eval_streams = [experience]
        for i, exp in enumerate(eval_streams):
            if not isinstance(exp, Sequence):
                eval_streams[i] = [exp]

        # Data Adaptation (e.g. add new samples/data augmentation)
        trigger_plugins(self, 'before_train_dataset_adaptation')
        self.train_dataset_adaptation(**kwargs)
        trigger_plugins(self, 'after_train_dataset_adaptation')
        self.make_train_dataloader(**kwargs)

        # Model Adaptation (e.g. freeze/add new units)
        self.model = self.model_adaptation()
        self.make_optimizer()

        trigger_plugins(self, 'before_training_exp')
        for _ in range(self.train_epochs):
            trigger_plugins(self, 'before_training_epoch')
            if self._stop_training:  # Early stopping
                self._stop_training = False
                break
            self.training_epoch(**kwargs)
            trigger_plugins(self, 'after_training_epoch')
        trigger_plugins(self, 'after_training_exp')
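The `_stop_training` flag checked at the top of each epoch gives plugins a way to cut the current experience short. A minimal sketch of such a plugin follows; `PatiencePlugin` is a hypothetical name and this is not the library's early-stopping implementation. It assumes plugins are invoked through a `trigger_plugins`-style dispatcher like the one sketched above.

class PatiencePlugin:
    """Sketch: stop training on the current experience once the loss has
    not improved for `patience` consecutive epochs."""

    def __init__(self, patience=3):
        self.patience = patience
        self.best_loss = None
        self.bad_epochs = 0

    def before_training_exp(self, strategy, **kwargs):
        # Reset the counters for every new experience.
        self.best_loss = None
        self.bad_epochs = 0

    def after_training_epoch(self, strategy, **kwargs):
        # `strategy.loss` is the last minibatch loss here, a simplification.
        current = float(strategy.loss)
        if self.best_loss is None or current < self.best_loss:
            self.best_loss = current
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
            if self.bad_epochs >= self.patience:
                strategy._stop_training = True  # honored at the next epoch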
Example #7
 def _after_eval_forward(self, **kwargs):
     trigger_plugins(self, "after_eval_forward", **kwargs)
Example #8
 def _before_eval_iteration(self, **kwargs):
     trigger_plugins(self, "before_eval_iteration", **kwargs)
Example #9
 def _before_update(self, **kwargs):
     trigger_plugins(self, "before_update", **kwargs)
Example #10
 def _before_eval_exp(self, **kwargs):
     trigger_plugins(self, "before_eval_exp", **kwargs)
Example #11
 def _after_eval(self, **kwargs):
     trigger_plugins(self, "after_eval", **kwargs)
Example #12
 def _after_training(self, **kwargs):
     trigger_plugins(self, "after_training", **kwargs)
Example #13
 def _before_training(self, **kwargs):
     trigger_plugins(self, "before_training", **kwargs)
    def training_epoch(self, **kwargs):
        """ Training epoch.

        :param kwargs:
        :return:
        """
        for self.mbatch in self.dataloader:
            if self._stop_training:
                break

            self._unpack_minibatch()
            trigger_plugins(self, 'before_training_iteration')

            self.optimizer.zero_grad()
            self.loss = 0

            # Forward
            trigger_plugins(self, 'before_forward')
            self.mb_output = self.forward()
            trigger_plugins(self, 'after_forward')

            # Loss & Backward
            self.loss += self.criterion()

            trigger_plugins(self, 'before_backward')
            self.loss.backward()
            trigger_plugins(self, 'after_backward')

            # Optimization step
            trigger_plugins(self, 'before_update')
            self.optimizer.step()
            trigger_plugins(self, 'after_update')
            trigger_plugins(self, 'after_training_iteration')
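Because `loss.backward()` runs only after the 'before_backward' trigger, a plugin can add a penalty term to `strategy.loss` at that point and have it included in the gradients. The sketch below shows the idea with a plain L2 penalty towards previously stored parameters; the plugin name and the penalty are illustrative assumptions, not a specific library plugin.

import torch

class L2RegularizationPlugin:
    """Sketch: pull the model towards the parameters it had at the end of
    the previous experience by adding an L2 penalty before backward."""

    def __init__(self, strength=0.01):
        self.strength = strength
        self.reference_params = None

    def after_training_exp(self, strategy, **kwargs):
        # Snapshot the parameters at the end of each experience.
        self.reference_params = [
            p.detach().clone() for p in strategy.model.parameters()
        ]

    def before_backward(self, strategy, **kwargs):
        if self.reference_params is None:
            return  # nothing to regularize towards yet
        penalty = sum(
            torch.sum((p - ref) ** 2)
            for p, ref in zip(strategy.model.parameters(),
                              self.reference_params)
        )
        strategy.loss = strategy.loss + self.strength * penalty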
    def eval(self, exp_list: Union[Experience, Sequence[Experience]],
             **kwargs):
        """
        Evaluate the current model on a series of experiences and
        return the last recorded value for each metric.

        :param exp_list: CL experience information.
        :param kwargs: custom arguments.

        :return: dictionary containing the last recorded value for
            each metric name.
        """
        # eval can be called inside the train method.
        # Save the shared state here to restore before returning.
        train_state = self._save_train_state()
        self.is_training = False
        self.model.eval()

        if not isinstance(exp_list, Sequence):
            exp_list = [exp_list]
        self.current_eval_stream = exp_list

        trigger_plugins(self, 'before_eval')
        for self.experience in exp_list:
            # Data Adaptation
            trigger_plugins(self, 'before_eval_dataset_adaptation')
            self.eval_dataset_adaptation(**kwargs)
            trigger_plugins(self, 'after_eval_dataset_adaptation')
            self.make_eval_dataloader(**kwargs)

            # Model Adaptation (e.g. freeze/add new units)
            self.model = self.model_adaptation()

            trigger_plugins(self, 'before_eval_exp')
            self.eval_epoch(**kwargs)
            trigger_plugins(self, 'after_eval_exp')

        trigger_plugins(self, 'after_eval')
        res = self.evaluator.get_last_metrics()

        # restore previous shared state.
        self._load_train_state(*train_state)
        return res
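Because `eval` can be called from inside `train` (for instance through `eval_streams`), it snapshots the mutable fields it is about to overwrite and restores them before returning. The two helpers are not shown in this section; the sketch below, written as they might appear inside the strategy class, is an assumption about which attributes they cover rather than the real implementation.

def _save_train_state(self):
    """Sketch: capture the fields that `eval` visibly overwrites so a
    nested eval call does not corrupt an ongoing training loop."""
    return (
        self.experience,
        getattr(self, "dataloader", None),
        self.is_training,
    )

def _load_train_state(self, experience, dataloader, is_training):
    self.experience = experience
    self.dataloader = dataloader
    self.is_training = is_training
    if self.is_training:
        # `eval` switched the model to eval mode; go back to train mode.
        self.model.train()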
Example #16
 def _after_backward(self, **kwargs):
     trigger_plugins(self, "after_backward", **kwargs)
Example #17
 def _after_training_iteration(self, **kwargs):
     trigger_plugins(self, "after_training_iteration", **kwargs)
Example #18
 def _after_eval_exp(self, **kwargs):
     trigger_plugins(self, "after_eval_exp", **kwargs)
Example #19
 def _after_update(self, **kwargs):
     trigger_plugins(self, "after_update", **kwargs)
Example #20
 def _before_eval_dataset_adaptation(self, **kwargs):
     trigger_plugins(self, "before_eval_dataset_adaptation", **kwargs)
Example #21
 def _before_eval_forward(self, **kwargs):
     trigger_plugins(self, "before_eval_forward", **kwargs)
Example #22
 def _after_eval_dataset_adaptation(self, **kwargs):
     trigger_plugins(self, "after_eval_dataset_adaptation", **kwargs)
Example #23
 def _after_eval_iteration(self, **kwargs):
     trigger_plugins(self, "after_eval_iteration", **kwargs)
Example #24
 def _before_training_epoch(self, **kwargs):
     trigger_plugins(self, "before_training_epoch", **kwargs)