def on_validation_end(self, **kwargs):
    # Consolidate the state dict of sharded optimizers
    consolidate_optim_state_dict(self.trainer.optimizer)
    stop = self.early_stopping(self.trainer.num_updates,
                               self.trainer.current_iteration,
                               kwargs["meter"])
    # Rank 0 makes the early-stopping decision; broadcast it so all ranks agree
    stop = bool(broadcast_scalar(stop, src=0, device=self.trainer.device))
    return stop
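Every variant of this callback routes the rank-0 early-stopping verdict through broadcast_scalar so all distributed workers act on the same decision. A minimal sketch of such a helper on top of torch.distributed (the no-op fallback and the long-tensor round-trip are assumptions, not necessarily MMF's exact implementation):

import torch
import torch.distributed as dist

def broadcast_scalar(scalar, src=0, device="cpu"):
    # Single-process runs: nothing to synchronize.
    if not dist.is_available() or not dist.is_initialized():
        return scalar
    # Cast to a long tensor so bools and ints broadcast uniformly.
    tensor = torch.tensor(scalar).long().to(device)
    dist.broadcast(tensor, src=src)
    return tensor.item()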
Example #2
    def change_dataloader(self):
        choice = 0

        if self.num_datasets <= 1:
            self.current_index = choice
            return

        if self._is_main:
            choice = self.iteration_strategy()

            # self._finished_iterators will always be empty in case of
            # non-proportional (equal) sampling
            while self.dataset_list[choice] in self._finished_iterators:
                choice = self.iteration_strategy()

        choice = broadcast_scalar(choice, 0, device=get_current_device())
        self.current_index = choice
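This variant hides the sampling policy behind a pluggable iteration_strategy object. A size-proportional random strategy compatible with that call site might look like this sketch (the class name and constructor are illustrative assumptions):

import numpy as np

class RandomIterationStrategy:
    def __init__(self, dataset_sizes):
        sizes = np.asarray(dataset_sizes, dtype=np.float64)
        # Sample each dataset with probability proportional to its size.
        self.probabilities = sizes / sizes.sum()

    def __call__(self):
        # Return the index of the dataset to draw the next batch from.
        return int(np.random.choice(len(self.probabilities), p=self.probabilities))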
Example #3
    def change_dataloader(self):
        choice = 0

        if self.num_datasets <= 1:
            self.current_index = choice
            return

        if self._is_master:
            choice = np.random.choice(
                self.num_datasets, 1, p=self._dataset_probabilities
            )[0]

            # self._finished_iterators will always be empty in case of
            # non-proportional (equal) sampling
            while self.dataset_list[choice] in self._finished_iterators:
                choice = np.random.choice(
                    self.num_datasets, 1, p=self._dataset_probabilities
                )[0]

        choice = broadcast_scalar(choice, 0, device=get_current_device())
        self.current_index = choice
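This variant inlines the draw against self._dataset_probabilities. A sketch of how such weights could be built, covering both the proportional case and the equal (non-proportional) sampling that the comment refers to (the helper name is hypothetical, not MMF API):

import numpy as np

def build_dataset_probabilities(dataset_sizes, proportional=True):
    if not proportional:
        # Equal sampling: every dataset is equally likely on each draw.
        return np.full(len(dataset_sizes), 1.0 / len(dataset_sizes))
    # Proportional sampling: weight each dataset by its size.
    sizes = np.asarray(dataset_sizes, dtype=np.float64)
    return sizes / sizes.sum()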
Example #4
    def change_dataloader(self):
        if self.num_datasets <= 1:
            return
        choice = 0

        if self._is_master:
            choice = np.random.choice(self.num_datasets,
                                      1,
                                      p=self._dataset_probabilities)[0]

            while choice in self._finished_iterators:
                choice = np.random.choice(self.num_datasets,
                                          1,
                                          p=self._dataset_probabilities)[0]

        choice = broadcast_scalar(choice,
                                  0,
                                  device=registry.get("current_device"))
        self.current_index = choice
        self.current_dataset = self.datasets[self.current_index]
        self.current_loader = self.loaders[self.current_index]
        self._chosen_iterator = self.iterators[self.current_index]
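This variant also caches the chosen dataset, loader, and iterator. A self-contained toy showing how this kind of sampler can drive mixed-dataset batching, and why exhausted iterators have to be skipped on later draws (a simplified stand-in, not MMF's actual multi-dataset loader):

import numpy as np

class MultiLoaderIterator:
    def __init__(self, loaders, probabilities):
        self.loaders = loaders
        self.probabilities = probabilities
        self.iterators = [iter(loader) for loader in loaders]
        self._finished = set()

    def __iter__(self):
        return self

    def __next__(self):
        # Stop only once every underlying iterator is exhausted.
        if len(self._finished) == len(self.loaders):
            raise StopIteration
        choice = int(np.random.choice(len(self.loaders), p=self.probabilities))
        while choice in self._finished:
            choice = int(np.random.choice(len(self.loaders), p=self.probabilities))
        try:
            return choice, next(self.iterators[choice])
        except StopIteration:
            # Record the exhausted dataset and resample.
            self._finished.add(choice)
            return next(self)

# Two toy "loaders" of sizes 3 and 5, sampled proportionally (3/8 and 5/8).
for index, batch in MultiLoaderIterator([range(3), range(5)], [0.375, 0.625]):
    print(index, batch)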
Example #5
    def _try_full_validation(self, force=False):
        should_break = False

        if self.num_updates % self.evaluation_interval == 0 or force:
            self.snapshot_timer.reset()
            self.writer.write(
                "Evaluation time. Running on full validation set...")
            # Validation and Early stopping
            # Create a new meter for this case
            report, meter = self.evaluate(self.val_loader)

            extra = {
                "num_updates": self.num_updates,
                "epoch": self.current_epoch,
                "iterations": self.current_iteration,
                "max_updates": self.max_updates,
                "val_time": self.snapshot_timer.get_time_since_start(),
            }

            stop = self.early_stopping(self.num_updates,
                                       self.current_iteration, meter)
            stop = bool(broadcast_scalar(stop, src=0, device=self.device))

            extra.update(self.early_stopping.get_info())

            self._summarize_report(meter, extra=extra)
            gc.collect()

            if "cuda" in str(self.device):
                torch.cuda.empty_cache()

            if stop:
                self.writer.write("Early stopping activated")
                should_break = True

            self.train_timer.reset()

        return should_break
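The early_stopping object used here is called with (num_updates, current_iteration, meter) and returns whether training should halt; get_info() supplies the extra fields that get logged. A patience-based sketch of that interface, simplified to compare a scalar value directly rather than extract a monitored metric from the meter (all names and defaults are assumptions):

class EarlyStopping:
    def __init__(self, patience=4000, minimize=True):
        self.patience = patience
        self.minimize = minimize
        self.best = None
        self.best_update = 0

    def __call__(self, num_updates, iteration, value):
        improved = (
            self.best is None
            or (self.minimize and value < self.best)
            or (not self.minimize and value > self.best)
        )
        if improved:
            self.best = value
            self.best_update = num_updates
            return False
        # Halt once `patience` updates pass without improvement.
        return num_updates - self.best_update > self.patience

    def get_info(self):
        return {"best_update": self.best_update, "best_metric": self.best}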
Example #6
    def change_dataloader(self):
        if self.num_datasets <= 1:
            return
        choice = 0

        if self._is_master:
            choice = np.random.choice(self.num_datasets,
                                      1,
                                      p=self._dataset_probabilities)[0]

            # self._finished_iterators will always be empty in case of
            # non-proportional (equal) sampling
            while choice in self._finished_iterators:
                choice = np.random.choice(self.num_datasets,
                                          1,
                                          p=self._dataset_probabilities)[0]

        choice = broadcast_scalar(choice,
                                  0,
                                  device=registry.get("current_device"))
        self.current_index = choice
        self.current_dataset = self.datasets[self.current_index]
        self.current_loader = self.loaders[self.current_index]
        self._chosen_iterator = self.iterators[self.current_index]
Example #7
def on_validation_end(self, **kwargs):
    stop = self.early_stopping(self.trainer.num_updates,
                               self.trainer.current_iteration,
                               kwargs["meter"])
    stop = bool(broadcast_scalar(stop, src=0, device=self.trainer.device))
    return stop