Example #1
    def _try_full_validation(self, force=False):
        should_break = False

        if self.current_iteration % self.snapshot_interval == 0 or force:
            self.writer.write("Evaluation time. Running on full validation set...")
            # Validation and Early stopping
            # Create a new meter for this case
            report, meter = self.evaluate(self.val_loader)

            extra = {"validation time": self.snapshot_timer.get_time_since_start()}

            # Broadcast the early-stopping decision from the main process so
            # that every distributed worker stops at the same iteration.
            stop = self.early_stopping(self.current_iteration, meter)
            stop = bool(broadcast_scalar(stop, src=0, device=self.device))

            extra.update(self.early_stopping.get_info())

            prefix = "{}: full val".format(report.dataset_name)

            self._summarize_report(meter, prefix=prefix, extra=extra)
            self.snapshot_timer.reset()
            # Free Python garbage and, when running on GPU, release cached
            # CUDA memory accumulated during the validation pass.
            gc.collect()

            if "cuda" in str(self.device):
                torch.cuda.empty_cache()

            if stop is True:
                self.writer.write("Early stopping activated")
                should_break = True

        return should_break
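
Both examples synchronize a value computed on the main process (the early-stopping flag above, the dataset index below) across distributed workers via broadcast_scalar. Below is a minimal sketch of such a helper, assuming torch.distributed is already initialized; the actual Pythia/MMF utility may differ in its details:

import torch
import torch.distributed as dist

def broadcast_scalar(scalar, src=0, device="cpu"):
    # Without an initialized process group there is nothing to sync.
    if not dist.is_available() or not dist.is_initialized():
        return scalar
    # Wrap the scalar in a tensor, broadcast it from the source rank, and
    # return it as a plain Python number on every process.
    tensor = torch.tensor(scalar, device=device)
    dist.broadcast(tensor, src=src)
    return tensor.item()
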
Example #2
    def change_dataloader(self):
        # Nothing to switch when only a single dataset is registered.
        if self._num_datasets <= 1:
            return
        choice = 0

        if self._is_main_process:
            # Sample the next dataset index according to the configured
            # per-dataset probabilities.
            choice = np.random.choice(
                self._num_datasets, 1, p=self._dataset_probablities
            )[0]

            # Re-sample until the choice is a dataset whose iterator has not
            # yet been exhausted in this epoch.
            while choice in self._finished_iterators:
                choice = np.random.choice(
                    self._num_datasets, 1, p=self._dataset_probablities
                )[0]

        # Broadcast the main process' choice so every distributed worker
        # switches to the same dataset and loader.
        choice = broadcast_scalar(choice, 0, device=registry.get("current_device"))
        self._loader_index = choice
        self._chosen_dataset = self._datasets[self._loader_index]
        self._chosen_loader = self._loaders[self._loader_index]
        self._chosen_iterator = self._iterators[self._loader_index]
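
The p argument passed to np.random.choice holds the per-dataset sampling probabilities. One common scheme is to make them proportional to dataset sizes; a minimal, illustrative sketch (the helper name and the assumption that each dataset implements __len__ are not part of the framework's API):

import numpy as np

def size_proportional_probabilities(datasets):
    # Weight each dataset by its sample count so larger datasets are drawn
    # more often, then normalize the weights to sum to 1.
    sizes = np.array([len(d) for d in datasets], dtype=np.float64)
    return sizes / sizes.sum()

# Example: datasets with 100, 300, and 600 samples yield probabilities
# [0.1, 0.3, 0.6].
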