    def pretrain_decoder_run(self):
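        """Pretrain the decoder: freeze the whole network, re-enable
        gradients for the selected layer span only, then run one
        ``PretrainDecoderEpoch`` per epoch with the contrastive criterion."""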
        # Freeze everything, then unfreeze only the layers between
        # `_enable_grad_from` and `_extract_position`. ("util" is presumably
        # "until" in the model's API; the keyword is kept as-is to match it.)
        self._model.disable_grad_all()
        self._model.enable_grad(from_=self._enable_grad_from,
                                util=self._extract_position)
        self.to(self._device)

        for self._cur_epoch in range(self._start_epoch,
                                     self._max_epoch_train_decoder):
            pretrain_decoder_dict = PretrainDecoderEpoch(
                model=self._model,
                projection_head=self._projector,
                optimizer=self._optimizer,
                pretrain_decoder_loader=self._pretrain_loader_iter,
                contrastive_criterion=self._contrastive_criterion,
                num_batches=self._num_batches,
                cur_epoch=self._cur_epoch,
                device=self._device,
                feature_extractor=self._feature_extractor).run()
            self._scheduler.step()
            # Aggregate this epoch's metrics, persist them, and log them to
            # the writer. (The storage attribute is named "encoder" in the
            # host class but is reused here for the decoder phase.)
            storage_dict = StorageIncomeDict(PRETRAIN_DECODER=pretrain_decoder_dict)
            self._pretrain_encoder_storage.put_from_dict(storage_dict,
                                                         epoch=self._cur_epoch)
            self._writer.add_scalar_with_StorageDict(storage_dict,
                                                     self._cur_epoch)
            # Checkpoint every epoch; "last.pth" is overwritten each time.
            self._save_to("last.pth",
                          path=os.path.join(self._save_dir,
                                            "pretrain_decoder"))
    def _start_training(self):
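        """Generic per-epoch loop: one training epoch, one no-grad
        evaluation epoch, an optional scheduler step, then logging and
        checkpointing."""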
        for self._cur_epoch in range(self._start_epoch, self._max_epoch):
            train_result: EpochResultDict
            eval_result: EpochResultDict
            cur_score: float
            train_result = self.run_epoch()
            # evaluation runs without gradient tracking
            with torch.no_grad():
                eval_result, cur_score = self.eval_epoch()
            # update the lr scheduler, if the trainer defines one
            if hasattr(self, "_scheduler"):
                self._scheduler.step()
            storage_per_epoch = StorageIncomeDict(tra=train_result,
                                                  val=eval_result)
            self._storage.put_from_dict(storage_per_epoch, self._cur_epoch)
            self._writer.add_scalar_with_StorageDict(storage_per_epoch,
                                                     self._cur_epoch)
            # save a checkpoint, scored so the best model can be tracked
            self.save(cur_score)
            # dump the accumulated storage to a csv file
            self._storage.to_csv(self._save_dir)
    def finetune_network_run(self, epocher_type=MeanTeacherEpocher):
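        """Finetune encoder and decoder jointly, by default with one
        ``MeanTeacherEpocher`` per epoch, evaluating the teacher model on
        the validation set."""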
        self.to(self._device)
        # Unfreeze both halves of the network for finetuning.
        self._model.enable_grad_decoder()  # noqa
        self._model.enable_grad_encoder()  # noqa

        for self._cur_epoch in range(self._start_epoch,
                                     self._max_epoch_train_finetune):
            # one semi-supervised finetuning epoch, built from trainer state
            finetune_dict = epocher_type.create_from_trainer(self).run()
            # validate with the teacher model, whose averaged weights are
            # typically more stable than the student's under mean teacher
            val_dict, cur_score = EvalEpoch(self._teacher_model,
                                            val_loader=self._val_loader,
                                            sup_criterion=self._sup_criterion,
                                            cur_epoch=self._cur_epoch,
                                            device=self._device).run()
            self._scheduler.step()
            storage_dict = StorageIncomeDict(finetune=finetune_dict,
                                             val=val_dict)
            self._finetune_storage.put_from_dict(storage_dict,
                                                 epoch=self._cur_epoch)
            self._writer.add_scalar_with_StorageDict(storage_dict,
                                                     self._cur_epoch)
            # checkpoint into a separate "finetune" subdirectory
            self.save(cur_score, os.path.join(self._save_dir, "finetune"))
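
    # A minimal usage sketch (an assumption, not from the original source):
    # the two phases are expected to be chained on a trainer instance, e.g.
    #
    #   trainer.pretrain_decoder_run()    # contrastive decoder pretraining
    #   trainer.finetune_network_run()    # mean-teacher finetuning + eval
    #
    # `_start_training` is the generic loop that simpler trainers reuse in
    # place of this two-phase schedule.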