コード例 #1
0
    def check_predict_details(self):
        """Inspect dev-set roi_heads outputs across several mask thresholds.

        Reuses cached activations from ``dev_output_results.pkl`` when the
        pickle exists; otherwise registers a forward hook on the model's
        ``roi_heads``, runs one evaluation pass over the dev loader to
        collect the outputs, and caches them. The collected detections are
        then fed to the analyzer's threshold sweep.
        """
        # Thanks https://discuss.pytorch.org/t/how-can-l-load-my-best-model-as-a-feature-extractor-evaluator/17254/6
        assert self.model_ft is not None
        cached_acts = kernel_utils.get_obj_or_dump("dev_output_results.pkl")
        self.analyzer = TorchModelAnalyzer(self)
        analyzer = self.analyzer

        if cached_acts is None:
            # No cache yet: capture roi_heads outputs via a forward hook,
            # then run an evaluation pass so the hook fires.
            analyzer.register_forward_hook(
                self.model_ft.roi_heads,
                analyzer.get_output_saved("roi_heads"))
            # Observed output shapes (kept for reference):
            # - mask_head output: torch.Size([40, 256, 14, 14]),
            #   (mask_fcn4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            #   (relu4): ReLU(inplace)
            # - mask_predictor: torch.Size([46, 2, 28, 28]) (in evaluate mode)
            # - roi_heads: tuple of length 2,
            #   ([{'boxes', 'labels', 'scores': torch.Size([7]), 'masks': torch.Size([7, 1, 28, 28])}, ...],
            #    [PLACE_FOR_LOSSES])
            # - roi_heads in training mode emits a different number of
            #   results depending on the score threshold.

            self.eval_model_loss(
                self.model_ft,
                self.data_loader_dev,
                self.device,
                self.metric_logger,
                print_freq=150,
            )
            kernel_utils.dump_obj(analyzer.activation,
                                  "dev_output_results.pkl",
                                  force=True)
        else:
            analyzer.activation = cached_acts

        # Flatten the per-batch detection lists into one list of detections.
        roi_acts = []
        for batch_out in analyzer.activation["roi_heads"]:
            roi_acts.extend(batch_out[0])
        analyzer.test_out_threshold(roi_acts)
コード例 #2
0
    def dump_state(self,
                   exec_flag=False,
                   force=True):
        """Persist this kernel's picklable state for the current stage.

        Model weights (when a model exists) go to ``cv_model.pth``; every
        other attribute is pickled to ``run_state_<stage>.pkl``, skipping
        the attributes that cannot be pickled (model, optimizer, scheduler,
        metric logger) — effectively only the dataloaders and plain state
        survive the dump.

        Args:
          exec_flag: only dump when True (Default value = False)
          force: forwarded to ``kernel_utils.dump_obj`` (Default value = True)
        """
        kernel_utils.logger.debug(f"state {self._stage}")
        if not exec_flag:
            return

        kernel_utils.logger.debug(f"dumping state {self._stage}")

        if self.model_ft is not None:
            torch.save(self.model_ft.state_dict(), "cv_model.pth")

        # These attributes hold live torch objects that do not pickle.
        skip = {"model_ft", "optimizer", "lr_scheduler", "metric_logger"}
        picklable = {k: v for k, v in vars(self).items() if k not in skip}

        kernel_utils.dump_obj(picklable,
                              f"run_state_{self._stage}.pkl",
                              force=force)
コード例 #3
0
ファイル: kernel.py プロジェクト: pennz/kaggle_runner
    def dump_state(self, exec_flag=False):
        """Pickle this whole object to ``run_state_<stage>.pkl``.

        Args:
          exec_flag: only dump when True (Default value = False)

        Returns:

        """
        self.logger.debug("state %s" % self._stage)

        if not exec_flag:
            return

        self.logger.debug("dumping state to file for %s" % self._stage)
        # dump_obj(self, 'run_state.pkl', force=True)  # too large
        kernel_utils.dump_obj(self,
                              "run_state_%s.pkl" % self._stage,
                              force=True)
コード例 #4
0
    def test_out_threshold(self, activations,
                           thresholds=(0.5, 0.52, 0.55, 0.6)):
        """Sweep mask score thresholds over *activations* and persist results.

        For each threshold, asks the kernel to convert the activations into
        RLE predictions plus statistics, collects both keyed by threshold,
        and dumps the two mappings to ``stat_for_threshold.pkl`` and
        ``pred_for_threshold.pkl``.

        Args:
          activations: model outputs accepted by
            ``kernel.predict_rle_from_acts_with_threshold`` — presumably the
            roi_heads detections collected upstream; confirm against caller.
          thresholds: iterable of cut-off values to try. The default keeps
            the previously hard-coded sweep, so existing callers are
            unaffected.
        """
        stat_for_threshold = {}
        pred_for_threshold = {}
        for threshold in thresholds:
            preds, stat = self.kernel.predict_rle_from_acts_with_threshold(
                activations, threshold)

            stat_for_threshold[threshold] = stat
            pred_for_threshold[threshold] = preds

        kernel_utils.dump_obj(stat_for_threshold,
                              "stat_for_threshold.pkl",
                              force=True)
        kernel_utils.dump_obj(pred_for_threshold,
                              "pred_for_threshold.pkl",
                              force=True)
コード例 #5
0
ファイル: attention.py プロジェクト: pennz/kaggle_runner
 def dump_state(self, exec_flag=False, force=True):
     """Pickle this whole object to ``run_state_<stage>.pkl``.

     Args:
       exec_flag: only dump when True (Default value = False)
       force: forwarded to ``dump_obj`` (Default value = True)
     """
     logger.debug(f"state {self._stage}")
     if not exec_flag:
         return
     logger.debug(f"dumping state {self._stage}")
     dump_obj(self, f"run_state_{self._stage}.pkl", force=force)