def log_iter_stats(self, cur_epoch, cur_iter):
        """
        log the stats for cur iteration
        :param cur_epoch:
        :param cur_iter:
        :return:
        """
        if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
            return
        eta_sec = self.iter_timer.seconds() * (
            self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1))
        eta = str(datetime.timedelta(seconds=int(eta_sec)))

        stats = {
            "_type": "train_iter",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
            "time": self.iter_timer.seconds(),
            "eta": eta,
            "mse_loss": self.mse_loss.get_win_median(),
            "entropy_loss": self.entropy_loss.get_win_median(),
            "combine_loss": self.combine_loss.get_win_median(),
            "lr": self.lr,
            "gpu":
            "{:.2f}GB".format(torch.cuda.max_memory_allocated() / 1024**3)
        }
        logging.log_json_stats(stats)
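The loss fields above are read with get_win_median(), which implies a windowed scalar meter. A minimal sketch of such a meter, assuming a deque-backed window (illustrative only; the actual meter class used by these examples may differ):

from collections import deque

import numpy as np


class ScalarMeterSketch:
    """Track a scalar over a sliding window (illustrative sketch)."""

    def __init__(self, window_size):
        self.deque = deque(maxlen=window_size)

    def add_value(self, value):
        # Append the newest value; the oldest one falls out of the window.
        self.deque.append(value)

    def get_win_median(self):
        # Median of the values currently in the window.
        return np.median(self.deque)

    def get_win_avg(self):
        # Mean of the values currently in the window.
        return np.mean(self.deque)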
Example #2
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        stats = {
            "_type": "val_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
            "RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()),
        }
        if self._cfg.DATA.MULTI_LABEL:
            stats["map"] = get_map(
                torch.cat(self.all_preds).cpu().numpy(),
                torch.cat(self.all_labels).cpu().numpy(),
            )
        else:
            top1_err = self.num_top1_mis / self.num_samples
            top5_err = self.num_top5_mis / self.num_samples
            self.min_top1_err = min(self.min_top1_err, top1_err)
            self.min_top5_err = min(self.min_top5_err, top5_err)

            stats["top1_err"] = top1_err
            stats["top5_err"] = top5_err
            stats["min_top1_err"] = self.min_top1_err
            stats["min_top5_err"] = self.min_top5_err

        logging.log_json_stats(stats)
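For the multi-label branch, get_map reduces the concatenated predictions and labels to a mean average precision. A hedged sketch of such a helper, assuming scikit-learn and a binary label matrix (the repo's own get_map may filter empty classes or handle edge cases differently):

import numpy as np
from sklearn.metrics import average_precision_score


def get_map_sketch(preds, labels):
    """Mean average precision over classes (illustrative sketch).

    preds:  (num_videos, num_classes) array of prediction scores.
    labels: (num_videos, num_classes) binary ground-truth matrix.
    """
    # Per-class average precision, then the unweighted mean across classes.
    aps = average_precision_score(labels, preds, average=None)
    return float(np.mean(aps))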
Example #3
    def finalize_metrics(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) correspods to top-1 and top-5 accuracy.
        """
        if not all(self.clip_count == self.num_clips):
            logger.warning("clip count {} ~= num clips {}".format(
                ", ".join([
                    "{}: {}".format(i, k)
                    for i, k in enumerate(self.clip_count.tolist())
                ]),
                self.num_clips,
            ))

        stats = {"split": "test_final"}
        if self.multi_label:
            map = get_map(self.video_preds.cpu().numpy(),
                          self.video_labels.cpu().numpy())
            stats["map"] = map
        else:
            # Per-class variant of metrics.topks_correct: in addition to the
            # overall correct counts, it accumulates a {label: top-1} summary
            # into self.finall_label.
            num_topks_correct, self.finall_label = metrics.topks_correct_each_class(
                self.video_preds, self.video_labels, ks, self.finall_label)

            # Overall top-k accuracies in percent.
            topks = [(x / self.video_preds.size(0)) * 100.0
                     for x in num_topks_correct]
            assert len({len(ks), len(topks)}) == 1
            for k, topk in zip(ks, topks):
                stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk,
                                                                    prec=2)
        # Save the per-label results dict to a JSON file.
        jsObj = json.dumps(self.finall_label)

        # fileObject = open('HMDB51_temporal_Kmeans_split1_8X8_FIX.json', 'w')
        with open('HMDB51_MAXindex_split2_SF8X8_Test_MUltiGPU.json',
                  'w') as fileObject:
            fileObject.write(jsObj)

        logging.log_json_stats(stats)
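metrics.topks_correct_each_class is a project-specific helper, so only its call site appears above. As an illustration only (not the repo's implementation), a per-class top-1 breakdown can be derived from the ensembled video predictions like this:

import torch


def per_class_top1_sketch(video_preds, video_labels):
    """Return {class_id: top-1 accuracy in percent} (illustrative sketch)."""
    top1 = video_preds.argmax(dim=1)
    per_class = {}
    for c in torch.unique(video_labels).tolist():
        mask = video_labels == c
        # Fraction of videos of class c whose top-1 prediction is c.
        per_class[int(c)] = (top1[mask] == c).float().mean().item() * 100.0
    return per_class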
Example #4
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        eta_sec = self.iter_timer.seconds() * (
            self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters)
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        # Epoch-level stats for both the discriminator (D) and the generator (G).
        stats = {
            "_type": "train_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "eta": eta,
            "lr_D": self.lr_D,
            "loss_D": self.loss_D_total / self.num_samples_D,
            "lr_G": self.lr_G,
            "loss_G": self.loss_G_total / self.num_samples_G,
            "appe_loss": self.appe_loss_total / self.num_samples_G,
            "flow_loss": self.flow_loss_total / self.num_samples_G,
            "total_G_loss": self.loss_G_three_part_total / self.num_samples_G,
            "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
            "RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()),
        }

        logging.log_json_stats(stats)
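misc.gpu_mem_usage() and misc.cpu_mem_usage() are utility functions from the surrounding codebase; the sketches below show what they typically return (peak GPU memory, and used/total host RAM, both in GB), assuming torch and psutil. The actual misc module may differ.

import psutil
import torch


def gpu_mem_usage_sketch():
    """Peak GPU memory allocated by torch, in GB (illustrative sketch)."""
    if not torch.cuda.is_available():
        return 0.0
    return torch.cuda.max_memory_allocated() / 1024 ** 3


def cpu_mem_usage_sketch():
    """(used, total) host RAM in GB (illustrative sketch)."""
    vram = psutil.virtual_memory()
    return (vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3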
Example #5
    def log_iter_stats(self, cur_iter, top1_acc, top5_acc):
        """
        Log the stats of the current testing iteration.
        Args:
            cur_iter (int): the current iteration of testing.
            top1_acc (float): top-1 accuracy to log for this iteration.
            top5_acc (float): top-5 accuracy to log for this iteration.
        """
        eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        stats = {
            "split": "test_iter",
            "cur_iter": "{}".format(cur_iter + 1),
            "eta": eta,
            "time_diff": self.iter_timer.seconds(),
            "top-1 acc": top1_acc,
            "top-5 acc": top5_acc,
        }
        logging.log_json_stats(stats)
Example #6
    def log_iter_stats(self, cur_epoch, cur_iter, mode):
        """
        log the stats of the current iteration.
        Args:
            cur_epoch (int): the number of current epoch.
            cur_iter (int): the number of current iteration.
        """
        if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
            return
        eta_sec = self.iter_timer.seconds() * (
            self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1))
        eta = str(datetime.timedelta(seconds=int(eta_sec)))

        # stats in D or G
        if mode in ["D", "Discriminator"]:
            stats = {
                "_type": "train_iter",
                "epoch": "{}/{}".format(cur_epoch + 1,
                                        self._cfg.SOLVER.MAX_EPOCH),
                "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
                "time_diff": self.iter_timer.seconds(),
                "eta": eta,
                "loss_D": self.loss_D.get_win_median(),
                "lr_D": self.lr_D,
                "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
            }
        elif mode in ["G", "Generator"]:
            stats = {
                "_type": "train_iter",
                "epoch": "{}/{}".format(cur_epoch + 1,
                                        self._cfg.SOLVER.MAX_EPOCH),
                "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
                "time_diff": self.iter_timer.seconds(),
                "eta": eta,
                "loss_G": self.loss_G.get_win_median(),
                "appe_loss": self.appe_loss.get_win_median(),
                "flow_loss": self.flow_loss.get_win_median(),
                "three_part_loss_G": self.loss_G_three_part.get_win_median(),
                "lr_G": self.lr_G,
                "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
            }

        else:
            raise NotImplementedError("Unsupported mode: {}".format(mode))
        logging.log_json_stats(stats)
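A hedged sketch of how a GAN-style training loop might drive this meter. train_D_step, train_G_step, and the update_* meter methods are hypothetical names introduced for illustration, and iter_tic/iter_toc are assumed to exist as in PySlowFast-style meters; none of this is a documented API of the repo.

# Illustrative loop only: helper and meter-update names are hypothetical.
for cur_iter, (inputs, _) in enumerate(train_loader):
    train_meter.iter_tic()

    # Discriminator update, then log the D-side stats.
    loss_D = train_D_step(inputs)
    train_meter.update_D(loss_D, lr_D)
    train_meter.log_iter_stats(cur_epoch, cur_iter, mode="D")

    # Generator update, then log the G-side stats.
    loss_G = train_G_step(inputs)
    train_meter.update_G(loss_G, lr_G)
    train_meter.log_iter_stats(cur_epoch, cur_iter, mode="G")

    train_meter.iter_toc()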
Example #7
    def each_label_test(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) correspods to top-1 and top-5 accuracy.
        """

        stats = {"split": "test_each_class"}

        num_topks_correct = metrics.topks_correct(self.video_preds,
                                                  self.video_labels, ks)
        topks = [(x / self.video_preds.size(0)) * 100.0
                 for x in num_topks_correct]
        # One accuracy value per requested k.
        assert len({len(ks), len(topks)}) == 1
        for k, topk in zip(ks, topks):
            stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk, prec=2)
        logging.log_json_stats(stats)
Example #8
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        stats = {
            "_type": "train_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "mse_loss": self.mse_loss.get_win_avg(),
            "entropy_loss": self.entropy_loss.get_win_avg(),
            "combine_loss": self.combine_loss.get_win_avg(),
            "gpu_mem": "{:.2f} GB".format(
                torch.cuda.max_memory_allocated() / 1024**3),
        }
        logging.log_json_stats(stats)
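metrics.topks_correct, used in each_label_test above, returns for each k in ks how many samples have the true label inside their top-k predictions. A sketch of how such a helper is commonly implemented (it may not match the repo's exact version):

import torch


def topks_correct_sketch(preds, labels, ks):
    """Number of correct top-k predictions for each k (illustrative sketch)."""
    # (num_samples, max_k) indices of the highest-scoring classes.
    _, top_max_k_inds = torch.topk(
        preds, max(ks), dim=1, largest=True, sorted=True)
    # Transpose to (max_k, num_samples) so row r holds the rank-r predictions.
    top_max_k_inds = top_max_k_inds.t()
    rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)
    top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels)
    # For each k, count samples whose label appears in the first k rows.
    return [top_max_k_correct[:k, :].float().sum() for k in ks]

Dividing each count by preds.size(0) and multiplying by 100 gives the top-k accuracies logged above.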
Example #9
    def log_iter_stats(self, cur_epoch, cur_iter):
        """
        Log the stats of the current iteration.
        Args:
            cur_epoch (int): the number of current epoch.
            cur_iter (int): the number of current iteration.
        """
        if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
            return
        eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1)
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        stats = {
            "_type": "val_iter",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "iter": "{}/{}".format(cur_iter + 1, self.max_iter),
            "time_diff": self.iter_timer.seconds(),
            "eta": eta,
            "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
        }
        if not self._cfg.DATA.MULTI_LABEL:
            stats["top1_err"] = self.mb_top1_err.get_win_median()
            stats["top5_err"] = self.mb_top5_err.get_win_median()
        logging.log_json_stats(stats)
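Putting the iteration and epoch hooks together, a validation loop would typically look like the following hedged sketch. model, val_loader, val_meter, cur_epoch, and the update_stats/reset calls are assumptions about the surrounding code, not a documented API; topks_correct_sketch is the illustrative helper defined earlier.

# Illustrative validation loop; names outside log_*_stats are assumptions.
for cur_iter, (inputs, labels) in enumerate(val_loader):
    val_meter.iter_tic()
    preds = model(inputs)
    num_correct = topks_correct_sketch(preds, labels, (1, 5))
    top1_err, top5_err = [
        (1.0 - x.item() / preds.size(0)) * 100.0 for x in num_correct
    ]
    # Hypothetical meter update; the real signature may differ.
    val_meter.update_stats(top1_err, top5_err, inputs.size(0))
    val_meter.log_iter_stats(cur_epoch, cur_iter)
    val_meter.iter_toc()

val_meter.log_epoch_stats(cur_epoch)
val_meter.reset()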