Code Example #1
File: meters.py Project: yztongzhan/SlowFast
 def log_epoch_stats(self, cur_epoch):
     """
     Log the stats of the current epoch.
     Args:
         cur_epoch (int): the number of current epoch.
     """
     if self.mode in ["val", "test"]:
         self.finalize_metrics(log=False)
         stats = {
             "_type": "{}_epoch".format(self.mode),
             "cur_epoch": "{}".format(cur_epoch + 1),
             "mode": self.mode,
             "map": self.full_map,
             "gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
             "RAM": "{:.2f}/{:.2f}G".format(*misc.cpu_mem_usage()),
         }
         logging.log_json_stats(stats)
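The meter's logging methods are only one side of its API. As a point of reference, here is a minimal sketch, not taken from SlowFast, of how a validation loop might drive such a meter; `val_loader`, `model`, `meter`, and the `iter_tic`/`iter_toc`/`update_stats`/`reset` hooks are assumptions for illustration, while `log_iter_stats` and `log_epoch_stats` are the methods shown in these examples.

import torch

def run_validation_epoch(val_loader, model, meter, cur_epoch):
    # Hypothetical driver loop: only log_iter_stats/log_epoch_stats are taken
    # from the snippets on this page; every other meter hook is assumed.
    model.eval()
    with torch.no_grad():
        for cur_iter, (inputs, labels) in enumerate(val_loader):
            meter.iter_tic()                   # assumed: start the iteration timer
            preds = model(inputs)
            meter.update_stats(preds, labels)  # assumed: accumulate per-batch stats
            meter.iter_toc()                   # assumed: stop the iteration timer
            meter.log_iter_stats(cur_epoch, cur_iter)
    meter.log_epoch_stats(cur_epoch)           # emits the JSON stats shown above
    meter.reset()                              # assumed: clear state for the next epoch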
Code Example #2
    def log_iter_stats(self, cur_epoch, cur_iter):
        """
        Log the stats.
        Args:
            cur_epoch (int): the current epoch.
            cur_iter (int): the current iteration.
        """

        if (cur_iter + 1) % self.cfg.LOG_PERIOD != 0:
            return

        eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        if self.mode == "train":
            stats = {
                "_type": "{}_iter".format(self.mode),
                "cur_epoch": "{}".format(cur_epoch + 1),
                "cur_iter": "{}".format(cur_iter + 1),
                "eta": eta,
                "time_diff": self.iter_timer.seconds(),
                "mode": self.mode,
                "loss": self.loss.get_win_median(),
                "lr": self.lr,
            }
        elif self.mode == "val":
            stats = {
                "_type": "{}_iter".format(self.mode),
                "cur_epoch": "{}".format(cur_epoch + 1),
                "cur_iter": "{}".format(cur_iter + 1),
                "eta": eta,
                "time_diff": self.iter_timer.seconds(),
                "mode": self.mode,
            }
        elif self.mode == "test":
            stats = {
                "_type": "{}_iter".format(self.mode),
                "cur_iter": "{}".format(cur_iter + 1),
                "eta": eta,
                "time_diff": self.iter_timer.seconds(),
                "mode": self.mode,
            }
        else:
            raise NotImplementedError("Unknown mode: {}".format(self.mode))

        logging.log_json_stats(stats)
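The `eta` field in these meters is simply the measured per-iteration time multiplied by the number of remaining iterations, rendered as H:MM:SS. A small self-contained illustration of that arithmetic, using only the standard library:

import datetime

def format_eta(seconds_per_iter, overall_iters, cur_iter):
    # Remaining iterations times the measured per-iteration time.
    eta_sec = seconds_per_iter * (overall_iters - cur_iter)
    return str(datetime.timedelta(seconds=int(eta_sec)))

# For example, format_eta(0.25, 10000, 4000) returns '0:25:00'.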
Code Example #3
    def finalize_metrics(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
        """
        if not all(self.clip_count == self.num_clips):
            logger.warning(
                "clip count {} ~= num clips {}".format(
                    ", ".join(
                        [
                            "{}: {}".format(i, k)
                            for i, k in enumerate(self.clip_count.tolist())
                        ]
                    ),
                    self.num_clips,
                )
            )

        self.stats = {"split": "test_final"}
        if self.multi_label:
            map = get_map(
                self.audio_preds.cpu().numpy(), self.audio_labels.cpu().numpy()
            )
            self.stats["map"] = map
        else:
            num_topks_correct = metrics.topks_correct(
                self.audio_preds, self.audio_labels, ks
            )
            topks = [
                (x / self.audio_preds.size(0)) * 100.0
                for x in num_topks_correct
            ]
            assert len({len(ks), len(topks)}) == 1
            for k, topk in zip(ks, topks):
                self.stats["top{}_acc".format(k)] = "{:.{prec}f}".format(
                    topk, prec=2
                )

        logging.log_json_stats(self.stats)
        return self.audio_preds.numpy().copy(), \
               self.audio_preds_clips.numpy().copy(), \
               F.one_hot(self.audio_labels, num_classes=self.audio_preds.shape[1]).numpy().copy(), \
               None
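Most of the single-label branches above defer to `metrics.topks_correct`. The sketch below is an illustrative re-implementation, not the project's metrics module, of counting how many predictions place the true label within the top-k scores using plain `torch.topk`; dividing each count by `preds.size(0)` and multiplying by 100 reproduces the top-k accuracies formatted in these examples.

import torch

def topks_correct_sketch(preds, labels, ks=(1, 5)):
    # preds: (N, C) class scores; labels: (N,) integer class ids.
    # Indices of the max(ks) highest-scoring classes per sample.
    _, top_inds = torch.topk(preds, max(ks), dim=1, largest=True, sorted=True)
    # Mark positions where a top-k index matches the true label.
    hits = top_inds.eq(labels.view(-1, 1).expand_as(top_inds))
    # For each k, count samples with at least one hit among the first k columns.
    return [hits[:, :k].any(dim=1).float().sum() for k in ks]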
Code Example #4
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        verb_top1_acc = self.num_verb_top1_cor / self.num_samples
        verb_top5_acc = self.num_verb_top5_cor / self.num_samples
        noun_top1_acc = self.num_noun_top1_cor / self.num_samples
        noun_top5_acc = self.num_noun_top5_cor / self.num_samples
        top1_acc = self.num_top1_cor / self.num_samples
        top5_acc = self.num_top5_cor / self.num_samples
        self.max_verb_top1_acc = max(self.max_verb_top1_acc, verb_top1_acc)
        self.max_verb_top5_acc = max(self.max_verb_top5_acc, verb_top5_acc)
        self.max_noun_top1_acc = max(self.max_noun_top1_acc, noun_top1_acc)
        self.max_noun_top5_acc = max(self.max_noun_top5_acc, noun_top5_acc)
        is_best_epoch = top1_acc > self.max_top1_acc
        self.max_top1_acc = max(self.max_top1_acc, top1_acc)
        self.max_top5_acc = max(self.max_top5_acc, top5_acc)
        mem_usage = misc.gpu_mem_usage()
        stats = {
            "_type": "val_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "verb_top1_acc": verb_top1_acc,
            "verb_top5_acc": verb_top5_acc,
            "noun_top1_acc": noun_top1_acc,
            "noun_top5_acc": noun_top5_acc,
            "top1_acc": top1_acc,
            "top5_acc": top5_acc,
            "max_verb_top1_acc": self.max_verb_top1_acc,
            "max_verb_top5_acc": self.max_verb_top5_acc,
            "max_noun_top1_acc": self.max_noun_top1_acc,
            "max_noun_top5_acc": self.max_noun_top5_acc,
            "max_top1_acc": self.max_top1_acc,
            "max_top5_acc": self.max_top5_acc,
            "mem": int(np.ceil(mem_usage)),
        }
        log_to_tensorboard(self.tb_writer, stats, False)
        logging.log_json_stats(stats)

        return is_best_epoch
Code Example #5
    def finalize_metrics(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
        """
        if not all(self.clip_count == self.num_clips):
            logger.warning("clip count {} ~= num clips {}".format(
                self.clip_count, self.num_clips))
            logger.warning(self.clip_count)

        num_topks_correct = metrics.topks_correct(self.video_preds,
                                                  self.video_labels, ks)
        topks = [(x / self.video_preds.size(0)) * 100.0
                 for x in num_topks_correct]
        assert len({len(ks), len(topks)}) == 1
        stats = {"split": "test_final"}
        for k, topk in zip(ks, topks):
            stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk, prec=2)
        logging.log_json_stats(stats)
Code Example #6
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        verb_top1_acc = self.num_verb_top1_cor / self.num_samples
        verb_top5_acc = self.num_verb_top5_cor / self.num_samples
        noun_top1_acc = self.num_noun_top1_cor / self.num_samples
        noun_top5_acc = self.num_noun_top5_cor / self.num_samples
        top1_acc = self.num_top1_cor / self.num_samples
        top5_acc = self.num_top5_cor / self.num_samples
        self.max_verb_top1_acc = max(self.max_verb_top1_acc, verb_top1_acc)
        self.max_verb_top5_acc = max(self.max_verb_top5_acc, verb_top5_acc)
        self.max_noun_top1_acc = max(self.max_noun_top1_acc, noun_top1_acc)
        self.max_noun_top5_acc = max(self.max_noun_top5_acc, noun_top5_acc)
        is_best_epoch = top1_acc > self.max_top1_acc
        self.max_top1_acc = max(self.max_top1_acc, top1_acc)
        self.max_top5_acc = max(self.max_top5_acc, top5_acc)
        stats = {
            "_type": "val_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "verb_top1_acc": verb_top1_acc,
            "verb_top5_acc": verb_top5_acc,
            "noun_top1_acc": noun_top1_acc,
            "noun_top5_acc": noun_top5_acc,
            "top1_acc": top1_acc,
            "top5_acc": top5_acc,
            "max_verb_top1_acc": self.max_verb_top1_acc,
            "max_verb_top5_acc": self.max_verb_top5_acc,
            "max_noun_top1_acc": self.max_noun_top1_acc,
            "max_noun_top5_acc": self.max_noun_top5_acc,
            "max_top1_acc": self.max_top1_acc,
            "max_top5_acc": self.max_top5_acc,
            "gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
            "RAM": "{:.2f}/{:.2f}G".format(*misc.cpu_mem_usage()),
        }
        logging.log_json_stats(stats)

        return is_best_epoch, {"top1_acc": top1_acc, "verb_top1_acc": verb_top1_acc, "noun_top1_acc": noun_top1_acc}
Code Example #7
 def log_epoch_stats(self, cur_epoch):
     """
     Log the stats of the current epoch.
     Args:
         cur_epoch (int): the number of current epoch.
     """
     eta_sec = self.iter_timer.seconds() * (
         self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters
     )
     eta = str(datetime.timedelta(seconds=int(eta_sec)))
     verb_top1_acc = self.num_verb_top1_cor / self.num_samples
     verb_top5_acc = self.num_verb_top5_cor / self.num_samples
     noun_top1_acc = self.num_noun_top1_cor / self.num_samples
     noun_top5_acc = self.num_noun_top5_cor / self.num_samples
     top1_acc = self.num_top1_cor / self.num_samples
     top5_acc = self.num_top5_cor / self.num_samples
     avg_loss_verb = self.loss_verb_total / self.num_samples
     avg_loss_noun = self.loss_noun_total / self.num_samples
     avg_loss = self.loss_total / self.num_samples
     stats = {
         "_type": "train_epoch",
         "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
         "dt": self.iter_timer.seconds(),
         "dt_data": self.data_timer.seconds(),
         "dt_net": self.net_timer.seconds(),
         "eta": eta,
         "verb_top1_acc": verb_top1_acc,
         "verb_top5_acc": verb_top5_acc,
         "noun_top1_acc": noun_top1_acc,
         "noun_top5_acc": noun_top5_acc,
         "top1_acc": top1_acc,
         "top5_acc": top5_acc,
         "verb_loss": avg_loss_verb,
         "noun_loss": avg_loss_noun,
         "loss": avg_loss,
         "lr": self.lr,
         "gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
         "RAM": "{:.2f}/{:.2f}G".format(*misc.cpu_mem_usage()),
     }
     logging.log_json_stats(stats)
Code Example #8
File: meters.py Project: karttikeya/SlowFast
    def finalize_metrics(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
        """
        clip_check = self.clip_count == self.num_clips
        if not all(clip_check):
            logger.warning(
                "clip count Ids={} = {} (should be {})".format(
                    np.argwhere(~clip_check),
                    self.clip_count[~clip_check],
                    self.num_clips,
                )
            )

        self.stats = {"split": "test_final"}
        if self.multi_label:
            mean_ap = get_map(
                self.video_preds.cpu().numpy(), self.video_labels.cpu().numpy()
            )
            map_str = "{:.{prec}f}".format(mean_ap * 100.0, prec=2)
            self.stats["map"] = map_str
            self.stats["top1_acc"] = map_str
            self.stats["top5_acc"] = map_str
        else:
            num_topks_correct = metrics.topks_correct(
                self.video_preds, self.video_labels, ks
            )
            topks = [
                (x / self.video_preds.size(0)) * 100.0
                for x in num_topks_correct
            ]
            assert len({len(ks), len(topks)}) == 1
            for k, topk in zip(ks, topks):
                # self.stats["top{}_acc".format(k)] = topk.cpu().numpy()
                self.stats["top{}_acc".format(k)] = "{:.{prec}f}".format(
                    topk, prec=2
                )
        logging.log_json_stats(self.stats)
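The multi-label branches rely on a `get_map` helper that is not shown in these snippets. A minimal stand-in, assuming `(N, C)` score and binary-label arrays and scikit-learn's `average_precision_score`, could look like this:

import numpy as np
from sklearn.metrics import average_precision_score

def get_map_sketch(preds, labels):
    # preds: (N, C) prediction scores; labels: (N, C) binary ground truth.
    aps = []
    for c in range(preds.shape[1]):
        if labels[:, c].sum() == 0:
            continue  # skip classes with no positive samples
        aps.append(average_precision_score(labels[:, c], preds[:, c]))
    # Mean AP over the classes that have at least one positive label.
    return float(np.mean(aps)) if aps else 0.0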
Code Example #9
 def log_iter_stats(self, cur_epoch, cur_iter):
     """
     log the stats of the current iteration.
     Args:
         cur_epoch (int): the number of current epoch.
         cur_iter (int): the number of current iteration.
     """
     if (cur_iter + 1) % self._cfg.LOGS.PERIOD != 0:
         return
     eta_sec = self.iter_timer.seconds() * (
         self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1))
     eta = str(datetime.timedelta(seconds=int(eta_sec)))
     stats = {
         "_type": "train_iter",
         "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
         "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
         "time_diff": self.iter_timer.seconds(),
         "eta": eta,
         "loss": self.loss.get_win_median(),
         "lr": self.lr,
         "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
     }
     if not self._cfg.DATA.MULTI_LABEL:
         stats["top1_err"] = self.mb_top1_err.get_win_median()
         stats["top5_err"] = self.mb_top5_err.get_win_median()
     logging.log_json_stats(stats)
     if du.is_master_proc():
         iters = cur_iter + 1 + self.epoch_iters * cur_epoch
         for k, v in stats.items():
             if 'err' in k or 'loss' in k:
                 self.tblogger.add_scalar('train/{}'.format(k), v, iters)
             elif k == 'eta':
                 self.tblogger.add_scalar('other/eta', eta_sec, iters)
             elif k == 'epoch':
                 self.tblogger.add_scalar('other/epoch', cur_epoch + 1,
                                          iters)
             elif k == 'lr':
                 self.tblogger.add_scalar('other/lr', v, iters)
             else:
                 continue
Code Example #10
File: meters.py Project: anton-br/SlowFast
 def log_iter_stats(self, cur_epoch, cur_iter):
     """
     log the stats of the current iteration.
     Args:
         cur_epoch (int): the number of current epoch.
         cur_iter (int): the number of current iteration.
     """
     if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
         return
     eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1)
     eta = str(datetime.timedelta(seconds=int(eta_sec)))
     stats = {
         "_type": "val_iter",
         "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
         "iter": "{}/{}".format(cur_iter + 1, self.max_iter),
         "time_diff": self.iter_timer.seconds(),
         "eta": eta,
         "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
     }
     if not self._cfg.DATA.MULTI_LABEL:
         stats["top1_err"] = self.mb_top1_err.get_win_median()
     logging.log_json_stats(stats)
Code Example #11
File: meters.py Project: yangyuren03/rsna-2019
 def log_epoch_stats(self, cur_epoch):
     """
     Log the stats of the current epoch.
     Args:
         cur_epoch (int): the number of current epoch.
     """
     top1_err = self.num_top1_mis / self.num_samples
     top5_err = self.num_top5_mis / self.num_samples
     self.min_top1_err = min(self.min_top1_err, top1_err)
     self.min_top5_err = min(self.min_top5_err, top5_err)
     mem_usage = misc.gpu_mem_usage()
     stats = {
         "_type": "val_epoch",
         "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
         "time_diff": self.iter_timer.seconds(),
         "top1_err": top1_err,
         "top5_err": top5_err,
         "min_top1_err": self.min_top1_err,
         "min_top5_err": self.min_top5_err,
         "mem": int(np.ceil(mem_usage)),
     }
     logging.log_json_stats(stats)
Code Example #12
 def log_epoch_stats(self, cur_epoch):
     """
     Log the stats of the current epoch.
     Args:
         cur_epoch (int): the number of current epoch.
     """
     eta_sec = self.iter_timer.seconds() * (
         self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters)
     eta = str(datetime.timedelta(seconds=int(eta_sec)))
     mem_usage = misc.gpu_mem_usage()
     verb_top1_acc = self.num_verb_top1_cor / self.num_samples
     verb_top5_acc = self.num_verb_top5_cor / self.num_samples
     noun_top1_acc = self.num_noun_top1_cor / self.num_samples
     noun_top5_acc = self.num_noun_top5_cor / self.num_samples
     top1_acc = self.num_top1_cor / self.num_samples
     top5_acc = self.num_top5_cor / self.num_samples
     avg_loss_verb = self.loss_verb_total / self.num_samples
     avg_loss_noun = self.loss_noun_total / self.num_samples
     avg_loss = self.loss_total / self.num_samples
     stats = {
         "_type": "train_epoch",
         "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
         "time_diff": self.iter_timer.seconds(),
         "eta": eta,
         "verb_top1_acc": verb_top1_acc,
         "verb_top5_acc": verb_top5_acc,
         "noun_top1_acc": noun_top1_acc,
         "noun_top5_acc": noun_top5_acc,
         "top1_acc": top1_acc,
         "top5_acc": top5_acc,
         "verb_loss": avg_loss_verb,
         "noun_loss": avg_loss_noun,
         "loss": avg_loss,
         "lr": self.lr,
         "mem": int(np.ceil(mem_usage)),
     }
     log_to_tensorboard(self.tb_writer, stats, False)
     logging.log_json_stats(stats)
Code Example #13
File: meters.py Project: serre-lab/pred_gn
 def log_iter_stats(self, cur_epoch, cur_iter):
     """
     log the stats of the current iteration.
     Args:
         cur_epoch (int): the number of current epoch.
         cur_iter (int): the number of current iteration.
     """
     if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
         return
     eta_sec = self.iter_timer.seconds() * (
         self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1)
     )
     eta = str(datetime.timedelta(seconds=int(eta_sec)))
     mem_usage = misc.gpu_mem_usage()
     
     stats = {
         "_type": "train_iter",
         "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
         "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
         "time_diff": self.iter_timer.seconds(),
         "time_left": eta,
         # "top1_err": self.mb_top1_err.get_win_median(),
         # "top5_err": self.mb_top5_err.get_win_median(),
         # "loss": self.loss.get_win_median(),
         "lr": self.lr,
         "mem": int(np.ceil(mem_usage)),
         
     }
     for k, v in self.stats.items():
         stats[k] = v.get_win_median()
     # if self.mb_top1_err:
     #     stats = {**stats, **{"top1_err": self.mb_top1_err.get_win_median(),
     #                         "top5_err": self.mb_top5_err.get_win_median()}}
         
         
     logging.log_json_stats(stats)
Code Example #14
    def finalize_metrics(self, ks=(1, 5)):
        """
            Calculate and log the final ensembled metrics.
            ks (tuple): list of top-k values for topk_accuracies. For example,
                ks = (1, 5) correspods to top-1 and top-5 accuracy.
            """
        if not all(self.clip_count == self.num_clips):
            logger.warning("clip count {} ~= num clips {}".format(
                ", ".join([
                    "{}: {}".format(i, k)
                    for i, k in enumerate(self.clip_count.tolist())
                ]),
                self.num_clips,
            ))

        self.stats = {"split": "test_final"}
        timestamp = datetime.datetime.now().isoformat()
        computed_representations_path = "comp_repr_{}.csv".format(timestamp)
        actual_labels_path = "act_labels_{}.csv".format(timestamp)
        csv_repr_file = open(computed_representations_path, "w")
        csv_label_file = open(actual_labels_path, "w")
        csv_repr_writer = csv.writer(csv_repr_file)
        csv_label_writer = csv.writer(csv_label_file)
        csv_repr_writer.writerows(self.video_preds.tolist())
        csv_label_writer.writerows([[self.video_labels.tolist()]])
        csv_repr_file.close()
        csv_label_file.close()
        logger.info("Saving computed representations to {}".format(
            computed_representations_path))
        logger.info("Running linear model on the computed representations")
        logger.info("Running for {} iterations".format(self.lin_epochs))
        iter = 0
        logit_model = LogisticRegression(self.video_preds.shape[-1],
                                         self.num_test_classes)
        optimizer = torch.optim.SGD(logit_model.parameters(), lr=0.01)
        for epoch in range(int(self.lin_epochs)):
            optimizer.zero_grad()
            self.final_preds = logit_model(self.video_preds.cpu())
            loss = torch.nn.CrossEntropyLoss()(self.final_preds,
                                               self.video_labels)
            loss.backward()
            optimizer.step()
            iter += 1
            if iter % 500 == 0:
                # calculate accuracy of the probe from its current logits
                _, predicted = torch.max(self.final_preds.data, 1)
                total = self.video_labels.size(0)
                correct = (predicted == self.video_labels).sum()
                accuracy = 100 * correct / total
                print("Iteration: {}. Loss: {}. Accuracy: {}.".format(
                    iter, loss.item(), accuracy))

        logger.info("Approx Acc of the linear model {}", accuracy)
        self.video_preds = self.video_preds_res
        if self.multi_label:
            map = get_map(self.video_preds.cpu().numpy(),
                          self.video_labels.cpu().numpy())
            self.stats["map"] = map
        else:
            num_topks_correct = metrics.topks_correct(self.video_preds,
                                                      self.video_labels, ks)
            topks = [(x / self.video_preds.size(0)) * 100.0
                     for x in num_topks_correct]
            assert len({len(ks), len(topks)}) == 1
            for k, topk in zip(ks, topks):
                self.stats["top{}_acc".format(k)] = "{:.{prec}f}".format(
                    topk, prec=2)
        logging.log_json_stats(self.stats)
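Code Example #14 additionally assumes a `LogisticRegression` module (and fields such as `lin_epochs`, `num_test_classes`, and `video_preds_res`) defined elsewhere in that project. A minimal sketch of such a linear probe, producing raw logits for `CrossEntropyLoss`, might be:

import torch

class LogisticRegression(torch.nn.Module):
    # Hypothetical stand-in for the linear probe used in Code Example #14.
    def __init__(self, in_dim, num_classes):
        super().__init__()
        self.linear = torch.nn.Linear(in_dim, num_classes)

    def forward(self, x):
        # Return raw logits; CrossEntropyLoss applies log-softmax internally.
        return self.linear(x)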