def build_data_logger(cfg: CfgNode) -> logging.Logger:
    r"""Build logger for data module

    Parameters
    ----------
    cfg : CfgNode
        cfg, node name: data

    Returns
    -------
    logging.Logger
        logger built with file handler at "exp_save/exp_name/logs/data.log"
    """
    log_dir = osp.join(cfg.exp_save, cfg.exp_name, "logs")
    ensure_dir(log_dir)
    log_file = osp.join(log_dir, "data.log")
    data_logger = logging.getLogger(_DATA_LOGGER_NAME)
    data_logger.setLevel(logging.INFO)
    # logging.getLogger returns a process-wide singleton, so calling this
    # builder more than once would stack FileHandlers and duplicate every
    # record; only attach the handler if an equivalent one is not present.
    abs_log_file = osp.abspath(log_file)
    already_registered = any(
        isinstance(h, logging.FileHandler)
        and getattr(h, "baseFilename", None) == abs_log_file
        for h in data_logger.handlers)
    if not already_registered:
        # file handler
        fh = logging.FileHandler(log_file)
        format_str = "[%(asctime)s - %(filename)s] - %(message)s"
        formatter = logging.Formatter(format_str)
        fh.setFormatter(formatter)
        # add file handler
        data_logger.addHandler(fh)
    logger.info("Data log file registered at: %s" % log_file)
    data_logger.info("Data logger built.")

    return data_logger
 def save_snapshot(self, model_param_only=False):
     r"""Save a training snapshot for the current epoch.

     Parameters
     ----------
     model_param_only : bool
         if True, save only the (unwrapped) model weights to
         "final_model.pkl" under the snapshot dir; otherwise save
         model + optimizer state to the per-epoch snapshot file.
     """
     epoch = self._state["epoch"]
     # resolve save dir/filename
     if model_param_only:
         snapshot_dir = self._state["snapshot_dir"]
         snapshot_file = osp.join(snapshot_dir, "final_model.pkl")
     else:
         snapshot_dir, snapshot_file = self._infer_snapshot_dir_file_from_epoch(
             epoch)
     # prepare snapshot dict to save; optimizer state is only needed for
     # resuming training, not for a final weights-only export
     snapshot_dict = {
         'epoch': epoch,
         'model_state_dict': unwrap_model(self._model).state_dict(),
     }
     if not model_param_only:
         snapshot_dict['optimizer_state_dict'] = self._optimizer.state_dict()
     # ensure & save
     ensure_dir(snapshot_dir)
     torch.save(snapshot_dict, snapshot_file)
     # retry in case of transient failure (e.g. nfs error), but bounded so
     # a persistent failure cannot spin here forever
     retries = 0
     while not osp.exists(snapshot_file) and retries < 5:
         logger.info("retrying")
         torch.save(snapshot_dict, snapshot_file)
         retries += 1
     logger.info("Snapshot saved at: %s" % snapshot_file)
Beispiel #3
0
 def _build_writer(self, global_step=0):
     r"""Create the TensorBoard SummaryWriter and stash it in self._state.

     Events recorded after *global_step* are purged on creation, so a
     resumed run does not leave stale points in the event file.
     """
     tb_dir = self._hyper_params["log_dir"]
     ensure_dir(tb_dir)
     writer = SummaryWriter(log_dir=tb_dir,
                            purge_step=global_step,
                            filename_suffix="")
     self._state["writer"] = writer
Beispiel #4
0
 def test(self):
     r"""Run the benchmark over every configured dataset.

     For each dataset: set up the result directories, run the tracker on
     all videos, then evaluate under the default hyper-parameter tag.
     """
     self.tracker_name = self._hyper_params["exp_name"]
     for name in self._hyper_params["dataset_names"]:
         self.dataset_name = name
         self.tracker_dir = os.path.join(self._hyper_params["exp_save"],
                                         self.dataset_name)
         self.save_root_dir = os.path.join(self.tracker_dir,
                                           self.tracker_name, "baseline")
         ensure_dir(self.save_root_dir)
         # track videos
         self.run_tracker()
         # evaluation
         self.evaluation('default_hp')
 def save_snapshot(self, ):
     r"""Save model + optimizer state for the current epoch.

     The destination dir/file are derived from the epoch number via
     ``_infer_snapshot_dir_file_from_epoch``.
     """
     epoch = self._state["epoch"]
     snapshot_dir, snapshot_file = self._infer_snapshot_dir_file_from_epoch(
         epoch)
     snapshot_dict = {
         'epoch': epoch,
         'model_state_dict': unwrap_model(self._model).state_dict(),
         'optimizer_state_dict': self._optimizer.state_dict()
     }
     ensure_dir(snapshot_dir)
     torch.save(snapshot_dict, snapshot_file)
     # retry in case of transient failure (e.g. nfs error), but bounded so
     # a persistent failure cannot spin here forever
     retries = 0
     while not osp.exists(snapshot_file) and retries < 5:
         logger.info("retrying")
         torch.save(snapshot_dict, snapshot_file)
         retries += 1
     logger.info("Snapshot saved at: %s" % snapshot_file)
Beispiel #6
0
 def test(self) -> Dict:
     r"""Run the benchmark over every configured dataset.

     Returns
     -------
     Dict
         evaluation result of the last dataset processed; None if the
         dataset list is empty
     """
     self.tracker_name = self._hyper_params["exp_name"]
     test_result_dict = None
     for name in self._hyper_params["dataset_names"]:
         self.dataset_name = name
         self.tracker_dir = os.path.join(self._hyper_params["exp_save"],
                                         self.dataset_name)
         self.save_root_dir = os.path.join(self.tracker_dir,
                                           self.tracker_name, "baseline")
         ensure_dir(self.save_root_dir)
         # track videos
         self.run_tracker()
         # evaluation
         test_result_dict = self.evaluation()
     return test_result_dict
Beispiel #7
0
def setup(cfg: CfgNode):
    r"""
    automatically setup some attributes

    Creates (if missing) the experiment directory tree:
    exp_dir/, exp_dir/logs/, exp_dir/datasets/, exp_dir/models/.

    Args:
    cfg: task specific config

    Returns:
    config
    """
    ensure_dir(cfg["exp_save"])
    cfg.auto = CfgNode()

    cfg.auto.exp_dir = os.path.join(cfg.exp_save, cfg.exp_name)
    ensure_dir(cfg.auto.exp_dir)

    cfg.auto.log_dir = os.path.join(cfg.auto.exp_dir, "logs")
    ensure_dir(cfg.auto.log_dir)

    # BUGFIX: the "datasets" path used to be written to cfg.auto.model_dir
    # and was immediately overwritten below, losing the path; keep it in its
    # own attribute instead.
    cfg.auto.dataset_dir = os.path.join(cfg.auto.exp_dir, "datasets")
    ensure_dir(cfg.auto.dataset_dir)

    cfg.auto.model_dir = os.path.join(cfg.auto.exp_dir, "models")
    ensure_dir(cfg.auto.model_dir)
Beispiel #8
0
    def track_single_video(self, tracker, video, v_id=0):
        r"""
        track frames in single video with VOT rules

        Arguments
        ---------
        tracker: PipelineBase
            pipeline
        video: str
            video name
        v_id: int
            video id

        Returns
        -------
        tuple
            (lost_times, tracking speed in frames per second)
        """
        regions = []
        # resolve video name -> per-video record with frame paths and gt
        video = self.dataset[video]
        image_files, gt = video['image_files'], video['gt']
        start_frame, end_frame, lost_times, toc = 0, len(image_files), 0, 0
        for f, image_file in enumerate(tqdm(image_files)):
            im = vot_benchmark.get_img(image_file)
            im_show = im.copy().astype(np.uint8)

            tic = cv2.getTickCount()
            if f == start_frame:  # init
                # gt is converted to an axis-aligned box for tracker init;
                # assumes 8-value polygon annotations -- TODO confirm
                cx, cy, w, h = vot_benchmark.get_axis_aligned_bbox(gt[f])
                location = vot_benchmark.cxy_wh_2_rect((cx, cy), (w, h))
                tracker.init(im, location)
                # VOT result-file convention: "1" marks an init frame
                regions.append(1 if 'VOT' in self.dataset_name else gt[f])
                gt_polygon = None
                pred_polygon = None
            elif f > start_frame:  # tracking
                location = tracker.update(im)

                # flat 8-tuple form required by the overlap test
                gt_polygon = (gt[f][0], gt[f][1], gt[f][2], gt[f][3], gt[f][4],
                              gt[f][5], gt[f][6], gt[f][7])
                pred_polygon = (location[0], location[1],
                                location[0] + location[2], location[1],
                                location[0] + location[2],
                                location[1] + location[3], location[0],
                                location[1] + location[3])
                b_overlap = vot_benchmark.vot_overlap(
                    gt_polygon, pred_polygon, (im.shape[1], im.shape[0]))
                # point-pair form; NOTE(review): not read below in this
                # block -- presumably kept for visualization/debugging
                gt_polygon = ((gt[f][0], gt[f][1]), (gt[f][2], gt[f][3]),
                              (gt[f][4], gt[f][5]), (gt[f][6], gt[f][7]))
                pred_polygon = ((location[0], location[1]),
                                (location[0] + location[2],
                                 location[1]), (location[0] + location[2],
                                                location[1] + location[3]),
                                (location[0], location[1] + location[3]))

                if b_overlap:
                    regions.append(location)
                else:  # lost
                    # VOT convention: "2" marks a failure; re-init 5 frames on
                    regions.append(2)
                    lost_times += 1
                    start_frame = f + 5  # skip 5 frames
            else:  # skip
                # frames between a failure and the next re-init: "0"
                regions.append(0)
            toc += cv2.getTickCount() - tic

        toc /= cv2.getTickFrequency()

        # save result in VOT result-file format: int markers as-is, boxes as
        # comma-separated 4-decimal floats
        result_dir = join(self.save_root_dir, video['name'])
        ensure_dir(result_dir)
        result_path = join(result_dir, '{:s}_001.txt'.format(video['name']))
        with open(result_path, "w") as fin:
            for x in regions:
                fin.write("{:d}\n".format(x)) if isinstance(x, int) else \
                    fin.write(','.join([vot_benchmark.vot_float2str("%.4f", i) for i in x]) + '\n')

        logger.info(
            '({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps Lost: {:d} '
            .format(v_id, video['name'], toc, f / toc, lost_times))

        return lost_times, f / toc
Beispiel #9
0
if __name__ == '__main__':
    # parsing
    parser = make_parser()
    parsed_args = parser.parse_args()
    # experiment config
    exp_cfg_path = osp.realpath(parsed_args.config)
    root_cfg.merge_from_file(exp_cfg_path)
    # resolve config
    root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)
    root_cfg = root_cfg.train
    task, task_cfg = specify_task(root_cfg)
    task_cfg.freeze()
    # log config
    log_dir = osp.join(task_cfg.exp_save, task_cfg.exp_name, "logs")
    ensure_dir(log_dir)
    logger.configure(
        handlers=[
            dict(sink=sys.stderr, level="INFO"),
            dict(sink=osp.join(log_dir, "train_log.txt"),
                 enqueue=True,
                 serialize=True,
                 diagnose=True,
                 backtrace=True,
                 level="INFO")
        ],
        extra={"common_to_all": "default"},
    )
    # backup config
    logger.info("Load experiment configuration at: %s" % exp_cfg_path)
    logger.info(
Beispiel #10
0
 # parse command-line arguments
 parser = make_parser()
 parsed_args = parser.parse_args()
 # experiment config
 exp_cfg_path = osp.realpath(parsed_args.config)
 root_cfg.merge_from_file(exp_cfg_path)
 logger.info("Load experiment configuration at: %s" % exp_cfg_path)
 logger.info(
     "Merged with root_cfg imported from videoanalyst.config.config.cfg")
 # resolve config: make relative paths absolute w.r.t. the project root
 root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)
 root_cfg = root_cfg.train
 task, task_cfg = specify_task(root_cfg)
 task_cfg.freeze()
 # backup config to the experiment's log dir for reproducibility
 cfg_bak_dir = osp.join(task_cfg.exp_save, task_cfg.exp_name, "logs")
 ensure_dir(cfg_bak_dir)
 cfg_bak_file = osp.join(cfg_bak_dir, "%s_bak.yaml" % task_cfg.exp_name)
 with open(cfg_bak_file, "w") as f:
     f.write(task_cfg.dump())
 logger.info("Task configuration backed up at %s" % cfg_bak_file)
 # build dummy dataloader (for dataset initialization)
 # NOTE(review): built then immediately destroyed -- presumably exists only
 # to trigger one-off dataset caching side effects; confirm before removing
 with Timer(name="Dummy dataloader building", verbose=True, logger=logger):
     dataloader = dataloader_builder.build(task, task_cfg.data)
 del dataloader
 logger.info("Dummy dataloader destroyed.")
 # build model
 model = model_builder.build(task, task_cfg.model)
 # prepare to spawn: force 'spawn' so CUDA state is not forked into workers
 world_size = task_cfg.num_processes
 torch.multiprocessing.set_start_method('spawn', force=True)
 # spawn trainer process
Beispiel #11
0
    def track_single_video(self, tracker, video, v_id=0):
        r"""
        track frames in single video with VOT rules

        Arguments
        ---------
        tracker: PipelineBase
            pipeline
        video: str
            video name
        v_id: int
            video id

        Returns
        -------
        float
            tracking speed in frames per second
        """
        # NOTE(review): the `package=` kwarg of import_module only affects
        # relative imports; with an absolute module path it has no effect
        vot_float2str = importlib.import_module(
            "videoanalyst.evaluation.vot_benchmark.pysot.utils.region",
            package="vot_float2str").vot_float2str
        regions = []
        scores = []
        # NOTE(review): `times` is never appended to below -- dead variable
        times = []
        # resolve video name -> per-video record with frame paths and gt
        video = self.dataset[video]
        image_files, gt = video['image_files'], video['gt']
        start_frame, end_frame, toc = 0, len(image_files), 0
        # lazily-created video writer (only when save_video is enabled)
        vw = None

        for f, image_file in enumerate(tqdm(image_files)):
            im = vot_benchmark.get_img(image_file)
            im_show = im.copy().astype(np.uint8)
            if self._hyper_params["save_video"] and vw is None:
                # create the writer on the first frame, once the frame size
                # is known
                fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                video_path = os.path.join(self.save_video_dir,
                                          video['name'] + ".avi")
                width, height = im.shape[1], im.shape[0]
                vw = cv2.VideoWriter(video_path, fourcc, 25,
                                     (int(width), int(height)))
            tic = cv2.getTickCount()
            if f == start_frame:  # init
                cx, cy, w, h = vot_benchmark.get_axis_aligned_bbox(gt[f])
                location = vot_benchmark.cxy_wh_2_rect((cx, cy), (w, h))
                tracker.init(im, location)
                # VOT result-file convention: "1" marks the init frame; no
                # confidence score on init
                regions.append(1)
                scores.append(None)
            elif f > start_frame:  # tracking
                location = tracker.update(im)
                regions.append(location)
                scores.append(tracker._state["pscore"])
            toc += cv2.getTickCount() - tic
            if self._hyper_params["save_video"]:
                # draw prediction box + confidence on the dumped frame
                cv2.rectangle(im_show, (int(location[0]), int(location[1])),
                              (int(location[0] + location[2]),
                               int(location[1] + location[3])), (255, 0, 0), 2)
                cv2.putText(im_show, str(scores[-1]), (40, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                vw.write(im_show)
        if vw is not None:
            vw.release()

        toc /= cv2.getTickFrequency()

        # save result boxes (comma-separated floats, int markers as-is)
        result_dir = join(self.save_root_dir, video['name'])
        ensure_dir(result_dir)
        result_path = join(result_dir, '{:s}_001.txt'.format(video['name']))
        with open(result_path, "w") as fin:
            for x in regions:
                fin.write("{:d}\n".format(x)) if isinstance(x, int) else \
                    fin.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
        # save per-frame confidence values (blank line for the init frame)
        result_path = os.path.join(
            result_dir, '{}_001_confidence.value'.format(video['name']))
        with open(result_path, 'w') as fin:
            for x in scores:
                fin.write('\n') if x is None else fin.write(
                    "{:.6f}\n".format(x))
        logger.info(
            '({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}'.format(
                v_id, video['name'], toc, f / toc))

        return f / toc
Beispiel #12
0
def setup(cfg: CfgNode):
    """Setup for working directory

    Creates (if missing) the experiment directory tree:
    exp_dir/, exp_dir/logs/, exp_dir/snapshots/, exp_dir/datasets/,
    exp_dir/models/.

    Parameters
    ----------
    cfg : CfgNode
        task specific config
    """
    ensure_dir(cfg["exp_save"])
    cfg.auto = CfgNode()

    cfg.auto.exp_dir = os.path.join(cfg.exp_save, cfg.exp_name)
    ensure_dir(cfg.auto.exp_dir)

    cfg.auto.log_dir = os.path.join(cfg.auto.exp_dir, "logs")
    ensure_dir(cfg.auto.log_dir)

    # BUGFIX: the snapshots path used to clobber cfg.auto.log_dir, leaving
    # log_dir pointing at "snapshots"; keep it in its own attribute.
    cfg.auto.snapshot_dir = os.path.join(cfg.auto.exp_dir, "snapshots")
    ensure_dir(cfg.auto.snapshot_dir)

    # BUGFIX: the "datasets" path used to be written to cfg.auto.model_dir
    # and immediately overwritten; keep it in its own attribute.
    cfg.auto.dataset_dir = os.path.join(cfg.auto.exp_dir, "datasets")
    ensure_dir(cfg.auto.dataset_dir)

    cfg.auto.model_dir = os.path.join(cfg.auto.exp_dir, "models")
    ensure_dir(cfg.auto.model_dir)
Beispiel #13
0
    def track_single_video(self, tracker, video, v_id=0):
        r"""
        track frames in single video with VOT rules

        Arguments
        ---------
        tracker: PipelineBase
            pipeline
        video: str
            video name
        v_id: int
            video id

        Returns
        -------
        tuple
            (lost_times, tracking speed in frames per second)
        """
        # NOTE(review): the `package=` kwarg of import_module only affects
        # relative imports; with an absolute module path it has no effect
        vot_overlap = importlib.import_module(
            "videoanalyst.evaluation.vot_benchmark.pysot.utils.region",
            package="vot_overlap").vot_overlap
        vot_float2str = importlib.import_module(
            "videoanalyst.evaluation.vot_benchmark.pysot.utils.region",
            package="vot_float2str").vot_float2str
        regions = []
        # resolve video name -> per-video record with frame paths and gt
        video = self.dataset[video]
        if self.test_video != '':
            # test one special video
            if video['name'] != self.test_video:
                return 0, 0
        image_files, gt = video['image_files'], video['gt']
        start_frame, end_frame, lost_times, toc = 0, len(image_files), 0, 0
        for f, image_file in enumerate(tqdm(image_files)):
            im = vot_benchmark.get_img(image_file)
            im_show = im.copy().astype(np.uint8)

            tic = cv2.getTickCount()
            if f == start_frame:  # init
                cx, cy, w, h = vot_benchmark.get_axis_aligned_bbox(gt[f])
                location = vot_benchmark.cxy_wh_2_rect((cx, cy), (w, h))
                tracker.init(im, location, gt[f])
                # VOT result-file convention: "1" marks an init frame
                regions.append(1 if 'VOT' in self.dataset_name else gt[f])
                gt_polygon = None
                pred_polygon = None
                if self.vis:
                    cv2.destroyAllWindows()
            elif f > start_frame:  # tracking

                location = tracker.update(im)

                # flat 8-tuple form required by the overlap test
                gt_polygon = (gt[f][0], gt[f][1], gt[f][2], gt[f][3], gt[f][4],
                              gt[f][5], gt[f][6], gt[f][7])
                pred_polygon = (location[0], location[1],
                                location[0] + location[2], location[1],
                                location[0] + location[2],
                                location[1] + location[3], location[0],
                                location[1] + location[3])
                b_overlap = vot_overlap(gt_polygon, pred_polygon,
                                        (im.shape[1], im.shape[0]))
                # point-pair form used for polyline drawing below
                gt_polygon = ((gt[f][0], gt[f][1]), (gt[f][2], gt[f][3]),
                              (gt[f][4], gt[f][5]), (gt[f][6], gt[f][7]))
                pred_polygon = ((location[0], location[1]),
                                (location[0] + location[2],
                                 location[1]), (location[0] + location[2],
                                                location[1] + location[3]),
                                (location[0], location[1] + location[3]))
                # visualization
                if self.vis:
                    # BUGFIX: np.int was removed in NumPy 1.24; use the
                    # builtin int dtype instead (same behavior)
                    cv2.polylines(im_show, [np.array(gt_polygon, int).reshape((-1, 1, 2))],
                                  True, (0, 255, 0), 3)
                    cv2.polylines(im_show, [np.array(pred_polygon, int).reshape((-1, 1, 2))],
                                  True, (0, 255, 255), 3)
                    cv2.putText(im_show, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.putText(im_show, str(lost_times), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow(video['name'], im_show)
                    cv2.waitKey(10)

                if b_overlap:
                    regions.append(location)
                else:  # lost
                    # VOT convention: "2" marks a failure; re-init 5 frames on
                    regions.append(2)
                    lost_times += 1
                    start_frame = f + 5  # skip 5 frames
            else:  # skip
                # frames between a failure and the next re-init: "0"
                regions.append(0)
            toc += cv2.getTickCount() - tic

        toc /= cv2.getTickFrequency()

        # save result in VOT result-file format: int markers as-is, boxes as
        # comma-separated 4-decimal floats
        result_dir = join(self.save_root_dir, video['name'])
        ensure_dir(result_dir)
        result_path = join(result_dir, '{:s}_001.txt'.format(video['name']))
        with open(result_path, "w") as fin:
            for x in regions:
                fin.write("{:d}\n".format(x)) if isinstance(x, int) else \
                    fin.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')

        logger.info(
            '({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps Lost: {:d} '
            .format(v_id, video['name'], toc, f / toc, lost_times))

        return lost_times, f / toc