Example #1
def iter_benchmark(iterator,
                   num_iter: int,
                   warmup: int = 5,
                   max_time_seconds: float = 60) -> Tuple[float, List[float]]:
    """
    Benchmark an iterator/iterable for `num_iter` iterations with an extra
    `warmup` iterations of warmup.
    End early if `max_time_seconds` time is spent on iterations.

    Returns:
        float: average time (seconds) per iteration
        list[float]: time spent on each iteration. Sometimes useful for further analysis.
    """
    num_iter, warmup = int(num_iter), int(warmup)

    iterator = iter(iterator)
    for _ in range(warmup):
        next(iterator)
    timer = Timer()
    all_times = []
    for curr_iter in tqdm.trange(num_iter):
        start = timer.seconds()
        if start > max_time_seconds:
            num_iter = curr_iter
            break
        next(iterator)
        all_times.append(timer.seconds() - start)
    avg = timer.seconds() / max(num_iter, 1)  # guard: the time limit may hit before any timed iteration
    return avg, all_times
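
A minimal usage sketch for iter_benchmark above (the slow_iter generator is a made-up workload; fvcore's Timer and tqdm are assumed importable):

import itertools
import time

def slow_iter():
    # stand-in workload: each item takes ~10 ms to produce
    for i in itertools.count():
        time.sleep(0.01)
        yield i

avg, all_times = iter_benchmark(slow_iter(), num_iter=50, warmup=2)
print("{:.4f} s/iter over {} timed iterations".format(avg, len(all_times)))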
Example #2
    def __init__(self, overall_iters, cfg, mode):
        """
            overall_iters (int): the overall number of iterations of one epoch.
            cfg (CfgNode): configs.
            mode (str): `train`, `val`, or `test` mode.
        """
        self.cfg = cfg
        self.lr = None
        self.loss = ScalarMeter(cfg.LOG_PERIOD)
        self.full_ava_test = cfg.AVA.FULL_TEST_ON_VAL
        self.mode = mode
        self.iter_timer = Timer()
        self.all_preds = []
        self.all_ori_boxes = []
        self.all_metadata = []
        self.overall_iters = overall_iters
        self.excluded_keys = read_exclusions(
            os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.EXCLUSION_FILE))
        self.categories, self.class_whitelist = read_labelmap(
            os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.LABEL_MAP_FILE))
        gt_filename = os.path.join(cfg.AVA.ANNOTATION_DIR,
                                   cfg.AVA.GROUNDTRUTH_FILE)
        self.full_groundtruth = read_csv(gt_filename, self.class_whitelist)
        self.mini_groundtruth = get_ava_mini_groundtruth(self.full_groundtruth)

        _, self.video_idx_to_name = ava_helper.load_image_lists(
            cfg, mode == "train")
Example #3
    def benchmark_distributed(self, num_iter, warmup=10):
        """
        Benchmark the dataloader in each distributed worker, and log results of
        all workers. This helps understand the final performance as well as
        the variances among workers.

        It also prints startup time (first iter) of the dataloader.
        """
        num_gpus = comm.get_world_size()
        dataset = MapDataset(self.dataset, self.mapper)
        n = self.num_workers
        loader = build_batch_data_loader(dataset,
                                         self.sampler,
                                         self.total_batch_size,
                                         num_workers=n)

        timer = Timer()
        loader = iter(loader)
        next(loader)
        startup_time = timer.seconds()
        logger.info(
            "Dataloader startup time: {:.2f} seconds".format(startup_time))

        comm.synchronize()

        avg, all_times = self._benchmark(loader, num_iter * max(n, 1),
                                         warmup * max(n, 1))
        del loader
        self._log_time(
            f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})",
            avg,
            all_times,
            True,
        )
Example #4
class TestMeter(object):
    def __init__(self, cfg):
        self.cfg = cfg
        self.forward_timer = Timer()
        self.total_time = 0
        self.cnt = 0
        self.score = dict()
        self.output_dir = Join(cfg.TEST.OUTPUT_DIR, cfg.TEST.DATASET)
        self.save_img = cfg.TEST.SAVE_IMG
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.score_csv = open(Join(self.output_dir, "score.csv"), 'w')
        self.score_csv.write("vid, image_id, psnr, ssim\n")

    def forward_tic(self):
        """
        Start to record time.
        """
        self.forward_timer.reset()

    def forward_toc(self):
        """
        Stop to record time.
        """
        self.forward_timer.pause()
        self.total_time += self.forward_timer.seconds()
        self.cnt += 1

    def log_img_result(self, img_out, vid, img_id, psnr, ssim):
        if vid not in self.score:
            self.score[vid] = {}

        # log score
        self.score[vid][img_id] = (psnr, ssim)
        self.score_csv.write("{},{},{},{}\n".format(vid, img_id, psnr, ssim))

        # save img
        if self.save_img:
            # if not os.path.exists(Join(self.output_dir, vid)):
            #     os.makedirs(Join(self.output_dir, vid))
            img_out = cv2.cvtColor(img_out, cv2.COLOR_RGB2BGR)
            cv2.imwrite(Join(self.output_dir, img_id), img_out)

    def log_average_score(self):
        score_per_vid = {}
        for vid, img_scores in self.score.items():
            psnrs = [x[0] for x in img_scores.values()]
            ssims = [x[1] for x in img_scores.values()]
            score_per_vid[vid] = (np.mean(psnrs), np.mean(ssims))

        with open(Join(self.output_dir, 'videos_scores.csv'), 'w') as f:
            f.write('video_id,psnr,ssim\n')
            for vid, (avg_psnr, avg_ssim) in score_per_vid.items():
                f.write("{},{},{}\n".format(vid, avg_psnr, avg_ssim))
        return score_per_vid

    def speed(self):
        return self.total_time, self.total_time / self.cnt
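
The forward_tic/forward_toc pair above is a thin wrapper around fvcore's Timer; here is the same pattern in isolation (the sleep stands in for a real forward pass):

import time
from fvcore.common.timer import Timer

forward_timer = Timer()
total_time, cnt = 0.0, 0
for _ in range(3):
    forward_timer.reset()   # forward_tic()
    time.sleep(0.05)        # the model forward would go here
    forward_timer.pause()   # forward_toc()
    total_time += forward_timer.seconds()
    cnt += 1
print(total_time, total_time / cnt)  # same quantities speed() returns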
Example #5
 def __init__(self, warmup_iter=3):
     """
     Args:
         warmup_iter (int): the number of iterations at the beginning to exclude
             from timing.
     """
     self._warmup_iter = warmup_iter
     self._step_timer = Timer()
Example #6
def load_lvis_json(json_file, image_root, dataset_name=None):
    from lvis import LVIS

    json_file = PathManager.get_local_path(json_file)

    timer = Timer()
    lvis_api = LVIS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    if dataset_name is not None:
        meta = get_lvis_instances_meta(dataset_name)
        MetadataCatalog.get(dataset_name).set(**meta)

    img_ids = sorted(lvis_api.imgs.keys())
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]

    ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
    assert len(set(ann_ids)) == len(ann_ids), \
        f"Annotation ids in '{json_file}' are not unique"

    imgs_anns = list(zip(imgs, anns))

    logger.info(f"Loaded {len(imgs_anns)} images in the LVIS format from {json_file}")

    dataset_dicts = []

    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        file_name = img_dict["file_name"]
        if img_dict["file_name"].startswith("COCO"):
            file_name = file_name[-16:]
        record["file_name"] = os.path.join(image_root, file_name)
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
        record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
        image_id = record["image_id"] = img_dict["id"]

        objs = []
        for anno in anno_dict_list:
            assert anno["image_id"] == image_id

            obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
            obj["category_id"] = anno["category_id"] - 1
            segm = anno["segmentation"]
            valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
            assert len(segm) == len(valid_segm), \
                "Annotation contains an invalid polygon with < 3 points"
            assert len(segm) > 0

            obj["segmentation"] = segm
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)

    return dataset_dicts
Example #7
 def __init__(self, warmup_iter=3):
     """
     Args:
         warmup_iter (int): the number of iterations at the beginning to exclude
             from timing.
     """
     self._warmup_iter = warmup_iter
     self._step_timer = Timer()  # records the time of each step
     self._start_time = time.perf_counter()  # records the current time, in seconds
     self._total_timer = Timer()  # records the total training time
Example #8
def benchmark_data(args):
    cfg = setup(args)

    dataloader = build_detection_train_loader(cfg)

    timer = Timer()
    itr = iter(dataloader)
    for i in range(10):  # warmup
        next(itr)
        if i == 0:
            startup_time = timer.seconds()
    timer = Timer()
    max_iter = 1000
    for _ in tqdm.trange(max_iter):
        next(itr)
    logger.info("{} iters ({} images) in {} seconds.".format(
        max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()))
    logger.info("Startup time: {} seconds".format(startup_time))
    ram = psutil.virtual_memory()  # system RAM, not GPU memory
    logger.info("RAM Usage: {:.2f}/{:.2f} GB".format(
        (ram.total - ram.available) / 1024**3, ram.total / 1024**3))

    # test for a few more rounds
    for _ in range(10):
        timer = Timer()
        max_iter = 1000
        for _ in tqdm.trange(max_iter):
            next(itr)
        logger.info("{} iters ({} images) in {} seconds.".format(
            max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()))
Example #9
    def __call__(self):
        timer = Timer()
        json_file = PathManager.get_local_path(self.json_file)
        with open(json_file, 'r') as file:
            # imgs_anns = json.load(file)
            imgs_anns = file.readlines()
        if timer.seconds() > 1:
            logger.info("Loading {} takes {:.2f} seconds.".format(
                json_file, timer.seconds()))

        logger.info("Loaded {} images in CrowdHuman format from {}".format(
            len(imgs_anns), json_file))

        dataset_dicts = []
        # aspect_ratios = []

        for idx, ann in enumerate(imgs_anns):
            v = json.loads(ann)
            record = {}

            filename = v["filename"]
            # NOTE when filename starts with '/', it is an absolute filename thus os.path.join doesn't work
            if filename.startswith('/'):
                filename = os.path.normpath(self.image_root + filename)
            else:
                filename = os.path.join(self.image_root, filename)
            height, width = v["image_height"], v["image_width"]

            record["file_name"] = filename
            record["image_id"] = idx
            record["height"] = height
            record["width"] = width

            objs = []
            for anno in v.get('instances', []):
                x1, y1, x2, y2 = anno['bbox']
                w = x2 - x1
                h = y2 - y1
                obj = {
                    "category_id": anno['label'],
                    "bbox": anno['bbox'],
                    "vbbox": anno['vbbox'],
                    "is_ignored": anno.get('is_ignored', False),
                    'area': w * h,
                    # 'bbox_mode': BoxMode.XYXY_ABS
                }
                objs.append(obj)
            # ratio = 1.0 * (height + 1) / (width + 1) # do something with ratio ?
            record["annotations"] = objs
            # dataset_dicts.append(record) # to print class histogram
            dataset_dicts.append(ImageMeta.encode(
                record))  # this saves up to 2x memory when serializing the data
            # aspect_ratios.append(ratio)
        return dataset_dicts
Example #10
def update_meta(json_file, dataset_name=None):

    from pyherbtools.herb import HERB

    if dataset_name is not None and "test" not in dataset_name:

        logger.info("Update Metadat of {} dataset".format(dataset_name))
        timer = Timer()
        json_file = PathManager.get_local_path(json_file)
        with contextlib.redirect_stdout(io.StringIO()):
            herb_api = HERB(json_file)
        if timer.seconds() > 1:
            logger.info("Loading {} takes {:.2f} seconds.".format(
                json_file, timer.seconds()))

        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(herb_api.getCatIds())
        cats = herb_api.loadCats(cat_ids)
        # The categories in a custom json file may not be sorted.
        thing_classes = [
            c["name"] for c in sorted(cats, key=lambda x: x["id"])
        ]
        meta.thing_classes = thing_classes

        logger.info("Creating hierarchy target from given annotation")

        order_family_hierarchy = torch.zeros(len(meta.family_map),
                                             len(meta.order_map))
        family_species_hierarchy = torch.zeros(len(meta.species_map),
                                               len(meta.family_map))

        for cat in cats:
            order_id = meta.order_map[cat["order"]]
            family_id = meta.family_map[cat["family"]]
            species_id = meta.species_map[cat["name"]]

            order_family_hierarchy[family_id][order_id] = 1
            family_species_hierarchy[species_id][family_id] = 1

        from torch import nn
        order_family_hierarchy = nn.Softmax(dim=1)(order_family_hierarchy)
        family_species_hierarchy = nn.Softmax(dim=1)(family_species_hierarchy)

        meta.hierarchy_prior = {
            "order|family": order_family_hierarchy,
            "family|species": family_species_hierarchy
        }
        meta.cats = cats

        meta.num_classes = {
            "family": len(meta.family_map),
            "order": len(meta.order_map),
            "species": len(meta.species_map),
        }
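
For intuition, the Softmax over dim=1 turns each binary membership row into a soft prior rather than an exact one-hot; a small standalone illustration:

import torch
from torch import nn

row = torch.tensor([[0., 1., 0.]])  # e.g. one family belonging to the middle order
print(nn.Softmax(dim=1)(row))       # tensor([[0.2119, 0.5761, 0.2119]])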
Example #11
 def __init__(self, max_iter, cfg):
     """
     Args:
         max_iter (int): the max number of iteration of the current epoch.
         cfg (CfgNode): configs.
     """
     self._cfg = cfg
     self.max_iter = max_iter
     self.iter_timer = Timer()
     self.num_samples = 0
     self.stats = {}
Example #12
 def __init__(self, cfg):
     self.cfg = cfg
     self.forward_timer = Timer()
     self.total_time = 0
     self.cnt = 0
     self.score = dict()
     self.output_dir = Join(cfg.TEST.OUTPUT_DIR, cfg.TEST.DATASET)
     self.save_img = cfg.TEST.SAVE_IMG
     if not os.path.exists(self.output_dir):
         os.makedirs(self.output_dir)
     self.score_csv = open(Join(self.output_dir, "score.csv"), 'w')
     self.score_csv.write("vid, image_id, psnr, ssim\n")
Example #13
    def __init__(self, epoch_iters, cfg):
        """
        Args:
            epoch_iters (int): the overall number of iterations of one epoch.
            cfg (CfgNode): configs.
        """
        self._cfg = cfg
        self.epoch_iters = epoch_iters
        self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters
        self.iter_timer = Timer()
        self.log_period = cfg.LOG_PERIOD

        self.infos = None
        self.num_samples = 0
Example #14
def benchmark_data(args):
    cfg = setup(args)

    dataloader = build_detection_train_loader(cfg)

    itr = iter(dataloader)
    for _ in range(10):  # warmup
        next(itr)
    timer = Timer()
    max_iter = 1000
    for _ in tqdm.trange(max_iter):
        next(itr)
    logger.info("{} iters ({} images) in {} seconds.".format(
        max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()))
Example #15
class EpochTimer:
    """
    A timer which computes the epoch time.
    """

    def __init__(self) -> None:
        self.timer = Timer()
        self.timer.reset()
        self.epoch_times = []

    def reset(self) -> None:
        """
        Reset the epoch timer.
        """
        self.timer.reset()
        self.epoch_times = []

    def epoch_tic(self):
        """
        Start to record time.
        """
        self.timer.reset()

    def epoch_toc(self):
        """
        Stop to record time.
        """
        self.timer.pause()
        self.epoch_times.append(self.timer.seconds())

    def last_epoch_time(self):
        """
        Get the time for the last epoch.
        """
        assert len(self.epoch_times) > 0, "No epoch time has been recorded!"

        return self.epoch_times[-1]

    def avg_epoch_time(self):
        """
        Calculate the average epoch time among the recorded epochs.
        """
        assert len(self.epoch_times) > 0, "No epoch time has been recorded!"

        return np.mean(self.epoch_times)

    def median_epoch_time(self):
        """
        Calculate the median epoch time among the recorded epochs.
        """
        assert len(self.epoch_times) > 0, "No epoch time has been recorded!"

        return np.median(self.epoch_times)
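
A hypothetical usage of EpochTimer (num_epochs and train_one_epoch are placeholders, not part of the class):

epoch_timer = EpochTimer()
for epoch in range(num_epochs):
    epoch_timer.epoch_tic()
    train_one_epoch()  # placeholder for the real training loop
    epoch_timer.epoch_toc()
print(epoch_timer.avg_epoch_time(), epoch_timer.median_epoch_time())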
Example #16
    def __init__(self, epoch_iters, cfg):
        """
        Args:
            epoch_iters (int): the number of iterations in one epoch.
            cfg (CfgNode): configs.
        """
        self._cfg = cfg
        self.epoch_iters = epoch_iters
        # self.loss=ScalarMeter(cfg.LOG_PERIOD)
        self.mse_loss = ScalarMeter(cfg.LOG_PERIOD)
        self.entropy_loss = ScalarMeter(cfg.LOG_PERIOD)
        self.combine_loss = ScalarMeter(cfg.LOG_PERIOD)
        self.iter_timer = Timer()
        self.lr = None
        # self.loss_total=0.0
        self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters
Example #17
def load_cub_json(ann_files, image_root, dataset_name=None):
    images_txt, classes_txt, image_class_txt, train_test_split_txt = ann_files

    # use split 1 for test, 0 otherwise (guard against dataset_name being None)
    split = 1 if dataset_name and 'test' in dataset_name else 0

    # read the annotation files, closing each handle promptly
    with open(images_txt, 'r') as f:
        images_txt = f.readlines()
    with open(image_class_txt, 'r') as f:
        image_class_txt = f.readlines()
    with open(classes_txt, 'r') as f:
        classes_txt = f.readlines()
    with open(train_test_split_txt, 'r') as f:
        train_test_split_txt = f.readlines()

    imgs = []
    anns = []
    classes = {}

    for i in range(len(train_test_split_txt)):
        image_id, curr_split = train_test_split_txt[i].split()
        if int(curr_split) == split:
            _, image_path = images_txt[i].split()
            _, class_id = image_class_txt[i].split()
            _, class_name = classes_txt[int(class_id) - 1].split()

            curr_image = {"id": int(image_id), "file_name": image_path}
            curr_ann = {"id": i, "category_id":int(class_id) - 1, "image_id":int(image_id)}

            imgs.append(curr_image)
            anns.append(curr_ann)
            classes[int(class_id) - 1] = class_name

    imgs_anns = list(zip(imgs, anns))
    logger.info("Loaded {} images in CUB format from CUB-200".format(len(imgs_anns)))

    dataset_dicts = []

    ann_keys = ["category_id"]

    logger.info("Convert CUB format into herbarium format")

    timer = Timer()

    meta = MetadataCatalog.get(dataset_name)
    dataset_dicts = [process_per_record(anns, image_root, ann_keys, meta) for anns in imgs_anns]

    logger.info("Processing Record takes {:.2f} seconds.".format(timer.seconds()))

    return dataset_dicts
Example #18
    def __init__(self, overall_iters, cfg, mode):
        """
            overall_iters (int): the overall number of iterations of one epoch.
            cfg (CfgNode): configs.
            mode (str): `train`, `val`, or `test` mode.
        """
        is_custom_dataset = cfg.TRAIN.DATASET == "custom" and cfg.TEST.DATASET == "custom"
        if is_custom_dataset:
            logger.info("Creating AVA Meter for custom dataset in %s mode" %
                        mode)
        else:
            logger.info("Creating AVA Meter for AVA dataset in %s mode" % mode)

        self.cfg = cfg
        self.lr = None
        self.loss = ScalarMeter(cfg.LOG_PERIOD)
        self.full_ava_test = cfg.AVA.FULL_TEST_ON_VAL if not is_custom_dataset else cfg.CUSTOM_DATASET.FULL_TEST_ON_VAL
        self.mode = mode
        self.iter_timer = Timer()
        self.all_preds = []
        self.all_ori_boxes = []
        self.all_metadata = []
        self.overall_iters = overall_iters
        self.excluded_keys = read_exclusions(
            os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.EXCLUSION_FILE)
        ) if not is_custom_dataset else read_exclusions(
            os.path.join(cfg.CUSTOM_DATASET.ANNOTATION_DIR,
                         cfg.CUSTOM_DATASET.EXCLUSION_FILE))
        self.categories, self.class_whitelist = read_labelmap(
            os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.LABEL_MAP_FILE)
        ) if not is_custom_dataset else read_labelmap(
            os.path.join(cfg.CUSTOM_DATASET.ANNOTATION_DIR,
                         cfg.CUSTOM_DATASET.LABEL_MAP_FILE))
        gt_filename = os.path.join(
            cfg.AVA.ANNOTATION_DIR, cfg.AVA.GROUNDTRUTH_FILE
        ) if not is_custom_dataset else os.path.join(
            cfg.CUSTOM_DATASET.ANNOTATION_DIR,
            cfg.CUSTOM_DATASET.GROUNDTRUTH_FILE)
        self.full_groundtruth = read_csv(gt_filename, self.class_whitelist)
        self.mini_groundtruth = get_ava_mini_groundtruth(self.full_groundtruth)

        _, self.video_idx_to_name = ava_helper.load_image_lists(
            cfg, mode == "train"
        ) if not is_custom_dataset else custom_helper.load_image_lists(
            cfg, mode == "train")
Example #19
 def test_avg_second(self) -> None:
     """
     Test avg_seconds that counts the average time.
     """
     for pause_second in (0.1, 0.15):
         timer = Timer()
         for t in (pause_second,) * 10:
             if timer.is_paused():
                 timer.resume()
             time.sleep(t)
             timer.pause()
             self.assertTrue(
                 math.isclose(pause_second, timer.avg_seconds(), rel_tol=1e-1),
                 msg="{}: {}".format(pause_second, timer.avg_seconds()),
             )
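
The pause/resume semantics this test relies on, shown in isolation (sleep durations are illustrative):

import time
from fvcore.common.timer import Timer

t = Timer()          # starts running on construction
time.sleep(0.1)
t.pause()
time.sleep(0.5)      # paused time is not counted
t.resume()
time.sleep(0.1)
t.pause()
print(t.seconds())      # ~0.2: the two un-paused spans
print(t.avg_seconds())  # ~0.1: seconds() divided by the number of spans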
Example #20
 def __init__(self, max_iter, cfg):
     """
     Args:
         max_iter (int): the max number of iteration of the current epoch.
         cfg (CfgNode): configs.
     """
     self._cfg = cfg
     self.max_iter = max_iter
     self.iter_timer = Timer()
     # Current minibatch errors (smoothed over a window).
     self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
     # Min errors (over the full val set).
     self.min_top1_err = 100.0
     self.min_top5_err = 100.0
     # Number of misclassified examples.
     self.num_top1_mis = 0
     self.num_top5_mis = 0
     self.num_samples = 0
Example #21
    def __init__(
        self,
        num_videos,
        num_clips,
        num_cls,
        overall_iters,
        multi_label=False,
        ensemble_method="sum",
        log_period=1,
    ):
        """
        Construct tensors to store the predictions and labels. Expect to get
        num_clips predictions from each video, and calculate the metrics on
        num_videos videos.
        Args:
            num_videos (int): number of videos to test.
            num_clips (int): number of clips sampled from each video for
                aggregating the final prediction for the video.
            num_cls (int): number of classes for each prediction.
            overall_iters (int): overall iterations for testing.
            multi_label (bool): if True, use map as the metric.
            ensemble_method (str): method to perform the ensemble; options
                include "sum" and "max".
            log_period (int): log the stats every `log_period` iterations.
        """

        self.iter_timer = Timer()
        self.num_clips = num_clips
        self.overall_iters = overall_iters
        self.multi_label = multi_label
        self.ensemble_method = ensemble_method
        self.log_period = log_period
        # Initialize tensors.
        self.video_preds = torch.zeros((num_videos, num_cls))
        if multi_label:
            self.video_preds -= 1e10

        self.video_labels = (torch.zeros(
            (num_videos, num_cls)) if multi_label else torch.zeros(
                (num_videos)).long())
        self.clip_count = torch.zeros((num_videos)).long()
        self.view_count = torch.zeros((num_videos, num_clips)).long()
        # Reset metric.
        self.reset()
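
A sketch of the per-video ensemble step this meter performs as clips arrive (shapes and names are illustrative):

import torch

clip_preds = torch.rand(4, 10)                 # 4 clips x 10 classes for one video
video_pred_sum = clip_preds.sum(dim=0)         # ensemble_method == "sum"
video_pred_max = clip_preds.max(dim=0).values  # ensemble_method == "max"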
Example #22
 def __init__(self, epoch_iters, cfg):
     """
     Args:
         epoch_iters (int): the overall number of iterations of one epoch.
         cfg (CfgNode): configs.
     """
     self._cfg = cfg
     self.epoch_iters = epoch_iters
     self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters
     self.iter_timer = Timer()
     self.loss = ScalarMeter(cfg.LOG_PERIOD)
     self.loss_total = 0.0
     self.lr = None
     # Current minibatch errors (smoothed over a window).
     self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
     # Number of misclassified examples.
     self.num_top1_mis = 0
     self.num_top5_mis = 0
     self.num_samples = 0
Example #23
def _load_coco_annotations(json_file: str):
    """
    Load COCO annotations from a JSON file

    Args:
        json_file: str
            Path to the file to load annotations from
    Returns:
        Instance of `pycocotools.coco.COCO` that provides access to annotations
        data
    """
    from pycocotools.coco import COCO

    logger = logging.getLogger(__name__)
    timer = Timer()
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
    return coco_api
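
A brief usage sketch of the returned pycocotools object (the path is illustrative):

coco_api = _load_coco_annotations("datasets/coco/annotations/instances_val2017.json")
img_ids = sorted(coco_api.getImgIds())
anns = coco_api.loadAnns(coco_api.getAnnIds(imgIds=img_ids[:1]))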
Example #24
def _load_lvis_annotations(json_file: str):
    """
    Load COCO annotations from a JSON file

    Args:
        json_file: str
            Path to the file to load annotations from
    Returns:
        Instance of `pycocotools.coco.COCO` that provides access to annotations
        data
    """
    from lvis import LVIS

    json_file = PathManager.get_local_path(json_file)
    logger = logging.getLogger(__name__)
    timer = Timer()
    lvis_api = LVIS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
    return lvis_api
Example #25
 def __init__(self, epoch_iters, cfg):
     """
     Args:
         epoch_iters (int): the overall number of iterations of one epoch.
         cfg (CfgNode): configs.
     """
     self._cfg = cfg
     self.epoch_iters = epoch_iters
     self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters
     self.iter_timer = Timer()
     self.data_timer = Timer()
     self.net_timer = Timer()
     self.loss = ScalarMeter(cfg.LOG_PERIOD)
     self.loss_total = 0.0
     self.loss_verb = ScalarMeter(cfg.LOG_PERIOD)
     self.loss_verb_total = 0.0
     self.loss_noun = ScalarMeter(cfg.LOG_PERIOD)
     self.loss_noun_total = 0.0
     self.lr = None
     # Current minibatch accuracies (smoothed over a window).
     self.mb_top1_acc = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_top5_acc = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_verb_top1_acc = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_verb_top5_acc = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_noun_top1_acc = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_noun_top5_acc = ScalarMeter(cfg.LOG_PERIOD)
     # Number of correctly classified examples.
     self.num_top1_cor = 0
     self.num_top5_cor = 0
     self.num_verb_top1_cor = 0
     self.num_verb_top5_cor = 0
     self.num_noun_top1_cor = 0
     self.num_noun_top5_cor = 0
     self.num_samples = 0
     self.output_dir = cfg.OUTPUT_DIR
Example #26
 def __init__(self, max_iter, cfg):
     """
     Args:
         max_iter (int): the max number of iteration of the current epoch.
         cfg (CfgNode): configs.
     """
     self._cfg = cfg
     self.max_iter = max_iter
     self.iter_timer = Timer()
     self.data_timer = Timer()
     self.net_timer = Timer()
     # Current minibatch errors (smoothed over a window).
     self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
     # Min errors (over the full val set).
     self.min_top1_err = 100.0
     self.min_top5_err = 100.0
     # Number of misclassified examples.
     self.num_top1_mis = 0
     self.num_top5_mis = 0
     self.num_samples = 0
     self.all_preds = []
     self.all_labels = []
     self.output_dir = cfg.OUTPUT_DIR
     self.extra_stats = {}
     self.extra_stats_total = {}
     self.log_period = cfg.LOG_PERIOD
Example #27
    def __init__(self, num_videos, num_clips, num_cls, overall_iters):
        """
        Construct tensors to store the predictions and labels. Expect to get
        num_clips predictions from each video, and calculate the metrics on
        num_videos videos.
        Args:
            num_videos (int): number of videos to test.
            num_clips (int): number of clips sampled from each video for
                aggregating the final prediction for the video.
            num_cls (int): number of classes for each prediction.
            overall_iters (int): overall iterations for testing.
        """

        self.iter_timer = Timer()
        self.num_clips = num_clips
        self.overall_iters = overall_iters
        # Initialize tensors.
        self.video_preds = torch.zeros((num_videos, num_cls))
        self.video_labels = torch.zeros((num_videos)).long()
        self.clip_count = torch.zeros((num_videos)).long()
        # Reset metric.
        self.reset()
Example #28
def benchmark_eval(args):
    cfg = setup(args)
    if args.config_file.endswith(".yaml"):
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    else:
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)

        cfg.dataloader.num_workers = 0
        data_loader = instantiate(cfg.dataloader.test)

    model.eval()
    logger.info("Model:\n{}".format(model))
    dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)),
                                 copy=False)

    def f():
        while True:
            yield from dummy_data

    for k in range(5):  # warmup
        model(dummy_data[k])

    max_iter = 300
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for idx, d in enumerate(f()):
            if idx == max_iter:
                break
            model(d)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
Example #29
 def __init__(self, max_iter, max_epoch, cfg):
     """
     Args:
         max_iter (int): the max number of iteration of the current epoch.
         max_epoch (int): the maximum number of epochs of the training phase
         cfg (CfgNode): configs.
     """
     self._cfg = cfg
     self.max_iter = max_iter
     self.MAX_EPOCH = max_epoch
     self.iter_timer = Timer()
     # Current minibatch errors (smoothed over a window).
     self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
     self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
     # Min errors (over the full val set).
     self.min_top1_err = 100.0
     self.min_top5_err = 100.0
     # Number of misclassified examples.
     self.num_top1_mis = 0
     self.num_top5_mis = 0
     self.num_samples = 0
     self.all_preds = []
     self.all_labels = []
     self.max_map = 0.0
Example #30
    def __init__(self, cfg, args, iter_every_epoch, **kwargs):
        super().__init__()

        # fmt: off
        self.cfg = cfg
        self.args = args
        self.iter_every_epoch = iter_every_epoch
        # fmt: on

        self.timer = Timer()
        self.device = torch.device(f'cuda:{comm.get_rank()}')
        self.logger = logging.getLogger('tl')
        self.num_gpu = comm.get_world_size()
        self.distributed = comm.get_world_size() > 1

        # torch.cuda.set_device(self.device)
        # self.build_models(cfg=cfg)
        self.to(self.device)