Ejemplo n.º 1
0
 def build_train_loader(cls, cfg):
     """Build the training loader, enabling the polyp-segmentation
     augmentations when ``cfg.INPUT.AUG`` is set; otherwise fall back to the
     stock training mapper."""
     if cfg.INPUT.AUG:
         aug_list = build_polyp_segm_train_aug(cfg)
         mapper = DatasetMapper(cfg, is_train=True, augmentations=aug_list)
     else:
         mapper = DatasetMapper(cfg, True)
     return build_detection_train_loader(cfg, mapper=mapper)
Ejemplo n.º 2
0
    def build_hooks(self):
        """
        Build a list of default hooks, including timing, evaluation,
        checkpointing, lr scheduling, precise BN, writing events, plus a
        custom validation-loss hook.

        Returns:
            list[HookBase]: may contain ``None`` entries (when PreciseBN is
            disabled) — presumably filtered during hook registration; TODO
            confirm.
        """
        # Work on a local clone so tweaks here don't leak into self.cfg.
        cfg = self.cfg.clone()
        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN

        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(self.optimizer, self.scheduler),
            hooks.PreciseBN(
                # Run at the same freq as (but before) evaluation.
                cfg.TEST.EVAL_PERIOD,
                self.model,
                # Build a new data loader to not affect training
                self.build_train_loader(cfg),
                cfg.TEST.PRECISE_BN.NUM_ITER,
            ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
            else None,
        ]

        ## insert custom hook: periodic validation loss on the first TEST split
        ret.insert(
            -1,
            LossEvalHook(
                self.cfg.TEST.EVAL_PERIOD, self.model,
                build_detection_test_loader(self.cfg,
                                            self.cfg.DATASETS.TEST[0],
                                            DatasetMapper(self.cfg, True))))
        ## change dataset index (custom)
        # NOTE(review): this increments the *local clone* only, so the advanced
        # index is discarded when this method returns — confirm whether
        # self.cfg.Test_index was meant to be updated instead.
        cfg.Test_index += 1
        if cfg.Test_index == cfg.Test_index_MAX:
            cfg.Test_index = 0

        # Do PreciseBN before checkpointer, because it updates the model and need to
        # be saved by checkpointer.
        # This is not always the best: if checkpointing has a different frequency,
        # some checkpoints may have more precise statistics than others.
        if comm.is_main_process():
            ret.append(
                hooks.PeriodicCheckpointer(self.checkpointer,
                                           cfg.SOLVER.CHECKPOINT_PERIOD))

        def test_and_save_results():
            # Cache the results so callers can read the last evaluation output.
            self._last_eval_results = self.test(self.cfg, self.model)
            return self._last_eval_results

        # Do evaluation after checkpointer, because then if it fails,
        # we can use the saved checkpoint to debug.
        ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))

        if comm.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
        return ret
Ejemplo n.º 3
0
def build_detection_test_loader(cfg, dataset_name, batch_size, mapper=None):
    """Build a test-time DataLoader for a single named dataset.

    Args:
        cfg: a detectron2 CfgNode.
        dataset_name (str): dataset registered in the DatasetCatalog.
        batch_size (int): images per batch.
        mapper (callable): sample-dict -> model-input transform; defaults to
            ``DatasetMapper(cfg, False)``.

    Returns:
        torch.utils.data.DataLoader over the mapped dataset.
    """
    proposal_files = None
    if cfg.MODEL.LOAD_PROPOSALS:
        # Pick the proposal file matching this dataset's position in TEST.
        ds_index = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[ds_index]]

    dataset_dicts = get_detection_dataset_dicts(
        [dataset_name],
        filter_empty=False,
        proposal_files=proposal_files,
    )

    if mapper is None:
        mapper = DatasetMapper(cfg, False)
    dataset = MapDataset(DatasetFromList(dataset_dicts), mapper)

    # Deterministic, rank-partitioned traversal of the whole dataset.
    sampler = samplers.InferenceSampler(len(dataset))
    batch_sampler = torch.utils.data.sampler.BatchSampler(
        sampler, batch_size, drop_last=False)

    return torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=_trivial_batch_collator,
    )
 def build_train_loader(cls, cfg: CfgNode):
     """Training loader using the module-level ``TRAIN_TRANSF`` augmentations."""
     # pylint:disable=redundant-keyword-arg,missing-kwoa
     mapper = DatasetMapper(cfg, is_train=True, augmentations=TRAIN_TRANSF)
     return build_detection_train_loader(cfg, mapper=mapper)
Ejemplo n.º 5
0
 def build_train_loader(cls, cfg):
     """Attach semantic-segmentation training augmentations only when the
     meta-architecture is a SemanticSegmentor; otherwise use the default
     mapper (``None``)."""
     mapper = None
     if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
         mapper = DatasetMapper(cfg, is_train=True,
                                augmentations=build_sem_seg_train_aug(cfg))
     return build_detection_train_loader(cfg, mapper=mapper)
Ejemplo n.º 6
0
 def build_hooks(self):
     """Extend the default hooks with an early-stop hook and a
     validation-loss hook, each fed by its own test loader over the first
     TEST split.

     Returns:
         list: the parent's hooks with the two custom hooks inserted
         before the final default hook.

     BUG FIX: the original referenced the undefined name ``cfg`` for the
     evaluation period (a NameError at runtime); it now uses ``self.cfg``,
     consistent with the rest of the method. The local was also renamed to
     avoid shadowing the ``hooks`` module used elsewhere in this file.
     """
     hook_list = super().build_hooks()
     hook_list.insert(
         -1,
         EarlyStop(
             self.cfg.TEST.EVAL_PERIOD, self.model,
             build_detection_test_loader(self.cfg,
                                         self.cfg.DATASETS.TEST[0],
                                         DatasetMapper(self.cfg, True))))
     hook_list.insert(
         -1,
         LossEvalHook(
             self.cfg.TEST.EVAL_PERIOD, self.model,
             build_detection_test_loader(self.cfg,
                                         self.cfg.DATASETS.TEST[0],
                                         DatasetMapper(self.cfg, True))))
     return hook_list
 def build_test_loader(cls, cfg: CfgNode, dataset_name: str):
     """Test loader for ``dataset_name`` with the fixed ``VAL_TRANSF`` augmentations."""
     # pylint:disable=redundant-keyword-arg,missing-kwoa
     mapper = DatasetMapper(cfg, is_train=False, augmentations=VAL_TRANSF)
     return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
Ejemplo n.º 8
0
    def evaluate_loss(self, cfg, model):
        """Compute and log the validation loss to Comet

        Args:
            cfg (CfgNode): Detectron Config Object
            model (torch.nn.Module): Detectron Model

        Returns:
            dict: Empty Dict to satisfy Detectron Eval Hook API requirements
        """
        # Train-style mapper (is_train=True) keeps annotations so the model
        # can produce a loss dict on validation images.
        eval_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0],
                                                  DatasetMapper(cfg, True))

        # Copying inference_on_dataset from evaluator.py
        total = len(eval_loader)
        num_warmup = min(5, total - 1)  # first iterations excluded from timing

        start_time = time.perf_counter()
        total_compute_time = 0
        losses = []

        if comm.is_main_process():
            storage = get_event_storage()

            for idx, inputs in enumerate(eval_loader):
                if idx == num_warmup:
                    # Warm-up done: restart the timers.
                    start_time = time.perf_counter()
                    total_compute_time = 0
                start_compute_time = time.perf_counter()
                if torch.cuda.is_available():
                    torch.cuda.synchronize()
                # NOTE(review): only cuda.synchronize() falls inside this timing
                # window — the actual loss computation happens further below, so
                # seconds_per_img underestimates the true per-image cost. Confirm
                # whether the timer was meant to wrap self._get_loss as well.
                total_compute_time += time.perf_counter() - start_compute_time
                iters_after_start = idx + 1 - num_warmup * int(
                    idx >= num_warmup)
                seconds_per_img = total_compute_time / iters_after_start
                if idx >= num_warmup * 2 or seconds_per_img > 5:
                    total_seconds_per_img = (time.perf_counter() -
                                             start_time) / iters_after_start
                    eta = datetime.timedelta(
                        seconds=int(total_seconds_per_img * (total - idx - 1)))
                    log_every_n_seconds(
                        logging.INFO,
                        "Loss on Validation  done {}/{}. {:.4f} s / img. ETA={}"
                        .format(idx + 1, total, seconds_per_img, str(eta)),
                        n=5,
                    )
                loss_batch = self._get_loss(model, inputs)
                losses.append(loss_batch)
            mean_loss = np.mean(losses)

            # Log to Comet
            self.experiment.log_metric("eval_loss", mean_loss)

            storage.put_scalar("eval_loss", mean_loss)
            # NOTE(review): this collective runs only on the main process; with
            # more than one process the others never reach it, which looks like
            # a potential deadlock — verify in a multi-GPU run.
            comm.synchronize()

        # Returns empty dict to satisfy Detectron Eval Hook requirement
        return {}
Ejemplo n.º 9
0
def inference(cfg, out_dir):
    """Run semantic-segmentation inference over ``cfg.DATASETS.TEST[0]`` and
    write the predicted masks (label map scaled by 255, replicated to 3
    channels) as PNGs under ``out_dir``.

    Args:
        cfg: detectron2 config node.
        out_dir (str): output path prefix; each input's base file name is
            reused with 'jpg' replaced by 'png'.
    """
    # build model and restore trained weights
    model = build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        "./output/autoaug_post_train/model_final.pth", resume=True)

    # test-time data loader
    mapper = DatasetMapper(cfg, False)
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0],
                                              mapper)

    total = len(data_loader)  # inference data loader must have a fixed length
    num_warmup = min(5, total - 1)
    start_time = time.perf_counter()
    total_compute_time = 0
    # (removed an unused ``num_devices`` computation from the original)

    model.eval()
    # BUG FIX: the original called ``torch.no_grad()`` as a bare statement,
    # which creates and immediately discards the context manager without
    # disabling gradients; it must be used as a ``with`` block.
    with torch.no_grad():
        for idx, inputs in enumerate(data_loader):

            start_compute_time = time.perf_counter()
            outputs = model(inputs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()  # make GPU timing accurate
            total_compute_time += time.perf_counter() - start_compute_time

            # periodic progress / ETA logging (warm-up iterations excluded)
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            seconds_per_img = total_compute_time / iters_after_start
            if idx >= num_warmup * 2 or seconds_per_img > 5:
                total_seconds_per_img = (time.perf_counter() -
                                         start_time) / iters_after_start
                eta = datetime.timedelta(seconds=int(total_seconds_per_img *
                                                     (total - idx - 1)))
                log_every_n_seconds(
                    logging.INFO,
                    "Inference done {}/{}. {:.4f} s / img. ETA={}".format(
                        idx + 1, total, seconds_per_img, str(eta)),
                    n=5,
                )

            for inp, output in zip(inputs, outputs):  # renamed from ``input`` (shadowed builtin)

                # argmax over classes -> HxW label map, replicated to 3 channels
                pred_segm = output["sem_seg"].to("cpu")
                pred = torch.max(pred_segm, dim=0)[1].data
                pred = pred.numpy()[:, :, np.newaxis]
                pred = np.dstack((pred, pred, pred))

                # NOTE(review): str.replace swaps *every* 'jpg' substring in the
                # file name, not just the extension — confirm names are safe.
                cv2.imwrite(
                    out_dir +
                    inp["file_name"].split("/")[-1].replace("jpg", "png"),
                    pred * 255)
 def build_hooks(self):
     """Insert a LossEvalHook over ``self.val_data`` before the last default hook."""
     ret = super().build_hooks()
     val_loader = build_detection_test_loader(self.cfg, self.val_data,
                                              DatasetMapper(self.cfg, True))
     ret.insert(-1,
                LossEvalHook(self.cfg, self.val_period, self.model,
                             self.scheduler, val_loader))
     return ret
Ejemplo n.º 11
0
 def build_train_loader(cls, cfg):
     """Use the points-collection training augmentations when the
     meta-architecture is a PointsCollection model; otherwise keep the
     default mapper."""
     mapper = None
     if "PointsCollection" in cfg.MODEL.META_ARCHITECTURE:
         mapper = DatasetMapper(cfg, is_train=True,
                                augmentations=build_Pt_collect_train_aug(cfg))
     return build_detection_train_loader(cfg, mapper=mapper)
Ejemplo n.º 12
0
 def build_train_loader(cls, cfg):
     """Training loader with a fixed resize plus random flip.

     NOTE(review): the resize target is ``cfg.INPUT.MIN_SIZE_TEST`` even
     though this is the *train* loader (and MIN_SIZE_TRAIN is what gets
     printed) — confirm that is intentional.
     """
     print(cfg.INPUT.MIN_SIZE_TRAIN)
     augs = [
         transforms.Resize(cfg.INPUT.MIN_SIZE_TEST),
         transforms.RandomFlip(),
     ]
     mapper = DatasetMapper(cfg, is_train=True, augmentations=augs)
     return build_detection_train_loader(cfg, mapper)
Ejemplo n.º 13
0
 def build_hooks(self):
     """Add a validation-loss hook over the 'solar_val' split before the
     final default hook."""
     ret = super().build_hooks()
     solar_loader = build_detection_test_loader(self.cfg, "solar_val",
                                                DatasetMapper(self.cfg, True))
     ret.insert(-1,
                LossEvalHook(self.cfg.TEST.EVAL_PERIOD, self.model,
                             solar_loader))
     return ret
def build_test_loader(cfg, dataset_name):
    """Test loader that square-resizes every image to the CLSNET input size."""
    side = cfg.MODEL.CLSNET.INPUT_SIZE
    mapper = DatasetMapper(cfg, is_train=False,
                           augmentations=[T.Resize((side, side))])
    return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
 def build_train_loader(cls, cfg):
     """Training loader whose only augmentation is an absolute 640x640
     random crop (brightness/flip were disabled in the original)."""
     crop = T.RandomCrop("absolute", (640, 640))
     mapper = DatasetMapper(cfg, is_train=True, augmentations=[crop])
     return build_detection_train_loader(cfg, mapper=mapper)
Ejemplo n.º 16
0
 def build_hooks(self):
     """Add an early-stopping ValidationHook (patience=3) before the final
     default hook."""
     ret = super().build_hooks()
     val_loader = build_detection_test_loader(
         self.cfg, self.cfg.DATASETS.TEST[0],
         DatasetMapper(self.cfg, True))
     ret.insert(-1,
                ValidationHook(self.cfg.TEST.EVAL_PERIOD, self.model,
                               val_loader, patience=3))
     return ret
 def build_train_loader(cls, cfg):
     """Build the default training loader with the stock ``DatasetMapper``.

     See: https://detectron2.readthedocs.io/tutorials/data_loading.html

     Cleanup: removed several blocks of commented-out alternative mappers
     and pasted reference URLs left over from experimentation; behavior is
     unchanged.
     """
     return build_detection_train_loader(
         cfg, mapper=DatasetMapper(cfg, is_train=True))
Ejemplo n.º 18
0
 def build_hooks(self):
     """Add a patience-3 ValidationHook fed by a 16-image train-style loader
     over the first TEST split."""
     ret = super().build_hooks()
     val_loader = build_detection_train_loader(
         dataset=self.cfg.DATASETS.TEST[0],
         mapper=DatasetMapper(self.cfg, is_train=False),
         total_batch_size=16)
     ret.insert(-1,
                ValidationHook(self.cfg.TEST.EVAL_PERIOD, self.model,
                               val_loader, patience=3))
     return ret
def build_train_loader(cfg):
    """Training loader: square resize to the CLSNET input size plus
    photometric jitter (contrast/brightness/saturation)."""
    side = cfg.MODEL.CLSNET.INPUT_SIZE
    augs = [
        T.Resize((side, side)),
        T.RandomContrast(0.5, 1.5),
        T.RandomBrightness(0.5, 1.5),
        T.RandomSaturation(0.5, 1.5),
    ]
    mapper = DatasetMapper(cfg, is_train=True, augmentations=augs)
    return build_detection_train_loader(cfg, mapper=mapper)
 def build_hooks(self):
     """
     Extra hook to collect and plot evaluation metrics into TensorBoard.
     """
     ret = super().build_hooks()
     loss_loader = build_detection_test_loader(
         self.cfg, self.cfg.DATASETS.TEST[0],
         DatasetMapper(self.cfg, True))
     ret.insert(-1,
                LossEvalHook(self.cfg.TEST.EVAL_PERIOD, self.model,
                             loss_loader))
     return ret
Ejemplo n.º 21
0
 def build_train_loader(cls, cfg):
     """Training loader with random crop, rotation, and photometric
     augmentations.

     BUG FIX: the original passed ``random.randrange(0, 360)`` — a single
     int drawn once when the loader is built — to ``T.RandomRotation``, so
     every image received the same fixed rotation angle for the whole run.
     Passing the range ``[0, 360]`` lets RandomRotation sample a fresh angle
     per image, which is the evident intent.
     """
     return build_detection_train_loader(
         cfg,
         mapper=DatasetMapper(cfg,
                              is_train=True,
                              augmentations=[
                                  T.RandomCrop("absolute_range",
                                               (300, 600)),
                                  T.RandomRotation([0, 360]),
                                  T.RandomContrast(0.5, 1.5),
                                  T.RandomSaturation(0.5, 1.5)
                              ]))
Ejemplo n.º 22
0
 def build_hooks(self):
     """Add a LossEvalHook, then swap the last two hooks so ValidationLoss
     runs before PeriodicWriter.

     The swap is required: the run hangs with more than one GPU if the two
     hooks stay in their default order.
     """
     ret = super().build_hooks()
     loss_loader = build_detection_test_loader(
         self.cfg, self.cfg.DATASETS.TEST[0],
         DatasetMapper(self.cfg, True))
     ret.insert(-1,
                utils.LossEvalHook(self.cfg.TEST.EVAL_PERIOD, self.model,
                                   loss_loader))
     # swap the order of PeriodicWriter and ValidationLoss
     ret[-2], ret[-1] = ret[-1], ret[-2]
     return ret
Ejemplo n.º 23
0
 def build_hooks(self):
     """Insert a once-per-epoch LossEvalHook over the 'tower_foreground_val'
     split, using the semantic-segmentation training augmentations."""
     ret = super().build_hooks()
     val_mapper = DatasetMapper(
         self.cfg, True,
         augmentations=build_sem_seg_train_aug(self.cfg))
     val_loader = build_detection_test_loader(
         self.cfg, "tower_foreground_val", val_mapper)
     ret.insert(-1, LossEvalHook(ONE_EPOCH, self.model, val_loader))
     return ret
Ejemplo n.º 24
0
def do_test(cfg, model):
    """Evaluate ``model`` on every dataset in ``cfg.DATASETS.TEST`` and print
    the per-task metrics as pandas DataFrames on the main process.

    Cleanup: the original used always-true string-literal conditions
    (``if 'build_detection_test_loader':`` etc.) as pseudo-labels; they added
    an indentation level but never changed control flow and are removed.
    """
    for dataset_name in cfg.DATASETS.TEST:
        # Build the test loader by hand (mirrors build_detection_test_loader).
        if dataset_name == 'coco_2017_val':
            dicts_valid: List[Dict] = DatasetCatalog.get(dataset_name)
            ds_valid = DatasetFromList(dicts_valid, copy=False)
            mapper = DatasetMapper(cfg, is_train=False)
        else:  # Open-Image-Dataset
            descs_get: List[Dict] = DatasetCatalog.get(dataset_name)
            # The full validation set is too large: sample a fixed subset.
            random.seed(2020)
            descs_valid = random.choices(descs_get, k=N_IMAGES_PER_TEST)
            # TODO: clear cache.
            ds_valid = DatasetFromList(descs_valid)
            mapper = make_mapper(dataset_name, is_train=False, augmentations=None)

        ds_valid = MapDataset(ds_valid, mapper)

        sampler = InferenceSampler(len(ds_valid))
        # Always use 1 image per worker during inference since this is the
        # standard when reporting inference time in papers.
        batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)

        data_loader = torch.utils.data.DataLoader(
            ds_valid,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=batch_sampler,
            collate_fn=trivial_batch_collator,
        )

        evaluator = get_evaluator2(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )

        results_i = inference_on_dataset(model, data_loader, evaluator)
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            # Show every non-NaN metric, with the task prefix stripped from the index.
            for res in results_i.values():
                res_df = pd.DataFrame(pd.Series(res, name='value'))
                res_df = res_df[res_df['value'].notna()]
                res_df.index = res_df.index.map(lambda x: '/'.join(x.split('/')[1:]))
                pd.set_option('display.max_rows', None)
                print(res_df)
                pd.reset_option('display.max_rows')
Ejemplo n.º 25
0
 def build_train_loader(cls, cfg):
     """Build the training loader with augmentations from
     ``build_aug_transforms(cfg)``.

     NOTE(review): the original also computed an ``augs`` list (from
     ``cls.augs``, or a default ``ResizeShortestEdge``) but never used it —
     the mapper always received ``build_aug_transforms(cfg)``. The dead
     computation is removed here; confirm ``augs`` was not meant to be
     passed as ``augmentations`` instead.
     """
     return build_detection_train_loader(
         cfg,
         mapper=DatasetMapper(cfg,
                              is_train=True,
                              augmentations=build_aug_transforms(cfg)))
Ejemplo n.º 26
0
 def __init__(self, cfg: CfgNode, val_augmentation: Sequence[Augmentation],
              period: int):
     """Set up the hook: clone the config, point its TRAIN datasets at the
     TEST datasets, and hold an infinite iterator over a train-style loader
     that applies ``val_augmentation``."""
     super().__init__()
     self.cfg = cfg.clone()
     # Evaluate on the test datasets, but through the *training* loader path.
     self.cfg.DATASETS.TRAIN = cfg.DATASETS.TEST
     val_mapper = DatasetMapper(self.cfg, is_train=True,
                                augmentations=val_augmentation)
     self._loader = iter(
         build_detection_train_loader(self.cfg, mapper=val_mapper))
     self._period = period
     self.num_steps = 0
Ejemplo n.º 27
0
def do_test(cfg, model):
    """Run evaluator-based inference on every test dataset; return a dict of
    results keyed by dataset name (or the bare result when there is only one
    test dataset)."""
    results = OrderedDict()
    # perform inference on all testing datasets
    for name in cfg.DATASETS.TEST:
        loader = build_detection_test_loader(
            cfg, name, mapper=DatasetMapper(cfg, False))
        evaluator = get_evaluator(
            cfg, name, os.path.join(cfg.OUTPUT_DIR, "inference", name))
        res = inference_on_dataset(model, loader, evaluator)
        results[name] = res
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(name))
            print_csv_format(res)
    if len(results) == 1:
        return next(iter(results.values()))
    return results
Ejemplo n.º 28
0
    def build_train_loader(cls, cfg, dataset_dicts, curr_to_prev_filename,
                           curr_to_prev_img_id):
        """
        Returns:
            iterable

        It now calls :func:`detectron2.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        # The mapper carries the current-frame -> previous-frame lookup tables.
        mapper = DatasetMapper(cfg,
                               dataset_dicts=dataset_dicts,
                               curr_to_prev_filename=curr_to_prev_filename,
                               curr_to_prev_img_id=curr_to_prev_img_id)

        # Fix: removed a leftover debug ``print(mapper.curr_to_prev_img_id)``
        # that dumped the entire id mapping to stdout on every loader build.
        return build_detection_train_loader(cfg, mapper=mapper)
Ejemplo n.º 29
0
 def build_train_loader1(cls, cfg: CfgNode, mapper=None):
     """Training loader; when no mapper is supplied, build a default one with
     resize-shortest-edge, random crop, and random flip from the config."""
     if mapper is None:
         default_augs = [
             T.ResizeShortestEdge(
                 cfg.INPUT.MIN_SIZE_TRAIN,
                 cfg.INPUT.MAX_SIZE_TRAIN,
                 cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
             ),
             T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
             T.RandomFlip(),
         ]
         mapper = DatasetMapper(cfg=cfg, is_train=True,
                                augmentations=default_augs)
     return build_detection_train_loader(cfg, mapper=mapper)
def build_detection_test_loader(cfg, dataset_name, batch_size, mapper=None):
    """
    Similar to `build_detection_train_loader`.
    But this function uses the given `dataset_name` argument (instead of the
    names in cfg), and batches images with the given `batch_size`.

    Args:
        cfg: a detectron2 CfgNode
        dataset_name (str): a name of the dataset that's available in the DatasetCatalog
        batch_size (int): number of images per batch produced by the loader
        mapper (callable): a callable which takes a sample (dict) from dataset
           and returns the format to be consumed by the model.
           By default it will be `DatasetMapper(cfg, False)`.

    Returns:
        DataLoader: a torch DataLoader, that loads the given detection
        dataset, with test-time transformation and batching.
    """
    dataset_dicts = get_detection_dataset_dicts(
        [dataset_name],
        filter_empty=False,
        proposal_files=[
            cfg.DATASETS.PROPOSAL_FILES_TEST[list(
                cfg.DATASETS.TEST).index(dataset_name)]
        ] if cfg.MODEL.LOAD_PROPOSALS else None,
    )

    dataset = DatasetFromList(dataset_dicts)
    if mapper is None:
        mapper = DatasetMapper(cfg, False)
    dataset = MapDataset(dataset, mapper)

    sampler = samplers.InferenceSampler(len(dataset))
    # Always use 1 image per worker during inference since this is the
    # standard when reporting inference time in papers.
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler,
                                                          batch_size,
                                                          drop_last=False)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
    return data_loader