Example #1
 def build_train_loader(
         cls,
         cfg,
         aug_settings_file_path: str = None,
         aug_on: bool = True,
         train_val: bool = False,
         train_type: str = 'seg',
         aug_vis_save_path: str = 'aug_vis.png',
         show_aug_seg: bool = False,
         aug_n_rows: int = 3,
         aug_n_cols: int = 5,
         aug_save_dims: Tuple[int, int] = (3 * 500, 5 * 500),
 ):
     if aug_on:
         aug_seq = get_augmentation(load_path=aug_settings_file_path)
         aug_loader = AugmentedLoader(
             cfg=cfg,
             train_type=train_type,
             aug=aug_seq,
             aug_vis_save_path=aug_vis_save_path,
             show_aug_seg=show_aug_seg,
             aug_n_rows=aug_n_rows,
             aug_n_cols=aug_n_cols,
             aug_save_dims=aug_save_dims,
         )
         return build_detection_train_loader(cfg, mapper=aug_loader)
     else:
         return build_detection_train_loader(cfg, mapper=None)
Example #2
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`detectron2.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        if cfg.DATALOADER.MAPPER == "amodal_and_visible":
            return build_detection_train_loader(cfg,
                                                mapper=AmodalDatasetMapper(
                                                    cfg, is_train=True))
        return build_detection_train_loader(cfg)
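These `build_train_loader` overrides are `@classmethod`s on a `DefaultTrainer` subclass; the snippets omit the decorator and the surrounding class. A minimal sketch of how one plugs in (the config path is hypothetical):

from detectron2.config import get_cfg
from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer

class MyTrainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        # mapper=None falls back to the default DatasetMapper;
        # swap in a custom mapper as in the examples here.
        return build_detection_train_loader(cfg, mapper=None)

cfg = get_cfg()
cfg.merge_from_file("config.yaml")  # hypothetical path
trainer = MyTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()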
Example #3
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`detectron2.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        # _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
        assert len(cfg.INPUT.MIN_SIZE_TRAIN) == 1
        assert cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING == "choice"

        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        if not isinstance(min_size, int):
            min_size = min(min_size)
        tfm_gens = []
        tfm_gens.append(custom_aug.ResizeWithPad((min_size, max_size)))
        tfm_gens.append(aug.RandomFlip(prob=0.4, horizontal=True, vertical=False))
        tfm_gens.append(aug.RandomFlip(prob=0.4, horizontal=False, vertical=True))
        tfm_gens.append(aug.RandomApply(aug.RandomRotation([-20, 20], expand=False), prob=0.5))
        tfm_gens.append(aug.RandomApply(aug.RandomContrast(0.8, 1.2), prob=0.3))
        tfm_gens.append(aug.RandomApply(aug.RandomBrightness(0.9, 1.1), prob=0.2))

        mapper = DatasetMapper(cfg, True, augs=tfm_gens)
        return build_detection_train_loader(cfg, mapper=mapper)
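For comparison, a sketch of the same pipeline against stock detectron2 transforms: upstream DatasetMapper takes augmentations= rather than this fork's augs=, and ResizeShortestEdge stands in for the project-specific custom_aug.ResizeWithPad.

import detectron2.data.transforms as T
from detectron2.data import DatasetMapper, build_detection_train_loader

def build_loader(cfg):
    tfm_gens = [
        # "choice" sampling picks one of the configured short-edge sizes
        T.ResizeShortestEdge(cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN,
                             sample_style="choice"),
        T.RandomFlip(prob=0.4, horizontal=True, vertical=False),
        T.RandomFlip(prob=0.4, horizontal=False, vertical=True),
        T.RandomApply(T.RandomRotation([-20, 20], expand=False), prob=0.5),
        T.RandomApply(T.RandomContrast(0.8, 1.2), prob=0.3),
        T.RandomApply(T.RandomBrightness(0.9, 1.1), prob=0.2),
    ]
    mapper = DatasetMapper(cfg, is_train=True, augmentations=tfm_gens)
    return build_detection_train_loader(cfg, mapper=mapper)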
Example #4
 def build_train_loader(cls, cfg):
     dataset_names = cfg.DATASETS.TRAIN
     return build_detection_train_loader(cfg,
                                         mapper=PlaneRCNNMapper(
                                             cfg,
                                             True,
                                             dataset_names=dataset_names))
Example #5
def setup(file):
    # get cfg
    cfg = get_cfg()
    cfg.merge_from_file(file)
    cfg.SOLVER.IMS_PER_BATCH = 2

    # get data loader iter
    data_loader = build_detection_train_loader(cfg)
    data_loader_iter = iter(data_loader)
    batched_inputs = next(data_loader_iter)

    # build anchors
    backbone = build_backbone(cfg).to(device)
    images = [x["image"].to(device) for x in batched_inputs]
    images = ImageList.from_tensors(images, backbone.size_divisibility)
    features = backbone(images.tensor.float())

    input_shape = backbone.output_shape()
    in_features = cfg.MODEL.RPN.IN_FEATURES
    anchor_generator = build_anchor_generator(
        cfg, [input_shape[f] for f in in_features])
    anchors = anchor_generator([features[f] for f in in_features])
    anchors = Boxes.cat(anchors).to(device)

    # build matcher
    raw_matcher = Matcher(cfg.MODEL.RPN.IOU_THRESHOLDS,
                          cfg.MODEL.RPN.IOU_LABELS,
                          allow_low_quality_matches=True)
    matcher = TopKMatcher(cfg.MODEL.RPN.IOU_THRESHOLDS,
                          cfg.MODEL.RPN.IOU_LABELS, 9)

    return cfg, data_loader_iter, anchors, matcher, raw_matcher
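The anchors and matchers this helper returns are typically combined through an IoU matrix. A minimal sketch, assuming gt_boxes is a detectron2 Boxes of ground-truth boxes on the same device:

from detectron2.structures import pairwise_iou

# (num_gt, num_anchors) IoU table between ground truth and anchors
match_quality_matrix = pairwise_iou(gt_boxes, anchors)
# per anchor: index of its matched gt and a fg/bg/ignore label
matched_idxs, match_labels = matcher(match_quality_matrix)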
Example #6
def benchmark_train(args):
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False)
    optimizer = build_optimizer(cfg, model)
    checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.MODEL.WEIGHTS)

    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 2
    data_loader = build_detection_train_loader(cfg)
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        data = DatasetFromList(dummy_data, copy=False, serialize=False)
        while True:
            yield from data

    max_iter = 400
    trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
        model, f(), optimizer)
    trainer.register_hooks([
        hooks.IterationTimer(),
        hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]),
        hooks.TorchProfiler(lambda trainer: trainer.iter == max_iter - 1,
                            cfg.OUTPUT_DIR,
                            save_tensorboard=True),
    ])
    trainer.train(1, max_iter)
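Note the benchmarking trick: 100 batches are materialized once and replayed through DatasetFromList, so the measured iteration time reflects model compute rather than dataloader throughput.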
Example #7
 def __init__(self, cfg):
     super().__init__()
     self.cfg = cfg.clone()
     self.cfg.DATASETS.TRAIN = cfg.DATASETS.TEST
     self._loader = iter(build_detection_train_loader(self.cfg))
     self.best_loss = float('inf')
     self.weights = None
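Only __init__ is shown; a hedged sketch of how such a validation hook might consume the stored loader (a hypothetical after_step, relying on HookBase setting self.trainer at registration):

import copy
import torch

def after_step(self):
    # pull one batch from the test-set loader and score it without gradients
    data = next(self._loader)
    with torch.no_grad():
        loss_dict = self.trainer.model(data)
        losses = sum(loss_dict.values())
    if losses < self.best_loss:
        self.best_loss = losses
        self.weights = copy.deepcopy(self.trainer.model.state_dict())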
Example #8
    def build_train_loader(cls, cfg):
        """
        custom dataloader to provide model with ground truth bounding boxes
        """

        # returns a list of dicts. Every entry in the list corresponds to one sample, represented by a dict.
        dataset_dicts = detectron2.data.get_detection_dataset_dicts(
            cfg.DATASETS.TRAIN[0])

        # add proposal boxes
        for i, s in enumerate(dataset_dicts):
            # k x 4 matrix of COCO-format [x0, y0, w, h] boxes, one row per annotation
            s["proposal_boxes"] = np.array(
                [ann["bbox"] for ann in s["annotations"]]
            )
            # a logit of 10 is sigmoid(10) ~= 0.99995, i.e. near-certain objectness
            s["proposal_objectness_logits"] = np.full(
                (s["proposal_boxes"].shape[0], ), 10)
            # XYWH_ABS (enum value 1): (x0, y0, w, h) in absolute floating-point coordinates
            s["proposal_bbox_mode"] = detectron2.structures.BoxMode.XYWH_ABS

        print("Proposal boxes added.")

        return build_detection_train_loader(
            dataset_dicts,
            mapper=DatasetMapper(is_train=True,
                                 augmentations=[],
                                 image_format=cfg.INPUT.FORMAT,
                                 precomputed_proposal_topk=500),
            total_batch_size=cfg.SOLVER.IMS_PER_BATCH,
            aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
            num_workers=cfg.DATALOADER.NUM_WORKERS)
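With precomputed_proposal_topk set, the mapper reads the proposal_boxes, proposal_objectness_logits, and proposal_bbox_mode fields added above, so the ROI heads train on these ground-truth proposals instead of RPN output.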
Example #9
 def build_train_loader(cls, cfg):
     return build_detection_train_loader(cfg,
                                         mapper=SickTreesDatasetMapper(
                                             cfg,
                                             is_train=True,
                                             nb_channels=4,
                                             augmenter=Augmenter()))
Example #10
def benchmark_data(args):
    cfg = setup(args)

    logger.info("After spawning " + RAM_msg())
    timer = Timer()
    dataloader = build_detection_train_loader(cfg)
    logger.info("Initialize loader using {} seconds.".format(timer.seconds()))

    timer.reset()
    itr = iter(dataloader)
    for i in range(10):  # warmup
        next(itr)
        if i == 0:
            startup_time = timer.seconds()
    logger.info("Startup time: {} seconds".format(startup_time))
    timer = Timer()
    max_iter = 1000
    for _ in tqdm.trange(max_iter):
        next(itr)
    logger.info("{} iters ({} images) in {} seconds.".format(
        max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()))

    # test for a few more rounds
    for k in range(10):
        logger.info(f"Iteration {k} " + RAM_msg())
        timer = Timer()
        max_iter = 1000
        for _ in tqdm.trange(max_iter):
            next(itr)
        logger.info("{} iters ({} images) in {} seconds.".format(
            max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()))
Example #11
    def build_train_loader(cls, cfg):

        return build_detection_train_loader(cfg,
                                            mapper=PersonalMapper(
                                                cfg,
                                                is_train=True,
                                                augmentations=[]))
Example #12
def benchmark_data(args):
    cfg = setup(args)

    dataloader = build_detection_train_loader(cfg)

    timer = Timer()
    itr = iter(dataloader)
    for i in range(10):  # warmup
        next(itr)
        if i == 0:
            startup_time = timer.seconds()
    timer = Timer()
    max_iter = 1000
    for _ in tqdm.trange(max_iter):
        next(itr)
    logger.info(
        "{} iters ({} images) in {} seconds.".format(
            max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()
        )
    )
    logger.info("Startup time: {} seconds".format(startup_time))
    vram = psutil.virtual_memory()
    logger.info(
        "RAM Usage: {:.2f}/{:.2f} GB".format(
            (vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3
        )
    )
Example #13
def benchmark_train(args):
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    optimizer = build_optimizer(cfg, model)
    checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.MODEL.WEIGHTS)

    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 0
    data_loader = build_detection_train_loader(cfg)
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        while True:
            yield from DatasetFromList(dummy_data, copy=False)

    max_iter = 400
    trainer = SimpleTrainer(model, f(), optimizer)
    trainer.register_hooks(
        [hooks.IterationTimer(), hooks.PeriodicWriter([CommonMetricPrinter(max_iter)])]
    )
    trainer.train(1, max_iter)
Example #14
 def build_train_loader(cls, cfg):
     try:
         data = data_dict[cfg.DATASETS.TRAIN[0].split('_')[0]]
         data_mapper = data.get('mapper', DatasetMapper)
     except KeyError:
         data_mapper = DatasetMapper
     return build_detection_train_loader(cfg, mapper=data_mapper(cfg, True))
Example #15
 def build_train_loader(cls, cfg: CfgNode):
     return build_detection_train_loader(
         cfg,
         # pylint:disable=redundant-keyword-arg,missing-kwoa
         mapper=DatasetMapper(cfg,
                              is_train=True,
                              augmentations=TRAIN_TRANSF),
     )
Example #16
 def build_train_loader(cls, cfg):
     if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
         mapper = DatasetMapper(cfg,
                                is_train=True,
                                augmentations=build_sem_seg_train_aug(cfg))
     else:
         mapper = None
     return build_detection_train_loader(cfg, mapper=mapper)
Example #17
 def build_train_loader(cls, cfg):
     mapper = RotatedDatasetMapper(cfg)
     res = build._train_loader_from_config(cfg, mapper=mapper)
     dataset = res["dataset"]
     return build_detection_train_loader(
         dataset=dataset,
         mapper=mapper,
         total_batch_size=cfg.SOLVER.IMS_PER_BATCH)
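The detour through build._train_loader_from_config resolves the dataset list from cfg once, so the explicit build_detection_train_loader call can pair that dataset with the rotated mapper instead of the config-driven default.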
Example #18
 def build_train_loader(cls, cfg):
     if 'coco' in cfg.DATASETS.TRAIN[0]:
         mapper = DqrfDatasetMapper(cfg, True)
     elif 'crowd' in cfg.DATASETS.TRAIN[0]:
         mapper = CH_DqrfDatasetMapper(cfg, True)
     else:
         mapper = None
     return build_detection_train_loader(cfg, mapper=mapper)
Example #19
 def build_train_loader(cls, cfg):
     """
     Returns:
         iterable
     It calls :func:`detectron2.data.build_detection_train_loader` with a customized
     DatasetMapper, which adds categorical labels as a semantic mask.
     """
     mapper = DatasetMapper(cfg, True)
     return build_detection_train_loader(cfg, mapper)
Example #20
 def build_train_loader(cls, cfg):
     if cfg.INPUT.AUG:
         mapper = DatasetMapper(
             cfg,
             is_train=True,
             augmentations=build_polyp_segm_train_aug(cfg))
     else:
         mapper = DatasetMapper(cfg, True)
     return build_detection_train_loader(cfg, mapper=mapper)
Example #21
 def build_train_loader(cls, cfg):
     if "PointsCollection" in cfg.MODEL.META_ARCHITECTURE:
         mapper = DatasetMapper(
             cfg,
             is_train=True,
             augmentations=build_Pt_collect_train_aug(cfg))
     else:
         mapper = None
     return build_detection_train_loader(cfg, mapper=mapper)
Example #22
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable

        dataloader for semi datasets.
        """
        return build_detection_train_loader(cfg,
                                            mapper=DatasetMapper(cfg, True))
Example #23
 def build_train_loader(cls, cfg):
     print(cfg.INPUT.MIN_SIZE_TRAIN)
     mapper = DatasetMapper(cfg,
                            is_train=True,
                            augmentations=[
                                transforms.Resize(cfg.INPUT.MIN_SIZE_TEST),
                                transforms.RandomFlip()
                            ])
     return build_detection_train_loader(cfg, mapper)
Example #24
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            storage.iter = iteration

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter - 1
            ):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and (
                (iteration + 1) % 20 == 0 or iteration == max_iter - 1
            ):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
Example #25
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`detectron2.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_detection_train_loader(cfg)
Example #26
 def build_train_loader(cls,
                        cfg,
                        aug_settings_file_path: str = None,
                        aug_on: bool = True,
                        train_val: bool = False,
                        train_type: str = 'seg',
                        aug_vis_save_path: str = 'aug_vis.png',
                        show_aug_seg: bool = False):
     if aug_on:
         aug_seq = get_augmentation(load_path=aug_settings_file_path)
         aug_loader = AugmentedLoader(cfg=cfg,
                                      train_type=train_type,
                                      aug=aug_seq,
                                      aug_vis_save_path=aug_vis_save_path,
                                      show_aug_seg=show_aug_seg)
         return build_detection_train_loader(cfg, mapper=aug_loader)
     else:
         return build_detection_train_loader(cfg, mapper=None)
Example #27
    def build_train_loader(cls, cfg, mapper=None):
        if cfg.ISPRS.MODE != "COCO":
            dataset_name = cfg.DATASETS.TRAIN[0]
            if mapper is None:
                mapper = ISPRSOnlineTrainMapper(cfg, True)

            func = DatasetCatalog[dataset_name]
            isprs = func(cfg,
                         cfg.SOLVER.IMS_PER_BATCH,
                         mapper=mapper,
                         dataset_name=dataset_name)
            return isprs
        elif cfg.ISPRS.LABEL.BOXMODE == "ROTATED":
            # mapper = ISPRSCOCOStyleMapperRotated(cfg,is_train = True, augmentations = [T.ISPRSRandomRotation([0,360])] )
            mapper = ISPRSCOCOStyleMapperRotated(cfg, is_train=True)
            return build_detection_train_loader(cfg, mapper=mapper)
        else:
            mapper = ISPRSCOCOStyleMapperAxisAligned(cfg, is_train=True)
            return build_detection_train_loader(cfg, mapper=mapper)
Example #28
 def __init__(self, cfg: CfgNode, dataset_name: str, trainer: DefaultTrainer, steps: int=10, ndata: int=5):
     super().__init__()
     self.cfg = cfg.clone()
     self.cfg.DATASETS.TRAIN = (dataset_name, )
     self._loader = iter(build_detection_train_loader(self.cfg))
     self.trainer = trainer
     self.steps = steps
     self.ndata = ndata
     self.loss_dict = {}
     self.data_time = 0
Example #29
    def build_train_loader(cls, cfg):
        """
        Builds DataLoader for train set.
        Args:
            cfg(CfgNode): a detectron2 CfgNode

        Returns:
            detectron2 DataLoader object specific to the train set.
        """
        return build_detection_train_loader(cfg)
Example #30
 def build_train_loader(cls, cfg):
     if cfg.MODE == "caffe":
         return build_detection_train_loader(cfg,
                                             mapper=DatasetMapper(
                                                 cfg, True))
     elif cfg.MODE == "d2":
         return build_detection_train_loader_with_attributes(cfg)
     else:
         raise Exception("detectron mode note supported: {}".format(
             args.model))