Example #1
 def build_train_loader(cls, cfg):
     """
     It now calls :func:`detectron2.data.build_detection_train_loader`.
     Override it if you'd like a different data loader.
     """
     # TODO: the custom mapper's augmentation is not working yet, so it stays
     # disabled and mapper=None falls back to detectron2's default DatasetMapper.
     # mapper = cls.build_mapper(cfg, is_train=True)
     return build_detection_train_loader(cfg, mapper=None)
Example #2
    def build_train_loader(cls, cfg: CfgNode) -> DataLoader:
        """
        Instantiate the training data loader.

        Args:
            cfg (CfgNode):  The global config.

        Returns:
            a DataLoader yielding formatted training examples.
        """
        mapper = PanelSegDatasetMapper(cfg, is_train=True)
        return build_detection_train_loader(cfg, mapper=mapper)
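
An override like the one above is normally plugged into a DefaultTrainer subclass, which calls the classmethod once while building the trainer. A minimal sketch (the Trainer name is an assumption; PanelSegDatasetMapper is the mapper from the example above):

from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer

class Trainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        # DefaultTrainer invokes this classmethod during __init__ to create
        # the training data loader.
        mapper = PanelSegDatasetMapper(cfg, is_train=True)
        return build_detection_train_loader(cfg, mapper=mapper)

# trainer = Trainer(cfg)
# trainer.resume_or_load(resume=False)
# trainer.train()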
Example #3
def build_as_detection_loader(cfg, root):
    # Register the train/test splits and attach class metadata. The d=d default
    # argument binds the current loop value inside each lambda.
    for d in ["train", "test"]:
        DatasetCatalog.register("grasp_" + d,
                                lambda d=d: get_grasp_dicts(root, mode=d))
        MetadataCatalog.get("grasp_" + d).set(thing_classes=["grasps"])
    grasp_metadata = MetadataCatalog.get("grasp_train")  # handy for visualization

    trainloader = build_detection_train_loader(cfg, mapper=cgrcnn_mapper)
    return trainloader
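
Note that build_detection_train_loader(cfg, ...) reads cfg.DATASETS.TRAIN to decide which registered datasets to load, so the names registered above have to be wired into the config before the loader is built. A small sketch (the root path is a placeholder):

cfg.DATASETS.TRAIN = ("grasp_train",)
cfg.DATASETS.TEST = ("grasp_test",)
train_loader = build_as_detection_loader(cfg, root="/path/to/grasp/dataset")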
Example #4
 def build_train_loader1(cls, cfg: CfgNode, mapper=None):
     # If no mapper is supplied, fall back to the default DatasetMapper with an
     # explicit resize / random-crop / random-flip augmentation pipeline driven
     # by the INPUT.* config keys.
     if mapper is None:
         mapper = DatasetMapper(
             cfg=cfg,
             is_train=True,
             augmentations=[
                 T.ResizeShortestEdge(
                     cfg.INPUT.MIN_SIZE_TRAIN,
                     cfg.INPUT.MAX_SIZE_TRAIN,
                     cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
                 ),
                 T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
                 T.RandomFlip(),
             ],
         )
     return build_detection_train_loader(cfg, mapper=mapper)
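
The augmentation arguments above map directly onto detectron2's stock INPUT config section. For reference, these are the keys the fallback mapper reads, shown here with the library's default values (verify against your own config):

cfg.INPUT.MIN_SIZE_TRAIN = (800,)              # short-edge sizes sampled during training
cfg.INPUT.MAX_SIZE_TRAIN = 1333                # cap on the long edge
cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"   # or "range"
cfg.INPUT.CROP.TYPE = "relative_range"
cfg.INPUT.CROP.SIZE = [0.9, 0.9]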
Example #5
    def get_training_dataloader(self, dataset_name=None):
        """
        Get the dataloader with the given datasetname.
        By default it will create the dataset using the last 
        created dataset. 
        """

        if dataset_name is None:
            dataset_name = self._history[-1].name
            max_iter = self.calculate_iterations_for_cur_datasets()
        else:
            assert dataset_name in self._history.all_dataset_names
            max_iter = self._cfg.SOLVER.MAX_ITER / self.total_rounds

        self._history[-1].training_iter = max_iter

        cfg = self._cfg.clone()
        cfg.defrost()
        cfg.DATASETS.TRAIN = tuple(self._history.all_dataset_names)
        dataloader = build_detection_train_loader(cfg)

        return dataloader, max_iter
Example #6
def mapper_train_loader(cls, cfg):
    return build_detection_train_loader(cfg, mapper=TrainMapper(cfg))
Example #7
 def build_train_loader(cls, cfg):
     # `mapper` refers to a dataset-mapper callable defined elsewhere in the
     # original module; it is passed through to the default loader builder.
     return build_detection_train_loader(cfg, mapper=mapper)
Example #8
def custom_train_loader(cfg):
    return build_detection_train_loader(cfg,
                                        mapper=CustomDatasetMapper(cfg, True))
Example #9
def get_lvis_train_dataloader(cfg, h, w):
    # `wrapper` is a helper defined elsewhere in the source; functools.partial
    # binds the default mapper and the target height/width to it so the result
    # can be used as a per-sample mapper.
    default_mapper = DatasetMapper(cfg, is_train=True)
    mapper = partial(wrapper, default_m=default_mapper, h=h, w=w)
    dl = build_detection_train_loader(cfg, mapper=mapper)
    return dl
Example #10
 def build_train_loader(cls, cfg):
     return build_detection_train_loader(cfg,
                                         mapper=MyDatasetMapper(cfg, True))
Example #11
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(model,
                                         cfg.OUTPUT_DIR,
                                         optimizer=optimizer,
                                         scheduler=scheduler)

    start_iter = (checkpointer.resume_or_load(
        cfg.MODEL.WEIGHTS,
        resume=resume,
    ).get("iteration", -1) + 1)
    if cfg.SOLVER.RESET_ITER:
        logger.info('Reset loaded iteration. Start training from iteration 0.')
        start_iter = 0
    max_iter = cfg.SOLVER.MAX_ITER if cfg.SOLVER.TRAIN_ITER < 0 else cfg.SOLVER.TRAIN_ITER

    periodic_checkpointer = PeriodicCheckpointer(checkpointer,
                                                 cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=max_iter)

    writers = ([
        CommonMetricPrinter(max_iter),
        JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        TensorboardXWriter(cfg.OUTPUT_DIR),
    ] if comm.is_main_process() else [])


    # Pick the dataset mapper: the default one, or one built with the project's
    # custom augmentation pipeline when INPUT.CUSTOM_AUG is set.
    mapper = DatasetMapper(cfg, True) if cfg.INPUT.CUSTOM_AUG == '' else \
        DatasetMapper(cfg, True, augmentations=build_custom_augmentation(cfg, True))
    if cfg.DATALOADER.SAMPLER_TRAIN in [
            'TrainingSampler', 'RepeatFactorTrainingSampler'
    ]:
        data_loader = build_detection_train_loader(cfg, mapper=mapper)
    else:
        from centernet.data.custom_dataset_dataloader import build_custom_train_loader
        data_loader = build_custom_train_loader(cfg, mapper=mapper)

    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = iteration + 1
            storage.step()
            loss_dict = model(data)

            losses = sum(loss for k, loss in loss_dict.items())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {k: v.item() \
                for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced,
                                    **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()

            storage.put_scalar("lr",
                               optimizer.param_groups[0]["lr"],
                               smoothing_hint=False)

            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0
                    and iteration % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter):
                do_test(cfg, model)
                comm.synchronize()

            if iteration - start_iter > 5 and \
                (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

        total_time = time.perf_counter() - start_time
        logger.info("Total training time: {}".format(
            str(datetime.timedelta(seconds=int(total_time)))))
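
A do_train loop like this is usually driven by a small launcher in the spirit of detectron2's plain_train_net.py. A rough sketch (the setup() helper is an assumption, and do_test, logger and build_custom_augmentation are defined elsewhere in the original source):

from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.modeling import build_model

def setup(args):
    cfg = get_cfg()
    # NOTE: extend cfg with the project-specific keys used above
    # (SOLVER.RESET_ITER, SOLVER.TRAIN_ITER, INPUT.CUSTOM_AUG) here,
    # otherwise merge_from_file() will reject them.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg

def main(args):
    cfg = setup(args)
    model = build_model(cfg)
    do_train(cfg, model, resume=args.resume)

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(main, args.num_gpus, num_machines=args.num_machines,
           machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,))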