Code Example #1
File: build.py Project: wuminghu666/SMOKE
def build_test_loader(cfg, is_train=False):
    path_catalog = import_file(
        "smoke.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = path_catalog.DatasetCatalog

    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)

    data_loaders = []
    for dataset in datasets:
        sampler = samplers.InferenceSampler(len(dataset))
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, 1, drop_last=False
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)

    # return every loader that was built (one per test dataset)
    return data_loaders
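A minimal sketch of how build_test_loader might be invoked, assuming the SMOKE package is importable; the module paths, the config file name, and the use of a yacs-style config object are assumptions, not something the snippet above confirms.

# Hypothetical usage sketch (module paths and config name are assumptions).
from smoke.config import cfg                     # default config node (assumed location)
from smoke.data.build import build_test_loader   # the function shown above (assumed location)

cfg.merge_from_file("configs/smoke_gn_vector.yaml")  # example config file (assumption)
cfg.freeze()

test_loaders = build_test_loader(cfg, is_train=False)  # one DataLoader per test dataset
for loader in test_loaders:
    batch = next(iter(loader))  # a single-image batch; batch size is hard-coded to 1 above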
Code Example #2
File: check_point.py Project: AIpakchoi/SMOKE
    def _load_file(self, f):
        if f.startswith("catalog://"):
            paths_catalog = import_file("smoke.config.paths_catalog",
                                        self.cfg.PATHS_CATALOG, True)
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://"):])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f

        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
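For illustration, here is a standalone sketch of the same prefix-dispatch idea behind _load_file: "catalog://" names resolve through a lookup table, "http(s)" paths are downloaded and cached, and everything else is treated as a local file. The catalog entry and function below are hypothetical and are not SMOKE's actual API.

# Standalone illustration of the dispatch pattern (hypothetical names, not SMOKE's API).
import torch
from torch.hub import load_state_dict_from_url  # downloads a URL and caches it locally

MODEL_CATALOG = {"dla34": "https://example.com/weights/dla34.pth"}  # hypothetical entry

def load_checkpoint(f):
    if f.startswith("catalog://"):
        # resolve a symbolic catalog name to a concrete URL or path
        f = MODEL_CATALOG[f[len("catalog://"):]]
    if f.startswith("http"):
        state = load_state_dict_from_url(f, map_location="cpu")
    else:
        state = torch.load(f, map_location="cpu")
    # mirror the wrapping above: always return a dict with a "model" key
    return state if "model" in state else dict(model=state)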
Code Example #3
File: build.py Project: wuminghu666/SMOKE
def make_data_loader(cfg, is_train=True):
    num_gpus = get_world_size()
    if is_train:
        images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, \
            "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used." \
                .format(images_per_batch, num_gpus)

        images_per_gpu = images_per_batch // num_gpus
    else:
        images_per_batch = cfg.TEST.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, \
            "TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used." \
                .format(images_per_batch, num_gpus)

        images_per_gpu = images_per_batch // num_gpus

    # if images_per_gpu > 1:
    #     logger = logging.getLogger(__name__)
    #     logger.warning(
    #         "When using more than one image per GPU you may encounter "
    #         "an out-of-memory (OOM) error if your GPU does not have "
    #         "sufficient memory. If this happens, you can reduce "
    #         "SOLVER.IMS_PER_BATCH (for training) or "
    #         "TEST.IMS_PER_BATCH (for inference). For training, you must "
    #         "also adjust the learning rate and schedule length according "
    #         "to the linear scaling rule. See for example: "
    #         "https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
    #     )

    # Group images that have a similar aspect ratio. Here we only distinguish
    # two cases, width / height > 1 and the other way around, although the
    # code supports a more general grouping strategy.
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []

    path_catalog = import_file(
        "smoke.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = path_catalog.DatasetCatalog

    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)

    data_loaders = []
    for dataset in datasets:
        sampler = samplers.TrainingSampler(len(dataset))
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_gpu, drop_last=True
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
            worker_init_fn=worker_init_reset_seed,
        )
        data_loaders.append(data_loader)

    if is_train:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
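A hedged sketch of the training-side call; the module paths, config file name, and batch size below are assumptions used only to show how make_data_loader fits into a training loop.

# Hypothetical training usage (paths and values are assumptions).
from smoke.config import cfg
from smoke.data.build import make_data_loader

cfg.merge_from_file("configs/smoke_gn_vector.yaml")  # example config file (assumption)
cfg.SOLVER.IMS_PER_BATCH = 8   # must be divisible by the number of GPUs in use
cfg.freeze()

train_loader = make_data_loader(cfg, is_train=True)  # a single (possibly concatenated) loader
for iteration, batch in enumerate(train_loader):
    # TrainingSampler yields indices indefinitely, so real training code stops at max_iter
    if iteration >= 10:
        break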