def _load_file(self, f):
    # catalog lookup
    if f.startswith("catalog://"):
        paths_catalog = import_file(
            "detectron.config.paths_catalog", self.cfg.PATHS_CATALOG, True
        )
        catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://"):])
        self.logger.info("{} points to {}".format(f, catalog_f))
        f = catalog_f
    # download url files
    if f.startswith("http"):
        # if the file is a url path, download it and cache it
        cached_f = cache_url(f)
        self.logger.info("url {} cached in {}".format(f, cached_f))
        f = cached_f
    # convert Caffe2 checkpoint from pkl
    if f.endswith(".pkl"):
        return load_c2_format(self.cfg, f)
    # load native detectron.pytorch checkpoint
    if self.is_train and True in self.cfg.MODEL.VOVNET.STAGE_WITH_DCN:
        return load_deformable_vovnet(self.cfg, f)
    else:
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
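# A minimal usage sketch of the dispatch implemented by _load_file above. It assumes a
# DetectronCheckpointer constructed and driven through a public load() entry point, as in
# maskrcnn-benchmark-style code; that entry point, the catalog key, and the paths below are
# illustrative assumptions and are not shown in this snippet.
checkpointer = DetectronCheckpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
checkpointer.load("catalog://ImageNetPretrained/MSRA/R-50")  # resolved via ModelCatalog, then cached
checkpointer.load("https://example.com/model_final.pkl")     # downloaded and cached via cache_url
checkpointer.load("/path/to/model_final.pkl")                # Caffe2 weights, converted by load_c2_format
checkpointer.load("/path/to/model_0090000.pth")              # native checkpoint via the parent _load_file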
def setup_custom_environment(custom_module_path):
    """Load custom environment setup from a Python source file and run the setup
    function.
    """
    module = import_file("detectron.utils.env.custom_module", custom_module_path)
    assert hasattr(module, "setup_environment") and callable(
        module.setup_environment
    ), (
        "Custom environment module defined in {} does not have the "
        "required callable attribute 'setup_environment'."
    ).format(custom_module_path)
    module.setup_environment()
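# A minimal sketch of a custom environment module that satisfies the contract checked
# above: the file passed as custom_module_path only needs to expose a callable named
# setup_environment. The file name and the body below are illustrative assumptions.
#
# my_custom_env.py
def setup_environment():
    # e.g. adjust process-level settings before the rest of the code runs
    import os
    os.environ.setdefault("OMP_NUM_THREADS", "1")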
def make_data_loader(cfg, is_train=True, start_iter=0, is_for_period=False):
    if is_train:
        images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        shuffle = True
    else:
        images_per_batch = cfg.TEST.IMS_PER_BATCH
        shuffle = False
        start_iter = 0

    # group images which have similar aspect ratio. In this case, we only
    # group in two cases: those with width / height > 1, and the other way around,
    # but the code supports more general grouping strategy
    paths_catalog = import_file(
        "detectron.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST

    # If bbox aug is enabled in testing, simply set transforms to None and we will
    # apply transforms later
    transforms = (
        None
        if not is_train and cfg.TEST.BBOX_AUG.ENABLED
        else build_transforms(cfg, is_train)
    )
    datasets = build_dataset(
        dataset_list,
        transforms,
        DatasetCatalog,
        is_train or is_for_period,
        with_masks=cfg.MODEL.MASK_ON,
    )

    if is_train:
        # save category_id to label name mapping
        save_labels(datasets, cfg.OUTPUT_DIR)

    data_loaders = []
    for dataset in datasets:
        collator = (
            BBoxAugCollator()
            if not is_train and cfg.TEST.BBOX_AUG.ENABLED
            else BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY, is_train=is_train)
        )
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = dataset
        data_loader.collate_batch = collator
        data_loader.shuffle = shuffle
        data_loader.num_workers = num_workers
        data_loader.batch_size = images_per_batch
        data_loader.is_train = is_train
        data_loaders.append(data_loader)

    if is_train or is_for_period:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
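# A minimal usage sketch for the variant above (cfg is assumed to be a fully populated
# detectron-style config node). Note that this variant does not build a
# torch.utils.data.DataLoader: it attaches the loader settings (collate_batch, shuffle,
# num_workers, batch_size, is_train) directly onto each dataset object and leaves the
# actual loader construction to the caller.
train_data = make_data_loader(cfg, is_train=True)   # single (possibly concatenated) dataset
test_data = make_data_loader(cfg, is_train=False)   # list with one entry per test dataset
print(train_data.batch_size, train_data.num_workers, train_data.shuffle)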
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
    num_gpus = get_world_size()
    if is_train:
        images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, (
            "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(images_per_batch, num_gpus)
        )
        images_per_gpu = images_per_batch // num_gpus
        shuffle = True
        num_iters = cfg.SOLVER.MAX_ITER
    else:
        images_per_batch = cfg.TEST.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, (
            "TEST.IMS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(images_per_batch, num_gpus)
        )
        images_per_gpu = images_per_batch // num_gpus
        shuffle = False if not is_distributed else True
        num_iters = None
        start_iter = 0

    if images_per_gpu > 1:
        logger = logging.getLogger(__name__)
        logger.warning(
            "When using more than one image per GPU you may encounter "
            "an out-of-memory (OOM) error if your GPU does not have "
            "sufficient memory. If this happens, you can reduce "
            "SOLVER.IMS_PER_BATCH (for training) or "
            "TEST.IMS_PER_BATCH (for inference). For training, you must "
            "also adjust the learning rate and schedule length according "
            "to the linear scaling rule. See for example: "
            "https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
        )

    # group images which have similar aspect ratio. In this case, we only
    # group in two cases: those with width / height > 1, and the other way around,
    # but the code supports more general grouping strategy
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []

    paths_catalog = import_file(
        "detectron.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST

    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(dataset_list, transforms, DatasetCatalog, is_train)

    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        batch_sampler = make_batch_data_sampler(
            dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)

    if is_train:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
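# A minimal training-loop sketch for the distributed variant above. Unpacking each batch
# into (images, targets, image_ids) assumes a maskrcnn-benchmark style BatchCollator; the
# exact batch layout is not shown in this snippet, and `device` / `distributed` are assumed
# to be set up by the caller.
data_loader = make_data_loader(cfg, is_train=True, is_distributed=distributed, start_iter=0)
for iteration, (images, targets, image_ids) in enumerate(data_loader, start=1):
    images = images.to(device)
    targets = [target.to(device) for target in targets]
    # ... forward pass, loss computation, backward pass, optimizer step ...
    # during training the batch sampler yields cfg.SOLVER.MAX_ITER batches,
    # so the loop ends on its own once the schedule is complete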