def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.

    This uses the special metadata "evaluator_type" associated with each builtin
    dataset. For your own dataset, you can simply create an evaluator manually in
    your script and do not have to worry about the hacky if-else logic here.

    Args:
        cfg (CfgNode): the full config; only OUTPUT_DIR is read here.
        dataset_name (str): a registered dataset name in MetadataCatalog.
        output_folder (str or None): directory for evaluator outputs;
            defaults to "<OUTPUT_DIR>/inference".

    Returns:
        DatasetEvaluator or DatasetEvaluators: a single evaluator, or a
        composite when the dataset type maps to several evaluators.

    Raises:
        NotImplementedError: if no evaluator is known for the dataset type.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            ))
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(
            COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(
            COCOPanopticEvaluator(dataset_name, output_folder))
    if evaluator_type == "cityscapes_instance":
        # Ranks are 0-indexed, so this process needs device_count > rank.
        # (">=" was an off-by-one: it would pass when rank == device_count.)
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        # Same 0-indexed rank check as above.
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    elif evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    elif evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(
                dataset_name, evaluator_type))
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
def setup(args):
    """Build a frozen config from CLI args, run default setup, and
    initialize the "masktextspotter" logger. Returns the config."""
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    setup_logger(
        output=config.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="masktextspotter",
    )
    return config
def log_info(cfg, runner):
    """Log the process layout, environment info, full config, and runner."""
    procs_per_machine = get_num_processes_per_machine()
    current_rank = comm.get_rank()
    logger.info(
        "Using {} processes per machine. Rank of current process: {}".format(
            procs_per_machine, current_rank
        )
    )
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Running with full config:\n{}".format(cfg))
    logger.info("Running with runner: {}".format(runner))
def setup(args):
    """Build the Mesh R-CNN config from CLI args and set up logging.

    Adds the meshrcnn defaults on top of the base config before merging
    the config file and CLI overrides; returns the frozen config.
    """
    config = get_cfg()
    get_meshrcnn_cfg_defaults(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    # Setup logger for "meshrcnn" module
    setup_logger(
        output=config.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="meshrcnn",
    )
    return config
def setup(args):
    """
    Create configs and perform basic setups.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    set_global_cfg(config)
    default_setup(config, args)
    # Set up one logger per module namespace.
    rank = comm.get_rank()
    for logger_name in ("doublefewshot", "OurPaper"):
        setup_logger(
            output=config.OUTPUT_DIR,
            distributed_rank=rank,
            name=logger_name,
        )
    return config
def setup(args):
    """Build the config (with gambler extensions) from CLI args, set up
    logging, publish the global config, and return the frozen config."""
    config = get_cfg()
    add_gambler_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    setup_logger(
        output=config.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="imbalance detection",
    )
    set_global_cfg(config)
    return config
def setup(args):
    """Load either a YAML (CfgNode) or a python (LazyConfig) config file,
    apply CLI overrides, initialize logging, and return the config."""
    is_yaml = args.config_file.endswith(".yaml")
    if not is_yaml:
        config = LazyConfig.load(args.config_file)
        config = LazyConfig.apply_overrides(config, args.opts)
    else:
        config = get_cfg()
        config.merge_from_file(args.config_file)
        config.SOLVER.BASE_LR = 0.001  # Avoid NaNs. Not useful in this script anyway.
        config.merge_from_list(args.opts)
        config.freeze()
    setup_logger(distributed_rank=comm.get_rank())
    return config
def _file_storage_gather(
    storage: SingleProcessFileTensorStorage,
    dst_rank: int = 0,
    mode: str = "rb",
) -> Optional[MultiProcessFileTensorStorage]:
    """Gather per-rank storage file paths onto ``dst_rank``.

    Closes the local storage file, collects every rank's file path, and on
    the destination rank returns a multi-process storage over those files;
    every other rank gets None.
    """
    storage.storage_impl.close()
    gathered_paths = gather(storage.fpath, dst=dst_rank)
    if get_rank() != dst_rank:
        return None
    # Map rank index -> that rank's storage file path.
    rank_to_fpath = dict(enumerate(gathered_paths))
    return MultiProcessFileTensorStorage(storage.data_schema, rank_to_fpath, mode)
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)
        if cfg.OWOD.COMPUTE_ENERGY:
            PathManager.mkdirs(os.path.join(output_dir, cfg.OWOD.ENERGY_SAVE_PATH))
        if cfg.OWOD.ENABLE_CLUSTERING:
            PathManager.mkdirs(os.path.join(output_dir, cfg.OWOD.FEATURE_STORE_SAVE_PATH))

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        # Fix: the original read the file via a bare PathManager.open(...).read(),
        # leaking the file handle; use a context manager to close it.
        with PathManager.open(args.config_file, "r") as cf:
            config_file_contents = cf.read()
        logger.info(
            "Contents of args.config_file={}:\n{}".format(
                args.config_file, config_file_contents
            )
        )

    logger.info("Running with full config:\n{}".format(cfg))
    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(path))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def __init__(self, size: int):
    """
    Args:
        size (int): the total number of data of the underlying dataset to sample from
    """
    self._size = size
    assert size > 0
    self._rank = comm.get_rank()
    self._world_size = comm.get_world_size()
    # Ceil-divide so the shards cover all `size` items; every rank gets the
    # same shard width and the last shard is clipped to the dataset end.
    per_shard = (self._size + self._world_size - 1) // self._world_size
    self.begin = per_shard * self._rank
    self.end = min(self.begin + per_shard, self._size)
def setup(args):
    """
    Create configs and perform basic setups.
    """
    config = get_cfg()
    # Layer PointRend and point supervision options onto the base config.
    add_pointrend_config(config)
    add_point_sup_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    # Setup logger for "point_sup" module
    setup_logger(
        output=config.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="point_sup",
    )
    return config
def _ram_storage_gather(
    storage: SingleProcessRamTensorStorage, dst_rank: int = 0
) -> Optional[MultiProcessRamTensorStorage]:
    """Gather per-rank in-RAM storage buffers onto ``dst_rank``.

    Rewinds the local buffer, gathers every rank's raw bytes, and on the
    destination rank returns a multi-process storage over per-rank BytesIO
    buffers; every other rank gets None.

    Fix: the return annotation is now Optional[...] — the function returns
    None on non-destination ranks, matching `_file_storage_gather`.
    """
    storage.storage_impl.seek(0, os.SEEK_SET)
    # TODO: overhead, pickling a bytes object, can just pass bytes in a tensor directly
    # see detectron2/utils.comm.py
    data_list = gather(storage.storage_impl.read(), dst=dst_rank)
    if get_rank() != dst_rank:
        return None
    rank_to_buffer = {
        i: io.BytesIO(data_list[i]) for i in range(len(data_list))
    }
    storage = MultiProcessRamTensorStorage(storage.data_schema, rank_to_buffer)
    return storage