def simple_default_setup(cfg):
    """Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
    """
    output_dir = cfg.OUTPUT_DIR
    if output_dir and comm.is_main_process():
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    # The fvcore logger is configured first, then the default project logger.
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info(
        "Rank of current process: {}. World size: {}".format(rank, comm.get_world_size())
    )
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Running with full config:\n{}".format(cfg))

    if output_dir and comm.is_main_process():
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        backup_path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(backup_path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(os.path.abspath(backup_path)))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
def log_info(cfg, runner):
    """Log process topology, environment info, the full config, and the runner."""
    num_processes = get_num_processes_per_machine()
    topology_msg = "Using {} processes per machine. Rank of current process: {}".format(
        num_processes, comm.get_rank()
    )
    logger.info(topology_msg)
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Running with full config:\n{}".format(cfg))
    logger.info("Running with runner: {}".format(runner))
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode or omegaconf.DictConfig): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        # Fix: use a context manager so the config-file handle is closed
        # promptly (the original left an open handle to the GC).
        with PathManager.open(args.config_file, "r") as f:
            config_file_contents = f.read()
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            _highlight(config_file_contents, args.config_file),
        ))

    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        if isinstance(cfg, CfgNode):
            logger.info("Running with full config:\n{}".format(
                _highlight(cfg.dump(), ".yaml")))
            with PathManager.open(path, "w") as f:
                f.write(cfg.dump())
        else:
            # LazyConfig (omegaconf) configs have their own serializer.
            LazyConfig.save(cfg, path)
        logger.info("Full config saved to {}".format(path))

    # make sure each worker has a different, yet deterministic seed if specified
    seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
    seed_all_rng(None if seed < 0 else seed + rank)

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = _try_get_key(
            cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
        )
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)
        # OWOD-specific output sub-directories for energy values and the
        # clustering feature store.
        if cfg.OWOD.COMPUTE_ENERGY:
            PathManager.mkdirs(os.path.join(output_dir, cfg.OWOD.ENERGY_SAVE_PATH))
        if cfg.OWOD.ENABLE_CLUSTERING:
            PathManager.mkdirs(os.path.join(output_dir, cfg.OWOD.FEATURE_STORE_SAVE_PATH))

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        # Fix: close the config file after reading (the original leaked the
        # handle returned by PathManager.open).
        with PathManager.open(args.config_file, "r") as f:
            config_file_contents = f.read()
        logger.info(
            "Contents of args.config_file={}:\n{}".format(
                args.config_file, config_file_contents
            )
        )

    logger.info("Running with full config:\n{}".format(cfg))

    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(path))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def my_default_setup(cfg, args):
    """Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        mmcv.mkdir_or_exist(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank)
    for _mod in ["PIL", "chardet"]:  # disable DEBUG logs
        logging.getLogger(_mod).setLevel(logging.INFO)

    # NOTE(review): `logger` is not bound locally here, so this relies on a
    # module-level logger — confirm it is defined at file scope.
    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        # Fix: close the config file after reading (the original leaked the
        # handle returned by PathManager.open).
        with PathManager.open(args.config_file, "r") as f:
            config_file_contents = f.read()
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file, config_file_contents))

    logger.info("Running with full config:\n{}".format(cfg))

    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of a config backup
        # in the output directory; this variant saves it under the original
        # config file's basename via mmcv-style cfg.dump(path) instead of a
        # fixed "config.yaml".
        path = osp.join(output_dir, osp.basename(args.config_file))
        cfg.dump(path)
        logger.info("Full config saved to {}".format(path))

    # NOTE(review): assert is stripped under `python -O`; kept as-is so callers
    # expecting an AssertionError are unaffected.
    assert (
        args.num_gpus <= torch.cuda.device_count() and args.num_gpus >= 1
    ), f"args.num_gpus: {args.num_gpus}, available num gpus: {torch.cuda.device_count()}"

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def log_system_info():
    """Log process topology, the FBLearner workflow run id (if set), and environment info."""
    logger.info(
        "Using {} processes per machine. Rank of current process: {}".format(
            get_num_processes_per_machine(), comm.get_rank()
        )
    )
    workflow_id = os.getenv("WORKFLOW_RUN_ID", None)
    if workflow_id is not None:
        logger.info("FBLearner Flow Run ID: {}".format(workflow_id))
    logger.info("Environment info:\n" + collect_env_info())
    # Best-effort: the fbcode helper only exists in internal builds.
    try:
        from detectron2.fb.utils import print_fbcode_info

        print_fbcode_info()
    except ImportError:
        pass
def setup(args):
    """
    Create configs and setup logger from arguments and the given config file.

    Args:
        args (argparse.NameSpace): parsed command line arguments.

    Returns:
        CfgNode: the frozen, fully merged config.
    """
    cfg = get_shapenet_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # register dataset
    data_dir, splits_file = register_shapenet(cfg.DATASETS.NAME)
    cfg.DATASETS.DATA_DIR = data_dir
    cfg.DATASETS.SPLITS_FILE = splits_file

    ##
    cfg.PRETRAINED_MODEL = args.trained_model_from_Pix_2_Vox
    cfg.PRETRAINED_MODEL2 = args.trained_model_from_Mesh_RCNN
    ##

    # if data was copied the data dir has changed
    if args.copy_data:
        cfg.DATASETS.DATA_DIR = args.data_dir
    cfg.freeze()

    colorful_logging = not args.no_color
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        os.makedirs(output_dir, exist_ok=True)
    comm.synchronize()

    logger = setup_logger(
        output_dir, color=colorful_logging, name="shapenet", distributed_rank=comm.get_rank()
    )
    logger.info(
        "Using {} GPUs per machine. Rank of current process: {}".format(
            args.num_gpus, comm.get_rank()
        )
    )
    logger.info(args)

    logger.info("Environment info:\n" + collect_env_info())
    # Fix: read the config file with a context manager so the handle is closed
    # (the original relied on GC to close the open() result).
    with open(args.config_file, "r") as f:
        config_file_contents = f.read()
    logger.info(
        "Loaded config file {}:\n{}".format(args.config_file, config_file_contents)
    )
    logger.info("Running with full config:\n{}".format(cfg))

    if comm.is_main_process() and output_dir:
        path = os.path.join(output_dir, "config.yaml")
        with open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(os.path.abspath(path)))
    return cfg
pass try: import cv2 data.append(("cv2", cv2.__version__)) except ImportError: data.append(("cv2", "Not found")) env_str = tabulate(data) + "\n" env_str += collect_torch_env() return env_str if __name__ == "__main__": try: import detectron2 # noqa except ImportError: print(collect_env_info()) else: from detectron2.utils.collect_env import collect_env_info print(collect_env_info()) if torch.cuda.is_available(): for k in range(torch.cuda.device_count()): device = f"cuda:{k}" try: x = torch.tensor([1, 2.0], dtype=torch.float32) x = x.to(device) except Exception: print(f"Unable to copy tensor to device={device}")
def test(self):
    """Smoke test: collecting environment info should not raise."""
    collect_env_info()
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    # The work dir is named after the config file; on AML it is placed on the
    # mounted data store, otherwise under ./work_dirs. The defrost/freeze dance
    # is needed because cfg arrives frozen.
    config_name = os.path.splitext(os.path.basename(args.config_file))[0]
    if args.aml:
        if args.itp:
            # ITP mounts the data store under its last path component at /.
            data_store = '/' + os.environ['AZUREML_DATAREFERENCE_{}'.format(
                args.aml_data_store)].split('/')[-1]
        else:
            data_store = os.environ['AZUREML_DATAREFERENCE_{}'.format(
                args.aml_data_store)]
        new_output_dir = os.path.join(data_store, args.aml_work_dir_prefix, config_name)
    else:
        new_output_dir = os.path.join('./work_dirs', config_name)
    cfg.defrost()
    cfg.OUTPUT_DIR = new_output_dir
    cfg.freeze()
    if args.aml:
        # The original echoes the output directory only on the AML path.
        print('output directory: ', cfg.OUTPUT_DIR)

    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        # Fix: close the config file after reading (the original leaked the
        # handle returned by PathManager.open).
        with PathManager.open(args.config_file, "r") as f:
            config_file_contents = f.read()
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file, config_file_contents))

    logger.info("Running with full config:\n{}".format(cfg))

    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(path))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
if idx == max_iter: break model(d) pbar.update() logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) if __name__ == "__main__": parser = default_argument_parser() parser.add_argument("--task", choices=["train", "eval", "data", "data_advanced"], required=True) args = parser.parse_args() assert not args.eval_only logger.info("Environment info:\n" + collect_env_info()) if "data" in args.task: print("Initial " + RAM_msg()) if args.task == "data": f = benchmark_data if args.task == "data_advanced": f = benchmark_data_advanced elif args.task == "train": """ Note: training speed may not be representative. The training cost of a R-CNN model varies with the content of the data and the quality of the model. """ f = benchmark_train elif args.task == "eval": f = benchmark_eval