import argparse
import os

import numpy as np
import torch

# Helper imports. The module paths below follow the upstream wetectron layout
# (a maskrcnn-benchmark fork); adjust them if this tree differs. update_iters,
# train, train_cdb, and run_test are defined elsewhere in this file.
from wetectron.config import cfg
from wetectron.utils.collect_env import collect_env_info
from wetectron.utils.comm import synchronize, get_rank
from wetectron.utils.logger import setup_logger
from wetectron.utils.miscellaneous import mkdir, save_config, seed_all_rng


def worker_init_reset_seed(worker_id):
    # Derive a distinct, per-worker seed from the parent process RNG so that
    # DataLoader workers do not produce identical random augmentations.
    seed_all_rng(np.random.randint(2**31) + worker_id)
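# A minimal usage sketch (an assumption, not shown in this excerpt): an
# initializer like this is typically passed to a PyTorch DataLoader through
# its worker_init_fn parameter, e.g.:
#
#   loader = torch.utils.data.DataLoader(
#       dataset,                                # any torch Dataset
#       batch_size=2,
#       num_workers=4,
#       worker_init_fn=worker_init_reset_seed,  # different seed per worker
#   )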
def main():
    parser = argparse.ArgumentParser(description="wetectron training")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--data_path",
        type=str,
        help="path to the training dataset",
    )
    parser.add_argument(
        "--proposal_path",
        type=str,
        help="path to the training proposal files",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    parser.add_argument(
        "--use-tensorboard",
        dest="use_tensorboard",
        help="Use tensorboardX logger (requires tensorboardX installed)",
        action="store_true",
    )

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1

    #################### ADDED CODE ####################
    # An earlier revision resolved these paths through Azure ML instead of CLI
    # arguments: Run.get_context() -> workspace -> default datastore, then
    # Dataset.get_by_name(ws, 'datasets/voc') and 'datasets/proposal' mounted
    # via as_named_input(...).as_mount(), locating the proposal .pkl files
    # with glob. That code is superseded by --data_path / --proposal_path.
    print('Proposals path', args.proposal_path)
    print('Dataset path', args.data_path)
    print('PROPOSALS SET FILES', os.listdir(args.proposal_path))
    ####################################################

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    update_iters()

    ######## ADDED ########
    # Point the config at the mounted dataset and Selective Search proposals.
    # PATH_DATA_TRAIN is a new config key introduced by this script.
    cfg.PATH_DATA_TRAIN = args.data_path
    cfg.PROPOSAL_FILES.TRAIN = (
        os.path.join(args.proposal_path, 'SS-voc_2007_train-boxes.pkl'),
        os.path.join(args.proposal_path, 'SS-voc_2007_val-boxes.pkl'),
    )
    #######################

    cfg.freeze()

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + get_rank())

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    logger = setup_logger("wetectron", output_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    output_config_path = os.path.join(cfg.OUTPUT_DIR, 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    # save overloaded model config in the output directory
    save_config(cfg, output_config_path)

    logger.info('CFG_DIRE {}'.format(cfg.PATH_DATA_TRAIN))

    if cfg.DB.METHOD == "concrete":
        model = train_cdb(
            cfg=cfg,
            local_rank=args.local_rank,
            distributed=args.distributed,
            use_tensorboard=args.use_tensorboard,
        )
    else:
        model = train(
            cfg=cfg,
            local_rank=args.local_rank,
            distributed=args.distributed,
            use_tensorboard=args.use_tensorboard,
        )

    if not args.skip_test:
        run_test(cfg, model, args.distributed)
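# The excerpt above ends without an entry point; the guard below follows the
# usual convention for scripts of this kind (an assumption, since the original
# excerpt does not show it).
if __name__ == "__main__":
    main()

# Example launch (illustrative GPU count, config name, and paths, not from the
# original). The env:// init method above expects torch.distributed.launch to
# export WORLD_SIZE and pass --local_rank to each process:
#
#   python -m torch.distributed.launch --nproc_per_node=4 tools/train_net.py \
#       --config-file configs/voc/my_config.yaml \
#       --data_path /mnt/datasets/voc \
#       --proposal_path /mnt/datasets/proposal \
#       --use-tensorboard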