def main():
    """Entry point: load the ins_seg_3d config and run stage-2 fusion testing."""
    args = parse_args()
    # Import on-the-fly to avoid overwriting a cfg loaded elsewhere.
    from partnet.config.ins_seg_3d import cfg
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    purge_cfg(cfg)
    cfg.freeze()

    # NOTE(review): the original computed a config-derived path from
    # args.config_file ('configs' -> 'outputs') guarded by cfg.OUTPUT_DIR,
    # but never used it; that dead computation is removed here.
    output_dir_merge = 'outputs/pn_stage2_fusion_l%d_merge' % cfg.TEST.LEVEL
    os.makedirs(output_dir_merge, exist_ok=True)

    # Stage-1 outputs live under a fixed root keyed by the stage-1 run name.
    output_dir = osp.join('outputs/stage1/', cfg.DATASET.PartNetInsSeg.TRAIN.stage1)

    # Per-shape, per-level results directory.
    output_dir_save = './results/' + cfg.DATASET.PartNetInsSeg.TEST.shape
    os.makedirs(output_dir_save, exist_ok=True)
    output_dir_save = osp.join(output_dir_save, 'Level_%d' % cfg.TEST.LEVEL)
    os.makedirs(output_dir_save, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)

    logger = setup_logger('shaper', output_dir_save, prefix='test')
    logger.info('Using {} GPUs'.format(torch.cuda.device_count()))
    logger.info(args)
    logger.info('Loaded configuration file {}'.format(args.config_file))
    logger.info('Running with config:\n{}'.format(cfg))

    assert cfg.TASK == 'ins_seg_3d'
    test(cfg, output_dir, output_dir_merge, output_dir_save)
def run_model(args):
    """Configure the (possibly distributed) runtime, then train."""
    # Reference maskrcnn-benchmark: WORLD_SIZE is set by torch.distributed.launch.
    world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.num_gpus = world_size
    args.distributed = world_size > 1

    if not args.no_cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        args.device = "cuda"
    else:
        # No CUDA available: force single-process CPU execution.
        args.distributed = False
        args.device = "cpu"

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    # Linear LR scaling with the number of GPUs.
    args.lr = args.lr * world_size

    logger = setup_logger("semantic_segmentation", args.log_dir, get_rank(),
                          filename='{}_{}_{}_log.txt'.format(
                              args.model, args.backbone, args.dataset))
    logger.info("Using {} GPUs".format(world_size))
    logger.info(args)

    trainer = Trainer(args)
    trainer.train()
    torch.cuda.empty_cache()
def main():
    """Parse CLI args, set up distributed training, and run FADA train/test."""
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Training")
    parser.add_argument(
        "-cfg",
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    # One process per GPU when launched with torch.distributed.launch.
    world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = world_size > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    logger = setup_logger("FADA", output_dir, args.local_rank)
    logger.info("Using {} GPUs".format(world_size))
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    fea, cls = train(cfg, args.local_rank, args.distributed)

    if not args.skip_test:
        run_test(cfg, fea, cls, args.local_rank, args.distributed)
def main():
    """Load the classification config, prepare the output dir, and train."""
    args = parse_args()

    # Import on-the-fly so a stale module-level cfg is never clobbered.
    from shaper.config.classification import cfg
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    purge_cfg(cfg)
    cfg.freeze()

    # '@' in OUTPUT_DIR is a placeholder for the config path (configs/ -> outputs/).
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        stem = osp.splitext(args.config_file)[0]
        output_dir = output_dir.replace('@', stem.replace('configs', 'outputs'))
        os.makedirs(output_dir, exist_ok=True)

    logger = setup_logger('shaper', output_dir, prefix='train')
    logger.info('Using {} GPUs'.format(torch.cuda.device_count()))
    logger.info(args)

    from core.utils.torch_util import collect_env_info
    logger.info('Collecting env info (might take some time)\n' + collect_env_info())
    logger.info('Loaded configuration file {}'.format(args.config_file))
    logger.info('Running with config:\n{}'.format(cfg))

    assert cfg.TASK == 'classification'
    train(cfg, output_dir)
def main():
    """Compute the pseudo-label confidence threshold and run target testing."""
    parser = argparse.ArgumentParser(description="PyTorch Target Pseudo Label Testing")
    parser.add_argument(
        "-cfg",
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    out_dir = cfg.OUTPUT_DIR
    if out_dir:
        mkdir(out_dir)

    logger = setup_logger("pseudo_label", out_dir, 0)
    logger.info(cfg)
    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    # Constant threshold derived from the config drives pseudo-label selection.
    thres_const = get_threshold(cfg)
    test(cfg, thres_const)
def main():
    """Load the ins_seg_3d config, prepare output dirs, and run stage-2 training."""
    print('begin program\n')
    args = parse_args()

    # Import on-the-fly to avoid overwriting a cfg loaded elsewhere.
    from partnet.config.ins_seg_3d import cfg
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    purge_cfg(cfg)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        # '@' in OUTPUT_DIR stands for the config path (configs/ -> outputs/).
        stem = osp.splitext(args.config_file)[0].replace('configs', 'outputs')
        output_dir_merge = output_dir.replace('@', stem) + '_merge'
        os.makedirs(output_dir_merge, exist_ok=True)

    # Stage-1 checkpoints live under a fixed root keyed by the stage-1 run name.
    output_dir = osp.join('outputs/stage1/', cfg.DATASET.PartNetInsSeg.TRAIN.stage1)
    os.makedirs(output_dir, exist_ok=True)

    logger = setup_logger('shaper', output_dir_merge, prefix='train')
    logger.info('Using {} GPUs'.format(torch.cuda.device_count()))
    logger.info(args)
    logger.info('Loaded configuration file {}'.format(args.config_file))
    logger.info('Running with config:\n{}'.format(cfg))

    assert cfg.TASK == 'ins_seg_3d'
    train(cfg, output_dir, output_dir_merge)
def __init__(self, cfg):
    """
    Build the full training state from a config: data loader, model,
    optimizer, LR scheduler, checkpointer, and hooks.

    Args:
        cfg (CfgNode): the full training configuration.
    """
    logger = logging.getLogger("core")
    if not logger.isEnabledFor(
            logging.INFO):  # setup_logger is not called for core
        setup_logger()

    # Assume these objects must be constructed in this order:
    # the data loader is needed to auto-scale hyper-parameters before the
    # model and optimizer are built from the (rescaled) cfg.
    data_loader = self.build_train_loader(cfg)
    cfg = self.auto_scale_hyperparams(cfg, data_loader)
    model = self.build_model(cfg)
    optimizer = self.build_optimizer(cfg, model)

    # For training, wrap with DDP. But don't need this for inference.
    if comm.get_world_size() > 1:
        # ref to https://github.com/pytorch/pytorch/issues/22049 to set `find_unused_parameters=True`
        # for part of the parameters is not updated.
        model = DistributedDataParallel(model,
                                        device_ids=[comm.get_local_rank()],
                                        broadcast_buffers=False)

    super().__init__(model, data_loader, optimizer)

    self.scheduler = self.build_lr_scheduler(cfg, optimizer)
    # Assume no other objects need to be checkpointed.
    # We can later make it checkpoint the stateful hooks
    self.checkpointer = Checkpointer(
        # Assume you want to save checkpoints together with logs/statistics
        model,
        cfg.OUTPUT_DIR,
        save_to_disk=comm.is_main_process(),
        optimizer=optimizer,
        scheduler=self.scheduler,
    )
    self.start_iter = 0
    # SWA extends training past MAX_ITER by the SWA iteration budget.
    if cfg.SOLVER.SWA.ENABLED:
        self.max_iter = cfg.SOLVER.MAX_ITER + cfg.SOLVER.SWA.ITER
    else:
        self.max_iter = cfg.SOLVER.MAX_ITER
    self.cfg = cfg

    self.register_hooks(self.build_hooks())
def __init__(self, args):
    """Create the per-model working directory and a timestamped run logger."""
    # Working directory: <output>/<model>
    self.work_path = os.path.join(args.output, args.model)
    if not os.path.exists(self.work_path):
        os.makedirs(self.work_path)

    # Timestamped run name, e.g. "06-01_12-30-45".
    self.model_name = datetime.datetime.now().strftime("%m-%d_%H-%M-%S")
    self.logger = setup_logger(
        "EarthLearning",
        self.work_path,
        filename='{}_{}_log.txt'.format(self.model_name, args.model))
    self.logger.info(args)
def default_setup(cfg, args):
    """
    Perform basic common setups at the beginning of a job:

    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    main_process = comm.is_main_process()
    if main_process and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            PathManager.open(args.config_file, "r").read()))
    logger.info("Running with full config:\n{}".format(cfg))

    if main_process and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(os.path.abspath(path)))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng()

    # cudnn benchmark has large overhead. It shouldn't be used considering the
    # small size of typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def main():
    """Build the segmentation model, restore weights, and run test inference."""
    parser = argparse.ArgumentParser(description="PyTorch Segmentation Inference")
    parser.add_argument(
        "--config-file",
        default="./configs/Encoder_UNet.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    logger = setup_logger("core", cfg.OUTPUT_DIR)
    logger.info(cfg)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    # Build the model and restore the requested checkpoint.
    model = build_segmentation_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    checkpointer = SegmentationCheckpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    # One output folder per test dataset, named after the encoder/architecture.
    dataset_names = cfg.DATASETS.TEST
    output_folders = [None] * len(cfg.DATASETS.TEST)
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            folder = os.path.join(
                cfg.OUTPUT_DIR, "inference",
                cfg.MODEL.ENCODER + '_' + cfg.MODEL.ARCHITECTURE, dataset_name)
            mkdir(folder)
            output_folders[idx] = folder
    else:
        raise RuntimeError("Output directory is missing!")

    test_data_loaders = make_data_loader(cfg, split='test')
    for output_folder, dataset_name, loader in zip(
            output_folders, dataset_names, test_data_loaders):
        inference(
            model,
            loader,
            dataset_name=dataset_name,
            device=cfg.MODEL.DEVICE,
            output_folder=output_folder,
        )
def main():
    """Run one NNI trial: merge tuner params into the config, train, and
    optionally test the best model."""
    parser = argparse.ArgumentParser(description="PyTorch Segmentation")
    parser.add_argument(
        "--config-file",
        default="./configs/Encoder_UNet.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    # BUG FIX: the original used `type=bool`, which maps ANY non-empty string
    # (including "False") to True, since bool("False") is True. Parse the
    # text explicitly; the default stays True, so callers are unaffected.
    parser.add_argument(
        "--skip-test",
        default=True,
        help="whether to run testing script with the best model",
        type=lambda s: str(s).lower() in ("true", "1", "yes"),
    )
    args = parser.parse_args()

    # Flatten the tuner's dict into [key1, value1, key2, value2, ...],
    # the form cfg.merge_from_list expects.
    tuner_params = nni.get_next_parameter()
    tuner_params_list = []
    for key, value in tuner_params.items():
        tuner_params_list.append(key)
        tuner_params_list.append(value)

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(tuner_params_list)
    # Unique output dir per (config, tuner-trial) combination.
    cfg.update({'OUTPUT_DIR': os.path.join(
        './training_dir',
        os.path.basename(args.config_file).split('.yaml')[0],
        '_'.join([str(i) for i in tuner_params_list]))})
    cfg.freeze()

    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.MODEL.GPU_NUM)

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    logger = setup_logger("core", output_dir)
    logger.info(args)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))
    logger.debug(tuner_params)

    best_model = train(cfg)
    if not args.skip_test:
        run_test(cfg, best_model)
def main():
    """Evaluate FADA checkpoints; a resume *directory* means test them all."""
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Testing")
    parser.add_argument(
        "-cfg",
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument('--saveres', action="store_true",
                        help='save the result')
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Put results next to the checkpoint: <ckpt dir>/<ckpt name without ext>/
    ckpt_dir, ckpt_name = os.path.split(cfg.resume)
    save_dir = os.path.join(ckpt_dir, os.path.splitext(ckpt_name)[0])
    if save_dir:
        mkdir(save_dir)

    logger = setup_logger("FADA", save_dir, 0)
    logger.info(cfg)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if os.path.isdir(cfg.resume):
        test_all(cfg, args.saveres)
    else:
        test(cfg, args.saveres)
def __init__(self, image_dir, result_dir, label_dir=None, calib_dir=None,
             calib_file=None, online=False, save_dir=None):
    """Visualizer for detection results; displays online or saves to disk."""
    # Path configuration.
    self.image_dir = image_dir
    self.result_dir = result_dir
    # Only needed to visualize 3d bboxes; may stay None otherwise.
    self.label_dir = label_dir
    # Either one calib per image (calib_dir) or a single shared calib_file.
    self.calib_dir = calib_dir
    self.calib_file = calib_file

    # Classes are registered later; each gets a unique random color.
    self.classes = []
    self.colors = []
    self.side_plane_colors = []
    # for i in range(6):
    #     self.side_plane_colors.append(self.get_random_color())
    self.front_side_color = (255, 0, 255)

    # Display interactively, or dump rendered images to save_dir.
    self.online = online
    if not online:
        assert save_dir is not None, \
            'save_dir should be specified when not in online mode'
    self.save_dir = save_dir

    self.logger = setup_logger()

    # Image resizing bounds.
    self.max_size = 1280
    self.min_size = 384

    # Stats.
    self.start_ind = 0
def __init__(self, args):
    """Build the eval configuration and an offline ImageVisualizer."""
    # Logger first so config generation can report problems.
    self.logger = setup_logger()
    self.args = args
    self.config = self.generate_config(args, self.logger)

    self.data_config = self.config['eval_data_config']
    self.dataset_config = self.data_config['dataset_config']
    # Prepend an explicit background class.
    self.classes = ['bg'] + self.dataset_config['classes']
    self.n_classes = len(self.classes)

    # One random color per class (background included).
    colors = [self.get_random_color() for _ in range(self.n_classes)]

    self.eval_config = self.config['eval_config']
    self.thresh = self.eval_config['thresh']
    self.nms = self.eval_config['nms']

    # Hard-coded KITTI-style paths for offline visualization.
    image_dir = '/data/object/training/image_2'
    result_dir = './results/data'
    save_dir = 'results/images'
    calib_dir = '/data/object/training/calib'
    label_dir = None
    calib_file = None
    self.visualizer = ImageVisualizer(
        image_dir,
        result_dir,
        label_dir=label_dir,
        calib_dir=calib_dir,
        calib_file=calib_file,
        online=False,
        save_dir=save_dir)
    self.visualizer.colors = colors
    self.visualizer.classes = self.classes
def main():
    """Evaluate checkpoints; a resume *directory* means test them all."""
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Testing")
    parser.add_argument(
        "-cfg",
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Empty save_dir -> presumably console-only logging; verify setup_logger.
    logger = setup_logger("TEST", "", 0)
    logger.info(cfg)
    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if os.path.isdir(cfg.resume):
        test_all(cfg)
    else:
        test(cfg)
def main():
    """Load the config and initialize semantic class-distribution statistics."""
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Training")
    parser.add_argument(
        "-cfg",
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    out_dir = cfg.OUTPUT_DIR
    if out_dir:
        mkdir(out_dir)

    logger = setup_logger("semantic_dist_init", out_dir, 0)
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    semantic_dist_init(cfg)
dataset_config = data_config['dataset_config'] # disable dataset file,just use image directly dataset_config['dataset_file'] = None dataset_config['demo_file'] = args.img_path dataset_config['calib_file'] = args.calib_file if args.img_dir: dataset_config = data_config['dataset_config'] # disable dataset file,just use image directly dataset_config['dataset_file'] = None dataset_config['img_dir'] = args.img_dir if args.calib_file: dataset_config = data_config['dataset_config'] dataset_config['calib_file'] = args.calib_file if args.calib_dir: dataset_config = data_config['dataset_config'] dataset_config['calib_dir'] = args.calib_dir return config if __name__ == '__main__': args = parse_args() # first setup logger logger = setup_logger() config = generate_config(args, logger) test(config, logger)
args.distributed = num_gpus > 1 if not args.no_cuda and torch.cuda.is_available(): cudnn.benchmark = True args.device = "cuda" else: args.distributed = False args.device = "cpu" if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend="nccl", init_method="env://") synchronize() # TODO: optim code args.save_pred = True if args.save_pred: outdir = '../runs/pred_pic/{}_{}_{}'.format(args.model, args.backbone, args.dataset) if not os.path.exists(outdir): os.makedirs(outdir) logger = setup_logger("semantic_segmentation", args.log_dir, get_rank(), filename='{}_{}_{}_log.txt'.format( args.model, args.backbone, args.dataset), mode='a+') evaluator = Evaluator(args) evaluator.eval()
args.semantic_b ) if args.val_backdoor else 'val_clean_{}_{}_{}_{}_{}_{}_{}_{}_log.txt'.format( args.model, args.backbone, args.dataset, args.attack_method, args.poison_rate, args.test_semantic_mode, args.semantic_a, args.semantic_b) else: if args.attack_method == "blend": filename = '{}_{}_{}_{}_{}_log.txt'.format(args.model, args.backbone, args.dataset, args.poison_rate, args.alpha) # elif (args.attack_method == "semantic" or args.attack_method=="semantic_s"): else: filename = '{}_{}_{}_{}_{}_{}_{}_log.txt'.format( args.model, args.backbone, args.dataset, args.attack_method, args.poison_rate, args.semantic_a, args.semantic_b) logger = setup_logger("semantic_segmentation", args.log_dir, get_rank(), filename) logger.info("Using {} GPUs".format(num_gpus)) logger.info(args) trainer = Trainer(args) if not args.val_only: trainer.train() else: trainer.validation() torch.cuda.empty_cache()
num_gpus = len(run_config['gpu_ids']) run_config['num_gpus'] = num_gpus config['optim_config'][ 'batch_size'] = num_gpus * config['optim_config']['batch_size'] else: run_config['distributed'] = False run_config['device'] = "cpu" if run_config['distributed']: torch.cuda.set_device(run_config['local_rank']) torch.distributed.init_process_group(backend="nccl", init_method="env://") synchronize() # TODO: optim code run_config['save_pred'] = True run_config[ 'eval_model'] = r'E:\repos\awesome-semantic-segmentation-pytorch\runs\experiments\fcn_resnet50_LR0.0001_001\models\fcn_resnet50_LR0.0001_001_200000.pth' if run_config['save_pred']: if not os.path.exists(run_config['path']['pred_pic']): os.makedirs(run_config['path']['pred_pic']) logger = setup_logger("semantic_segmentation", run_config['path']['log_dir'], get_rank(), filename='{}_log.txt'.format(util.get_timestamp()), mode='a+') evaluator = Evaluator(config) evaluator.eval() torch.cuda.empty_cache()
args.distributed = num_gpus > 1 if not args.no_cuda and torch.cuda.is_available(): cudnn.benchmark = True args.device = "cuda" else: args.distributed = False args.device = "cpu" if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group( backend="nccl", init_method="env://" ) synchronize() args.lr = args.lr * num_gpus logger = setup_logger( "semantic_segmentation", os.path.join(args.project_dir, args.task_dir), get_rank(), filename="{}_{}_{}_log.txt".format( args.model, args.backbone, args.dataset ), ) logger.info("Using {} GPUs".format(num_gpus)) logger.info(args) trainer = Trainer(args, logger) trainer.train() torch.cuda.empty_cache()
def main():
    """Run (optionally distributed) detection inference over all test sets."""
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # One process per GPU when launched with torch.distributed.launch.
    world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = world_size > 1
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    logger = setup_logger("core", "", get_rank())
    logger.info("Using {} GPUs".format(world_size))
    logger.info(cfg)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    num_parameters = sum(param.nelement() for param in model.parameters())
    logger.info('# parameters totally: ' + str(num_parameters))

    checkpointer = DetectronCheckpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
    _ = checkpointer.load(cfg.MODEL.WEIGHT, is_train=False)
    # Tag the inference folder with the checkpoint name (extension stripped).
    suffix = cfg.MODEL.WEIGHT.split('/')[-1][:-4]

    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints",)

    # One output folder per test dataset.
    dataset_names = cfg.DATASETS.TEST
    output_folders = [None] * len(cfg.DATASETS.TEST)
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            folder = os.path.join(cfg.OUTPUT_DIR, "inference_" + suffix, dataset_name)
            mkdir(folder)
            output_folders[idx] = folder

    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for output_folder, dataset_name, data_loader_val in zip(
            output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.FCOS_ON or cfg.MODEL.PACKDET_ON
            or cfg.MODEL.RETINAPACK_ON or cfg.MODEL.RETINANET_ON
            else cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
        # NOTE(review): collapsed source hides indentation — synchronize()
        # is placed inside the loop, matching the maskrcnn-benchmark pattern.
        synchronize()
def main():
    """Load a checkpointed feature extractor + classifier, run inference over
    the target test split, and draw a per-image t-SNE plot of the features
    (pixels with the ignore label are excluded)."""
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Testing")
    parser.add_argument(
        "-cfg",
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = ""
    logger = setup_logger("TSNE", save_dir, 0)
    logger.info(cfg)
    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    logger = logging.getLogger("TSNE.tester")
    logger.info("Start")
    device = torch.device(cfg.MODEL.DEVICE)

    feature_extractor = build_feature_extractor(cfg)
    feature_extractor.to(device)
    classifier = build_classifier(cfg)
    classifier.to(device)

    if cfg.resume:
        logger.info("Loading checkpoint from {}".format(cfg.resume))
        # Load on CPU first, then strip any DataParallel 'module.' prefix.
        checkpoint = torch.load(cfg.resume, map_location=torch.device('cpu'))
        feature_extractor_weights = strip_prefix_if_present(
            checkpoint['feature_extractor'], 'module.')
        feature_extractor.load_state_dict(feature_extractor_weights)
        classifier_weights = strip_prefix_if_present(checkpoint['classifier'],
                                                     'module.')
        classifier.load_state_dict(classifier_weights)

    feature_extractor.eval()
    classifier.eval()
    torch.cuda.empty_cache()

    dataset_name = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        mkdir(output_folder)

    test_data = build_dataset(cfg, mode='test', is_source=False)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=cfg.TSNE.BATCH_SIZE,
                                              shuffle=False,
                                              num_workers=4,
                                              pin_memory=True,
                                              sampler=None)

    for batch in tqdm(test_loader):
        x, y, name = batch
        x = x.cuda(non_blocking=True)
        y = y.cuda(non_blocking=True).long()

        pred, feat, outs = inference(feature_extractor, classifier, x, y)

        # Keep only the basename-like second path component when present.
        filename = name[0] if len(
            name[0].split("/")) < 2 else name[0].split("/")[1]

        # draw t-sne: flatten (B, A, Ht, Wt) features to (B*Ht*Wt, A) rows.
        B, A, Ht, Wt = outs.size()
        tSNE_features = outs.permute(0, 2, 3, 1).contiguous().view(B * Ht * Wt, A)
        # Downsample the labels to the feature-map resolution.
        tSNE_labels = F.interpolate(y.unsqueeze(0).float(),
                                    size=(Ht, Wt),
                                    mode='nearest').squeeze(0).long()
        tSNE_labels = tSNE_labels.contiguous().view(B * Ht * Wt, )
        mask = (tSNE_labels != cfg.INPUT.IGNORE_LABEL
                )  # remove IGNORE_LABEL pixels
        tSNE_labels = tSNE_labels[mask]
        tSNE_features = tSNE_features[mask]
        draw(tSNE_features=tSNE_features,
             tSNE_labels=tSNE_labels,
             name=filename,
             cfg=cfg)
shutil.copyfile(filename, best_filename) if __name__ == '__main__': args = parse_args() # reference maskrcnn-benchmark num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 args.num_gpus = num_gpus args.distributed = num_gpus > 1 if not args.no_cuda and torch.cuda.is_available(): cudnn.benchmark = True args.device = "cuda" else: args.distributed = False args.device = "cpu" if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend="nccl", init_method="env://") synchronize() args.lr = args.lr * num_gpus logger = setup_logger("mobilenetv3_segmentation", args.log_dir, get_rank(), filename='{}_{}_log.txt'.format( args.model, args.dataset)) logger.info("Using {} GPUs".format(num_gpus)) logger.info(args) trainer = Trainer(args) trainer.train() torch.cuda.empty_cache()
def test_logger():
    """Smoke-test logger setup by emitting a single INFO record."""
    log = setup_logger('test')
    log.info('asdg')