def main():
    """Training entry point: parse args, set up the output dir, seed RNGs, and train.

    Typo fix: "Configration" -> "Configuration" in the load message
    (matches the spelling used by the sibling inference script).
    """
    args = parse_args()

    # Overlay an optional YAML config on top of the defaults.
    if len(args.config_file) > 0:
        print('Configuration file is loaded from {}'.format(args.config_file))
        cfg.merge_from_file(args.config_file)

    # Default output dir is timestamped so repeated runs never collide.
    if len(args.output_dirname) == 0:
        dt_now = datetime.datetime.now()
        output_dirname = os.path.join(
            'output', str(dt_now.date()) + '_' + str(dt_now.time()))
    else:
        output_dirname = args.output_dirname
    cfg.OUTPUT_DIR = output_dirname
    cfg.freeze()
    print('output dirname: {}'.format(cfg.OUTPUT_DIR))

    # Seed every RNG source for reproducibility.
    torch.manual_seed(cfg.SEED)
    np.random.seed(cfg.SEED)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        # torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed(cfg.SEED)
    else:
        raise Exception('GPU not found.')

    # Persist the run configuration unless this is a debug run.
    if not args.debug:
        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
        if len(args.config_file) > 0:  # idiom: positive test instead of `not len(...) == 0`
            shutil.copy2(args.config_file,
                         os.path.join(cfg.OUTPUT_DIR, 'config.yaml'))

    train(args, cfg)
def main():
    """Test entry point: parse args, configure cuDNN, load the config, and run test.

    Typo fix: "configration" -> "configuration" in the printed message.
    """
    parser = argparse.ArgumentParser(
        description='Perceptual Extreme Super Resolution for NTIRE2020')
    parser.add_argument('--config_file', type=str, default='', metavar='FILE')
    parser.add_argument('--output_dirname', type=str, default='test')
    parser.add_argument('--num_workers', type=int, default=8)
    parser.add_argument('--trained_model', type=str,
                        default='weights/iteration_400000.pth')
    parser.add_argument('--num_gpus', type=int, default=4)
    parser.add_argument('--center_crop', type=str2bool, default=False)
    parser.add_argument('--ensemble', type=str2bool, default=False)
    parser.add_argument('--small', type=str2bool, default=False)
    args = parser.parse_args()

    cuda = torch.cuda.is_available()
    if cuda:
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = True

    # Overlay an optional YAML config on top of the defaults.
    if len(args.config_file) > 0:
        print('Loaded configuration file {}'.format(args.config_file))
        cfg.merge_from_file(args.config_file)
    cfg.freeze()

    test(args, cfg)
def main():
    """Inference entry point: derive the image output dir from the model name and run test.

    Bug fix: the banner printed ``cfg.OUTPUT_DIR`` although the directory just
    configured is stored in ``cfg.OUTPUT_DIRNAME`` — it now prints the value it set.
    """
    args = parse_args()

    # Overlay an optional YAML config on top of the defaults.
    if len(args.config_file) > 0:
        print('Configuration file is loaded from {}'.format(args.config_file))
        cfg.merge_from_file(args.config_file)

    # Results go under output/images/<trained model path without extension>.
    output_dirname = os.path.join('output', "images",
                                  os.path.splitext(args.trained_model)[0])
    os.makedirs(output_dirname, exist_ok=True)
    cfg.OUTPUT_DIRNAME = output_dirname
    cfg.freeze()
    print('OUTPUT DIRNAME: {}'.format(cfg.OUTPUT_DIRNAME))

    # Seed every RNG source for reproducibility.
    torch.manual_seed(cfg.SEED)
    np.random.seed(cfg.SEED)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        # torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed(cfg.SEED)
    else:
        raise Exception('GPU not found')

    test(args, cfg)
def main():
    """Command-line front end for the SSD demo: build the config, then run prediction."""
    cli = argparse.ArgumentParser(description="SSD Demo.")
    cli.add_argument("--config-file", default="", metavar="FILE",
                     help="path to config file", type=str)
    cli.add_argument("--ckpt", type=str, default=None, help="Trained weights.")
    cli.add_argument("--score_threshold", type=float, default=0.7)
    cli.add_argument("--images_dir", default='demo', type=str,
                     help='Specify a image dir to do prediction.')
    cli.add_argument("--output_dir", default='demo/result', type=str,
                     help='Specify a image dir to save predicted images.')
    cli.add_argument("--dataset_type", default="voc", type=str,
                     help='Specify dataset type. Currently support voc and coco.')
    cli.add_argument("opts", help="Modify config options using the command-line",
                     default=None, nargs=argparse.REMAINDER)
    args = cli.parse_args()
    print(args)

    # YAML file first, then KEY VALUE overrides taken from the command line.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Echo the raw config file followed by the fully-merged configuration.
    print("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as fh:
        print("\n" + fh.read())
    print("Running with config:\n{}".format(cfg))

    run_demo(cfg=cfg,
             ckpt=args.ckpt,
             score_threshold=args.score_threshold,
             images_dir=args.images_dir,
             output_dir=args.output_dir,
             dataset_type=args.dataset_type)
def main():
    """Training entry point for NTIRE2020 extreme SR.

    Parses arguments, merges the config, prepares and archives the run
    directory, seeds CUDA, and starts training.

    Fixes: the ``exists()``-then-``makedirs()`` pair is replaced with
    ``os.makedirs(..., exist_ok=True)`` (no race between check and create),
    and "configration" typos are corrected.
    """
    parser = argparse.ArgumentParser(
        description='Perceptual Extreme Super-Resolution for NTIRE2020')
    parser.add_argument('-c', '--config_file', type=str, default='',
                        metavar='FILE', help='path to config file')
    parser.add_argument('-m', '--mixed_precision', type=str2bool,
                        default=False, help='')
    parser.add_argument('-t', '--tensorboard', type=str2bool, default=True)
    parser.add_argument('-nw', '--num_workers', type=int, default=24, help='')
    parser.add_argument('-ls', '--log_step', type=int, default=50, help='')
    parser.add_argument('-ss', '--save_step', type=int, default=1000, help='')
    parser.add_argument('-es', '--eval_step', type=int, default=1000, help='')
    parser.add_argument('-o', '--output_dirname', type=str, default='', help='')
    parser.add_argument('-r', '--resume_iter', type=int, default=0, help='')
    parser.add_argument('-p', '--pretrain', type=str2bool, default=False, help='')
    parser.add_argument('-s', '--sync_batchnorm', type=str2bool,
                        default=False, help='')
    parser.add_argument('-l', '--load_model_path', type=str, default='', help='')
    args = parser.parse_args()

    # load configuration file
    if len(args.config_file) > 0:
        print('Loaded configuration file {}'.format(args.config_file))
        cfg.merge_from_file(args.config_file)

    # define output folder name for saving logs (timestamped by default)
    if len(args.output_dirname) == 0:
        dt_now = datetime.datetime.now()
        output_dirname = str(dt_now.date()) + '_' + str(dt_now.time())
    else:
        output_dirname = args.output_dirname
    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, output_dirname)
    cfg.freeze()

    # archive configuration and arguments alongside the run
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    if len(args.config_file) > 0:
        shutil.copy(args.config_file, cfg.OUTPUT_DIR)
    argsfile_path = os.path.join(cfg.OUTPUT_DIR, "args.txt")
    save_args(args, argsfile_path)

    # setting for cuda
    torch.manual_seed(cfg.SEED)
    cuda = torch.cuda.is_available()
    if cuda:
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed(cfg.SEED)

    print('Running with config:\n{}'.format(cfg))

    # train model
    train(args, cfg)
def main():
    """SSD evaluation entry point: parse args, set up (distributed) CUDA,
    load the config, log it, and evaluate.

    Cleanup: removed a commented-out duplicate ``evaluation(...)`` call that
    preceded the logger setup.
    """
    parser = argparse.ArgumentParser(
        description='SSD Evaluation on VOC and COCO dataset.')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--ckpt",
        help=
        "The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
        type=str,
    )
    parser.add_argument("--output_dir",
                        default="eval_results",
                        type=str,
                        help="The directory to store evaluation results.")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # WORLD_SIZE is injected by the torch.distributed launcher; default to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if torch.cuda.is_available():
        # This flag allows you to enable the inbuilt cudnn auto-tuner to
        # find the best algorithm to use for your hardware.
        torch.backends.cudnn.benchmark = True
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    evaluation(cfg, ckpt=args.ckpt, distributed=distributed)
def main():
    """SSD training entry point: parse args, set up (distributed) CUDA,
    load the config, train, then evaluate unless ``--skip-test``.

    Fix: the ``--finetune`` help text was copy-pasted from ``--sr``
    ("train with channel sparsity regularization"); it now describes the flag.
    """
    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Training With PyTorch')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument('--log_step',
                        default=10,
                        type=int,
                        help='Print logs every log_step')
    parser.add_argument('--save_step',
                        default=2500,
                        type=int,
                        help='Save checkpoint every save_step')
    parser.add_argument(
        '--eval_step',
        default=2500,
        type=int,
        help='Evaluate dataset every eval_step, disabled when eval_step < 0')
    parser.add_argument('--use_tensorboard', default=True, type=str2bool)
    parser.add_argument('--sr',
                        dest='sr',
                        action='store_true',
                        help='train with channel sparsity regularization')
    parser.add_argument('--finetune',
                        dest='finetune',
                        action='store_true',
                        help='finetune the model')
    parser.add_argument('--s',
                        type=float,
                        default=0.0001,
                        help='scale sparse rate (default: 0.0001)')
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # WORLD_SIZE is injected by the torch.distributed launcher; default to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.num_gpus = num_gpus

    if torch.cuda.is_available():
        # This flag allows you to enable the inbuilt cudnn auto-tuner to
        # find the best algorithm to use for your hardware.
        torch.backends.cudnn.benchmark = True
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if cfg.OUTPUT_DIR:
        mkdir(cfg.OUTPUT_DIR)

    logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    model = train(cfg, args)

    if not args.skip_test:
        logger.info('Start evaluating...')
        torch.cuda.empty_cache()  # speed up evaluating after training finished
        do_evaluation(cfg, model, distributed=args.distributed)
def main():
    """SSD pruning entry point: load a trained model, prune it with the chosen
    pruner, report FLOPs/params before and after, and save the pruned model.

    Fixes: the ``--pruner``/``--pruneratio`` help texts were copy-pasted
    ("architecture to use") and now describe the options; the two ``if``
    branches that built identical ``kwargs`` are merged.
    """
    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Training With PyTorch')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument('--log_step',
                        default=10,
                        type=int,
                        help='Print logs every log_step')
    parser.add_argument('--save_step',
                        default=1,
                        type=int,
                        help='Save checkpoint every save_step')
    parser.add_argument(
        '--eval_step',
        default=1,
        type=int,
        help='Evaluate dataset every eval_step, disabled when eval_step < 0')
    parser.add_argument('--use_tensorboard', default=True, type=str2bool)
    parser.add_argument(
        '--pruner',
        default='SlimmingPruner',
        type=str,
        choices=['AutoSlimPruner', 'SlimmingPruner', 'l1normPruner'],
        help='pruner to use')
    parser.add_argument('--pruneratio',
                        default=0.4,
                        type=float,
                        help='channel prune ratio')
    parser.add_argument('--sr',
                        dest='sr',
                        action='store_true',
                        help='train with channel sparsity regularization')
    parser.add_argument('--s',
                        type=float,
                        default=0.0001,
                        help='scale sparse rate (default: 0.0001)')
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # WORLD_SIZE is injected by the torch.distributed launcher; default to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.num_gpus = num_gpus

    if torch.cuda.is_available():
        # This flag allows you to enable the inbuilt cudnn auto-tuner to
        # find the best algorithm to use for your hardware.
        torch.backends.cudnn.benchmark = True
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    ######
    ## prune
    ###########
    # Load the trained model; `newmodel` receives the pruned architecture.
    model = build_detection_model(cfg)
    newmodel = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    _ = checkpointer.load()
    model.eval()
    newmodel.eval()

    # l1norm and slimming pruners share the same keyword interface.
    if args.pruner in ('l1normPruner', 'SlimmingPruner'):
        kwargs = {'pruneratio': args.pruneratio}
    elif args.pruner == 'AutoSlimPruner':
        kwargs = {'prunestep': 16, 'constrain': 200e6}
    pruner = prune.__dict__[args.pruner](model=model,
                                         newmodel=newmodel,
                                         args=args,
                                         **kwargs)
    pruner.prune()

    ##---------count op
    input = torch.randn(1, 3, 320, 320)
    flops, params = profile(model, inputs=(input, ), verbose=False)
    flops, params = clever_format([flops, params], "%.3f")
    flopsnew, paramsnew = profile(newmodel, inputs=(input, ), verbose=False)
    flopsnew, paramsnew = clever_format([flopsnew, paramsnew], "%.3f")
    logger.info("flops:{}->{}, params: {}->{}".format(flops, flopsnew, params,
                                                      paramsnew))

    save_path = os.path.join(cfg.OUTPUT_DIR, "pruned_model.pth")
    torch.save(newmodel, save_path)