def parse_args():
    """Build the evaluation command line and return the parsed arguments."""
    arg_parser = ArgsParser()
    arg_parser.add_argument(
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation directory, default is current directory.")
    arg_parser.add_argument(
        '--json_eval',
        action='store_true',
        default=False,
        help='Whether to re eval with already exists bbox.json or mask.json')
    arg_parser.add_argument(
        "--slim_config",
        default=None,
        type=str,
        help="Configuration file of slim method.")
    # TODO: bias should be unified
    arg_parser.add_argument(
        "--bias",
        action="store_true",
        help="whether add bias or not while getting w and h")
    arg_parser.add_argument(
        "--classwise",
        action="store_true",
        help="whether per-category AP and draw P-R Curve or not.")
    arg_parser.add_argument(
        '--save_prediction_only',
        action='store_true',
        default=False,
        help='Whether to save the evaluation results only')
    return arg_parser.parse_args()
def _cli_bool(value):
    """Parse a command-line boolean value, case-insensitively.

    Fixes the classic ``type=bool`` argparse trap: ``bool("False")`` is
    ``True`` because every non-empty string is truthy, so ``--use_tb False``
    would previously have *enabled* the flag.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', 't', 'yes', 'y', '1')


# Build the inference CLI (on top of the project ArgsParser, which also
# supplies --config/--opt) and hand control to main().
parser = ArgsParser()
parser.add_argument(
    "--infer_dir",
    type=str,
    default=None,
    help="Directory for images to perform inference on.")
parser.add_argument(
    "--infer_img",
    type=str,
    default=None,
    help="Image path, has higher priority over --infer_dir")
parser.add_argument(
    "--output_dir",
    type=str,
    default="output",
    help="Directory for storing the output visualization files.")
parser.add_argument(
    "--draw_threshold",
    type=float,
    default=0.5,
    help="Threshold to reserve the result for visualization.")
# Was ``type=bool``, which parsed any non-empty value (including "False")
# as True; _cli_bool interprets the string correctly.
parser.add_argument(
    "--use_tb",
    type=_cli_bool,
    default=False,
    help="whether to record the data to Tensorboard.")
parser.add_argument(
    '--tb_log_dir',
    type=str,
    default="tb_log_dir/image",
    help='Tensorboard logging directory for image.')
FLAGS = parser.parse_args()
main()
help="Whether to perform evaluation in train")
# NOTE(review): ``type=bool`` does not parse CLI strings -- bool("False") is
# True, so any explicit value passed to --use_vdl enables it. Confirm intended.
parser.add_argument(
    "--use_vdl",
    default=True,
    type=bool,
    help="whether to record the data to VisualDL.")
# NOTE:args for profiler tools, used for benchmark
parser.add_argument(
    '--is_profiler',
    default=0,
    type=int,
    help='The switch of profiler tools. (used for benchmark)')
parser.add_argument(
    '--profiler_path',
    default="save_models/detection.profiler",
    type=str,
    help='The profiler output file path. (used for benchmark)')
# Parsed once at module scope; ``args`` is read by main() below.
args = parser.parse_args()


def main():
    """Training entry point: seed each worker under a distributed launch and
    load/validate the run configuration.

    NOTE(review): the body appears to continue beyond the visible span.
    """
    env = os.environ
    # Distributed mode is detected from the launcher's environment variables.
    args.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
    if args.dist:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        # Derive a per-trainer seed (fixed base + trainer id) so each worker
        # gets a distinct but reproducible random stream.
        local_seed = (99 + trainer_id)
        random.seed(local_seed)
        np.random.seed(local_seed)
    cfg = load_config(args.config)
    merge_config(args.opt)
    check_config(cfg)
    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
def main():
    """CLI entry point for YOLO anchor clustering: parse options, load the
    detection config, resolve the target image size, and run k-means-style
    anchor clustering over the training dataset.

    NOTE(review): ``anchors`` is unused in the visible span -- the function
    presumably continues beyond this view.
    """
    parser = ArgsParser()
    parser.add_argument(
        '--n', '-n', default=9, type=int, help='num of clusters')
    parser.add_argument(
        '--iters',
        '-i',
        default=1000,
        type=int,
        help='num of iterations for kmeans')
    # NOTE(review): ``type=bool`` does not parse CLI strings -- bool("False")
    # is True, so any explicit value keeps verbose enabled. Confirm intended.
    parser.add_argument(
        '--verbose', '-v', default=True, type=bool, help='whether print result')
    parser.add_argument(
        '--size',
        '-s',
        default=None,
        type=str,
        help='image size: w,h, using comma as delimiter')
    parser.add_argument(
        '--method',
        '-m',
        default='v2',
        type=str,
        help='cluster method, v2 is only supported now')
    parser.add_argument(
        '--cache_path', default='cache', type=str, help='cache path')
    parser.add_argument(
        '--cache', action='store_true', help='whether use cache')
    FLAGS = parser.parse_args()

    # FLAGS.config / FLAGS.opt are not declared above; presumably added by
    # ArgsParser itself -- verify against its definition.
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
    # check if paddlepaddle version is satisfied
    check_version()
    # get dataset
    dataset = cfg['TrainReader']['dataset']

    # Resolve the clustering image size: an explicit --size wins, either as
    # "w,h" or a single integer used for both dimensions; otherwise fall back
    # to the configured test image shape.
    if FLAGS.size:
        if ',' in FLAGS.size:
            size = list(map(int, FLAGS.size.split(',')))
            assert len(size) == 2, "the format of size is incorrect"
        else:
            size = int(FLAGS.size)
            size = [size, size]
    elif 'image_shape' in cfg['TestReader']['inputs_def']:
        # Drops the leading element of image_shape -- assumes CHW layout so
        # the remainder is [h, w]; TODO confirm.
        size = cfg['TestReader']['inputs_def']['image_shape'][1:]
    else:
        raise ValueError('size is not specified')

    # Only the 'v2' clustering method is implemented.
    if FLAGS.method == 'v2':
        cluster = YOLOv2AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,
                                      FLAGS.cache, FLAGS.iters, FLAGS.verbose)
    else:
        raise ValueError('cluster method: %s is not supported' % FLAGS.method)
    anchors = cluster()