FLAGS.draw_threshold, bbox_results, mask_results) save_name = get_save_image_name(FLAGS.output_dir, image_path) logger.info( "Detection bbox results save in {}".format(save_name)) image.save(save_name, quality=95) except (StopIteration, fluid.core.EOFException): loader.reset() if __name__ == '__main__': paddle.enable_static() parser = ArgsParser() parser.add_argument("--infer_dir", type=str, default=None, help="Directory for images to perform inference on.") parser.add_argument( "--infer_img", type=str, default=None, help="Image path, has higher priority over --infer_dir") parser.add_argument( "--output_dir", type=str, default="output", help="Directory for storing the output visualization files.") parser.add_argument( "--draw_threshold", type=float, default=0.5,
resolution = None if 'Mask' in cfg.architecture or cfg.architecture == 'HybridTaskCascade': resolution = model.mask_head.resolution eval_run(exe, compile_program, loader, clsid2catid, catid2name, keys, values, cls, cfg, sub_eval_prog, sub_keys, sub_values, resolution) #print(cfg['EvalReader']['dataset'].__dict__) # evaluation # if map_type not set, use default 11point, only use in VOC eval map_type = cfg.map_type if 'map_type' in cfg else '11point' save_only = getattr(cfg, 'save_prediction_only', False) eval_results(cfg.metric, dataset=dataset) if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "--json_eval", action='store_true', default=False, help="Whether to re eval with already exists bbox.json or mask.json") parser.add_argument( "-f", "--output_eval", default=None, type=str, help="Evaluation file directory, default is current directory.") FLAGS = parser.parse_args() main()
if FLAGS.save_inference: feeded_var_names = ['image', 'im_size'] targets = list(fetches.values()) fluid.io.save_inference_model(save_dir + '/infer', feeded_var_names, targets, exe, eval_prog) logger.info("Best test box ap: {}, in step: {}".format( best_box_ap_list[0], best_box_ap_list[1])) train_loader.reset() if __name__ == '__main__': parser = ArgsParser() parser.add_argument("-r", "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument("-t", "--teacher_config", default=None, type=str, help="Config file of teacher architecture.") parser.add_argument("--teacher_pretrained", default=None, type=str, help="Whether to use pretrained model.") parser.add_argument( "--output_eval", default=None, type=str,
im_ids = res['im_id'][0] for im_id in im_ids: image_path = imid2path[int(im_id)] image = Image.open(image_path).convert('RGB') image = visualize_results(image, int(im_id), catid2name, FLAGS.draw_threshold, bbox_results, mask_results) save_name = get_save_image_name(FLAGS.output_dir, image_path) logger.info("Detection bbox results save in {}".format(save_name)) image.save(save_name, quality=95) if __name__ == '__main__': parser = ArgsParser() parser.add_argument("--infer_dir", type=str, default=None, help="Directory for images to perform inference on.") parser.add_argument( "--infer_img", type=str, default=None, help="Image path, has higher priority over --infer_dir") parser.add_argument( "--output_dir", type=str, default="output", help="Directory for storing the output visualization files.") parser.add_argument( "--draw_threshold", type=float, default=0.5,
face_eval_run( exe, eval_prog, fetches, img_root_dir, gt_file, pred_dir=pred_dir, eval_mode=FLAGS.eval_mode, multi_scale=FLAGS.multi_scale) if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "-d", "--dataset_dir", default=None, type=str, help="Dataset path, same as DataFeed.dataset.dataset_dir") parser.add_argument( "-f", "--output_eval", default=None, type=str, help="Evaluation file directory, default is current directory.") parser.add_argument( "-e", "--eval_mode", default="widerface", type=str, help="Evaluation mode, include `widerface` and `fddb`, default is `widerface`." )
map_type = cfg.map_type if 'map_type' in cfg else '11point' eval_results(results, cfg.metric, cfg.num_classes, resolution, is_bbox_normalized, FLAGS.output_eval, map_type, dataset=dataset) if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "--json_eval", action='store_true', default=False, help="Whether to re eval with already exists bbox.json or mask.json") parser.add_argument( "-f", "--output_eval", default=None, type=str, help="Evaluation file directory, default is current directory.") parser.add_argument( "-p", "--pruned_params", default=None, type=str, help="The parameters to be pruned when calculating sensitivities.")
model = create(main_arch) startup_prog = fluid.Program() infer_prog = fluid.Program() with fluid.program_guard(infer_prog, startup_prog): with fluid.unique_name.guard(): inputs_def = cfg['TestReader']['inputs_def'] inputs_def['use_dataloader'] = False feed_vars, _ = model.build_inputs(**inputs_def) test_fetches = model.test(feed_vars) infer_prog = infer_prog.clone(True) exe.run(startup_prog) checkpoint.load_params(exe, infer_prog, cfg.weights) save_serving_model(FLAGS, exe, feed_vars, test_fetches, infer_prog) dump_infer_config(FLAGS, cfg) if __name__ == '__main__': enable_static_mode() parser = ArgsParser() parser.add_argument("--output_dir", type=str, default="output", help="Directory for storing the output model files.") FLAGS = parser.parse_args() main()
def parse_args():
    """Build and parse the command-line options for MOT tracking-result
    evaluation.

    Returns:
        argparse.Namespace: the parsed options.
    """
    cli = ArgsParser()
    cli.add_argument(
        "--data_type",
        type=str,
        default='mot',
        help='Data type of tracking dataset, should be in ["mot", "kitti"]')
    cli.add_argument(
        "--det_results_dir",
        type=str,
        default=None,
        help="Directory name for detection results.")
    cli.add_argument(
        '--output_dir',
        type=str,
        default='output',
        help='Directory name for output tracking results.')
    # The remaining options are plain on/off switches sharing the same
    # store_true pattern; register them in one pass.
    for flag, msg in (
            ('--save_images', 'Save tracking results (image).'),
            ('--save_videos', 'Save tracking results (video).'),
            ('--show_image', 'Show tracking results (image).')):
        cli.add_argument(flag, action='store_true', help=msg)
    return cli.parse_args()
pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(" ")] logger.info("pruned ratios: {}".format(pruned_ratios)) sensitivity( eval_prog, place, pruned_params, test, sensitivities_file=FLAGS.sensitivities_file, pruned_ratios=pruned_ratios) if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "--output_eval", default=None, type=str, help="Evaluation directory, default is current directory.") parser.add_argument( "-d", "--dataset_dir", default=None, type=str, help="Dataset path, same as DataFeed.dataset.dataset_dir") parser.add_argument( "-s", "--sensitivities_file", default="sensitivities.data", type=str, help="The file used to save sensitivities.") parser.add_argument(
best_box_ap_list[0] = box_ap_stats[0] best_box_ap_list[1] = it save_checkpoint(exe, eval_prog, os.path.join(save_dir, "best_model"), train_prog) logger.info("Best test box ap: {}, in iter: {}".format( best_box_ap_list[0], best_box_ap_list[1])) train_loader.reset() if __name__ == '__main__': enable_static_mode() parser = ArgsParser() parser.add_argument("--loss_scale", default=8., type=float, help="Mixed precision training loss scale.") parser.add_argument("--eval", action='store_true', default=False, help="Whether to perform evaluation in train") parser.add_argument( "--output_eval", default=None, type=str, help="Evaluation directory, default is current directory.") parser.add_argument( "--not_quant_pattern", nargs='+', type=str, help=
eval_reader=eval_reader, eval_feed_list=test_feed, eval_func={'map': eval_func}, eval_fetch_list=[eval_fetch_list[0]], prune_infer_model=[["image", "im_size"], ["multiclass_nms_0.tmp_0"]], train_optimizer=None) com.config(FLAGS.slim_file) com.run() if __name__ == '__main__': parser = ArgsParser() parser.add_argument("-s", "--slim_file", default=None, type=str, help="Config file of PaddleSlim.") parser.add_argument( "--output_eval", default=None, type=str, help="Evaluation directory, default is current directory.") parser.add_argument( "-d", "--dataset_dir", default=None, type=str, help="Dataset path, same as DataFeed.dataset.dataset_dir") FLAGS = parser.parse_args() main()
map_type = cfg.map_type if 'map_type' in cfg else '11point' eval_results(results, cfg.metric, cfg.num_classes, resolution, is_bbox_normalized, FLAGS.output_eval, map_type, dataset=dataset) if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "--json_eval", action='store_true', default=False, help="Whether to re eval with already exists bbox.json or mask.json") parser.add_argument( "-f", "--output_eval", default=None, type=str, help="Evaluation file directory, default is current directory.") parser.add_argument( "--not_quant_pattern", nargs='+', type=str, help= "Layers which name_scope contains string in not_quant_pattern will not be quantized" )
image = visualize_results(image, int(im_id), catid2name, FLAGS.draw_threshold, bbox_results) save_name = get_save_image_name(FLAGS.output_dir, image_path) logger.info("Detection bbox results save in {}".format(save_name)) image.save(save_name, quality=95) if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "--json_mode", action="store_true", default=False, help="Infer image to json result") parser.add_argument( "--infer_img", type=str, default=None, help="Image path, has higher priority over --infer_dir") parser.add_argument( "--infer_dir", type=str, default=None, help="Directory for images to perform inference on.") parser.add_argument( "--random_seed", type=int,
os.path.join(save_dir, "best_model")) logger.info("Best test box ap: {}, in iter: {}".format( best_box_ap_list[0], best_box_ap_list[1])) if 'use_ema' in cfg and cfg['use_ema']: exe.run(ema.restore_program) train_loader.reset() if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "-r", # 从某一检查点恢复训练 "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument( "--fp16", # 是否使用混合精度训练模式 action='store_true', default=False, help="Enable mixed precision training.") parser.add_argument( "--loss_scale", # 设置混合精度训练模式中损失值的缩放比例 default=8., type=float, help="Mixed precision training loss scale.") parser.add_argument( "--eval", # 指定是否边训练边测试 action='store_true',
image = Image.open(image_path).convert('RGB') image = visualize_results(image, int(im_id), catid2name, FLAGS.draw_threshold, bbox_results, mask_results) save_name = get_save_image_name(FLAGS.output_dir, image_path) logger.info("Detection bbox results save in {}".format(save_name)) image.save(save_name, quality=95) if __name__ == '__main__': enable_static_mode() parser = ArgsParser() parser.add_argument("--infer_dir", type=str, default=None, help="Directory for images to perform inference on.") parser.add_argument( "--infer_img", type=str, default=None, help="Image path, has higher priority over --infer_dir") parser.add_argument( "--output_dir", type=str, default="output", help="Directory for storing the output visualization files.") parser.add_argument( "--draw_threshold", type=float, default=0.5,
eval_fetch_list=eval_fetch_list[0:1], save_eval_model=True, prune_infer_model=[["image", "im_size"], ["multiclass_nms_0.tmp_0"]], teacher_programs=teacher_programs, train_optimizer=None, distiller_optimizer=opt, log_period=20) com.config(FLAGS.slim_file) com.run() if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "-t", "--teacher_config", default=None, type=str, help="Config file of teacher architecture.") parser.add_argument( "-s", "--slim_file", default=None, type=str, help="Config file of PaddleSlim.") parser.add_argument( "-r", "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument(
checkpoint.save(exe, train_prog, os.path.join(save_dir, "best_model")) logger.info("Best test box ap: {}, in iter: {}".format( best_box_ap_list[0], best_box_ap_list[1])) if 'use_ema' in cfg and cfg['use_ema']: exe.run(ema.restore_program) train_loader.reset() if __name__ == '__main__': parser = ArgsParser() parser.add_argument("-r", "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument("--fp16", action='store_true', default=False, help="Enable mixed precision training.") parser.add_argument("--loss_scale", default=8., type=float, help="Mixed precision training loss scale.") parser.add_argument("--eval", action='store_true', default=False, help="Whether to perform evaluation in train") parser.add_argument(
def parse_args():
    """Collect the command-line options for the evaluation entry point.

    Returns:
        argparse.Namespace: the parsed options.
    """
    cli = ArgsParser()
    cli.add_argument(
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation directory, default is current directory.")
    cli.add_argument(
        '--json_eval',
        action='store_true',
        default=False,
        help='Whether to re eval with already exists bbox.json or mask.json')
    cli.add_argument(
        "--slim_config",
        default=None,
        type=str,
        help="Configuration file of slim method.")
    # TODO: bias should be unified
    cli.add_argument(
        "--bias",
        action="store_true",
        help="whether add bias or not while getting w and h")
    cli.add_argument(
        "--classwise",
        action="store_true",
        help="whether per-category AP and draw P-R Curve or not.")
    cli.add_argument(
        '--save_prediction_only',
        action='store_true',
        default=False,
        help='Whether to save the evaluation results only')
    return cli.parse_args()
ap = calculate_ap_py(results) train_loader.reset() eval_loader.reset() logger.info('rewards: ap is {}'.format(ap)) sa_nas.reward(float(ap)) current_best_tokens = sa_nas.current_info()['best_tokens'] logger.info("All steps end, the best BlazeFace-NAS structure is: ") sa_nas.tokens2arch(current_best_tokens) if __name__ == '__main__': parser = ArgsParser() parser.add_argument("-r", "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument("--fp16", action='store_true', default=False, help="Enable mixed precision training.") parser.add_argument("--loss_scale", default=8., type=float, help="Mixed precision training loss scale.") parser.add_argument("--eval", action='store_true', default=True, help="Whether to perform evaluation in train") FLAGS = parser.parse_args()
pruned_flops = flops(infer_prog) logger.info("pruned FLOPS: {}".format( float(base_flops - pruned_flops) / base_flops)) exe.run(startup_prog) checkpoint.load_checkpoint(exe, infer_prog, cfg.weights) dump_infer_config(FLAGS, cfg) save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog) if __name__ == '__main__': enable_static_mode() parser = ArgsParser() parser.add_argument("--output_dir", type=str, default="output", help="Directory for storing the output model files.") parser.add_argument( "-p", "--pruned_params", default=None, type=str, help="The parameters to be pruned when calculating sensitivities.") parser.add_argument( "--pruned_ratios", default=None, type=str, help= "The ratios pruned iteratively for each parameter when calculating sensitivities." )
vdl_writer.add_image("bbox/frame_{}".format(vdl_image_frame), infer_image_np, vdl_image_step) vdl_image_step += 1 if vdl_image_step % 10 == 0: vdl_image_step = 0 vdl_image_frame += 1 save_name = get_save_image_name(FLAGS.output_dir, image_path) logger.info("Detection bbox results save in {}".format(save_name)) image.save(save_name, quality=95) if __name__ == '__main__': parser = ArgsParser() parser.add_argument("--infer_dir", type=str, default=None, help="Directory for images to perform inference on.") parser.add_argument( "--infer_img", type=str, default=None, help="Image path, has higher priority over --infer_dir") parser.add_argument( "--output_dir", type=str, default="output", help="Directory for storing the output visualization files.") parser.add_argument( "--draw_threshold", type=float, default=0.5,
eval_keys, eval_values, eval_cls) resolution = None if 'mask' in results[0]: resolution = model.mask_head.resolution eval_results(results, eval_feed, cfg.metric, resolution, FLAGS.output_file) checkpoint.save(exe, train_prog, os.path.join(save_dir, "model_final")) train_pyreader.reset() if __name__ == '__main__': parser = ArgsParser() parser.add_argument("-r", "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument("--eval", action='store_true', default=False, help="Whether to perform evaluation in train") parser.add_argument( "-f", "--output_file", default=None, type=str, help="Evaluation file name, default to bbox.json and mask.json.") FLAGS = parser.parse_args() main()
with fluid.program_guard(infer_prog, startup_prog): with fluid.unique_name.guard(): inputs_def = cfg['TestReader']['inputs_def'] inputs_def['use_dataloader'] = False feed_vars, _ = model.build_inputs(**inputs_def) # postprocess not need in exclude_nms, exclude NMS in exclude_nms mode test_fetches = model.test(feed_vars, exclude_nms=FLAGS.exclude_nms) infer_prog = infer_prog.clone(True) check_py_func(infer_prog) exe.run(startup_prog) checkpoint.load_params(exe, infer_prog, cfg.weights) dump_infer_config(FLAGS, cfg) save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog) if __name__ == '__main__': parser = ArgsParser() parser.add_argument("--output_dir", type=str, default="output", help="Directory for storing the output model files.") parser.add_argument("--exclude_nms", action='store_true', default=False, help="Whether prune NMS for benchmark") FLAGS = parser.parse_args() main()
def parse_args():
    """Parse the command-line options for single-image / directory inference.

    Returns:
        argparse.Namespace: the parsed options.
    """

    def _str2bool(value):
        # Fix: argparse's ``type=bool`` is broken for command lines —
        # ``bool("False")`` is True, so any non-empty value enabled the flag.
        # Parse the text explicitly instead; defaults are unchanged, so the
        # fix is backward compatible for callers that never passed the flags.
        return str(value).lower() in ('true', 't', 'yes', 'y', '1')

    parser = ArgsParser()
    parser.add_argument(
        "--infer_dir",
        type=str,
        default=None,
        help="Directory for images to perform inference on.")
    parser.add_argument(
        "--infer_img",
        type=str,
        default=None,
        help="Image path, has higher priority over --infer_dir")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output visualization files.")
    parser.add_argument(
        "--draw_threshold",
        type=float,
        default=0.5,
        help="Threshold to reserve the result for visualization.")
    parser.add_argument(
        "--slim_config",
        default=None,
        type=str,
        help="Configuration file of slim method.")
    parser.add_argument(
        "--use_vdl",
        type=_str2bool,  # was type=bool: "--use_vdl False" wrongly gave True
        default=False,
        help="Whether to record the data to VisualDL.")
    parser.add_argument(
        '--vdl_log_dir',
        type=str,
        default="vdl_log_dir/image",
        help='VisualDL logging directory for image.')
    parser.add_argument(
        "--save_txt",
        type=_str2bool,  # was type=bool: same truthiness bug as --use_vdl
        default=False,
        help="Whether to save inference result in txt.")
    args = parser.parse_args()
    return args
checkpoint.load_params(exe, infer_prog, cfg.weights) infer_prog, int8_program = convert(infer_prog, place, config, save_int8=True) save_infer_model(os.path.join(FLAGS.output_dir, 'float'), exe, feed_vars, test_fetches, infer_prog) save_infer_model(os.path.join(FLAGS.output_dir, 'int'), exe, feed_vars, test_fetches, int8_program) if __name__ == '__main__': parser = ArgsParser() parser.add_argument("--output_dir", type=str, default="output", help="Directory for storing the output model files.") parser.add_argument( "--not_quant_pattern", nargs='+', type=str, help= "Layers which name_scope contains string in not_quant_pattern will not be quantized" ) FLAGS = parser.parse_args() main()
def parse_args():
    """Parse the command-line options for video/image MOT tracking.

    Returns:
        argparse.Namespace: the parsed options.
    """

    def _str2bool(value):
        # Fix: argparse's ``type=bool`` is broken for command lines —
        # ``bool("False")`` is True, so "--scaled False" wrongly enabled the
        # option. Parse the text explicitly; the default is unchanged, so the
        # fix is backward compatible for callers that never passed the flag.
        return str(value).lower() in ('true', 't', 'yes', 'y', '1')

    parser = ArgsParser()
    parser.add_argument(
        '--video_file',
        type=str,
        default=None,
        help='Video name for tracking.')
    parser.add_argument(
        '--frame_rate',
        type=int,
        default=-1,
        help='Video frame rate for tracking.')
    parser.add_argument(
        "--image_dir",
        type=str,
        default=None,
        help="Directory for images to perform inference on.")
    parser.add_argument(
        "--det_results_dir",
        type=str,
        default='',
        help="Directory name for detection results.")
    parser.add_argument(
        '--output_dir',
        type=str,
        default='output',
        help='Directory name for output tracking results.')
    parser.add_argument(
        '--save_images',
        action='store_true',
        help='Save tracking results (image).')
    parser.add_argument(
        '--save_videos',
        action='store_true',
        help='Save tracking results (video).')
    parser.add_argument(
        '--show_image',
        action='store_true',
        help='Show tracking results (image).')
    parser.add_argument(
        '--scaled',
        type=_str2bool,  # was type=bool: any non-empty value meant True
        default=False,
        help=
        "Whether coords after detector outputs are scaled, False in JDE YOLOv3 "
        "True in general detector.")
    parser.add_argument(
        "--draw_threshold",
        type=float,
        default=0.5,
        help="Threshold to reserve the result for visualization.")
    args = parser.parse_args()
    return args
if box_ap_stats[0] > best_box_ap_list[0]: best_box_ap_list[0] = box_ap_stats[0] best_box_ap_list[1] = it checkpoint.save(exe, train_prog, os.path.join(save_dir, "best_model")) logger.info("Best test box ap: {}, in iter: {}".format( best_box_ap_list[0], best_box_ap_list[1])) train_loader.reset() if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "-r", "--resume_checkpoint", default=None, type=str, help="Checkpoint path for resuming training.") parser.add_argument( "--fp16", action='store_true', default=False, help="Enable mixed precision training.") parser.add_argument( "--loss_scale", default=8., type=float, help="Mixed precision training loss scale.") parser.add_argument( "--eval", action='store_true',
server_int8_program = test_graph.to_program() fluid.io.save_inference_model(dirname=os.path.join(FLAGS.save_path, 'int8'), feeded_var_names=feed_names, target_vars=fetch_targets, executor=exe, main_program=server_int8_program, model_filename='model', params_filename='weights') if __name__ == '__main__': parser = ArgsParser() parser.add_argument("-m", "--model_path", default=None, type=str, help="path of checkpoint") parser.add_argument( "--output_eval", default=None, type=str, help="Evaluation directory, default is current directory.") parser.add_argument( "-d", "--dataset_dir", default=None, type=str, help="Dataset path, same as DataFeed.dataset.dataset_dir") parser.add_argument("--weight_quant_type", default='abs_max',
if box_ap_stats[0] > best_box_ap_list[0]: best_box_ap_list[0] = box_ap_stats[0] best_box_ap_list[1] = step_id checkpoint.save(exe, distill_prog, os.path.join("./", "best_model")) logger.info("Best test box ap: {}, in step: {}".format( best_box_ap_list[0], best_box_ap_list[1])) train_loader.reset() if __name__ == '__main__': parser = ArgsParser() parser.add_argument( "-t", "--teacher_config", default=None, type=str, help="Config file of teacher architecture.") parser.add_argument( "--teacher_pretrained", default=None, type=str, help="Whether to use pretrained model.") parser.add_argument( "--output_eval", default=None, type=str, help="Evaluation directory, default is current directory.") parser.add_argument( "-p",
def parse_args():
    """Parse the command-line options for tracking from saved detection
    results.

    Returns:
        argparse.Namespace: the parsed options.
    """

    def _str2bool(value):
        # Fix: argparse's ``type=bool`` is broken for command lines —
        # ``bool("False")`` is True, so "--scaled False" wrongly enabled the
        # option. Parse the text explicitly; the default is unchanged, so the
        # fix is backward compatible for callers that never passed the flag.
        return str(value).lower() in ('true', 't', 'yes', 'y', '1')

    parser = ArgsParser()
    parser.add_argument(
        "--det_results_dir",
        type=str,
        default='',
        help="Directory name for detection results.")
    parser.add_argument(
        '--output_dir',
        type=str,
        default='output',
        help='Directory name for output tracking results.')
    parser.add_argument(
        '--save_images',
        action='store_true',
        help='Save tracking results (image).')
    parser.add_argument(
        '--save_videos',
        action='store_true',
        help='Save tracking results (video).')
    parser.add_argument(
        '--show_image',
        action='store_true',
        help='Show tracking results (image).')
    parser.add_argument(
        '--scaled',
        type=_str2bool,  # was type=bool: any non-empty value meant True
        default=False,
        help=
        "Whether coords after detector outputs are scaled, False in JDE YOLOv3 "
        "True in general detector.")
    args = parser.parse_args()
    return args