Example #1
def main():
    pred_config = PredictConfig(FLAGS.model_dir)
    # Choose the detector class up front so the model is only loaded once
    detector_cls = DetectorSOLOv2 if pred_config.arch == 'SOLOv2' else Detector
    detector = detector_cls(pred_config,
                            FLAGS.model_dir,
                            device=FLAGS.device,
                            run_mode=FLAGS.run_mode,
                            batch_size=FLAGS.batch_size,
                            trt_min_shape=FLAGS.trt_min_shape,
                            trt_max_shape=FLAGS.trt_max_shape,
                            trt_opt_shape=FLAGS.trt_opt_shape,
                            trt_calib_mode=FLAGS.trt_calib_mode,
                            cpu_threads=FLAGS.cpu_threads,
                            enable_mkldnn=FLAGS.enable_mkldnn)

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        predict_video(detector, FLAGS.camera_id)
    else:
        # predict from image
        if FLAGS.image_dir is None and FLAGS.image_file is not None:
            assert FLAGS.batch_size == 1, "batch_size should be 1 when image_file is not None"
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        predict_image(detector, img_list, FLAGS.batch_size)
        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
        else:
            # Average resource usage over the number of processed images
            mems = {
                'cpu_rss_mb': detector.cpu_mem / len(img_list),
                'gpu_rss_mb': detector.gpu_mem / len(img_list),
                'gpu_util': detector.gpu_util * 100 / len(img_list)
            }

            perf_info = detector.det_times.report(average=True)
            model_dir = FLAGS.model_dir
            mode = FLAGS.run_mode
            model_info = {
                'model_name': model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            data_info = {
                'batch_size': FLAGS.batch_size,
                'shape': "dynamic_shape",
                'data_num': perf_info['img_num']
            }
            det_log = PaddleInferBenchmark(detector.config, model_info,
                                           data_info, perf_info, mems)
            det_log('Det')
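Each of these main() functions reads a module-level FLAGS namespace rather than taking arguments. A minimal entry-point sketch that would populate it, assuming an argsparser() helper (not shown in these excerpts) that builds the script's argparse parser from the flag names used above:

import paddle

# Hypothetical driver; argsparser() and the flag set it defines are
# assumptions inferred from the FLAGS attributes used in main().
if __name__ == '__main__':
    paddle.enable_static()  # inference predictors run in static-graph mode
    parser = argsparser()
    FLAGS = parser.parse_args()
    main()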
Example #2
def main():
    # JDE_Detector runs joint detection-and-embedding multi-object tracking
    pred_config = PredictConfig(FLAGS.model_dir)
    detector = JDE_Detector(pred_config,
                            FLAGS.model_dir,
                            device=FLAGS.device,
                            run_mode=FLAGS.run_mode,
                            trt_min_shape=FLAGS.trt_min_shape,
                            trt_max_shape=FLAGS.trt_max_shape,
                            trt_opt_shape=FLAGS.trt_opt_shape,
                            trt_calib_mode=FLAGS.trt_calib_mode,
                            cpu_threads=FLAGS.cpu_threads,
                            enable_mkldnn=FLAGS.enable_mkldnn)

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        predict_video(detector,
                      FLAGS.video_file,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      save_mot_txts=FLAGS.save_mot_txts,
                      draw_center_traj=FLAGS.draw_center_traj,
                      secs_interval=FLAGS.secs_interval,
                      do_entrance_counting=FLAGS.do_entrance_counting,
                      camera_id=FLAGS.camera_id)
    else:
        # predict from image
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        predict_image(detector,
                      img_list,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      run_benchmark=FLAGS.run_benchmark)
        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
        else:
            mems = {
                'cpu_rss_mb': detector.cpu_mem / len(img_list),
                'gpu_rss_mb': detector.gpu_mem / len(img_list),
                'gpu_util': detector.gpu_util * 100 / len(img_list)
            }
            perf_info = detector.det_times.report(average=True)
            model_dir = FLAGS.model_dir
            mode = FLAGS.run_mode
            model_info = {
                'model_name': model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            data_info = {
                'batch_size': 1,
                'shape': "dynamic_shape",
                'data_num': perf_info['img_num']
            }
            det_log = PaddleInferBenchmark(detector.config, model_info,
                                           data_info, perf_info, mems)
            det_log('MOT')
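Note that, unlike Example #1, JDE_Detector is constructed without a batch_size argument and the benchmark's data_info hard-codes 'batch_size': 1, presumably because the tracker consumes video frames one at a time.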
Example #3
def bench_log(detector, img_list, model_info, batch_size=1, name=None):
    mems = {
        'cpu_rss_mb': detector.cpu_mem / len(img_list),
        'gpu_rss_mb': detector.gpu_mem / len(img_list),
        'gpu_util': detector.gpu_util * 100 / len(img_list)
    }
    perf_info = detector.det_times.report(average=True)
    data_info = {
        'batch_size': batch_size,
        'shape': "dynamic_shape",
        'data_num': perf_info['img_num']
    }
    log = PaddleInferBenchmark(detector.config, model_info, data_info,
                               perf_info, mems)
    log(name)
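The bench_log helper above factors out the benchmark-reporting tail that Examples #1 and #2 write inline. As a sketch, the reporting branch of Example #1 could collapse onto it like this (same FLAGS names as in that example):

    # Equivalent to Example #1's reporting block, routed through bench_log
    if not FLAGS.run_benchmark:
        detector.det_times.info(average=True)
    else:
        model_info = {
            'model_name': FLAGS.model_dir.strip('/').split('/')[-1],
            'precision': FLAGS.run_mode.split('_')[-1]
        }
        bench_log(detector, img_list, model_info,
                  batch_size=FLAGS.batch_size, name='Det')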
Example #4
def main():
    # Keypoint predictor built directly from the exported model directory
    detector = KeyPointDetector(FLAGS.model_dir,
                                device=FLAGS.device,
                                run_mode=FLAGS.run_mode,
                                batch_size=FLAGS.batch_size,
                                trt_min_shape=FLAGS.trt_min_shape,
                                trt_max_shape=FLAGS.trt_max_shape,
                                trt_opt_shape=FLAGS.trt_opt_shape,
                                trt_calib_mode=FLAGS.trt_calib_mode,
                                cpu_threads=FLAGS.cpu_threads,
                                enable_mkldnn=FLAGS.enable_mkldnn,
                                threshold=FLAGS.threshold,
                                output_dir=FLAGS.output_dir,
                                use_dark=FLAGS.use_dark)

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        detector.predict_video(FLAGS.video_file, FLAGS.camera_id)
    else:
        # predict from image
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)
        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
        else:
            mems = {
                'cpu_rss_mb': detector.cpu_mem / len(img_list),
                'gpu_rss_mb': detector.gpu_mem / len(img_list),
                'gpu_util': detector.gpu_util * 100 / len(img_list)
            }
            perf_info = detector.det_times.report(average=True)
            model_dir = FLAGS.model_dir
            mode = FLAGS.run_mode
            model_info = {
                'model_name': model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            data_info = {
                'batch_size': 1,
                'shape': "dynamic_shape",
                'data_num': perf_info['img_num']
            }
            det_log = PaddleInferBenchmark(detector.config, model_info,
                                           data_info, perf_info, mems)
            det_log('KeyPoint')
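The keypoint example shows a different API surface from Examples #1 and #2: KeyPointDetector takes FLAGS.model_dir directly (no separate PredictConfig), accepts threshold, output_dir, and use_dark in its constructor, and exposes prediction as methods on the detector rather than free predict_image/predict_video functions. The attribute and action examples below follow the same pattern.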
Example #5
def main():
    # Attribute-recognition predictor built from the exported model directory
    detector = AttrDetector(FLAGS.model_dir,
                            device=FLAGS.device,
                            run_mode=FLAGS.run_mode,
                            batch_size=FLAGS.batch_size,
                            trt_min_shape=FLAGS.trt_min_shape,
                            trt_max_shape=FLAGS.trt_max_shape,
                            trt_opt_shape=FLAGS.trt_opt_shape,
                            trt_calib_mode=FLAGS.trt_calib_mode,
                            cpu_threads=FLAGS.cpu_threads,
                            enable_mkldnn=FLAGS.enable_mkldnn,
                            threshold=FLAGS.threshold,
                            output_dir=FLAGS.output_dir)

    # predict from image
    if FLAGS.image_dir is None and FLAGS.image_file is not None:
        assert FLAGS.batch_size == 1, "batch_size should be 1 when image_file is not None"
    img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
    detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)
    if not FLAGS.run_benchmark:
        detector.det_times.info(average=True)
    else:
        mems = {
            'cpu_rss_mb': detector.cpu_mem / len(img_list),
            'gpu_rss_mb': detector.gpu_mem / len(img_list),
            'gpu_util': detector.gpu_util * 100 / len(img_list)
        }

        perf_info = detector.det_times.report(average=True)
        model_dir = FLAGS.model_dir
        mode = FLAGS.run_mode
        model_info = {
            'model_name': model_dir.strip('/').split('/')[-1],
            'precision': mode.split('_')[-1]
        }
        data_info = {
            'batch_size': FLAGS.batch_size,
            'shape': "dynamic_shape",
            'data_num': perf_info['img_num']
        }
        det_log = PaddleInferBenchmark(detector.config, model_info, data_info,
                                       perf_info, mems)
        det_log('Attr')
Example #6
def main():
    # Skeleton-based action recognizer built from the exported model directory
    detector = ActionRecognizer(FLAGS.model_dir,
                                device=FLAGS.device,
                                run_mode=FLAGS.run_mode,
                                batch_size=FLAGS.batch_size,
                                trt_min_shape=FLAGS.trt_min_shape,
                                trt_max_shape=FLAGS.trt_max_shape,
                                trt_opt_shape=FLAGS.trt_opt_shape,
                                trt_calib_mode=FLAGS.trt_calib_mode,
                                cpu_threads=FLAGS.cpu_threads,
                                enable_mkldnn=FLAGS.enable_mkldnn,
                                threshold=FLAGS.threshold,
                                output_dir=FLAGS.output_dir,
                                window_size=FLAGS.window_size,
                                random_pad=FLAGS.random_pad)
    # predict from numpy array
    input_list = get_test_skeletons(FLAGS.action_file)
    detector.predict_skeleton(input_list, FLAGS.run_benchmark, repeats=10)
    if not FLAGS.run_benchmark:
        detector.det_times.info(average=True)
    else:
        mems = {
            'cpu_rss_mb': detector.cpu_mem / len(input_list),
            'gpu_rss_mb': detector.gpu_mem / len(input_list),
            'gpu_util': detector.gpu_util * 100 / len(input_list)
        }

        perf_info = detector.det_times.report(average=True)
        model_dir = FLAGS.model_dir
        mode = FLAGS.run_mode
        model_info = {
            'model_name': model_dir.strip('/').split('/')[-1],
            'precision': mode.split('_')[-1]
        }
        data_info = {
            'batch_size': FLAGS.batch_size,
            'shape': "dynamic_shape",
            'data_num': perf_info['img_num']
        }
        det_log = PaddleInferBenchmark(detector.config, model_info, data_info,
                                       perf_info, mems)
        det_log('Action')
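As in Examples #4 and #5, the predict call passes repeats=10 in benchmark mode, presumably so det_times averages latency over repeated runs; the mems dictionary likewise averages CPU/GPU usage over len(input_list) inputs. This reporting tail is once more the pattern that bench_log in Example #3 encapsulates.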