Example #1
def predict_naive(model_dir,
                  video_file,
                  image_dir,
                  device='gpu',
                  threshold=0.5,
                  output_dir='output'):
    pred_config = PredictConfig(model_dir)
    detector = JDE_Detector(pred_config, model_dir, device=device.upper())

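    # predict from a video file if one is given, otherwise from the images in image_dir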
    if video_file is not None:
        predict_video(detector,
                      video_file,
                      threshold=threshold,
                      output_dir=output_dir,
                      save_images=True,
                      save_mot_txts=True,
                      draw_center_traj=False,
                      secs_interval=10,
                      do_entrance_counting=False)
    else:
        img_list = get_test_images(image_dir, infer_img=None)
        predict_image(detector,
                      img_list,
                      threshold=threshold,
                      output_dir=output_dir,
                      save_images=True)
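A minimal sketch of how this helper could be invoked directly, assuming an exported JDE/FairMOT inference model; the model directory and video name below are placeholders, not part of the original example.

if __name__ == '__main__':
    # Placeholder paths; point model_dir at an exported JDE/FairMOT inference model.
    predict_naive(model_dir='output_inference/fairmot_dla34',
                  video_file='test_video.mp4',  # set to None to predict from image_dir instead
                  image_dir=None,
                  device='gpu',
                  threshold=0.5,
                  output_dir='output')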
Example #2
def main():
    pred_config = PredictConfig(FLAGS.model_dir)
    detector = JDE_Detector(pred_config,
                            FLAGS.model_dir,
                            device=FLAGS.device,
                            run_mode=FLAGS.run_mode,
                            trt_min_shape=FLAGS.trt_min_shape,
                            trt_max_shape=FLAGS.trt_max_shape,
                            trt_opt_shape=FLAGS.trt_opt_shape,
                            trt_calib_mode=FLAGS.trt_calib_mode,
                            cpu_threads=FLAGS.cpu_threads,
                            enable_mkldnn=FLAGS.enable_mkldnn)

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        predict_video(detector,
                      FLAGS.video_file,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      save_mot_txts=FLAGS.save_mot_txts,
                      draw_center_traj=FLAGS.draw_center_traj,
                      secs_interval=FLAGS.secs_interval,
                      do_entrance_counting=FLAGS.do_entrance_counting,
                      camera_id=FLAGS.camera_id)
    else:
        # predict from image
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        predict_image(detector,
                      img_list,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      run_benchmark=FLAGS.run_benchmark)
        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
        else:
            mems = {
                'cpu_rss_mb': detector.cpu_mem / len(img_list),
                'gpu_rss_mb': detector.gpu_mem / len(img_list),
                'gpu_util': detector.gpu_util * 100 / len(img_list)
            }
            perf_info = detector.det_times.report(average=True)
            model_dir = FLAGS.model_dir
            mode = FLAGS.run_mode
            model_info = {
                'model_name': model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            data_info = {
                'batch_size': 1,
                'shape': "dynamic_shape",
                'data_num': perf_info['img_num']
            }
            det_log = PaddleInferBenchmark(detector.config, model_info,
                                           data_info, perf_info, mems)
            det_log('MOT')
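FLAGS here is produced by a command-line parser defined elsewhere in the deployment code. A minimal, hypothetical argparse setup covering only the flags this main() reads might look as follows; the defaults are assumptions, not the script's actual values.

import argparse

def build_flags():
    # Hypothetical parser; the real deployment script ships its own flag definitions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, required=True)
    parser.add_argument('--video_file', type=str, default=None)
    parser.add_argument('--camera_id', type=int, default=-1)
    parser.add_argument('--image_dir', type=str, default=None)
    parser.add_argument('--image_file', type=str, default=None)
    parser.add_argument('--device', type=str, default='CPU')
    parser.add_argument('--run_mode', type=str, default='paddle')
    parser.add_argument('--threshold', type=float, default=0.5)
    parser.add_argument('--output_dir', type=str, default='output')
    parser.add_argument('--save_images', action='store_true')
    parser.add_argument('--save_mot_txts', action='store_true')
    parser.add_argument('--draw_center_traj', action='store_true')
    parser.add_argument('--secs_interval', type=int, default=10)
    parser.add_argument('--do_entrance_counting', action='store_true')
    parser.add_argument('--run_benchmark', action='store_true')
    parser.add_argument('--trt_min_shape', type=int, default=1)
    parser.add_argument('--trt_max_shape', type=int, default=1280)
    parser.add_argument('--trt_opt_shape', type=int, default=640)
    parser.add_argument('--trt_calib_mode', action='store_true')
    parser.add_argument('--cpu_threads', type=int, default=1)
    parser.add_argument('--enable_mkldnn', action='store_true')
    return parser.parse_args()

FLAGS = build_flags()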
Example #3
def main():
    deploy_file = os.path.join(FLAGS.model_dir, 'infer_cfg.yml')
    with open(deploy_file) as f:
        yml_conf = yaml.safe_load(f)
    arch = yml_conf['arch']
    detector = SDE_Detector(
        FLAGS.model_dir,
        tracker_config=FLAGS.tracker_config,
        device=FLAGS.device,
        run_mode=FLAGS.run_mode,
        batch_size=1,
        trt_min_shape=FLAGS.trt_min_shape,
        trt_max_shape=FLAGS.trt_max_shape,
        trt_opt_shape=FLAGS.trt_opt_shape,
        trt_calib_mode=FLAGS.trt_calib_mode,
        cpu_threads=FLAGS.cpu_threads,
        enable_mkldnn=FLAGS.enable_mkldnn,
        output_dir=FLAGS.output_dir,
        threshold=FLAGS.threshold,
        save_images=FLAGS.save_images,
        save_mot_txts=FLAGS.save_mot_txts,
        draw_center_traj=FLAGS.draw_center_traj,
        secs_interval=FLAGS.secs_interval,
        do_entrance_counting=FLAGS.do_entrance_counting,
        reid_model_dir=FLAGS.reid_model_dir,
        mtmct_dir=FLAGS.mtmct_dir,
    )

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        detector.predict_video(FLAGS.video_file, FLAGS.camera_id)
    elif FLAGS.mtmct_dir is not None:
        with open(FLAGS.mtmct_cfg) as f:
            mtmct_cfg = yaml.safe_load(f)
        detector.predict_mtmct(FLAGS.mtmct_dir, mtmct_cfg)
    else:
        # predict from image
        if FLAGS.image_dir is None and FLAGS.image_file is not None:
            assert FLAGS.batch_size == 1, "--batch_size should be 1 in MOT models."
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        seq_name = FLAGS.image_dir.split('/')[-1]
        detector.predict_image(img_list,
                               FLAGS.run_benchmark,
                               repeats=10,
                               seq_name=seq_name)

        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
        else:
            mode = FLAGS.run_mode
            model_dir = FLAGS.model_dir
            model_info = {
                'model_name': model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            bench_log(detector, img_list, model_info, name='MOT')
Example #4
def predict_naive(model_dir,
                  reid_model_dir,
                  video_file,
                  image_dir,
                  mtmct_dir=None,
                  mtmct_cfg=None,
                  scaled=True,
                  device='gpu',
                  threshold=0.5,
                  output_dir='output'):
    pred_config = PredictConfig(model_dir)
    detector_func = 'SDE_Detector'
    if pred_config.arch == 'PicoDet':
        detector_func = 'SDE_DetectorPicoDet'
    detector = eval(detector_func)(pred_config, model_dir, device=device)

    pred_config = PredictConfig(reid_model_dir)
    reid_model = SDE_ReID(pred_config, reid_model_dir, device=device)

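    # run video tracking, cross-camera (MTMCT) tracking, or plain image prediction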
    if video_file is not None:
        predict_video(
            detector,
            reid_model,
            video_file,
            scaled=scaled,
            threshold=threshold,
            output_dir=output_dir,
            save_images=True,
            save_mot_txts=True,
            draw_center_traj=False,
            secs_interval=10,
            do_entrance_counting=False)
    elif mtmct_dir is not None:
        with open(mtmct_cfg) as f:
            mtmct_cfg_file = yaml.safe_load(f)
        predict_mtmct(
            detector,
            reid_model,
            mtmct_dir,
            mtmct_cfg_file,
            scaled=scaled,
            threshold=threshold,
            output_dir=output_dir,
            save_images=True,
            save_mot_txts=True)
    else:
        img_list = get_test_images(image_dir, infer_img=None)
        predict_image(
            detector,
            reid_model,
            img_list,
            threshold=threshold,
            output_dir=output_dir,
            save_images=True)
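As with Example #1, this two-stage (detector plus ReID) helper can be driven without any flag parsing; a minimal sketch, with placeholder export directories that are not part of the original example:

if __name__ == '__main__':
    # Placeholder export directories for the detector and the ReID model.
    predict_naive(model_dir='output_inference/ppyolov2_r50vd_dcn',
                  reid_model_dir='output_inference/deepsort_pplcnet',
                  video_file='test_video.mp4',  # None would fall through to mtmct_dir / image_dir
                  image_dir=None,
                  device='gpu',
                  threshold=0.5,
                  output_dir='output')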
Example #5
def main():
    detector = JDE_Detector(
        FLAGS.model_dir,
        tracker_config=None,
        device=FLAGS.device,
        run_mode=FLAGS.run_mode,
        batch_size=1,
        trt_min_shape=FLAGS.trt_min_shape,
        trt_max_shape=FLAGS.trt_max_shape,
        trt_opt_shape=FLAGS.trt_opt_shape,
        trt_calib_mode=FLAGS.trt_calib_mode,
        cpu_threads=FLAGS.cpu_threads,
        enable_mkldnn=FLAGS.enable_mkldnn,
        output_dir=FLAGS.output_dir,
        threshold=FLAGS.threshold,
        save_images=FLAGS.save_images,
        save_mot_txts=FLAGS.save_mot_txts,
        draw_center_traj=FLAGS.draw_center_traj,
        secs_interval=FLAGS.secs_interval,
        do_entrance_counting=FLAGS.do_entrance_counting,
    )

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        detector.predict_video(FLAGS.video_file, FLAGS.camera_id)
    else:
        # predict from image
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)

        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
        else:
            mode = FLAGS.run_mode
            model_dir = FLAGS.model_dir
            model_info = {
                'model_name': model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            bench_log(detector, img_list, model_info, name='MOT')
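For reference, the two string operations used to build model_info above simply pull out the last path component and the precision suffix; the values below are illustrative only:

# Illustrative values only.
model_dir = 'output_inference/fairmot_dla34/'
mode = 'trt_fp16'
model_dir.strip('/').split('/')[-1]   # -> 'fairmot_dla34'
mode.split('_')[-1]                   # -> 'fp16'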
Example #6
def main():
    pred_config = PredictConfig(FLAGS.model_dir)
    detector_func = 'SDE_Detector'
    if pred_config.arch == 'PicoDet':
        detector_func = 'SDE_DetectorPicoDet'

    detector = eval(detector_func)(pred_config,
                                   FLAGS.model_dir,
                                   device=FLAGS.device,
                                   run_mode=FLAGS.run_mode,
                                   batch_size=FLAGS.batch_size,
                                   trt_min_shape=FLAGS.trt_min_shape,
                                   trt_max_shape=FLAGS.trt_max_shape,
                                   trt_opt_shape=FLAGS.trt_opt_shape,
                                   trt_calib_mode=FLAGS.trt_calib_mode,
                                   cpu_threads=FLAGS.cpu_threads,
                                   enable_mkldnn=FLAGS.enable_mkldnn)

    pred_config = PredictConfig(FLAGS.reid_model_dir)
    reid_model = SDE_ReID(pred_config,
                          FLAGS.reid_model_dir,
                          device=FLAGS.device,
                          run_mode=FLAGS.run_mode,
                          batch_size=FLAGS.reid_batch_size,
                          trt_min_shape=FLAGS.trt_min_shape,
                          trt_max_shape=FLAGS.trt_max_shape,
                          trt_opt_shape=FLAGS.trt_opt_shape,
                          trt_calib_mode=FLAGS.trt_calib_mode,
                          cpu_threads=FLAGS.cpu_threads,
                          enable_mkldnn=FLAGS.enable_mkldnn)

    # predict from video file or camera video stream
    if FLAGS.video_file is not None or FLAGS.camera_id != -1:
        predict_video(detector,
                      reid_model,
                      FLAGS.video_file,
                      scaled=FLAGS.scaled,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      save_mot_txts=FLAGS.save_mot_txts,
                      draw_center_traj=FLAGS.draw_center_traj,
                      secs_interval=FLAGS.secs_interval,
                      do_entrance_counting=FLAGS.do_entrance_counting,
                      camera_id=FLAGS.camera_id)

    elif FLAGS.mtmct_dir is not None:
        mtmct_cfg_file = FLAGS.mtmct_cfg
        with open(mtmct_cfg_file) as f:
            mtmct_cfg = yaml.safe_load(f)
        predict_mtmct(detector,
                      reid_model,
                      FLAGS.mtmct_dir,
                      mtmct_cfg,
                      scaled=FLAGS.scaled,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      save_mot_txts=FLAGS.save_mot_txts)
    else:
        # predict from image
        img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
        predict_image(detector,
                      reid_model,
                      img_list,
                      threshold=FLAGS.threshold,
                      output_dir=FLAGS.output_dir,
                      save_images=FLAGS.save_images,
                      run_benchmark=FLAGS.run_benchmark)

        if not FLAGS.run_benchmark:
            detector.det_times.info(average=True)
            reid_model.det_times.info(average=True)
        else:
            mode = FLAGS.run_mode
            det_model_dir = FLAGS.model_dir
            det_model_info = {
                'model_name': det_model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            bench_log(detector, img_list, det_model_info, name='Det')

            reid_model_dir = FLAGS.reid_model_dir
            reid_model_info = {
                'model_name': reid_model_dir.strip('/').split('/')[-1],
                'precision': mode.split('_')[-1]
            }
            bench_log(reid_model, img_list, reid_model_info, name='ReID')
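Each of the main() functions above assumes FLAGS has already been populated, and that the device flag has been validated, before the call. A minimal launcher sketch, under the assumption that the surrounding deployment utilities provide argsparser() and print_arguments() helpers (the helper names and import path are assumptions, adjust to the actual module):

import paddle
# Assumed helpers from the deployment utilities that sit alongside these scripts.
from utils import argsparser, print_arguments

if __name__ == '__main__':
    paddle.enable_static()
    FLAGS = argsparser().parse_args()
    print_arguments(FLAGS)
    FLAGS.device = FLAGS.device.upper()
    assert FLAGS.device in ['CPU', 'GPU', 'XPU'], \
        "device should be CPU, GPU or XPU"
    main()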