# Example no. 1
# 0
def main(args):
    """Capture video frames, optionally display them, and save each one as an image.

    Args:
        args: Parsed CLI namespace. Attributes read here: ``input`` (camera
            index if all digits, otherwise a video path), ``output``
            (destination directory), ``image_ext``, ``display``, ``progress``.
    """
    # Create output directory if needed
    os.makedirs(args.output, exist_ok=True)

    # Create pipeline steps; a purely-numeric input selects a capture device
    capture_video = CaptureVideo(
        int(args.input) if args.input.isdigit() else args.input)

    display_video = DisplayVideo("image") \
        if args.display else None

    save_image = SaveImage("image", args.output, image_ext=args.image_ext)

    # Create image processing pipeline
    pipeline = (capture_video | display_video | save_image)

    # Iterate through pipeline; frame_count <= 0 means length is unknown
    try:
        for _ in tqdm(pipeline,
                      total=capture_video.frame_count
                      if capture_video.frame_count > 0 else None,
                      disable=not args.progress):
            pass
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        # Pipeline cleanup
        capture_video.cleanup()
        # Fix: the display window was previously leaked on exit
        if display_video:
            display_video.cleanup()
def main(args):
    """Detect faces in a video, save the crops, and optionally annotate/display/record.

    Args:
        args: Parsed CLI namespace. Attributes read here: ``input``,
            ``output``, ``prototxt``, ``model``, ``confidence``,
            ``batch_size``, ``out_summary``, ``out_video``, ``display``,
            ``progress``.
    """
    # Fix: every writer below targets args.output, so make sure it exists
    # (the other entry points in this file already do this).
    os.makedirs(args.output, exist_ok=True)

    # Create pipeline steps; a purely-numeric input selects a capture device
    capture_video = CaptureVideo(
        int(args.input) if args.input.isdigit() else args.input)

    detect_faces = DetectFaces(prototxt=args.prototxt,
                               model=args.model,
                               confidence=args.confidence,
                               batch_size=args.batch_size)

    save_faces = SaveFaces(args.output)

    summary_file = os.path.join(args.output, args.out_summary)
    save_summary = SaveSummary(summary_file)

    # Annotation is needed both for on-screen display and for the output video
    annotate_image = AnnotateImage("annotated_image") \
        if args.display or args.out_video else None

    display_video = DisplayVideo("annotated_image") \
        if args.display else None

    save_video = SaveVideo("annotated_image", os.path.join(args.output, args.out_video), fps=capture_video.fps) \
        if args.out_video else None

    # Create image processing pipeline
    pipeline = (capture_video | detect_faces | save_faces | annotate_image
                | display_video | save_summary)

    # Iterate through pipeline; frame_count <= 0 means length is unknown
    progress = tqdm(total=capture_video.frame_count
                    if capture_video.frame_count > 0 else None,
                    disable=not args.progress)
    try:
        for _ in pipeline:
            progress.update(1)
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        progress.close()

        # Pipeline cleanup
        capture_video.cleanup()
        if display_video:
            display_video.cleanup()
        if save_video:
            save_video.cleanup()

        print(f"[INFO] Saving summary to {summary_file}...")
        save_summary.write()
# Example no. 3
# 0
def create_pipeline(video_path="demo.mp4", background_image=None, fps_scale=0.5):
    """Build the demo background-replacement pipeline.

    Args:
        video_path: Input video to read frames from. Defaults to the
            original hard-coded ``"demo.mp4"``.
        background_image: Replacement background; falls back to the
            module-level ``BACKGROUND_IMAGE`` when ``None``.
        fps_scale: Factor applied to the capture fps for the output video
            (the original behavior halved it).

    Returns:
        A tuple ``(iterator, steps)`` where ``iterator`` drives the pipeline
        and ``steps`` is ``(capture_video, predict, save_video)`` for cleanup.
    """
    capture_video = CaptureVideo(video_path)
    # "spawn" is required so the async workers re-import cleanly
    mp.set_start_method("spawn", force=True)
    predict = AsyncPredict(model_path=WEIGHTS_FILE,
                           load_model_fn=load_fcn,
                           num_cpus=CPUS,
                           queue_size=QUEUE_SIZE,
                           ordered=True)
    separate_background = VirtualBackground(
        "vis_image",
        BACKGROUND_IMAGE if background_image is None else background_image)

    save_video = SaveVideo("vis_image", os.path.join(OUTPUT, OUT_VIDEO),
                           capture_video.fps * fps_scale)

    pipeline = (capture_video | predict | separate_background | save_video)
    return iter(pipeline), (capture_video, predict, save_video)
def main(args):
    """Run background separation on a video and write the result to disk.

    Args:
        args: Parsed CLI namespace. Attributes read here: ``input``,
            ``output``, ``single_process``, ``weights_file``, ``cpus``,
            ``queue_size``, ``out_video``, ``fps``, ``progress``.
    """
    import sys
    # Create output directory if needed
    os.makedirs(args.output, exist_ok=True)

    # First create video capture: digits -> webcam index, otherwise a file path
    if args.input.isdigit():
        capture_video = CaptureVideo(int(args.input))
    elif os.path.isfile(args.input):
        capture_video = CaptureVideo(args.input)
    else:
        print("Invalid Type put in!")
        sys.exit(1)

    if args.single_process:
        predict = None
    else:
        # "spawn" is required so the async workers re-import cleanly
        mp.set_start_method("spawn", force=True)
        predict = AsyncPredict(model_path=args.weights_file,
                               load_model_fn=load_fcn,
                               num_cpus=args.cpus,
                               queue_size=args.queue_size,
                               ordered=True)

    separate_background = SeparateBackground("vis_image")

    save_video = SaveVideo("vis_image",
                           os.path.join(args.output, args.out_video),
                           capture_video.fps if args.fps is None else args.fps)

    pipeline = (capture_video | predict | separate_background | save_video)

    # Drive the pipeline; frame_count <= 0 means length is unknown
    try:
        for _ in tqdm(pipeline,
                      total=capture_video.frame_count
                      if capture_video.frame_count > 0 else None,
                      disable=not args.progress):
            pass
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        # Clean up of file handles etc.
        # Bug fix: the old `isinstance(predict, CaptureVideo)` guard was never
        # true, so the capture source was always leaked.
        capture_video.cleanup()
        if isinstance(predict, AsyncPredict):
            predict.cleanup()
        if save_video:
            save_video.cleanup()
# Example no. 5
# 0
def main(args):
    """Run Detectron2 prediction on a video/frame source with optional
    pose tracking, background separation, annotation, display, and recording.

    Args:
        args: Parsed CLI namespace. Attributes read here: ``input``,
            ``output``, ``config_file``, ``weights_file``, ``config_opts``,
            ``confidence_threshold``, ``gpus``, ``cpus``, ``queue_size``,
            ``single_process``, ``track_pose``, ``track_link_len``,
            ``track_num``, ``track_mag``, ``track_match``,
            ``track_orb_features``, ``separate_background``, ``display``,
            ``out_video``, ``fps``, ``progress``.

    Raises:
        ValueError: If ``args.input`` is not a device index, file, or directory.
    """
    # Create output directory if needed
    os.makedirs(args.output, exist_ok=True)

    # Create capture step: digits -> device index, file -> video, dir -> frames
    if args.input.isdigit():
        capture_video = CaptureVideo(int(args.input))
    elif os.path.isfile(args.input):
        capture_video = CaptureVideo(args.input)
    elif os.path.isdir(args.input):
        capture_video = CaptureFrames(args.input)
    else:
        # Fix: previously fell through with capture_video unbound -> NameError
        raise ValueError(f"Invalid input: {args.input!r} "
                         "(expected device index, file, or directory)")

    cfg = detectron.setup_cfg(config_file=args.config_file,
                              weights_file=args.weights_file,
                              config_opts=args.config_opts,
                              confidence_threshold=args.confidence_threshold,
                              cpu=False if args.gpus > 0 else True)
    if not args.single_process:
        # "spawn" is required so the async workers re-import cleanly
        mp.set_start_method("spawn", force=True)
        predict = AsyncPredict(cfg,
                               num_gpus=args.gpus,
                               num_cpus=args.cpus,
                               queue_size=args.queue_size,
                               ordered=True)
    else:
        predict = Predict(cfg)

    track_pose = TrackPose(
        link_len=args.track_link_len,
        num=args.track_num,
        mag=args.track_mag,
        match=args.track_match,
        orb_features=args.track_orb_features) if args.track_pose else None

    # Background separation and annotation are mutually exclusive outputs
    if args.separate_background:
        separate_background = SeparateBackground("vis_image")
        annotate_video = None
    else:
        separate_background = None
        metadata_name = cfg.DATASETS.TEST[0] if len(
            cfg.DATASETS.TEST) else "__unused"
        annotate_video = AnnotateVideo("vis_image",
                                       metadata_name,
                                       predictions=track_pose is None,
                                       pose_flows=track_pose is not None)

    display_video = DisplayVideo("vis_image") \
        if args.display else None

    save_video = SaveVideo("vis_image", os.path.join(args.output, args.out_video),
                           capture_video.fps if args.fps is None else args.fps) \
        if args.out_video else None

    # Create image processing pipeline
    pipeline = (capture_video | predict | track_pose | separate_background
                | annotate_video | display_video | save_video)

    # Iterate through pipeline; frame_count <= 0 means length is unknown
    try:
        for _ in tqdm(pipeline,
                      total=capture_video.frame_count
                      if capture_video.frame_count > 0 else None,
                      disable=not args.progress):
            pass
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        # Pipeline cleanup.
        # Bug fix: the old `isinstance(predict, CaptureVideo)` guard was never
        # true, so the capture source was always leaked.
        capture_video.cleanup()
        if isinstance(predict, AsyncPredict):
            predict.cleanup()
        if display_video:
            display_video.cleanup()
        if save_video:
            save_video.cleanup()