def main(args):
    # Create output directory if needed
    os.makedirs(args.output, exist_ok=True)

    # Create pipeline steps
    capture_images = (CaptureImages(args.input)
                      if os.path.isdir(args.input)
                      else CaptureImage(args.input))

    cfg = detectron.setup_cfg(
        config_file=args.config_file,
        weights_file=args.weights_file,
        config_opts=args.config_opts,
        confidence_threshold=args.confidence_threshold,
        cpu=False if args.gpus > 0 else True,
    )

    if not args.single_process:
        mp.set_start_method("spawn", force=True)
        predict = AsyncPredict(
            cfg,
            num_gpus=args.gpus,
            num_cpus=args.cpus,
            queue_size=args.queue_size,
            ordered=False,
        )
    else:
        predict = Predict(cfg)

    if args.separate_background:
        separate_background = SeparateBackground("vis_image")
        annotate_image = None
    else:
        separate_background = None
        metadata_name = cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        annotate_image = AnnotateImage("vis_image", metadata_name)

    save_image = SaveImage("vis_image", args.output)

    # Create image processing pipeline
    pipeline = (capture_images |
                predict |
                separate_background |
                annotate_image |
                save_image)

    # Iterate through pipeline
    try:
        for _ in tqdm(pipeline, disable=not args.progress):
            pass
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        # Pipeline cleanup
        if isinstance(predict, AsyncPredict):
            predict.cleanup()
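# The `main` above reads a fixed set of attributes from `args`. A minimal
# argument parser that supplies those attributes might look like the sketch
# below. The flag names and help strings are assumptions made for
# illustration and may differ from the project's actual `process_images.py`;
# the defaults mirror those documented for `process_images()` further down.
import argparse


def parse_args_sketch():
    # Hypothetical parser covering every `args.*` attribute used by main()
    ap = argparse.ArgumentParser(
        description="Detectron2 image processing pipeline (sketch)")
    ap.add_argument("--input", required=True, help="image file or directory")
    ap.add_argument("--output", default="output", help="output directory")
    ap.add_argument("--config-file",
                    default="configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    ap.add_argument("--weights-file", default=None)
    ap.add_argument("--config-opts", nargs="*", default=[])
    ap.add_argument("--confidence-threshold", type=float, default=0.5)
    ap.add_argument("--gpus", type=int, default=1)
    ap.add_argument("--cpus", type=int, default=0)
    ap.add_argument("--single-process", action="store_true")
    ap.add_argument("--queue-size", type=int, default=3)
    ap.add_argument("--separate-background", action="store_true")
    ap.add_argument("--progress", action="store_true")
    return ap.parse_args()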
def main(args):
    # Create output directory if needed
    os.makedirs(args.output, exist_ok=True)

    # Create pipeline steps
    if args.input.isdigit():
        capture_video = CaptureVideo(int(args.input))
    elif os.path.isfile(args.input):
        capture_video = CaptureVideo(args.input)
    elif os.path.isdir(args.input):
        capture_video = CaptureFrames(args.input)
    else:
        # Guard against an unusable input so capture_video is never undefined
        raise FileNotFoundError(f"Invalid input: {args.input}")

    cfg = detectron.setup_cfg(
        config_file=args.config_file,
        weights_file=args.weights_file,
        config_opts=args.config_opts,
        confidence_threshold=args.confidence_threshold,
        cpu=False if args.gpus > 0 else True,
    )

    if not args.single_process:
        mp.set_start_method("spawn", force=True)
        predict = AsyncPredict(
            cfg,
            num_gpus=args.gpus,
            num_cpus=args.cpus,
            queue_size=args.queue_size,
            ordered=True,
        )
    else:
        predict = Predict(cfg)

    track_pose = TrackPose(
        link_len=args.track_link_len,
        num=args.track_num,
        mag=args.track_mag,
        match=args.track_match,
        orb_features=args.track_orb_features,
    ) if args.track_pose else None

    if args.separate_background:
        separate_background = SeparateBackground("vis_image")
        annotate_video = None
    else:
        separate_background = None
        metadata_name = cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        annotate_video = AnnotateVideo("vis_image", metadata_name,
                                       predictions=track_pose is None,
                                       pose_flows=track_pose is not None)

    display_video = DisplayVideo("vis_image") if args.display else None

    save_video = SaveVideo("vis_image",
                           os.path.join(args.output, args.out_video),
                           capture_video.fps if args.fps is None else args.fps) \
        if args.out_video else None

    # Create video processing pipeline
    pipeline = (capture_video |
                predict |
                track_pose |
                separate_background |
                annotate_video |
                display_video |
                save_video)

    # Iterate through pipeline
    try:
        for _ in tqdm(pipeline,
                      total=capture_video.frame_count if capture_video.frame_count > 0 else None,
                      disable=not args.progress):
            pass
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        # Pipeline cleanup
        if isinstance(capture_video, CaptureVideo):
            capture_video.cleanup()
        if isinstance(predict, AsyncPredict):
            predict.cleanup()
        if display_video:
            display_video.cleanup()
        if save_video:
            save_video.cleanup()
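# Several stages above (track_pose, separate_background, annotate_video,
# display_video, save_video) may be None, yet they are still composed with
# the `|` operator. That only works if the pipeline operator ignores None
# operands. The class below is a minimal sketch of how such an operator
# could be written; it is an illustrative assumption, not the project's
# actual Pipeline base class.
class PipelineStep:
    """Illustrative base class; real steps override `map` to transform data."""

    def __init__(self):
        self.source = None  # upstream step feeding this one

    def __or__(self, other):
        # Chaining with a disabled (None) stage is a no-op, which is why
        # `capture | predict | None | save` still forms a valid pipeline.
        if other is None:
            return self
        other.source = self
        return other

    def __iter__(self):
        # Pull items from the upstream step (or nothing, if this is a source)
        # and apply this step's transformation to each of them.
        upstream = self.source if self.source is not None else []
        return (self.map(data) for data in upstream)

    def map(self, data):
        return data  # identity by default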
def process_images(
    input_path,
    output_path="output",
    config_file="configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml",
    weights_file=None,
    config_opts=[],
    confidence_threshold=0.5,
    gpus=1,
    cpus=0,
    single_process=True,
    queue_size=3,
    separate_background=True,
    progress=True,
):
    """Detectron2 image processing pipeline.

    Functionized version of the `process_images.py` command line utility.

    Parameters
    ----------
    input_path : str or path
        Path to the input image file or directory.
    output_path : str or path, optional
        Path to the output directory, by default "output"
    config_file : str or path, optional
        Path to the Detectron2 config file,
        by default "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    weights_file : str or path, optional
        Path to a custom Detectron2 weights file, by default None
    config_opts : list, optional
        Model config options to modify, by default []
    confidence_threshold : float, optional
        Minimum score for instance predictions to be shown, by default 0.5
    gpus : int, optional
        Number of GPUs, by default 1
    cpus : int, optional
        Number of CPUs, by default 0
    single_process : bool, optional
        Force the pipeline to run in a single process, by default True
    queue_size : int, optional
        Queue size per process, by default 3
    separate_background : bool, optional
        Make the background transparent, by default True
    progress : bool, optional
        Display progress, by default True
    """
    # Create output directory if needed
    os.makedirs(output_path, exist_ok=True)

    # Create pipeline steps
    capture_images = (CaptureImages(input_path)
                      if os.path.isdir(input_path)
                      else CaptureImage(input_path))

    cfg = detectron.setup_cfg(
        config_file=config_file,
        weights_file=weights_file,
        config_opts=config_opts,
        confidence_threshold=confidence_threshold,
        cpu=False if gpus > 0 else True,
    )

    if not single_process:
        mp.set_start_method("spawn", force=True)
        predict = AsyncPredict(
            cfg,
            num_gpus=gpus,
            num_cpus=cpus,
            queue_size=queue_size,
            ordered=False,
        )
    else:
        predict = Predict(cfg)

    if separate_background:
        separate_background = SeparateBackground("vis_image")
        annotate_image = None
    else:
        separate_background = None
        metadata_name = cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        annotate_image = AnnotateImage("vis_image", metadata_name)

    save_image = SaveImage("vis_image", output_path)

    # Create image processing pipeline
    pipeline = (capture_images |
                predict |
                separate_background |
                annotate_image |
                save_image)

    # Iterate through pipeline
    try:
        for _ in tqdm(pipeline, disable=not progress):
            pass
    except StopIteration:
        return
    except KeyboardInterrupt:
        return
    finally:
        # Pipeline cleanup
        if isinstance(predict, AsyncPredict):
            predict.cleanup()
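# A possible invocation of process_images(); the input path below is a
# hypothetical placeholder, and only a few keyword arguments are shown.
# With separate_background=False the pipeline draws prediction annotations
# instead of cutting out the background.
if __name__ == "__main__":
    process_images(
        "path/to/images",           # input file or directory (placeholder)
        output_path="output",
        confidence_threshold=0.5,
        gpus=1,
        separate_background=False,  # annotate predictions rather than mask background
    )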