def main():
    """Prepares data for the person recognition demo.

    Parses command-line arguments, loads the configuration, builds the
    person detector (from file / segmentation model / detection model)
    and the re-identification model, then runs the multi camera multi
    person tracking demo. Exits with status 1 on invalid detector
    combination or missing configuration.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    # NOTE: implicit string concatenation (not backslash continuation inside
    # the literal) keeps help/description text free of stray whitespace.
    parser = argparse.ArgumentParser(
        description='Multi camera multi person tracking live demo script')
    parser.add_argument('-i', type=str, nargs='+', required=True,
                        help='Input sources (indexes of cameras or paths '
                             'to video files)')
    parser.add_argument('--config', type=str,
                        default=os.path.join(current_dir, 'config.py'),
                        required=False, help='Configuration file')
    parser.add_argument('--detections', type=str,
                        help='JSON file with bounding boxes')
    parser.add_argument('-m', '--m_detector', type=str, required=False,
                        help='Path to the person detection model')
    parser.add_argument('--t_detector', type=float, default=0.6,
                        help='Threshold for the person detection model')
    parser.add_argument('--m_segmentation', type=str, required=False,
                        help='Path to the person instance segmentation model')
    parser.add_argument('--t_segmentation', type=float, default=0.6,
                        help='Threshold for person instance segmentation model')
    parser.add_argument('--m_reid', type=str, required=True,
                        help='Path to the person re-identification model')
    parser.add_argument('--output_video', type=str, default='', required=False,
                        help='Optional. Path to output video')
    parser.add_argument('--history_file', type=str, default='', required=False,
                        help='Optional. Path to file in JSON format to save '
                             'results of the demo')
    parser.add_argument('--save_detections', type=str, default='',
                        required=False,
                        help='Optional. Path to file in JSON format to save '
                             'bounding boxes')
    parser.add_argument("--no_show", help="Optional. Don't show output",
                        action='store_true')
    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-l', '--cpu_extension', type=str, default=None,
                        help='MKLDNN (CPU)-targeted custom layers. Absolute '
                             'path to a shared library with the kernels impl.')
    parser.add_argument('-u', '--utilization_monitors', default='', type=str,
                        help='Optional. List of monitors to show initially.')

    args = parser.parse_args()
    # check_detectors validates that exactly one detection source was given.
    if check_detectors(args) != 1:
        sys.exit(1)

    if args.config:
        log.info('Reading configuration file {}'.format(args.config))
        config = read_py_config(args.config)
    else:
        log.error('No configuration file specified. Please specify parameter \'--config\'')
        sys.exit(1)

    random.seed(config['random_seed'])
    capture = MulticamCapture(args.i)

    log.info("Creating Inference Engine")
    ie = IECore()

    # Detections priority: pre-computed JSON file > instance segmentation
    # model > plain detection model.
    if args.detections:
        person_detector = DetectionsFromFileReader(args.detections,
                                                   args.t_detector)
    elif args.m_segmentation:
        person_detector = MaskRCNN(ie, args.m_segmentation,
                                   args.t_segmentation,
                                   args.device, args.cpu_extension,
                                   capture.get_num_sources())
    else:
        person_detector = Detector(ie, args.m_detector, args.t_detector,
                                   args.device, args.cpu_extension,
                                   capture.get_num_sources())

    if args.m_reid:
        person_recognizer = VectorCNN(ie, args.m_reid, args.device,
                                      args.cpu_extension)
    else:
        person_recognizer = None

    run(args, config, capture, person_detector, person_recognizer)
    log.info('Demo finished successfully')
def main():
    """Prepares data for the object tracking demo.

    Parses command-line arguments, loads the configuration, builds the
    object detector (from file / segmentation model / detection model)
    and the re-identification model on OpenVINO Runtime, then runs the
    multi camera multi object tracking demo. Exits with status 1 on
    invalid detector combination or missing configuration.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    # NOTE: implicit string concatenation (not backslash continuation inside
    # the literal) keeps help/description text free of stray whitespace.
    parser = argparse.ArgumentParser(
        description='Multi camera multi object tracking live demo script')
    parser.add_argument('-i', '--input', required=True, nargs='+',
                        help='Required. Input sources (indexes of cameras '
                             'or paths to video files)')
    parser.add_argument('--loop', default=False, action='store_true',
                        help='Optional. Enable reading the input in a loop')
    parser.add_argument('--config', type=str,
                        default=os.path.join(current_dir, 'configs/person.py'),
                        required=False, help='Configuration file')
    parser.add_argument('--detections', type=str,
                        help='JSON file with bounding boxes')
    parser.add_argument('-m', '--m_detector', type=str, required=False,
                        help='Path to the object detection model')
    parser.add_argument('--t_detector', type=float, default=0.6,
                        help='Threshold for the object detection model')
    parser.add_argument('--m_segmentation', type=str, required=False,
                        help='Path to the object instance segmentation model')
    parser.add_argument('--t_segmentation', type=float, default=0.6,
                        help='Threshold for object instance segmentation model')
    parser.add_argument('--m_reid', type=str, required=True,
                        help='Required. Path to the object re-identification model')
    parser.add_argument('--output_video', type=str, default='', required=False,
                        help='Optional. Path to output video')
    parser.add_argument('--history_file', type=str, default='', required=False,
                        help='Optional. Path to file in JSON format to save '
                             'results of the demo')
    parser.add_argument('--save_detections', type=str, default='',
                        required=False,
                        help='Optional. Path to file in JSON format to save '
                             'bounding boxes')
    parser.add_argument("--no_show", help="Optional. Don't show output",
                        action='store_true')
    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-u', '--utilization_monitors', default='', type=str,
                        help='Optional. List of monitors to show initially.')

    args = parser.parse_args()
    # check_detectors validates that exactly one detection source was given.
    if check_detectors(args) != 1:
        sys.exit(1)

    if args.config:
        log.debug('Reading config from {}'.format(args.config))
        config = read_py_config(args.config)
    else:
        log.error('No configuration file specified. Please specify parameter \'--config\'')
        sys.exit(1)

    random.seed(config.random_seed)
    capture = MulticamCapture(args.input, args.loop)

    log.info('OpenVINO Runtime')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()

    # Detections priority: pre-computed JSON file > instance segmentation
    # model > plain detection model.
    if args.detections:
        object_detector = DetectionsFromFileReader(args.detections,
                                                   args.t_detector)
    elif args.m_segmentation:
        object_detector = MaskRCNN(core, args.m_segmentation,
                                   config.obj_segm.trg_classes,
                                   args.t_segmentation,
                                   args.device, capture.get_num_sources())
    else:
        object_detector = Detector(core, args.m_detector,
                                   config.obj_det.trg_classes,
                                   args.t_detector,
                                   args.device, capture.get_num_sources())

    if args.m_reid:
        object_recognizer = VectorCNN(core, args.m_reid, args.device)
    else:
        object_recognizer = None

    run(args, config, capture, object_detector, object_recognizer)