def run(params, capture, detector, reid):
    """Main processing loop: grab frames, detect, track, visualize, record.

    Args:
        params: parsed CLI arguments; reads .config, .output_video,
            .no_show and .history_file.
        capture: multi-source frame grabber (MulticamCapture-like); must
            expose get_num_sources() and .captures.
        detector: detector exposing get_detections(frames).
        reid: re-identification feature extractor passed to the tracker
            (may be None).
    """
    win_name = 'Multi camera tracking'
    config = {}
    if params.config:  # truthiness instead of len() per PEP 8
        config = read_py_config(params.config)

    tracker = MultiCameraTracker(capture.get_num_sources(), reid, **config)

    # Frame grabbing runs in a background thread; FramesThreadBody is a
    # callable object used directly as the thread target.
    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if params.output_video:
        # Sources are tiled horizontally into a fixed 1920x1080 canvas.
        video_output_size = (1920 // capture.get_num_sources(), 1080)
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        output_video = cv.VideoWriter(params.output_video, fourcc, 24.0,
                                      video_output_size)
    else:
        output_video = None

    while thread_body.process:
        start = time.time()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None
        if frames is None:
            # Queue was empty; poll again.
            continue

        all_detections = detector.get_detections(frames)
        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            # Each detection is (bbox, confidence[, mask]); keep bboxes,
            # and masks when the detector provides them.
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        # Guard against zero elapsed time (fast iteration / coarse clock),
        # which would raise ZeroDivisionError in the original code.
        elapsed = time.time() - start
        fps = round(1 / elapsed, 1) if elapsed > 0 else 0.0
        vis = visualize_multicam_detections(frames, tracked_objects, fps)
        if not params.no_show:
            cv.imshow(win_name, vis)
            if cv.waitKey(1) == 27:  # ESC stops the demo
                break
        if output_video is not None:
            output_video.write(cv.resize(vis, video_output_size))

    # Signal the grabbing thread to stop and wait for it.
    thread_body.process = False
    frames_thread.join()

    if params.history_file:
        history = tracker.get_all_tracks_history()
        with open(params.history_file, 'w') as outfile:
            json.dump(history, outfile)
def main():
    """Prepares data for the person recognition demo."""
    # NOTE: the docstring above was originally placed after this statement,
    # making it a dead string expression rather than a real docstring.
    current_dir = os.path.dirname(os.path.abspath(__file__))

    parser = argparse.ArgumentParser(description='Multi camera multi person '
                                                 'tracking live demo script')
    parser.add_argument('-i', type=str, nargs='+', required=True,
                        help='Input sources (indexes of cameras or paths '
                             'to video files)')
    parser.add_argument('--config', type=str,
                        default=os.path.join(current_dir, 'config.py'),
                        required=False, help='Configuration file')
    parser.add_argument('--detections', type=str,
                        help='JSON file with bounding boxes')
    parser.add_argument('-m', '--m_detector', type=str, required=False,
                        help='Path to the person detection model')
    parser.add_argument('--t_detector', type=float, default=0.6,
                        help='Threshold for the person detection model')
    parser.add_argument('--m_segmentation', type=str, required=False,
                        help='Path to the person instance segmentation model')
    parser.add_argument('--t_segmentation', type=float, default=0.6,
                        help='Threshold for person instance segmentation model')
    parser.add_argument('--m_reid', type=str, required=True,
                        help='Path to the person re-identification model')
    parser.add_argument('--output_video', type=str, default='', required=False,
                        help='Optional. Path to output video')
    parser.add_argument('--history_file', type=str, default='', required=False,
                        help='Optional. Path to file in JSON format to save '
                             'results of the demo')
    parser.add_argument('--save_detections', type=str, default='',
                        required=False,
                        help='Optional. Path to file in JSON format to save '
                             'bounding boxes')
    parser.add_argument("--no_show", help="Optional. Don't show output",
                        action='store_true')
    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-l', '--cpu_extension', type=str, default=None,
                        help='MKLDNN (CPU)-targeted custom layers.Absolute '
                             'path to a shared library with the kernels impl.')
    parser.add_argument('-u', '--utilization_monitors', default='', type=str,
                        help='Optional. List of monitors to show initially.')
    args = parser.parse_args()

    # Exactly one detection source (file / detector / segmentation model)
    # must be configured.
    if check_detectors(args) != 1:
        sys.exit(1)

    if args.config:
        log.info('Reading configuration file {}'.format(args.config))
        config = read_py_config(args.config)
    else:
        log.error('No configuration file specified. Please specify parameter \'--config\'')
        sys.exit(1)

    random.seed(config['random_seed'])
    capture = MulticamCapture(args.i)

    log.info("Creating Inference Engine")
    ie = IECore()

    if args.detections:
        # Pre-computed bounding boxes from a JSON file.
        person_detector = DetectionsFromFileReader(args.detections,
                                                   args.t_detector)
    elif args.m_segmentation:
        person_detector = MaskRCNN(ie, args.m_segmentation,
                                   args.t_segmentation, args.device,
                                   args.cpu_extension,
                                   capture.get_num_sources())
    else:
        person_detector = Detector(ie, args.m_detector, args.t_detector,
                                   args.device, args.cpu_extension,
                                   capture.get_num_sources())

    if args.m_reid:
        person_recognizer = VectorCNN(ie, args.m_reid, args.device,
                                      args.cpu_extension)
    else:
        person_recognizer = None

    # NOTE(review): run() as defined in this file takes
    # (params, capture, detector, reid[, jot]) and reads params.config
    # itself — passing `config` as a positional argument here looks
    # inconsistent with that signature; confirm against the intended run().
    run(args, config, capture, person_detector, person_recognizer)
    log.info('Demo finished successfully')
def run(params, capture, detector, reid, jot):
    """Main loop of the TEAM_KOTLIN demo: grab, detect, track, report.

    Args:
        params: parsed CLI arguments; reads .config, .output_video,
            .no_show and .history_file.
        capture: multi-source frame grabber; must expose get_num_sources()
            and .captures.
        detector: detector exposing get_detections(frames).
        reid: re-identification feature extractor (may be None).
        jot: handler that receives per-iteration results via check_jot().
    """
    win_name = 'TEAM_KOTLIN'
    config = {}
    if params.config:  # truthiness instead of len() per PEP 8
        config = read_py_config(params.config)

    tracker = MultiCameraTracker(capture.get_num_sources(), reid, **config)

    # Frame grabbing runs in a background thread; FramesThreadBody is a
    # callable object used directly as the thread target.
    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if params.output_video:
        video_output_size = (1920 // capture.get_num_sources(), 1080)
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        output_video = cv.VideoWriter(params.output_video, fourcc, 24.0,
                                      video_output_size)
    else:
        output_video = None

    print("##################################################################")
    while thread_body.process:
        start = time.time()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None
        if frames is None:
            continue

        # Per source: a list of ((left, right, top, bot), confidence[, mask])
        # tuples. Source order is preserved: detections of the first video
        # come first, those of the second video after them.
        all_detections = detector.get_detections(frames)
        # One (initially empty) mask list per detection source.
        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        feature_data = tracker.process(frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        # Added 2020-05-11: strip bulky per-track fields before forwarding.
        # pop(key, None) avoids the KeyError the original del raised when a
        # field was absent.
        track_data = copy.deepcopy(feature_data)
        for track in track_data:
            for key in ('features', 'boxes', 'timestamps', 'cam_id'):
                track.pop(key, None)

        # Forward the current state to the jot-table handler.
        jot.check_jot(tracked_objects, frames, track_data)

        # Guard against zero elapsed time (fast iteration / coarse clock),
        # which would raise ZeroDivisionError in the original code.
        elapsed = time.time() - start
        fps = round(1 / elapsed, 1) if elapsed > 0 else 0.0
        dates = datetime.now()  # timestamp for the visualization overlay
        vis = visualize_multicam_detections(frames, tracked_objects, fps,
                                            dates)
        if not params.no_show:
            cv.imshow(win_name, vis)
            if cv.waitKey(1) == 27:  # ESC stops the demo
                break
        if output_video is not None:
            output_video.write(cv.resize(vis, video_output_size))

    # Signal the grabbing thread to stop and wait for it.
    thread_body.process = False
    frames_thread.join()

    if params.history_file:
        history = tracker.get_all_tracks_history()
        with open(params.history_file, 'w') as outfile:
            json.dump(history, outfile)
    print("##################################################################")
def main():
    """Prepares data for the object tracking demo."""
    # NOTE: the docstring above was originally placed after this statement,
    # making it a dead string expression rather than a real docstring.
    current_dir = os.path.dirname(os.path.abspath(__file__))

    parser = argparse.ArgumentParser(description='Multi camera multi object '
                                                 'tracking live demo script')
    parser.add_argument('-i', '--input', required=True, nargs='+',
                        help='Required. Input sources (indexes of cameras '
                             'or paths to video files)')
    parser.add_argument('--loop', default=False, action='store_true',
                        help='Optional. Enable reading the input in a loop')
    parser.add_argument('--config', type=str,
                        default=os.path.join(current_dir, 'configs/person.py'),
                        required=False, help='Configuration file')
    parser.add_argument('--detections', type=str,
                        help='JSON file with bounding boxes')
    parser.add_argument('-m', '--m_detector', type=str, required=False,
                        help='Path to the object detection model')
    parser.add_argument('--t_detector', type=float, default=0.6,
                        help='Threshold for the object detection model')
    parser.add_argument('--m_segmentation', type=str, required=False,
                        help='Path to the object instance segmentation model')
    parser.add_argument('--t_segmentation', type=float, default=0.6,
                        help='Threshold for object instance segmentation model')
    parser.add_argument('--m_reid', type=str, required=True,
                        help='Required. Path to the object re-identification '
                             'model')
    parser.add_argument('--output_video', type=str, default='', required=False,
                        help='Optional. Path to output video')
    parser.add_argument('--history_file', type=str, default='', required=False,
                        help='Optional. Path to file in JSON format to save '
                             'results of the demo')
    parser.add_argument('--save_detections', type=str, default='',
                        required=False,
                        help='Optional. Path to file in JSON format to save '
                             'bounding boxes')
    parser.add_argument("--no_show", help="Optional. Don't show output",
                        action='store_true')
    parser.add_argument('-d', '--device', type=str, default='CPU')
    parser.add_argument('-u', '--utilization_monitors', default='', type=str,
                        help='Optional. List of monitors to show initially.')
    args = parser.parse_args()

    # Exactly one detection source (file / detector / segmentation model)
    # must be configured.
    if check_detectors(args) != 1:
        sys.exit(1)

    if args.config:
        log.debug('Reading config from {}'.format(args.config))
        config = read_py_config(args.config)
    else:
        log.error('No configuration file specified. Please specify parameter \'--config\'')
        sys.exit(1)

    random.seed(config.random_seed)
    capture = MulticamCapture(args.input, args.loop)

    log.info('OpenVINO Runtime')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()

    if args.detections:
        # Pre-computed bounding boxes from a JSON file.
        object_detector = DetectionsFromFileReader(args.detections,
                                                   args.t_detector)
    elif args.m_segmentation:
        object_detector = MaskRCNN(core, args.m_segmentation,
                                   config.obj_segm.trg_classes,
                                   args.t_segmentation, args.device,
                                   capture.get_num_sources())
    else:
        object_detector = Detector(core, args.m_detector,
                                   config.obj_det.trg_classes,
                                   args.t_detector, args.device,
                                   capture.get_num_sources())

    if args.m_reid:
        object_recognizer = VectorCNN(core, args.m_reid, args.device)
    else:
        object_recognizer = None

    # NOTE(review): run() as defined in this file takes
    # (params, capture, detector, reid[, jot]) and reads params.config
    # itself — passing `config` as a positional argument here looks
    # inconsistent with that signature; confirm against the intended run().
    run(args, config, capture, object_detector, object_recognizer)