def visualize(image_list, results, labels, output_dir='output/', threshold=0.5):
    # visualize the predict result
    start_idx = 0
    for idx, image_file in enumerate(image_list):
        # slice this image's detections out of the batched results
        im_bboxes_num = results['boxes_num'][idx]
        im_results = {}
        if 'boxes' in results:
            im_results['boxes'] = results['boxes'][start_idx:start_idx + im_bboxes_num, :]
        if 'label' in results:
            im_results['label'] = results['label'][start_idx:start_idx + im_bboxes_num]
        if 'score' in results:
            im_results['score'] = results['score'][start_idx:start_idx + im_bboxes_num]
        start_idx += im_bboxes_num
        im = visualize_box_mask(image_file, im_results, labels, threshold=threshold)
        img_name = os.path.split(image_file)[-1]
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        out_path = os.path.join(output_dir, img_name)
        im.save(out_path, quality=95)
        print("save result to: " + out_path)

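# A minimal usage sketch for the batched visualize() above; not part of the
# original code. The results dict is hand-built to show the layout the function
# expects: detections for all images stacked in 'boxes', with 'boxes_num'
# giving how many rows belong to each image. The box row format
# ([class_id, score, x1, y1, x2, y2]), file names, and label list are
# assumptions for illustration only.
import numpy as np

image_list = ['demo/img1.jpg', 'demo/img2.jpg']  # hypothetical paths
results = {
    'boxes': np.array([
        [0, 0.91, 10, 20, 110, 220],   # two detections for img1.jpg
        [1, 0.64, 30, 40, 130, 240],
        [0, 0.88, 15, 25, 115, 225],   # one detection for img2.jpg
    ], dtype='float32'),
    'boxes_num': np.array([2, 1]),     # rows of 'boxes' per image, summing to 3
}
labels = ['person', 'car']             # placeholder label list
visualize(image_list, results, labels, output_dir='output/', threshold=0.5)
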
def predict_video(model_dir,
                  video_file="0",
                  threshold=0.2,
                  use_gpu=True,
                  run_mode='fluid',
                  output_dir='output/'):
    if video_file == "0":
        video_file = 0  # fall back to the default camera
    detector = Detector(model_dir, use_gpu=use_gpu, run_mode=run_mode)
    capture = cv2.VideoCapture(video_file)
    fps = 25
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video_name = 'camera.mp4'
    if video_file != 0:
        video_name = os.path.split(video_file)[-1]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    out_path = os.path.join(output_dir, video_name)
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    index = 1
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        print('detect frame:%d' % index)
        index += 1
        results = detector.predict(frame, threshold)
        im = visualize_box_mask(
            frame,
            results,
            detector.config.labels,
            mask_resolution=detector.config.mask_resolution)
        im = np.array(im)
        cv2.imshow('Mask Detection', im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        writer.write(im)
    writer.release()

def predict_video():
    detector = Detector(
        FLAGS.model_dir, use_gpu=FLAGS.use_gpu, run_mode=FLAGS.run_mode)
    capture = cv2.VideoCapture(FLAGS.video_file)
    fps = 30
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video_name = os.path.split(FLAGS.video_file)[-1]
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    out_path = os.path.join(FLAGS.output_dir, video_name)
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    index = 1
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        print('detect frame:%d' % index)
        index += 1
        results = detector.predict(frame, FLAGS.threshold)
        im = visualize_box_mask(
            frame,
            results,
            detector.config.labels,
            mask_resolution=detector.config.mask_resolution)
        im = np.array(im)
        writer.write(im)
    writer.release()

def predict_video(detector, camera_id):
    if camera_id != -1:
        capture = cv2.VideoCapture(camera_id)
        video_name = 'output.mp4'
    else:
        capture = cv2.VideoCapture(FLAGS.video_file)
        video_name = os.path.split(FLAGS.video_file)[-1]
    fps = 30
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    out_path = os.path.join(FLAGS.output_dir, video_name)
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    index = 1
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        print('detect frame:%d' % index)
        index += 1
        results = detector.predict(frame, FLAGS.threshold)
        im = visualize_box_mask(
            frame,
            results,
            detector.config.labels,
            mask_resolution=detector.config.mask_resolution)
        im = np.array(im)
        writer.write(im)
        if camera_id != -1:
            cv2.imshow('Mask Detection', im)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    writer.release()

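# A possible command-line entry point for the predict_video(detector, camera_id)
# variant above; not part of the original code. The flag names mirror the ones
# the function reads from FLAGS (video_file, output_dir, threshold) plus the
# arguments the Detector constructor takes elsewhere in this file (model_dir,
# use_gpu, run_mode). The defaults and the "--camera_id -1 means use
# --video_file" convention are assumptions.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, required=True)
    parser.add_argument('--video_file', type=str, default='')
    parser.add_argument('--camera_id', type=int, default=-1)
    parser.add_argument('--output_dir', type=str, default='output/')
    parser.add_argument('--threshold', type=float, default=0.5)
    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument('--run_mode', type=str, default='fluid')
    FLAGS = parser.parse_args()

    detector = Detector(
        FLAGS.model_dir, use_gpu=FLAGS.use_gpu, run_mode=FLAGS.run_mode)
    predict_video(detector, FLAGS.camera_id)
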
def visualize(image_file, results, labels, output_dir='output/', threshold=0.5):
    # visualize the predict result
    im = visualize_box_mask(image_file, results, labels, threshold=threshold)
    img_name = os.path.split(image_file)[-1]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    out_path = os.path.join(output_dir, img_name)
    im.save(out_path, quality=95)
    print("save result to: " + out_path)

def pi_visualize(image_file, results, labels, mask_resolution=14,
                 video_writer=None):
    # visualize the predict result
    im = visualize_box_mask(
        image_file, results, labels, mask_resolution=mask_resolution)
    # convert the PIL image to a numpy array so OpenCV can display/encode it
    img = numpy.asarray(im)
    cv2.imshow("mask", img)
    if video_writer is not None:
        video_writer.write(img)
    cv2.waitKey(1)

def predict_video(self, video_file, camera_id):
    video_out_name = 'output.mp4'
    if camera_id != -1:
        capture = cv2.VideoCapture(camera_id)
    else:
        capture = cv2.VideoCapture(video_file)
        video_out_name = os.path.split(video_file)[-1]
    # Get video info: resolution, fps, frame count
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(capture.get(cv2.CAP_PROP_FPS))
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    print("fps: %d, frame_count: %d" % (fps, frame_count))
    if not os.path.exists(self.output_dir):
        os.makedirs(self.output_dir)
    out_path = os.path.join(self.output_dir, video_out_name)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    index = 1
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        print('detect frame: %d' % index)
        index += 1
        results = self.predict_image([frame], visual=False)
        im = visualize_box_mask(
            frame, results, self.pred_config.labels, threshold=self.threshold)
        im = np.array(im)
        writer.write(im)
        if camera_id != -1:
            cv2.imshow('Mask Detection', im)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    writer.release()

def predict_publish():
    """
    Handles the video analytics pipeline: captures frames from a video feed,
    applies object detection, analyzes the detections for the event of
    interest, and annotates the frame visually. The processed frame and its
    metadata are published to Redis. Runs forever.

    Args:
        Nothing
    Returns:
        Nothing
    """
    index = 1
    detector = paddle_detect.Detector(
        FLAGS.model_dir, use_gpu=FLAGS.use_gpu, run_mode=FLAGS.run_mode)
    event_type = int(prd.get("event_type"))
    app_mode = int(prd.get("app_mode"))
    event_manager = event_alert.EventManager(FLAGS.app_config_dir, event_type)
    eventName = event_manager.getEventName(event_type)
    event_type_prev = event_type
    app_mode_prev = app_mode
    capture = cv2.VideoCapture(FLAGS.video_feed, cv2.CAP_FFMPEG)
    while True:
        success, image = capture.read()
        print('Reading')
        if not success:
            break
        print('Reading Successful')
        # Poll Redis for mode/event changes; on a change, refresh the ROI
        # settings and publish the current raw frame under the event name.
        # Leave this loop once app_mode != 1 so detection can run.
        while True:
            app_mode = int(prd.get("app_mode"))
            event_type = int(prd.get("event_type"))
            if event_type != event_type_prev or app_mode != app_mode_prev:
                event_manager.updateROISettings(event_type)
                eventName = event_manager.getEventName(event_type)
                retval, buffer = cv2.imencode('.jpg', image)
                pic_str = base64.b64encode(buffer)
                pic_str = pic_str.decode()
                prd.set(eventName, pic_str)
                event_type_prev = event_type
                app_mode_prev = app_mode
            if app_mode != 1:
                break
        print('detect frame:%d' % index)
        index += 1
        results = detector.predict(image, FLAGS.threshold)
        # print(results['boxes'])
        im = visualize_box_mask(
            image,
            results,
            detector.config.labels,
            mask_resolution=detector.config.mask_resolution)
        im = np.array(im)
        img = im
        # Dispatch to the analytics routine for the selected event type.
        if event_type == 2:
            img, result, appMetadata = event_manager.monitorVehicle(
                results['boxes'], im)
        elif event_type == 1:
            img, result, appMetadata = event_manager.alertStrangerIntrusion(
                results['boxes'], im)
        elif event_type == 0:
            img, result, appMetadata = event_manager.findEventStat(
                results['boxes'], im, vehicleThres=5)
        else:
            img, result, appMetadata = event_manager.findEventStat(
                results['boxes'], im, vehicleThres=5)
        # Publish the annotated frame and its metadata to the Redis stream.
        retval, buffer = cv2.imencode('.jpg', img)
        pic_str = base64.b64encode(buffer)
        pic_str = pic_str.decode()
        appMetadatastr = json.dumps(appMetadata)
        msg = {'appMetadata': appMetadatastr, 'image': pic_str}
        _id = prd.xadd('camera:0', msg, maxlen=1000)
        event_type_prev = event_type
        if FLAGS.local_debug:
            cv2.imshow("Evento", img)
            cv2.waitKey(10)

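# A minimal consumer sketch for the stream that predict_publish() writes to;
# not part of the original code. It assumes a redis-py client reachable on
# localhost and the message layout used above: an 'appMetadata' JSON string
# and a base64-encoded JPEG under 'image' on stream 'camera:0'. Field names
# come back as bytes from redis-py, hence the b'...' keys.
import base64
import json

import cv2
import numpy as np
import redis

r = redis.Redis(host='localhost', port=6379)
last_id = '$'  # only read entries added after we connect
while True:
    streams = r.xread({'camera:0': last_id}, block=0, count=1)
    for _stream, entries in streams:
        for entry_id, fields in entries:
            last_id = entry_id
            meta = json.loads(fields[b'appMetadata'])
            jpg = base64.b64decode(fields[b'image'])
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            print(entry_id, meta)
            cv2.imshow('camera:0', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                raise SystemExit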