def doFaceTask(self, _task):
    """Extract embeddings for a batch of faces and forward the good ones.

    Emits one Face task per face passing the quality check; if none pass,
    emits a single Frame task so downstream stages still see the frame.
    """
    # detect all at once, no cuda memory may occur
    data = _task.depackage()
    faces = data['faces']
    preprocessed_images = data['images']
    frame = data['frame']
    frame_info = data['frame_info']
    logger.info("Extract: %s, #Faces: %s" % (frame_info, len(faces)))
    # timer.extract_start()
    embs, coeffs = self.extractor.extract_features(preprocessed_images)
    # timer.extract_done()
    forwarded_any = False
    for idx, face in enumerate(faces):
        face.embedding = embs[idx, :]
        face.set_face_quality(coeffs[idx])
        if not face.is_good():
            continue
        face_task = task.Task(task.Task.Face)
        face_task.package(face=face, frame=frame, frame_info=frame_info)
        self.putResult(face_task)
        forwarded_any = True
    if not forwarded_any:
        frame_task = task.Task(task.Task.Frame)
        frame_task.package(frame=frame, frame_info=frame_info)
        self.putResult(frame_task)
def face_verification(self):
    """Run the verification pipeline on the two image sets from the request.

    Returns a success response with the pipeline result, or an error
    response when either input set is empty or no faces were found.
    """
    images_1st, images_2nd = self.request_data_parser()
    if not (images_1st and images_2nd):
        return self.response_error('both input field must not be empty')
    images_1st = self.calibrate_angle(images_1st)
    images_2nd = self.calibrate_angle(images_2nd)
    # feed both sets, tagging each frame with its originating set
    for tag, images in (('images_1st', images_1st),
                        ('images_2nd', images_2nd)):
        for image in images:
            frame_task = task.Task(task.Task.Frame)
            frame_task.package(frame=image, frame_info=tag)
            self.pipeline.put(frame_task)
    event_task = task.Task(task.Task.Event)
    event_task.package(frame_info_1='images_1st', frame_info_2='images_2nd')
    self.pipeline.put(event_task)
    # get result from pipeline
    data = self.pipeline.get()
    status = data.pop('status')
    if status == Config.Status.SUCCESSFUL:
        return self.response_success(data)
    message = 'Faces not found'
    return self.response_error(message)
def run_demo(self):
    """Main demo loop: feed registration events and camera frames into the pipeline.

    Prefers queued registration requests; when the queue is empty, reads the
    next camera frame instead. Stops after MAX_RETRY consecutive empty reads,
    then signals pipeline shutdown by putting None.

    Bug fix: the original used a bare ``except:``, which also swallows
    KeyboardInterrupt and SystemExit; narrowed to ``except Exception`` so
    interrupts still terminate the loop.
    """
    _pipeline = self.build_pipeline()
    print('Begin')
    retry = 0
    while True:
        try:
            face, client_id, send_at = self.register_queue.get(False)
            _task = task.Task(task.Task.Event)
            _task.package(face=face, client_id=client_id, sent_at=send_at)
        except Exception:  # register queue empty: fall back to a frame
            frame, frame_info = self.frame_reader.next_frame()
            if frame is None:
                retry += 1
                print('Retry %s times' % retry)
                if retry > DemoRegisterServer.MAX_RETRY:
                    break
                continue
            _task = task.Task(task.Task.Frame)
            _task.package(frame=frame, frame_info=frame_info)
        _pipeline.put(_task)
    print("No more frame, stop")
    _pipeline.put(None)
def handleEarlyQualifiedTrackers(self):
    """Forward every early-qualified tracker downstream as a Face task."""
    for tracker in self.tracker_manager.get_early_qualified_trackers():
        tracker_task = task.Task(task.Task.Face)
        tracker_task.package(tracker=tracker)
        self.putResult(tracker_task)
def doFaceTask(self, _task):
    """Extract embeddings for a tracker's elements in fixed-size batches.

    Only handles TASK_EXTRACTION tasks; other task types are ignored.
    Updated tracker is forwarded downstream as a Face task.
    """
    start = time.time()
    data = _task.depackage()
    if data['type'] != Config.Worker.TASK_EXTRACTION:
        return
    tracker = data['tracker']
    nrof_elements = len(tracker.elements)
    # here we do the batch embs extracting
    step = Config.Track.NUM_IMAGE_PER_EXTRACT
    for offset in range(0, nrof_elements, step):
        # last batch may be smaller than the configured step
        batch_size = min(nrof_elements - offset, step)
        preprocessed_images = []
        for idx in range(offset, offset + batch_size):
            element = tracker.elements[idx]
            face_image = CropperUtils.reverse_display_face(
                element.face_image, element.str_padded_bbox)
            preprocessed_images.append(
                self.preprocessor.process(face_image))
        embeddings_array, _ = self.extractor.extract_features_all_at_once(
            preprocessed_images)
        tracker.update_embeddings(embeddings_array, offset, batch_size)
    result_task = task.Task(task.Task.Face)
    result_task.package(tracker=tracker)
    self.putResult(result_task)
    print(self.name, time.time() - start)
def doFaceTask(self, _task):
    """Build PedestrianInfo objects for the detections, attach embeddings,
    and forward the whole batch as a single Face task."""
    # detect all at once, no cuda memory may occur
    data = _task.depackage()
    bbs = data['bbs']
    scores = data['scores']
    frame = data['frame']
    frame_info = data['frame_info']
    # timer.preprocess_start()
    faces = []
    for bb, score in zip(bbs, scores):
        bb_array = np.asarray(bb)
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bb_array)
        faces.append(
            detection.PedestrianInfo(bb_array, frame_info, display_face,
                                     padded_bb_str, score))
    # logger.info("Extract: %s, #Faces: %s" % (frame_info, len(faces)))
    # timer.extract_start()
    embs = self.encoder(frame, bbs_to_tlwhs(faces))
    face_infos = []
    for emb, face in zip(embs, faces):
        face.embedding = emb
        face_infos.append(face)
    result_task = task.Task(task.Task.Face)
    result_task.package(faces=face_infos)
    self.putResult(result_task)
def doFaceTask(self, _task):
    """Extract embeddings (and optional quality coefficients) for cropped faces.

    Bug fix: gate extraction on ``preprocessed_images.size > 0`` instead of
    ``.any()`` — ``.any()`` is False for an all-zero batch, which would
    silently drop valid (dark/blank) face crops. This also matches the
    emptiness check used by the other extraction worker in this file.
    """
    # detect all at once, no cuda memory may occur
    data = _task.depackage()
    faces = data['faces']
    preprocessed_images = data['images']
    preprocessed_coeff_images = data['coeff_images']
    frame = data['frame']
    frame_info = data['frame_info']
    logger.info("Extract: %s, #Faces: %s" % (frame_info, len(faces)))
    # timer.extract_start()
    # TODO: we can use only one extracter
    face_infos = []
    if preprocessed_images.size > 0:
        embs, _ = self.embs_extractor.extract_features_all_at_once(
            preprocessed_images)
        # default quality score: accept everything unless the filter is on
        coeffs = [100] * embs.shape[0]
        if self.use_coeff_filter:
            _, coeffs = self.coeff_extractor.extract_features_all_at_once(
                preprocessed_coeff_images)
        # timer.extract_done()
        for i, face in enumerate(faces):
            face.embedding = embs[i, :]
            face.set_face_quality(coeffs[i])
            face_infos.append(face)
    _task = task.Task(task.Task.Face)
    _task.package(faces=face_infos)
    self.putResult(_task)
def handleNewFaces(self, faces):
    """Update the tracker manager with new faces; forward qualified trackers."""
    self.tracker_manager.predict()
    qualified, _ = self.tracker_manager.update(faces)
    for tracker in qualified:
        out_task = task.Task(task.Task.Face)
        out_task.package(tracker=tracker)
        self.putResult(out_task)
def doFaceTask(self, _task):
    """Crop and preprocess each detected face, then forward the batch
    (faces + preprocessed images + frame) as one Face task."""
    # start = time.time()  # TODO: Get timer
    data = _task.depackage()
    bbs = data['bbs']
    pts = data['pts']
    frame = data['frame']
    frame_info = data['frame_info']
    # timer.preprocess_start()
    faces = []
    preprocessed_images = []
    for i, bb in enumerate(bbs):
        # last entry of each bb is the detection score
        bbox, score = bb[:-1], bb[-1]
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bbox)
        faces.append(
            detection.FaceInfo(bbox, score, frame_info, display_face,
                               padded_bb_str, pts[:, i]))
        cropped_face = CropperUtils.crop_face(frame, bbox)
        preprocessed_images.append(self.preprocessor.process(cropped_face))
    preprocessed_images = np.array(preprocessed_images)
    # timer.preprocess_done()
    out_task = task.Task(task.Task.Face)
    out_task.package(faces=faces,
                     images=preprocessed_images,
                     frame=frame,
                     frame_info=frame_info)
    self.putResult(out_task)
def handleNewTracker(self, tracker):
    """Assign a face id to a brand-new tracker and forward it downstream.

    Unrecognized trackers are matched against the global matcher; genuinely
    new faces get a generated id and their element embeddings are registered
    so later trackers can match them.
    """
    if tracker.face_id == Config.Track.INIT_FACE_ID:
        predicted_face_id, _ = self.global_recognize_tracker(tracker)
        if predicted_face_id == Config.Matcher.NEW_FACE:
            predicted_face_id = tracker.generate_face_id(prefix=self.area)
            logger.info("Generated face id: %s" % (predicted_face_id))
            tracker.is_registered = False
            # register this tracker's embeddings for future global matching
            embs = [element.embedding for element in tracker.elements]
            labels = [element.image_id for element in tracker.elements]
            self.global_matcher.update(embs, labels)
        tracker.face_id = predicted_face_id
        logger.info("Recognized a new tracker face id: %s" %
                    (tracker.face_id))
        print("== %s: Recognized new tracker as: %s" %
              (self.name, tracker.face_id))
    if tracker.face_id == Config.Track.RECOGNIZED_FACE_ID:
        logger.info("Handle remaining elements of sent tracker")
        print('== %s: Passing tracker %s to query face_id' %
              (self.name, tracker.track_id))
    # NOTE(review): source indentation was ambiguous — assuming every
    # tracker is forwarded, not only RECOGNIZED_FACE_ID ones; confirm.
    out_task = task.Task(task.Task.Face)
    out_task.package(tracker=tracker, type=Config.Worker.TASK_TRACKER)
    self.putResult(out_task)
def process_stream(self, frame_reader):
    """Main function: push every FRAME_INTERVAL-th frame into the pipeline.

    Stops on the first None frame, signals shutdown with a None task, and
    releases the reader. Returns True when the stream has been consumed.
    """
    _id = ObjectId(self.dataset_id)
    _pipeline = self.build_pipeline()
    print('Begin')
    frame_counter = 0
    while frame_reader.has_next():
        print('process frame number_ ', frame_counter)
        # logger.info('process frame number_ ', frame_counter)
        frame = frame_reader.next_frame()
        if frame is None:
            break
        print('Read frame', frame_counter, frame.shape)
        # sample frames at the configured interval only
        if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
            frame_task = task.Task(task.Task.Frame)
            frame_task.package(frame=frame, frame_info=frame_counter)
            _pipeline.put(frame_task)
        frame_counter += 1
    print("Wait for executor to finish it jobs")
    _pipeline.put(None)
    frame_reader.release()
    return True
def doEventTask(self, _task):
    """Check trackers for line violations and buffer frames for an alert clip.

    Bug fixes:
    - the violation rectangle is now drawn on the copied ``image`` instead
      of the source ``frame`` (the original passed ``frame`` to
      ``cv2.rectangle``, mutating the input frame and making the
      ``np.copy`` pointless);
    - the starting-buffer trim now assigns its result — the original
      ``self.alert_images[-trim_idx:]`` computed a slice and discarded it.
    """
    data = _task.depackage()
    frame = data['frame']
    frame_info = data['frame_info']
    trackers = data['trackers']
    tlwhs = [tracker.to_tlwh().astype(np.int_) for tracker in trackers]
    # draw violations
    image = np.copy(frame)
    for tlwh in tlwhs:
        if self.violation_checker.line_violation(tlwh):
            # convert (top, left, w, h) to (top, left, bottom, right) in place
            tlbr = tlwh
            tlbr[2:] = tlbr[:2] + tlbr[2:]
            image = cv2.rectangle(image, tuple(tlbr[:2]), tuple(tlbr[2:]),
                                  (0, 0, 255), 2)
            self.alert_tick = 0
    self.alert_images.append(image)
    # in waiting time to add ending buffer
    if len(self.alert_images) > Config.ViolationAlert.BUFFER:
        self.alert_tick += 1
    # in starting buffer
    else:
        trim_idx = min(len(self.alert_images), Config.ViolationAlert.BUFFER)
        self.alert_images = self.alert_images[-trim_idx:]
    # Update alert images
    if self.alert_tick > Config.ViolationAlert.BUFFER:
        event_task = task.Task(task.Task.Event)
        event_task.package(alert_type=self.alert_type,
                           images=self.alert_images)
        self.putResult(event_task)
        self.alert_images = []
        self.alert_tick = 0
def doFaceTask(self, _task):
    """Classify a tracker's element images via the image microservice and
    clear ``is_ignored`` only when the majority prediction is positive,
    then forward the tracker."""
    data = _task.depackage()
    tracker = data['tracker']
    # is_ignored: to determine if the record is showed or not
    tracker.is_ignored = True
    image_paths = [
        os.path.join(str(tracker.track_id), element.image_id + '.jpg')
        for element in tracker.elements
    ]
    classified_result = self.requester.post_list(
        Config.MicroServices.IMAGES, image_paths)
    if classified_result is not None and 'predictions' in classified_result:
        predictions = classified_result['predictions']
        # majority vote over the per-image predictions
        prediction, _ = Counter(predictions).most_common(1)[0]
        print(self.name, 'predict %s: %s' % (tracker.track_id, prediction))
        if prediction == True:  # noqa: E712 — predictions may not be bools
            tracker.is_ignored = False
    out_task = task.Task(task.Task.Face)
    out_task.package(type=Config.Worker.TASK_TRACKER, tracker=tracker)
    self.putResult(out_task)
def doFrameTask(self, _task):
    """Detect pedestrians inside the ROI and forward detections downstream.

    Bug fix: the original called ``bbs.remove(bb)`` while iterating over
    ``bbs``, which skips the element that follows every removal; the boxes
    are now filtered into a new list. Scores are filtered alongside so they
    stay index-aligned with the surviving boxes (they previously desynced).
    Also removed the unused ``start`` timer variable.
    """
    data = _task.depackage()
    frame, frame_info = data['frame'], data['frame_info']
    frame = cv2.resize(frame, (1920, 1080))
    frame, roi = self.convert(frame, self.roi_cordinate_np_scale)
    bbs = []
    scores = []
    if self.background_subtraction.preprocess(roi):
        pil_im = Image.fromarray(frame)
        bbs, scores = self.pedestrian_detector.detect_image(pil_im)
        kept_bbs, kept_scores = [], []
        for bb, score in zip(bbs, scores):
            polygon = np.array([[(bb[0], bb[1]), (bb[0], bb[3]),
                                 (bb[2], bb[3]), (bb[2], bb[1])]], np.int32)
            # -1 means the tested point lies outside the box polygon
            if cv2.pointPolygonTest(polygon, self.centroids, False) != -1:
                kept_bbs.append(bb)
                kept_scores.append(score)
        bbs, scores = kept_bbs, kept_scores
    _task = task.Task(task.Task.Face)
    _task.package(bbs=bbs, scores=scores, frame=roi, frame_info=frame_info)
    self.putResult(_task)
    nrof_faces = len(bbs)
    if nrof_faces > 0:
        self.pedestrian_count += nrof_faces
        self.detected_frame_count += 1
def doEventTask(self, _task):
    """Flush every waiting client's buffered images as Face tasks, then
    delegate the original event downstream."""
    for client_id, images in self.waiting_images.get_all():
        batch_task = task.Task(task.Task.Face)
        batch_task.package(images=np.array(images), frame_info=client_id)
        self.putResult(batch_task)
    self.putResult(_task)
def recognition(self, input_data, images):
    """Feed a client's images through the recognition pipeline and store
    the result under that client's id."""
    # this client_id should be id to seperate between multi request that send to this same api
    print('\ngot {} images to {}'.format(len(images),
                                         input_data['actionType']))
    client_id = input_data['client_id']
    for image in images:
        frame_task = task.Task(task.Task.Frame)
        frame_task.package(frame=image, frame_info=client_id)
        self.recognition_pipeline.put(frame_task)
    # notify pipeline there is no more images and start to matching
    event_task = task.Task(task.Task.Event)
    event_task.package(**input_data)
    self.recognition_pipeline.put(event_task)
    self.result_dict[client_id] = self.recognition_pipeline.get()
def run(self):
    """Continuously read frames and push them into the pipeline.

    NOTE(review): loops forever; a None frame is skipped rather than
    treated as end-of-stream — presumably intended for live feeds, confirm.
    """
    _pipeline = self.build_pipeline()
    frame_number = 0
    while True:
        frame, frame_time = self.frame_reader.next_frame()
        if frame is None:
            continue
        frame_task = task.Task(task.Task.Frame)
        frame_task.package(frame=frame, frame_info=frame_time)
        frame_number += 1
        _pipeline.put(frame_task)
def doFaceTask(self, _task):
    """Preprocess the frame and pass it along as a Face task.

    Only process frame with face.
    """
    data = _task.depackage()
    frame, frame_info = data['frame'], data['frame_info']
    # 'preprossor' (sic) is the attribute name as defined on this class
    processed = self.preprossor.process(frame)
    out_task = task.Task(task.Task.Face)
    out_task.package(frame=processed, frame_info=frame_info)
    self.putResult(out_task)
def register_api(self):
    """Decode the request's base64 images, run them through the
    registration pipeline, and respond with the resulting face id."""
    frame_info = 'Demo'
    for image_str in self.request.json['images']:
        success, frame = base64str_to_frame(image_str)
        if not success:
            continue
        frame_task = task.Task(task.Task.Frame)
        frame_task.package(frame=frame, frame_info=frame_info)
        self.pipeline.put(frame_task)
    # notify pipeline there is no more images and
    event_task = task.Task(task.Task.Event)
    event_task.package(client_id=frame_info)
    self.pipeline.put(event_task)
    face_id = next(self.pipeline.results())
    return DemoEcommerceServer.response_success({'faceId': face_id})
def doFaceTask(self, _task):
    """Crop and preprocess only the biggest detected face, then forward it."""
    data = _task.depackage()
    bbs = data['bbs']
    _pts = data['pts']  # unpacked for parity with the packager; unused here
    frame = data['frame']
    frame_info = data['frame_info']
    bbox = self.get_biggest_face(bbs)
    if not bbox.any():
        return
    # last bbox entry is the score; crop on coordinates only
    cropped_face = CropperUtils.crop_face(frame, bbox[:-1])
    preprocessed = self.preprocessor.process(cropped_face)
    out_task = task.Task(task.Task.Face)
    out_task.package(images=preprocessed, frame_info=frame_info)
    self.putResult(out_task)
def doFaceTask(self, _task):
    """Buffer a client's preprocessed images; emit one batched Face task
    once enough images have accumulated for that client."""
    data = _task.depackage()
    preprocessed_images, frame_info = data['images'], data['frame_info']
    client_id = frame_info
    self.waiting_images.put(client_id, preprocessed_images)
    print('=' * 10, 'Process image', client_id)
    if not self.waiting_images.has_enough(client_id):
        return
    batch = np.array(self.waiting_images.get(client_id))
    batch_task = task.Task(task.Task.Face)
    batch_task.package(images=batch, frame_info=frame_info)
    self.putResult(batch_task)
def doEventTask(self, _task):
    """Flush the client's remaining buffered images, then delegate the
    end-of-images event downstream."""
    # handle remaining images
    client_id = _task.depackage()['client_id']
    remaining = np.array(self.waiting_images.get(client_id))
    flush_task = task.Task(task.Task.Face)
    flush_task.package(images=remaining, frame_info=client_id)
    self.putResult(flush_task)
    # notify that no more images by delegate event
    self.putResult(_task)
def doFaceTask(self, _task):
    """Extract embeddings for an image batch; forward empty embeddings when
    the batch is empty or extraction fails."""
    # detect all at once, no cuda memory may occur
    data = _task.depackage()
    images, frame_info = data['images'], data['frame_info']
    embs = np.array([])
    if images.size > 0:
        try:
            embs, _ = self.extractor.extract_features(images)
        except ValueError:
            # extractor rejects malformed batch shapes; keep empty embs
            print('Can not extract image with shape', images.shape)
    out_task = task.Task(task.Task.Face)
    out_task.package(embs=embs, frame_info=frame_info)
    self.putResult(out_task)
def doFrameTask(self, _task):
    """Run the detector on a frame; forward only frames with detections."""
    data = _task.depackage()
    frame = data['frame']
    frame_info = data['frame_info']
    bboxes = self.detector.process(frame)
    detections = [detection.Detection(bbox, 100, 100) for bbox in bboxes]
    if not detections:
        return
    out_task = task.Task(task.Task.Frame)
    out_task.package(frame=frame,
                     frame_info=frame_info,
                     detections=detections)
    self.putResult(out_task)
    logger.debug('Found %s detection' % len(detections))
def doFrameTask(self, _task):
    """Update trackers with this frame's detections; forward confirmed ones
    as an Event task."""
    data = _task.depackage()
    frame = data['frame']
    frame_info = data['frame_info']
    detections = data['detections']
    self.tracker_manager.predict()
    self.tracker_manager.update(detections)
    trackers = self.tracker_manager.get_confirmed_trackers()
    if not trackers:
        return
    event_task = task.Task(task.Task.Event)
    event_task.package(frame=frame,
                       frame_info=frame_info,
                       trackers=trackers)
    self.putResult(event_task)
def doFaceTask(self, _task):
    """Buffer a client's frames; once enough have arrived, run the mask and
    glasses classifiers on the batch and forward the results."""
    data = _task.depackage()
    frame, frame_info = data['frame'], data['frame_info']
    client_id = frame_info
    self.waiting_images.put(client_id, frame)
    if not self.waiting_images.has_enough(client_id):
        return
    images = np.array(self.waiting_images.get(client_id))
    has_masks = self.mask_classifier.is_wearing_mask(images)
    has_glasses = self.glasses_classifier.is_wearing_glasses(images)
    result_task = task.Task(task.Task.Face)
    result_task.package(glasses=has_glasses,
                        masks=has_masks,
                        client_id=client_id)
    self.putResult(result_task)
def doFaceTask(self, _task):
    """Split a face batch into chunks of at most ``self.max_nrof_faces``
    and forward each chunk as its own Face task."""
    data = _task.depackage()
    faces = data['faces']
    preprocessed_images = data['images']
    frame = data['frame']
    frame_info = data['frame_info']
    for begin in range(0, len(faces), self.max_nrof_faces):
        end = begin + self.max_nrof_faces
        chunk_task = task.Task(task.Task.Face)
        chunk_task.package(faces=faces[begin:end],
                           images=preprocessed_images[begin:end],
                           frame=frame,
                           frame_info=frame_info)
        self.putResult(chunk_task)
def doFrameTask(self, _task):
    """Detect faces in a frame, forward detections, and update counters."""
    # start = time.time()
    data = _task.depackage()
    frame, frame_info = data['frame'], data['frame_info']
    # timer.detection_start()
    bbs, pts = self.face_detector.detect_face(frame)
    # timer.detection_done()
    # logger.info(
    #     'Frame: %s, bbs: %s, pts: %s' % (frame_info, list(bbs), list(pts)))
    out_task = task.Task(task.Task.Face)
    out_task.package(bbs=bbs, pts=pts, frame=frame, frame_info=frame_info)
    self.putResult(out_task)
    nrof_faces = len(bbs)
    if nrof_faces > 0:
        self.face_count += nrof_faces
        self.detected_frame_count += 1
def doEventTask(self, _task):
    """Write the alert images to an mp4 clip and forward an alert event.

    Improvement: the VideoWriter is now released in a ``finally`` block so
    the file handle is not leaked if a write raises.
    """
    data = _task.depackage()
    images = data['images']
    alert_type = data['alert_type']
    # timestamp-named clip, path relative to the data dir
    video_rel_path = '%s/%s.mp4' % (self.rel_video_dir, time.time())
    video_abs_path = "%s/%s" % (Config.Dir.DATA_DIR, video_rel_path)
    writer = cv2.VideoWriter(video_abs_path, self.fourcc, self.fps,
                             self.video_dim)
    try:
        for image in images:
            writer.write(image)
    finally:
        writer.release()
    event_task = task.Task(task.Task.Event)
    event_task.package(alert_type=alert_type, video_name=video_rel_path)
    self.putResult(event_task)
    logger.info('Found violation, saved in %s' % video_rel_path)
def doFaceTask(self, _task):
    """Align and preprocess each detected face (plus optional coefficient
    crops) and forward the batch for embedding extraction."""
    # start = time.time()  # TODO: Get timer
    data = _task.depackage()
    bbs = data['bbs']
    pts = data['pts']
    frame = data['frame']
    frame_info = data['frame_info']
    # timer.preprocess_start()
    faces = []
    preprocessed_face_images = []
    preprocessed_coeff_images = []
    for i, bb in enumerate(bbs):
        # last entry of each bb is the detection score
        bbox, score = bb[:-1], bb[-1]
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bbox)
        faces.append(
            detection.FaceInfo(bbox, score, frame_info, display_face,
                               padded_bb_str, pts[:, i]))
        preprocessed_face_images.append(
            self.face_preprocessor.process(frame, pts[:, i], self.aligner,
                                           Config.Align.IMAGE_SIZE,
                                           self.prewhitening))
        if self.use_coeff_filter:
            cropped_face = CropperUtils.crop_face(frame, bbox)
            preprocessed_coeff_images.append(
                self.coeff_preprocessor.process(cropped_face))
    # timer.preprocess_done()
    out_task = task.Task(task.Task.Face)
    out_task.package(faces=faces,
                     images=np.array(preprocessed_face_images),
                     coeff_images=np.array(preprocessed_coeff_images),
                     frame=frame,
                     frame_info=frame_info)
    self.putResult(out_task)