class StoreFrame(QueueTask):
    """Persists sampled frames and forwards them downstream.

    For every incoming frame message a Frame row is created and the frame
    task is registered with the process synchronizer; once all rows exist,
    one FrameMessage per frame is pushed onto the output queue.
    """

    def __init__(self, configuration, input_queue, output_queue):
        super().__init__(input_queue)
        self.configuration = configuration
        self.output_queue = output_queue

    def init(self):
        # Worker-side setup: DB handle and synchronizer are created per process.
        self.db = Database(self.configuration['db'])
        self.process_synchronizer = ProcessSynchronizer(
            self.configuration['synchronization'])

    def execute_with(self, messages):
        # First pass: persist every frame and register its sync task.
        for msg in messages:
            frame = Frame(offset=msg["offset"],
                          video_chunk_id=msg['video_chunk_id'])
            self.db.add(frame)
            msg['frame_id'] = frame.id
            self.process_synchronizer.register_frame_task(
                str(msg['video_chunk_id']), str(frame.id))

        # Second pass: only after every frame is registered, emit them all.
        for msg in messages:
            out = FrameMessage(video_chunk_id=msg['video_chunk_id'],
                               frame_id=msg['frame_id'],
                               payload=msg['payload'])
            self.output_queue.put(out)

    def close(self):
        self.db.close()
        self.process_synchronizer.close()
def setUp(self):
    """Build a clean test database holding one video chunk with a single
    frame and one detected face; remember the chunk id in ``self.id``."""
    self.configuration = test_helper.configuration()

    # Fresh DB state for every test run.
    self.db = Database(self.configuration['db'])
    self.db.truncate_all()

    chunk = VideoChunk(camera_id='CAMERA_ID', timestamp=1.0)
    self.db.add(chunk)

    self.synchronizer = ProcessSynchronizer(
        self.configuration['synchronization'])
    self.synchronizer.register_video_task(chunk.id)

    frame = Frame(offset=0, video_chunk_id=chunk.id)
    self.db.add(frame)

    self.bounding_box = [[10.0, 10.0], [20.0, 20.0]]
    self.db.add(Face(frame_id=frame.id, bounding_box=self.bounding_box))

    self.id = chunk.id
def setUp(self):
    """Seed the test database with one stored video chunk, four frames
    (offsets 0, 6, 12, 18) and one detected face per frame."""
    self.configuration = test_helper.configuration()

    self.db = Database(self.configuration['db'])
    self.db.truncate_all()

    chunk = VideoChunk(camera_id='CAMERA_ID', timestamp=1.0)
    self.db.add(chunk)

    # Upload the raw test clip under the chunk id.
    self.storage = raw_storage(self.configuration['storage'])
    self.storage.store_file(chunk.id, 'tests/resources/test.h264')

    self.synchronizer = ProcessSynchronizer(
        self.configuration['synchronization'])
    self.synchronizer.register_video_task(chunk.id)

    boxes = [[[602, 380], [289, 286]],
             [[604, 378], [296, 287]],
             [[604, 363], [316, 297]],
             [[633, 364], [310, 320]]]
    for index, box in enumerate(boxes):
        frame = Frame(offset=index * 6, video_chunk_id=chunk.id)
        self.db.add(frame)
        self.db.add(Face(frame_id=frame.id, bounding_box=box))

    self.video_chunk_id = chunk.id
class StoreProcessedVideo(QueueTask):
    """Uploads a processed clip to storage, flags the chunk as processed
    in the database and notifies the next pipeline stage."""

    def __init__(self, configuration, input_queue, output_queue):
        super().__init__(input_queue)
        self.configuration = configuration
        self.output_queue = output_queue

    def init(self):
        self.db = Database(self.configuration['db'])
        self.storage = processed_storage(self.configuration['storage'])

    def execute_with(self, message):
        chunk = message['video_chunk']

        # Upload the rendered file under "<chunk id>.mp4".
        filename = '{}.mp4'.format(chunk.id)
        self.storage.store_file(filename, message['filepath'])

        # Mark as processed and persist the flag.
        chunk.processed = True
        self.db.update()

        notification = {
            'video_chunk_id': chunk.id,
            'timestamp': chunk.timestamp,
            'duration': message['duration']
        }
        self.output_queue.put(notification)

    def close(self):
        self.db.close()
def init(self):
    """Per-process setup: load the classifier model, open database and
    synchronizer connections, and cache the match threshold."""
    # load pre-trained classifier from local file?
    # load from database?
    self.face_classifier = SVClassifier.load(
        self.configuration['face_classifier']['model'])
    self.db = Database(self.configuration['db'])
    # Minimum classification probability for a face to count as a match.
    self.threshold = self.configuration['face_classifier']['threshold']
    self.process_synchronizer = ProcessSynchronizer(
        self.configuration['synchronization'])
class TestFetchVideoData(unittest.TestCase):
    """End-to-end test for the FetchVideoData queue task."""

    def setUp(self):
        """Seed a clean database: one chunk, one frame, one face."""
        self.configuration = test_helper.configuration()
        self.db = Database(self.configuration['db'])
        self.db.truncate_all()

        chunk = VideoChunk(camera_id='CAMERA_ID', timestamp=1.0)
        self.db.add(chunk)

        self.synchronizer = ProcessSynchronizer(
            self.configuration['synchronization'])
        self.synchronizer.register_video_task(chunk.id)

        frame = Frame(offset=0, video_chunk_id=chunk.id)
        self.db.add(frame)

        self.bounding_box = [[10.0, 10.0], [20.0, 20.0]]
        self.db.add(Face(frame_id=frame.id, bounding_box=self.bounding_box))

        self.id = chunk.id

    def test_execute(self):
        input_queue = Queue()
        output_queue = Queue()
        task = FetchVideoData(configuration=self.configuration,
                              input_queue=input_queue,
                              output_queue=output_queue)

        with test_helper.start_task(task):
            input_queue.put(AnalyzedVideoChunkMessage(video_chunk_id=self.id))
            # Wait until the task has consumed the message.
            input_queue.join()

            result = output_queue.get(block=False)

            chunk = result['video_chunk']
            self.assertEqual(chunk.camera_id, 'CAMERA_ID')
            self.assertEqual(chunk.timestamp, 1.0)

            faces_by_offset = result['faces_by_offset']
            self.assertEqual([*faces_by_offset.keys()], [0])
            faces = faces_by_offset[0]
            self.assertEqual(len(faces), 1)
            self.assertEqual(faces[0].bounding_box, self.bounding_box)

    def tearDown(self):
        self.db.close()
class ProcessScheduler:
    """Tracks per-frame and per-chunk processing progress in the database.

    Counter columns are incremented with SQL-side expressions so that
    concurrent schedulers do not lose updates. The update methods return
    the finished video chunk id when the whole chunk completes, else None.
    """

    def __init__(self, configuration):
        self.db = Database(configuration)
        # frame_id -> video_chunk_id memo to avoid repeated lookups.
        self.cache = {}

    def update_frame_process(self, frame_id):
        """Record one processed face; cascade to the chunk when the frame
        is done. Returns the completed chunk id or None."""
        frame_process = self.db.get(FrameProcess, frame_id)
        # Server-side increment (column expression, not a Python int).
        frame_process.processed_faces_count = FrameProcess.processed_faces_count + 1
        self.db.update()
        if not frame_process.is_completed():
            return None
        chunk_id = self._video_chunk_id_for(frame_process.frame_id)
        return self.update_video_chunk_process(chunk_id)

    def update_video_chunk_process(self, video_chunk_id):
        """Record one processed frame. Returns the chunk id when all of
        its frames are processed, else None."""
        chunk_process = self.db.get(VideoChunkProcess, video_chunk_id)
        chunk_process.processed_frames_count = VideoChunkProcess.processed_frames_count + 1
        self.db.update()
        if not chunk_process.is_completed():
            return None
        return chunk_process.video_chunk_id

    def _video_chunk_id_for(self, frame_id):
        # EAFP cache: hit the DB only on the first lookup per frame.
        try:
            return self.cache[frame_id]
        except KeyError:
            chunk_id = self.db.get(Frame, frame_id).video_chunk_id
            self.cache[frame_id] = chunk_id
            return chunk_id

    def close(self):
        self.db.close()
class FaceEmbeddingTask(QueueTask):
    """Computes an embedding for each detected face, stores it in the
    database and queues a classification job.

    Consumes FaceEmbeddingMessage items from the input queue and emits
    FaceClassificationMessage items on the output queue.

    Fix: removed the dead commented-out embedding stub left over from
    debugging (commented-out code should be deleted, not kept).
    """

    def __init__(self, configuration, input_queue, output_queue):
        super().__init__(input_queue)
        self.output_queue = output_queue
        self.configuration = configuration
        # Created lazily in init() so they live in the worker process.
        self.db = None
        self.face_embedder = None

    def init(self):
        self.face_embedder = FaceEmbedderFactory.build(
            self.configuration['face_embedder'])
        self.db = Database(self.configuration['db'])

    def close(self):
        self.face_embedder.close()
        self.db.close()

    def execute_with(self, message):
        face_embedding_message: FaceEmbeddingMessage = message

        # Get face
        face = face_embedding_message.face_bytes
        face_id = face_embedding_message.detected_face_id
        video_chunk_id = face_embedding_message.video_chunk_id

        # Perform face embedding
        print("- Performing embedding - face_id: " +
              str(face_embedding_message.detected_face_id))
        embedding = self.face_embedder.get_embedding_mem(face)

        # Insert result into database
        face_embedding = FaceEmbedding(face_id=face_id,
                                       embedding=list(embedding.astype(float)))
        self.db.add(face_embedding)

        # Queue face classification job
        face_classification_message = FaceClassificationMessage(
            video_chunk_id, face_id, embedding)
        self.output_queue.put(face_classification_message)

    def _stop(self):
        # None is the poison pill that unblocks the consuming loop.
        self.input_queue.put(None)
class TestRun(unittest.TestCase):
    """Publishes an AnalyzedVideoChunkMessage against a fully seeded
    database (chunk, raw clip, four frames with one face each)."""

    def setUp(self):
        self.configuration = test_helper.configuration()
        self.db = Database(self.configuration['db'])
        self.db.truncate_all()

        chunk = VideoChunk(camera_id='CAMERA_ID', timestamp=1.0)
        self.db.add(chunk)

        # Upload the raw test clip under the chunk id.
        self.storage = raw_storage(self.configuration['storage'])
        self.storage.store_file(chunk.id, 'tests/resources/test.h264')

        self.synchronizer = ProcessSynchronizer(
            self.configuration['synchronization'])
        self.synchronizer.register_video_task(chunk.id)

        boxes = [[[602, 380], [289, 286]],
                 [[604, 378], [296, 287]],
                 [[604, 363], [316, 297]],
                 [[633, 364], [310, 320]]]
        for index, box in enumerate(boxes):
            frame = Frame(offset=index * 6, video_chunk_id=chunk.id)
            self.db.add(frame)
            self.db.add(Face(frame_id=frame.id, bounding_box=box))

        self.video_chunk_id = chunk.id

    def test_execute(self):
        publisher = Publisher(self.configuration['publisher'])
        publisher.publish(
            encode_message(AnalyzedVideoChunkMessage(self.video_chunk_id)))

    def tearDown(self):
        self.db.close()
        self.synchronizer.close()
class FetchVideoData(QueueTask):
    """Loads a video chunk — with frames, faces and matched persons
    eagerly joined — and forwards it to the output queue."""

    def __init__(self, configuration, input_queue, output_queue):
        super().__init__(input_queue)
        self.output_queue = output_queue
        self.configuration = configuration

    def init(self):
        self.db = Database(self.configuration['db'])

    def execute_with(self, message):
        self.output_queue.put(self.fetch_video_chunk(message.video_chunk_id))

    def fetch_video_chunk(self, id):
        """Return the VideoChunk row with its whole object graph loaded
        in a single query."""
        options = (joinedload(VideoChunk.frames)
                   .joinedload(Frame.faces)
                   .joinedload(Face.person))
        query = self.db.session.query(VideoChunk).options(options)
        return query.get(id)

    def close(self):
        self.db.close()
class StoreVideoChunk(QueueTask):
    """Persists an incoming raw video chunk: database row, process-sync
    registration, local temp file and raw-storage upload."""

    def __init__(self, configuration, input_queue, output_queue):
        super().__init__(input_queue)
        self.configuration = configuration
        self.output_queue = output_queue

    def init(self):
        self.db = Database(self.configuration['db'])
        self.storage = raw_storage(self.configuration['storage'])
        self.process_synchronizer = ProcessSynchronizer(
            self.configuration['synchronization'])

    def execute_with(self, message):
        chunk = VideoChunk(camera_id=message.camera_id,
                           timestamp=message.timestamp)
        self.db.add(chunk)
        logging.info(f"{chunk.id} created in DB. Sampling starting!")

        self.process_synchronizer.register_video_task(str(chunk.id))

        # Spool the payload to a temp file, then upload it by chunk id.
        filepath = path.join('tmp', '{}.h264'.format(chunk.id))
        with open(filepath, 'wb') as file:
            file.write(message.payload)
        self.storage.store_file(str(chunk.id), filepath)

        self.output_queue.put({
            'video_chunk_id': chunk.id,
            'path': filepath
        })

    def close(self):
        self.db.close()
        self.process_synchronizer.close()
class RemoveCompletedProcesses(QueueTask):
    """Deletes the per-frame and per-chunk bookkeeping rows once a video
    chunk has been fully processed.

    Consumes video chunk ids from the input queue.

    Fix: ``query(Frame.id).all()`` yields one-element row tuples, so the
    previous ``for frame_id in frame_ids`` compared the FrameProcess
    column against a tuple and the per-frame deletes never matched; the
    row is now unpacked.
    """

    def __init__(self, configuration, input_queue):
        super().__init__(input_queue)
        self.configuration = configuration

    def init(self):
        self.db = Database(self.configuration)

    def execute_with(self, video_chunk_id):
        frame_ids = (self.db.session.query(
            Frame.id).filter(Frame.video_chunk_id == video_chunk_id).all())

        # Unpack each single-column row to get the scalar id.
        for (frame_id,) in frame_ids:
            self.db.delete(FrameProcess, FrameProcess.frame_id == frame_id)

        self.db.delete(VideoChunkProcess,
                       VideoChunkProcess.video_chunk_id == video_chunk_id)

    def close(self):
        self.db.close()
def init(self):
    """Per-process setup: open the database and the process synchronizer
    from the task configuration."""
    self.db = Database(self.configuration['db'])
    self.process_synchronizer = ProcessSynchronizer(
        self.configuration['synchronization'])
    # Usage message branch of the argument check above (the matching `if`
    # is outside this excerpt).
    print("")
    print("Usage: ")
    print("python sampler_mock.py 'image_path1' 'image_path2' ... ")
    print("--------------------------------")
else:
    print('[*] Configuring sampler-mock')

    # input: load the mock's YAML configuration
    with open("sampler-mock.yml") as config_file:
        configuration = yaml.safe_load(config_file)
    print(configuration)

    # Create database connection
    db = Database(configuration['db'])

    # Create publisher thread fed by an in-process queue
    to_publisher_queue = Queue()
    publisher_thread = StoppableThread(
        PublishToRabbitMQ(configuration=configuration['publisher'],
                          input_queue=to_publisher_queue))

    # Simulate Sampler -----------------------------------------------
    print('[*] Configuration finished. Starting sampler-mock!')
    publisher_thread.start()

    # upload video chunk
    # NOTE(review): excerpt is truncated here — the VideoChunk(...) call
    # continues beyond this chunk.
    video_chunk = VideoChunk(camera_id="camera_1",
def init(self):
    """Per-process setup: open the database connection.

    NOTE(review): unlike sibling tasks this passes the whole
    configuration object to Database rather than configuration['db'] —
    presumably the caller hands in the db section directly; confirm.
    """
    self.db = Database(self.configuration)
class FaceDetectionTask(QueueTask):
    """Detects faces in incoming frames.

    For each detected face a Face row is stored, a face sync task is
    registered, and a FaceEmbeddingMessage with the cropped face pixels
    is queued for the embedder. When a frame contains no faces, the
    frame task is completed immediately and — if the whole chunk is then
    done — the scheduler is notified.
    """

    def __init__(self, configuration, input_queue, output_queue_to_classifier,
                 output_queue_to_scheduler):
        super().__init__(input_queue)
        self.output_queue_to_classifier = output_queue_to_classifier
        self.output_queue_to_scheduler = output_queue_to_scheduler
        self.configuration = configuration
        # Created in init() so they live in the worker process.
        self.db = None
        self.face_detector = None
        self.process_synchronizer = None

    def init(self):
        """Per-process setup: detector, database, synchronizer."""
        self.face_detector = FaceDetectorFactory.build(
            self.configuration['face_detector'])
        self.db = Database(self.configuration['db'])
        self.process_synchronizer = ProcessSynchronizer(
            self.configuration['synchronization'])

    def close(self):
        self.face_detector.close()
        self.db.close()
        self.process_synchronizer.close()

    def execute_with(self, message):
        """Process one FrameMessage: decode, detect, persist, dispatch."""
        face_detection_message: FrameMessage = message

        # Get message
        video_chunk_id = face_detection_message.video_chunk_id
        frame_id = face_detection_message.frame_id
        frame_bytes = face_detection_message.payload

        # Convert to cv2 img (decoded BGR, then converted to RGB).
        frame = cv2.imdecode(np.frombuffer(frame_bytes, np.uint8),
                             cv2.IMREAD_COLOR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Print frame
        # NOTE(review): debug display window — presumably not intended
        # for production workers; confirm before deploying headless.
        cv2.imshow("asd", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        cv2.waitKey(1)

        # Detect faces
        print("- Performing detection - frame_id: " + str(frame_id) +
              " - video chunk id: " + str(video_chunk_id))
        logging.debug("- Performing detection - frame_id: " + str(frame_id) +
                      " - video chunk id: " + str(video_chunk_id))

        rects = []
        rects = self.face_detector.detect_face_image(frame)
        #
        if len(rects) > 0:
            faces = []
            for rect in rects:
                x1, y1, x2, y2 = rect
                db_rect = [[x1, y1], [x2, y2]]

                # Insert detected face to db
                face = Face(frame_id=frame_id, bounding_box=db_rect)
                self.db.add(face)
                faces.append(face)

                # Face task sync creation
                self.process_synchronizer.register_face_task(
                    video_chunk_id, frame_id, str(face.id))

            for rect, face in zip(rects, faces):
                # Get cropped face (bounding box is [[x1,y1],[x2,y2]]).
                x1, y1, x2, y2 = rect
                cropped_face = frame[y1:y2, x1:x2]
                #cv2.imshow("asd", cv2.cvtColor(cropped_face, cv2.COLOR_RGB2BGR))
                #cv2.waitKey(1)

                logging.debug("- Found face, assigned id: " + str(face.id) +
                              " - video chunk id: " + str(video_chunk_id))
                print("- Found face, assigned id: " + str(face.id) +
                      " - video chunk id: " + str(video_chunk_id))

                # Print face with rect
                #cv2.imshow("asd", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
                # cv2.waitKey(1)

                # Queue face embedding job
                face_embedding_message = FaceEmbeddingMessage(
                    video_chunk_id, face.id, cropped_face)
                self.output_queue_to_classifier.put(face_embedding_message)
        else:
            # Frame task sync completion: no faces means this frame is
            # done right away.
            should_notify_scheduler = self.process_synchronizer.complete_frame_task(
                video_chunk_id, frame_id)

            if should_notify_scheduler:
                print(
                    "- Notifying interpolator of video chunk analysis completion, video chunk id: "
                    + str(video_chunk_id))
                logging.debug(
                    "- Notifying interpolator of video chunk analysis completion, video chunk id: "
                    + str(video_chunk_id))

                # Notify scheduler of video chunk analysis completion
                scheduler_notification = AnalyzedVideoChunkMessage(
                    video_chunk_id)
                self.output_queue_to_scheduler.put(scheduler_notification)
def init(self):
    """Per-process setup: build the face embedder and open the database."""
    self.face_embedder = FaceEmbedderFactory.build(
        self.configuration['face_embedder'])
    self.db = Database(self.configuration['db'])
def init(self):
    """Per-process setup: build the face detector, open the database and
    the process synchronizer."""
    self.face_detector = FaceDetectorFactory.build(
        self.configuration['face_detector'])
    self.db = Database(self.configuration['db'])
    self.process_synchronizer = ProcessSynchronizer(
        self.configuration['synchronization'])
from big_fiubrother_core.message_clients.rabbitmq import Publisher
from big_fiubrother_core.messages import (ProcessedFaceMessage,
                                          ProcessedFrameMessage)
from big_fiubrother_core.db import (VideoChunk, Frame, FrameProcess,
                                    VideoChunkProcess, Database)

# Test-database connection settings (credentials are masked in this
# excerpt).
db_configuration = {
    'host': 'localhost',
    'database': 'big_fiubrother_test',
    'username': '******',
    'password': '******'
}

db = Database(db_configuration)

# Seed one chunk with two frames; only the first frame gets a
# FrameProcess row (expecting two faces).
video_chunk = VideoChunk(camera_id='CAMERA',
                         timestamp=0.0,
                         payload=b'payload')
db.add(video_chunk)

frame_ids = []
for offset in range(2):
    frame = Frame(offset=offset, video_chunk_id=video_chunk.id)
    db.add(frame)
    frame_ids.append(frame.id)

db.add(VideoChunkProcess(video_chunk_id=video_chunk.id,
                         total_frames_count=2))
db.add(FrameProcess(frame_id=frame_ids[0], total_faces_count=2))

# Publish messages for run
def init(self):
    """Per-process setup: open the database and the processed-video
    storage backend."""
    self.db = Database(self.configuration['db'])
    self.storage = processed_storage(self.configuration['storage'])
def __init__(self, configuration):
    """Open a Database from *configuration* and start an empty
    frame-id -> video-chunk-id lookup cache."""
    self.db = Database(configuration)
    self.cache = {}
class FaceClassificationTask(QueueTask):
    """Classifies face embeddings against a pre-trained SV classifier.

    Writes the classification result onto the Face row, completes the
    face sync task, and — when that completes the whole video chunk —
    notifies the scheduler via the output queue.
    """

    def __init__(self, configuration, input_queue, output_queue):
        super().__init__(input_queue)
        self.output_queue = output_queue
        self.configuration = configuration
        # Created in init() so they live in the worker process.
        self.db = None
        self.face_classifier = None
        self.process_synchronizer = None
        self.threshold = 0.0

    def init(self):
        """Per-process setup: classifier model, database, threshold and
        process synchronizer."""
        # load pre-trained classifier from local file?
        # load from database?
        self.face_classifier = SVClassifier.load(
            self.configuration['face_classifier']['model'])
        self.db = Database(self.configuration['db'])
        self.threshold = self.configuration['face_classifier']['threshold']
        self.process_synchronizer = ProcessSynchronizer(
            self.configuration['synchronization'])

    def close(self):
        self.db.close()

    def execute_with(self, message):
        """Process one FaceClassificationMessage end to end."""
        face_classification_message: FaceClassificationMessage = message
        video_chunk_id = face_classification_message.video_chunk_id

        # Get message
        embedding = face_classification_message.face_embedding
        print()

        # Do face classification
        print("- Performing classification - face_id: " +
              str(face_classification_message.face_id))
        classification_index, classification_probability = self.face_classifier.predict(
            embedding)
        #classification_index, classification_probability = [0, 0.9]

        # A match requires the probability to exceed the configured
        # threshold.
        is_match = classification_probability > self.threshold

        # Update database face row with result
        face_id = face_classification_message.face_id
        face: Face = self.db.get(Face, face_id)
        face.classification_id = int(classification_index)
        face.probability_classification = float(classification_probability)
        face.is_match = is_match
        #print(type(face.classification_id))
        #print(type(face.probability_classification))
        self.db.update()

        print("- Classification id: " + str(face.classification_id) +
              ", prob: " + str(face.probability_classification))

        # Face analysis sync completion
        should_notify_scheduler = self.process_synchronizer.complete_face_task(
            video_chunk_id, face.frame_id, face_id)
        print(should_notify_scheduler)

        if should_notify_scheduler:
            print(
                "- Notifying interpolator of video chunk analysis completion, id: "
                + str(video_chunk_id))
            # Notify scheduler of video chunk analysis completion
            scheduler_notification = AnalyzedVideoChunkMessage(video_chunk_id)
            self.output_queue.put(scheduler_notification)

    def _stop(self):
        # None is the poison pill that unblocks the consuming loop.
        self.input_queue.put(None)