def Score(self, cvImage):
    try:
        with self._lock:
            imageBlob = self.Preprocess(cvImage)
            detections = self._onnxSession.run(
                [self._onnxSessionOutputName],
                {self._onnxSessionInputName: imageBlob})[0]
            predBbox = self.PostprocessBbox(np.expand_dims(detections, axis=0))

            originalImageSize = cvImage.shape[:2]

            # bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
            bboxes = self.PostprocessBoxes(predBbox, originalImageSize, 416, 0.25)
            bboxes = self.nms(bboxes, 0.213, method='nms')

            return bboxes, originalImageSize
    except:
        PrintGetExceptionDetails()
        raise
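# Hypothetical usage sketch (not part of the original source): score a single frame
# loaded from disk. 'yolo' stands for an already-initialized instance of the wrapper
# class that owns Score(); the file name is illustrative only.
import cv2

frame = cv2.imread('sample.jpg')                 # BGR image as returned by OpenCV
if frame is not None:
    bboxes, (height, width) = yolo.Score(frame)  # each bbox: [x_min, y_min, x_max, y_max, probability, cls_id]
    print('Detected {0} objects in a {1}x{2} frame'.format(len(bboxes), width, height))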
def on_new_sample(self, appsink):
    try:
        sample = appsink.emit("pull-sample")
        buffer = sample.get_buffer()
        caps = sample.get_caps()
        height = caps.get_structure(0).get_value('height')
        width = caps.get_structure(0).get_value('width')

        gst_lva_message = get_message(buffer)
        msg = self.get_lva_MediaStreamMessage(buffer, gst_lva_message, height, width)

        if msg is None:
            logging.info('media stream message is None')
        else:
            if self.msgQueue is not None:
                if self.msgQueue.full():
                    logging.info("queue is full")
                    self.msgQueue.get()
                self.msgQueue.put(msg)
            else:
                logging.info("msgQueue is null")

        remove_message(buffer)

        if self.MJPEGOutput is not None:
            self.pushImageWithInference(sample, msg.media_sample.inferences)
    except:
        PrintGetExceptionDetails()

    return Gst.FlowReturn.OK
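# Minimal sketch (assumption, not the repository's actual pipeline) of how a callback
# like on_new_sample is typically wired to a GStreamer appsink. 'handler' stands for
# the object that defines on_new_sample; the pipeline string is illustrative.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
pipeline = Gst.parse_launch('videotestsrc ! videoconvert ! appsink name=sink')
appsink = pipeline.get_by_name('sink')
appsink.set_property('emit-signals', True)             # required for "new-sample" to fire
appsink.connect('new-sample', handler.on_new_sample)   # callback receives the appsink element
pipeline.set_state(Gst.State.PLAYING)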
def __init__(self, shmFlags=None, name=None, size=None):
    try:
        self._shmFilePath = '/dev/shm'

        self._shmFileName = name
        if self._shmFileName is None:
            self._shmFileName = next(tempfile._get_candidate_names())

        self._shmFileSize = size
        if self._shmFileSize is None:
            self._shmFileSize = 1024 * 1024 * 10    # Bytes (10MB)

        self._shmFileFullPath = os.path.join(self._shmFilePath, self._shmFileName)

        # See the NOTE section here: https://docs.python.org/2/library/os.html#os.open for details on shmFlags
        self._shmFlags = shmFlags

        if self._shmFlags is None:
            self._shmFile = open(self._shmFileFullPath, 'r+b')
            self._shm = mmap.mmap(self._shmFile.fileno(), self._shmFileSize)
        else:
            self._shmFile = os.open(self._shmFileFullPath, self._shmFlags)
            os.ftruncate(self._shmFile, self._shmFileSize)
            self._shm = mmap.mmap(self._shmFile, self._shmFileSize,
                                  mmap.MAP_SHARED,
                                  mmap.PROT_WRITE | mmap.PROT_READ)

        # Dictionary to host reserved mem blocks
        # self._memSlots[sequenceNo] = [Begin, End]   (closed interval)
        self._memSlots = dict()

        logging.info('Shared memory name: {0}'.format(self._shmFileFullPath))
    except:
        PrintGetExceptionDetails()
        raise
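# Illustrative sketch only (an assumption, not the original class API): a read helper
# over the mapped region. Media bytes referenced by an offset/length pair in a request
# can be copied out by slicing the mmap object created above.
def ReadBytes(self, contentOffset, contentLength):
    # mmap objects support slice access; bytes() copies the data out of shared memory
    return bytes(self._shm[contentOffset:contentOffset + contentLength])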
def ProcessMediaStream(self, requestIterator, context):
    # Below logic can be extended into multi-process (per CPU cores, i.e. in case using CPU inferencing)
    # For simplicity below, we use single process to handle gRPC clients

    # Auto increment counter. Increases per client requests
    responseSeqNum = 1

    # First message from the client is (must be) MediaStreamDescriptor
    mediaStreamMessageRequest = next(requestIterator)

    # Extract message IDs
    requestSeqNum = mediaStreamMessageRequest.sequence_number
    requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

    # State object per client
    clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

    logging.info(
        '[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'
        .format(requestSeqNum, requestAckSeqNum, clientState._mediaStreamDescriptor))

    # First message response ...
    mediaStreamMessage = extension_pb2.MediaStreamMessage(
        sequence_number=responseSeqNum,
        ack_sequence_number=requestSeqNum,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale)))
    yield mediaStreamMessage

    width = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.width
    height = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.height

    msgQueue = Queue(maxsize=10)
    gst_lva_pipeline = Gst_Lva_Pipeline(
        msgQueue,
        mediaStreamMessageRequest.media_stream_descriptor.graph_identifier.graph_instance_name,
        width, height)
    gst_lva_pipeline.play()

    # Process rest of the MediaStream message sequence
    for mediaStreamMessageRequest in requestIterator:
        try:
            # Read request id, sent by client
            requestSeqNum = mediaStreamMessageRequest.sequence_number
            logging.info('[Received] SequenceNum: {0:07d}'.format(requestSeqNum))

            # Get media content bytes. (bytes sent over shared memory buffer, segment or inline to message)
            if not self.ProcessMediaSample(clientState, mediaStreamMessageRequest, gst_lva_pipeline):
                # logging.info('Error in processing media sample with sequence number ' + str(mediaStreamMessageRequest.sequence_number))
                responseSeqNum += 1

                # Respond with message without inferencing
                mediaStreamMessage = extension_pb2.MediaStreamMessage()
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = mediaStreamMessageRequest.sequence_number
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                logging.info("empty message for request seq = " +
                             str(mediaStreamMessage.ack_sequence_number) +
                             " response seq = " + str(responseSeqNum))
                yield mediaStreamMessage
            elif context.is_active():
                while not msgQueue.empty():
                    mediaStreamMessage = msgQueue.get()
                    responseSeqNum += 1
                    mediaStreamMessage.sequence_number = responseSeqNum
                    logging.info("responding for message with request seq = " +
                                 str(mediaStreamMessage.ack_sequence_number) +
                                 " response seq = " + str(responseSeqNum))
                    # logging.info(mediaStreamMessage)
                    # yield response
                    yield mediaStreamMessage
            else:
                break
        except:
            PrintGetExceptionDetails()

    logging.info('Done processing messages')
    logging.info('MediaStreamDescriptor:\n{0}'.format(clientState._mediaStreamDescriptor))
def ProcessMediaStream(self, requestIterator, context):
    # Below logic can be extended into multi-process (per CPU cores, i.e. in case using CPU inferencing)
    # For simplicity below, we use single process to handle gRPC clients

    # Auto increment counter. Increases per client requests
    responseSeqNum = 1

    # First message from the client is (must be) MediaStreamDescriptor
    mediaStreamMessageRequest = next(requestIterator)

    # Extract message IDs
    requestSeqNum = mediaStreamMessageRequest.sequence_number
    requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

    # State object per client
    clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

    logging.info('Connection created with peer {0}.\nMediaStreamDescriptor:\n{1}'.format(
        context.peer(), clientState._mediaStreamDescriptor))
    logging.debug('[Received] SeqNum: {0:07d} | AckNum: {1}'.format(requestSeqNum, requestAckSeqNum))

    # First message response ...
    mediaStreamMessage = extension_pb2.MediaStreamMessage(
        sequence_number=responseSeqNum,
        ack_sequence_number=requestSeqNum,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale)))
    yield mediaStreamMessage

    # Process rest of the MediaStream message sequence
    for mediaStreamMessageRequest in requestIterator:
        try:
            # Increment response counter, will be sent to client
            responseSeqNum += 1

            # Read request id, sent by client
            requestSeqNum = mediaStreamMessageRequest.sequence_number
            logging.debug('[Received] SeqNum: {0:07d}'.format(requestSeqNum))

            # Get media content bytes. (bytes sent over shared memory buffer, segment or inline to message)
            cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)

            if cvImage is None:
                message = "Can't decode received bytes."
                logging.info(message)
                context.set_details(message)
                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                return

            if list(cvImage.shape[:2]) != self._tYoloV3.image_shape:
                message = "Received an image of size {0}, but expected one of size {1}".format(
                    cvImage.shape[:2], self._tYoloV3.image_shape)
                context.set_details(message)
                logging.info(message)
                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                return

            # run inference
            boxes, scores, indices = self._tYoloV3.Score(cvImage)
            logging.debug('Detected {0} inferences'.format(len(indices)))

            if DEBUG is not None:
                # Persist the received frame only when debug output is enabled
                cv2.imwrite('/tmp/sample{}.jpg'.format(requestSeqNum), cvImage)
                self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

            # Check client connection state
            if context.is_active():
                # return inference result as MediaStreamMessage
                mediaStreamMessage = self.GetMediaStreamMessageResponse(boxes, scores, indices, cvImage.shape)
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = requestSeqNum
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                # yield response
                yield mediaStreamMessage
            else:
                break
        except:
            PrintGetExceptionDetails()

    logging.info('Connection closed with peer {0}.'.format(context.peer()))
def ProcessMediaStream(self, requestIterator, context):
    # Below logic can be extended into multi-process (per CPU cores, i.e. in case using CPU inferencing)
    # For simplicity below, we use single process to handle gRPC clients

    # Auto increment counter. Increases per client requests
    responseSeqNum = 1

    # First message from the client is (must be) MediaStreamDescriptor
    mediaStreamMessageRequest = next(requestIterator)

    # Extract message IDs
    requestSeqNum = mediaStreamMessageRequest.sequence_number
    requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

    # State object per client
    clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

    logging.info('Connection created with peer {0}.\nMediaStreamDescriptor:\n{1}'.format(
        context.peer(), clientState._mediaStreamDescriptor))
    logging.debug('[Received] SeqNum: {0:07d} | AckNum: {1}'.format(requestSeqNum, requestAckSeqNum))

    # First message response ...
    mediaStreamMessage = extension_pb2.MediaStreamMessage(
        sequence_number=responseSeqNum,
        ack_sequence_number=requestSeqNum,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale)))
    yield mediaStreamMessage

    # Process rest of the MediaStream message sequence
    for mediaStreamMessageRequest in requestIterator:
        try:
            # Increment response counter, will be sent to client
            responseSeqNum += 1

            # Read request id, sent by client
            requestSeqNum = mediaStreamMessageRequest.sequence_number
            logging.debug('[Received] SeqNum: {0:07d}'.format(requestSeqNum))

            # Get media content bytes. (bytes sent over shared memory buffer, segment or inline to message)
            cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)

            if cvImage is None:
                message = "Can't decode received bytes."
                logging.info(message)
                context.set_details(message)
                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                return

            if list(cvImage.shape[:2]) != self._tYoloV3.image_shape:
                message = "Received an image of size {0}, but expected one of size {1}".format(
                    cvImage.shape[:2], self._tYoloV3.image_shape)
                context.set_details(message)
                logging.info(message)
                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                return

            mediaStreamMessage = extension_pb2.MediaStreamMessage()

            # Check confidence
            allConfidenceReached = True
            for inference in mediaStreamMessageRequest.media_sample.inferences:
                confidence = inference.entity.tag.confidence
                if confidence < self.inferenceConfidence:
                    allConfidenceReached = False
                    break

            if len(mediaStreamMessageRequest.media_sample.inferences) > 0 and allConfidenceReached:
                # Return acknowledge message
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = requestSeqNum
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                for tinyYoloInference in mediaStreamMessageRequest.media_sample.inferences:
                    objectLabel = tinyYoloInference.entity.tag.value
                    if self.objectTag is None:
                        self.objectTag = objectLabel

                    if self.objectTag == objectLabel:
                        inference = mediaStreamMessage.media_sample.inferences.add()
                        inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
                        inference.subtype = 'From upstream'
                        inference.entity.CopyFrom(
                            inferencing_pb2.Entity(
                                tag=inferencing_pb2.Tag(
                                    value=objectLabel,
                                    confidence=tinyYoloInference.entity.tag.confidence),
                                box=inferencing_pb2.Rectangle(
                                    l=tinyYoloInference.entity.box.l,
                                    t=tinyYoloInference.entity.box.t,
                                    w=tinyYoloInference.entity.box.w,
                                    h=tinyYoloInference.entity.box.h)))
            else:
                # run inference
                boxes, scores, indices = self._tYoloV3.Score(cvImage)
                logging.debug('Detected {0} inferences'.format(len(indices)))

                if DEBUG is not None:
                    self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

                # return inference result as MediaStreamMessage
                mediaStreamMessage = self.GetMediaStreamMessageResponse(
                    mediaStreamMessage, boxes, scores, indices, cvImage.shape,
                    mediaStreamMessageRequest.media_sample.timestamp)
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = requestSeqNum
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

            # Check client connection state
            if context.is_active():
                # yield response
                yield mediaStreamMessage
            else:
                break
        except:
            PrintGetExceptionDetails()

    logging.info('Connection closed with peer {0}.'.format(context.peer()))
def __del__(self):
    try:
        self._shmFile.close()
    except:
        PrintGetExceptionDetails()
        raise
def get_lva_MediaStreamMessage(self, buffer, gst_lva_message, ih, iw):
    msg = extension_pb2.MediaStreamMessage()
    msg.ack_sequence_number = gst_lva_message.sequence_number
    msg.media_sample.timestamp = gst_lva_message.timestamp

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
    frame = batch_meta.frame_meta_list
    while frame is not None:
        try:
            # Note that frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(frame.data)
            objInference = frame_meta.obj_meta_list
            frameWidth = frame_meta.source_frame_width
            frameHeight = frame_meta.source_frame_height

            inference = msg.media_sample.inferences.add()
            attributes = []
            obj_label = None
            obj_confidence = 0
            obj_left = 0
            obj_top = 0
            obj_width = 0
            obj_height = 0

            # iterate through objects
            while objInference is not None:
                try:
                    # Casting objInference.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(objInference.data)
                except StopIteration:
                    break

                rect_params = obj_meta.rect_params
                top = int(rect_params.top)
                left = int(rect_params.left)
                width = int(rect_params.width)
                height = int(rect_params.height)

                obj_confidence = obj_meta.confidence
                obj_label = obj_meta.obj_label

                # Normalize the bounding box to the source frame dimensions
                obj_left = left / iw
                obj_top = top / ih
                obj_width = width / iw
                obj_height = height / ih

                inference.type = inferencing_pb2.Inference.InferenceType.ENTITY

                try:
                    objInference = objInference.next
                except StopIteration:
                    break

            if obj_label is not None:
                try:
                    entity = inferencing_pb2.Entity(
                        tag=inferencing_pb2.Tag(value=obj_label, confidence=obj_confidence),
                        box=inferencing_pb2.Rectangle(l=obj_left, t=obj_top, w=obj_width, h=obj_height))
                    for attr in attributes:
                        attribute = inferencing_pb2.Attribute(
                            name=attr[0], value=attr[1], confidence=attr[2])
                        entity.attributes.append(attribute)
                except:
                    PrintGetExceptionDetails()
                inference.entity.CopyFrom(entity)
        except StopIteration:
            break

        try:
            frame = frame.next
        except StopIteration:
            break

    return msg
def get_lva_MediaStreamMessage(self, buffer, gst_lva_message, ih, iw):
    msg = extension_pb2.MediaStreamMessage()
    msg.ack_sequence_number = gst_lva_message.sequence_number
    msg.media_sample.timestamp = gst_lva_message.timestamp

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
    frame = batch_meta.frame_meta_list
    while frame is not None:
        try:
            # Note that frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(frame.data)
            objInference = frame_meta.obj_meta_list
            frameWidth = frame_meta.source_frame_width
            frameHeight = frame_meta.source_frame_height

            # iterate through objects
            while objInference is not None:
                try:
                    # Casting objInference.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(objInference.data)
                except StopIteration:
                    break

                inference = msg.media_sample.inferences.add()
                attributes = []
                obj_label = None
                obj_confidence = 0
                obj_left = 0
                obj_top = 0
                obj_width = 0
                obj_height = 0

                # Classification
                attribute = None
                if obj_meta.class_id == 0 and obj_meta.classifier_meta_list is not None:
                    classifier_meta = obj_meta.classifier_meta_list
                    while classifier_meta is not None:
                        classifierItem = pyds.NvDsClassifierMeta.cast(classifier_meta.data)
                        if classifierItem is not None:
                            label_meta = classifierItem.label_info_list
                            while label_meta is not None:
                                labelItem = pyds.NvDsLabelInfo.cast(label_meta.data)
                                prob = round(labelItem.result_prob, 2)
                                attrValue = labelItem.result_label
                                if classifierItem.unique_component_id == PGIE_CLASS_ID_VEHICLE_COLOR:
                                    attrName = 'color'
                                else:
                                    attrName = 'type'
                                attributes.append([attrName, attrValue, prob])
                                try:
                                    label_meta = label_meta.next
                                except StopIteration:
                                    break
                        try:
                            classifier_meta = classifier_meta.next
                        except StopIteration:
                            break

                rect_params = obj_meta.rect_params
                top = int(rect_params.top)
                left = int(rect_params.left)
                width = int(rect_params.width)
                height = int(rect_params.height)

                obj_confidence = obj_meta.confidence
                obj_label = obj_meta.obj_label

                # Normalize the bounding box to the source frame dimensions
                obj_left = left / iw
                obj_top = top / ih
                obj_width = width / iw
                obj_height = height / ih

                obj_id = None
                # Tracking: Active tracking bbox information
                if self.trackinEnabled:
                    obj_id = obj_meta.object_id
                    obj_active_tracking = obj_meta.tracker_bbox_info
                    tracking_coord = obj_active_tracking.org_bbox_coords
                    if (tracking_coord is not None and tracking_coord.left > 0 and
                            tracking_coord.width > 0 and tracking_coord.top > 0 and
                            tracking_coord.height > 0):
                        obj_left = tracking_coord.left / iw
                        obj_top = tracking_coord.top / ih
                        obj_width = tracking_coord.width / iw
                        obj_height = tracking_coord.height / ih

                inference.type = inferencing_pb2.Inference.InferenceType.ENTITY

                if obj_label is not None:
                    try:
                        entity = inferencing_pb2.Entity(
                            tag=inferencing_pb2.Tag(value=obj_label, confidence=obj_confidence),
                            box=inferencing_pb2.Rectangle(l=obj_left, t=obj_top, w=obj_width, h=obj_height))
                        if self.trackinEnabled and obj_id is not None:
                            entity.id = str(obj_id)
                        for attr in attributes:
                            attribute = inferencing_pb2.Attribute(
                                name=attr[0], value=attr[1], confidence=attr[2])
                            entity.attributes.append(attribute)
                    except:
                        PrintGetExceptionDetails()
                    inference.entity.CopyFrom(entity)

                try:
                    objInference = objInference.next
                except StopIteration:
                    break
        except StopIteration:
            break

        try:
            frame = frame.next
        except StopIteration:
            break

    return msg
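# Minimal sketch (not from the original source) of how a MediaStreamMessage built by
# get_lva_MediaStreamMessage can be inspected before it is queued; the field names
# match those used above (media_sample.inferences, entity.tag, entity.box).
def log_inferences(msg):
    for inference in msg.media_sample.inferences:
        tag = inference.entity.tag
        box = inference.entity.box
        logging.info('label=%s confidence=%.2f box=[l=%.3f t=%.3f w=%.3f h=%.3f]',
                     tag.value, tag.confidence, box.l, box.t, box.w, box.h)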
def ProcessMediaStream(self, requestIterator, context):
    # Below logic can be extended into multi-process (per CPU cores, i.e. in case using CPU inferencing)
    # For simplicity below, we use single process to handle gRPC clients

    # Auto increment counter. Increases per client requests
    responseSeqNum = 1

    # First message from the client is (must be) MediaStreamDescriptor
    mediaStreamMessageRequest = next(requestIterator)

    # Extract message IDs
    requestSeqNum = mediaStreamMessageRequest.sequence_number
    requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

    # State object per client
    clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

    logging.info(
        '[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'
        .format(requestSeqNum, requestAckSeqNum, clientState._mediaStreamDescriptor))

    # First message response ...
    mediaStreamMessage = extension_pb2.MediaStreamMessage(
        sequence_number=responseSeqNum,
        ack_sequence_number=requestSeqNum,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale)))

    # Send acknowledge message to client
    yield mediaStreamMessage

    width = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.width
    height = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.height

    # Process rest of the MediaStream message sequence
    messageCount = 0
    imageBatch = []
    for mediaStreamMessageRequest in requestIterator:
        try:
            # Read request id, sent by client
            requestSeqNum = mediaStreamMessageRequest.sequence_number
            timestamp = mediaStreamMessageRequest.media_sample.timestamp
            logging.info('[Received] SequenceNum: {0:07d}'.format(requestSeqNum))

            imageDetails = self.get_image_details(clientState, mediaStreamMessageRequest)

            # Increment response sequence number
            responseSeqNum += 1

            if messageCount < self.batchSize:
                # Add image to the batch and acknowledge the request without inferences
                logging.info('Adding image #{0} to batch.'.format(messageCount + 1))
                mediaStreamMessage = extension_pb2.MediaStreamMessage(
                    sequence_number=responseSeqNum,
                    ack_sequence_number=requestSeqNum)
                imageBatch.append(imageDetails)
                messageCount += 1
            else:
                # Process batch
                logging.info('Processing batch ({0}).'.format(messageCount))
                mediaStreamMessage = extension_pb2.MediaStreamMessage()
                for image in imageBatch:
                    mediaStreamMessage = self.process_media_sample(mediaStreamMessage, image)

                if mediaStreamMessage is None:
                    # Respond with message without inferencing
                    mediaStreamMessage = extension_pb2.MediaStreamMessage()
                    responseStatusMessage = ("empty message for request seq = " +
                                             str(mediaStreamMessage.ack_sequence_number) +
                                             " response seq = " + str(responseSeqNum))
                else:
                    responseStatusMessage = ("responding for message with request seq = " +
                                             str(mediaStreamMessage.ack_sequence_number) +
                                             " response seq = " + str(responseSeqNum))
                logging.info(responseStatusMessage)

                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = mediaStreamMessageRequest.sequence_number
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                # Clear batch
                imageBatch.clear()
                messageCount = 0

            if context.is_active():
                # yield response
                yield mediaStreamMessage
            else:
                break
        except:
            PrintGetExceptionDetails()

    logging.info('Done processing messages')
def ProcessMediaStream(self, requestIterator, context):
    # Below logic can be extended into multi-process (per CPU cores, i.e. in case using CPU inferencing)
    # For simplicity below, we use single process to handle gRPC clients

    # Auto increment counter. Increases per client requests
    responseSeqNum = 1

    # First message from the client is (must be) MediaStreamDescriptor
    mediaStreamMessageRequest = next(requestIterator)

    # Extract message IDs
    requestSeqNum = mediaStreamMessageRequest.sequence_number
    requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

    # State object per client
    clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

    if DEBUG is not None:
        logging.info(
            '[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'
            .format(requestSeqNum, requestAckSeqNum, clientState._mediaStreamDescriptor))

    # First message response ...
    mediaStreamMessage = extension_pb2.MediaStreamMessage(
        sequence_number=responseSeqNum,
        ack_sequence_number=requestSeqNum,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale)))
    yield mediaStreamMessage

    # Process rest of the MediaStream message sequence
    for mediaStreamMessageRequest in requestIterator:
        try:
            # Increment response counter, will be sent to client
            responseSeqNum += 1

            # Read request id, sent by client
            requestSeqNum = mediaStreamMessageRequest.sequence_number
            if DEBUG is not None:
                logging.info('[Received] SeqNum: {0:07d}'.format(requestSeqNum))

            # Get media content bytes. (bytes sent over shared memory buffer, segment or inline to message)
            cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)
            if cvImage is None:
                logging.info("Can't decode received bytes.")
                continue

            # run inference
            # start = t.default_timer()
            boxes, originalImageSize = self._YoloV4.Score(cvImage)
            # end = t.default_timer()
            # infTime = round((end - start) * 1000, 5)
            # logging.info('inf time: {0}'.format(infTime))

            # if DEBUG is not None:
            #     self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

            # Check client connection state
            if context.is_active():
                # return inference result as MediaStreamMessage
                mediaStreamMessage = self.GetMediaStreamMessageResponse(boxes, originalImageSize)
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = requestSeqNum
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                # yield response
                yield mediaStreamMessage
            else:
                break
        except:
            PrintGetExceptionDetails()
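# Sketch of how a servicer exposing ProcessMediaStream is typically hosted in a gRPC
# server. The stub names (extension_pb2_grpc, MediaGraphExtension) are assumptions
# based on the extension.proto generated code used by these samples, and
# InferenceEngine is a hypothetical name for the class that defines ProcessMediaStream.
import grpc
from concurrent import futures

def serve(port=5001):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    extension_pb2_grpc.add_MediaGraphExtensionServicer_to_server(InferenceEngine(), server)
    server.add_insecure_port('[::]:{0}'.format(port))
    server.start()
    server.wait_for_termination()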