def GetMediaStreamMessageResponse(self, predictions, imgShape, confidenceThreshold=0.1):
    try:
        msg = extension_pb2.MediaStreamMessage()

        ih, iw, _ = imgShape

        for prediction in predictions:
            confidenceScore = prediction['probability']
            if confidenceScore >= confidenceThreshold:
                objectLabel = prediction['tagName']

                inference = msg.media_sample.inferences.add()
                inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
                inference.entity.CopyFrom(inferencing_pb2.Entity(
                    tag=inferencing_pb2.Tag(
                        value=objectLabel,
                        confidence=confidenceScore
                    ),
                    box=inferencing_pb2.Rectangle(
                        l=prediction['boundingBox']['left'],
                        t=prediction['boundingBox']['top'],
                        w=prediction['boundingBox']['width'],
                        h=prediction['boundingBox']['height'],
                    )
                ))

        return msg
    except:
        PrintGetExceptionDetails()
        raise
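# --- Usage sketch (illustrative, not part of the sample) ---
# A minimal 'predictions' payload in the shape the method above reads:
# Custom Vision-style dicts with 'tagName', 'probability', and a
# 'boundingBox' already normalized to [0, 1]. The 'server' instance and
# the image shape are assumptions for illustration.
predictions = [{
    'tagName': 'person',
    'probability': 0.87,
    'boundingBox': {'left': 0.10, 'top': 0.20, 'width': 0.30, 'height': 0.40},
}]
# msg = server.GetMediaStreamMessageResponse(predictions, (416, 416, 3))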
def GetMediaStreamMessageResponse(self, boxes, scores, indices, imgShape, confidenceThreshold=0.1):
    try:
        msg = extension_pb2.MediaStreamMessage()

        ih, iw, _ = imgShape

        for idx in indices[0]:
            confidenceScore = scores[tuple(idx)].tolist()
            if confidenceScore >= confidenceThreshold:
                objectLabel = self._tYoloV3._labelList[idx[1].tolist()]

                idxTuple = (idx[0], idx[2])
                ymin, xmin, ymax, xmax = boxes[idxTuple].tolist()

                inference = msg.media_sample.inferences.add()
                inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
                inference.entity.CopyFrom(inferencing_pb2.Entity(
                    tag=inferencing_pb2.Tag(
                        value=objectLabel,
                        confidence=confidenceScore
                    ),
                    box=inferencing_pb2.Rectangle(
                        l=xmin / iw,
                        t=ymin / ih,
                        w=(xmax - xmin) / iw,
                        h=(ymax - ymin) / ih,
                    )
                ))

        return msg
    except:
        PrintGetExceptionDetails()
        raise
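# --- Tensor layout sketch (illustrative) ---
# Synthetic arrays in the layout the method above indexes, matching the
# ONNX Tiny YOLOv3 outputs it consumes: boxes[batch, box, 4] holds
# pixel-space (ymin, xmin, ymax, xmax), scores[batch, class, box] holds
# confidences, and each row of indices[0] is (batch, class, box).
import numpy as np

boxes = np.array([[[48.0, 64.0, 320.0, 256.0]]])   # one box, pixel coords
scores = np.zeros((1, 80, 1))
scores[0, 0, 0] = 0.9                               # confidence for class 0 / box 0
indices = np.array([[[0, 0, 0]]])                   # (batch, class, box)
# msg = server.GetMediaStreamMessageResponse(boxes, scores, indices, (416, 416, 3))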
def process_images(self, mediaStreamMessage, rawBytes, size):
    # Read image raw bytes and re-encode as JPEG
    im = Image.frombytes('RGB', size, rawBytes.tobytes())
    imgBuf = io.BytesIO()
    im.save(imgBuf, format='JPEG')
    imgBytes = np.frombuffer(imgBuf.getvalue(), dtype=np.uint8)

    # Decode as a single-channel grayscale image
    cvGrayImage = cv.imdecode(imgBytes, cv.IMREAD_GRAYSCALE)
    grayBytes = cvGrayImage.tobytes()

    # Calculate average intensity (one byte per grayscale pixel)
    totalColor = cvGrayImage.sum()
    avgColor = totalColor / len(grayBytes)
    colorIntensity = 'dark' if avgColor < 127 else 'light'
    logging.info('Color intensity: {}'.format(colorIntensity))

    inference = mediaStreamMessage.media_sample.inferences.add()
    inference.subtype = 'colorIntensity'
    classification = inferencing_pb2.Classification(
        tag=inferencing_pb2.Tag(value=colorIntensity, confidence=1.0))
    inference.classification.CopyFrom(classification)

    return mediaStreamMessage
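# --- Intensity heuristic sketch (illustrative) ---
# Standalone version of the dark/light rule used above, run on a
# synthetic frame; NumPy is assumed. A mean pixel value below 127
# classifies the frame as 'dark'.
import numpy as np

gray = np.full((4, 4), 200, dtype=np.uint8)   # uniformly bright frame
avg = gray.sum() / gray.size
print('dark' if avg < 127 else 'light')       # -> light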
def GetMediaStreamMessageResponse(self, bboxes, originalImageSize):
    try:
        msg = extension_pb2.MediaStreamMessage()

        ih, iw = originalImageSize

        for bbox in bboxes:
            confidenceScore = bbox[4].tolist()
            objectLabel = self._YoloV4._labelList[int(bbox[5])]
            xmin, ymin, xmax, ymax = np.array(bbox[:4], dtype=np.int32)

            inference = msg.media_sample.inferences.add()
            inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
            inference.entity.CopyFrom(inferencing_pb2.Entity(
                tag=inferencing_pb2.Tag(
                    value=objectLabel,
                    confidence=confidenceScore
                ),
                box=inferencing_pb2.Rectangle(
                    l=xmin / iw,
                    t=ymin / ih,
                    w=(xmax - xmin) / iw,
                    h=(ymax - ymin) / ih,
                )
            ))

        return msg
    except:
        PrintGetExceptionDetails()
        raise
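# --- Box layout sketch (illustrative) ---
# One synthetic 'bboxes' row in the layout the method above consumes:
# pixel-space corners followed by confidence and class index. Values
# are made up for illustration.
import numpy as np

bboxes = np.array([[64.0, 48.0, 256.0, 320.0, 0.9, 0.0]])  # xmin, ymin, xmax, ymax, conf, class
# msg = server.GetMediaStreamMessageResponse(bboxes, (416, 416))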
def GetDummyMediaStreamMessageResponse(self, dummyValue):
    try:
        msg = extension_pb2.MediaStreamMessage()

        inference = msg.media_sample.inferences.add()
        inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
        inference.entity.CopyFrom(inferencing_pb2.Entity(
            tag=inferencing_pb2.Tag(
                value=dummyValue,
                confidence=0.0
            ),
            # Full-frame box; Rectangle coordinates are normalized to
            # [0, 1], matching the other responses
            box=inferencing_pb2.Rectangle(l=0.0, t=0.0, w=1.0, h=1.0)))

        return msg
    except:
        PrintGetExceptionDetails()
        raise
def ProcessMediaStream(self, requestIterator, context):
    # This logic can be extended to multiple processes (e.g., one per CPU
    # core when inferencing on the CPU). For simplicity, a single process
    # handles all gRPC clients here.

    # Auto-increment counter, increased per client request
    responseSeqNum = 1

    # First message from the client must be a MediaStreamDescriptor
    mediaStreamMessageRequest = next(requestIterator)

    # Extract message IDs
    requestSeqNum = mediaStreamMessageRequest.sequence_number
    requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

    # State object per client
    clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

    logging.info('Connection created with peer {0}.\nMediaStreamDescriptor:\n{1}'.format(
        context.peer(), clientState._mediaStreamDescriptor))
    logging.debug('[Received] SeqNum: {0:07d} | AckNum: {1}'.format(requestSeqNum, requestAckSeqNum))

    # First message response ...
    mediaStreamMessage = extension_pb2.MediaStreamMessage(
        sequence_number=responseSeqNum,
        ack_sequence_number=requestSeqNum,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale
            )
        )
    )
    yield mediaStreamMessage

    # Process the rest of the MediaStream message sequence
    for mediaStreamMessageRequest in requestIterator:
        try:
            # Increment response counter; will be sent to the client
            responseSeqNum += 1

            # Read the request id sent by the client
            requestSeqNum = mediaStreamMessageRequest.sequence_number
            logging.debug('[Received] SeqNum: {0:07d}'.format(requestSeqNum))

            # Get media content bytes (sent over shared memory buffer,
            # segment, or inline with the message)
            cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)

            if cvImage is None:
                message = "Can't decode received bytes."
                logging.info(message)
                context.set_details(message)
                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                return

            if list(cvImage.shape[:2]) != self._tYoloV3.image_shape:
                message = "Received an image of size {0}, but expected one of size {1}".format(
                    cvImage.shape[:2], self._tYoloV3.image_shape)
                context.set_details(message)
                logging.info(message)
                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                return

            mediaStreamMessage = extension_pb2.MediaStreamMessage()

            # Check whether every upstream inference meets the confidence threshold
            allConfidenceReached = True
            for inference in mediaStreamMessageRequest.media_sample.inferences:
                confidence = inference.entity.tag.confidence
                if confidence < self.inferenceConfidence:
                    allConfidenceReached = False
                    break

            if len(mediaStreamMessageRequest.media_sample.inferences) > 0 and allConfidenceReached:
                # Return acknowledge message that passes the upstream inferences through
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = requestSeqNum
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                for tinyYoloInference in mediaStreamMessageRequest.media_sample.inferences:
                    objectLabel = tinyYoloInference.entity.tag.value
                    if self.objectTag is None:
                        self.objectTag = objectLabel
                    if self.objectTag == objectLabel:
                        inference = mediaStreamMessage.media_sample.inferences.add()
                        inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
                        inference.subtype = 'From upstream'
                        inference.entity.CopyFrom(inferencing_pb2.Entity(
                            tag=inferencing_pb2.Tag(
                                value=objectLabel,
                                confidence=tinyYoloInference.entity.tag.confidence
                            ),
                            box=inferencing_pb2.Rectangle(
                                l=tinyYoloInference.entity.box.l,
                                t=tinyYoloInference.entity.box.t,
                                w=tinyYoloInference.entity.box.w,
                                h=tinyYoloInference.entity.box.h,
                            )
                        ))
            else:
                # Run inference
                boxes, scores, indices = self._tYoloV3.Score(cvImage)
                logging.debug('Detected {0} inferences'.format(len(indices)))
                if DEBUG is not None:
                    self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

                # Return the inference result as a MediaStreamMessage
                mediaStreamMessage = self.GetMediaStreamMessageResponse(boxes, scores, indices, cvImage.shape)
                mediaStreamMessage.sequence_number = responseSeqNum
                mediaStreamMessage.ack_sequence_number = requestSeqNum
                mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

            # Check client connection state
            if context.is_active():
                # Yield response
                yield mediaStreamMessage
            else:
                break
        except:
            PrintGetExceptionDetails()

    logging.info('Connection closed with peer {0}.'.format(context.peer()))
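# --- Hosting sketch (illustrative) ---
# One way a servicer implementing ProcessMediaStream above might be
# hosted. The generated stub name
# (extension_pb2_grpc.add_MediaGraphExtensionServicer_to_server), the
# InferenceServer class, and the port are assumptions based on the LVA
# gRPC contract; adjust to the names in your generated code.
import grpc
from concurrent import futures

def serve(port=5001):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    # Assumed generated registration helper and servicer class:
    extension_pb2_grpc.add_MediaGraphExtensionServicer_to_server(InferenceServer(), server)
    server.add_insecure_port('[::]:{0}'.format(port))
    server.start()
    server.wait_for_termination()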
def get_lva_MediaStreamMessage(self, buffer, gst_lva_message, ih, iw):
    msg = extension_pb2.MediaStreamMessage()
    msg.ack_sequence_number = gst_lva_message.sequence_number
    msg.media_sample.timestamp = gst_lva_message.timestamp

    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
    frame = batch_meta.frame_meta_list
    while frame is not None:
        try:
            # frame.data needs a cast to pyds.NvDsFrameMeta, done with
            # pyds.NvDsFrameMeta.cast(). The cast also keeps ownership of
            # the underlying memory in the C code, so the Python garbage
            # collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(frame.data)
            objInference = frame_meta.obj_meta_list
            frameWidth = frame_meta.source_frame_width
            frameHeight = frame_meta.source_frame_height

            # One inference per frame; the loop below keeps the data of
            # the last object it visits
            inference = msg.media_sample.inferences.add()
            attributes = []
            obj_label = None
            obj_confidence = 0
            obj_left = 0
            obj_top = 0
            obj_width = 0
            obj_height = 0

            # Iterate through the objects in this frame
            while objInference is not None:
                try:
                    # Cast objInference.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(objInference.data)
                except StopIteration:
                    break

                rect_params = obj_meta.rect_params
                top = int(rect_params.top)
                left = int(rect_params.left)
                width = int(rect_params.width)
                height = int(rect_params.height)

                obj_confidence = obj_meta.confidence
                obj_label = obj_meta.obj_label

                # Normalize the box to [0, 1]
                obj_left = left / iw
                obj_top = top / ih
                obj_width = width / iw
                obj_height = height / ih

                inference.type = inferencing_pb2.Inference.InferenceType.ENTITY

                try:
                    objInference = objInference.next
                except StopIteration:
                    break

            if obj_label is not None:
                try:
                    entity = inferencing_pb2.Entity(
                        tag=inferencing_pb2.Tag(value=obj_label, confidence=obj_confidence),
                        box=inferencing_pb2.Rectangle(l=obj_left, t=obj_top, w=obj_width, h=obj_height))
                    for attr in attributes:
                        attribute = inferencing_pb2.Attribute(
                            name=attr[0], value=attr[1], confidence=attr[2])
                        entity.attributes.append(attribute)
                except:
                    PrintGetExceptionDetails()
                inference.entity.CopyFrom(entity)
        except StopIteration:
            break

        try:
            frame = frame.next
        except StopIteration:
            break

    return msg
def get_lva_MediaStreamMessage(self, buffer, gst_lva_message, ih, iw):
    msg = extension_pb2.MediaStreamMessage()
    msg.ack_sequence_number = gst_lva_message.sequence_number
    msg.media_sample.timestamp = gst_lva_message.timestamp

    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
    frame = batch_meta.frame_meta_list
    while frame is not None:
        try:
            # frame.data needs a cast to pyds.NvDsFrameMeta, done with
            # pyds.NvDsFrameMeta.cast(). The cast also keeps ownership of
            # the underlying memory in the C code, so the Python garbage
            # collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(frame.data)
            objInference = frame_meta.obj_meta_list
            frameWidth = frame_meta.source_frame_width
            frameHeight = frame_meta.source_frame_height

            # Iterate through the objects in this frame; one inference per object
            while objInference is not None:
                try:
                    # Cast objInference.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(objInference.data)
                except StopIteration:
                    break

                inference = msg.media_sample.inferences.add()
                attributes = []
                obj_label = None
                obj_confidence = 0
                obj_left = 0
                obj_top = 0
                obj_width = 0
                obj_height = 0

                # Classification: collect secondary-classifier attributes
                if obj_meta.class_id == 0 and obj_meta.classifier_meta_list is not None:
                    classifier_meta = obj_meta.classifier_meta_list
                    while classifier_meta is not None:
                        classifierItem = pyds.NvDsClassifierMeta.cast(classifier_meta.data)
                        if classifierItem is not None:
                            label_meta = classifierItem.label_info_list
                            while label_meta is not None:
                                labelItem = pyds.NvDsLabelInfo.cast(label_meta.data)
                                prob = round(labelItem.result_prob, 2)
                                attrValue = labelItem.result_label
                                if classifierItem.unique_component_id == PGIE_CLASS_ID_VEHICLE_COLOR:
                                    attrName = 'color'
                                else:
                                    attrName = 'type'
                                attributes.append([attrName, attrValue, prob])
                                try:
                                    label_meta = label_meta.next
                                except StopIteration:
                                    break
                        try:
                            classifier_meta = classifier_meta.next
                        except StopIteration:
                            break

                rect_params = obj_meta.rect_params
                top = int(rect_params.top)
                left = int(rect_params.left)
                width = int(rect_params.width)
                height = int(rect_params.height)

                obj_confidence = obj_meta.confidence
                obj_label = obj_meta.obj_label

                # Normalize the detector box to [0, 1]
                obj_left = left / iw
                obj_top = top / ih
                obj_width = width / iw
                obj_height = height / ih

                obj_id = None
                # Tracking: prefer the active tracking bbox when available
                if self.trackinEnabled:
                    obj_id = obj_meta.object_id
                    obj_active_tracking = obj_meta.tracker_bbox_info
                    tracking_coord = obj_active_tracking.org_bbox_coords
                    if (tracking_coord is not None and
                            tracking_coord.left > 0 and tracking_coord.width > 0 and
                            tracking_coord.top > 0 and tracking_coord.height > 0):
                        obj_left = tracking_coord.left / iw
                        obj_top = tracking_coord.top / ih
                        obj_width = tracking_coord.width / iw
                        obj_height = tracking_coord.height / ih

                inference.type = inferencing_pb2.Inference.InferenceType.ENTITY

                if obj_label is not None:
                    try:
                        entity = inferencing_pb2.Entity(
                            tag=inferencing_pb2.Tag(
                                value=obj_label,
                                confidence=obj_confidence
                            ),
                            box=inferencing_pb2.Rectangle(
                                l=obj_left,
                                t=obj_top,
                                w=obj_width,
                                h=obj_height
                            )
                        )
                        if self.trackinEnabled and obj_id is not None:
                            entity.id = str(obj_id)
                        for attr in attributes:
                            attribute = inferencing_pb2.Attribute(
                                name=attr[0], value=attr[1], confidence=attr[2])
                            entity.attributes.append(attribute)
                    except:
                        PrintGetExceptionDetails()
                    inference.entity.CopyFrom(entity)

                try:
                    objInference = objInference.next
                except StopIteration:
                    break
        except StopIteration:
            break

        try:
            frame = frame.next
        except StopIteration:
            break

    return msg
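# --- Pad-probe wiring sketch (illustrative) ---
# How get_lva_MediaStreamMessage above might be invoked from a GStreamer
# pad probe in a DeepStream pipeline. The 'server' object, the
# 'gst_lva_message' carrying the sequence number and timestamp, and the
# frame dimensions are assumed to be provided by the hosting pipeline.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # u_data is assumed to carry (server, gst_lva_message, ih, iw)
    server, gst_lva_message, ih, iw = u_data
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    msg = server.get_lva_MediaStreamMessage(gst_buffer, gst_lva_message, ih, iw)
    # ... hand 'msg' to the gRPC response stream ...
    return Gst.PadProbeReturn.OK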