def ProcessMediaStream(self, requestIterator, context):
        # The logic below can be extended to multiple processes (e.g. one per CPU core when using CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.

        # Auto-increment counter, incremented for each response sent to the client
        responseSeqNum = 1

        # The first message from the client must be a MediaStreamDescriptor
        mediaStreamMessageRequest = next(requestIterator)

        # Extract message IDs
        requestSeqNum = mediaStreamMessageRequest.sequence_number
        requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

        # State object per client
        clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

        logging.info(
            '[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'
            .format(requestSeqNum, requestAckSeqNum,
                    clientState._mediaStreamDescriptor))

        # First message response ...
        mediaStreamMessage = extension_pb2.MediaStreamMessage(
            sequence_number=responseSeqNum,
            ack_sequence_number=requestSeqNum,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=clientState._mediaStreamDescriptor.
                    media_descriptor.timescale)))
        yield mediaStreamMessage

        width = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.width
        height = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.height

        msgQueue = Queue(maxsize=10)
        gst_lva_pipeline = Gst_Lva_Pipeline(
            msgQueue, mediaStreamMessageRequest.media_stream_descriptor.
            graph_identifier.graph_instance_name, width, height)
        gst_lva_pipeline.play()

        # Process rest of the MediaStream message sequence
        for mediaStreamMessageRequest in requestIterator:
            try:
                # Read request id, sent by client
                requestSeqNum = mediaStreamMessageRequest.sequence_number

                logging.info(
                    '[Received] SequenceNum: {0:07d}'.format(requestSeqNum))

                # Get the media content bytes (sent via shared memory or inline in the message)
                if (not self.ProcessMediaSample(clientState,
                                                mediaStreamMessageRequest,
                                                gst_lva_pipeline)):
                    #logging.info('Error in processing media sample with sequence number ' + str(mediaStreamMessageRequest.sequence_number))

                    responseSeqNum += 1
                    # Respond with message without inferencing
                    mediaStreamMessage = extension_pb2.MediaStreamMessage()
                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = mediaStreamMessageRequest.sequence_number
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp
                    logging.info("empty message for request seq = " +
                                 str(mediaStreamMessage.ack_sequence_number) +
                                 " response seq = " + str(responseSeqNum))

                    yield mediaStreamMessage

                elif context.is_active():
                    while (not msgQueue.empty()):
                        mediaStreamMessage = msgQueue.get()
                        responseSeqNum += 1
                        mediaStreamMessage.sequence_number = responseSeqNum

                        logging.info(
                            "responding for message with request seq = " +
                            str(mediaStreamMessage.ack_sequence_number) +
                            " response seq = " + str(responseSeqNum))
                        #logging.info(mediaStreamMessage)

                        # yield response
                        yield mediaStreamMessage
                else:
                    break
            except:
                PrintGetExceptionDetails()

        logging.info('Done processing messages')
        logging.info('MediaStreamDescriptor:\n{0}'.format(
            clientState._mediaStreamDescriptor))
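
# --- Hypothetical sketch (not part of the example above) ---------------------
# The example above drains msgQueue with `while not msgQueue.empty()`, so
# pipeline results are only forwarded when the next client request arrives.
# Below is a minimal, self-contained illustration of that producer/consumer
# hand-off; `fake_pipeline_worker` and the dict messages are invented stand-ins
# for Gst_Lva_Pipeline and extension_pb2.MediaStreamMessage, which are not
# shown in this listing.
import threading
import time
from queue import Empty, Queue


def fake_pipeline_worker(msg_queue):
    # Stand-in producer: pushes one "inference result" per processed frame.
    for ack in range(1, 4):
        time.sleep(0.01)
        msg_queue.put({'ack_sequence_number': ack})


def drain(msg_queue, timeout=0.1):
    # Yield every result currently available, then stop.
    while True:
        try:
            yield msg_queue.get(timeout=timeout)
        except Empty:
            break


if __name__ == '__main__':
    q = Queue(maxsize=10)
    threading.Thread(target=fake_pipeline_worker, args=(q,), daemon=True).start()
    time.sleep(0.1)
    for seq, msg in enumerate(drain(q), start=2):
        print('response seq', seq, 'acks request', msg['ack_sequence_number'])
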
# Example 2
    def ProcessMediaStream(self, requestIterator, context):
        # The logic below can be extended to multiple processes (e.g. one per CPU core when using CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.

        # Auto-increment counter, incremented once per client request
        responseSeqNum = 1

        # The first message from the client must be a MediaStreamDescriptor
        mediaStreamMessageRequest = next(requestIterator)

        # Extract message IDs
        requestSeqNum = mediaStreamMessageRequest.sequence_number
        requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

        # State object per client       
        clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

        logging.info('Connection created with peer {0}.\nMediaStreamDescriptor:\n{1}'.format(context.peer(), clientState._mediaStreamDescriptor))
        logging.debug('[Received] SeqNum: {0:07d} | AckNum: {1}'.format(requestSeqNum, requestAckSeqNum))

        # First message response ...
        mediaStreamMessage = extension_pb2.MediaStreamMessage(
            sequence_number=responseSeqNum,
            ack_sequence_number=requestSeqNum,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale
                )
            )
        )
        yield mediaStreamMessage

        # Process rest of the MediaStream message sequence
        for mediaStreamMessageRequest in requestIterator:
            try:
                # Increment response counter, will be sent to client
                responseSeqNum += 1
                
                # Read request id, sent by client
                requestSeqNum = mediaStreamMessageRequest.sequence_number

                logging.debug('[Received] SeqNum: {0:07d}'.format(requestSeqNum))

                # Get the media content bytes (sent via shared memory or inline in the message)
                cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)

                if cvImage is None:
                    message = "Can't decode received bytes."
                    logging.info(message)
                    context.set_details(message)
                    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                    return

                if list(cvImage.shape[:2]) != self._tYoloV3.image_shape:
                    message = "Received an image of size {0}, but expected one of size {1}".format(cvImage.shape[:2], self._tYoloV3.image_shape)
                    context.set_details(message)
                    logging.info(message)
                    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                    return

                # Save a copy of the received frame for debugging (writes every frame to /tmp)
                cv2.imwrite('/tmp/sample{}.jpg'.format(requestSeqNum), cvImage)

                # run inference
                boxes, scores, indices = self._tYoloV3.Score(cvImage)

                logging.debug('Detected {0} inferences'.format(len(indices)))

                if DEBUG is not None:
                    self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

                # Check client connection state
                if context.is_active():
                    # return inference result as MediaStreamMessage
                    mediaStreamMessage = self.GetMediaStreamMessageResponse(boxes, scores, indices, cvImage.shape)

                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = requestSeqNum
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                    # yield response
                    yield mediaStreamMessage
                else:
                    break
            except:
                PrintGetExceptionDetails()
                
        logging.info('Connection closed with peer {0}.'.format(context.peer()))
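
# --- Hypothetical sketch (not part of the example above) ---------------------
# GetCvImageFromRawBytes is called above but not shown in this listing. A
# minimal helper that only handles image-encoded bytes sent inline in the
# message might look like the function below; the real helper presumably also
# handles raw pixel formats and the shared-memory transfer mode, which are
# omitted here. The function name is invented for this sketch.
import cv2
import numpy as np


def decode_inline_image_bytes(content_bytes):
    """Decode JPEG/PNG/BMP bytes into a BGR numpy image, or return None."""
    buf = np.frombuffer(content_bytes, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)  # None if decoding fails


# Usage (assumed file path):
#     with open('sample.jpg', 'rb') as f:
#         cvImage = decode_inline_image_bytes(f.read())

# Example 3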
    def ProcessMediaStream(self, requestIterator, context):
        # The logic below can be extended to multiple processes (e.g. one per CPU core when using CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.

        # Auto-increment counter, incremented once per client request
        responseSeqNum = 1

        # The first message from the client must be a MediaStreamDescriptor
        mediaStreamMessageRequest = next(requestIterator)

        # Extract message IDs
        requestSeqNum = mediaStreamMessageRequest.sequence_number
        requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

        # State object per client       
        clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

        logging.info('Connection created with peer {0}.\nMediaStreamDescriptor:\n{1}'.format(context.peer(), clientState._mediaStreamDescriptor))
        logging.debug('[Received] SeqNum: {0:07d} | AckNum: {1}'.format(requestSeqNum, requestAckSeqNum))

        # First message response ...
        mediaStreamMessage = extension_pb2.MediaStreamMessage(
            sequence_number=responseSeqNum,
            ack_sequence_number=requestSeqNum,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale
                )
            )
        )
        yield mediaStreamMessage

        # Process rest of the MediaStream message sequence
        for mediaStreamMessageRequest in requestIterator:
            try:
                # Increment response counter, will be sent to client
                responseSeqNum += 1
                
                # Read request id, sent by client
                requestSeqNum = mediaStreamMessageRequest.sequence_number

                logging.debug('[Received] SeqNum: {0:07d}'.format(requestSeqNum))

                # Get the media content bytes (sent via shared memory or inline in the message)
                cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)

                if cvImage is None:
                    message = "Can't decode received bytes."
                    logging.info(message)
                    context.set_details(message)
                    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                    return

                if list(cvImage.shape[:2]) != self._tYoloV3.image_shape:
                    message = "Received an image of size {0}, but expected one of size {1}".format(cvImage.shape[:2], self._tYoloV3.image_shape)
                    context.set_details(message)
                    logging.info(message)
                    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                    return

                mediaStreamMessage = extension_pb2.MediaStreamMessage()

                # Check confidence
                allConfidenceReached = True
                for inference in mediaStreamMessageRequest.media_sample.inferences:
                    confidence = inference.entity.tag.confidence
                    if(confidence < self.inferenceConfidence):
                        allConfidenceReached = False
                        break
                
                if(len(mediaStreamMessageRequest.media_sample.inferences) > 0 and allConfidenceReached):
                    # Return acknowledge message
                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = requestSeqNum
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp
                    
                    for tinyYoloInference in mediaStreamMessageRequest.media_sample.inferences:
                        objectLabel = tinyYoloInference.entity.tag.value

                        if(self.objectTag is None):
                            self.objectTag = objectLabel

                        if(self.objectTag == objectLabel):
                            inference = mediaStreamMessage.media_sample.inferences.add()
                            inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
                            inference.subtype = 'From upstream'
                            inference.entity.CopyFrom(inferencing_pb2.Entity(
                                                            tag = inferencing_pb2.Tag(
                                                                value = objectLabel,
                                                                confidence = tinyYoloInference.entity.tag.confidence
                                                            ),
                                                            box = inferencing_pb2.Rectangle(
                                                                l = tinyYoloInference.entity.box.l,
                                                                t = tinyYoloInference.entity.box.t,
                                                                w = tinyYoloInference.entity.box.w,
                                                                h = tinyYoloInference.entity.box.h,
                                                            )
                                                        )
                                                    )
                else:
                    # run inference
                    boxes, scores, indices = self._tYoloV3.Score(cvImage)

                    logging.debug('Detected {0} inferences'.format(len(indices)))

                    if DEBUG is not None:
                        self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

                    # return inference result as MediaStreamMessage
                    mediaStreamMessage = self.GetMediaStreamMessageResponse(mediaStreamMessage, boxes, scores, indices, cvImage.shape, mediaStreamMessageRequest.media_sample.timestamp)

                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = requestSeqNum
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                # Check client connection state
                if context.is_active():
                    # yield response
                    yield mediaStreamMessage
                else:
                    break
            except:
                PrintGetExceptionDetails()
                
        logging.info('Connection closed with peer {0}.'.format(context.peer()))
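
# --- Hypothetical sketch (not part of the example above) ---------------------
# The confidence gate above passes upstream results through only when every
# inference meets self.inferenceConfidence. A stand-alone version of that
# check is shown below; the namedtuples are invented stand-ins for the
# inferencing_pb2 messages used in the example.
from collections import namedtuple

Tag = namedtuple('Tag', 'value confidence')
Entity = namedtuple('Entity', 'tag')
Inference = namedtuple('Inference', 'entity')


def all_confidences_reached(inferences, threshold):
    # True only if there is at least one inference and none falls below the threshold.
    confidences = [inf.entity.tag.confidence for inf in inferences]
    return bool(confidences) and min(confidences) >= threshold


if __name__ == '__main__':
    upstream = [Inference(Entity(Tag('person', 0.9))),
                Inference(Entity(Tag('person', 0.4)))]
    print(all_confidences_reached(upstream, 0.6))  # False: one detection is below 0.6

# Example 4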
    def ProcessMediaStream(self, requestIterator, context):
        # The logic below can be extended to multiple processes (e.g. one per CPU core when using CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.

        # Auto-increment counter, incremented once per client request
        responseSeqNum = 1

        # The first message from the client must be a MediaStreamDescriptor
        mediaStreamMessageRequest = next(requestIterator)

        # Extract message IDs
        requestSeqNum = mediaStreamMessageRequest.sequence_number
        requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

        # State object per client
        clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

        logging.info('Connection created with peer {0}.\nMediaStreamDescriptor:\n{1}'.format(
            context.peer(), clientState._mediaStreamDescriptor))
        instance_id = clientState._mediaStreamDescriptor.graph_identifier.graph_instance_name
        logging.debug('[Received] SeqNum: {0:07d} | AckNum: {1}'.format(
            requestSeqNum, requestAckSeqNum))

        # First message response ...
        mediaStreamMessage = extension_pb2.MediaStreamMessage(
            sequence_number=responseSeqNum,
            ack_sequence_number=requestSeqNum,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=clientState._mediaStreamDescriptor.media_descriptor.timescale
                )
            )
        )
        yield mediaStreamMessage

        # Process rest of the MediaStream message sequence
        for mediaStreamMessageRequest in requestIterator:
                # Increment response counter, will be sent to client
                responseSeqNum += 1

                # Read request id, sent by client
                requestSeqNum = mediaStreamMessageRequest.sequence_number

                logging.debug(
                    '[Received] SeqNum: {0:07d}'.format(requestSeqNum))

                # Get the media content bytes (sent via shared memory or inline in the message)
                cvImage = self.GetCvImageFromRawBytes(
                    clientState, mediaStreamMessageRequest.media_sample)

                if cvImage is None:
                    message = "Can't decode received bytes."
                    logging.info(message)
                    context.set_details(message)
                    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                    return

                # if list(cvImage.shape[:2]) != self._tYoloV3.image_shape:
                #     message = "Received an image of size {0}, but expected one of size {1}".format(
                #         cvImage.shape[:2], self._tYoloV3.image_shape)
                #     context.set_details(message)
                #     logging.info(message)
                #     context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                #     return

                # instance_name = mediaStreamMessageRequest.graph_identifier.graph_instance_name
                # logging.info('req: {0}'.format(mediaStreamMessageRequest))

                # run inference
                # logging.info(self._tYoloV3)
                # out = self._tYoloV3.Score(cvImage)
                # logging.info(out)
                # predictions = self._tYoloV3.Score(cvImage, instance_id)
                try:
                    s = self.stream_manager.get_stream_by_id(instance_id)
                    if s:
                        #print('got stream and predicting...', flush=True)
                        s.predict(cvImage)
                        predictions = s.last_prediction
                    else:
                        #print('got notthing', flush=True)
                        predictions = []
                except:
                    print("[ERROR] Unexpected error:", sys.exc_info(), flush=True)
                    predictions = []
                # stream_manager.update(cvImage, instance_id)
                # logging.debug(
                #     'Detected {0} inferences'.format(len(predictions)))

                # if DEBUG is not None:
                #     self.CreateDebugOutput(
                #         requestSeqNum, cvImage, boxes, scores, indices)

                # Check client connection state
                if context.is_active():
                    # return inference result as MediaStreamMessage
                    mediaStreamMessage = self.GetMediaStreamMessageResponse(
                        predictions, cvImage.shape)

                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = requestSeqNum
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                    # yield response
                    yield mediaStreamMessage
                else:
                    break

        logging.info('Connection closed with peer {0}.'.format(context.peer()))
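
# --- Hypothetical sketch (not part of the example above) ---------------------
# The stream_manager used above is not shown in this listing. A minimal
# per-graph-instance registry exposing the same get_stream_by_id(), predict()
# and last_prediction surface could look like this; the predictor callable is
# an invented stand-in for whatever model the real stream object wraps.
class _Stream:
    def __init__(self, predictor):
        self._predictor = predictor
        self.last_prediction = []

    def predict(self, image):
        # Run the wrapped model and cache its output for the caller to read.
        self.last_prediction = self._predictor(image)


class SimpleStreamManager:
    def __init__(self, predictor):
        self._predictor = predictor
        self._streams = {}

    def get_stream_by_id(self, instance_id):
        # Lazily create one stream per graph instance name.
        return self._streams.setdefault(instance_id, _Stream(self._predictor))


# Usage (assumed no-op predictor):
#     stream_manager = SimpleStreamManager(lambda image: [])
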
# Example 5
    def ProcessMediaStream(self, requestIterator, context):
        # The logic below can be extended to multiple processes (e.g. one per CPU core when using CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.

        # Auto-increment counter, incremented once per client request
        responseSeqNum = 1

        # The first message from the client must be a MediaStreamDescriptor
        mediaStreamMessageRequest = next(requestIterator)

        # Extract message IDs
        requestSeqNum = mediaStreamMessageRequest.sequence_number
        requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

        # State object per client
        clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

        logging.info(
            '[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'
            .format(requestSeqNum, requestAckSeqNum,
                    clientState._mediaStreamDescriptor))

        # First message response ...
        mediaStreamMessage = extension_pb2.MediaStreamMessage(
            sequence_number=responseSeqNum,
            ack_sequence_number=requestSeqNum,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=clientState._mediaStreamDescriptor.
                    media_descriptor.timescale)))
        # Send acknowledge message to client
        yield mediaStreamMessage

        width = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.width
        height = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.dimensions.height

        # Process rest of the MediaStream message sequence
        messageCount = 0
        imageBatch = []
        for mediaStreamMessageRequest in requestIterator:
            try:
                # Read request id, sent by client
                requestSeqNum = mediaStreamMessageRequest.sequence_number
                timestamp = mediaStreamMessageRequest.media_sample.timestamp

                logging.info(
                    '[Received] SequenceNum: {0:07d}'.format(requestSeqNum))

                imageDetails = self.get_image_details(
                    clientState, mediaStreamMessageRequest)
                # Increment response sequence number
                responseSeqNum += 1

                if (messageCount < self.batchSize):
                    # Add images to batch and create acknowledge message
                    logging.info(
                        'Adding image #{0} to batch.'.format(messageCount + 1))
                    mediaStreamMessage = extension_pb2.MediaStreamMessage(
                        sequence_number=responseSeqNum,
                        ack_sequence_number=requestSeqNum)
                    imageBatch.append(imageDetails)
                    messageCount += 1
                else:
                    # Process batch
                    logging.info(
                        'Processing batch ({0}).'.format(messageCount))
                    mediaStreamMessage = extension_pb2.MediaStreamMessage()
                    for image in imageBatch:
                        mediaStreamMessage = self.process_media_sample(
                            mediaStreamMessage, image)

                    if (mediaStreamMessage is None):
                        # Respond with message without inferencing
                        mediaStreamMessage = extension_pb2.MediaStreamMessage()
                        responseStatusMessage = "empty message for request seq = " + str(
                            mediaStreamMessage.ack_sequence_number
                        ) + " response seq = " + str(responseSeqNum)
                    else:
                        responseStatusMessage = "responding for message with request seq = " + str(
                            mediaStreamMessage.ack_sequence_number
                        ) + " response seq = " + str(responseSeqNum)

                    logging.info(responseStatusMessage)
                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = mediaStreamMessageRequest.sequence_number
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                    # Clear batch
                    imageBatch.clear()
                    messageCount = 0

                if context.is_active():
                    # yield response
                    yield mediaStreamMessage
                else:
                    break
            except:
                PrintGetExceptionDetails()

        logging.info('Done processing messages')
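
# --- Hypothetical sketch (not part of the example above) ---------------------
# Stand-alone illustration of the accumulate-then-flush batching pattern used
# above, with invented names (batch_size, process_batch). Unlike the example,
# this version appends the triggering item before checking the size, so the
# frame that fills the batch is processed with it rather than skipped.
def batched(items, batch_size, process_batch):
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == batch_size:
            process_batch(batch)
            batch = []
    if batch:
        # Flush any leftover items at the end of the stream.
        process_batch(batch)


if __name__ == '__main__':
    batched(range(7), 3, lambda b: print('processing batch', list(b)))

# Example 6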
    def ProcessMediaStream(self, requestIterator, context):
        # The logic below can be extended to multiple processes (e.g. one per CPU core when using CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.

        # Auto-increment counter, incremented once per client request
        responseSeqNum = 1

        # The first message from the client must be a MediaStreamDescriptor
        mediaStreamMessageRequest = next(requestIterator)

        # Extract message IDs
        requestSeqNum = mediaStreamMessageRequest.sequence_number
        requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number

        # State object per client
        clientState = State(mediaStreamMessageRequest.media_stream_descriptor)

        if DEBUG is not None:
            logging.info(
                '[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'
                .format(requestSeqNum, requestAckSeqNum,
                        clientState._mediaStreamDescriptor))

        # First message response ...
        mediaStreamMessage = extension_pb2.MediaStreamMessage(
            sequence_number=responseSeqNum,
            ack_sequence_number=requestSeqNum,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=clientState._mediaStreamDescriptor.
                    media_descriptor.timescale)))
        yield mediaStreamMessage

        # Process rest of the MediaStream message sequence
        for mediaStreamMessageRequest in requestIterator:
            try:
                # Increment response counter, will be sent to client
                responseSeqNum += 1

                # Read request id, sent by client
                requestSeqNum = mediaStreamMessageRequest.sequence_number

                if DEBUG is not None:
                    logging.info(
                        '[Received] SeqNum: {0:07d}'.format(requestSeqNum))

                # Get the media content bytes (sent via shared memory or inline in the message)
                cvImage = self.GetCvImageFromRawBytes(
                    clientState, mediaStreamMessageRequest.media_sample)

                if cvImage is None:
                    logging.info("Can't decode received bytes.")
                    continue

                # start = t.default_timer()
                # run inference
                boxes, originalImageSize = self._YoloV4.Score(cvImage)
                # end = t.default_timer()
                # infTime = round((end - start) * 1000, 5)
                # logging.info('inf time: {0}'.format(infTime))

                # if DEBUG is not None:
                #     self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)

                # Check client connection state
                if context.is_active():
                    # return inference result as MediaStreamMessage
                    mediaStreamMessage = self.GetMediaStreamMessageResponse(
                        boxes, originalImageSize)

                    mediaStreamMessage.sequence_number = responseSeqNum
                    mediaStreamMessage.ack_sequence_number = requestSeqNum
                    mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp

                    # yield response
                    yield mediaStreamMessage
                else:
                    break
            except:
                PrintGetExceptionDetails()
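
# --- Hypothetical sketch (not part of the example above) ---------------------
# The commented-out timing code above measures inference latency with
# t.default_timer(). A small, self-contained version of that measurement is
# shown below (standard library only; timed_score is an invented name).
import logging
from timeit import default_timer


def timed_score(score_fn, image):
    # Run one inference and log its wall-clock duration in milliseconds.
    start = default_timer()
    result = score_fn(image)
    elapsed_ms = round((default_timer() - start) * 1000, 5)
    logging.info('inf time: {0} ms'.format(elapsed_ms))
    return result


# Usage inside the example above:
#     boxes, originalImageSize = timed_score(self._YoloV4.Score, cvImage)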