    def test_streaming_annotate_video(self):
        # Setup Expected Response
        annotation_results_uri = "annotationResultsUri-238075757"
        expected_response = {"annotation_results_uri": annotation_results_uri}
        expected_response = video_intelligence_pb2.StreamingAnnotateVideoResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[iter([expected_response])])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = (
                videointelligence_v1p3beta1.StreamingVideoIntelligenceServiceClient()
            )

        # Setup Request
        request = {}
        request = video_intelligence_pb2.StreamingAnnotateVideoRequest(**request)
        requests = [request]

        response = client.streaming_annotate_video(requests)
        resources = list(response)
        assert len(resources) == 1
        assert expected_response == resources[0]

        assert len(channel.requests) == 1
        actual_requests = channel.requests[0][1]
        assert len(actual_requests) == 1
        actual_request = list(actual_requests)[0]
        assert request == actual_request
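The test above relies on two local helpers, ChannelStub and CustomException, that are defined elsewhere in the test module and not shown here. A minimal sketch of what they might look like, assuming the stub only needs to record each gRPC call and replay (or raise) the next canned response; the real helpers in the client library's test suite are more elaborate:

class CustomException(Exception):
    pass


class _CallableStub(object):
    # Records the request handed to a gRPC method and replays the next
    # canned response, or raises it if it is an exception instance.
    def __init__(self, method, channel):
        self.method = method
        self.channel = channel

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        self.channel.requests.append((self.method, request))
        response = self.channel.responses.pop(0)
        if isinstance(response, Exception):
            raise response
        return response


class ChannelStub(object):
    # Fake grpc.Channel: every call family hands back a recording callable.
    def __init__(self, responses=()):
        self.responses = list(responses)
        self.requests = []

    def unary_unary(self, method, request_serializer=None,
                    response_deserializer=None):
        return _CallableStub(method, self)

    unary_stream = stream_unary = stream_stream = unary_unary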
Example #2
import io

# Streaming annotation lives in the beta API surface; in the pre-2.0 client
# library the samples import it under the `videointelligence` alias.
from google.cloud import videointelligence_v1p3beta1 as videointelligence


def streaming_annotate(stream_file, output_uri):
    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config specifying the output_uri.
    # The output_uri is the prefix of the actual output files.
    storage_config = videointelligence.types.StreamingStorageConfig(
        enable_storage_annotation_result=True,
        annotation_result_storage_directory=output_uri)
    # Here we use label detection as an example.
    # All features support output to GCS.
    config = videointelligence.types.StreamingVideoConfig(
        feature=(videointelligence.enums.
                 StreamingFeature.STREAMING_LABEL_DETECTION),
        storage_config=storage_config)

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(stream_file, 'rb') as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The timeout argument (in seconds) is the maximum time allowed between
    # sending the last packet to the Video Intelligence API and receiving an
    # annotation result back from the API.
    responses = client.streaming_annotate_video(requests, timeout=3600)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        print('Storage URI: {}'.format(response.annotation_results_uri))
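A small driver for the sample above could look like the following; the argument names and the gs:// prefix in the help text are placeholders, not values from the original sample:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Stream a local video and write annotation results to GCS.')
    parser.add_argument('stream_file', help='Path to a local video file.')
    parser.add_argument(
        'output_uri', help='GCS prefix for results, e.g. gs://my-bucket/results')
    args = parser.parse_args()
    streaming_annotate(args.stream_file, args.output_uri)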
Example #3
def streaming_annotate(stream_file):
    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.types.StreamingVideoConfig(
        feature=(videointelligence.enums.StreamingFeature.
                 STREAMING_EXPLICIT_CONTENT_DETECTION))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(stream_file, 'rb') as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The timeout argument (in seconds) is the maximum time allowed between
    # sending the last packet to the Video Intelligence API and receiving an
    # annotation result back from the API.
    responses = client.streaming_annotate_video(requests, timeout=3600)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        for frame in response.annotation_results.explicit_annotation.frames:
            time_offset = (frame.time_offset.seconds +
                           frame.time_offset.nanos / 1e9)
            pornography_likelihood = videointelligence.enums.Likelihood(
                frame.pornography_likelihood)

            print('Time: {}s'.format(time_offset))
            print('\tpornography: {}'.format(pornography_likelihood.name))
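As a variation on the sample above (not part of the original), one might collect only the frames whose likelihood reaches a threshold rather than printing every frame; a minimal sketch, assuming the same response objects:

def flag_explicit_frames(response,
                         threshold=videointelligence.enums.Likelihood.LIKELY):
    # Return the time offsets (seconds) of frames at or above `threshold`.
    # Likelihood values are ordered integers, so a plain comparison works.
    flagged = []
    for frame in response.annotation_results.explicit_annotation.frames:
        if frame.pornography_likelihood >= threshold:
            flagged.append(frame.time_offset.seconds +
                           frame.time_offset.nanos / 1e9)
    return flagged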
Example #4
    def test_streaming_annotate_video_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = (
                videointelligence_v1p3beta1.StreamingVideoIntelligenceServiceClient()
            )

        # Setup request
        request = {}

        request = video_intelligence_pb2.StreamingAnnotateVideoRequest(**request)
        requests = [request]

        with pytest.raises(CustomException):
            client.streaming_annotate_video(requests)
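With a ChannelStub like the sketch shown after the first test (which records the request before raising), the test body could additionally assert that the failing call still reached the fake channel; this extra check is an assumption about that sketch, not part of the original test:

        # Assumes a stub that records the request before raising.
        assert len(channel.requests) == 1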
Example #5
def streaming_annotate(stream_file):
    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.types.StreamingVideoConfig(feature=(
        videointelligence.enums.StreamingFeature.STREAMING_OBJECT_TRACKING))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    def stream_generator(chunk):
        yield config_request
        yield videointelligence.types.StreamingAnnotateVideoRequest(
            input_content=chunk)

    def focus_camera(top, bottom, left, right):
        # Placeholder: point the camera at the given bounding-box coordinates.
        print('Person found; moving the camera.')

    def response_filter(requests):
        # streaming_annotate_video returns a generator.
        # The timeout argument (in seconds) is the maximum time allowed between
        # sending the last packet to the Video Intelligence API and receiving an
        # annotation result back from the API.
        responses = client.streaming_annotate_video(requests, timeout=3600)

        # Each response corresponds to about 1 second of video.
        for response in responses:
            # Check for errors.
            if response.error.message:
                print(response.error.message)
                # handle more gracefully later
                continue

            if response.annotation_results.object_annotations:
                for annotation in response.annotation_results.object_annotations:
                    description = annotation.entity.description
                    confidence = annotation.confidence

                    # track_id tracks the same object in the video.
                    track_id = annotation.track_id

                    # description is in Unicode
                    print('\tEntity description: {}'.format(
                        description.encode('utf-8').strip()))
                    print('\tTrack Id: {}'.format(track_id))
                    if annotation.entity.entity_id:
                        print('\tEntity id: {}'.format(
                            annotation.entity.entity_id))

                    print('\tConfidence: {}'.format(confidence))

                    # Every annotation has only one frame
                    frame = annotation.frames[0]
                    box = frame.normalized_bounding_box
                    time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
                    print('\tTimestamp: {}'.format(time_offset))
                    print('\tbounding box position:')
                    print('\tleft  : {}'.format(box.left))
                    print('\ttop   : {}'.format(box.top))
                    print('\tright : {}'.format(box.right))
                    print('\tbottom: {}\n'.format(box.bottom))

                    if (description.encode('utf-8').strip() == "person"
                            and confidence > 0.70):
                        # send coordinates to camera
                        focus_camera(box.top, box.bottom, box.left, box.right)

    # Load file content.
    with io.open(stream_file, 'rb') as video_file:

        counter = 0
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            counter += 1
            print('Chunks of data sent: {}, data size: {}'.format(
                counter, len(data)))
            requests = stream_generator(data)
            response_filter(requests)
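The person check in the sample above is hard-coded; a hypothetical variation (the names below are illustrative, not part of the original) that matches any label from a configurable set:

TARGET_LABELS = {'person', 'car'}


def should_focus(annotation, min_confidence=0.70):
    # Decide whether an object annotation should steer the camera.
    description = annotation.entity.description.strip().lower()
    return description in TARGET_LABELS and annotation.confidence > min_confidence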
Example #6
def streaming_annotate(stream_file):
    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config with stationary_camera option.
    # The stationary_camera flag defaults to False and can be set to True.
    label_config = videointelligence.types.StreamingLabelDetectionConfig(
        stationary_camera=False)
    config = videointelligence.types.StreamingVideoConfig(
        feature=(videointelligence.enums.StreamingFeature.
                 STREAMING_LABEL_DETECTION),
        label_detection_config=label_config)

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(stream_file, 'rb') as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The timeout argument (in seconds) is the maximum time allowed between
    # sending the last packet to the Video Intelligence API and receiving an
    # annotation result back from the API.
    responses = client.streaming_annotate_video(requests, timeout=3600)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        # Get the time offset of the response.
        frame = response.annotation_results.label_annotations[0].frames[0]
        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('{}s:'.format(time_offset))

        for annotation in response.annotation_results.label_annotations:
            description = annotation.entity.description
            # Every annotation has only one frame
            confidence = annotation.frames[0].confidence
            print('\t{} (confidence: {})'.format(
                description.encode('utf-8').strip(), confidence))
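To get an end-of-stream summary instead of per-second printing, the responses could be folded into a dict keeping the best confidence per label; a minimal sketch (the helper name is illustrative, not part of the original sample):

def summarize_labels(responses):
    # Map each label description to the highest confidence seen in the stream.
    best = {}
    for response in responses:
        if response.error.message:
            break
        for annotation in response.annotation_results.label_annotations:
            description = annotation.entity.description
            confidence = annotation.frames[0].confidence
            best[description] = max(best.get(description, 0.0), confidence)
    return best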