Example #1
def annotation_to_storage_streaming(path, output_uri):
    # [START video_streaming_annotation_to_storage_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'
    # output_uri = 'gs://path_to_output'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config specifying the output_uri.
    # The output_uri is the prefix of the actual output files.
    storage_config = videointelligence.types.StreamingStorageConfig(
        enable_storage_annotation_result=True,
        annotation_result_storage_directory=output_uri,
    )
    # Here we use label detection as an example.
    # All features support output to GCS.
    config = videointelligence.types.StreamingVideoConfig(
        feature=(videointelligence.enums.StreamingFeature.
                 STREAMING_LABEL_DETECTION),
        storage_config=storage_config,
    )

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=600)

    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        print("Storage URI: {}".format(response.annotation_results_uri))
Example #2
def detect_explicit_content_streaming(path):
    # [START video_streaming_explicit_content_detection_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.StreamingVideoConfig(feature=(
        videointelligence.StreamingFeature.STREAMING_EXPLICIT_CONTENT_DETECTION
    ))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=900)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        for frame in response.annotation_results.explicit_annotation.frames:
            time_offset = (frame.time_offset.seconds +
                           frame.time_offset.microseconds / 1e6)
            pornography_likelihood = videointelligence.Likelihood(
                frame.pornography_likelihood)

            print("Time: {}s".format(time_offset))
            print("\tpornogaphy: {}".format(pornography_likelihood.name))
Example #3
def detect_shot_change_streaming(path):
    # [START video_streaming_shot_change_detection_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.types.StreamingVideoConfig(
        feature=(videointelligence.enums.StreamingFeature.
                 STREAMING_SHOT_CHANGE_DETECTION))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(path, 'rb') as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=600)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        for annotation in response.annotation_results.shot_annotations:
            start = (annotation.start_time_offset.seconds +
                     annotation.start_time_offset.nanos / 1e9)
            end = (annotation.end_time_offset.seconds +
                   annotation.end_time_offset.nanos / 1e9)

            print('Shot: {}s to {}s'.format(start, end))
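If the shot boundaries are collected instead of printed, per-shot durations follow from the same offset arithmetic. A small sketch; the shots list of (start, end) pairs holds hypothetical values standing in for results accumulated in the loop above:

# Hypothetical (start, end) pairs in seconds, as collected in the loop above.
shots = [(0.0, 2.4), (2.4, 7.1)]

for number, (start, end) in enumerate(shots, start=1):
    print('Shot {}: {:.2f}s long'.format(number, end - start))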
Example #4
def detect_labels_streaming(path):
    # [START video_streaming_label_detection_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.types.StreamingVideoConfig(feature=(
        videointelligence.enums.StreamingFeature.STREAMING_LABEL_DETECTION))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(path, 'rb') as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    responses = client.streaming_annotate_video(requests)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        # Get the time offset of the response.
        # label_annotations could be empty, so guard the [0] access.
        if not response.annotation_results.label_annotations:
            continue
        frame = response.annotation_results.label_annotations[0].frames[0]
        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('{}s:'.format(time_offset))

        for annotation in response.annotation_results.label_annotations:
            description = annotation.entity.description
            # Every annotation has only one frame
            confidence = annotation.frames[0].confidence
            # description is in Unicode
            print(u'\t{} (confidence: {})'.format(description, confidence))
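Since each response covers roughly one second of video, the per-response labels are often reduced to a stream-wide summary. A minimal sketch that keeps the highest confidence seen per description; the best dict and record_label helper are illustrative, not part of the sample:

best = {}  # description -> highest confidence seen across all responses

def record_label(annotation):
    # Call inside the response loop above, once per label annotation.
    description = annotation.entity.description
    confidence = annotation.frames[0].confidence
    best[description] = max(confidence, best.get(description, 0.0))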
Example #5
def streaming_automl_object_tracking(path, project_id, model_id):
    # [START video_streaming_automl_object_tracking_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'
    # project_id = 'project_id'
    # model_id = 'automl_object_tracking_model_id'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    model_path = "projects/{}/locations/us-central1/models/{}".format(
        project_id, model_id)

    automl_config = videointelligence.StreamingAutomlObjectTrackingConfig(
        model_name=model_path)

    video_config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.
        STREAMING_AUTOML_OBJECT_TRACKING,
        automl_object_tracking_config=automl_config,
    )

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=video_config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    # Note: Input videos must have supported video codecs. See
    # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
    # for more details.
    stream = []
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=900)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        object_annotations = response.annotation_results.object_annotations

        # object_annotations could be empty
        if not object_annotations:
            continue

        for annotation in object_annotations:
            # Each annotation has one frame, which has a time offset.
            frame = annotation.frames[0]
            time_offset = (frame.time_offset.seconds +
                           frame.time_offset.microseconds / 1e6)

            description = annotation.entity.description
            confidence = annotation.confidence

            # track_id tracks the same object in the video.
            track_id = annotation.track_id

            # description is in Unicode
            print("{}s".format(time_offset))
            print(u"\tEntity description: {}".format(description))
            print("\tTrack Id: {}".format(track_id))
            if annotation.entity.entity_id:
                print("\tEntity id: {}".format(annotation.entity.entity_id))

            print("\tConfidence: {}".format(confidence))

            # Every annotation has only one frame
            frame = annotation.frames[0]
            box = frame.normalized_bounding_box
            print("\tBounding box position:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}\n".format(box.bottom))
Example #6
def streaming_automl_classification(path, project_id, model_id):
    # [START video_streaming_automl_classification_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'
    # project_id = 'gcp_project_id'
    # model_id = 'automl_classification_model_id'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    model_path = "projects/{}/locations/us-central1/models/{}".format(
        project_id, model_id)

    # Here we use classification as an example.
    automl_config = videointelligence.StreamingAutomlClassificationConfig(
        model_name=model_path)

    video_config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.
        STREAMING_AUTOML_CLASSIFICATION,
        automl_classification_config=automl_config,
    )

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=video_config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    # Note: Input videos must have supported video codecs. See
    # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
    # for more details.
    stream = []
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=600)

    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        for label in response.annotation_results.label_annotations:
            for frame in label.frames:
                print("At {:3d}s segment, {:5.1%} {}".format(
                    frame.time_offset.seconds,
                    frame.confidence,
                    label.entity.entity_id,
                ))
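To report only the single most confident label per response instead of every frame, the annotations can be reduced with max. A minimal sketch, under the assumption that every label annotation carries at least one frame:

def top_label(label_annotations):
    # Return (entity_id, confidence) of the most confident label;
    # assumes every annotation has at least one frame.
    best = max(label_annotations, key=lambda label: label.frames[0].confidence)
    return best.entity.entity_id, best.frames[0].confidence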
Example #7
def detect_labels_streaming(path):
    # [START video_streaming_label_detection_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.StreamingVideoConfig(
        feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=600)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        label_annotations = response.annotation_results.label_annotations

        # label_annotations could be empty
        if not label_annotations:
            continue

        for annotation in label_annotations:
            # Each annotation has one frame, which has a time offset.
            frame = annotation.frames[0]
            time_offset = (frame.time_offset.seconds +
                           frame.time_offset.microseconds / 1e6)

            description = annotation.entity.description
            confidence = annotation.frames[0].confidence
            # description is in Unicode
            print(u"{}s: {} (confidence: {})".format(time_offset, description,
                                                     confidence))
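Each of these samples buffers the entire file into the stream list before sending it. For long videos, a variant that reads chunks lazily inside the generator keeps at most one chunk in memory; a sketch of that alternative, using the same request shapes as above with demand-driven I/O:

from google.cloud import videointelligence_v1p3beta1 as videointelligence

def lazy_stream_generator(path, config_request, chunk_size=5 * 1024 * 1024):
    # Yield the config request first, then one chunk per read.
    yield config_request
    with open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=data)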
Example #8
def track_objects_streaming(path):
    # [START video_streaming_object_tracking_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Set streaming config.
    config = videointelligence.types.StreamingVideoConfig(feature=(
        videointelligence.enums.StreamingFeature.STREAMING_OBJECT_TRACKING))

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=config)

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    stream = []
    with io.open(path, 'rb') as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos, set the timeout to a value
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=600)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        object_annotations = response.annotation_results.object_annotations

        # object_annotations could be empty
        if not object_annotations:
            continue

        for annotation in object_annotations:
            # Each annotation has one frame, which has a time offset.
            frame = annotation.frames[0]
            time_offset = (frame.time_offset.seconds +
                           frame.time_offset.nanos / 1e9)

            description = annotation.entity.description
            confidence = annotation.confidence

            # track_id tracks the same object in the video.
            track_id = annotation.track_id

            # description is in Unicode
            print('{}s'.format(time_offset))
            print(u'\tEntity description: {}'.format(description))
            print('\tTrack Id: {}'.format(track_id))
            if annotation.entity.entity_id:
                print('\tEntity id: {}'.format(annotation.entity.entity_id))

            print('\tConfidence: {}'.format(confidence))

            # Every annotation has only one frame
            frame = annotation.frames[0]
            box = frame.normalized_bounding_box
            print('\tBounding box position:')
            print('\tleft  : {}'.format(box.left))
            print('\ttop   : {}'.format(box.top))
            print('\tright : {}'.format(box.right))
            print('\tbottom: {}\n'.format(box.bottom))
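Because track_id stays stable for the same object across responses, the per-second boxes can be stitched into trajectories. A minimal sketch; the tracks dict and record_track helper are illustrative, not part of the sample:

import collections

# track_id -> list of (time_offset, normalized_bounding_box) samples
tracks = collections.defaultdict(list)

def record_track(annotation):
    # Call inside the response loop above, once per object annotation.
    frame = annotation.frames[0]
    time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
    tracks[annotation.track_id].append(
        (time_offset, frame.normalized_bounding_box))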