Example no. 1
    def test_annotate_video(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = video_intelligence_pb2.AnnotateVideoResponse(
            **expected_response)
        operation = operations_pb2.Operation(
            name="operations/test_annotate_video", done=True)
        operation.response.Pack(expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = videointelligence_v1.VideoIntelligenceServiceClient()

        # Setup Request
        input_uri = "gs://demomaker/cat.mp4"
        features_element = enums.Feature.LABEL_DETECTION
        features = [features_element]

        response = client.annotate_video(input_uri=input_uri,
                                         features=features)
        result = response.result()
        assert expected_response == result

        assert len(channel.requests) == 1
        expected_request = video_intelligence_pb2.AnnotateVideoRequest(
            input_uri=input_uri, features=features)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
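The ChannelStub used above (together with the MultiCallableStub it hands out) is defined in the surrounding test module, not in this snippet. A rough sketch of such a test double, assuming it only needs to record requests and replay the canned operation through the unary_unary path these tests exercise, might look like this:

class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface."""

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        # Record the call so tests can assert on channel.requests[0][1].
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        if isinstance(response, Exception):
            raise response

        if response:
            return response


class ChannelStub(object):
    """Stub for the grpc.Channel interface (only the parts these tests use)."""

    def __init__(self, responses=[]):
        self.responses = responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None,
                    response_deserializer=None):
        return MultiCallableStub(method, self)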
Example no. 2
import io

from google.cloud import videointelligence


def detect_faces(local_file_path="path/to/your/video-file.mp4"):
    """Detects faces in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.FaceDetectionConfig(include_bounding_boxes=True,
                                                   include_attributes=True)
    context = videointelligence.VideoContext(face_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_content": input_content,
            "video_context": context,
        })

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print("Segment: {}s to {}s".format(
                track.segment.start_time_offset.seconds +
                track.segment.start_time_offset.microseconds / 1e6,
                track.segment.end_time_offset.seconds +
                track.segment.end_time_offset.microseconds / 1e6,
            ))

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, smiling, direction of gaze
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print("\t{}:{} {}".format(attribute.name, attribute.value,
                                          attribute.confidence))
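The sample above reads the video bytes into input_content; the same request can point at a file already stored in Cloud Storage by passing input_uri instead (as the annotate_video tests in the other examples do). A minimal sketch of that variant, with gs://YOUR_BUCKET/video-file.mp4 as a placeholder URI:

from google.cloud import videointelligence


def detect_faces_gcs(gcs_uri="gs://YOUR_BUCKET/video-file.mp4"):
    """Detects faces in a video stored in Cloud Storage."""
    client = videointelligence.VideoIntelligenceServiceClient()

    config = videointelligence.FaceDetectionConfig(include_bounding_boxes=True,
                                                   include_attributes=True)
    context = videointelligence.VideoContext(face_detection_config=config)

    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_uri": gcs_uri,
            "video_context": context,
        })
    return operation.result(timeout=300)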
Example no. 3
    def test_annotate_video_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_annotate_video_exception', done=True)
        operation.error.CopyFrom(error)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        client = videointelligence_v1.VideoIntelligenceServiceClient(
            channel=channel)

        response = client.annotate_video()
        exception = response.exception()
        assert exception.errors[0] == error
Example no. 4
    def test_annotate_video(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = video_intelligence_pb2.AnnotateVideoResponse(
            **expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_annotate_video', done=True)
        operation.response.Pack(expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        client = videointelligence_v1.VideoIntelligenceServiceClient(
            channel=channel)

        response = client.annotate_video()
        result = response.result()
        assert expected_response == result

        assert len(channel.requests) == 1
        expected_request = video_intelligence_pb2.AnnotateVideoRequest()
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
Example no. 5
    def test_annotate_video_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name="operations/test_annotate_video_exception", done=True)
        operation.error.CopyFrom(error)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = videointelligence_v1.VideoIntelligenceServiceClient()

        # Setup Request
        input_uri = "gs://demomaker/cat.mp4"
        features_element = enums.Feature.LABEL_DETECTION
        features = [features_element]

        response = client.annotate_video(input_uri=input_uri,
                                         features=features)
        exception = response.exception()
        assert exception.errors[0] == error
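The two exception tests above only check that the error Status from the failed long-running operation is surfaced on the future. In application code the same failure shows up when result() is called; a hedged sketch of handling it (the bucket URI is a placeholder):

from google.api_core import exceptions
from google.cloud import videointelligence

client = videointelligence.VideoIntelligenceServiceClient()
operation = client.annotate_video(
    request={
        "features": [videointelligence.Feature.LABEL_DETECTION],
        "input_uri": "gs://YOUR_BUCKET/video-file.mp4",
    })
try:
    result = operation.result(timeout=300)
except exceptions.GoogleAPICallError as e:
    # The operation's error Status is attached to the raised exception.
    print("Annotation failed:", e)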
Example no. 6
def google_single_video(video, mycursor, mydb):
    client = videointelligence.VideoIntelligenceServiceClient()
    config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    context = videointelligence.types.VideoContext(
        person_detection_config=config)

    # Open video
    with io.open(video, "rb") as f:
        input_content = f.read()
    # Start the asynchronous request
    print("Sending video " + video + " for analysis...")

    # Maybe here we can send all the videos at the same time
    operation = client.annotate_video(
        request={
            "features": [
                videointelligence.Feature.LABEL_DETECTION,
                videointelligence.Feature.LOGO_RECOGNITION,
                videointelligence.Feature.PERSON_DETECTION,
                videointelligence.Feature.FACE_DETECTION,
                videointelligence.Feature.EXPLICIT_CONTENT_DETECTION,
            ],
            "input_content": input_content,
            "video_context": context,
        })
    result = operation.result(timeout=90)

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    print("Searching for explicit content...")
    explicit = detector.explicit(annotation_result)

    print("Searching for logo...")
    logos = detector.logo(annotation_result)

    print("Searching for theme...")
    themes = detector.theme(annotation_result)

    # Saving to DB
    ## Explicit content
    video_id = get_video_id(video)
    sql = """
            INSERT INTO explicit (id_video, explicit)
            VALUES (%s, %s) ON DUPLICATE KEY UPDATE
            explicit=%s;
            """
    val = (video_id, explicit, explicit)
    mycursor.execute(sql, val)
    mydb.commit()

    ## Logos
    for brand in logos:
        sql = """
            INSERT INTO brand (id_video, name)
            VALUES (%s, %s) ON DUPLICATE KEY UPDATE
            name=%s;
            """
        val = (video_id, brand, brand)
        mycursor.execute(sql, val)
        mydb.commit()

    ## Theme
    for theme in themes:
        sql = """
            INSERT INTO theme (id_video, name)
            VALUES (%s, %s) ON DUPLICATE KEY UPDATE
            name=%s;
            """
        val = (video_id, theme, theme)
        mycursor.execute(sql, val)
        mydb.commit()

    delete_video(video)
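google_single_video depends on helpers that live elsewhere in its module (detector, get_video_id, delete_video) and on an already-open MySQL connection. A usage sketch under those assumptions, guessing that the connection comes from mysql.connector (whose cursor/connection API matches the mycursor/mydb pair used above) and that the videos sit in a local folder; the host, credentials, and database names are placeholders:

import glob

import mysql.connector


def analyze_folder(folder="videos"):
    mydb = mysql.connector.connect(host="localhost", user="user",
                                   password="password", database="videos")
    mycursor = mydb.cursor()
    try:
        for video in sorted(glob.glob(folder + "/*.mp4")):
            google_single_video(video, mycursor, mydb)
    finally:
        mycursor.close()
        mydb.close()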
Example no. 7
def client():
    return videointelligence_v1.VideoIntelligenceServiceClient()
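The bare client() function above reads like the body of a pytest fixture. Assuming that is the context, it would typically be registered as follows (the @pytest.fixture decorator is an assumption, not part of the snippet):

import pytest

from google.cloud import videointelligence_v1


@pytest.fixture
def client():
    return videointelligence_v1.VideoIntelligenceServiceClient()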
Example no. 8
import io

from google.cloud import videointelligence


def detect_person(local_file_path="path/to/your/video-file.mp4"):
    """Detects people in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    context = videointelligence.types.VideoContext(
        person_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.PERSON_DETECTION],
            "input_content": input_content,
            "video_context": context,
        })

    print("\nProcessing video for person detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.person_detection_annotations:
        print("Person detected:")
        for track in annotation.tracks:
            print("Segment: {}s to {}s".format(
                track.segment.start_time_offset.seconds +
                track.segment.start_time_offset.microseconds / 1e6,
                track.segment.end_time_offset.seconds +
                track.segment.end_time_offset.microseconds / 1e6,
            ))

            # Each segment includes timestamped objects that include
            # characteristics - e.g. clothes or posture of the person detected.
            # Grab the first timestamped object
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include unique pieces of clothing,
            # poses, or hair color.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print("\t{}:{} {}".format(attribute.name, attribute.value,
                                          attribute.confidence))

            # Landmarks in person detection include body parts such as
            # left_shoulder, right_ear, and right_ankle
            print("Landmarks:")
            for landmark in timestamped_object.landmarks:
                print("\t{}: {} (x={}, y={})".format(
                    landmark.name,
                    landmark.confidence,
                    landmark.point.x,  # Normalized vertex
                    landmark.point.y,  # Normalized vertex
                ))
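Both this sample and the face detection sample above repeat the same seconds-plus-microseconds arithmetic when printing segment boundaries. A small helper like the following sketch (an addition, not part of the original samples) keeps that conversion in one place; it relies only on the .seconds and .microseconds attributes, which both protobuf Duration messages and datetime.timedelta expose:

def segment_bounds_seconds(segment):
    """Return (start, end) of a track segment in seconds."""
    start = segment.start_time_offset
    end = segment.end_time_offset
    return (start.seconds + start.microseconds / 1e6,
            end.seconds + end.microseconds / 1e6)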