Code example #1
def analyze_shots(path):
    """ Detects camera shot changes. """
    # [START construct_request]
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.SHOT_CHANGE_DETECTION]
    operation = video_client.annotate_video(path, features)
    # [END construct_request]
    print('\nProcessing video for shot change annotations:')

    # [START check_operation]
    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(20)

    print('\nFinished processing.')
    # [END check_operation]

    # [START parse_response]
    shots = operation.result().annotation_results[0]

    for note, shot in enumerate(shots.shot_annotations):
        print('Scene {}: {} to {}'.format(note, shot.start_time_offset,
                                          shot.end_time_offset))
    # [END parse_response]
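These excerpts assume the module-level imports shown in the quickstart (code example #10); a minimal header for running them standalone would look like this:

import sys
import time

from google.cloud.gapic.videointelligence.v1beta1 import enums
from google.cloud.gapic.videointelligence.v1beta1 import (
    video_intelligence_service_client)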
Code example #2
def analyze_labels(path, scanned_time, duration):
    """ Detects labels given a GCS path. """
    # [START construct_request]
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(path, features)
    # [END construct_request]
    print('\nProcessing video for label annotations:')

    # [START check_operation]
    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(20)

    print('\nFinished processing.')
    # [END check_operation]

    # [START parse_response]
    results = operation.result().annotation_results[0]

    for label in results.label_annotations:
        for l, location in enumerate(label.locations):
            segment = location.segment
            # Report only labels whose segment covers the requested window.
            if (segment.start_time_offset <= int(scanned_time) and
                    segment.end_time_offset >=
                    int(scanned_time) + int(duration)):
                print('Label description: {}'.format(label.description))
                print('Locations:')
                print('\t{}: {} to {}'.format(
                    l, segment.start_time_offset,
                    segment.end_time_offset))
    # [END parse_response]
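A usage sketch: time offsets in the v1beta1 API are expressed in microseconds (note the / 1000000.0 conversions in the quickstart), so scanned_time and duration would be passed in that unit; the URI and values here are placeholders:

analyze_labels('gs://my-bucket/video.mp4',
               scanned_time=2000000, duration=1000000)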
Code example #3
    def test_annotate_video(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = (
            video_intelligence_service_client.VideoIntelligenceServiceClient())

        # Mock request
        input_uri = 'inputUri1707300727'
        features = []

        # Mock response
        expected_response = video_intelligence_pb2.AnnotateVideoResponse()
        operation = operations_pb2.Operation(
            name='operations/test_annotate_video', done=True)
        operation.response.Pack(expected_response)
        grpc_stub.AnnotateVideo.return_value = operation

        response = client.annotate_video(input_uri, features)
        self.assertEqual(expected_response, response.result())

        grpc_stub.AnnotateVideo.assert_called_once()
        args, kwargs = grpc_stub.AnnotateVideo.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = video_intelligence_pb2.AnnotateVideoRequest(
            input_uri=input_uri, features=features)
        self.assertEqual(expected_request, actual_request)
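The mock_create_stub parameter implies the test method is wrapped in a mock.patch decorator; a sketch of the likely harness, assuming the GAX-era transport (the exact patch target is an assumption):

    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_annotate_video(self, mock_create_stub):
        ...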
Code example #4
File: analyze.py Project: CNUPiedPiper/Recording
def analyze_labels(path):
    """ Detects labels given a GCS path. """
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(path, features)
    print('\nProcessing video for label annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(20)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    results = operation.result().annotation_results[0]

    for i, label in enumerate(results.label_annotations):
        print('Label description: {}'.format(label.description))
        print('Locations:')

        for l, location in enumerate(label.locations):
            print('\t{}: {} to {}'.format(l,
                                          location.segment.start_time_offset,
                                          location.segment.end_time_offset))
Code example #5
File: analyze.py Project: arbatovdan/new
def analyze_safe_search(path):
    """ Detects safe search features the GCS path to a video. """
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.SAFE_SEARCH_DETECTION]
    operation = video_client.annotate_video(path, features)
    print('\nProcessing video for safe search annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(15)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    safe_annotations = (
        operation.result().annotation_results[0].safe_search_annotations)

    likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
                     "Likely", "Very likely")

    for note in safe_annotations:
        print('Time: {}s'.format(note.time_offset / 1000000.0))
        print('\tadult: {}'.format(likely_string[note.adult]))
        print('\tspoof: {}'.format(likely_string[note.spoof]))
        print('\tmedical: {}'.format(likely_string[note.medical]))
        print('\tracy: {}'.format(likely_string[note.racy]))
        print('\tviolent: {}\n'.format(likely_string[note.violent]))
Code example #6
File: analyze.py Project: CNUPiedPiper/Recording
def analyze_faces(path):
    """ Detects faces given a GCS path. """
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.FACE_DETECTION]
    operation = video_client.annotate_video(path, features)
    print('\nProcessing video for face annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(20)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    face_annotations = (
        operation.result().annotation_results[0].face_annotations)

    for face_id, face in enumerate(face_annotations):
        print('Thumbnail size: {}'.format(len(face.thumbnail)))

        for segment_id, segment in enumerate(face.segments):
            print('Track {}: {} to {}'.format(segment_id,
                                              segment.start_time_offset,
                                              segment.end_time_offset))
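Each face.thumbnail field holds raw image bytes; a sketch of writing them to disk inside the loop above (the filename pattern and JPEG extension are assumptions about the thumbnail format):

for face_id, face in enumerate(face_annotations):
    # Filename is illustrative; thumbnails are raw image bytes.
    with open('face_{}.jpg'.format(face_id), 'wb') as image_file:
        image_file.write(face.thumbnail)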
Code example #7
def get_label_annotations(gcs_uri):
    """ Detects labels given a Google Cloud Storage (GCS) URI. """
    # [START construct_request]
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(gcs_uri, features)
    # [END construct_request]

    # [START check_operation]
    while not operation.done():
        time.sleep(20)
    # [END check_operation]

    # [START get_response]
    results = operation.result().annotation_results[0]
    return results.label_annotations
    # [END get_response]
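Callers can iterate the returned annotations directly; for instance (the URI is a placeholder):

for label in get_label_annotations('gs://my-bucket/video.mp4'):
    print(label.description)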
Code example #8
    def test_annotate_video_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = (
            video_intelligence_service_client.VideoIntelligenceServiceClient())

        # Mock request
        input_uri = 'inputUri1707300727'
        features = []

        # Mock exception response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_annotate_video_exception', done=True)
        operation.error.CopyFrom(error)
        grpc_stub.AnnotateVideo.return_value = operation

        response = client.annotate_video(input_uri, features)
        self.assertEqual(error, response.exception())
Code example #9
File: analyze.py Project: CNUPiedPiper/Recording
def analyze_shots(path):
    """ Detects camera shot changes. """
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.SHOT_CHANGE_DETECTION]
    operation = video_client.annotate_video(path, features)
    print('\nProcessing video for shot change annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(20)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    shots = operation.result().annotation_results[0]

    for note, shot in enumerate(shots.shot_annotations):
        print('Scene {}: {} to {}'.format(note, shot.start_time_offset,
                                          shot.end_time_offset))
Code example #10
def run_quickstart():
    # [START videointelligence_quickstart]
    import sys
    import time

    from google.cloud.gapic.videointelligence.v1beta1 import enums
    from google.cloud.gapic.videointelligence.v1beta1 import (
        video_intelligence_service_client)

    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video('gs://demomaker/cat.mp4', features)
    print('\nProcessing video for label annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(15)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    results = operation.result().annotation_results[0]

    for label in results.label_annotations:
        print('Label description: {}'.format(label.description))
        print('Locations:')

        for l, location in enumerate(label.locations):
            positions = 'Entire video'
            if (location.segment.start_time_offset != -1
                    or location.segment.end_time_offset != -1):
                positions = '{} to {}'.format(
                    location.segment.start_time_offset / 1000000.0,
                    location.segment.end_time_offset / 1000000.0)
            print('\t{}: {}'.format(l, positions))

        print('\n')
    # [END videointelligence_quickstart]
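The sample presumably ends with a standard entry point so the quickstart can be run as a script; a sketch:

if __name__ == '__main__':
    run_quickstart()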
Code example #11
File: analyze.py Project: arbatovdan/new
def analyze_labels_file(path):
    """ Detects labels given a file path. """
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]

    with io.open(path, "rb") as movie:
        content_base64 = base64.b64encode(movie.read())

    operation = video_client.annotate_video('',
                                            features,
                                            input_content=content_base64)
    print('\nProcessing video for label annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(15)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    results = operation.result().annotation_results[0]

    for i, label in enumerate(results.label_annotations):
        print('Label description: {}'.format(label.description))
        print('Locations:')

        for l, location in enumerate(label.locations):
            positions = 'Entire video'
            if (location.segment.start_time_offset != -1
                    or location.segment.end_time_offset != -1):
                positions = '{} to {}'.format(
                    location.segment.start_time_offset / 1000000.0,
                    location.segment.end_time_offset / 1000000.0)
            print('\t{}: {}'.format(l, positions))

        print('\n')
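In addition to the common header shown after code example #1, this file-based variant needs the stdlib imports it uses for reading and encoding the video:

import base64
import io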
Code example #12
File: label.py Project: bw4sz/GoogleVideo
def main(path):
    """ Detects labels given a GCS path. """
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]
    video_context = video_intelligence_pb2.VideoContext()
    video_context.stationary_camera = True
    video_context.label_detection_mode = video_intelligence_pb2.FRAME_MODE
    operation = video_client.annotate_video(path,
                                            features,
                                            video_context=video_context)
    print('\nProcessing video for label annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(10)

    print('\nFinished processing.')

    results = operation.result().annotation_results[0]

    return results
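A usage sketch for the frame-mode results returned above (the URI is a placeholder):

results = main('gs://my-bucket/video.mp4')
for label in results.label_annotations:
    print(label.description)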
Code example #13
def analyze_labels(path):
    """ Detects labels given a GCS path. """
    # [START construct_request]
    video_client = (
        video_intelligence_service_client.VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(path, features)
    # [END construct_request]
    print('\nProcessing video for label annotations:')

    # [START check_operation]
    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(20)

    print('\nFinished processing.')
    # [END check_operation]

    # [START parse_response]
    results = operation.result().annotation_results[0]
    labels = []
    for label in results.label_annotations:
        label_sample = {"description": label.description, "locations": {}}
        print('Label description: {}'.format(label.description))
        for l, location in enumerate(label.locations):
            label_sample['locations'][l] = [
                location.segment.start_time_offset,
                location.segment.end_time_offset
            ]
            print('\t{}: {} to {}'.format(l,
                                          location.segment.start_time_offset,
                                          location.segment.end_time_offset))

        labels.append(label_sample)
    final_output = {"labels": labels}
    # Use a context manager so the file handle is closed after writing.
    with open('adgence_labels_output.json', 'w') as output_file:
        json.dump(final_output, output_file)
    # [END parse_response]
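The resulting adgence_labels_output.json has this shape (values are illustrative; json.dump converts the integer location keys to strings):

{"labels": [{"description": "cat", "locations": {"0": [0, 14833333]}}]}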
Code example #14
    def __init__(self, vid, args):

        #start time
        self.start_time = time.time()

        #store args from MotionMeerkat
        self.args = args
        self.args.video = vid

        #set descriptors
        self.frame_count = 0

        #Box Annotations dictionary
        self.annotations = {}

        ##Google Properties##
        #Set Google Credentials and Properties
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = args.google_account

        ##Cloud Video Properties
        self.video_client = (
            video_intelligence_service_client.VideoIntelligenceServiceClient())
        self.features = [enums.Feature.LABEL_DETECTION]
        self.video_context = video_intelligence_pb2.VideoContext()
        self.video_context.stationary_camera = True
        self.video_context.label_detection_mode = video_intelligence_pb2.FRAME_MODE

        #Google Cloud Storage
        storage_client = storage.Client()

        #TODO check if bucket exists.
        self.bucket = storage_client.get_bucket(args.bucket)

        #create output directory
        normFP = os.path.normpath(vid)
        (filepath, filename) = os.path.split(normFP)
        (shortname, extension) = os.path.splitext(filename)
        (_, IDFL) = os.path.split(filepath)

        self.file_destination = os.path.join(self.args.output, shortname)
        print("Writing clips to " + self.file_destination)

        if not os.path.exists(self.file_destination):
            os.makedirs(self.file_destination)

        ##VIDEO PROPERTIES
        #read video
        self.cap = cv2.VideoCapture(self.args.video)

        #set frame rate (property index 5 is cv2.CAP_PROP_FPS)
        self.frame_rate = self.cap.get(cv2.CAP_PROP_FPS)

        #background subtraction
        self.background_instance = self.create_background()

        #Detector almost always returns first frame
        self.IS_FIRST_FRAME = True

        #Motion History, boolean state of Motion
        self.MotionHistory = []