Code Example #1
def shot_detection(path,csvpath):
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION]

    with io.open(path, 'rb') as movie:
        input_content = movie.read()

    operation = video_client.annotate_video(
        features=features, input_content=input_content)

    print('\nProcessing video for shot change annotations:', path)

    result = operation.result(timeout=200)
    print('\nFinished processing.')
    fields = ['Shot Number', 'Start', 'End']
    rows = []
    # first result is retrieved because a single video was processed
    for i, shot in enumerate(result.annotation_results[0].shot_annotations):
        start_time = (shot.start_time_offset.seconds +
                      shot.start_time_offset.nanos / 1e9)
        end_time = (shot.end_time_offset.seconds +
                    shot.end_time_offset.nanos / 1e9)
        rows.append([i + 1, start_time, end_time])
    # writing to csv file
    with open(csvpath, 'w') as csvfile:
        # creating a csv writer object
        csvwriter = csv.writer(csvfile)

        # writing the fields
        csvwriter.writerow(fields)

        # writing the data rows
        csvwriter.writerows(rows)
Code Example #2
def analyze_shots(path):
    """ Detects camera shot changes. """
    # [START video_shot_tutorial_construct_request]
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.Feature.SHOT_CHANGE_DETECTION]
    operation = video_client.annotate_video(
        request={"features": features, "input_uri": path}
    )
    # [END video_shot_tutorial_construct_request]
    print("\nProcessing video for shot change annotations:")

    # [START video_shot_tutorial_check_operation]
    result = operation.result(timeout=120)
    print("\nFinished processing.")

    # [END video_shot_tutorial_check_operation]

    # [START video_shot_tutorial_parse_response]
    for i, shot in enumerate(result.annotation_results[0].shot_annotations):
        start_time = (
            shot.start_time_offset.seconds + shot.start_time_offset.microseconds / 1e6
        )
        end_time = (
            shot.end_time_offset.seconds + shot.end_time_offset.microseconds / 1e6
        )
        print("\tShot {}: {} to {}".format(i, start_time, end_time))
Code Example #3
    def performAllAnalysis(self, path):
        myLogger.debug(
            "Entering processing of GCS-stored video for label annotations: " + path)

        video_client = videointelligence.VideoIntelligenceServiceClient()
        features = [videointelligence.enums.Feature.LABEL_DETECTION]
        myLogger.debug("LABEL_DETECTION initiated")
        features += [
            videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION
        ]
        myLogger.debug("EXPLICIT_CONTENT_DETECTION initiated")
        features += [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION]
        myLogger.debug("SHOT_CHANGE_DETECTION initiated")
        # features += [videointelligence.enums.Feature.FACE_DETECTION]
        # print(features)
        # exit
        mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
        config = videointelligence.types.LabelDetectionConfig(
            label_detection_mode=mode)
        myLogger.debug("SHOT_AND_FRAME_MODE set")
        context = videointelligence.types.VideoContext(
            label_detection_config=config)

        operation = video_client.annotate_video(path,
                                                features=features,
                                                video_context=context)

        myLogger.debug("Start video analysis for: " + path)

        result = operation.result(timeout=3600)
        myLogger.debug("Finished video analysis for: " + path)
        return result
Code Example #4
def transcribe(path):
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]

    config = videointelligence.types.SpeechTranscriptionConfig(
        language_code="en-US", enable_automatic_punctuation=True)
    video_context = videointelligence.types.VideoContext(
        speech_transcription_config=config)

    operation = video_client.annotate_video(input_uri=path,
                                            features=features,
                                            video_context=video_context)

    print("\nProcessing video for speech transcription.")

    result = operation.result(timeout=600)

    content = ""

    # There is only one annotation_result since only
    # one video is processed.
    annotation_results = result.annotation_results[0]

    # The number of alternatives for each transcription is limited by
    # SpeechTranscriptionConfig.max_alternatives.
    # Each alternative is a different possible transcription
    # and has its own confidence score.

    # Each speech_transcription covers one segment of the video; take the
    # top (first) alternative of each and concatenate them.
    for speech_transcription in annotation_results.speech_transcriptions:
        if speech_transcription.alternatives:
            content += speech_transcription.alternatives[0].transcript

    # Return the whole transcript
    return content
Code Example #5
def analyze_video_labels(path):
    """ Detects labels given a GCS path. """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    with open(path, 'rb') as movie:
        input_content = movie.read()
    operation = video_client.annotate_video(features=features,
                                            input_content=input_content)

    print('\nProcessing video for label annotations:')
    result = operation.result(timeout=90)
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        for i, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            confidence = segment.confidence

            # print('\tSegment {}: {}'.format(i, positions))
            print('\tConfidence: {}'.format(confidence))
        print('\n')
Code Example #6
File: labels.py Project: jayakrishna531/gcp-testing
def analyze_labels(path):
    """ Detects labels given a GCS path. """
    # [START video_label_tutorial_construct_request]
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(input_uri=path, features=features)
    # [END video_label_tutorial_construct_request]
    print('\nProcessing video for label annotations:')

    # [START video_label_tutorial_check_operation]
    result = operation.result(timeout=90)
    print('\nFinished processing.')
    # [END video_label_tutorial_check_operation]

    # [START video_label_tutorial_parse_response]
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        for i, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            confidence = segment.confidence
            print('\tSegment {}: {}'.format(i, positions))
            print('\tConfidence: {}'.format(confidence))
        print('\n')
Code Example #7
def run_quickstart():
    # [START video_quickstart]
    from google.cloud import videointelligence

    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video('gs://demomaker/cat.mp4',
                                            features=features)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        for i, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            confidence = segment.confidence
            print('\tSegment {}: {}'.format(i, positions))
            print('\tConfidence: {}'.format(confidence))
        print('\n')
Code Example #8
    def __init__(self, logger, event):
        # variables
        self.logger = logger.global_log
        folder_temporary = 'C:/Temp'
        video_content = event['video-content']
        credentials = "syn-g-cloud-ac072cf6a455.json"
        input_content = ''
        location = 'us-east1'
        # build
        tmp_video = '{}/{}'.format(folder_temporary, video_content)
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials
        gcp_feats = videointelligence.enums.Feature
        # open
        with io.open(tmp_video, 'rb') as file:
            input_content = file.read()
        # init the service
        video_client = videointelligence.VideoIntelligenceServiceClient()
        # feats to be called
        features = [gcp_feats.LABEL_DETECTION, gcp_feats.SHOT_CHANGE_DETECTION]
        # call
        operation = video_client.annotate_video(input_content=input_content,
                                                features=features,
                                                location_id=location)
        # result
        result = operation.result(timeout=900)
        # print
        self.get_scenes(result)
        self.get_labels_segment(result)
        self.get_labels_shot(result)
Code Example #9
def run_quickstart():
    # [START video_quickstart]
    from google.cloud import videointelligence

    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(
        request={
            "features": features,
            "input_uri": "gs://cloud-samples-data/video/cat.mp4",
        })
    print("\nProcessing video for label annotations:")

    result = operation.result(timeout=120)
    print("\nFinished processing.")

    # first result is retrieved because a single video was processed
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print("Video label description: {}".format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print("\tLabel category description: {}".format(
                category_entity.description))

        for i, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.microseconds / 1e6)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.microseconds / 1e6)
            positions = "{}s to {}s".format(start_time, end_time)
            confidence = segment.confidence
            print("\tSegment {}: {}".format(i, positions))
            print("\tConfidence: {}".format(confidence))
        print("\n")
Code Example #10
def google(screen_name):
    # https://cloud.google.com/video-intelligence/docs/analyze-labels#video_analyze_labels-python
    # Assumption: the video to analyze has already been saved locally as
    # "<screen_name>.mp4" in the working directory.
    path = screen_name + '.mp4'
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    with io.open(path, 'rb') as movie:
        input_content = movie.read()

    operation = video_client.annotate_video(features=features,
                                            input_content=input_content)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # Process video/segment level label annotations
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        for i, segment in enumerate(segment_label.segments):
            confidence = segment.confidence
            print('\tConfidence: {}'.format(confidence))
            print('\n')
Code Example #11
def analyzeLabels(input_content):
    videoClient = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    operation = videoClient.annotate_video(input_content=input_content,
                                           features=features)

    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    shotLabels = result.annotation_results[0].shot_label_annotations
    for i, shot_label in enumerate(shotLabels):
        print('Shot label description: {}'.format(
            shot_label.entity.description))
        for category_entity in shot_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        for i, shot in enumerate(shot_label.segments):
            startTime = (shot.segment.start_time_offset.seconds +
                         shot.segment.start_time_offset.nanos / 1e9)
            endTime = (shot.segment.end_time_offset.seconds +
                       shot.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(startTime, endTime)
            confidence = shot.confidence
            print('\tSegment {}: {}'.format(i, positions))
            print('\tConfidence: {}'.format(confidence))
        print('\n')
Code Example #12
def analyze_faces(path):
    # [START construct_request]
    """ Detects faces given a GCS path. """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.FACE_DETECTION]
    operation = video_client.annotate_video(path, features=features)
    # [END construct_request]
    print('\nProcessing video for face annotations:')

    # [START check_operation]
    result = operation.result(timeout=600)
    print('\nFinished processing.')
    # [END check_operation]

    # [START parse_response]
    # first result is retrieved because a single video was processed
    faces = result.annotation_results[0].face_annotations
    for face_id, face in enumerate(faces):
        print('Thumbnail size: {}'.format(len(face.thumbnail)))

        for segment_id, segment in enumerate(face.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(segment_id, positions))
Code Example #13
    def performLabelAnalysis(self, path):
        """ Detects labels given a GCS path. """
        myLogger.debug(
            "Entering label processing of GCS-stored video for label annotations: " +
            path)
        video_client = videointelligence.VideoIntelligenceServiceClient()
        features = [videointelligence.enums.Feature.LABEL_DETECTION]
        # features += [videointelligence.enums.Feature.FACE_DETECTION]
        # print(features)
        # exit
        mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
        config = videointelligence.types.LabelDetectionConfig(
            label_detection_mode=mode)
        context = videointelligence.types.VideoContext(
            label_detection_config=config)
        myLogger.debug("SHOT_AND_FRAME_MODE set")
        operation = video_client.annotate_video(path,
                                                features=features,
                                                video_context=context)
        myLogger.debug("SHOT_AND_FRAME_MODE set")

        myLogger.debug("Start label video analysis for: " + path)

        result = operation.result(timeout=3600)
        myLogger.debug("Finished label video analysis for: " + path)
        return result
Code Example #14
File: google_api.py Project: Tahlor/cleanvid
    def create_video_speech_operation_from_model(
            self, storage_uri, *args,
            **kwargs):  # i.e. copied and pasted from Google
        """Transcribe speech from a video stored on GCS."""
        from google.cloud import videointelligence
        video_client = videointelligence.VideoIntelligenceServiceClient()
        features = [videointelligence.Feature.SPEECH_TRANSCRIPTION]

        config = videointelligence.SpeechTranscriptionConfig(
            language_code="en-US",
            enable_automatic_punctuation=True,
            #enable_word_time_offsets=True,
            max_alternatives=2)
        video_context = videointelligence.VideoContext(
            speech_transcription_config=config)
        operation = video_client.annotate_video(
            request={
                "features": features,
                "input_uri": storage_uri,
                "video_context": video_context,
            })

        # Use confirmation
        if self.require_api_confirmation:
            confirmation = input(
                f"Really recognize speech in {storage_uri}? (Y/n) ")
            if confirmation.lower() != "y":
                raise Exception("Did not agree to recognize speech")

        print("\nProcessing video for speech transcription.")
        return operation
Code Example #15
def zoom_analysis(video, frame_to_use=0):
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    operation = video_client.annotate_video(video, features=features)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=120)
    print('\nFinished processing.')

    frame_offsets = []

    # first result is retrieved because a single video was processed
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        for category_entity in segment_label.category_entities:
            # Take frames with people
            if (category_entity.description == 'person'):
                print('\tLabel category description: {}'.format(
                    category_entity.description))
                print(segment_label)
                frame = segment_label.frames[frame_to_use]
                time_offset = (frame.time_offset.seconds +
                               frame.time_offset.nanos / 1e9)
                print('\tFirst frame time offset: {}s'.format(time_offset))
                print('\tFirst frame confidence: {}'.format(frame.confidence))
                print('\n')
                frame_offsets.append(time_offset)
    return sorted(set(frame_offsets))
Code Example #16
def process_audio(file, context):
    bucket = validate_message(file, "bucket")
    name = validate_message(file, "name")

    print("Starting transcription for", bucket, " / ", name)

    video_client = videointelligence.VideoIntelligenceServiceClient()

    # maybe add other features here?
    features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]

    config = videointelligence.types.SpeechTranscriptionConfig(
        language_code="en-US", enable_automatic_punctuation=True)

    video_context = videointelligence.types.VideoContext(
        speech_transcription_config=config)

    operation = video_client.annotate_video(input_uri=f"gs://{bucket}/{name}",
                                            features=features,
                                            video_context=video_context)

    transcriptions = []

    annotation_results = operation.result()
    first_result = annotation_results.annotation_results[0]
    speech_transcriptions_raw = first_result.speech_transcriptions

    for result in speech_transcriptions_raw:
        alternative = result.alternatives[0]
        transcript = alternative.transcript
        confidence = alternative.confidence

        words = []
        for word_info in alternative.words:
            word = word_info.word
            start = word_info.start_time.seconds + 1e-9 * word_info.start_time.nanos
            end = word_info.end_time.seconds + 1e-9 * word_info.end_time.nanos

            words.append({"word": word, "start": start, "end": end})

        keywords = text_to_keywords.get_keywords(transcript)

        transcriptions.append({
            "transcript": transcript,
            "confidence": confidence,
            "words": words,
            "keywords": keywords
        })

    # response = transcribe_file(output, channels)
    # response = [paragraph.to_json() for paragraph in response]

    mongo.set_keyword_data(file["name"], transcriptions)

    print("File {} processed.".format(file["name"]))

    return transcriptions


# process_audio({"bucket": "catchup-app", "name": "test_video.mp4"}, {})
Code Example #17
    def google_analyze(self):
        video_client = videointelligence.VideoIntelligenceServiceClient()
        features = [videointelligence.enums.Feature.LABEL_DETECTION]

        with io.open(self.name + '.mp4', 'rb') as movie:
            input_content = movie.read()
        try:
            operation = video_client.annotate_video(
                features=features, input_content=input_content)
            print('\nProcessing video for label annotations:')
            result = operation.result(timeout=90)
        except Exception as e:
            print("Video Intelligence error:", e)
            exit()

        print('\nFinished processing.')

        # Process video/segment level label annotations
        segment_labels = result.annotation_results[0].segment_label_annotations
        for i, segment_label in enumerate(segment_labels):
            print('Video label description: {}'.format(
                segment_label.entity.description))
            for category_entity in segment_label.category_entities:
                print("Label category description: " +
                      category_entity.description)

            for i, segment in enumerate(segment_label.segments):
                confidence = segment.confidence
                print("The accuracy of the identification in this case is " +
                      str(confidence) + "\n")
Code Example #18
def google_analysis(screen_name):
    #https://cloud.google.com/video-intelligence/docs/analyze-labels#video_analyze_labels-python
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.getcwd() + '/google.json'
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    with io.open(os.path.join(os.getcwd(), "" + screen_name + ".mp4"),
                 'rb') as movie:
        input_content = movie.read()

    operation = video_client.annotate_video(features=features,
                                            input_content=input_content)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # Process video/segment level label annotations
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        #show the matching degree
        for i, segment in enumerate(segment_label.segments):
            confidence = segment.confidence
            print('\tConfidence: {}'.format(confidence))
            print('\n')
Code Example #19
def hello():

	video_file_path = request.args.get('video_file_path')

	if video_file_path == None or len(video_file_path) == 0:
		logging.info('no path parameter provided')
		# print('no path parameter provided')
		return 'no path parameter provided'


	path = 'gs://' + STORAGE_NAME + '/' + video_file_path
	logging.info('Accessing video from ' + path + ' ...')
	# print('Accessing video from ' + path + ' ...')

	video_client = videointelligence.VideoIntelligenceServiceClient()
	features = [videointelligence.enums.Feature.LABEL_DETECTION]
	mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
	config = videointelligence.types.LabelDetectionConfig(label_detection_mode=mode)
	context = videointelligence.types.VideoContext(label_detection_config=config)

	operation = video_client.annotate_video(path, features=features, video_context=context)
	
	logging.info('Processing video for labelling...')
	# print('Processing video for labelling...')

	result = operation.result(timeout=180)

	labels = []

	# Process video/segment level label annotations
	segment_labels = result.annotation_results[0].segment_label_annotations
	for i, segment_label in enumerate(segment_labels):
		labels.append(segment_label.entity.description)

	# Process shot level label annotations
	shot_labels = result.annotation_results[0].shot_label_annotations
	for i, shot_label in enumerate(shot_labels):
		labels.append(shot_label.entity.description)

	# # Process frame level label annotations
	# frame_labels = result.annotation_results[0].frame_label_annotations
	# for i, frame_label in enumerate(frame_labels):
	# 	labels.append(frame_label.entity.description)

	labels = list(set(labels))
	logging.info(labels)
	# print(labels)

	if len(labels) == 0:
		labels.append('please wait')

	trimmed_file_path = video_file_path.split('.')[0]

	ref = db.reference(trimmed_file_path)
	ref.set(labels)
	logging.info('Written to ' + LABELS_DATABASE_NAME)
	# print('Written to ' + LABELS_DATABASE_NAME)

	return jsonify(labels)
Code Example #20
def analyze_labels_file(path, dict_list):

    # [START video_analyze_labels]

    """Detect labels given a file path."""
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    with io.open(path, 'rb') as movie:
        input_content = movie.read()
    operation = video_client.annotate_video(
        features=features, input_content=input_content)
    print('\nProcessing video for label annotations (This may take a while):')
    result = operation.result(timeout=300)
    print('Finished processing\n')
    print('****************************************')

    # Process shot level label annotations

    label_count = 0
    print('\nGenerating a report')
    shot_labels = result.annotation_results[0].shot_label_annotations
    file = open('Label Analysis Result.txt', 'w')
    file.write('Probable contents in the video (NOT following the picture order): \n\n')

    # Make a list for MySQL database
    content_name = []
    for i, shot_label in enumerate(shot_labels):
        content_name.append(shot_label.entity.description)
        label_count += 1
        file.write('    ' + str(label_count) + '\t{}'.format(
            shot_label.entity.description) + '\n')
    file.write('\n\n')
    file.write('Details are listed below\n\n')

    # Continue writing in the txt report
    # And insert pic_content data to table pic_content
    for i, shot_label in enumerate(shot_labels):
        file.write('Shot label description: {}'.format(
            shot_label.entity.description) + '\n')
        for category_entity in shot_label.category_entities:
            file.write('\tLabel category description: {}'.format(
                category_entity.description) + '\n')
        for i, shot in enumerate(shot_label.segments):
            start_time = (shot.segment.start_time_offset.seconds +
                          shot.segment.start_time_offset.nanos / 1e9)
            end_time = (shot.segment.end_time_offset.seconds +
                        shot.segment.end_time_offset.nanos / 1e9)

            frame_no = int(round(end_time*FRAMERATE))
            dict_list[frame_no-1]["contents"].append(shot_label.entity.description)
            positions = 'Picture number {}'.format(frame_no)
            confidence = shot.confidence
            file.write('\tSegment {}: {}'.format(i+1, positions) + '\n')
            file.write('\tConfidence: {}'.format(confidence) + '\n')
        file.write('\n')
    file.close()
    POSTS.insert_many(dict_list)
    print('Report generated\n')
    print('****************************************')
Code Example #21
File: google_apis.py Project: vsyropou/api-consumers
    def consume(self, path, **kwargs):
        """ Detects labels given a GCS path. """

        video_client = videointelligence.VideoIntelligenceServiceClient()
        features = [videointelligence.enums.Feature.LABEL_DETECTION]

        mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
        config = videointelligence.types.LabelDetectionConfig(
            label_detection_mode=mode)
        context = videointelligence.types.VideoContext(
            label_detection_config=config)

        operation = video_client.annotate_video(path,
                                                features=features,
                                                video_context=context)

        info('Processing "%s" for label annotations:' % path)

        result = operation.result(timeout=10 * 60)
        info('Finished processing.')

        out = []

        # Process video/segment level label annotations
        segment_labels = result.annotation_results[0].segment_label_annotations
        for i, segment_label in enumerate(segment_labels):
            # print('Video label description: {}'.format(
            #   segment_label.entity.description))
            for category_entity in segment_label.category_entities:
                # print('\tLabel category description: {}'.format(
                #    category_entity.description))
                pass
            for i, segment in enumerate(segment_label.segments):
                start_time = (segment.segment.start_time_offset.seconds +
                              segment.segment.start_time_offset.nanos / 1e9)
                end_time = (segment.segment.end_time_offset.seconds +
                            segment.segment.end_time_offset.nanos / 1e9)
                positions = '{}s to {}s'.format(start_time, end_time)
                confidence = segment.confidence
                # print('\tSegment {}: {}'.format(i, positions))
                # print('\tConfidence: {}'.format(confidence))
            # print('\n')

            out.append({
                'label': segment_label.entity.description,
                'category': (category_entity.description
                             if segment_label.category_entities else ''),
                'start': start_time if segment_label.segments else '',
                'end': end_time if segment_label.segments else '',
                'confidence': confidence if segment_label.segments else ''
            })

        return out
Code Example #22
    def test_default_credentials(self):
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'VIDEO_INTELLIGENCE')
        with env:
            init_video_intelligence()
            client = videointelligence.VideoIntelligenceServiceClient()
            self.assertIsNotNone(client.credentials)
            self.assertIsInstance(client.credentials, KaggleKernelCredentials)
Code Example #23
def get_label_annotations(gcs_uri):
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(gcs_uri, features=features)

    # Wait until the  annotate_video function call has completed.
    results = operation.result(timeout=90).annotation_results[0]
    label_annotations = results.segment_label_annotations
    return label_annotations
Code Example #24
def get_annotations(path_data):
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [
        videointelligence.enums.Feature.LABEL_DETECTION,
        videointelligence.enums.Feature.SHOT_CHANGE_DETECTION,
    ]
    operation = video_client.annotate_video(input_content=path_data,
                                            features=features)
    return operation
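
Code Example #24 only builds the annotation request and returns the long-running operation without waiting on it. Note that path_data is passed as input_content, so it is expected to be the raw video bytes rather than a file path or GCS URI. A minimal usage sketch (the file name, timeout and variable names are illustrative):

with io.open('local_video.mp4', 'rb') as movie:
    video_bytes = movie.read()

operation = get_annotations(video_bytes)
result = operation.result(timeout=300)  # block until annotation finishes
shot_annotations = result.annotation_results[0].shot_annotations
segment_labels = result.annotation_results[0].segment_label_annotations
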
Code Example #25
    def test_user_provided_credentials(self):
        credentials = _make_credentials()
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
        with env:
            init_video_intelligence()
            client = videointelligence.VideoIntelligenceServiceClient(credentials=credentials)
            self.assertNotIsInstance(client.credentials, KaggleKernelCredentials)
            self.assertIsNotNone(client.credentials)
Code Example #26
    def send_video(self, credentials, tmp_video):
        self.logger.info('GCP call')
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials
        video_client = videointelligence.VideoIntelligenceServiceClient()
        operation = video_client.annotate_video(
            input_content=self.read_video(tmp_video),
            features=[videointelligence.enums.Feature.OBJECT_TRACKING],
            location_id='us-east1')
        result = operation.result(timeout=900)
        return result
Code Example #27
File: videolabel.py Project: wzezhong/EC601
def analyze_labels(path):
    """Detect labels given a file path."""
    os.chdir(path)
    os.chdir("../")
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    with io.open(path, 'rb') as movie:
        input_content = movie.read()

    operation = video_client.annotate_video(features=features,
                                            input_content=input_content)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # Process video/segment level label annotations
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        for i, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            confidence = segment.confidence
            print('\tSegment {}: {}'.format(i, positions))
            print('\tConfidence: {}'.format(confidence))
        print('\n')

    # Process frame-level label annotations and collect their descriptions
    # so the function returns something useful.
    labels = []
    frame_labels = result.annotation_results[0].frame_label_annotations
    for i, frame_label in enumerate(frame_labels):
        print('Frame label description: {}'.format(
            frame_label.entity.description))
        labels.append(frame_label.entity.description)
        for category_entity in frame_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        # Each frame-level label annotation contains many frames; only the
        # first frame's information is printed here.
        frame = frame_label.frames[0]
        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('\tFirst frame time offset: {}s'.format(time_offset))
        print('\tFirst frame confidence: {}'.format(frame.confidence))
        print('\n')

    return labels
Code Example #28
def run_quickstart(file_dir):
    # [START video_quickstart]
    
    i = 0  # mp4 files processed successfully
    j = 0  # files that raised an exception
    k = 0  # directories visited
    l = 0  # webm files skipped
    errors = {}
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    
    for direc in os.listdir(file_dir):
        print(direc)
        loc = os.path.join(file_dir, direc)
        os.chdir(loc)
        try:
            if glob.glob('*.mp4'):
                file = glob.glob('*.mp4')[0]

                file_name = os.path.join(file_dir, direc, file)
                print("file name =", file_name)
                with io.open(file_name, 'rb') as movie:
                    input_content = movie.read()
                    
                operation = video_client.annotate_video(input_content=input_content, features=features)
                print('\nProcessing video for label annotations:')

                result = operation.result(timeout=120)
                print('\nFinished processing.')

                result_json = {}
                result_json = get_details(result)
                with open(os.path.join(file_dir,direc,'data.json'), 'w') as outfile:
                    json.dump(result_json, outfile)
                

                i = i+1
                
            elif glob.glob('*.webm'):
                
                l = l+1
                                    
                
        except Exception as o:
            print(o)
            j = j+1
            errors[loc]=str(o)
        
        k = k + 1
        
    print("Out of total {}, processed {} mp4 videos successfully, while {} files errored out, and left {} webm files out!".format(k,i,j, l))
    with open(os.path.join(file_dir,'error/error.json'), 'w') as outfile:
        json.dump(errors, outfile)
Code Example #29
    def find_labels(self):
        from google.cloud import videointelligence
        video_client = videointelligence.VideoIntelligenceServiceClient()
        features = [videointelligence.enums.Feature.LABEL_DETECTION]
        operation = video_client.annotate_video(
            self.gcs_uri, features=features)

        result = operation.result(timeout=120)

        # Single video is being processed. '''annotation_results[0]'''
        self.segment_labels = result.annotation_results[0] \
            .segment_label_annotations
Code Example #30
def analyze_labels(index):
    # Map each segment-level label description to its confidence score.
    labels = {}
    path = "gs://devfest-gjm/Vines/" + str(index) + ".mp4"
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(path, features=features)
    result = operation.result(timeout=90)
    segment_labels = result.annotation_results[0].segment_label_annotations
    for i, segment_label in enumerate(segment_labels):
        for segment in segment_label.segments:
            labels[str(segment_label.entity.description)] = segment.confidence
    return labels