# Example #1
def get_emotion():
    """Wait for an uploaded audio file to appear on disk, then return the
    dominant emotion detected in it.

    Reads the target file name from the request args, polls until the file
    exists under ``base_path``, streams it to the DeepAffects emotion API,
    and returns the most frequent emotion label, lowercased.

    Returns:
        The lowercased most common emotion, or the string "error" when the
        path is not a regular file or the API yields no responses.
    """
    file_name = request.args.get("file_name")
    file_path = f"{base_path}{file_name}"

    # Poll until the upload lands on disk.
    # NOTE(review): this spins forever if the file never appears — consider
    # bounding the number of retries.
    while not os.path.exists(file_path):
        time.sleep(1)

    # Guard clause: bail out early when the path is not a regular file.
    if not os.path.isfile(file_path):
        print(f"{file_path} isn't a file!")
        return "error"

    responses = da_client.IdentifyEmotion(
        chunk_generator_from_file(file_path), 2000, metadata=metadata)

    # Tally how often each emotion label appears across streamed responses.
    # Counter is used directly instead of building a dict and converting.
    counts = Counter()
    for response in responses:
        counts[response.emotion] += 1
        print("Received message\n", response)

    # Fix: most_common(1)[0] raised IndexError when the stream was empty.
    if not counts:
        print("no emotion responses received")
        return "error"

    word, _ = counts.most_common(1)[0]
    print(f"most common sentiment: {word.lower()}")
    return word.lower()
def get_tone_emotion():
    """Stream the audio at the module-level ``file_path`` to the DeepAffects
    emotion API and print the first streamed response.

    Only the first response is consumed; the rest of the stream is
    discarded. Prints a fallback message when the API returns nothing.
    """
    responses = client.IdentifyEmotion(
        # Use chunk_generator_from_file generator to stream from local file.
        # chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url)
        # would stream from a remote url or youtube instead.
        chunk_generator_from_file(file_path),
        TIMEOUT_SECONDS, metadata=metadata)

    # Fix: the old loop-with-break left ``emotion`` unbound (NameError)
    # when the iterator was empty; next() with a default is safe.
    first = next(iter(responses), None)
    if first is None:
        print("no emotion response received")
        return

    emotion = str(first)
    print(emotion)
# DeepAffects realtime Api client
client = get_deepaffects_client()

# chunk_generator() is a generator function which yields audio segment object asynchronously
# gRPC call metadata: each entry is a (key, value) pair sent with every
# streaming request. The referenced names (apikey, speakerIds, encoding,
# sampleRate, languageCode, apiVersion, verbose) are module-level
# configuration defined elsewhere in the file.
metadata = [('apikey', apikey), ('speakerids', speakerIds),
            ('encoding', encoding), ('samplerate', sampleRate),
            ('languagecode', languageCode), ('apiversion', apiVersion),
            ('verbose', verbose)]
"""Stream audio from url or youtube.

responses = client.IdentifySpeaker(
    chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata)
"""
"""Stream audio from local file.
"""
responses = client.IdentifySpeaker(chunk_generator_from_file(file_path),
                                   TIMEOUT_SECONDS,
                                   metadata=metadata)

# responses is the iterator for all the response values
for response in responses:
    print("Received message")
    print(response)
"""Response.

    response = {           
        userId: userId of the speaker identified in the segment,
        start: start of the segment,
        end: end of the segment
    }
"""
# Example #4
# (orphaned docstring fragment continued from a previous example:)
#     content: base64 encoded audio,
#     segmentOffset: offset of the segment in the complete audio stream

"""
Sample implementation which reads audio from a file, splits it into
segments of more than 3 seconds using AudioSegment, and yields base64
encoded audio segment objects asynchronously.
"""
"""Stream audio from url or youtube.

responses = client.IdentifyEmotion(
    chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata)
"""
"""Stream audio from local file.
"""
responses = client.IdentifyEmotion(chunk_generator_from_file(file_path),
                                   TIMEOUT_SECONDS,
                                   metadata=metadata)

# responses is the iterator for all the response values
for response in responses:
    print("Received message")
    print(response)
"""Response.
    response = {
        emotion: Emotion identified in the segment,
        start: start of the segment,
        end: end of the segment
    }
"""
# (orphaned docstring fragment continued from a previous example:)
#     content: base64 encoded audio,
#     segmentOffset: offset of the segment in the complete audio stream

"""
Sample implementation which reads audio from a file, splits it into
segments of more than 3 seconds using AudioSegment, and yields base64
encoded audio segment objects asynchronously.
"""
"""Stream audio from url or youtube.

responses = client.DiarizeEmotion(
    chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata)
"""
"""Stream audio from local file.
"""
responses = client.DiarizeEmotion(chunk_generator_from_file(file_path),
                                  TIMEOUT_SECONDS,
                                  metadata=metadata)

# responses is the iterator for all the response values
for response in responses:
    print("Received message")
    print(response)
"""Response.
    response = {
        userId: userId of the speaker identified in the segment,
        emotion: Emotion identified in the segment,
        start: start of the segment,
        end: end of the segment
    }
"""
# Example #6
# segment_chunk(Args)
"""segment_chunk.

Args:
    encoding: audio encoding,
    languageCode: language code,
    sampleRate: sample rate of the audio,
    content: base64 encoded audio,
    segmentOffset: offset of the segment in the complete audio stream
"""

# Invoke the streaming emotion API with the chunk generator and call metadata.
responses = client.IdentifyEmotion(
    # Stream from a local file; swap in
    # chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url)
    # to stream from a remote url or youtube instead.
    chunk_generator_from_file(file_path),
    TIMEOUT_SECONDS,
    metadata=metadata)

# Inspect only the first streamed response, then stop consuming.
for streamed in responses:
    print("Received message", streamed)
    break
"""Response.
    response = {
        emotion: Emotion identified in the segment,
        start: start of the segment,
        end: end of the segment
    }