Code example #1
File: measure_wav.py  Project: sahilyousif/SIM
def voice_analysis():

    print("Loading library...")
    Vokaturi.load("../SIM/libs/OpenVokaturi-2-1/lib/Vokaturi_mac.so")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())

    print("Reading sound file...")
    file_name = "demo.wav"
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    print("   sample rate %.3f Hz" % sample_rate)

    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
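    # scale the 16-bit integer samples to [-1.0, +1.0); in the stereo branch the
    # "+ 0.0" promotes to float before the second channel is added, so the int16
    # sum cannot overflow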
    if samples.ndim == 1:  # mono
        c_buffer[:] = samples[:] / 32768.0
    else:  # stereo
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    if quality.valid:
        with Connection('amqp://*****:*****@localhost:5672//') as conn:
            producer = conn.Producer(serializer='json')
            producer.publish(
                {
                    'Neutral': format(emotionProbabilities.neutrality, '.3f'),
                    'Happy': format(emotionProbabilities.happiness, '.3f'),
                    'Sad': format(emotionProbabilities.sadness, '.3f'),
                    'Angry': format(emotionProbabilities.anger, '.3f'),
                    'Fear': format(emotionProbabilities.fear, '.3f')
                },
                exchange=media_exchange,
                routing_key='voice',
                declare=[voice_queue])
        # print ("Neutral: %.3f" % emotionProbabilities.neutrality)
        # print ("Happy: %.3f" % emotionProbabilities.happiness)
        # print ("Sad: %.3f" % emotionProbabilities.sadness)
        # print ("Angry: %.3f" % emotionProbabilities.anger)
        # print ("Fear: %.3f" % emotionProbabilities.fear)
    else:
        print("Not enough sonorancy to determine emotions")

    voice.destroy()
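
The function above publishes its results with kombu but references media_exchange and voice_queue, which are defined elsewhere in the project. A minimal kombu definition consistent with the publish call might look like the following sketch (the exchange name, type, and durability flag are assumptions, not taken from the project):

from kombu import Connection, Exchange, Queue

# assumed configuration; the real project may set these up differently
media_exchange = Exchange('media', 'direct', durable=True)
voice_queue = Queue('voice', exchange=media_exchange, routing_key='voice')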
Code example #2
def sentiment_analysis(path=None):
    if path is None:
        path = sys.argv[1]  # default to the sound file given on the command line
    print("Loading library...")
    Vokaturi.load("./OpenVokaturi-3-0-linux64.so")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())

    print("Reading sound file...")
    file_name = path
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    print("   sample rate %.3f Hz" % sample_rate)

    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                             samples[:, 1]) / 32768.0  # stereo

    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    if quality.valid:
        print("Neutral: %.3f" % emotionProbabilities.neutrality)
        print("Happy: %.3f" % emotionProbabilities.happiness)
        print("Sad: %.3f" % emotionProbabilities.sadness)
        print("Angry: %.3f" % emotionProbabilities.anger)
        print("Fear: %.3f" % emotionProbabilities.fear)
        out_dict = {
            'neutral': emotionProbabilities.neutrality,
            'happy': emotionProbabilities.happiness,
            'sad': emotionProbabilities.sadness,
            'angry': emotionProbabilities.anger,
            'fear': emotionProbabilities.fear,
        }
    else:
        print("Not enough sonorancy to determine emotions")
        out_dict = None

    voice.destroy()  # release the voice whether or not extraction succeeded
    return out_dict
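
A minimal call sketch (the file name here is hypothetical):

emotions = sentiment_analysis("recording.wav")
print(emotions)  # {'neutral': ..., 'happy': ..., 'sad': ..., 'angry': ..., 'fear': ...}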
Code example #3
def main():
    print("Loading library...")
    Vokaturi.load("lib/open/win/OpenVokaturi-3-3-win64.dll")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())
    r = RedisManager(host=RedisConfig['host'],
                     port=RedisConfig['port'],
                     db=RedisConfig['db'],
                     password=RedisConfig['password'],
                     decodedResponses=RedisConfig['decodedResponses'])
    sub = r.getRedisPubSub()
    sub.subscribe(RedisConfig['newAudioPubSubChannel'])
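    # block on the pub/sub channel; each message is expected to carry the Redis
    # key (audio ID) of a newly stored audio item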
    for item in sub.listen():
        print(item)  # Test
        if item['type'] == 'message':
            newMsg = item['data']
            print("New Msg: " + str(newMsg))  # Test
            if not isinstance(newMsg, str):
                newMsg = newMsg.decode()
            audioID = newMsg
            audioContent = r.hgetFromRedis(
                key=audioID, field=RedisConfig['audioHsetB64Field'])
            audioParams = r.hgetFromRedis(
                key=audioID, field=RedisConfig['audioHsetParamsField'])
            if audioContent:
                if isinstance(audioParams, bytes):
                    audioParams = audioParams.decode('utf-8')
                if isinstance(audioContent, bytes):
                    audioContent = audioContent.decode('utf-8')
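                # the stored payload is base64 text whose decoded form is a
                # Python literal; decode it, then parse the literal back into
                # an object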
                audioContent = base64.b64decode(audioContent)
                audioContent = ast.literal_eval(audioContent.decode('utf-8'))
                audioParams = ast.literal_eval(audioParams)
                audioEmotions = extractEmotionsFromAudioFile(
                    audioContent, audioParams)
                print(audioEmotions)  # Test
                if not audioEmotions:
                    audioEmotions = RedisConfig['voidMsg']
                r.publishOnRedis(channel=RedisConfig['VocalChannel'],
                                 msg=str(audioEmotions))
                r.hsetOnRedis(key=audioID,
                              field=RedisConfig['audioHsetVocalResultField'],
                              value=str(audioEmotions))
Code example #4
def analyzeAudio(filename):
    print("Loading library...")
    Vokaturi.load("lib/OpenVokaturi-3-3/lib/open/win/OpenVokaturi-3-3-win64.dll")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())
    
    print("Reading sound file...")
    (sample_rate, samples) = scipy.io.wavfile.read(filename)
    print("   sample rate %.3f Hz" % sample_rate)
    
    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0  # stereo
    
    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)
    
    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)
    
    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)
    
    if quality.valid:
        print("Neutral: %.3f" % emotionProbabilities.neutrality)
        print("Happy: %.3f" % emotionProbabilities.happiness)
        print("Sad: %.3f" % emotionProbabilities.sadness)
        print("Angry: %.3f" % emotionProbabilities.anger)
        print("Fear: %.3f" % emotionProbabilities.fear)
    
    voice.destroy()
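    # note: when quality.valid is false, the probabilities below may not be meaningful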
    
    return emotionProbabilities
Code example #5
# For the sound file example.wav that comes with OpenVokaturi, the result should be:
#   Neutral: 0.760
#   Happy: 0.000
#   Sad: 0.238
#   Angry: 0.001
#   Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../lib/Vokaturi_win64.dll")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
    c_buffer[:] = samples[:] / 32768.0
else:  # stereo
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
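
The listing breaks off after the sample buffer has been filled. Assuming it continues like the complete examples above (which is what would produce the expected output quoted in its header comment), the remaining steps would be:

print("Creating VokaturiVoice...")
voice = Vokaturi.Voice(sample_rate, buffer_length)

print("Filling VokaturiVoice with samples...")
voice.fill(buffer_length, c_buffer)

print("Extracting emotions from VokaturiVoice...")
quality = Vokaturi.Quality()
emotionProbabilities = Vokaturi.EmotionProbabilities()
voice.extract(quality, emotionProbabilities)

if quality.valid:
    print("Neutral: %.3f" % emotionProbabilities.neutrality)
    print("Happy: %.3f" % emotionProbabilities.happiness)
    print("Sad: %.3f" % emotionProbabilities.sadness)
    print("Angry: %.3f" % emotionProbabilities.anger)
    print("Fear: %.3f" % emotionProbabilities.fear)

voice.destroy()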
Code example #6
#
# A sample script that uses the Vokaturi library to extract the emotions from
# a wav file on disk. The file has to contain a mono or stereo recording.
#
# Call syntax:
#   python3 measure_wav_linux32.py path_to_sound_file.wav

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print ("Loading library...")
Vokaturi.load("../lib/Vokaturi_linux32.so")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:
	c_buffer[:] = samples[:] / 32768.0
else:
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0
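
This listing stops at the same point; under the same assumption, the emotion-extraction steps that follow would be:

voice = Vokaturi.Voice(sample_rate, buffer_length)
voice.fill(buffer_length, c_buffer)

quality = Vokaturi.Quality()
emotionProbabilities = Vokaturi.EmotionProbabilities()
voice.extract(quality, emotionProbabilities)

if quality.valid:
    print("Neutral: %.3f" % emotionProbabilities.neutrality)
    print("Happy: %.3f" % emotionProbabilities.happiness)
    print("Sad: %.3f" % emotionProbabilities.sadness)
    print("Angry: %.3f" % emotionProbabilities.anger)
    print("Fear: %.3f" % emotionProbabilities.fear)

voice.destroy()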
Code example #7
File: VokaturiHelper.py  Project: MAIOLIX/emoTest
def getOS(self):
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())
Code example #8
def voice_predict():
    file_name = "Audio/audio.wav"
    print("Loading library...")
    Vokaturi.load("OpenVokaturi-2-1d/lib/Vokaturi_mac64.so")

    print("Analyzed by: %s" % Vokaturi.versionAndLicense())

    print("Reading sound file...")
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    print("   sample rate %.3f Hz" % sample_rate)

    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                             samples[:, 1]) / 32768.0  # stereo

    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    fh = open("output.txt", 'a')
    fh.write("Based on your voice, your emotion is ")
    if not quality.valid:
        fh.write("beyond my understanding.\n")
        fh.close()
        voice.destroy()
        exit(1)

    print_result = [(round(emotionProbabilities.neutrality * 100), "neutral"),
                    (round(emotionProbabilities.happiness * 100), "happy"),
                    (round(emotionProbabilities.anger * 100), "angry"),
                    (round(emotionProbabilities.fear * 100), "fearful"),
                    (round(emotionProbabilities.sadness * 100), "sad")]
    print_result = [tup for tup in print_result if tup[0] != 0]
    print_result.sort(key=lambda tup: tup[0])
    if len(print_result) == 0:
        fh.write("beyond my understanding.\n")
    elif len(print_result) == 1:
        fh.write("dominantly %d percent %s.\n" %
                 (print_result[0][0], print_result[0][1]))
    else:
        for i in range(len(print_result) - 1, 0, -1):
            fh.write("%d percent %s, " %
                     (print_result[i][0], print_result[i][1]))
        fh.write("and %d percent %s.\n" %
                 (print_result[0][0], print_result[0][1]))
    fh.close()

    with open("output.txt") as f1:
        with open("templates.yaml", "w") as f2:
            f2.write("welcome: Ready to hear your comments?\n\nround: ")
            for line in f1:
                f2.write(line.strip("\n"))
                f2.write(" ")

    voice.destroy()