Example no. 1
def __init__(self):
    self.__checkOS()
    if self.__osSystem == 'win32':
        print("Loading library...")
        Vokaturi.load("./lib/open/win/OpenVokaturi-3-3-win32.dll")
    if self.__osSystem == 'linux':
        Vokaturi.load('./lib/open/linux/OpenVokaturi-3-3-linux64.so')
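The snippet assumes a private __checkOS helper that sets self.__osSystem; that helper is not shown. A minimal sketch, assuming it simply wraps sys.platform (hypothetical, not the original code):

import sys

def __checkOS(self):
    # sys.platform is 'win32' on Windows and 'linux' on modern Linux
    # interpreters, matching the string comparisons in __init__ above.
    self.__osSystem = 'win32' if sys.platform.startswith('win') else 'linux'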
Example no. 2
def analyzeFile(filepath, lineNum):
    print("in analyzeFile 1")
    Vokaturi.load("./lib/Vokaturi_mac.so")

    (sample_rate, samples) = scipy.io.wavfile.read(filepath)

    print("in analyzeFile 2")
    buffer_length = len(samples)
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:  # mono
        c_buffer[:] = samples[:] / 32768.0
    else:  # stereo
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

    voice = Vokaturi.Voice(sample_rate, buffer_length)
    print("in analyzeFile 3")

    voice.fill(buffer_length, c_buffer)

    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)
    print("in analyzeFile 4")

    if quality.valid:
        emotions = [
            emotionProbabilities.happiness, emotionProbabilities.sadness,
            emotionProbabilities.anger, emotionProbabilities.fear,
            emotionProbabilities.neutrality
        ]

        if max(emotions) == emotionProbabilities.happiness:
            maxEmotion = "Happy"
        elif max(emotions) == emotionProbabilities.sadness:
            maxEmotion = "Sad"
        elif max(emotions) == emotionProbabilities.anger:
            maxEmotion = "Angry"
        elif max(emotions) == emotionProbabilities.neutrality:
            maxEmotion = "Neut"
        else:
            maxEmotion = "Afraid"

        stats = (
            "Happy: %.3f\tSad: %.3f\tAngry: %.3f\tFear: %.3f\tNeut: %.3f" %
            (emotions[0], emotions[1], emotions[2], emotions[3], emotions[4]))

        print("in analyzeFile 5")
        emotionFile = open("emotions", 'a')
        print("in analyzeFile 6")
        writeEmotions(emotionFile, maxEmotion + " " + stats, lineNum)
        print("in analyzeFile 7")
        emotionFile.close()
        print("in analyzeFile 8")

    else:
        print("Not enough sonorancy to determine emotions")
Example no. 3
def voice_analysis():

    print("Loading library...")
    Vokaturi.load("../SIM/libs/OpenVokaturi-2-1/lib/Vokaturi_mac.so")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())

    print("Reading sound file...")
    file_name = "demo.wav"
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    print("   sample rate %.3f Hz" % sample_rate)

    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:  # mono
        c_buffer[:] = samples[:] / 32768.0
    else:  # stereo
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    if quality.valid:
        with Connection('amqp://*****:*****@localhost:5672//') as conn:
            producer = conn.Producer(serializer='json')
            producer.publish(
                {
                    'Neutral': format(emotionProbabilities.neutrality, '.3f'),
                    'Happy': format(emotionProbabilities.happiness, '.3f'),
                    'Sad': format(emotionProbabilities.sadness, '.3f'),
                    'Angry': format(emotionProbabilities.anger, '.3f'),
                    'Fear': format(emotionProbabilities.fear, '.3f')
                },
                exchange=media_exchange,
                routing_key='voice',
                declare=[voice_queue])
        # print ("Neutral: %.3f" % emotionProbabilities.neutrality)
        # print ("Happy: %.3f" % emotionProbabilities.happiness)
        # print ("Sad: %.3f" % emotionProbabilities.sadness)
        # print ("Angry: %.3f" % emotionProbabilities.anger)
        # print ("Fear: %.3f" % emotionProbabilities.fear)
    else:
        print("Not enough sonorancy to determine emotions")

    voice.destroy()
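media_exchange and voice_queue are defined elsewhere in this project; assuming the Connection above is kombu's, a plausible declaration (an assumption, not shown in the original) would be:

from kombu import Exchange, Queue

media_exchange = Exchange('media', type='direct')
voice_queue = Queue('voice', exchange=media_exchange, routing_key='voice')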
Example no. 4
def vokatori_fun(file_name):
    #print("Loading library...")
    Vokaturi.load(
        "C:/Users/gokhalea/HackathonCodes/OpenVokaturi-3-0a/OpenVokaturi-3-0a/lib/open/win/OpenVokaturi-3-0-win64.dll"
    )
    #print("Analyzed by: %s" % Vokaturi.versionAndLicense())

    #print("Reading sound file...")
    #file_name = sys.argv[1]
    #(sample_rate, samples) = scipy.io.wavfile.read(file_name)
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    #print("   sample rate %.3f Hz" % sample_rate)
    #print("Samples:" % samples)
    #print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    #print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:  # mono
        c_buffer[:] = samples[:] / 32768.0
    else:  # stereo
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

    #print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    #print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    if quality.valid:

        print("Neutral: %.3f" % emotionProbabilities.neutrality)
        print("Happy: %.3f" % emotionProbabilities.happiness)
        print("Sad: %.3f" % emotionProbabilities.sadness)
        print("Angry: %.3f" % emotionProbabilities.anger)
        print("Fear: %.3f" % emotionProbabilities.fear)
        print(
            "______________________________________________________________________________________________"
        )
        voice.destroy()  # release the native voice before the early return
        return emotionProbabilities
    else:
        print("Not enough sonorancy to determine emotions")

    voice.destroy()

    return None
Example no. 5
def analyze(file_name):

    #   print ("Loading library...")
    Vokaturi.load("../lib/Vokaturi_linux64.so")
    #   print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

    #   print ("Reading sound file...")
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    #   print ("   sample rate %.3f Hz" % sample_rate)

    #   print ("Allocating Vokaturi sample array...")
    buffer_length = len(samples)

    #   print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)

    if samples.ndim == 1:  # mono
        c_buffer[:] = samples[:] / 32768.0
    else:  # stereo
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

#   print ("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    #   print ("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    #   print ("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    enabled = True
    if quality.valid:
        #          print ("Neutral: %.3f" % emotionProbabilities.neutrality)
        #          print ("Happy: %.3f" % emotionProbabilities.happiness)
        #          print ("Sad: %.3f" % emotionProbabilities.sadness)
        #          print ("Angry: %.3f" % emotionProbabilities.anger)
        #          print ("Fear: %.3f" % emotionProbabilities.fear)
        value = emotionProbabilities.anger
    else:
        value = 0
        enabled = False

    voice.destroy()
    return enabled, value
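A hypothetical caller (the file name is an assumption) that polls the anger level returned by analyze:

ok, anger = analyze("clip.wav")
if ok:
    print("Anger probability: %.3f" % anger)
else:
    print("Recording too quiet to analyze")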
Example no. 6
def callVokaturi(fileName):

    # Loading Vokaturi Mac
    Vokaturi.load("/Users/nchao/Desktop/Yale Hacks/lib/Vokaturi_mac.so")

    # Reading sound files (.wav)
    file_name = "/Users/nchao/Desktop/Yale Hacks/" + fileName
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)

    # Allocating Vokaturi sample array
    buffer_length = len(samples)
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                             samples[:, 1]) / 32768.0  # stereo

    # Creating VokaturiVoice and filling it with voice sample
    voice = Vokaturi.Voice(sample_rate, buffer_length)
    voice.fill(buffer_length, c_buffer)

    # Extracting emotions from Vokaturi
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)
    emoDict = {
        "Neutral": emotionProbabilities.neutrality,
        "Happy": emotionProbabilities.happiness,
        "Sad": emotionProbabilities.sadness,
        "Angry": emotionProbabilities.anger,
        "Fear": emotionProbabilities.fear
    }

    # Finding main emotion in voice file
    sortedVals = sorted(emoDict.values())[::-1]
    stdDev = numpy.std(sortedVals)
    emotions = []
    for percentage in sortedVals:
        if percentage > abs(max(sortedVals) - 1.5 * stdDev):
            emotions += [
                key for key, val in emoDict.items() if val == percentage
            ]

    voice.destroy()
    return emoDict
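Note that the emotions list of near-dominant labels is computed but never returned; callers only receive emoDict. A hypothetical caller that recovers the dominant emotion from the returned dict:

probs = callVokaturi("recording.wav")  # file name is an assumption
dominant = max(probs, key=probs.get)
print("Dominant emotion: %s (%.3f)" % (dominant, probs[dominant]))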
Example no. 7
def sentiment_analysis(path=sys.argv[1]):
    print("Loading library...")
    Vokaturi.load("./OpenVokaturi-3-0-linux64.so")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())

    print("Reading sound file...")
    file_name = path
    (sample_rate, samples) = scipy.io.wavfile.read(file_name)
    print("   sample rate %.3f Hz" % sample_rate)

    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                             samples[:, 1]) / 32768.0  # stereo

    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)

    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)

    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    if quality.valid:
        print("Neutral: %.3f" % emotionProbabilities.neutrality)
        print("Happy: %.3f" % emotionProbabilities.happiness)
        print("Sad: %.3f" % emotionProbabilities.sadness)
        print("Angry: %.3f" % emotionProbabilities.anger)
        print("Fear: %.3f" % emotionProbabilities.fear)
        out_dict = {}
        out_dict['neutral'] = emotionProbabilities.neutrality
        out_dict['happy'] = emotionProbabilities.happiness
        out_dict['sad'] = emotionProbabilities.sadness
        out_dict['angry'] = emotionProbabilities.anger
        out_dict['fear'] = emotionProbabilities.fear
        voice.destroy()
        return out_dict
    else:
        # Quality too low: clean up and return nothing.
        print("Not enough sonorancy to determine emotions")
        voice.destroy()
        return None
Example no. 8
def extract_emotions(file_path):

    Vokaturi.load(get_vokaturi_lib())

    (sample_rate, samples) = scipy.io.wavfile.read(file_path)

    buffer_length = len(samples)

    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                             samples[:, 1]) / 32768.0  # stereo

    voice = Vokaturi.Voice(sample_rate, buffer_length)

    voice.fill(buffer_length, c_buffer)

    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    # if quality.valid:
    print("Neutral: %.3f" % emotionProbabilities.neutrality)
    print("Happy: %.3f" % emotionProbabilities.happiness)
    print("Sad: %.3f" % emotionProbabilities.sadness)
    print("Angry: %.3f" % emotionProbabilities.anger)
    print("Fear: %.3f" % emotionProbabilities.fear)

    emotions = {
        'neutrality': "%.3f" % emotionProbabilities.neutrality,
        'happiness': "%.3f" % emotionProbabilities.happiness,
        'sadness': "%.3f" % emotionProbabilities.sadness,
        'anger': "%.3f" % emotionProbabilities.anger,
        'fear': "%.3f" % emotionProbabilities.fear
    }

    voice.destroy()

    return emotions
Example no. 9
def main():
    print("Loading library...")
    Vokaturi.load("lib/open/win/OpenVokaturi-3-3-win64.dll")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())
    r = RedisManager(host=RedisConfig['host'],
                     port=RedisConfig['port'],
                     db=RedisConfig['db'],
                     password=RedisConfig['password'],
                     decodedResponses=RedisConfig['decodedResponses'])
    sub = r.getRedisPubSub()
    sub.subscribe(RedisConfig['newAudioPubSubChannel'])
    for item in sub.listen():
        print(item)  # Test
        if item['type'] == 'message':
            newMsg = item['data']
            print("New Msg: " + str(newMsg))  # Test
            if not isinstance(newMsg, str):
                newMsg = newMsg.decode()
            audioID = newMsg
            audioContent = r.hgetFromRedis(
                key=audioID, field=RedisConfig['audioHsetB64Field'])
            audioParams = r.hgetFromRedis(
                key=audioID, field=RedisConfig['audioHsetParamsField'])
            if audioContent:
                if isinstance(audioParams, bytes):
                    audioParams = audioParams.decode('utf-8')
                if isinstance(audioContent, bytes):
                    audioContent = audioContent.decode('utf-8')
                audioContent = base64.b64decode(audioContent)
                audioContent = ast.literal_eval(audioContent.decode('utf-8'))
                audioParams = ast.literal_eval(audioParams)
                audioEmotions = extractEmotionsFromAudioFile(
                    audioContent, audioParams)
                print(audioEmotions)  # Test
                if not audioEmotions:
                    audioEmotions = RedisConfig['voidMsg']
                r.publishOnRedis(channel=RedisConfig['VocalChannel'],
                                 msg=str(audioEmotions))
                r.hsetOnRedis(key=audioID,
                              field=RedisConfig['audioHsetVocalResultField'],
                              value=str(audioEmotions))
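For reference, a sketch of a producer compatible with this consumer loop. The RedisManager methods and RedisConfig keys are the ones used above; the function name and its arguments are hypothetical:

import base64

def publish_audio(r, audio_id, audio_payload, params):
    # main() decodes with ast.literal_eval(base64.b64decode(...)), so the
    # hash field must hold the base64 of a Python-literal string.
    r.hsetOnRedis(key=audio_id,
                  field=RedisConfig['audioHsetB64Field'],
                  value=base64.b64encode(repr(audio_payload).encode('utf-8')))
    r.hsetOnRedis(key=audio_id,
                  field=RedisConfig['audioHsetParamsField'],
                  value=repr(params))
    r.publishOnRedis(channel=RedisConfig['newAudioPubSubChannel'],
                     msg=audio_id)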
Example no. 10
def analyzeAudio(filename):
    print("Loading library...")
    Vokaturi.load("lib/OpenVokaturi-3-3/lib/open/win/OpenVokaturi-3-3-win64.dll")
    print("Analyzed by: %s" % Vokaturi.versionAndLicense())
    
    print("Reading sound file...")
    (sample_rate, samples) = scipy.io.wavfile.read(filename)
    print("   sample rate %.3f Hz" % sample_rate)
    
    print("Allocating Vokaturi sample array...")
    buffer_length = len(samples)
    print("   %d samples, %d channels" % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0  # stereo
    
    print("Creating VokaturiVoice...")
    voice = Vokaturi.Voice(sample_rate, buffer_length)
    
    print("Filling VokaturiVoice with samples...")
    voice.fill(buffer_length, c_buffer)
    
    print("Extracting emotions from VokaturiVoice...")
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)
    
    if quality.valid:
        print("Neutral: %.3f" % emotionProbabilities.neutrality)
        print("Happy: %.3f" % emotionProbabilities.happiness)
        print("Sad: %.3f" % emotionProbabilities.sadness)
        print("Angry: %.3f" % emotionProbabilities.anger)
        print("Fear: %.3f" % emotionProbabilities.fear)
    
    voice.destroy()

    # Note: emotionProbabilities is returned even when quality.valid is
    # False, in which case its fields are meaningless.
    return emotionProbabilities
Example no. 11
def emotion_recognition(speech):

    #import library Vokaturi
    sys.path.append("../api")
    Vokaturi.load("../lib/Vokaturi_mac.so")

    (sample_rate, samples) = scipy.io.wavfile.read(speech)

    buffer_length = len(samples)

    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:
        c_buffer[:] = samples[:] / 32768.0  # mono
    else:
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                             samples[:, 1]) / 32768.0  # stereo

    voice = Vokaturi.Voice(sample_rate, buffer_length)

    voice.fill(buffer_length, c_buffer)

    #extracting emotions from speech
    quality = Vokaturi.Quality()
    emotionProbabilities = Vokaturi.EmotionProbabilities()
    voice.extract(quality, emotionProbabilities)

    emotion, percent = None, None  # defaults if quality is too low
    if quality.valid:
        '''print("Neutral: %.3f" % emotionProbabilities.neutrality)
        print("Happy: %.3f" % emotionProbabilities.happiness)
        print("Sad: %.3f" % emotionProbabilities.sadness)
        print("Angry: %.3f" % emotionProbabilities.anger)
        print("Fear: %.3f" % emotionProbabilities.fear)'''
        # .max / .max.percent are not part of the stock OpenVokaturi
        # EmotionProbabilities struct; they appear to be a project-specific
        # extension of the Vokaturi wrapper.
        emotion = emotionProbabilities.max
        percent = emotionProbabilities.max.percent

    voice.destroy()

    return emotion, percent
Example no. 12
# Paul Boersma 2017-01-25
#
# A sample script that uses the Vokaturi library to extract the emotions from
# a wav file on disk. The file has to contain a mono recording.
#
# Call syntax:
#   python3 measure_wav_linux32.py path_to_sound_file.wav

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../lib/Vokaturi_linux32.so")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:
    c_buffer[:] = samples[:] / 32768.0
else:
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
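The division by 32768.0 rescales 16-bit PCM integers to floats in [-1.0, 1.0), and the stereo branch averages the two channels; an equivalent NumPy formulation (a sketch, not part of the original script):

import numpy as np

def to_float_mono(samples):
    samples = samples.astype(np.float64) / 32768.0  # 16-bit full scale
    if samples.ndim == 2:  # stereo: average the two channels
        samples = samples.mean(axis=1)
    return samples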
Example no. 13
from pathlib import Path
import sys

from google.cloud import speech
from google.cloud.speech import enums, types
import scipy.io.wavfile

from .config import project_root
from .state import get_state

vokaturi_directory = project_root() / 'deps/Vokaturi'
sys.path.append(str(vokaturi_directory / 'api'))
import Vokaturi
Vokaturi.load(str(vokaturi_directory / 'OpenVokaturi-3-0-linux64.so'))


def read_speech(path):
    import subprocess
    output = str(Path(path).resolve().parent / 'file.wav')
    subprocess.check_call(['sox', '|opusdec --force-wav {} -'.format(path), output])
    return scipy.io.wavfile.read(output)
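The '|opusdec --force-wav {} -' argument uses sox's pipe pseudo-file syntax: a filename that starts with | makes sox read its input from the stdout of the quoted command, here opusdec decoding the Opus recording to WAV.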


def get_sentiment(sample_rate, samples):
    print('Sample rate %.3f Hz' % sample_rate)

    print('Allocating Vokaturi sample array...')
    buffer_length = len(samples)
    print('%d samples, %d channels' % (buffer_length, samples.ndim))
    c_buffer = Vokaturi.SampleArrayC(buffer_length)
    if samples.ndim == 1:  # mono
        c_buffer[:] = samples[:] / 32768.0
    else:  # stereo
        c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
Example no. 14
Affiliation: Robotics Laboratory, Japan Advanced Institute of Science and Technology, Japan
Project:     CARESSES (http://caressesrobot.org/en/)
'''

import sys
import audioop
import math

sys.path.append("./api")
import Vokaturi
import numpy as np

from collections import deque
from pyaudio import PyAudio, paInt16

Vokaturi.load("./lib/open/linux/OpenVokaturi-3-0-linux64.so")


class VocalEmotionEstimatorEngine():
    CHUNK = 2048
    RATE = 16000
    THRESHOLD = 1500
    SILENCE_LIMIT = 1
    PREV_AUDIO = 0.5

    def __init__(self):
        self.data = []
        self.audio2send = []
        # rel = chunks per second, so the sliding window holds SILENCE_LIMIT
        # seconds of readings. deque's maxlen must be an int in Python 3,
        # so the float ratio is truncated explicitly.
        self.rel = VocalEmotionEstimatorEngine.RATE / VocalEmotionEstimatorEngine.CHUNK
        self.slid_win = deque(
            maxlen=int(VocalEmotionEstimatorEngine.SILENCE_LIMIT * self.rel))
Example no. 15
def __init__(self):
    self.__checkOS()
    if self.__osSystem == 'win32':
        print("Loading library...")
        Vokaturi.load("../lib/open/win/OpenVokaturi-3-3-win64.dll")
Example no. 16
fileCount = 0
osLoaded = False

print("1 for linux32 ")
print("2 for linux64 ")
print("3 for mac32 ")
print("4 for mac64 ")
print("5 for win32 ")
print("6 for win64 ")
selection = input("Please select os version: ")

# while loop for selecting the OS version
while not osLoaded:
    if selection == '1':
        print("Loading...")
        Vokaturi.load(
            "../OpenVokaturi-3-0/lib/open/linux/OpenVokaturi-3-0-linux32.so")
        osLoaded = True
        print("OS loaded")

    elif selection == '2':
        print("Loading...")
        Vokaturi.load(
            "../OpenVokaturi-3-0/lib/open/linux/OpenVokaturi-3-0-linux64.so")
        osLoaded = True
        print("OS loaded")

    elif selection == '3':
        print("Loading...")
        Vokaturi.load(
            "../OpenVokaturi-3-0/lib/open/macos/OpenVokaturi-3-0-mac32.dylib")
        osLoaded = True
Example no. 17
#
# For the sound file hello.wav that comes with OpenVokaturi, the result should be:
#	Neutral: 0.760
#	Happy: 0.000
#	Sad: 0.238
#	Angry: 0.001
#	Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../lib/open/macos/OpenVokaturi-3-0-mac64.dylib")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
    c_buffer[:] = samples[:] / 32768.0
else:  # stereo
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
Example no. 18
#
# For the sound file hello.wav that comes with OpenVokaturi, the result should be:
#	Neutral: 0.760
#	Happy: 0.000
#	Sad: 0.238
#	Angry: 0.001
#	Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print ("Loading library...")
Vokaturi.load("../lib/open/linux/OpenVokaturi-3-3-linux32.so")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
	c_buffer[:] = samples[:] / 32768.0
else:  # stereo
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0
Example no. 19
#
# For the sound file example.wav that comes with OpenVokaturi, the result should be:
#   Neutral: 0.426
#   Happy: 0.001
#   Sad: 0.506
#   Angry: 0.005
#   Fear: 0.063

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../lib/Vokaturi_linux_arm64.so")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
    c_buffer[:] = samples[:] / 32768.0
else:  # stereo
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
Example no. 20
# Paul Boersma 2017-01-25
#
# A sample script that uses the Vokaturi library to extract the emotions from
# a wav file on disk. The file has to contain a mono recording.
#
# Call syntax:
#   python3 measure_wav_linux64.py path_to_sound_file.wav

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print ("Loading library...")
Vokaturi.load("../lib/Vokaturi_win64.dll")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
	c_buffer[:] = samples[:] / 32768.0
else:  # stereo
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0
Example no. 21
#
# For the sound file example.wav that comes with OpenVokaturi, the result should be:
#   Neutral: 0.426
#   Happy: 0.001
#   Sad: 0.506
#   Angry: 0.005
#   Fear: 0.063

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../lib/Vokaturi_mac.so")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
    c_buffer[:] = samples[:] / 32768.0
else:  # stereo
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
Example no. 22
#	Neutral: 0.760
#	Happy: 0.000
#	Sad: 0.238
#	Angry: 0.001
#	Fear: 0.000

import sys
import scipy.io.wavfile
import Vokaturi
import os

vokaturi_path = os.path.dirname(os.path.realpath(__file__)) 
print(vokaturi_path)

print ("Loading library...")
Vokaturi.load(vokaturi_path + "/OpenVokaturi-3-0-mac64.dylib")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(vokaturi_path + "/"+ file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
	c_buffer[:] = samples[:] / 32768.0
else:  # stereo
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0
Example no. 23
#	Angry: 0.001
#	Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("./")
import Vokaturi

os = sys.argv[2]

print("Loading library...")
# NOTE: This is working for OSX, Linux, Win64. If this doesn't work on your machine, you might have to install some other
# Vokaturi library like the win32.dll file and change the relative path accordingly
if os == 'windows':
    Vokaturi.load("./OpenVokaturi-3-0-win64.dll")
elif os == 'linux':
    Vokaturi.load("./OpenVokaturi-3-0-linux64.so")
elif os == 'mac' or os == 'osx' or os == 'macos':
    Vokaturi.load("./OpenVokaturi-3-0-mac64.dylib")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
Example no. 24
# Paul Boersma 2017-01-25
#
# A sample script that uses the Vokaturi library to extract the emotions from
# a wav file on disk. The file has to contain a mono recording.
#
# Call syntax:
#   python3 measure_wav_mac.py path_to_sound_file.wav

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print ("Loading library...")
Vokaturi.load("../lib/Vokaturi_mac.so")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
	c_buffer[:] = samples[:] / 32768.0
else:  # stereo
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0
Example no. 25
import uuid
import gc
import sys
import scipy.io.wavfile
sys.path.append("api")
import Vokaturi
import tornado.websocket  # required by the OpusDecoderWS handler below
from shutil import copyfile
from confidence_prediction import test_example
import pickle

with open('logistic_regression_.pkl', 'rb') as f:
    clf = pickle.load(f)

#from opus.decoder import Decoder as OpusDecoder
#Vokaturi.load("api/OpenVokaturi-3-3-win64.dll")
Vokaturi.load("api/OpenVokaturi-3-3-linux64.so")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())


def analyze():
    file_name = "tempFile.wav"
    test_exm = test_example(file_name, clf)
    return test_exm[0, 0], test_exm[0, 1]


clients = {}


class OpusDecoderWS(tornado.websocket.WebSocketHandler):
    def open(self):
        print('new connection')
Example no. 26
#
# For the sound file hello.wav that comes with OpenVokaturi, the result should be:
#	Neutral: 0.760
#	Happy: 0.000
#	Sad: 0.238
#	Angry: 0.001
#	Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../bin/Debug/OpenVokaturi-3-0-win64.dll")
#Vokaturi.load("../lib/open/win/OpenVokaturi-3-0-win32.dll")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
    c_buffer[:] = samples[:] / 32768.0
else:  # stereo
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
Example no. 27
#
# Call syntax:
#   python3 analyze_one_file.py path_to_sound_file.wav

import sys
import os
import scipy.io.wavfile

sys.path.append(os.path.join(sys.path[0],'api'))

import Vokaturi

file_name = sys.argv[1]

libPath = os.path.join(sys.path[0],'lib','Vokaturi_mac.so')
Vokaturi.load(libPath)

os.system('clear')

(sample_rate, samples) = scipy.io.wavfile.read(file_name)
samples = samples[:] / 32768.0
buffer_length = len(samples)
c_buffer = Vokaturi.SampleArrayC(buffer_length)

# something weird with original sample code here.
# try just the first element in array.
for index in range(len(samples)):
    c_buffer[index] = samples[index][0]

voice = Vokaturi.Voice(sample_rate, buffer_length)
emotionProbabilities = Vokaturi.EmotionProbabilities()
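For what it's worth, a vectorized equivalent of the element-by-element copy above (a sketch; like the loop, it assumes a two-dimensional stereo samples array):

c_buffer[:] = samples[:, 0]  # first channel only, in one slice assignment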
Example no. 28
#
# For the sound file hello.wav that comes with OpenVokaturi, the result should be:
#	Neutral: 0.760
#	Happy: 0.000
#	Sad: 0.238
#	Angry: 0.001
#	Fear: 0.000

import sys
import pyaudio
import numpy as np

sys.path.append("/OpenVokaturi/api")
import Vokaturi

Vokaturi.load("/OpenVokaturi/lib/open/win/OpenVokaturi-3-0-win64.dll")

CHUNKSIZE = 100000  # fixed chunk size
sample_rate = 44100

# initialize portaudio
p = pyaudio.PyAudio()

device_info = p.get_default_input_device_info()
#device_info = p.get_device_info_by_index(1)
# pick the larger of the input/output channel counts (as in the original)
channelcount = max(device_info["maxInputChannels"],
                   device_info["maxOutputChannels"])
stream = p.open(format=pyaudio.paInt16,
                channels=channelcount,
                rate=int(device_info["defaultSampleRate"]),
                input=True,
Example no. 29
#
# For the sound file hello.wav that comes with OpenVokaturi, the result should be:
#	Neutral: 0.760
#	Happy: 0.000
#	Sad: 0.238
#	Angry: 0.001
#	Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print ("Loading library...")
Vokaturi.load("../lib/open/win/OpenVokaturi-3-3-win32.dll")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
	c_buffer[:] = samples[:] / 32768.0
else:  # stereo
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0
Example no. 30
#
# For the sound file example.wav that comes with OpenVokaturi, the result should be:
#   Neutral: 0.760
#   Happy: 0.000
#   Sad: 0.238
#   Angry: 0.001
#   Fear: 0.000

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print("Loading library...")
Vokaturi.load("../lib/Vokaturi_win64.dll")
print("Analyzed by: %s" % Vokaturi.versionAndLicense())

print("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print("   sample rate %.3f Hz" % sample_rate)

print("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:  # mono
    c_buffer[:] = samples[:] / 32768.0
else:  # stereo
    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
Example no. 31
def listen_me():

    global text, duration

    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    # prepare the Vokaturi library
    Vokaturi.load("/home/pi/lib/piZero.so")

    # initialize the Cloud Speech, text NLP, and TTS clients
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()
    tts_client = texttospeech.TextToSpeechClient()

    pos_wavs = []
    neut_wavs = []
    neg_wavs = []
    intro_wavs = []

    pos_wavs.append(text_to_audio(tts_client, '진짜?', '0.wav'))
    pos_wavs.append(text_to_audio(tts_client, '대박', '1.wav'))
    pos_wavs.append(text_to_audio(tts_client, '우와', '2.wav'))
    pos_wavs.append(text_to_audio(tts_client, '하하', '3.wav'))

    neut_wavs.append(text_to_audio(tts_client, '응', '10.wav'))
    neut_wavs.append(text_to_audio(tts_client, '그렇구나', '11.wav'))
    neut_wavs.append(text_to_audio(tts_client, '그래서?', '12.wav'))
    neut_wavs.append(text_to_audio(tts_client, '응응', '13.wav'))

    neg_wavs.append(text_to_audio(tts_client, '저런', '4.wav'))
    neg_wavs.append(text_to_audio(tts_client, '힘내', '5.wav'))
    neg_wavs.append(text_to_audio(tts_client, '에휴', '6.wav'))

    intro_wavs.append(text_to_audio(tts_client, '들어줄게. 얘기해봐', 'intro0.wav'))
    intro_wavs.append(text_to_audio(tts_client, '무슨 일 이야?', 'intro1.wav'))
    play_wav(random.choice(intro_wavs))

    logging.basicConfig(level=logging.INFO)

    with Board() as board:

        while True:

            print('말해보자.')
            text = None
            duration = 0.
            emotion = None

            def wait():
                global text, duration
                start = time.monotonic()

                while text is None:

                    # recognize speech as text
                    text = client.recognize(language_code='ko-KR')
                    duration = time.monotonic() - start

            # record while waiting for recognition
            record_file(AudioFormat.CD,
                        filename=args.filename,
                        wait=wait,
                        filetype='wav')

            print(text)
            print('Recorded: %.02f seconds' % duration)

            if text in ['들어줘서 고마워', '내 얘기 들어줘서 고마워', '어시스턴트', '잘가', '잘 가']:
                return

            # sentiment analysis on the recognized text
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment

            print('텍스트 감정 분석*********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score,
                                             sentiment.magnitude))

            ##################### thresholds; may be tuned after experimentation ####################
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1

            # text sentiment analysis is enough
            if (sentiment.score < neg_standard
                    or sentiment.score > pos_standard):
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")

            else:
                # emotion analysis on the recorded audio file
                print('오디오 감정 분석*********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                # print ("   sample rate %.3f Hz" % sample_rate)

                # print ("Allocating Vokaturi sample array...")
                buffer_length = len(samples)
                print("   %d samples, %d channels" %
                      (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                                         samples[:, 1]) / 32768.0

                # print ("Creating VokaturiVoice...")
                voice = Vokaturi.Voice(sample_rate, buffer_length)

                # print ("Filling VokaturiVoice with samples...")
                voice.fill(buffer_length, c_buffer)

                # print ("Extracting emotions from VokaturiVoice...")
                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)

                if quality.valid:
                    # print ("Neutral: %.3f" % emotionProbabilities.neutrality)
                    # print ("Happy: %.3f" % emotionProbabilities.happiness)
                    # print ("Sad: %.3f" % emotionProbabilities.sadness)
                    # print ("Angry: %.3f" % emotionProbabilities.anger)
                    # print ("Fear: %.3f" % emotionProbabilities.fear)
                    # fear is deliberately ignored here.

                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness +
                        emotionProbabilities.anger)

                    if wave_score > 0 and sentiment.score > 0.4:
                        print('@@@긍정')
                        emotion = True
                    elif wave_score < 0 and sentiment.score < 0.4:
                        print('@@@부정')
                        emotion = False

                    # when the text score and the wave score disagree, stay neutral (emotion = None)

            # reactions begin here.

            with Leds() as leds:
                if emotion is True:
                    play_wav(random.choice(pos_wavs))
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                elif emotion is False:
                    play_wav(random.choice(neg_wavs))
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
                    # play_wav('people-cheering.wav')

                # neutral reaction
                else:
                    play_wav(random.choice(neut_wavs))
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
Example no. 32
import requests
import json
from picamera import PiCamera
from time import sleep

#lib required for heart-rate emotion recognition
import RPi.GPIO as GPIO

#libs required for audio emotion recognition
import sys
import scipy.io.wavfile

sys.path.append("../lib/OpenVokaturi-3-0a/api")
print("[AUDIO] Loading library...")
import Vokaturi
Vokaturi.load("../lib/OpenVokaturi-3-0a/lib/open/pi3b.so")
print("[AUDIO] Analyzed by: %s" % Vokaturi.versionAndLicense())

import pyaudio
import wave

EMOTIONS = [
    'fear', 'happy', 'neutral', 'contempt', 'surprise', 'sadness', 'anger',
    'disgust'
]
_FEAR = 0
_HAPPINESS = 1
_NEUTRAL = 2
_CONTEMPT = 3
_SURPRISE = 4
_SADNESS = 5
Example no. 33
# Paul Boersma 2017-01-25
#
# A sample script that uses the Vokaturi library to extract the emotions from
# a wav file on disk. The file has to contain a mono recording.
#
# Call syntax:
#   python3 measure_wav_linux32.py path_to_sound_file.wav

import sys
import scipy.io.wavfile

sys.path.append("../api")
import Vokaturi

print ("Loading library...")
Vokaturi.load("../lib/Vokaturi_linux32.so")
print ("Analyzed by: %s" % Vokaturi.versionAndLicense())

print ("Reading sound file...")
file_name = sys.argv[1]
(sample_rate, samples) = scipy.io.wavfile.read(file_name)
print ("   sample rate %.3f Hz" % sample_rate)

print ("Allocating Vokaturi sample array...")
buffer_length = len(samples)
print ("   %d samples, %d channels" % (buffer_length, samples.ndim))
c_buffer = Vokaturi.SampleArrayC(buffer_length)
if samples.ndim == 1:
	c_buffer[:] = samples[:] / 32768.0
else:
	c_buffer[:] = 0.5*(samples[:,0]+0.0+samples[:,1]) / 32768.0