Example #1
    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        original = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        pronounced = g2p.translateWords(original)
        zipped = zip(original, pronounced)
        lines = ["%s %s" % (x, y) for x, y in zipped]

        with open("dictionary_spotify.dic", "w") as f:
            f.write("\n".join(lines) + "\n")

        with open("sentences_spotify.txt", "w") as f:
            f.write("\n".join(original) + "\n")
            f.write("<s> \n </s> \n")

        # build the ARPA language model with the CMU-Cambridge SLM tools
        # (text2idngram builds the id n-gram, idngram2lm converts it to ARPA format)
        os.system(
            "text2idngram -vocab sentences_spotify.txt < sentences_spotify.txt -idngram spotify.idngram")
        os.system(
            "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm")

        # create a new mic with the new music models
        self.mic = Mic(
            speaker.newSpeaker(),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                                dictd_music="dictionary_spotify.dic"),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                                dictd_music="dictionary_spotify.dic")
        )
Example #2
    def setUp(self):
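        # pre-recorded audio clips used as test fixtures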
        self.jasper_clip = "../static/audio/jasper.wav"
        self.time_clip = "../static/audio/time.wav"

        from mic import Mic
        self.m = Mic(speaker.newSpeaker(), "languagemodel.lm", "dictionary.dic",
                     "languagemodel_persona.lm", "dictionary_persona.dic")
Example #3
	def __init__(self):
		self.speaker = speaker.newSpeaker()

		# interprocess communication via DispatcherClient
		self.DISPATCHER_PORT = 9002
		self.dispatcherClient = DispatcherClient(port=self.DISPATCHER_PORT)

		## Attach event listeners
		self.attachEvents(self.speaker)
Example #4
	def __init__(self):
		self.speaker = speaker.newSpeaker()

		# interprocess communication via DispatcherClient
		self.DISPATCHER_PORT = 9002
		self.dispatcherClient = DispatcherClient(port=self.DISPATCHER_PORT)

		self.inputBuffer = []
		self.checkNavigation = []
Example #5
	def __init__(self):
		self.speaker = speaker.newSpeaker()

		# interprocess communication via DispatcherClient
		self.DISPATCHER_PORT = 9002
		self.dispatcherClient = DispatcherClient(port=self.DISPATCHER_PORT)

		## For collision detection: lock navigation until ready
		self.collisionLocked = False
		self.obstacle = None
		self.OngoingNav = 0
		self.cruncherAlert = 0
		self.infotosay = None
		self.cruncherInfotosay = ""

		## Attach event listeners
		self.attachEvents(self.speaker)
Example #6
    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        words = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        text = "\n".join(["<s> %s </s>" for word in words])
        # make language model
        vocabcompiler.compile_text(text, languagemodel_spotify)

        # create a new mic with the new music models
        self.mic = Mic(
            speaker.newSpeaker(),
            stt.PocketSphinxSTT(lmd_music=languagemodel_spotify, dictd_music=dictionary_spotify),
            stt.PocketSphinxSTT(lmd_music=languagemodel_spotify, dictd_music=dictionary_spotify)
        )
Example #7
    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        words = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        text = "\n".join(["<s> %s </s>" for word in words])
        # make language model
        vocabcompiler.compile_text(text, languagemodel_spotify)

        # create a new mic with the new music models
        self.mic = Mic(
            speaker.newSpeaker(),
            stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                                dictd_music=dictionary_spotify),
            stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                                dictd_music=dictionary_spotify))
Example #8
    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        original = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        pronounced = g2p.translateWords(original)
        zipped = zip(original, pronounced)
        lines = ["%s %s" % (x, y) for x, y in zipped]

        with open("dictionary_spotify.dic", "w") as f:
            f.write("\n".join(lines) + "\n")

        with open("sentences_spotify.txt", "w") as f:
            f.write("\n".join(original) + "\n")
            f.write("<s> \n </s> \n")

        # make language model
        os.system(
            "text2idngram -vocab sentences_spotify.txt < sentences_spotify.txt -idngram spotify.idngram"
        )
        os.system(
            "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm"
        )

        # create a new mic with the new music models
        self.mic = Mic(
            speaker.newSpeaker(),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                                dictd_music="dictionary_spotify.dic"),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                                dictd_music="dictionary_spotify.dic"))
Example #9
if __name__ == "__main__":

    print "==========================================================="
    print " JASPER The Talking Computer                               "
    print " Copyright 2013 Shubhro Saha & Charlie Marsh               "
    print "==========================================================="

    profile = yaml.safe_load(open("profile.yml", "r"))

    try:
        api_key = profile["keys"]["GOOGLE_SPEECH"]
    except KeyError:
        api_key = None

    try:
        stt_engine_type = profile["stt_engine"]
    except KeyError:
        print "stt_engine not specified in profile, defaulting to PocketSphinx"
        stt_engine_type = "sphinx"

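    # the first STT engine handles passive (wake-word) listening,
    # the second handles the active command that follows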
    mic = Mic(speaker.newSpeaker(), stt.PocketSphinxSTT(),
              stt.newSTTEngine(stt_engine_type, api_key=api_key))

    addendum = ""
    if "first_name" in profile:
        addendum = ", %s" % profile["first_name"]
    mic.say("How can I be of service%s?" % addendum)

    conversation = Conversation("JASPER", mic, profile)

    conversation.handleForever()
Example #10
import time
import re
import yaml
import bible_search
import bible_lists
import sys
import select

WORDS = ["READ", "BIBLE"]

with open("config.txt") as config:
    lang = config.read().strip()

profile = yaml.safe_load(open("profile.yml", "r"))
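# choose the command language model and dictionary for the configured language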
if "INDONESIAN" in lang:
    mic = Mic(speaker.newSpeaker(), "languagemodel_command.lm", "dictionary_commandindo.dic", "languagemodel_persona.lm", "dictionary_persona.dic")
else:
    mic = Mic(speaker.newSpeaker(), "languagemodel_command.lm", "dictionary_command.dic", "languagemodel_persona.lm", "dictionary_persona.dic")
 

def isValid(text):
    """
        Returns True if the input asks to read the Bible

        Arguments:
        text -- user-input, typically transcribed speech
    """
    return bool(re.search(r'\b(read|bible)\b', text, re.IGNORECASE))

  
class BibleReader:
Example #11
path = os.getenv("PATH")
if path:
    path = os.pathsep.join([path, "/usr/local/lib/"])
else:
    path = "/usr/local/lib/"
os.environ["PATH"] = path

import urllib2
import vocabcompiler
import traceback

lib_path = os.path.abspath('../client')
sys.path.append(lib_path)

import speaker as speak
speaker = speak.newSpeaker()


def testConnection():
    try:
        urllib2.urlopen("http://www.google.com").getcode()
        print "CONNECTED TO INTERNET"

    except urllib2.URLError:
        print "COULD NOT CONNECT TO NETWORK"
        speaker.say(
            "Warning: I was unable to connect to a network. Parts of the system may not work correctly, depending on your setup."
        )


def fail(message):
Example #12
    except KeyError:
        api_key = None

    try:
        stt_engine_type = profile['stt_engine']
    except KeyError:
        log.warn("stt_engine not specified in profile, defaulting to PocketSphinx")
        stt_engine_type = "sphinx"
    
    log.debug("command line args: %s" % args)
    
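    # pick the STT engines from the command-line mode; the pipe/sock/local modes
    # appear to skip speech recognition entirely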
    try:
        if args.no_speaker:
            spk = speaker.DummySpeaker(log, profile)
        else:
            spk = speaker.newSpeaker(log, profile)

        if (not args.pipe and not args.sock and not args.local
                and not args.half_local and not args.dynamic):
            passiveSTT = stt.PocketSphinxSTT(logger=log)
            activeSTT = stt.newSTTEngine(stt_engine_type, logger=log,
                                         api_key=api_key)
        elif args.half_local or args.dynamic:
            passiveSTT = None
            activeSTT = stt.newSTTEngine(stt_engine_type, logger=log,
                                         api_key=api_key)
        else:
            passiveSTT = None
            activeSTT = None

        mic = Mic(spk, passiveSTT, activeSTT, log, profile=profile)
    except:
        log.critical("fatal error creating mic", exc_info=True)
        exit(1)
Example #13
    print "==========================================================="
    print " JASPER The Talking Computer                               "
    print " Copyright 2013 Shubhro Saha & Charlie Marsh               "
    print "==========================================================="

    profile = yaml.safe_load(open("profile.yml", "r"))

    try:
        api_key = profile['keys']['GOOGLE_SPEECH']
    except KeyError:
        api_key = None

    try:
        stt_engine_type = profile['stt_engine']
    except KeyError:
        print "stt_engine not specified in profile, defaulting to PocketSphinx"
        stt_engine_type = "sphinx"

    mic = Mic(speaker.newSpeaker(), stt.PocketSphinxSTT(),
              stt.newSTTEngine(stt_engine_type, api_key=api_key))
    house = House(mic)
    addendum = ""
    if 'first_name' in profile:
        addendum = ", %s" % profile["first_name"]
    mic.say("How can I be of service%s?" % addendum)

    conversation = Conversation("JASPER", mic, profile, house)

    conversation.handleForever()
Example #14
                            # false activation or speech not recognised
                            self.speaker.play("../static/audio/beep_hi.mp3")

            except KeyboardInterrupt:
                print 'got break'
                break


if __name__ == "__main__":
    l = jasperLogger.jasperLogger(level=logging.DEBUG,
                                  logFile='persistentCache.log',
                                  console=True)
    logger = l.getLogger()
    profile = yaml.safe_load(open("profile.yml", "r"))
    #spk = speaker.DummySpeaker(logger, profile)
    spk = speaker.newSpeaker(logger, profile)
    activeSTT = stt.newSTTEngine(profile['stt_engine'],
                                 logger=logger,
                                 api_key=profile['keys']['GOOGLE_SPEECH'])

    logger.info("start")
    if len(sys.argv) < 2:
        f = "active.wav"
    else:
        f = sys.argv[1]

    mic = Mic(spk, activeSTT, activeSTT, logger, profile)
    #f = 'samples/wlacz_dekoder_ncplusAt1003_4m_RodeM3_SBLive.wav'
    #f = 'samples/wlacz_dekoder_ncplusAt1003_1m_RodeM3_SBLive.wav'
    #mic.find_input_device()
    #print f
Example #15
def isLocal():
    return len(sys.argv) > 1 and sys.argv[1] == "--local"


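# "--local" swaps in the console-driven local Mic so no microphone is needed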
if isLocal():
    from local_mic import Mic
else:
    from mic import Mic

if __name__ == "__main__":

    print "==========================================================="
    print " JASPER The Talking Computer                               "
    print " Copyright 2013 Shubhro Saha & Charlie Marsh               "
    print "==========================================================="

    profile = yaml.safe_load(open("profile.yml", "r"))

    mic = Mic(speaker.newSpeaker(), "languagemodel.lm", "dictionary.dic",
              "languagemodel_persona.lm", "dictionary_persona.dic")

    addendum = ""
    if "first_name" in profile:
        addendum = ", %s" % profile["first_name"]
    mic.say("How can I be of service%s?" % addendum)

    conversation = Conversation("JASPER", mic, profile)

    conversation.handleForever()
Example #16
#!/usr/bin/env python

import os
import urllib2
import sys

import vocabcompiler
import traceback

lib_path = os.path.abspath('../client')
sys.path.append(lib_path)

import speaker as speak
speaker = speak.newSpeaker()

def configure():
    try:
        urllib2.urlopen("http://www.google.com").getcode()

        print "CONNECTED TO INTERNET"
        print "COMPILING DICTIONARY"
        vocabcompiler.compile("../client/sentences.txt", "../client/dictionary.dic", "../client/languagemodel.lm")

        print "STARTING CLIENT PROGRAM"
        os.system("$JASPER_HOME/jasper/client/start.sh &")

    except:
        print "COULD NOT CONNECT TO NETWORK"
        traceback.print_exc()
        speaker.say("Hello, I could not connect to a network. Please read the documentation to configure your Raspberry Pi.")
Example #17
                else:
                  # false activation or speech not recognised
                  self.speaker.play("../static/audio/beep_hi.mp3")
                
          except KeyboardInterrupt:
            print 'got break'
            break



if __name__ == "__main__":
    l = jasperLogger.jasperLogger(level=logging.DEBUG, logFile='persistentCache.log', console=True)
    logger = l.getLogger()
    profile = yaml.safe_load(open("profile.yml", "r"))
    #spk = speaker.DummySpeaker(logger, profile)
    spk = speaker.newSpeaker(logger, profile)
    activeSTT  = stt.newSTTEngine(profile['stt_engine'], logger=logger, api_key=profile['keys']['GOOGLE_SPEECH'])

    logger.info("start")
    if len(sys.argv) < 2:
        f = "active.wav"
    else:
        f = sys.argv[1]

    mic = Mic(spk, activeSTT, activeSTT, logger, profile)
    #f = 'samples/wlacz_dekoder_ncplusAt1003_4m_RodeM3_SBLive.wav'
    #f = 'samples/wlacz_dekoder_ncplusAt1003_1m_RodeM3_SBLive.wav'
    #mic.find_input_device()
    #print f
    #raw_input("<pause>")
Example #18
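				# scan this column's rows; a low reading on ROW[i] means the key at (i, j) is pressed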
				for i in range(4):
					if GPIO.input(ROW[i]) == 0:  # a key is pressed
						print MATRIX[i][j]
						if MATRIX[i][j] != '#':  # '#' marks the end of the input
							inputId.append(MATRIX[i][j])
							mic.say(MATRIX[i][j])
						else:
							print inputId
							# wait for the key to be released, then return the collected ID
							while GPIO.input(ROW[i]) == 0:
								pass
							return inputId
						# debounce: wait until the key is released before scanning again
						while GPIO.input(ROW[i]) == 0:
							pass

				GPIO.output(COL[j], 1)
			time.sleep(0.1)

	except KeyboardInterrupt:
		GPIO.cleanup()


if __name__ == '__main__':

	Speaker = speaker.newSpeaker()
	getInput(Speaker)