Example #1
    def openApp(self, tokens):
        say("Opening " + tokens[1])
        self.command = """xdotool key Super_L
sleep {}
xdotool type {}
sleep {}
xdotool key Return""".format(DELAY, tokens[1], DELAY)
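
This snippet only builds a multi-line shell script in self.command; Example #6 below shows such a string being run line by line with os.system. A minimal standalone sketch of that execution step, using a hard-coded command string purely for illustration:

import os

# Hypothetical command string of the kind openApp() builds above.
command = """xdotool key Super_L
sleep 1
xdotool type firefox
sleep 1
xdotool key Return"""

# Run each line as a separate shell command, as Example #6 does.
for line in command.split('\n'):
    os.system(line)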
Example #2
    def say(self,
            say_text,
            language="en",
            gender="male",
            variant=0,
            capital_emphasis=None,
            pitch=None,
            speed=None,
            gap=None,
            amplitude=None,
            extra_args=None,
            stdout=None,
            wav_fp=None,
            add_silence=True):

        if self.on_air():
            say(say_text,
                language=language,
                gender=gender,
                variant=variant,
                pitch=pitch,
                speed=speed,
                gap=gap,
                amplitude=amplitude,
                extra_args=extra_args,
                capital_emphasis=capital_emphasis,
                stdout=self.wav_pipe_w)

            # Play a sample of silence, to prevent pifm from looping buffer
            if add_silence:
                self.play_silence()
        else:
            raise RadioNotRunningError
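
RadioNotRunningError is raised here but not defined in the excerpt; a minimal assumed definition would be a plain Exception subclass:

# Assumed shape of the exception used above (not shown in the excerpt).
class RadioNotRunningError(Exception):
    """Raised when say() is called while the radio transmitter is not on air."""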
Example #3
    def say(self, say_text, language="en", gender="male", variant=0, capital_emphasis=None,
            pitch=None, speed=None, gap=None, amplitude=None, extra_args=None,
            stdout=None, wav_fp=None, add_silence=True):

        if self.on_air():
            say(say_text, language=language, gender=gender, variant=variant,
                pitch=pitch, speed=speed, gap=gap, amplitude=amplitude, extra_args=extra_args,
                capital_emphasis=capital_emphasis, stdout=self.wav_pipe_w)

            # Play a sample of silence, to prevent pifm from looping buffer
            if add_silence:
                self.play_silence()
        else:
            raise RadioNotRunningError
Example #4
    def copy(self, tokens):
        if "\\0" in tokens:
            tokens.remove("\\0")
        if len(tokens) == 1:
            self.command = "xdotool key Control_L+c"
        elif len(tokens) == 2:
            self.command = """sleep {}
xdotool key ctrl+BackSpace
sleep {}
xdotool type {}
sleep {}
xdotool key Control_L+c""".format(DELAY, DELAY, tokens[1], LONG_DELAY)
            say("Copied")
        else:
            self.command = "cp -a " + tokens[1] + " " + tokens[2]
Example #5
def confirmation(text):
    say(text)
    if VOICE:
        answer = voice_input()
    else:
        answer = text_input()
    answer = answer.lower()

    affirmative = ["yes", "ya", "aye", "yeah"]
    negative = ["no", "nope", "don't"]
    
    if (commonelements(affirmative, answer.split())) and (not commonelements(negative, answer.split())):
        return True
    elif (commonelements(negative, answer.split())) and (not commonelements(affirmative, answer.split())):
        return False
    else:
        return confirmation(text)
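
The commonelements helper used above is not part of the excerpt; assuming it only has to report whether two word lists share a member, a one-line sketch:

def commonelements(first, second):
    # True if the two sequences have at least one element in common.
    return bool(set(first) & set(second))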
Example #6
    def execute(self):
        if self.command == "Nothing":
            return 1
        elif self.command == "Chatbot":
            return 2
        else:
            commands = self.command.split('\n')
            print(commands)
            for command in commands:
                """
                arg = shlex.split(self.command)
                p = Popen(arg)
                self.processid = p.pid
                """
                os.system(command)
                self.processid = 0
            say("Done.")
            return 0
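
The commented-out block hints at a non-blocking variant built on subprocess.Popen; a sketch of that approach, keeping the processid bookkeeping (note that, unlike os.system, Popen does not wait for each command to finish):

import shlex
from subprocess import Popen

def run_commands(command):
    # Sketch of the Popen-based variant hinted at in the commented-out block.
    processid = 0
    for line in command.split('\n'):
        p = Popen(shlex.split(line))
        processid = p.pid  # remember the last child's pid
    return processid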
Example #7
def read_input(signal=False):
    punctuations = [',','?',"'", '"','!','@','#','$','%','^','&','*','-','|','=','`']
    flag = True
    sentence = ""
    wake = ["wake", "awake"]
    while flag:
        if VOICE:
            sentence = voice_input()
        else:
            sentence = text_input()
        chat_sentence = sentence

        sentences = sent_tokenize(sentence)
        split_sentence = []
        for i in sentences:
            phrases = i.split('and')
            split_sentence = split_sentence + phrases
        
        split_sentence = [i.strip() for i in split_sentence]

        """
        Formatting of input
        """
        sentences = []

        for sentence in split_sentence:
            sentence = sentence.lower()
            for i in punctuations:
                sentence = sentence.replace(i, "")
            sentence = sentence.replace("  ", " ")
            sentence = sentence.replace("please", "")
            sentence = sentence.strip('.')
            sentence = sentence.strip()
            if commonelements(wake, sentence.split()) or (not signal):
                flag = False
            else:
                flag = True

            if sentence in cliche_choices:
                flag = True
                say(cliche(sentence))
            sentences.append(sentence)
    print(sentences)
    return (sentences, chat_sentence)
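
For illustration, the sent_tokenize plus split('and') step behaves roughly as follows on a compound request; splitting on the raw substring 'and' also breaks words that merely contain it (e.g. 'sandwich'), which is why the cleanup above is needed:

# Illustration only; assumes nltk and its punkt tokenizer data are installed.
from nltk.tokenize import sent_tokenize

text = "open firefox and play some music. what time is it"
parts = []
for s in sent_tokenize(text):
    parts += s.split('and')
parts = [p.strip() for p in parts]
# parts -> ['open firefox', 'play some music.', 'what time is it']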
Example #8
    def createFolder(self, tokens):
        if len(tokens) == 2:
            say("Creating folder {}".format(tokens[1]))
            self.command = """sleep {}
xdotool key Control_L+Shift_L+n
sleep {}
xdotool type {}
xdotool key Return""".format(DELAY, DELAY, tokens[1])

        if len(tokens) > 2:
            if os.path.isdir(tokens[2]):
                destination = tokens[2]
                if destination[-1] != '/':
                    destination = destination + '/'
                destination = destination + tokens[1]
                self.command = "mkdir " + destination
            
            else:
                Inform("Location " + tokens[2] + " does not exist.")
Example #9
def gettext():
    r = sr.Recognizer()
    while True:
        with sr.Microphone() as source:
            print("Say something!")
            audio = r.listen(source)
            print("Finished recording")
            say("Okay.")

        try:
            text = r.recognize_google(audio)
            print("You said : " + text)
            return text

        except sr.UnknownValueError:
            say("Hey")

        except sr.RequestError as e:
            say("Hey")
Example #10
    def dateInform(self, tokens):

        months = ["January", "February", "March", "April", "May", "June", 
                  "July", "August", "September", "October", "November", "December"]
        if len(tokens) == 2:
            if tokens[1] == "time":
                say("Time is "+ str(time.strftime("%I:%M %p")))

            elif tokens[1] == "date":
                today = str(date.today())
                features = today.split('-')
                features = features[::-1]
                if features[0][0] == '0':
                    features[0] = features[0][1]
                speech = "Today is " + str(features[0]) + "th " + months[int(features[1])-1] + " " + features[2]
                say(speech)
        
        else:
            week = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
            today = datetime.today().weekday()
            today = week[int(today)]
            speech = "Today is " + today
            say(speech)
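
The manual month lookup and leading-zero stripping can also be expressed with strftime (dropping the hard-coded "th" ordinal suffix); a sketch of the equivalent strings:

import time
from datetime import date, datetime

print("Time is " + time.strftime("%I:%M %p"))
print("Today is " + date.today().strftime("%d %B %Y").lstrip("0"))
print("Today is " + datetime.today().strftime("%A"))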
Example #11
def decode():

    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session(config=config) as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Load vocabularies.
        enc_vocab_path = os.path.join(
            gConfig['working_directory'],
            "vocab%d.enc" % gConfig['enc_vocab_size'])
        dec_vocab_path = os.path.join(
            gConfig['working_directory'],
            "vocab%d.dec" % gConfig['dec_vocab_size'])

        enc_vocab, _ = data_utils.initialize_vocabulary(enc_vocab_path)
        dec_vocab, rev_dec_vocab = data_utils.initialize_vocabulary(
            dec_vocab_path)

        # Decode from standard input.
        sys.stdout.write("> ")
        sys.stdout.flush()
        sentence = sys.stdin.readline()

        while sentence:
            sentence, Hash = inputrepair(sentence, enc_vocab)
            print(sentence)
            # Get token-ids for the input sentence.
            token_ids = data_utils.sentence_to_token_ids(
                tf.compat.as_bytes(sentence), enc_vocab)
            # Which bucket does it belong to?
            bucket_id = min([
                b for b in xrange(len(_buckets))
                if _buckets[b][0] > len(token_ids)
            ])
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            # Get output logits for the sentence.
            _, _, output_logits = model.step(sess, encoder_inputs,
                                             decoder_inputs, target_weights,
                                             bucket_id, True)
            # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [
                int(np.argmax(logit, axis=1)) for logit in output_logits
            ]
            # If there is an EOS symbol in outputs, cut them at that point.
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]
            # Print out French sentence corresponding to outputs.
            output = " ".join([
                tf.compat.as_str(rev_dec_vocab[output]) for output in outputs
            ])
            print(Hash)
            print(output)
            output = outputrepair(output, Hash)
            #ttos.speak(output)
            #Hspeaker(output)
            say(output)
            #engine.say(output)
            #print(" ".join([tf.compat.as_str(rev_dec_vocab[output]) for output in outputs]))
            print("> ", end="")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
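
The bucket_id expression above selects the smallest bucket whose encoder length exceeds the token count; for illustration, with hypothetical bucket sizes:

# Hypothetical bucket sizes, for illustration only.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
token_ids = [12, 7, 4, 9, 30, 2, 18]  # 7 tokens
bucket_id = min(b for b in range(len(_buckets))
                if _buckets[b][0] > len(token_ids))
# bucket_id -> 1, i.e. the (10, 15) bucket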
Example #12
    def createFile(self, tokens):
        say("Doesn't support this operation right now.")
Example #13
        start_moving = not start_moving

    turning = (average_angles[0] if (average_angles[1] < 90) else -average_angles[0])
    last_turn.append(turning)

    if start_moving:
        trn = 0.75*last_turn[0]
        fwd = abs(15 - 10 * (abs(trn)/90))
        print fwd, trn
        r.go(fwd, trn)
        print fwd, trn
        #r.turn(trn/2, 20)
    last_turn = last_turn[1:]

    #    print " ".join([str(x) for x in last_turn])

    if is_talking and circles is not None and len(circles) > 0:
        r.move(25, 0)
        texttospeech.say(index)
        index += 1
        if(index > 3):
            break
        r.stop()


    if key == 27: # exit on ESC
        break

r.stop()
r.close()
Example #14
File: decoder.py  Project: Tinson09/Talk-OS
def decode():

    PERFORM = True

    gConfig = get_config()
    chatConfig = get_config('seq2seq.ini')
    # Only allocate part of the gpu memory when predicting.

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(gpu_options=gpu_options)
    graph = tf.Graph()
    chat_graph = tf.Graph()
    chat_session = tf.Session(config=config, graph=chat_graph)
    sess = tf.Session(config=config, graph=graph)
    model = None
    chatmodel = None
    # Create model and load parameters.
    with graph.as_default():
        model = create_model(sess, True, use_lstm=False, gConfig=gConfig)
    with chat_graph.as_default():
        chatmodel = create_model(chat_session,
                                 True,
                                 use_lstm=True,
                                 gConfig=chatConfig)
    say('Loaded required models. Ready')
    model.batch_size = 1  # We decode one sentence at a time.
    chatmodel.batch_size = 1
    # Load vocabularies.
    enc_vocab_path = os.path.join(gConfig['working_directory'],
                                  'vocab%d.enc' % gConfig['enc_vocab_size'])
    dec_vocab_path = os.path.join(gConfig['working_directory'],
                                  'vocab%d.dec' % gConfig['dec_vocab_size'])
    # Load chat vocabularies.
    chat_enc_vocab_path = \
        os.path.join(chatConfig['working_directory'], 'vocab%d.enc'
                     % chatConfig['enc_vocab_size'])
    chat_dec_vocab_path = \
        os.path.join(chatConfig['working_directory'], 'vocab%d.dec'
                     % chatConfig['dec_vocab_size'])
    (enc_vocab, _) = \
        data_utils.initialize_vocabulary(enc_vocab_path)
    (dec_vocab, rev_dec_vocab) = \
        data_utils.initialize_vocabulary(dec_vocab_path)
    (chat_enc_vocab, _) = \
        data_utils.initialize_vocabulary(chat_enc_vocab_path)
    (chat_dec_vocab, chat_rev_dec_vocab) = \
        data_utils.initialize_vocabulary(chat_dec_vocab_path)
    # Decode from standard input.
    sentences, chat_sentence = read_input()
    """ 
    .
    .
    Program loop start 
    .
    .
    """
    while True:
        for sentence in sentences:
            if (not PERFORM) and (sentence == "chatbot off"
                                  or sentence == "stop chatbot"
                                  or sentence == "start working"
                                  or sentence == "stop chat"):
                PERFORM = True
                break

            if sentence == "terminate session" or sentence == "terminate this session":
                return

            if not PERFORM:
                sentence = chat_sentence

            #print("SENTENCE stage 1: "+sentence)
            if PERFORM:
                (sentence, Hash) = inputrepair(sentence, enc_vocab)
            #print("SENTENCE stage 2: "+sentence)
            # Get token-ids for the input sentence.
            token_ids = \
                data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence),
                    enc_vocab)
            chat_token_ids = \
                data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence),
                    chat_enc_vocab)
            # Which bucket does it belong to?
            bucket_id = min([
                b for b in xrange(len(_buckets))
                if _buckets[b][0] > len(token_ids)
            ])
            chat_bucket_id = min([
                b for b in xrange(len(_buckets))
                if _buckets[b][0] > len(chat_token_ids)
            ])
            # Get a 1-element batch to feed the sentence to the model.
            if PERFORM:
                (encoder_inputs, decoder_inputs, target_weights) = \
                    model.get_batch({bucket_id: [(token_ids, [])]},
                                    bucket_id)
                # Get output logits for the sentence.
                (_, _, output_logits) = model.step(
                    sess,
                    encoder_inputs,
                    decoder_inputs,
                    target_weights,
                    bucket_id,
                    True,
                )
            else:
                (chat_encoder_inputs, chat_decoder_inputs,
                 chat_target_weights) = \
                    chatmodel.get_batch({chat_bucket_id: [(chat_token_ids, [])]},
                                        chat_bucket_id)
                (_, _, output_logits) = chatmodel.step(
                    chat_session,
                    chat_encoder_inputs,
                    chat_decoder_inputs,
                    chat_target_weights,
                    chat_bucket_id,
                    True,
                )
                # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [
                int(np.argmax(logit, axis=1)) for logit in output_logits
            ]
            # If there is an EOS symbol in outputs, cut them at that point.
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]
                # print(Hash)
            if PERFORM:
                output = ' '.join([
                    tf.compat.as_str(rev_dec_vocab[output])
                    for output in outputs
                ])
                output = outputrepair(output, Hash)
                task = Execution(output)
                status = task.execute()
            else:
                output = ' '.join([
                    tf.compat.as_str(chat_rev_dec_vocab[output])
                    for output in outputs
                ])
                say(output)
            if status == 1:
                state = True
            else:
                state = False
            if status == 2:
                PERFORM = False
        sentences, chat_sentence = read_input(state)