Example #1
def textToSpeech(text):
    # lang = 'ko_KR'
    global lang
    lang = lang.replace('-','_')

    speech = Speech(text, lang)
    sox_effects = ("speed", "1.0")
    sox_effects = ("vol", "0.05")
    speech.play(sox_effects)
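
For reference, google_speech hands sox effects to the sox command as one flat sequence, so a single play() call can apply several effects at once. A minimal sketch (text and effect values are illustrative only):

speech = Speech("hello", "en")
speech.play(("speed", "1.0", "vol", "0.05"))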
Example #2
def textToSpeech(request):
    text = request.GET.get("text", "")
    lang = "ur"
    translation = database.child("DataSet").order_by_key().limit_to_first(
        3).get()
    translator = Translator()
    translatorResult = translator.translate(text, src="en", dest="ur")
    speech = Speech(translatorResult.text, lang)
    sox_effects = ("speed", "1.0")
    speech.play(sox_effects)
    return HttpResponse("1")
Example #3
def say(text=None):
    from google_speech import Speech

    if text is None:
        say_fortune()
    elif text.lower() == "flipkart":
        say_fk_fortune()
    else:
        lang = "en"
        speech = Speech(text, lang)
        speech.play()
Example #4
 def bookText(self, bookPath):
     if os.path.exists(bookPath):
         with open(bookPath, "rb") as file:
             pdf = PyPDF2.PdfFileReader(file)
             totalPages = pdf.getNumPages()
             pg = int(input(f"Enter page no. to read (1-{totalPages}): "))
             text = pdf.getPage(pg - 1).extractText()
             # print(text)
             if self.isOnline():
                 tts = Speech(text, "en")
                 tts.play()
             else:
                 print("Go Online for a better audio experience")
                 self.contentReader(text)
     else:
         print(f"Sorry no file found at {bookPath}")
Example #5
        def myfunc():
            self.is_speaking = True

            if self.speech_engine == 'not_set':
                if sys.platform == 'darwin':
                    self.speech_engine = 'mac'
                else:
                    self.speech_engine = 'google'  # assignment, not comparison

            if self.speech_engine == 'mac':
                os.system('say -v {} "{}"'.format(speaker, sentence))
            elif self.speech_engine == 'google':
                speech = Speech(sentence, lang)
                speech.play(tuple())

            self.is_speaking = False
Example #6
def speak(text):
    if text is None:
        text = "Sorry"
    print(text)
    lang = "en"
    speech = Speech(text, lang)
    return speech
Example #7
def speak_google_tts(text):
    """ This method implements Text to Speech using the Google Translate TTS.
    It uses Google Speech Python Package.
    :param text: Text which is needed to be spoken
    :return: None
    """
    Speech(text=text, lang='en').play(sox_effects=None)
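
Assuming the google_speech package and the sox binary are installed, the helper above could be exercised with a call such as (the text is illustrative only):

speak_google_tts("Hello from the Google Translate TTS")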
Example #8
def test_weekdays(amount):
    """Test your knowledge about weekdays"""
    lang = "en"
    result = 0
    weekdays = [
        "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday",
        "Sunday"
    ]

    for idx in range(amount):
        sleep(0.8)
        print("~" * 10)

        rnd = randrange(0, len(weekdays))
        text = weekdays[rnd]

        Speech(text, lang).play()

        test = input("Your answer (Weekday): ")

        if test == text:
            result += 1
            print("You are right.")
        else:
            print(f"The right answer is: {text}")
            sleep(2)

    print("Result:")
    print(f" {amount} - the number of questions")
    print(f" {result} - the number of RIGHT answers")
Example #9
        def myfunc():
            self.is_speaking = True

            if self.speech_engine == 'not_set':
                if sys.platform == 'darwin':
                    self.speech_engine = 'mac'
                else:
                    self.speech_engine = 'google'  # assignment, not comparison

            if self.speech_engine == 'mac':
                subprocess.call(['say', '-v', speaker, sentence])
            elif self.speech_engine == 'google':
                speech = Speech(sentence, lang)
                speech.play(tuple())

            self.is_speaking = False
Example #10
def myspeech():
     r = sr.Recognizer()
     with sr.Microphone() as source:
         print("Listening...")
         r.pause_threshold = 1
         audio = r.listen(source)
     try:
        query = r.recognize_google(audio, language='fr-in')
        global vocale
        vocale = query.split()
     except sr.UnknownValueError:
        speech = Speech("je ne vous ai pas compris", "fr")
        speech.play()

        query = str(speech)

     return query
Example #11
def make_wav(name, template, args):
    output_file = '{0}.mp3'.format(name)
    transcript = template.substitute(args)
    speech = Speech(
        transcript,
        random.choice(['en-UK', 'en-US', 'en-IE', 'en-ZA', 'en-NZ', 'en-KE']))
    speech.save('tmp.mp3')
    pitch = random.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    subprocess.call([
        'sox', 'tmp.mp3', 'tmp2.wav', 'pitch', '-{}00'.format(pitch), 'speed',
        '1.3', 'reverse'
    ])
    subprocess.call([
        'sox', 'tmp2.wav', output_file, 'reverse', 'silence', '1', '00:00:01',
        '-96d'
    ])
    return output_file
Example #12
def test(text):
    # reverberance
    sox_effects = ("reverb", "1.5")
    speech = Speech(text, "fr")
    speech.play((
        "speed",
        "1.2",
        "reverb",
        "80",
        "pitch",
        "-300",
        "pad",
        "1",
        "5",
        # "oops",
        #"lowpass", "100", "5"
    ))
Example #13
    def init_listener(self):
        try:
            print("A moment of silence, please...")
            with self.mic as source:
                self.r.adjust_for_ambient_noise(source)
            print("Set minimum energy threshold to {}".format(
                self.r.energy_threshold))
            while True:
                print("Listening!")
                with self.mic as source:
                    audio = self.r.listen(source)
                print("Got it! Now to recognize it...")
                try:
                    # recognize speech
                    value = self.r.recognize_google(audio)
                    #value = self.r.recognize_sphinx(audio)
                    print('val: ' + value)
                    value = value.lower().split()
                    # recognizing wake word and extracting from command
                    if value.pop(0) == 'hey' and value.pop(
                            0) == config.wake_word:
                        # we need some special handling here to correctly print unicode characters to standard output
                        value = ' '.join(value)
                        if str is bytes:  # this version of Python uses bytes for strings (Python 2)
                            print(u"You said {}".format(value).encode("utf-8"))
                            value = value.encode("utf-8")
                        else:  # this version of Python uses unicode for strings (Python 3+)
                            print("You said {}".format(value))
                        result = self.do_cmd(value)
                        if result:
                            lang = "en"
                            speech = Speech(result, lang)
                            sox_effects = ('speed', '1.0')
                            speech.play(sox_effects)
                            print(result)
                    else:
                        print('not a command')

                except sr.UnknownValueError:
                    print("Oops! Didn't catch that")
                except sr.RequestError as e:
                    print(
                        "Uh oh! Couldn't request results from Google Speech Recognition service; {0}"
                        .format(e))
        except KeyboardInterrupt:
            pass
Example #14
def say(text=None, lang="en", robot=False):
    import os
    from google_speech import Speech
    if text is None:
        say_fortune()
    elif text == "flipkart":
        say_fk_fortune()
    else:
        speech = Speech(text, lang)
        if not robot:
            sox_effects = ("speed", "1.02", "vol", "0.3")
        else:
            sox_effects = ("speed 0.9 overdrive 10 echo 0.8 0.7 "
                           "6 0.7 echo 0.8 0.7 10 0.7 echo 0.8 0.7 "
                           "12 0.7 echo 0.8 0.88 12 0.7 echo 0.8 "
                           "0.88 30 0.7 echo 0.6 0.6 60 0.7").split(" ")
        speech.play(sox_effects)
        print(text)
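
As a usage illustration (argument values are hypothetical), the robot flag above only swaps in the longer sox effect chain:

say("Good morning")                         # plain voice: slight speed-up, lower volume
say("Self destruct initiated", robot=True)  # overdriven, echoing "robot" voice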
Example #15
def speak_google_tts(text):
    """ This method implements Text to Speech using the Google Translate TTS.
    It uses Google Speech Python Package.
    :param text: Text which is needed to be spoken
    :return: None
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        fd, mpiii = tempfile.mkstemp(suffix='.mp3', dir=tmpdirname)
        Speech(text=text, lang=susicfg.get("language")).save(mpiii)
        player.say(mpiii)
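
If no separate player object is available, the same package can also speak directly through sox instead of saving an mp3 first; a minimal sketch with a hard-coded language (susicfg and player belong to the surrounding project):

Speech(text="audio check", lang="en").play(sox_effects=None)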
Example #16
def read_problem(
    operands,
    inter_operand_pause=1.5,
    language='en',
):
    speeches = []
    for operand in operands:
        speeches.append(Speech(str(operand), language))

    for speech in speeches:
        speech.play(None)
        time.sleep(inter_operand_pause)
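
As a usage illustration (operand values are arbitrary), the function above reads each number aloud with a pause between them:

read_problem([7, 12, 5], inter_operand_pause=1.0)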
Example #17
def listen():
    with iris.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    try:
        return recognizer.recognize_google(audio, language='pt-BR')
    except iris.UnknownValueError:
        print("Could not understand audio")
        master_name = open('master_name', 'r').read()
        Speech('Desculpe {0}, não entendi.'.format(master_name), 'pt-br').play()
    except iris.RequestError as e:
        print("Recog Error; {0}".format(e))
    return ""
Example #18
def get_google_speech(opts):
    
    tree = ET.parse(opts.xmlfile)
    root = tree.getroot()
    
    els=root.findall(".//pl")
    
    # print(els)
    
    pattern = re.compile(r"\((N ?)?[\d]\)")
    elnb=1
    for el in els:
        text=el.text
        lang = "pl"
        ahash = hashlib.md5(text.encode("utf-8")).hexdigest()
        filename="gspeech/"+ahash+".mp3"
        print("filename",filename)
    
        speech = Speech(text, lang)
        speech.save(filename)
        time.sleep(0.5)
        
        elnb=elnb+1
Example #19
def get_google(filename):

    f = open(filename, "r")
    lines = f.readlines()
    f.close()

    output = []
    counter = 1
    plregex = re.compile("<pl>(.+)</pl>")
    for line in lines:
        line = line.strip()
        match = plregex.search(line)
        # print(match)
        if match:
            text = match.group(1)
            print("polish", text)
            lang = "pl"
            filename = "gspeech/" + str(counter) + ".mp3"
            print("filename", filename)
            speech = Speech(text, lang)
            speech.save(filename)
            time.sleep(0.5)
            counter = counter + 1
Example #20
def say(text, speed=1.5, lang='en'):
    try:
        speech = Speech(text.replace('\n', ''), lang)

        # sox expects string arguments, so pass the speed factor as a string
        sox_effects = ('speed', str(speed))
        speech.play(sox_effects)
    except TypeError:
        print('Could not read {}'.format(text))
Example #21
 def hello():
     r = sr.Recognizer()                                                                                   
     with sr.Microphone() as source:                                
         print("Speak:")                                                                                   
         audio = r.listen(source)
         text = ""
     try:
         text = r.recognize_google(audio)
     except sr.UnknownValueError:
         print("Could not understand audio")
     except sr.RequestError as e:
         print("Could not request results; {0}".format(e))
     def bag_of_words(s, words):
         bag = [0 for _ in range(len(words))]
         s_words = nltk.word_tokenize(s)
         s_words = [stemmer.stem(word.lower()) for word in s_words]
         for se in s_words:
             for i, w in enumerate(words):
                 if w == se:
                     bag[i] = 1
         return numpy.array(bag)
     def chat():
         print("Start talking with the bot (type quit to stop)!")
         results = model.predict([bag_of_words(text, words)])
         results_index = numpy.argmax(results)
         tag = labels[results_index]
         for tg in data["intents"]:
             if tg['tag'] == tag:
                 responses = tg['responses']
         return random.choice(responses)
     c_text=str(chat())
     lang = "en"
     speech = Speech(c_text, lang)
     sox_effects = ("speed", "0.95")
     speech.play(sox_effects)
     return c_text,text
Example #22
def gen_audio_file(trs, langin, langout, pattern, fileout):

    temp = '/tmp'
    # settle tmp
    temp += '/mvoad-' + str(time.time())
    os.mkdir(temp)

    tmpfn = temp + '/mvoad.tmp.wav'
    tmpold = temp + '/mvoad.tmp.dest1.mp3'
    tmpdest = temp + '/mvoad.tmp.dest.mp3'

    for tr in trs:
        untrsname = temp + '/mvoad.src.word.mp3'
        trsname = temp + '/mvoad.dest.word.mp3'
        Speech(tr.origin, langin).save(untrsname)
        Speech(tr.text, langout).save(trsname)
        for item in pattern:
            # sox
            inter = tmpdest
            tmpdest = tmpold
            tmpold = inter
            if (item[0]):
                filename = trsname
            else:
                filename = untrsname
            t = sox.Transformer()
            t.tempo(item[1], 's')
            t.pad(0, item[2])
            if pattern.index(item) != 0 or trs.index(tr) != 0:
                t.build(filename, tmpfn)
                cbn = sox.Combiner()
                cbn.build([tmpdest, tmpfn], tmpold, 'concatenate')
            else:
                t.build(filename, tmpold)
    shutil.move(tmpold, fileout)
    shutil.rmtree(temp)
Example #23
def callback(data):
    global networkConnection
    print("Debug Data: ".format(data))

    if bt_flag == False:
        if networkConnection == True:
            # Google speech block
            speech = Speech(data.data, lang)
            speech.play()
            time.sleep(0.5)
        else:
            # On device mycroft mimic v1
            p = subprocess.Popen(
                ["/home/nullbyte/Desktop/myGit/mimic1/mimic", data.data],
                stdout=subprocess.PIPE)
            (output, err) = p.communicate()
            p_status = p.wait()
    else:
        cmd = "echo " + data.data + " > /dev/rfcomm0"
        print(cmd)
        p = subprocess.Popen(cmd, shell=True)
        (stdout, stderr) = p.communicate()
        print("stderr: {}".format(stderr))
        print("stdout: {}".format(stdout))
Example #24
def test_digits(amount, max, min):
    """Test your knowledge about digits."""
    prepositions = [
        "on",
        "it",
        "from",
        "to",
        "before",
        "after",
        "around",
        "until",
        "about",
        "over",
        "outside",
        "inside",
        "below",
        "above",
    ]

    lang = "en"
    result = 0

    prev_rnd = list()

    for idx in range(amount):
        sleep(0.8)
        print("~" * 10)
        rnd = randrange(min, max)

        while rnd in prev_rnd:
            rnd = randrange(min, max)
        prev_rnd.append(rnd)

        prep = prepositions[randrange(0, len(prepositions))]
        text = f".{prep} {rnd}"
        Speech(text, lang).play()

        test = input("Your answer (digit only): ")

        if int(test) == int(rnd):
            result += 1
            print("You are right.")
        else:
            print(f"The right answer is: {rnd}")

    print("Result:")
    print(f" {amount} - the number of questions")
    print(f" {result} - the number of RIGHT answers")
Example #25
def test_dates(amount, weekday):
    """Test your knowledge about dates"""
    lang = "en"
    result = 0

    prev_dates = list()

    start_date = datetime.date(1200, 1, 1)
    end_date = datetime.date(2200, 12, 31)

    time_between_dates = end_date - start_date
    days_between_dates = time_between_dates.days

    for idx in range(amount):
        sleep(0.8)
        print("~" * 10)

        random_number_of_days = randrange(days_between_dates)

        while random_number_of_days in prev_dates:
            random_number_of_days = randrange(days_between_dates)
        prev_dates.append(random_number_of_days)

        random_date = start_date + datetime.timedelta(
            days=random_number_of_days)

        if weekday:
            text = random_date.strftime("%A %B %d")
        else:
            text = random_date.strftime("%B %d")

        Speech(text, lang).play()

        if weekday:
            test = input("Your answer (Weekday Month Day): ")
        else:
            test = input("Your answer (Month Day): ")

        if test == text:
            result += 1
            print("You are right.")
        else:
            print(f"The right answer is: {text}")
            sleep(2)

    print("Result:")
    print(f" {amount} - the number of questions")
    print(f" {result} - the number of RIGHT answers")
Example #26
    def speak_and_save(self, text, path, index):
        speech = Speech(text, self.language)
        speech.play(sox_effects=("speed", DEFAULT_SPEED))

        if not os.path.isdir(path):
            os.mkdir(path)
            pass

        full_path = "{}/{}.mp3".format(path, index)
        print(full_path)
        speech.save(full_path)
Example #27
        def inner(*args, **kwargs):
            jk = kwargs.pop('jk', None)
            # before function call:
            if jk:
                text = "LOL! Then why are you asking me to run '" + fun.__name__ + "'? Unbelievable!"
                speech = Speech(text, lang)
                speech.play(None)

            # function call:
            if run_anyway or not jk:
                out = fun(*args, **kwargs)
            else:
                out = None

            # after function call:
            if jk:
                message_idx = randint(0, len(messages.default) - 1)
                text = messages.default[message_idx]
                speech = Speech(text, lang)
                speech.play(None)
            return out
Example #28
    def handleKeyEvent(self, symbol, modifiers):
        if symbol == pyglet.window.key.BACKSPACE:
            self.currentPage = START_PAGE  # assignment, not comparison
            return

        if self.currentPage == START_PAGE:
            if symbol == pyglet.window.key._1:
                self.initPage(MANAGE_WORDBANK_PAGE)
            elif symbol == pyglet.window.key._2:
                self.initPage(START_DICTATION_PAGE)
            elif symbol == pyglet.window.key._3:
                self.initPage(SETTINGS_PAGE)

        elif self.currentPage == MANAGE_WORDBANK_PAGE:
            pass

        elif self.currentPage == START_DICTATION_PAGE:
            if symbol == pyglet.window.key.ENTER:
                self.initPage(IN_DICTATION_PAGE)
            #test only
            if symbol == pyglet.window.key._1:
                self.toggleSelect(self.wordbank.wordlists()[0])
            elif symbol == pyglet.window.key._2:
                self.toggleSelect(self.wordbank.wordlists()[1])

        elif self.currentPage == IN_DICTATION_PAGE:
            if symbol == pyglet.window.key.RIGHT:
                print(self.wordsToDictate[self.dictationIndex])
                self.dictationIndex = (self.dictationIndex + 1) % len(
                    self.wordsToDictate)
                speech = Speech(self.wordsToDictate[self.dictationIndex],
                                LANG_CN_MANDARIN)
                speech.play()
            elif symbol == pyglet.window.key.R:
                speech = Speech(self.wordsToDictate[self.dictationIndex],
                                LANG_CN_MANDARIN)
                speech.play()

        elif self.currentPage == SETTINGS_PAGE:
            pass
Example #29
def textToSpeech(text):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    current_file = os.path.basename(__file__)
    current_file_name = current_file[:-3]
    log_dir = '{}/logs'.format(current_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    LOG_FILENAME = '{}/log_{}.log'.format(log_dir, current_file_name)
    logger = logging.getLogger('Google-Speech')
    logger.setLevel(logging.DEBUG)

    file_handler = logging.handlers.TimedRotatingFileHandler(
        filename=LOG_FILENAME, when='midnight', interval=1, encoding='utf-8')
    file_handler.suffix = 'log=%Y%m%d'
    logger.addHandler(file_handler)
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] %(message)s')
    file_handler.setFormatter(formatter)

    try:
        sox_effects = ("speech", "2")
        sox_effects = ("vol", "1")

        lang = "en_US"
        now_time = datetime.datetime.now().strftime("%Y %H %M %d%m")
        speech = Speech(now_time, lang)
        speech.play(sox_effects)

        lang = "ko_KR"
        speech = Speech(text, lang)
        speech.play(sox_effects)
    except Exception as ex:
        logger.error("Exception during sox playback (google speech): %s", ex)

    logger.info("[" + now_time + "] Text : " + text)
Example #30
 def say(self, text):
     speech = Speech(text, self.lang)
     speech.play()
def Face(pirPin):
    #don't render frame.
    #uses picamera library to capture frames. 

    #Get a reference to the Raspberry Pi camera.
    camera = None
    while not camera:
        try:
            camera = picamera.PiCamera()
        except picamera.PiCameraError:
            time.sleep(1)  # camera busy or not ready yet; retry
            
    camera.resolution = (320, 240)
    output = np.empty((240, 320, 3), dtype=np.uint8)

    #path to save unknown person's photos
    path = '/home/pi/python_server/Unknown_People/'

    #load known faces
    print("Loading known face image(s)")
    # Load face encodings
    while True:
        try:
            with open(r'/home/pi/python_server/dataset_faces.dat', 'rb') as f:
                all_face_encodings = pickle.load(f)
                break
        except IOError:
            continue

    # Grab the list of names and the list of encodings
    known_face_names = list(all_face_encodings.keys())
    known_face_encodings = np.array(list(all_face_encodings.values()))

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    t_end = time.time() + 20*1
    while time.time() <= t_end:
        print("Capturing image.")
        # Grab a single frame of video from the RPi camera as a numpy array
        camera.capture(output, format="rgb")

        # Loop over each face found in the frame to see if it's someone we know.
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(output)
            print("Found {} faces in image.".format(len(face_locations)))

            face_encodings = face_recognition.face_encodings(output, face_locations)

            face_names = []
            known = False
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    known = True
                else:
                    name = "भवानी सेवा दल मेें आपका स्वागत है, आपकी एंट्री नहीं है, कृपया अपनी एंट्री कराये।"
                    now = datetime.now()
                    dt_string = now.strftime("%d-%m-%Y, %H-%M-%S")
                    cv2.imwrite(path + dt_string + '.jpg', output)
                face_names.append(name)

        print(*face_names, sep = ", ")
    
        #play names of detected people
        lang = "hi"
        sox_effects = ("speed", "1.0")
        for name in face_names:
            speech = Speech(name, lang)
            speech.play(sox_effects)
        
        if known == True:
            retractActuator(person=len(face_locations))
            stopActuator()
            extendActuator(person=len(face_locations))
            stopActuator()
            known = False

        #toggle process_this_frame var to run FR on alternate frames
        process_this_frame = not process_this_frame


        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        
    #release picamera resource
    if time.time() >= t_end:
        if camera:
           camera.close()