Code Example #1
File: views.py Project: MauriIC19/PyStudent
def dictado(request):
    if request.session['id'] != 0:
        grade = request.GET.get('grado')

        if request.method == 'POST':
            key = request.POST.get('key')
            textoPalabras = Palabras.objects.filter(dificultad = int(grade))
            pal = []

            for palabra in textoPalabras:
                pal.append(palabra.palabra)

            pythoncom.CoInitialize()
            text = pal[int(key)]
            src = "/static/audio/audio.m4a"
            engine = CreateObject("SAPI.SpVoice")
            stream = CreateObject("SAPI.SpFileStream")
            from comtypes.gen import SpeechLib
            # Thanks Chuy
            path = os.path.join(BASE_DIR, "PyStudent\\static\\audio\\audio.m4a")
            #stream.Open("C:/Users/sasuk/Desktop/PyStudent/PyStudent/static/audio/audio.m4a", SpeechLib.SSFMCreateForWrite)
            stream.Open(path, SpeechLib.SSFMCreateForWrite)
            engine.AudioOutputStream = stream
            engine.speak(text)
            stream.Close()

            return HttpResponse(src)

        return render(request, 'dictadoPalabras.html')
    else:
        return redirect('/pystudent/')
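Every snippet on this page repeats the same core pattern: create a SAPI.SpVoice engine and a SAPI.SpFileStream through comtypes, point the engine's AudioOutputStream at the stream, speak, then close the stream so the speech lands in a WAV file instead of the speakers. A minimal, self-contained sketch of that pattern (Windows only, comtypes installed; the output file name here is an arbitrary placeholder):

import os
from comtypes.client import CreateObject

engine = CreateObject("SAPI.SpVoice")
stream = CreateObject("SAPI.SpFileStream")
# comtypes generates this module the first time a SAPI object is created,
# which is why the examples import it only after CreateObject()
from comtypes.gen import SpeechLib

stream.Open(os.path.abspath("output.wav"), SpeechLib.SSFMCreateForWrite)
engine.AudioOutputStream = stream
engine.Speak("Hello, world")
stream.Close()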
Code Example #2
File: proc.py Project: Arinda95/Speakr
def audioWise():
    File = open("text/loadedFile.svcg", 'r')
    text = File.read()
    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")
    stream.Open('media/AudioBook.wav', SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    engine.speak(text)
    stream.Close()
Code Example #3
def _get_engine(language_pair, fn):
    print(f'Creating track {language_pair} #{fn}...')
    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")
    try:
        mkdir(f'audio/{language_pair}')
    except OSError:
        pass
    stream.Open(f'audio/{language_pair}/audio{fn:03}.wav', SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    return engine, stream
Code Example #4
def do_TTS(msg, req_type):
    print('starting tts')
    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")

    stream.Open('temp.wav', SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    # need to parse request to find requested message
    if (req_type == 0):
        print(msg)
        word = msg
    else:
        ind = msg.find(': ')
        ind1 = msg.find('\r\n')
        word = msg[ind + 2:ind1]
    engine.speak(word, 0)
    stream.Close()
    # need to change this to temp if I want it to be overwritten each time
    f = wave.open("temp.wav", 'rb')
    frames = f.readframes(f.getnframes())
    frames = [
        struct.unpack("<h", frames[i:i + 2])[0]
        for i in range(0, len(frames), 2)
    ]
    # change samp rate by changing last argument, 4 means 5.5 kHz
    frames = [
        np.uint8(((frames[i] + 2**15) >> 9) + 32)
        for i in range(0, len(frames), 4)
    ]
    ind = -1
    for i in range(len(frames)):
        # whatever value zero is encoded as
        if frames[i] != 96:
            ind = i
            break
    #frames = frames[ind-10:]
    for i in range(len(frames)):
        if (frames[len(frames) - i - 1] != 96):
            ind = i
            break
    # frames = frames[:ind+20]
    #t = np.linspace(0, 10000, f.getnframes())
    # 7-bit ascii character offset by 32 to avoid control bits
    assert max(frames) <= 159 and min(frames) >= 32
    z = []
    for i in range(len(frames)):
        # if (i%4 != 0):
        z.append(Bits('uint:8=' + str(frames[i])))
    # not sure if \n is helpful
    # u8_str = ''.join(a.bin+'\n' for a in z)
    #u8_str = ''.join(str(a)+'\n' for a in frames)
    ascii_str = ''.join(chr(a) for a in frames)
    cltskt.send(ascii_str)
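The per-sample loops above can be expressed more compactly with numpy, which the snippet already imports as np. A sketch under the same assumptions (temp.wav exists and holds signed 16-bit little-endian samples; the >>9 quantization and the 32..159 range follow the code above; the silence trimming and socket send are omitted):

import wave
import numpy as np

with wave.open("temp.wav", "rb") as f:
    raw = f.readframes(f.getnframes())

samples = np.frombuffer(raw, dtype="<i2")     # signed 16-bit samples
kept = samples[::4].astype(np.int32)          # keep every 4th sample (downsample)
packed = ((kept + 2**15) >> 9) + 32           # map to the 32..159 printable range
assert packed.min() >= 32 and packed.max() <= 159
ascii_str = "".join(chr(v) for v in packed)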
Code Example #5
def tta(txt):
    from comtypes.client import CreateObject
    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib
    outfile = "splits/my_tta.wav"
    stream.Open(outfile, SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    #f = open(infile, 'r')
    #txt = f.read()
    #f.close()
    engine.speak(txt)
    stream.Close()
Code Example #6
    def test(self, dynamic=False):
        engine = CreateObject("SAPI.SpVoice", dynamic=dynamic)
        stream = CreateObject("SAPI.SpFileStream", dynamic=dynamic)
        from comtypes.gen import SpeechLib

        fd, fname = tempfile.mkstemp(suffix=".wav")
        os.close(fd)

        stream.Open(fname, SpeechLib.SSFMCreateForWrite)

        # engine.AudioStream is a propputref property
        engine.AudioOutputStream = stream
        self.assertEqual(engine.AudioOutputStream, stream)
        engine.speak("Hello, World", 0)
        stream.Close()
        filesize = os.stat(fname).st_size
        self.assertTrue(filesize > 100, "filesize only %d bytes" % filesize)
        os.unlink(fname)
Code Example #7
def txtToWav(inputFile, outputFile, voice=0):
    if not os.path.exists(outputFile[0:outputFile.rfind('\\')]):
        os.makedirs(outputFile[0:outputFile.rfind('\\')])

    engine = CreateObject("SAPI.SpVoice")
    engine.Voice = engine.GetVoices()[voice]
    stream = CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib

    stream.Open(outputFile, SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream

    f = open(inputFile, 'r')
    text = f.read()
    f.close()

    engine.speak(text)
    stream.Close()
Code Example #8
    def test(self, dynamic=False):
        engine = CreateObject("SAPI.SpVoice", dynamic=dynamic)
        stream = CreateObject("SAPI.SpFileStream", dynamic=dynamic)
        from comtypes.gen import SpeechLib

        fd, fname = tempfile.mkstemp(suffix=".wav")
        os.close(fd)

        stream.Open(fname, SpeechLib.SSFMCreateForWrite)

        # engine.AudioStream is a propputref property
        engine.AudioOutputStream = stream
        self.failUnlessEqual(engine.AudioOutputStream, stream)
        engine.speak("Hello, World", 0)
        stream.Close()
        filesize = os.stat(fname).st_size
        self.failUnless(filesize > 100, "filesize only %d bytes" % filesize)
        os.unlink(fname)
Code Example #9
def getLength(text, wavfn):
    '''Get the length, in seconds, of a wav file produced by applying a
    text-to-speech engine to the given text.'''
    with tempfile.NamedTemporaryFile() as f:
        wavfn = f.name

    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")
    stream.Open(wavfn, comtypes.gen.SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    engine.speak(text)
    stream.Close()

    with contextlib.closing(wave.open(wavfn, 'r')) as f:
        frames = f.getnframes()
        rate = f.getframerate()
        duration = frames / float(rate)

    os.remove(wavfn)
    return duration
Code Example #10
File: tts.py Project: ashiqa/TalkAR
def txt_zu_wav(eingabe,
               ausgabe,
               text_aus_datei=True,
               geschwindigkeit=2,
               Stimmenname="Zira"):
    from comtypes.client import CreateObject
    engine = CreateObject("SAPI.SpVoice")

    engine.rate = geschwindigkeit  # from -10 to 10

    for stimme in engine.GetVoices():
        if stimme.GetDescription().find(Stimmenname) >= 0:
            engine.Voice = stimme
            break
    else:
        print("Fehler Stimme nicht gefunden -> Standard wird benutzt")

    if text_aus_datei:
        datei = open(eingabe, 'r')
        text = datei.read()
        datei.close()
    else:
        text = eingabe

    stream = CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib

    stream.Open(ausgabe, SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    engine.speak(text)

    stream.Close()


# reads from a file named test.txt and converts to a .wav file
#txt_zu_wav("test.txt", "test_1.wav")
Code Example #11
import pyttsx3 as pyttsx  # implemented with pyttsx
engine = pyttsx.init()  # initialize
engine.say('大家好')  # convert text to speech
engine.runAndWait()  # run
#***********************************
from win32com.client import Dispatch  # import the Dispatch object
sperker = Dispatch('SAPI.SpVoice')  # create the object
str = open("1.txt", encoding="utf-8").read()  # open the text file
sperker.Speak(str)  # read it aloud
sperker.Speak('我是赵桐')
del sperker  # release the object
#***********************************
from comtypes.client import CreateObject
from comtypes.gen import SpeechLib
engine = CreateObject("SAPI.SpVoice")
stream = CreateObject('SAPI.SpFileStream')  # output stream
stream.Open('1.wav', SpeechLib.SSFMCreateForWrite)
engine.AudioOutputStream = stream  # route output into the stream
f = open('1.txt', 'r', encoding='utf-8')  # read the text content
engine.speak(f.read())  # output to 1.wav
f.close()  # close the file
stream.Close()  # close the output stream
Code Example #12
    #mixer.init()
    mixer.music.load(wav_file)
    mixer.music.play()

#windows system play and convert wav file
if (operatingSystem == 'win32'):
    from comtypes.client import CreateObject

    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")

    from comtypes.gen import SpeechLib

    stream.Open(wav_file, SpeechLib.SSFMCreateForWrite)  # ERRORS NEAR HERE
    engine.AudioOutputStream = stream  # MAYBE HERE :(
    engine.speak(quote)
    stream.Close()

    import win32com.client as wincl
    speak = wincl.Dispatch("SAPI.SpVoice")
    speak.Speak(quote)

#print ("Playing: " + wav_file)
#This went too quickly and slowed down the CPU
while mixer.music.get_busy():
    time.sleep(0.1)

mixer.quit()

#print ("Done Playing")
Code Example #13
File: test_tts2wav.py Project: nio101/BASECAMP
# coding: utf-8

from comtypes.client import CreateObject
from comtypes.gen import SpeechLib

engine = CreateObject("SAPI.SpVoice")
stream = CreateObject("SAPI.SpFileStream")

outfile = "test.wav"
stream.Open(outfile, SpeechLib.SSFMCreateForWrite)
engine.AudioOutputStream = stream

print(engine.GetVoices().Count)
engine.Voice = engine.GetVoices().Item(1)
engine.Rate = 2

engine.speak(u'La température extérieure est de 4,7°Centigrades.')
engine.speak(u"Bonsoir Nicolas... Est-ce que ta journée de travail s'est bien passée?")
engine.speak(u"La factrice est passée, ce matin. Et on a sonné deux fois à la porte, à 17h34 et 18h27, en ton absence.")

stream.Close()
Code Example #14
f = open("list.txt", "w+")
f1 = open("final.txt", "w")
f1.write("")
f1.close()
f.write("")
os.system("del /q \"pictures\*\"")
os.system("del /q \"comments\*\"")
os.system("del images/endcard.wav")

engine = CreateObject("SAPI.SpVoice")
stream = CreateObject("SAPI.SpFileStream")
outfile = "title.wav"
f.write(f.read() + "file '" + outfile + "'\n")
f.write(f.read() + "file 'silence.wav'\n")
stream.Open(outfile, comtypes.gen.SpeechLib.SSFMCreateForWrite)
engine.AudioOutputStream = stream

engine.speak(post.title)
stream.Close()
create_title(post.author.name, post.created_utc, post.title, "title.png")

for comment in comments:
    body = comment.body
    try:
        create_comment(comment.author.name, comment.score, comment.created_utc,
                       body, "pictures/picture" + comment.id + ".png")
    except:
        continue
    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")
    outfile = "comments/comment" + comment.id + ".wav"
Code Example #15
File: kindaRandom.py Project: ncarn2/kindaRandom
def textToSpeechBot():

    #This program starts off by webscraping a random quotation
    #It then sends that quotation from text to speech
    #It then will convert that speech back to text and speak to itself
    #Similar to the way humans play telephone
    #After doing this, it will call a list of people (only can call one person)
    #Twilio free trial limited us to only calling one person :(
    #Always more random features to incorporate
    #table number 29

    import contextlib  #To suppress output of pygame import
    with contextlib.redirect_stdout(None):  #Suppress output of pygame import
        from pygame import mixer  #Playing music
        import pygame
    from gtts import gTTS
    import time
    import lxml
    import urllib.request
    from bs4 import BeautifulSoup
    import random
    import speech_recognition as sr
    from pathlib import Path
    import os
    import contextlib
    with contextlib.redirect_stdout(None):
        import moviepy.editor as mpy
    from pydub import AudioSegment
    #from pocketsphinx import AudioFile
    import wave
    from gtts import gTTS

    pygame.init()

    #ffmpeg debugger
    #import logging

    #Grab the quote from the web
    url = "http://www.quotationspage.com/random.php"
    request = urllib.request.Request(url)
    html = urllib.request.urlopen(request)
    soup = BeautifulSoup(html, 'lxml')
    links = soup.findAll(
        'a', {"title": "Click for further information about this quotation"})
    quotes = []
    for a in links:
        quotes.append(str(a)[str(a).find('>') + 1:str(a).rfind('<')])

    quote = random.choice(quotes)
    print("Random Web Quote: " + quote)

    #pause = input("Press any key to continue\n")

    file = "quote.mp3"
    wav_file = "quote.wav"
    #print(file, wav_file)

    import os
    import platform
    import sys

    #Determine what operating system the user has in order to play audio properly
    operatingSystem = sys.platform
    #print ("Operating System: " + operatingSystem)
    #Have to create the files and initialize mixers for both OS's

    from gtts import gTTS
    tts = gTTS(text=quote, lang='en')
    tts.save(file)

    mixer.init(frequency=22050, size=-16, channels=2, buffer=-4096)

    #linux play and convert wav file
    if (operatingSystem == 'linux'):

        #sound = AudioSegment.from_mp3(file)
        #sound.export(wav_file, format="wav")

        file = "./" + file
        sound = AudioSegment.from_mp3(file)
        sound.export(wav_file, format="wav")

        #mixer.init()
        mixer.music.load(wav_file)
        mixer.music.play()

    #windows system play and convert wav file
    if (operatingSystem == 'win32'):
        from comtypes.client import CreateObject

        engine = CreateObject("SAPI.SpVoice")
        stream = CreateObject("SAPI.SpFileStream")

        from comtypes.gen import SpeechLib

        stream.Open(wav_file, SpeechLib.SSFMCreateForWrite)  # ERRORS NEAR HERE
        engine.AudioOutputStream = stream  # MAYBE HERE :(
        engine.speak(quote)
        stream.Close()

        import win32com.client as wincl
        speak = wincl.Dispatch("SAPI.SpVoice")
        speak.Speak(quote)

    #print ("Playing: " + wav_file)
    #This went too quickly and slowed down the CPU
    while mixer.music.get_busy():
        time.sleep(0.1)

    mixer.quit()

    #print ("Done Playing")

    #Beginning of the twilio and Flask
    from flask import Flask
    from twilio.twiml.voice_response import VoiceResponse
    from twilio.rest import Client

    app = Flask(__name__)

    lastTelephone = quote
    phoneNumberToCall = input("What is your phone number ?")
    query = ''.join(quote.split())
    phoneNumberToCall = phoneNumberToCall.replace(' ', '').replace(
        '-', '').replace('(', '').replace(')', '')
    print(phoneNumberToCall)
    account_sid = 'AC7119e83de9a686d0d56bc8491d80526a'
    auth_token = 'fcf3e5c1691051282836a49d26ff38c4'
    client = Client(account_sid, auth_token)

    call = client.calls.create(
        url=
        'https://handler.twilio.com/twiml/EH06f621851a96b743015a43371effcf68?Message='
        + query,
        to='+1' + phoneNumberToCall,
        from_='+17207704132')

    print("Calling:", call.sid)

    @app.route("/voice", methods=['GET', 'POST'])
    def voice():
        """Respond to incoming phone calls with a 'Hello world' message"""

        # Start our TwiML response
        resp = VoiceResponse()

        # Read a message aloud to the caller
        resp.say(query)

        return str(resp)
Code Example #16
from comtypes.client import CreateObject
from comtypes.gen import SpeechLib

"""
 Generates audio files using SAPI.SpVoice (TTS) by feeding words (per line) in abuses.txt text file
"""

Engine = CreateObject("SAPI.SpVoice")
Stream = CreateObject("SAPI.SpFileStream")
infile = open("abuses.txt", 'r').read()
split = infile.split('\n')
outfile = "{}.wav"

for i in split:
    out = outfile.format(i)
    Stream.Open(out, SpeechLib.SSFMCreateForWrite)
    Engine.AudioOutputStream = Stream
    Engine.speak(i)
    Stream.Close()
    print('Saved {} ...'.format(out))
Code Example #17
def generate(lang):
    global output_name
    text = random_page(lang)

    engine = CreateObject("SAPI.SpVoice")
    stream = CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib
    stream.Open('audio.mp3', SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    engine.speak(text)
    stream.Close()


    RESOLUTION = (800, 600)  # width/height as a tuple; a set would break Image.resize
    images = []
    for filename in glob.glob('./images/*.jpg'):
        images.append(filename)

    IMAGE_NUMBER = len(images)


    audioclip = AudioFileClip("audio.mp3")
    duration = audioclip.duration

    seconds = duration
    print(seconds)
    fps = 30



    total_frames = int(seconds * fps)

    FRAMES_PER_IMAGE = (total_frames) / IMAGE_NUMBER

    color_percentage_for_each_frame = (100 / total_frames) / 100

    write_to = 'output/{}.mp4'.format('project') # have a folder of output where output files could be stored.

    writer = imageio.get_writer(write_to, format='mp4', mode='I', fps=fps)

    current_image = 0
    next_change = FRAMES_PER_IMAGE

    for i in range(total_frames):
        if i < total_frames:
            im = Image.open(images[current_image])
            im = im.resize(RESOLUTION)
            if (i >= next_change):
                current_image += 1
                next_change += FRAMES_PER_IMAGE
                if current_image >= len(images):
                    current_image = 0
            processed = ImageEnhance.Color(im).enhance(
                color_percentage_for_each_frame * i)
            writer.append_data(np.asarray(processed))
        else:
            writer.append_data(np.asarray(im))
    writer.close()



    videoclip = VideoFileClip("./output/project.mp4")

    new_audioclip = CompositeAudioClip([audioclip])
    videoclip.audio = new_audioclip
    videoclip = videoclip.subclip(0, duration)
    videoclip.write_videofile('./' + output_name + ".mp4")

    import os, shutil
    folder = './images'
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))
    return output_name + ".mp4"