Example #1
 def interact(self) -> None:
     while True:
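         # Capture speech from the microphone when speech recognition is enabled,
         # otherwise read the sentence from stdin ('.' exits the loop)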
         if self.SpeechRecognition:
             with sr.Microphone() as source:
                 self.r.adjust_for_ambient_noise(source, duration=1)
                 print("Dites quelque chose !")
                 audio = self.r.listen(source)
                 sentence = ""
                 try:
                     sentence = self.r.recognize_google(audio, language="fr-FR")
                     print("Google pense que vous dites ça : " + sentence)
                 except sr.UnknownValueError:
                     print("Google n'a pas pu comprendre l'audio")
                 except sr.RequestError:
                     print(
                         "Impossible de faire une requête aux serveurs Google. Êtes-vous bien connecté à Internet ?"
                     )
         else:
             sentence = input('> ')
             if sentence == '.':
                 break
         response_complete = self.complete(self.response(sentence))
         print(response_complete)
         if self.talk:
             voice = Voice(lang="fr", speed=120)
             voice.say(response_complete)
Example #2
def make_espeak(text, lang, max_length):
    # pick the espeak voice matching the language code, falling back to English
    if lang in gtts_to_espeak:
        voice = Voice(lang=gtts_to_espeak[lang], speed=130, volume=2)
    else:
        voice = Voice(lang="en", speed=130)
    wav = voice.to_audio(text)

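    # measure the rendered audio's length in seconds via pydub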
    pydub_wav = AudioSegment.from_file_using_temporary_files(BytesIO(wav))
    audio_length = len(pydub_wav) / 1000

    return wav, audio_length
Example #3
 def setUp(self):
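     # group every available effect instance under each effect base class it derives from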
     self.effects = {
         eff_class: set()
         for eff_class in (PhonemicEffect, TextEffect, AudioEffect,
                           VoiceEffect)
     }
     for effect_cls in set(AVAILABLE_EFFECTS):
         effect = effect_cls()
         for cls in self.effects.keys():
             if isinstance(effect, cls):
                 self.effects[cls].add(effect)
     self.voice = Voice(lang="fr")
Example #4
    def __init__(self, filepath: str, lang="fr"):
        self.voice = Voice(lang=lang)
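        # bidirectional mapping between each phoneme (plus the pause symbol "_") and an integer id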
        self.alphabet = bidict({pho : i for i, pho in enumerate(self.voice.phonems._all | {"_"})})
        self.alphabet_size = len(self.alphabet)
        logging.info("Loading the input file")
        with open(filepath) as corpus_file:
            phonemized_file = PhonemeList(corpus_file.read())
        self.synth = Synthesizer(self._build_phonemes_stats(phonemized_file), self.alphabet, self.voice)

        self.phonemes_list = [phoneme.name for phoneme in phonemized_file]
Example #5
def get_phonemes(utterance, lang="en", arpabet=True, default_dur=0.4):
    try:
        from voxpopuli import Voice
        voice = Voice(lang=lang)
        print(voice.to_phonemes(utterance))
        if arpabet:
            # map the phoneme names to ARPAbet via ipa2arpabet, keeping their durations
            return [(ipa2arpabet[phoneme.name.replace("_", ".")], phoneme.duration)
                    for phoneme in voice.to_phonemes(utterance)]
        else:
            return [(phoneme.name.replace("_", "pau"), phoneme.duration)
                    for phoneme in voice.to_phonemes(utterance)]
    except Exception:
        # voxpopuli unavailable or synthesis failed: use the fallback phonemizer
        phones = fallback_get_phonemes(utterance, lang).split()
        # some cleanup, remove digits
        phones = [pho.strip("0123456789") for pho in phones]
        if not arpabet:
            phones = [arpabet2ipa[pho.replace(".", "_")] for pho in phones]
        return [(pho, default_dur) for pho in phones]
Example #6
File: main.py Project: hadware/katalixia
class RhymeTree(TreeNode):
    def __init__(self, rhyming_lang="fr"):
        super().__init__()
        self.voice = Voice(lang=rhyming_lang)
        self.children = dict()  # type:Dict[str,Union[TreeNode, Leaf]]

    def insert_rhyme(self, rhyme_string, data=None):
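        # build a Leaf from the string's phonemization and insert it into the tree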
        new_leaf = Leaf.from_string(rhyme_string.strip(), self.voice)
        if new_leaf is not None:
            if data is not None:
                new_leaf.data = data
            self.insert(new_leaf, 1)
        else:
            logging.warning("Word '%s' returned an empty phoneme list", rhyme_string)

    def find_rhyme(self, string):
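        # rhymes are matched from the end of the word, so start from the last phoneme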
        string_phonemes = Leaf.clean_silences(
            [pho.name for pho in self.voice.to_phonemes(string)])
        current_pho = string_phonemes.pop()
        if current_pho not in self.children:
            return None
        else:
            return self.children[current_pho].find(string_phonemes, string)

    def save(self, filepath):
        with open(filepath, "wb") as picklefile:
            pickle.dump(self, picklefile)

    @classmethod
    def from_pickle(cls, pickle_filepath):
        with open(pickle_filepath, "rb") as picklefile:
            return pickle.load(picklefile)

    @classmethod
    def from_text_file(cls, textfile_filepath, lang="fr", separator=None):
        separator = separator if separator is not None else "\n"
        with open(textfile_filepath) as file:
            all_strings = file.read().split(separator)

        return cls.from_word_list(all_strings, lang)

    @classmethod
    def from_word_list(cls, input_list, lang="fr"):
        tree = cls(lang)
        for string in input_list:
            tree.insert_rhyme(string)

        return tree

    def to_dict(self):
        return {pho: child.to_dict() for pho, child in self.children.items()}
Example #7
class TestEffects(unittest.TestCase):

    text = "Les écoute pas ces sheitane c  moi le vrai crysw"

    def setUp(self):
        self.effects = {
            eff_class: set()
            for eff_class in (PhonemicEffect, TextEffect, AudioEffect,
                              VoiceEffect)
        }
        for effect_cls in set(AVAILABLE_EFFECTS):
            effect = effect_cls()
            for cls in self.effects.keys():
                if isinstance(effect, cls):
                    self.effects[cls].add(effect)
        self.voice = Voice(lang="fr")

    def test_text_effects(self):
        for effect in self.effects[TextEffect]:
            self.assertIsNotNone(effect.process(self.text))

    def test_phonemic_effects(self):
        pho = self.voice.to_phonemes(self.text)
        for effect in self.effects[PhonemicEffect]:
            self.assertIsNotNone(effect.process(pho))

    def test_audio_effects(self):
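        # render the text to wav, convert it to a 16kHz float32 array, then run each audio effect on it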
        wav = self.voice.to_audio(self.text)
        _, wav_array = get_event_loop().run_until_complete(
            AudioRenderer.to_f32_16k(wav))
        for effect in self.effects[AudioEffect]:
            self.assertIsNotNone(effect.process(wav_array))

    def test_voice_effects(self):
        cookie_hash = md5(("parce que nous we").encode('utf8')).digest()
        voice_params = VoiceParameters.from_cookie_hash(cookie_hash)
        for effect in self.effects[VoiceEffect]:
            self.assertIsNotNone(effect.process(voice_params))
Example #9
from voxpopuli import Voice

# average sentence length (in phonemes) over a French corpus

CORPUS_PATH = "french_corpus.txt"

voice = Voice(lang="fr")

def extract_sentence(corpus):
    for x in corpus:
        phonetized_sentence = voice.to_phonemes(x)
        yield phonetized_sentence.phonemes_str.strip("_")

phonemes_count = []
with open(CORPUS_PATH, "r") as corpus:
    for x in extract_sentence(corpus):
        phonemes_count.append(len(x))

print(sum(phonemes_count) / len(phonemes_count))
Example #10
import pandas as pd
from voxpopuli import Voice
import json
import tqdm
import pprint

lx = pd.read_csv('data/lexique.csv', sep='\t')
filtered_lx = lx[lx['4_cgram'].isin(["NOM", "ADJ", "ADV"])]

dict_pho = {}

v = Voice(lang="fr")

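# phonemize every noun, adjective and adverb kept from the lexicon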
for index, word_row in tqdm.tqdm(filtered_lx.iterrows(),
                                 total=len(filtered_lx)):
    mot = word_row["1_ortho"]

    try:
        phonemes = v.to_phonemes(mot)
    except Exception as e:
        print(e)
        print(mot)
        continue
    pho_list = []
    for pho in phonemes:
        if pho.name != "_":
            pho_list.append(pho.name)
    if len(pho_list) > 1:
        word_data = {
            "pho": pho_list,
            "gram": word_row["4_cgram"],
Example #11
        beat_duration = len(beats_track) / (rate * BEATS_TRACK_BEAT_COUNT)
        beats_track_looped = np.tile(
            beats_track,
            LOOPS_COUNT * BEATS_PER_MEASURE * len(CHORDS_PROGRESSION) //
            BEATS_TRACK_BEAT_COUNT)

    logging.info("Beat time : %dms" % (beat_duration * 1000))
    logging.info("Measure time : %dms" % (beat_duration * 1000 * 4))

    prog_freqs = get_trinote_progression_freqs(CHORDS_PROGRESSION)
    logging.info("First freq progression: \n %s" % (str(prog_freqs)))
    track_freqs = prog_freqs * LOOPS_COUNT

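    # one sustained "a" phoneme per beat, pitched to the chord progression's frequencies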
    progression_phonems = PhonemeList([])
    for freq in track_freqs:
        progression_phonems.append(
            Phoneme("a", int(beat_duration * 1000), [(0, freq), (100, freq)]))
    logging.info("Rendering audio")

    voice = Voice(lang="fr", voice_id=2)
    wav = voice.to_audio(progression_phonems)
    if BEATS_TRACK_FILE is not None:
        rate, wave_array = to_f32_16k(wav)
        mixed_tracks = mix_tracks(beats_track_looped * 0.6,
                                  wave_array * 0.3,
                                  align="left")
        wav = to_wav_bytes(mixed_tracks, 16000)
    player = AudioPlayer()
    player.set_file(BytesIO(wav))
    player.play()
    player.close()
Example #12
            #print(len(phoneme.pitch_modifiers))
            phoneme.duration = notearr[vnum][1]
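            # set every pitch point of this phoneme to the note's frequency ('_' keeps the original pitch)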
            for pnum, pimo in enumerate(phoneme.pitch_modifiers, start=0):
                if notearr[vnum][0] != '_':
                    phoneme.pitch_modifiers[pnum] = (pimo[0], notearr[vnum][0])
            vnum += 1
        #print(phoneme)
        #print("---")
    print(phonemes_list)
    if fname is not None:
        voice.to_audio(phonemes_list, fname)
    voice.say(phonemes_list)


# Sing in German: "Alle meine Entchen"
voice = Voice(lang="de", voice_id=2)
text = "Alle meine Entchen schwimmen auf dem See. | schwimmen auf dem See. | Köpfchen in das Wasser. Schwänzchen in die Höh."
notes = 'd4 e4 f#4 g4 a2 a2 b4 b4 b4 b4 a2 4 b4 b4 b4 b4 a2 4 g4 g4 g4 g4 f#2 f#2 a4 a4 a4 a4 d2'
sing(text, notes, voice, "entchen.wav")

# Sing "Atemlos"
voice = Voice(lang="de", voice_id=5)
text = "Atemlos durch die Nacht | bis ein neuer Tag erwacht."
notes = '+c4 b4 a2 a4 g8 e2 8 e4 g8 g4. -b8 b4. b4 c4.'
# if you don't specify a filename, you sing without saving:
sing(text, notes, voice)

# Sing in French: "Frère Jacques"
voice = Voice(lang="fr", voice_id=1)
# in the lyrics, Jacques is changed to Jacquès and matines to matinès for singing purposes
text = "Frèrè Jacquès, Frèrè Jacquès, dormez-vous, dormez-vous? Sonnez les matinès, sonnez les matinès, Ding, ding, dong. Ding, ding, dong."
Example #13
 def get_voice(self):
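     # build a Voice configured with this object's stored parameters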
     return Voice(lang=self.lang, pitch=self.pitch, speed=self.speed,
                  voice_id=self.voice_id, volume=self.volume)
Example #14
        beats_track_looped = np.tile(beats_track,
                                     PROG_LOOP_COUNT * BEATS_PER_MEASURE * FULL_LOOP_COUNT * \
                                     (FIRST_PROG_REPEAT + SECOND_PROG_REPEAT) // BEATS_TRACK_BEAT_COUNT)

    logging.info("Beat time : %dms" % (beat_duration * 1000))
    logging.info("Measure time : %dms" % (beat_duration * 1000 * 4))

    first_prog_freqs = get_trinote_progression_freqs(FIRST_CHORDS_PROGRESSION)
    logging.info("First freq progression: \n %s" % (str(first_prog_freqs)))
    second_prog_freqs = get_trinote_progression_freqs(SECOND_CHORDS_PROGRESSION)
    logging.info("First freq progression: \n %s" % (str(second_prog_freqs)))

    track_freqs = first_prog_freqs * FIRST_PROG_REPEAT + second_prog_freqs * SECOND_PROG_REPEAT

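    # one sustained "w" phoneme per beat, pitched to the combined progressions' frequencies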
    progression_phonems = PhonemeList([])
    for freq in track_freqs * FULL_LOOP_COUNT:
        progression_phonems.append(
            Phoneme("w", int(beat_duration * 1000), [(0,freq), (100,freq)])
        )
    logging.info("Rendering audio")

    voice = Voice(lang="fr", voice_id=3)
    wav = voice.to_audio(progression_phonems)
    if BEATS_TRACK_FILE is not None:
        rate, wave_array = to_f32_16k(wav)
        mixed_tracks = mix_tracks(beats_track_looped * 0.6, wave_array * 0.4, align="left")
        wav = to_wav_bytes(mixed_tracks, 16000)
    player = AudioPlayer()
    player.set_file(BytesIO(wav))
    player.play()
    player.close()
Example #15
File: rimer.py Project: juttingn/rimr
            syllablebuffer.append(pho)
        elif pho in FrenchPhonemes.VOWELS:
            syllablebuffer.append(pho)
            syllabe = "".join(syllablebuffer)
            syllabe_list.append(syllabe)
            syllablebuffer = []
    # if the syllable buffer is not empty, append its remaining phonemes to the last syllable
    if syllablebuffer:
        syllabe_list[-1] += "".join(syllablebuffer)
    return syllabe_list


if __name__ == "__main__":
    args = parser.parse_args()

    v = Voice(lang="fr")
    phonemes = v.to_phonemes(args.query)

    pho_query = []
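    # keep only the query's non-silence phonemes ("_" marks a pause)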
    for pho in phonemes:
        if pho.name != "_":
            pho_query.append(pho.name)

    rimes = []
    for mot, word_data in lexicon.items():
        pho_list = word_data['pho']

        # Depending on the user's arguments for grammatical class, gender and number,
        # some words are skipped.

        if args.gram is not None and word_data['gram'] not in args.gram: