Example #1
def main():
    file_path = search_number('755205')
    if 'Error' not in file_path:
        record, status = process(file_path)
        return record, status
    else:
        return 'Error', 'Not Found'
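For context, a minimal sketch of the two helpers this snippet assumes; search_number, process, and the lookup details below are hypothetical stand-ins inferred only from how main() calls them, not code from the source project.

# hypothetical stubs, inferred from the calling convention in main()
lookup_table = {'755205': '/data/755205.txt'}  # placeholder number-to-path mapping

def search_number(number):
    # return a file path on success, or a string containing 'Error' otherwise
    return lookup_table.get(number, 'Error: number not found')

def process(file_path):
    # return a (record, status) pair for the located file
    with open(file_path) as f:
        return f.read(), 'Found'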
Example #2
def main(wf):
    import parser
    import moment_format
    args = wf.args
    if len(args) == 0:
        return

    action = args[0]

    if action == 'parse':
        parser.process(wf, args[1:])
    elif action == 'format':
        moment_format.process(wf, args[1:])
    elif action == 'addFormat':
        moment_format.add_format(wf, args[1:])
    elif action == 'delFormat':
        moment_format.del_format(wf, args[1:])
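A hedged sketch of how this entry point is typically wired up; it assumes the deanishe/alfred-workflow package (the Workflow class and its run() wrapper), which the wf argument suggests but the snippet does not confirm.

# hypothetical launcher, assuming the alfred-workflow package provides Workflow
import sys
from workflow import Workflow

if __name__ == '__main__':
    wf = Workflow()
    sys.exit(wf.run(main))  # run() calls main(wf) and handles logging and errors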
Example #3
def process(self):
    parser = Parser(self.source, self.filename)
    self.content = content = []
    self.write = write = self.content.append
    for state, s in parser.process():
        getattr(self, 'process_' + state)(s)
    content.append('\n')
    source = ''.join(content)
    if isinstance(source, unicode):
        _compile = compile_unicode
    else:
        _compile = compile
    try:
        return _compile(source, self.filename, 'exec')
    except SyntaxError as exc:
        raise CompileError(exc.msg, self.filename, exc.lineno)
Example #4
def generate_long(
    text="",
    numeric_translation=True
):  # slower, but can translate numeric details and longer sentences
    """
    params: text :: a str (long)
            numeric_translation :: phonetic translation will be performed before speech generation [slightly slower]

            ** will be saved as out.wav **
    """
    # the weights couldn't be stored directly on GitHub, so they are downloaded on first use
    if not os.path.exists("model1/model_gs_301k.data-00000-of-00001"):
        print('--------------------------------------------------------------')
        print('--------------------------------------------------------------')
        print("No weights found for first model. Downloading ...")
        wget.download(
            "https://gitlab.com/zabir-nabil/bangla_tts_weights/raw/master/model_gs_301k.data-00000-of-00001"
        )
        shutil.move("model_gs_301k.data-00000-of-00001",
                    "model1/model_gs_301k.data-00000-of-00001")

    if not os.path.exists("model2/model_gs_300k.data-00000-of-00001"):
        print('--------------------------------------------------------------')
        print('--------------------------------------------------------------')
        print("No weights found for second model. Downloading ...")
        wget.download(
            "https://gitlab.com/zabir-nabil/bangla_tts_weights/raw/master/model_gs_300k.data-00000-of-00001"
        )
        shutil.move("model_gs_300k.data-00000-of-00001",
                    "model2/model_gs_300k.data-00000-of-00001")

    text_arr = process(text)
    print(text_arr)

    # Load data
    L = load_data(text_arr)

    # Load graph
    g = Graph()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Restore parameters
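        # (two checkpoints are restored: Text2Mel weights from model1/, SSRN plus the
        # global step from model2/)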
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     'Text2Mel')
        saver1 = tf.train.Saver(var_list=var_list)
        # check for the weights

        saver1.restore(sess, tf.train.latest_checkpoint("model1"))
        print("Model 1 loaded!")

        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSRN') + \
                   tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gs')
        saver2 = tf.train.Saver(var_list=var_list)
        saver2.restore(sess, tf.train.latest_checkpoint("model2"))
        print("Model 2 loaded!")

        t1 = time.time()

        ## mel generation
        Y = np.zeros((len(L), max_T, n_mels), np.float32)
        prev_max_attentions = np.zeros((len(L), ), np.int32)
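        # autoregressive decoding: each step predicts mel frame j, writes it back into Y
        # (fed in via g.mels), and carries the attention peaks forward via prev_max_attentions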
        for j in tqdm(range(max_T)):
            _gs, _Y, _max_attentions, _alignments = \
                sess.run([g.global_step, g.Y, g.max_attentions, g.alignments],
                         {g.L: L,
                          g.mels: Y,
                          g.prev_max_attentions: prev_max_attentions})
            Y[:, j, :] = _Y[:, j, :]
            prev_max_attentions = _max_attentions[:, j]

        # Get magnitude spectrum
        Z = sess.run(g.Z, {g.Y: Y})

        generated_wav = np.array([])  # accumulates the synthesized waveform samples across segments

        for i, mag in enumerate(Z):
            #mag = upsample2(mag)
            wav = spectrogram2wav(mag)  # griffin-lim speech generation

            generated_wav = np.append(generated_wav, wav)

        t_needed = time.time() - t1

        print(f'Total time taken {t_needed} secs.')

        write("out.wav", sr, generated_wav)
Example #5
def test_parse_symgiza_output(self):
    data = process()

    print(sys.path)
    self.assertEqual(data, [('Y', 'and'), ('sus', 'their'),
                            ('children', 'niños')])
Example #6
def from_file(filename):
    """Parses a .frac file into a Program."""
    return Program(process(filename))
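A hedged usage note; the file name below is a placeholder.

# hypothetical usage: build a Program from a .frac source file
program = from_file('example.frac')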