def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):
    # hard-coded evaluation sentences
    texts = [
        "Scientists at the CERN laboratory say they have discovered a new particle.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "President Trump met with other leaders at the Group of 20 conference.",
        "Generative adversarial network or variational auto-encoder.",
        "Please call Stella.",
        "Some have accepted this as a miracle without any physical explanation.",
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=False)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            writer.add_image(
                tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str), prepare_spec_image(mel), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=fs)
            except Exception as e:
                warn(str(e))
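A note on the normalization line signal /= np.max(np.abs(signal)) that recurs in every example here: if the model emits an all-zero waveform, the peak is zero and the division turns the signal into NaNs (0/0). Below is a minimal guarded sketch; the peak_normalize helper and its eps default are illustrative, not part of the repo.

import numpy as np

def peak_normalize(signal, eps=1e-8):
    # Scale a waveform to unit peak amplitude, guarding against the
    # division by zero that the bare signal /= np.max(np.abs(signal))
    # would hit on a silent output.
    peak = np.max(np.abs(signal))
    return signal / max(peak, eps)

# Usage: a quiet sine wave scaled up to peak 1.0.
t = np.linspace(0, 1, 16000, endpoint=False)
quiet = 0.1 * np.sin(2 * np.pi * 440 * t)
assert np.isclose(np.abs(peak_normalize(quiet)).max(), 1.0)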
Example #2
def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):

    # hard-coded evaluation sentences
    texts = [
        "This is Informatics Institute of Technology evaluation sentence for Text to speeh for sinhala"
    ]

    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=False)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            writer.add_image(
                tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str), prepare_spec_image(mel), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=fs)
            except Exception as e:
                warn(str(e))
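Every example first runs import synthesis and then synthesis._frontend = _frontend, injecting the language-specific text frontend into the synthesis module before calling synthesis.tts. A toy illustration of that module-attribute injection follows; the stub module and frontend are invented for the demonstration, not the repo's real objects.

import types

synthesis = types.ModuleType("synthesis")  # stand-in for the real module

def tts_stub(text):
    # The real synthesis.tts consults the injected frontend like this.
    return synthesis._frontend.text_to_sequence(text)

synthesis.tts = tts_stub

class Frontend:
    # Stand-in for the repo's frontend; only the method the stub needs.
    @staticmethod
    def text_to_sequence(text):
        return [ord(c) for c in text]

synthesis._frontend = Frontend  # the same injection the examples perform
print(synthesis.tts("Hi"))  # [72, 105]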
Example #3
def eval_model(device, model, global_step, logs_dir, ismultispeaker):
    """Evaluate the model
    """
    import synthesis

    # Hardcoded sentences for evaluation
    texts = [
        "Scientists at the CERN laboratory say they have discovered a new particle.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "President Trump met with other leaders at the Group of Twenty conference.",
        "Generative adversarial network or variational auto-encoder.",
        "Please call Stella.",
        "Some have accepted this as a miracle without any physical explanation.",
    ]

    eval_output_dir = join(logs_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    eval_alignment_dir = join(eval_output_dir, "alignment")
    os.makedirs(eval_alignment_dir, exist_ok=True)

    eval_wav_dir = join(eval_output_dir, "wavs")
    os.makedirs(eval_wav_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, cfg.n_speakers - 1] if ismultispeaker else [None]

    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, _ = synthesis.tts(model_eval,
                                                    text,
                                                    speaker_id=speaker_id,
                                                    fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_alignment_dir,
                f"step{global_step:09d}_text{idx}_{speaker_str}_alignment.png")
            save_alignment(path, alignment)

            # Audio
            path = join(
                eval_wav_dir,
                f"step{global_step:09d}_text{idx}_{speaker_str}_predicted.wav")
            audio.save_wav(signal, path)
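Unlike the first two examples, this variant does not synthesize with the training model directly: it rebuilds the network on the target device and copies the weights across (model_eval = build_model().to(device) followed by load_state_dict), presumably so that fast incremental synthesis cannot perturb the training model's state. A self-contained sketch of the same pattern, assuming PyTorch; the toy build_model below is a stand-in for the repo's builder.

import torch
import torch.nn as nn

def build_model():
    # Stand-in for the repo's build_model(); any module works here.
    return nn.Sequential(nn.Linear(8, 8), nn.Dropout(0.1), nn.Linear(8, 1))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = build_model().to(device)  # the model being trained

# The pattern from the example above: a fresh copy just for evaluation.
model_eval = build_model().to(device)
model_eval.load_state_dict(model.state_dict())
model_eval.eval()  # additionally disables dropout during synthesis

with torch.no_grad():
    print(model_eval(torch.randn(4, 8, device=device)).shape)  # torch.Size([4, 1])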
Example #4
def text_to_speech(text, speaker_id=-1):
    kwargs = {}
    if speaker_id >= 0:
        kwargs["speaker_id"] = speaker_id

    waveform, alignment, spectrogram, mel = tts(model,
                                                text,
                                                fast=False,
                                                **kwargs)

    with tempfile.SpooledTemporaryFile() as f:
        audio.save_wav(waveform, f)
        f.seek(0)
        return f.read()
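text_to_speech above serializes the waveform through a SpooledTemporaryFile and returns the raw WAV bytes, which is convenient for serving audio without touching disk. A self-contained sketch of the same round trip; save_wav below is a stdlib stand-in for the repo's audio.save_wav, and its behavior (float waveform in [-1, 1] written as mono 16-bit PCM) is an assumption.

import tempfile
import wave

import numpy as np

def save_wav(signal, fileobj, sample_rate=22050):
    # Write a float waveform in [-1, 1] as mono 16-bit PCM.
    pcm = (np.clip(signal, -1.0, 1.0) * 32767).astype("<i2")
    with wave.open(fileobj, "wb") as w:
        w.setnchannels(1)
        w.setsampwidth(2)
        w.setframerate(sample_rate)
        w.writeframes(pcm.tobytes())

t = np.linspace(0, 0.5, 11025, endpoint=False)
waveform = 0.5 * np.sin(2 * np.pi * 220 * t)

# Same round trip as text_to_speech: the file stays in memory unless it
# grows past SpooledTemporaryFile's size threshold.
with tempfile.SpooledTemporaryFile() as f:
    save_wav(waveform, f)
    f.seek(0)
    wav_bytes = f.read()

print(len(wav_bytes))  # 44-byte header + 2 bytes per sample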
Example #5
def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):
    texts = [
        "Scientists at the CERN laboratory say they have discovered a new particle.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "Generative adversarial network or variational auto-encoder.",
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    speaker_id = 0 if ismultispeaker else None
    for idx, text in enumerate(texts):
        signal, alignment, _, mel = synthesis.tts(model,
                                                  text,
                                                  p=0,
                                                  speaker_id=speaker_id,
                                                  fast=False)
        signal /= np.max(np.abs(signal))

        # Alignment
        path = join(eval_output_dir,
                    "step{:09d}_text{}_alignment.png".format(global_step, idx))
        save_alignment(path, alignment)
        tag = "eval_averaged_alignment_{}".format(idx)
        writer.add_image(tag,
                         np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                         global_step)

        # Mel
        writer.add_image("(Eval) Predicted mel spectrogram text{}".format(idx),
                         prepare_spec_image(mel), global_step)

        # Audio
        path = join(eval_output_dir,
                    "step{:09d}_text{}_predicted.wav".format(global_step, idx))
        audio.save_wav(signal, path)

        try:
            writer.add_audio("(Eval) Predicted audio signal {}".format(idx),
                             signal,
                             global_step,
                             sample_rate=fs)
        except Exception as e:
            warn(str(e))
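Each eval_model renders the attention alignment through matplotlib's viridis colormap before logging it as an image: np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255). A self-contained sketch of what that transform produces, using a synthetic soft-diagonal alignment; the (decoder steps, encoder steps) orientation of the matrix is an assumption.

import numpy as np
from matplotlib import cm

# A toy attention matrix with the soft diagonal a well-trained model
# produces; values lie in (0, 1].
dec, enc = 120, 40
rows = np.arange(dec)[:, None] / dec
cols = np.arange(enc)[None, :] / enc
alignment = np.exp(-((rows - cols) ** 2) / 0.005)

# The transform from the examples: map weights through viridis (float
# RGBA in [0, 1]), flip the encoder axis, transpose so encoder positions
# run down the image, and quantize to uint8 for the writer.
image = np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255)
print(image.shape, image.dtype)  # (40, 120, 4) uint8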
Example #6
def eval_model(global_step, device, model, checkpoint_dir, ismultispeaker):
    # hard-coded evaluation sentences
    texts = [
        "Scientists at the CERN laboratory say they have discovered a new particle.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "President Trump met with other leaders at the Group of 20 conference.",
        "Generative adversarial network or variational auto-encoder.",
        "Please call Stella.",
        "Some have accepted this as a miracle without any physical explanation.",
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model_eval,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)
Example #7
def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):
    # hard-coded evaluation sentences
    '''
    texts = [
        "Scientists at the CERN laboratory say they have discovered a new particle.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "President Trump met with other leaders at the Group of 20 conference.",
        "Generative adversarial network or variational auto-encoder.",
        "Please call Stella.",
        "Some have accepted this as a miracle without any physical explanation.",
    ]
    '''
    texts = [
        "Kell on neli, Eesti Raadio uudistega on stuudios Meelis Kompus.",
        "Külma on üks kuni viis kraadi ja saartel on õhutemperatuur miinus ühe ja pluss ühe kraadi vahel. [r]",
        "Need olid Eesti Raadio uudised.",
        #"Martini töö on kellegi teise pealt maha kopeeritud.",
        #"Milline on toetuste maksmise kord?",
        #"Ma pole sel aastal kordagi haige olnud.",
        #"„Kolme kuu jooksul on elu siin suhteliselt palju muutunud,“ nentis president Ilves.",
        "Kanepi läbis Austraalias kvalifikatsiooni edukalt ja pääses kolmekümne kahe parema hulka.",
        #"Järelehüüded suletud sarga kõrval: liiga vara laskunud vaikus on nii ülekohtune.",
        "Võõra viipekaardi leidnud alaealised lõid laiaks suure summa.",
        "Las Vegases lasi mees maha kaks hotelli turvatöötajat."
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 6] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=False)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            writer.add_image(
                tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str), prepare_spec_image(mel), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=fs)
            except Exception as e:
                warn(str(e))
Example #8
def eval_model(global_step, writer, device, model, checkpoint_dir,
               ismultispeaker):
    # hard-coded evaluation sentences
    texts = [
        "jin1 tian1 tian1 qi4 zhen1 bu2 cuo4 。",
        "zuo2 wan3 , ya4 zhou1 wen2 hua4 jia1 nian2 hua2 zai4 guo2 jia1 ti3 yu4 chang3 sheng4 da4 kai1 yan3 。",
        "zhe4 shi4 zhong1 hua2 min2 zu2 shi3 zhong1 jian1 shou3 de5 dao4 de2 zhun3 ze2 。",
        "you3 shen2 me5 xu1 yao4 wo3 bang1 mang2 ma5 ? jin2 guan3 shuo1 !"
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model_eval,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            writer.add_image(
                tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str), prepare_spec_image(mel), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=hparams.sample_rate)
            except Exception as e:
                warn(str(e))
Example #9
def eval_model(global_step, writer, device, model, checkpoint_dir,
               ismultispeaker):
    # hard-coded evaluation sentences
    texts = [
        "And debtors might practically have as much as they liked%if they could only pay for it.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "President trump met with other leaders at the group of 20 conference.",
        "Generative adversarial network or variational auto encoder.",
        "Please call stella.",
        "Some have accepted this as a miracle without any physical explanation.",
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model(training_type=model.training_type).to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts, 1):
            signal, alignments, _, mel = synthesis.tts(model_eval,
                                                       text,
                                                       p=1,
                                                       speaker_id=speaker_id,
                                                       fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            for i, alignment in enumerate(alignments, 1):
                alignment_dir = join(eval_output_dir,
                                     "alignment_layer{}".format(i))
                os.makedirs(alignment_dir, exist_ok=True)
                path = join(
                    alignment_dir,
                    "step{:09d}_text{}_{}_layer{}_alignment.png".format(
                        global_step, idx, speaker_str, i))
                save_alignment(path, alignment, global_step)
                tag = "eval_text_{}_alignment_layer{}_{}".format(
                    idx, i, speaker_str)
                writer.add_image(
                    tag,
                    np.uint8(cm.viridis(np.flip(alignment, 1)) * 255).T,
                    global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str),
                prepare_spec_image(mel).transpose(2, 0, 1), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=hparams.sample_rate)
            except Exception as e:
                warn(str(e))
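Note that this example, unlike the earlier ones, converts images to channels-first before logging (prepare_spec_image(mel).transpose(2, 0, 1), and .T on the RGBA alignment). That matches torch.utils.tensorboard, whose SummaryWriter.add_image defaults to dataformats='CHW'; passing dataformats='HWC' instead would avoid the transpose. The HWC return shape assumed for prepare_spec_image below is an illustration.

import numpy as np

hwc = np.random.randint(0, 256, size=(80, 400, 3), dtype=np.uint8)  # H, W, C
chw = hwc.transpose(2, 0, 1)  # C, H, W, as add_image expects by default
print(hwc.shape, "->", chw.shape)  # (80, 400, 3) -> (3, 80, 400)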
Example #10
def eval_model(global_step, writer, device, model, checkpoint_dir,
               ismultispeaker):
    # hard-coded evaluation sentences
    #texts = [
    #    "Scientists at the CERN laboratory say they have discovered a new particle.",
    #    "There's a way to measure the acute emotional intelligence that has never gone out of style.",
    #    "President Trump met with other leaders at the Group of 20 conference.",
    #    "Generative adversarial network or variational auto-encoder.",
    #    "Please call Stella.",
    #    "Some have accepted this as a miracle without any physical explanation.",
    #]
    texts = [
        "Mais alors, dit Alice, si le monde n'a absolument aucun sens, qui nous empêche d'en inventer un ?",
        "Chante, ô Muse, le héros aux cent détours qui a tant erré sur terre après avoir pillé la ville sainte de Troie,",
        "qui a vu tant de villes et connu tant de peuples, qui sur mer a tant souffert en son coeur, luttant pour sa vie et le retour de ses équipages.",
        "Déesse, fille de Zeus, débute où tu veux et raconte-nous l'histoire, à nous aussi.",
        "après tout , la vie de ces gens est si misérable , que l' annonce de la mort n' a rien d' effrayant pour eux .",
        "Après tout, la vie de ces gens est si misérable, que l'annonce de la mort n'a rien d'effrayant pour eux.",
    ]

    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            print("(text):", text)
            signal, alignment, _, mel = synthesis.tts(model_eval,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            writer.add_image(
                tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str), prepare_spec_image(mel), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=fs)
            except Exception as e:
                warn(str(e))
Example #11
def eval_model(global_step, writer, device, model, checkpoint_dir,
               ismultispeaker):
    # hard-coded evaluation sentences
    texts = [
        "Gã khổng lồ công nghệ này đã ký các thỏa thuận hợp tác với 200 tờ báo ấn phẩm tại nhiều quốc gia.",
        "Hồ Chí Minh đọc bản Tuyên ngôn Độc lập vào ngày 2 tháng 9 năm 1945 trên Quảng trường Ba Đình tại Hà Nội.",
        "Hình như tôi chiều các em quá nên bây giờ các em hư đúng không.",
        "Các sách giáo khoa Tiếng Việt đã được thẩm định cũng dựa trên khung thời lượng và chuẩn đầu ra để thiết kế cho phù hợp nhằm đi đến cái đích đó.",
        "Thầy cô chúng ta đã thay đổi.",
        "Đây là ký hiệu riêng của một số loại biển số ít phổ biến.",
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model_eval,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            try:
                writer.add_image(
                    tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                    global_step)
            except Exception as e:
                warn(str(e))

            # Mel
            try:
                writer.add_image(
                    "(Eval) Predicted mel spectrogram text{}_{}".format(
                        idx, speaker_str), prepare_spec_image(mel),
                    global_step)
            except Exception as e:
                warn(str(e))

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=hparams.sample_rate)
            except Exception as e:
                warn(str(e))
Example #12
def eval_model(global_step, writer, device, model, checkpoint_dir,
               ismultispeaker):
    # hard-coded evaluation sentences
    texts = [
        u'तरीके में छह निश्चित प्रैक्टिकल कदम है',
        u"यह आशावाद का एक बड़ा और दोस्त था",
        u"डाउंस ने प्रशंसा के लिए उन्हें धन्यवाद दिया और बाहर जाने लगाओ",
        u"इस दिशा में कोई प्रगति नहीं कर पा रहा था", u"यह उससे भी बढ़कर है",
        u"अवधेश या भौतिक समतुल्य में रूपांतरण हो",
        u"उसने कल्पना में डॉक्टर को साफ देखा", u"और बता तू भी करेंगे",
        u"ज्यादातर चीजों की तुलना में कुछ चीजें हमेशा अधिक महत्वपूर्ण होती है",
        u"गहरे आध्यात्मिक मनुष्य भी थे",
        u"ैसा स्वर्ग में है वैसा ही धरती पर होगा",
        u"हमें धन के बारे में जागरूक करना चाहिए",
        u"अनंत बार आ सकता है और जा सकता है",
        u"समग्र प्रयास में टोकन योगदान बताते हैं",
        u"आंखों को स्वस्थ करने के विचार से भर गया", u"हिंदी ऑडियो बुक डॉट कॉम",
        u"या आप कभी भी सिर्फ पर नहीं पहुंचेंगे", u"विश्व के सभी हिस्सों में",
        u"क्या वह नंबर एक ही तलाश कर रहे हैं", u"आपके अवचेतन मन की शक्ति",
        u"रेलवे स्टेशन से थामस एडिशन के ऑफिस की तरफ जा रहे थे",
        u"मैं जानता हूं कि आपको सुझाव के सिद्धांत के प्रयोग के द्वारा मैं जिस भी इच्छा को अपने मस्तिष्क में निरंतर बनाए रखूंगा",
        u"इस बात से काफी सनसनी फैली",
        u"आप जिस की आलोचना करते हैं उसे आकर्षित नहीं कर सकते",
        u"हम इसे 80 एक सिद्धांत कह सकते हैं",
        u"अपने स्थाई ग्राहकों के सहयोग से"
    ]
    import synthesis
    synthesis._frontend = _frontend

    eval_output_dir = join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)

    # Prepare model for evaluation
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())

    # hard-coded speaker IDs
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(
            speaker_id) if speaker_id is not None else "single"

        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(model_eval,
                                                      text,
                                                      p=0,
                                                      speaker_id=speaker_id,
                                                      fast=True)
            signal /= np.max(np.abs(signal))

            # Alignment
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                    global_step, idx, speaker_str))
            save_alignment(path, alignment)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            writer.add_image(
                tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),
                global_step)

            # Mel
            writer.add_image(
                "(Eval) Predicted mel spectrogram text{}_{}".format(
                    idx, speaker_str), prepare_spec_image(mel), global_step)

            # Audio
            path = join(
                eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                    global_step, idx, speaker_str))
            audio.save_wav(signal, path)

            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(
                    idx, speaker_str),
                                 signal,
                                 global_step,
                                 sample_rate=hparams.sample_rate)
            except Exception as e:
                warn(str(e))