Code Example #1
File: logger.py Project: CookiePPP/codedump
 def log_validation(self, reduced_loss, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing, diagonality, avg_prob):
     self.add_scalar("validation.loss", reduced_loss, iteration)
     self.add_scalar("validation.attention_alignment_diagonality", diagonality, iteration)
     self.add_scalar("validation.average_max_attention_weight", avg_prob, iteration)
     self.add_scalar("validation.p_teacher_forcing", val_p_teacher_forcing, iteration)
     self.add_scalar("validation.teacher_force_till", val_teacher_force_till, iteration)
     _, mel_outputs, gate_outputs, alignments = y_pred
     mel_targets, gate_targets, *_ = y
 
     # plot distribution of parameters
     for tag, value in model.named_parameters():
         tag = tag.replace('.', '/')
         self.add_histogram(tag, value.data.cpu().numpy(), iteration)
     
     # plot alignment, mel target and predicted, gate target and predicted
     idx = 0 # plot longest audio file
     self.add_image(
         "alignment",
         plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
         iteration, dataformats='HWC')
     
     idx = 1 # and plot a second one as well
     self.add_image(
         "alignment2",
         plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
         iteration, dataformats='HWC')
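Note: every example on this page calls a plot_alignment_to_numpy helper that renders an attention matrix to an image array, but its implementation is not shown here. Below is only a minimal sketch of such a helper, assuming matplotlib with the Agg backend and returning an HxWx3 uint8 array so it matches the dataformats='HWC' calls above (or can be wrapped with PIL.Image.fromarray as in Code Example #5). The extra keyword arguments used by some projects (x_label, y_label, figsize, decoding_len) are project-specific and omitted.

import matplotlib
matplotlib.use("Agg")  # headless rendering (assumption)
import matplotlib.pyplot as plt
import numpy as np


def plot_alignment_to_numpy(alignment, info=None):
    """Render a 2-D attention matrix (encoder steps x decoder steps) to an HWC uint8 array."""
    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Encoder timestep')
    fig.tight_layout()

    fig.canvas.draw()
    # Grab the rendered RGBA buffer and drop the alpha channel -> (H, W, 3) uint8.
    data = np.asarray(fig.canvas.buffer_rgba())[..., :3].copy()
    plt.close(fig)
    return data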
Code Example #2
 def log_validation(self, reduced_loss, model, y, y_pred, iteration):
     self.add_scalar("validation.loss", reduced_loss, iteration)
     z, alignments, pred_output_lengths, log_s_sum, logdet_w_sum = y_pred
     mel_targets, *_ = y
     
     # plot distribution of parameters
     for tag, value in model.named_parameters():
         tag = tag.replace('.', '/')
         self.add_histogram(tag, value.data.cpu().numpy(), iteration)
     
     # plot alignment, mel target and predicted, gate target and predicted
     if len(alignments.shape) == 4:
         for head_i in range(alignments.shape[1]):
             idx = 0 # plot longest audio file
             self.add_image(
                 f"alignment1/h{head_i}",
                 plot_alignment_to_numpy(alignments[idx][head_i].data.cpu().numpy().T),
                 iteration, dataformats='HWC')
             
             if alignments.shape[0] > 1: # if batch_size > 1...
                 idx = 1 # pick a second plot
                 self.add_image(
                     f"alignment2/h{head_i}",
                     plot_alignment_to_numpy(alignments[idx][head_i].data.cpu().numpy().T),
                     iteration, dataformats='HWC')
     else:
         for idx in range(2): # plot the first two items in the batch
             self.add_image(
                 f"alignment/{idx}",
                 plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
                 iteration, dataformats='HWC')
Code Example #3
File: logger.py Project: dodohow1011/waveglow
    def log_alignment(self, model, enc_slf_attn, dec_enc_attn, out_mel, target,
                      iteration):

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        #idx = random.randint(0, enc_slf_attn.size(0) - 1)
        idx = 0
        self.add_image(
            "encoder_self_alignment",
            plot_alignment_to_numpy(enc_slf_attn[idx].data.cpu().numpy().T),
            iteration)
        self.add_image(
            "decoder_encoder_alignment",
            plot_alignment_to_numpy(dec_enc_attn[idx].data.cpu().numpy().T),
            iteration)
        self.add_image(
            "mel_target",
            plot_spectrogram_to_numpy(target[idx].data.cpu().numpy()),
            iteration)
        self.add_image(
            "mel_predicted",
            plot_spectrogram_to_numpy(out_mel[idx].data.cpu().numpy()),
            iteration)
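Similarly, plot_spectrogram_to_numpy is used throughout these loggers to turn a mel spectrogram (channels x frames) into an image array. A minimal sketch under the same matplotlib/Agg assumption, with illustrative labels only:

import matplotlib
matplotlib.use("Agg")  # headless rendering (assumption)
import matplotlib.pyplot as plt
import numpy as np


def plot_spectrogram_to_numpy(spectrogram):
    """Render a 2-D spectrogram (channels x frames) to an HWC uint8 array."""
    fig, ax = plt.subplots(figsize=(12, 3))
    im = ax.imshow(spectrogram, aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(im, ax=ax)
    ax.set_xlabel('Frames')
    ax.set_ylabel('Channels')
    fig.tight_layout()

    fig.canvas.draw()
    # RGBA buffer without alpha -> (H, W, 3) uint8.
    data = np.asarray(fig.canvas.buffer_rgba())[..., :3].copy()
    plt.close(fig)
    return data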
Code Example #4
    def log_validation(self, reduced_loss_main, reduced_loss_join,
                       reduced_loss_class, reduced_loss, model, y, y_pred,
                       iteration):
        self.add_scalar("validation.loss_mian", reduced_loss_main, iteration)
        self.add_scalar("validation.loss_join", reduced_loss_join, iteration)
        self.add_scalar("validation.loss_class", reduced_loss_class, iteration)

        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, alignment_outputs, acoustics_of_phone, join_outs, text_alignment = y_pred
        mel_targets, alignment_targets, alignments_weights, text_alignment_padded = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, mel_targets.size(0) - 1)
        self.add_image(
            "alignment_target",
            plot_alignment_to_numpy(
                alignment_targets[idx].data.cpu().numpy().T), iteration)
        self.add_image(
            "alignment_output",
            plot_alignment_to_numpy(
                alignment_outputs[idx].data.cpu().numpy().T), iteration)
        self.add_image(
            "mel_target",
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
            iteration)
        self.add_image(
            "mel_predicted",
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
            iteration)
        self.add_image(
            "alignments_weights",
            plot_weight_outputs_to_numpy(
                alignments_weights[idx].data.cpu().numpy()), iteration)
        self.add_image(
            "acoustic_of_phone_reference",
            plot_alignment_to_numpy(
                acoustics_of_phone[idx].data.cpu().numpy().T,
                x_label='Encoder timestep',
                y_label='Phoneme Acoustic'), iteration)
        self.add_image(
            "acoustic_of_phone_predicted",
            plot_alignment_to_numpy(join_outs[idx].data.cpu().numpy().T,
                                    x_label='Encoder timestep',
                                    y_label='Phoneme Acoustic'), iteration)
        self.add_image(
            "phone_level_acoustic_text_alignment_output",
            plot_alignment_to_numpy(text_alignment[idx].data.cpu().numpy().T,
                                    figsize=(8, 6),
                                    x_label='Encoder timestep',
                                    y_label='Encoder timestep'), iteration)
Code Example #5
File: logger.py Project: ktho22/tacotron2
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        log_dict = {
            "loss/val": reduced_loss,
        }

        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)

        align = Image.fromarray(
            plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T))
        align.save(os.path.join(self.outdir, f'align_{iteration:08}.png'))

        target = Image.fromarray(
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()))
        target.save(os.path.join(self.outdir, f'target_{iteration:08}.png'))

        output = Image.fromarray(
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()))
        output.save(os.path.join(self.outdir, f'output_{iteration:08}.png'))

        gate = Image.fromarray(
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()))
        gate.save(os.path.join(self.outdir, f'gate_{iteration:08}.png'))

        log_dict.update({
            "alignment":
            wandb.Image(Image.fromarray(
                plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T)),
                        caption='att'),
            "mel_target":
            wandb.Image(Image.fromarray(
                plot_spectrogram_to_numpy(
                    mel_targets[idx].data.cpu().numpy())),
                        caption='att'),
            "mel_predicted":
            wandb.Image(Image.fromarray(
                plot_spectrogram_to_numpy(
                    mel_outputs[idx].data.cpu().numpy())),
                        caption='att'),
            "gate":
            wandb.Image(Image.fromarray(
                plot_gate_outputs_to_numpy(
                    gate_targets[idx].data.cpu().numpy(),
                    torch.sigmoid(gate_outputs[idx]).data.cpu().numpy())),
                        caption='att'),
        })
        wandb.log(log_dict, step=iteration)
Code Example #6
File: logger.py Project: CookiePPP/codedump
    def log_teacher_forced_validation(self, reduced_loss, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing, diagonality, avg_prob):
        self.add_scalar("teacher_forced_validation.loss", reduced_loss, iteration)
        self.add_scalar("teacher_forced_validation.attention_alignment_diagonality", diagonality, iteration)
        self.add_scalar("teacher_forced_validation.average_max_attention_weight", avg_prob, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets, *_ = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)
        
        # plot alignment, mel target and predicted, gate target and predicted
        idx = 0 # plot longest audio file
        self.add_image(
            "teacher_forced_alignment",
            plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
            iteration, dataformats='HWC')
        self.add_image(
            "mel_target",
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        self.add_image(
            "mel_predicted",
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        self.add_image(
            "gate",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
            iteration, dataformats='HWC')
        
        idx = 1 # and plot 2nd longest audio file
        self.add_image(
            "teacher_forced_alignment2",
            plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
            iteration, dataformats='HWC')
        self.add_image(
            "mel_target2",
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        self.add_image(
            "mel_predicted2",
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        self.add_image(
            "gate2",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
            iteration, dataformats='HWC')
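The gate plots above compare the 0/1 stop-token targets against sigmoid-activated gate predictions, one value per decoder frame. A minimal sketch of a plot_gate_outputs_to_numpy helper, again assuming matplotlib's Agg backend; the colors and marker styles are illustrative and not taken from any of the projects listed:

import matplotlib
matplotlib.use("Agg")  # headless rendering (assumption)
import matplotlib.pyplot as plt
import numpy as np


def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
    """Scatter stop-token targets (0/1) against predicted gate probabilities per frame."""
    fig, ax = plt.subplots(figsize=(12, 3))
    frames = np.arange(len(gate_targets))
    ax.scatter(frames, gate_targets, alpha=0.5, color='green',
               marker='+', s=1, label='target')
    ax.scatter(frames, gate_outputs, alpha=0.5, color='red',
               marker='.', s=1, label='predicted')
    ax.set_xlabel('Frames (green: target, red: predicted)')
    ax.set_ylabel('Gate state')
    fig.tight_layout()

    fig.canvas.draw()
    # RGBA buffer without alpha -> (H, W, 3) uint8.
    data = np.asarray(fig.canvas.buffer_rgba())[..., :3].copy()
    plt.close(fig)
    return data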
Code Example #7
    def log_validation(self, reduced_loss, model, y, y_pred, gst_scores,
                       iteration):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments, _ = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)

        align_idx = alignments[idx].data.cpu().numpy().T
        gst_scores = gst_scores.data.cpu().numpy().T
        # print("Validation GST scores before plotting to tensorboard: {}".format(gst_scores.shape))
        meltarg_idx = mel_targets[idx].data.cpu().numpy()
        melout_idx = mel_outputs[idx].data.cpu().numpy()

        self.add_image("alignment", plot_alignment_to_numpy(align_idx),
                       iteration)
        self.add_image("gst_scores", plot_gst_scores_to_numpy(gst_scores),
                       iteration)
        self.add_image("mel_target", plot_spectrogram_to_numpy(meltarg_idx),
                       iteration)
        self.add_image("mel_predicted", plot_spectrogram_to_numpy(melout_idx),
                       iteration)
        self.add_image(
            "gate",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                F.sigmoid(gate_outputs[idx]).data.cpu().numpy()), iteration)
Code Example #8
File: logger.py Project: JanFschr/cookietts
 def log_infer(self, reduced_loss_dict, reduced_bestval_loss_dict, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing):
     prepend = 'inference'
     
     # plot datapoints/graphs
     self.plot_loss_dict(reduced_loss_dict,         iteration, f'{prepend}')
     self.plot_loss_dict(reduced_bestval_loss_dict, iteration, f'{prepend}_best')
     
     # plot spects / imgs
     n_items = min(self.n_items, y['gt_mel'].shape[0])
     
     for idx in range(n_items):# plot target spectrogram of longest audio file(s)
         self.add_image(
             f"{prepend}_{idx}/alignment",
             plot_alignment_to_numpy(y_pred['alignments'][idx].data.cpu().numpy().T),
             iteration, dataformats='HWC')
         self.add_image(
             f"{prepend}_{idx}/mel_pred",
             plot_spectrogram_to_numpy(y_pred['pred_mel_postnet'][idx].data.cpu().numpy()),
             iteration, dataformats='HWC')
         if self.plotted_targets_inf < 10:
             self.add_image(
                 f"{prepend}_{idx}/mel_gt",
                 plot_spectrogram_to_numpy(y['gt_mel'][idx].data.cpu().numpy()),
                 iteration, dataformats='HWC')
     self.plotted_targets_inf +=1 # target spect doesn't change so only needs to be plotted ~~once~~ a couple times.
Code Example #9
File: logger.py Project: JanFschr/cookietts
 def log_validation(self, reduced_loss_dict, reduced_bestval_loss_dict, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing):
     prepend = 'validation'
     
     # plot datapoints/graphs
     self.plot_loss_dict(reduced_loss_dict,         iteration, f'{prepend}')
     self.plot_loss_dict(reduced_bestval_loss_dict, iteration, f'{prepend}_best')
     
     # plot spects / imgs
     n_items = min(self.n_items, y['gt_mel'].shape[0])
     
     mel_L1_map = torch.nn.L1Loss(reduction='none')(y_pred['pred_mel_postnet'], y['gt_mel'])
     mel_L1_map[:, -1, -1] = 5.0 # because otherwise the color map scale is crap
     
     for idx in range(n_items):# plot target spectrogram of longest audio file(s)
         self.add_image(
             f"{prepend}_{idx}/alignment",
             plot_alignment_to_numpy(y_pred['alignments'][idx].data.cpu().numpy().T),
             iteration, dataformats='HWC')
         self.add_image(
             f"{prepend}_{idx}/mel_pred",
             plot_spectrogram_to_numpy(y_pred['pred_mel_postnet'][idx].data.cpu().numpy()),
             iteration, dataformats='HWC')
         self.add_image(
             f"{prepend}_{idx}/mel_SE",
             plot_spectrogram_to_numpy(mel_L1_map[idx].data.cpu().numpy()),
             iteration, dataformats='HWC')
         if self.plotted_targets_val < 2:
             self.add_image(
                 f"{prepend}_{idx}/mel_gt",
                 plot_spectrogram_to_numpy(y['gt_mel'][idx].data.cpu().numpy()),
                 iteration, dataformats='HWC')
     self.plotted_targets_val +=1 # target spect doesn't change so only needs to be plotted once.
Code Example #10
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets, alignment_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image(
            "alignment",
            plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
            iteration, dataformats='HWC')
        self.add_image(
            "mel_target",
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        self.add_image(
            "mel_predicted",
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        self.add_image(
            "gate",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
            iteration, dataformats='HWC')
Code Example #11
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        # self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)

        align = plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T)
        spec = plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy())
        mel = plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy())
        gate = plot_gate_outputs_to_numpy(
            gate_targets[idx].data.cpu().numpy(),
            torch.sigmoid(gate_outputs[idx]).data.cpu().numpy())

        wandb = self.wandb
        wandb.log({
            "validation loss": reduced_loss,
            "alignment": wandb.Image(align),
            "spectrogram": wandb.Image(spec),
            "mel_spec": wandb.Image(mel),
            "gate": wandb.Image(gate),
        })
Code Example #12
File: logger.py Project: CookiePPP/codedump
 def log_infer(self, reduced_loss, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing, diagonality, avg_prob):
     self.add_scalar("infer.loss", reduced_loss, iteration)
     self.add_scalar("infer.attention_alignment_diagonality", diagonality, iteration)
     self.add_scalar("infer.average_max_attention_weight", avg_prob, iteration)
     _, mel_outputs, gate_outputs, alignments = y_pred
     mel_targets, gate_targets, *_ = y
     
     # plot alignment, mel target and predicted, gate target and predicted
     idx = 0 # plot longest audio file
     self.add_image(
         "infer_alignment",
         plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
         iteration, dataformats='HWC')
     self.add_image(
         "infer_mel_target",
         plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
         iteration, dataformats='HWC')
     self.add_image(
         "infer_mel_predicted",
         plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
         iteration, dataformats='HWC')
     self.add_image(
         "infer_gate",
         plot_gate_outputs_to_numpy(
             gate_targets[idx].data.cpu().numpy(),
             torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
         iteration, dataformats='HWC')
     
     idx = 1 # and plot 2nd longest audio file
     self.add_image(
         "infer_alignment2",
         plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
         iteration, dataformats='HWC')
     self.add_image(
         "infer_mel_target2",
         plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
         iteration, dataformats='HWC')
     self.add_image(
         "infer_mel_predicted2",
         plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
         iteration, dataformats='HWC')
     self.add_image(
         "infer_gate2",
         plot_gate_outputs_to_numpy(
             gate_targets[idx].data.cpu().numpy(),
             torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
         iteration, dataformats='HWC')
Code Example #13
File: train.py Project: zachwe/DeepLearningExamples
def validate(model, criterion, valset, epoch, batch_iter, batch_size,
             world_size, collate_fn, distributed_run, rank, batch_to_gpu,
             summary_writer=None):
    """Handles all the validation scoring and printing"""
    with evaluating(model), torch.no_grad():
        val_sampler = DistributedSampler(valset) if distributed_run else None
        val_loader = DataLoader(valset, num_workers=1, shuffle=False,
                                sampler=val_sampler,
                                batch_size=batch_size, pin_memory=False,
                                collate_fn=collate_fn)

        val_loss = 0.0
        num_iters = 0
        val_items_per_sec = 0.0
        for i, batch in enumerate(val_loader):
            torch.cuda.synchronize()
            iter_start_time = time.perf_counter()

            x, y, num_items = batch_to_gpu(batch)
            y_pred = model(x)
            loss = criterion(y_pred, y)
            if i % 50 == 0 and summary_writer is not None:
                _, mel_outputs_postnet, _, alignments = y_pred
                summary_writer.add_image(
                    f'attention_weights_sample_{i}',
                    plot_alignment_to_numpy(alignments[0].data.cpu().numpy().T),
                    epoch,
                    dataformats='HWC',
                )
                summary_writer.add_image(
                    f'mel_outputs_sample_{i}',
                    plot_mel_to_numpy(mel_outputs_postnet[0].data.cpu().numpy()),
                    epoch,
                    dataformats='HWC',
                )
            if distributed_run:
                reduced_val_loss = reduce_tensor(loss.data, world_size).item()
                reduced_num_items = reduce_tensor(num_items.data, 1).item()
            else:
                reduced_val_loss = loss.item()
                reduced_num_items = num_items.item()
            val_loss += reduced_val_loss

            torch.cuda.synchronize()
            iter_stop_time = time.perf_counter()
            iter_time = iter_stop_time - iter_start_time

            items_per_sec = reduced_num_items/iter_time
            DLLogger.log(step=(epoch, batch_iter, i), data={'val_items_per_sec': items_per_sec})
            val_items_per_sec += items_per_sec
            num_iters += 1

        val_loss = val_loss/(i + 1)

        DLLogger.log(step=(epoch,), data={'val_loss': val_loss})
        DLLogger.log(step=(epoch,), data={'val_items_per_sec':
                                         (val_items_per_sec/num_iters if num_iters > 0 else 0.0)})

        return val_loss, val_items_per_sec
Code Example #14
    def log_alignment(self, model, dec_enc_attn, alignment, mel_padded, mel_predict, test_attn, iteration):

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        
        idx = random.randint(0, dec_enc_attn[0].size(0) - 1)
        mel_padded = mel_padded.permute(0, 2, 1)
        mel_predict = mel_predict.permute(0, 2, 1)
        '''self.add_image(
            "encoder_self_alignment",
            plot_alignment_to_numpy(enc_slf_attn[idx].data.cpu().numpy().T),
            iteration)'''
        for i in range(len(dec_enc_attn)):
            self.add_image(
                "decoder_encoder_alignment_{}".format(i),
                plot_alignment_to_numpy(dec_enc_attn[i][idx].data.cpu().numpy().T),
                iteration)
            self.add_image(
                "test_alignment_{}".format(i),
                plot_alignment_to_numpy(test_attn[len(test_attn)-i-1][idx].data.cpu().numpy().T),
                iteration)
        self.add_image(
            "target_alignment",
            plot_alignment_to_numpy(alignment[idx].data.cpu().numpy().T),
            iteration)
        self.add_image(
            "target_mel",
            plot_spectrogram_to_numpy(mel_padded[idx].data.cpu().numpy().T),
            iteration)
        self.add_image(
            "predict_mel",
            plot_spectrogram_to_numpy(mel_predict[idx].data.cpu().numpy().T),
            iteration)
Code Example #15
def inference(args):

    sentences = get_sentences(args)

    model = load_model(hparams)
    model.load_state_dict(torch.load(args.checkpoint)['state_dict'])
    model.cuda().eval()

    test_set = TextMelLoaderEval(sentences, hparams)
    test_collate_fn = TextMelCollateEval(hparams)
    test_sampler = DistributedSampler(
        test_set) if hparams.distributed_run else None
    test_loader = DataLoader(test_set,
                             num_workers=0,
                             sampler=test_sampler,
                             batch_size=hparams.synth_batch_size,
                             pin_memory=False,
                             drop_last=True,
                             collate_fn=test_collate_fn)

    T2_output_range = (-hparams.max_abs_value,
                       hparams.max_abs_value) if hparams.symmetric_mels else (
                           0, hparams.max_abs_value)

    os.makedirs(args.out_filename, exist_ok=True)

    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            mel_outputs, mel_outputs_postnet, _, alignments = model.inference(
                batch)
            align_img = Image.fromarray(
                plot_alignment_to_numpy(alignments[0].data.cpu().numpy().T))
            spec_img = Image.fromarray(
                plot_spectrogram_to_numpy(
                    mel_outputs_postnet[0].data.cpu().numpy()))
            align_img.save(
                os.path.join(args.out_filename,
                             'sentence_{}_alignment.jpg'.format(i)))
            spec_img.save(
                os.path.join(args.out_filename,
                             'sentence_{}_mel-spectrogram.jpg'.format(i)))
            mels = mel_outputs_postnet[0].cpu().numpy()

            mel_path = os.path.join(args.out_filename,
                                    'sentence_{}_mel-feats.npy'.format(i))
            mels = np.clip(mels, T2_output_range[0], T2_output_range[1])
            np.save(mel_path, mels.T, allow_pickle=False)

            print('CHECK MEL SHAPE:', mels.T.shape)
Code Example #16
    def log_validation(self,
                       reduced_loss,
                       model,
                       y,
                       y_pred,
                       iteration,
                       model_name="",
                       log_embedding=False):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image(
            "alignment",
            np.moveaxis(
                plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
                2, 0), iteration)
        self.add_image(
            "mel_target",
            np.moveaxis(
                plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
                2, 0), iteration)
        self.add_image(
            "mel_predicted",
            np.moveaxis(
                plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
                2, 0), iteration)
        self.add_image(
            "gate",
            np.moveaxis(
                plot_gate_outputs_to_numpy(
                    gate_targets[idx].data.cpu().numpy(),
                    torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()), 2,
                0), iteration)
        if log_embedding:
            self.add_embedding(
                model.speaker_embedding.weight.detach().cpu().numpy(), [
                    str(i)
                    for i in range(model.speaker_embedding.num_embeddings)
                ],
                global_step=iteration,
                tag='emb_{}'.format(model_name))
Code Example #17
    def log_validation(self, reduced_loss, reduced_losses, reduced_acces,
                       model, y, y_pred, iteration, task):

        self.add_scalar('validation.loss.%s' % task, reduced_loss, iteration)
        self.add_scalar('validation.acc.%s.texcl' % task, reduced_acces[0],
                        iteration)
        mel_hidden, text_logit_from_mel_hidden, audio_seq2seq_alignments, text_lengths = y_pred
        # plot alignment, mel target and predicted, stop target and predicted
        idx = random.randint(0, audio_seq2seq_alignments.size(0) - 1)
        audio_seq2seq_alignments = audio_seq2seq_alignments.data.cpu().numpy()

        self.add_image("%s.audio_seq2seq_alignment" % task,
                       plot_alignment_to_numpy(
                           audio_seq2seq_alignments[idx].T),
                       iteration,
                       dataformats='HWC')
Code Example #18
File: logger.py Project: Marcus-Arcadius/cookietts
    def log_infer(self, reduced_loss, model, y, y_pred, iteration,
                  val_teacher_force_till, val_p_teacher_forcing, diagonality,
                  avg_prob):
        self.add_scalar("infer.loss", reduced_loss, iteration)
        self.add_scalar("infer.attention_alignment_diagonality", diagonality,
                        iteration)
        self.add_scalar("infer.average_max_attention_weight", avg_prob,
                        iteration)
        mel_outputs, mel_outputs_postnet, gate_outputs, alignments, *_ = y_pred
        if mel_outputs_postnet is not None:
            mel_outputs = mel_outputs_postnet
        mel_outputs_GAN = y_pred[8][0]
        mel_targets, gate_targets, *_ = y
        mel_outputs = mel_outputs[:, :mel_targets.shape[1], :]

        plot_n_files = 5
        # plot infer alignment, mel target and predicted, gate predicted
        for idx in range(plot_n_files):  # plot longest x audio files
            str_idx = '' if idx == 0 else idx
            self.add_image(f"infer_alignment{str_idx}",
                           plot_alignment_to_numpy(
                               alignments[idx].data.cpu().numpy().T),
                           iteration,
                           dataformats='HWC')
            self.add_image(f"infer_mel_target{str_idx}",
                           plot_spectrogram_to_numpy(
                               mel_targets[idx].data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
            self.add_image(f"infer_mel_predicted{str_idx}",
                           plot_spectrogram_to_numpy(
                               mel_outputs[idx].data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
            if mel_outputs_GAN is not None:
                self.add_image(f"mel_predicted_GAN{str_idx}",
                               plot_spectrogram_to_numpy(
                                   mel_outputs_GAN[idx].data.cpu().numpy()),
                               iteration,
                               dataformats='HWC')
            self.add_image(f"infer_gate{str_idx}",
                           plot_gate_outputs_to_numpy(
                               gate_targets[idx].data.cpu().numpy(),
                               torch.sigmoid(
                                   gate_outputs[idx]).data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
Code Example #19
    def log_validation(self,
                       reduced_loss,
                       model,
                       y,
                       y_pred,
                       iteration,
                       speaker_acc=0,
                       augment_acc=0):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        self.add_scalar("Speaker_classifier_ACC", speaker_acc, iteration)
        self.add_scalar("Augment_classifier_ACC", augment_acc, iteration)
        _, mel_outputs, gate_outputs, alignments, speaker_output, augmentation_output, _, _ = y_pred
        mel_targets, gate_targets, speaker_id, labels = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image(
            "alignment",
            torch.from_numpy(
                plot_alignment_to_numpy(
                    alignments[idx].data.cpu().numpy().T)).permute(2, 0, 1),
            iteration)
        self.add_image(
            "mel_target",
            torch.from_numpy(
                plot_spectrogram_to_numpy(
                    mel_targets[idx].data.cpu().numpy())).permute(2, 0, 1),
            iteration)
        self.add_image(
            "mel_predicted",
            torch.from_numpy(
                plot_spectrogram_to_numpy(
                    mel_outputs[idx].data.cpu().numpy())).permute(2, 0, 1),
            iteration)
        self.add_image(
            "gate",
            torch.from_numpy(
                plot_gate_outputs_to_numpy(
                    gate_targets[idx].data.cpu().numpy(),
                    F.sigmoid(gate_outputs[idx]).data.cpu().numpy())).permute(
                        2, 0, 1), iteration)
Code Example #20
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image("alignment",
                       plot_alignment_to_numpy(
                           alignments[idx].data.cpu().numpy().T),
                       iteration,
                       dataformats='HWC')
        self.add_image("mel_target",
                       plot_spectrogram_to_numpy(
                           mel_targets[idx].data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        self.add_image("mel_predicted",
                       plot_spectrogram_to_numpy(
                           mel_outputs[idx].data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        self.add_image("gate",
                       plot_gate_outputs_to_numpy(
                           gate_targets[idx].data.cpu().numpy(),
                           torch.sigmoid(
                               gate_outputs[idx]).data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')

        mel = mel_outputs.cpu()[0]
        if len(mel.shape) == 2:
            mel = mel.unsqueeze(0)
        audio = self.melgan.inference(mel)
        self.add_audio('audio',
                       audio,
                       global_step=iteration,
                       sample_rate=self.sampling_rate,
                       walltime=None)
Code Example #21
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        wandb.log({'validation.loss': reduced_loss}, step=iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)

        alignment_arr = plot_alignment_to_numpy(
            alignments[idx].data.cpu().numpy().T)
        self.add_image("alignment", alignment_arr, iteration)
        wandb.log(
            {"alignment": [wandb.Image(alignment_arr, caption="Alignment")]},
            step=iteration)

        mel_target = plot_spectrogram_to_numpy(
            mel_targets[idx].data.cpu().numpy())
        self.add_image("mel_target", mel_target, iteration)
        wandb.log(
            {"mel_target": [wandb.Image(mel_target, caption="Mel target")]},
            step=iteration)

        mel_predicted = plot_spectrogram_to_numpy(
            mel_outputs[idx].data.cpu().numpy())
        self.add_image("mel_predicted", mel_predicted, iteration)
        wandb.log(
            {
                "mel_predicted":
                [wandb.Image(mel_predicted, caption="Mel predicted")]
            },
            step=iteration)

        self.add_image(
            "gate",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
            iteration)
Code Example #22
File: logger.py Project: zge/tacotron2-vae
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        if self.use_vae:
            _, mel_outputs, gate_outputs, alignments, mus, _, _, emotions = y_pred
        else:
            _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y
        #print('emotion:\n{}'.format(emotions))

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image(
            "alignment",
            plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
            iteration, dataformats=self.dataformat)
        self.add_image(
            "mel_target",
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
            iteration, dataformats=self.dataformat)
        self.add_image(
            "mel_predicted",
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
            iteration, dataformats=self.dataformat)
        self.add_image(
            "gate",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
            iteration, dataformats=self.dataformat)
        if self.use_vae:
            self.add_image(
                "latent_dim (regular)",
                plot_scatter(mus, emotions),
                iteration, dataformats=self.dataformat)
            self.add_image(
                "latent_dim (t-sne)",
                plot_tsne(mus, emotions),
                iteration, dataformats=self.dataformat)
Code Example #23
File: logger.py Project: q-hwang/tacotron2
    def log_validation(self, reduced_loss, model, x, y, y_pred, iteration,
                       hparams):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image(
            "alignment",
            plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
            iteration)
        self.add_image(
            "mel_target",
            plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
            iteration)
        self.add_image(
            "mel_predicted",
            plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
            iteration)
        self.add_image(
            "gate",
            plot_gate_outputs_to_numpy(
                gate_targets[idx].data.cpu().numpy(),
                F.sigmoid(gate_outputs[idx]).data.cpu().numpy()), iteration)
        self.add_audio(
            "audio_from_target",
            synthesis_griffin_lim(mel_targets[idx].unsqueeze(0), hparams),
            iteration, hparams.sampling_rate)
        self.add_audio(
            "audio_from_predicted",
            synthesis_griffin_lim(mel_outputs[idx].unsqueeze(0), hparams),
            iteration, hparams.sampling_rate)
        self.add_text(
            "text", ''.join([
                _id_to_symbol[symbol_id]
                for symbol_id in x[0][idx].data.cpu().numpy()
            ]), iteration)
Code Example #24
    def log_validation(self, params, iteration):
        for key, key_params in params.items():
            self.add_scalar(f'{key}.loss', key_params['loss'], iteration)

            _, mel_outputs, gate_outputs, alignments = key_params['y_pred']
            mel_targets, gate_targets = key_params['y']

            # plot alignment, mel target and predicted, gate target and predicted
            idx = random.randint(0, mel_outputs.size(0) - 1)
            self.add_image(
                f'{key}.alignment',
                plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
                iteration, dataformats='HWC')
            self.add_image(
                f'{key}.mel_target',
                plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
                iteration, dataformats='HWC')
            self.add_image(
                f'{key}.mel_predicted',
                plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
                iteration, dataformats='HWC')
Code Example #25
File: logger.py Project: wavecross/zhrtvc
    def log_validation(self, reduced_loss, model, y, y_pred, iteration, x):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y
        text_inputs = x[0]
        speaker_ids = x[5]
        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image("alignment",
                       plot_alignment_to_numpy(
                           alignments[idx].data.cpu().numpy().T),
                       iteration,
                       dataformats='HWC')
        self.add_image("mel_predicted",
                       plot_spectrogram_to_numpy(
                           mel_outputs[idx].data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        self.add_image("gate",
                       plot_gate_outputs_to_numpy(
                           gate_targets[idx].data.cpu().numpy(),
                           torch.sigmoid(
                               gate_outputs[idx]).data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        # Also log the synthesized audio output.
        audio_predicted = inv_linearspectrogram(
            mel_outputs[idx].data.cpu().numpy())
        self.add_audio('audio_predicted',
                       torch.from_numpy(audio_predicted),
                       iteration,
                       sample_rate=default_hparams.sample_rate)
        self.add_image("mel_target",
                       plot_spectrogram_to_numpy(
                           mel_targets[idx].data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        audio_target = inv_linearspectrogram(
            mel_targets[idx].data.cpu().numpy())
        self.add_audio('audio_target',
                       torch.from_numpy(audio_target),
                       iteration,
                       sample_rate=default_hparams.sample_rate)

        spk = int(speaker_ids[idx].data.cpu().numpy().flatten()[0])
        ph_ids = text_inputs[idx].data.cpu().numpy().flatten()
        phs_text = sequence_to_text(ph_ids)
        phs_size = len(ph_ids)
        reduced_loss = float(reduced_loss)
        audt_duration = int(
            len(audio_target) / (default_hparams.sample_rate / 1000))
        audp_duration = int(
            len(audio_predicted) / (default_hparams.sample_rate / 1000))
        spect_shape = mel_targets[idx].data.cpu().numpy().shape
        specp_shape = mel_outputs[idx].data.cpu().numpy().shape
        align_shape = alignments[idx].data.cpu().numpy().T.shape
        out_text = dict(speaker_id=spk,
                        phonemes=phs_text,
                        phonemes_size=phs_size,
                        validation_loss=reduced_loss,
                        audio_target_ms=audt_duration,
                        audio_predicted_ms=audp_duration,
                        spectrogram_target_shape=str(spect_shape),
                        spectrogram_predicted_shape=str(specp_shape),
                        alignment_shape=str(align_shape))
        out_text = json.dumps(out_text, indent=4, ensure_ascii=False)
        out_text = f'<pre>{out_text}</pre>'  # HTML tags are supported
        self.add_text('text', out_text, iteration)
Code Example #26
    def log_validation(self, reduced_loss, model, x, y, y_pred, iteration,
                       epoch, sample_rate):
        text_padded, input_lengths, mel_padded, max_len, output_lengths = x

        #self.add_scalar("validation.loss", reduced_loss, iteration) # Tensorboard log
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            # self.add_histogram(tag, value.data.cpu().numpy(), iteration) # Tensorboard log
            wandb.log(
                {
                    tag: wandb.Histogram(value.data.cpu().numpy()),
                    "epoch": epoch,
                    "iteration": iteration
                },
                step=iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)

        text_len = input_lengths[idx].item()
        text_string = sequence_to_text(text_padded[idx].tolist())[:text_len]

        mel_len = get_mel_length(alignments, idx, text_len)
        mel = mel_outputs[idx:idx + 1, :, :mel_len]

        np_wav = self.mel2wav(mel.type('torch.cuda.HalfTensor'))

        np_alignment = plot_alignment_to_numpy(
            alignments[idx].data.cpu().numpy().T, decoding_len=mel_len)
        '''self.add_image(
            "alignment",
            np_alignment,
            iteration, dataformats='HWC')'''

        np_mel_target = plot_spectrogram_to_numpy(
            mel_targets[idx].data.cpu().numpy())
        '''self.add_image(
            "mel_target",
            np_mel_target,
            iteration, dataformats='HWC')'''

        np_mel_predicted = plot_spectrogram_to_numpy(
            mel_outputs[idx].data.cpu().numpy())
        '''self.add_image(
            "mel_predicted",
            np_mel_predicted,
            iteration, dataformats='HWC')'''

        np_gate = plot_gate_outputs_to_numpy(
            gate_targets[idx].data.cpu().numpy(),
            torch.sigmoid(gate_outputs[idx]).data.cpu().numpy())
        '''self.add_image(
            "gate",
            np_gate,
            iteration, dataformats='HWC')'''

        # wandb log
        wandb.log(
            {
                "val/loss":
                reduced_loss,
                "val/alignment":
                [wandb.Image(np_alignment, caption=text_string)],
                "val/audio": [
                    wandb.Audio(np_wav.astype(np.float32),
                                caption=text_string,
                                sample_rate=sample_rate)
                ],
                "val/mel_target": [wandb.Image(np_mel_target)],
                "val/mel_predicted": [wandb.Image(np_mel_predicted)],
                "val/gate": [wandb.Image(np_gate)],
                "epoch":
                epoch,
                "iteration":
                iteration
            },
            step=iteration)

        # foward attention ratio
        hop_list = [1]
        for hop_size in hop_list:
            mean_far, batch_far = forward_attention_ratio(
                alignments, input_lengths, hop_size)
            log_name = "mean_forward_attention_ratio.val/hop_size={}".format(
                hop_size)
            wandb.log(
                {
                    log_name: mean_far,
                    "epoch": epoch,
                    "iteration": iteration
                },
                step=iteration)
            log_name = "forward_attention_ratio.val/hop_size={}".format(
                hop_size)
            wandb.log(
                {
                    log_name: wandb.Histogram(batch_far.data.cpu().numpy()),
                    "epoch": epoch,
                    "iteration": iteration
                },
                step=iteration)
Code Example #27
File: logger.py Project: Marcus-Arcadius/cookietts
    def log_teacher_forced_validation(self, reduced_loss, model, y, y_pred,
                                      iteration, val_teacher_force_till,
                                      val_p_teacher_forcing, diagonality,
                                      avg_prob):
        self.add_scalar("teacher_forced_validation.loss", reduced_loss,
                        iteration)
        self.add_scalar(
            "teacher_forced_validation.attention_alignment_diagonality",
            diagonality, iteration)
        self.add_scalar(
            "teacher_forced_validation.average_max_attention_weight", avg_prob,
            iteration)
        mel_outputs, mel_outputs_postnet, gate_outputs, alignments, *_ = y_pred
        if mel_outputs_postnet is not None:
            mel_outputs = mel_outputs_postnet
        mel_outputs_GAN = y_pred[8][0]
        mel_targets, gate_targets, *_ = y
        mel_outputs = mel_outputs[:, :mel_targets.shape[1], :]
        mel_MSE_map = torch.nn.MSELoss(reduction='none')(mel_outputs,
                                                         mel_targets)
        mel_MSE_map[:, -1,
                    -1] = 20.0  # because otherwise the color map scale is crap

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        plot_n_files = 5
        # plot alignment, mel target and predicted, gate target and predicted
        for idx in range(plot_n_files):  # plot longest x audio files
            str_idx = '' if idx == 0 else idx
            self.add_image(f"teacher_forced_alignment{str_idx}",
                           plot_alignment_to_numpy(
                               alignments[idx].data.cpu().numpy().T),
                           iteration,
                           dataformats='HWC')
            self.add_image(f"mel_target{str_idx}",
                           plot_spectrogram_to_numpy(
                               mel_targets[idx].data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
            self.add_image(f"mel_predicted{str_idx}",
                           plot_spectrogram_to_numpy(
                               mel_outputs[idx].data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
            if mel_outputs_GAN is not None:
                self.add_image(f"mel_predicted_GAN{str_idx}",
                               plot_spectrogram_to_numpy(
                                   mel_outputs_GAN[idx].data.cpu().numpy()),
                               iteration,
                               dataformats='HWC')
            self.add_image(f"mel_squared_error{str_idx}",
                           plot_spectrogram_to_numpy(
                               mel_MSE_map[idx].data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
            self.add_image(f"gate{str_idx}",
                           plot_gate_outputs_to_numpy(
                               gate_targets[idx].data.cpu().numpy(),
                               torch.sigmoid(
                                   gate_outputs[idx]).data.cpu().numpy()),
                           iteration,
                           dataformats='HWC')
Code Example #28
File: logger.py Project: intflow/tacotron2
    def log_validation(self, reduced_loss, model, y, y_pred, iteration):
        self.add_scalar("validation.loss", reduced_loss, iteration)
        _, mel_outputs, gate_outputs, alignments = y_pred
        mel_targets, gate_targets = y

        # plot distribution of parameters
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, gate target and predicted
        idx = random.randint(0, alignments.size(0) - 1)
        self.add_image("alignment",
                       plot_alignment_to_numpy(
                           alignments[idx].data.cpu().numpy().T),
                       iteration,
                       dataformats='HWC')
        self.add_image("mel_target",
                       plot_spectrogram_to_numpy(
                           mel_targets[idx].data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        self.add_image("mel_predicted",
                       plot_spectrogram_to_numpy(
                           mel_outputs[idx].data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')
        self.add_image("gate",
                       plot_gate_outputs_to_numpy(
                           gate_targets[idx].data.cpu().numpy(),
                           torch.sigmoid(
                               gate_outputs[idx]).data.cpu().numpy()),
                       iteration,
                       dataformats='HWC')


# import random
# import torch.nn.functional as F
# from tensorboardX import SummaryWriter
# from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
# from plotting_utils import plot_gate_outputs_to_numpy

# class Tacotron2Logger(SummaryWriter):
#     def __init__(self, logdir):
#         super(Tacotron2Logger, self).__init__(logdir)

#     def log_training(self, reduced_loss, grad_norm, learning_rate, duration,
#                      iteration):
#             self.add_scalar("training.loss", reduced_loss, iteration)
#             self.add_scalar("grad.norm", grad_norm, iteration)
#             self.add_scalar("learning.rate", learning_rate, iteration)
#             self.add_scalar("duration", duration, iteration)

#     def log_validation(self, reduced_loss, model, y, y_pred, iteration):
#         self.add_scalar("validation.loss", reduced_loss, iteration)
#         _, mel_outputs, gate_outputs, alignments = y_pred
#         mel_targets, gate_targets = y

#         # plot distribution of parameters
#         for tag, value in model.named_parameters():
#             tag = tag.replace('.', '/')
#             self.add_histogram(tag, value.data.cpu().numpy(), iteration)

#         # plot alignment, mel target and predicted, gate target and predicted
#         idx = random.randint(0, alignments.size(0) - 1)
#         self.add_image(
#             "alignment",
#             plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
#             iteration)
#         self.add_image(
#             "mel_target",
#             plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
#             iteration)
#         self.add_image(
#             "mel_predicted",
#             plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
#             iteration)
#         self.add_image(
#             "gate",
#             plot_gate_outputs_to_numpy(
#                 gate_targets[idx].data.cpu().numpy(),
#                 F.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
#             iteration)
Code Example #29
    def log_validation(self, reduced_loss, reduced_losses, reduced_acces, model, y, y_pred, iteration, task):

        self.add_scalar('validation.loss.%s'%task, reduced_loss, iteration)
        self.add_scalar("validation.loss.%s.recon"%task, reduced_losses[0], iteration)
        self.add_scalar("validation.loss.%s.recon_post"%task, reduced_losses[1], iteration)
        self.add_scalar("validation.loss.%s.stop"%task,  reduced_losses[2], iteration)
        self.add_scalar("validation.loss.%s.contr"%task, reduced_losses[3], iteration)
        self.add_scalar("validation.loss.%s.consi"%task, reduced_losses[4], iteration)
        self.add_scalar("validation.loss.%s.spenc"%task, reduced_losses[5], iteration)
        self.add_scalar("validation.loss.%s.spcla"%task, reduced_losses[6], iteration)
        self.add_scalar("validation.loss.%s.texcl"%task, reduced_losses[7], iteration)
        self.add_scalar("validation.loss.%s.spadv"%task, reduced_losses[8], iteration)

        self.add_scalar('validation.acc.%s.spenc'%task, reduced_acces[0], iteration)
        self.add_scalar('validation.acc.%s.spcla'%task, reduced_acces[1], iteration)
        self.add_scalar('validation.acc.%s.texcl'%task, reduced_acces[2], iteration)
        
        predicted_mel, post_output, predicted_stop, alignments, \
            text_hidden, mel_hidden,  text_logit_from_mel_hidden, \
            audio_seq2seq_alignments, \
            speaker_logit_from_mel, speaker_logit_from_mel_hidden, \
            text_lengths, mel_lengths, SE_alignments = y_pred

        #predicted_mel, post_output, predicted_stop, alignments, \
        #    text_hidden, mel_hidden,  text_logit_from_mel_hidden, \
        #    audio_seq2seq_alignments, \
        #    speaker_logit_from_mel_hidden, \
        #    text_lengths, mel_lengths = y_pred

        # text_target, mel_target, spc_target, speaker_target,  stop_target  = y
        text_target, mel_target, speaker_target,  stop_target  = y

        stop_target = stop_target.reshape(stop_target.size(0), -1, int(stop_target.size(1)/predicted_stop.size(1)))
        stop_target = stop_target[:,:,0]
        #pdb.set_trace()

        # plot distribution of parameters
        #for tag, value in model.named_parameters():
        #    tag = tag.replace('.', '/')
        #    self.add_histogram(tag, value.data.cpu().numpy(), iteration)

        # plot alignment, mel target and predicted, stop target and predicted
        idx = random.randint(0, alignments.size(0) - 1)

        alignments = alignments.data.cpu().numpy()
        audio_seq2seq_alignments = audio_seq2seq_alignments.data.cpu().numpy()
        SE_alignments = SE_alignments.data.cpu().numpy()

        self.add_image(
            "%s.alignment"%task,
            plot_alignment_to_numpy(alignments[idx].T),
            iteration, dataformats='HWC')
        
        # plot more alignments
        plot_alignment(alignments[:4], self.ali_path+'/step-%d-%s.pdf'%(iteration, task))

        self.add_image(
            "%s.audio_seq2seq_alignment"%task,
            plot_alignment_to_numpy(audio_seq2seq_alignments[idx].T),
            iteration, dataformats='HWC')

        self.add_image(
            "%s.SE_alignments"%task,
            plot_alignment_to_numpy(SE_alignments[idx].T),
            iteration, dataformats='HWC')

        self.add_image(
            "%s.mel_target"%task,
            plot_spectrogram_to_numpy(mel_target[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        
        self.add_image(
            "%s.mel_predicted"%task,
            plot_spectrogram_to_numpy(predicted_mel[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')
        
        # self.add_image(
        #     "%s.spc_target"%task,
        #     plot_spectrogram_to_numpy(spc_target[idx].data.cpu().numpy()),
        #     iteration, dataformats='HWC')
        
        self.add_image(
            "%s.post_predicted"%task,
            plot_spectrogram_to_numpy(post_output[idx].data.cpu().numpy()),
            iteration, dataformats='HWC')

        self.add_image(
            "%s.stop"%task,
            plot_gate_outputs_to_numpy(
                stop_target[idx].data.cpu().numpy(),
                F.sigmoid(predicted_stop[idx]).data.cpu().numpy()),
            iteration, dataformats='HWC')