Example #1
        binary=binary,
        fill_gaps=fill_gaps,
        fill_mode=fill_mode,
        fix_jumps=fix_jumps)
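    # collect per-sample durations and alignments, and save (mel, phonemes, durations) for each validation sample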
    batch_size = len(val_mel)
    for i in range(batch_size):
        sample_idx = total_val_samples + i
        all_val_durations = np.append(all_val_durations, durations[i])
        new_alignments.append(final_align[i])
        sample = (unpad_mels[i], unpad_phonemes[i], durations[i])
        np.save(str(val_target_dir / f'{sample_idx}_mel_phon_dur.npy'), sample)
    total_val_samples += batch_size
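# cap duration values at 20 so the histogram buckets stay in a bounded range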
all_val_durations[all_val_durations >= 20] = 20
buckets = len(set(all_val_durations))
summary_manager.add_histogram(values=all_val_durations,
                              tag='ValidationDurations',
                              buckets=buckets)
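# log each extracted validation alignment as an image (adding batch and channel dimensions)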
for i, alignment in enumerate(new_alignments):
    summary_manager.add_image(tag='ExtractedValidationAlignments',
                              image=tf.expand_dims(
                                  tf.expand_dims(alignment, 0), -1),
                              step=i)

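# repeat the duration/alignment extraction for the training batches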
iterator = tqdm(enumerate(train_batches))
all_train_durations = np.array([])
new_alignments = []
total_train_samples = 0
for c, batch_file in iterator:
    iterator.set_description('Extracting training alignments')
    if not running_predictions:
        train_mel, train_text, train_alignments = np.load(str(batch_file),
Example #2
                pos=pos + 2)

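    # log the training loss and the current meta values (learning rate, prenet dropout, dropped heads)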
    summary_manager.display_loss(output, tag='Train')
    summary_manager.display_scalar(tag='Meta/learning_rate',
                                   scalar_value=model.optimizer.lr)
    summary_manager.display_scalar(tag='Meta/decoder_prenet_dropout',
                                   scalar_value=model.decoder_prenet.rate)
    summary_manager.display_scalar(tag='Meta/drop_n_heads',
                                   scalar_value=model.drop_n_heads)
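    # periodically plot attention heads, predicted and target mels, and duration histograms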
    if model.step % config['train_images_plotting_frequency'] == 0:
        summary_manager.display_attention_heads(output,
                                                tag='TrainAttentionHeads')
        summary_manager.display_mel(mel=output['mel'][0],
                                    tag='Train/predicted_mel')
        summary_manager.display_mel(mel=mel[0], tag='Train/target_mel')
        summary_manager.add_histogram(tag='Train/Predicted durations',
                                      values=output['duration'])
        summary_manager.add_histogram(tag='Train/Target durations',
                                      values=durations)

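    # periodically save a model checkpoint and display its path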
    if model.step % config['weights_save_frequency'] == 0:
        save_path = manager.save()
        t.display(f'checkpoint at step {model.step}: {save_path}',
                  pos=len(config['n_steps_avg_losses']) + 2)

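    # periodically run validation and display the resulting loss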
    if model.step % config['validation_frequency'] == 0:
        t.display('Validating', pos=len(config['n_steps_avg_losses']) + 3)
        val_loss, time_taken = validate(model=model,
                                        val_dataset=val_dataset,
                                        summary_manager=summary_manager)
        t.display(
            f'validation loss at step {model.step}: {val_loss} (took {time_taken}s)',