Example #1
        )
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )

    elif mode == tf.estimator.ModeKeys.EVAL:

        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL, loss = loss
        )

    return estimator_spec


train_hooks = [tf.train.LoggingTensorHook(['total_loss'], every_n_iter = 1)]
train_dataset = get_dataset()

save_directory = 'speaker-split-fast-swave'

train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = save_directory,
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 3000,
    max_steps = total_steps,
    train_hooks = train_hooks,
    eval_step = 0,
)
Example #2
    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(
        [
            'loss',
            'duration_loss',
            'mel_loss_before',
            'mel_loss_after',
            'energies_loss',
            'f0s_loss',
        ],
        every_n_iter=1,
    )
]

train_dataset = get_dataset(files)

train.run_training(
    train_fn=train_dataset,
    model_fn=model_fn,
    model_dir='fastspeech2-husein-v2',
    num_gpus=1,
    log_step=1,
    save_checkpoint_step=2000,
    max_steps=total_steps,
    eval_fn=None,
    train_hooks=train_hooks,
)
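
The snippets above start partway through their model_fn definitions, and the names passed to tf.train.LoggingTensorHook ('total_loss' in Example #1; 'loss', 'duration_loss', and so on here) must exist as named tensors in the graph. Below is a minimal, hypothetical skeleton of such a model_fn; the loss and optimizer are placeholders, not the code of the actual models.

import tensorflow as tf  # TF 1.x API, as in the examples on this page


def model_fn(features, labels, mode, params):
    # Placeholder loss; the real examples compute model-specific losses.
    loss = tf.reduce_mean(tf.square(features['inputs']))

    # Naming the tensor is what makes it visible to
    # LoggingTensorHook(['total_loss'], ...).
    tf.identity(loss, name='total_loss')

    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        # Hypothetical optimizer; the snippets above do not show theirs.
        train_op = tf.train.AdamOptimizer(1e-4).minimize(
            loss, global_step=global_step
        )
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, train_op=train_op
        )

    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL, loss=loss
        )

    return estimator_spec
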
Example #3
    elif mode == tf.estimator.ModeKeys.EVAL:

        estimator_spec = tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={'accuracy': accuracy},
        )

    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(['train_accuracy', 'train_loss'],
                               every_n_iter=1)
]

train_dataset = get_dataset()

save_directory = 'output-speakernet-speaker-count'

train.run_training(
    train_fn=train_dataset,
    model_fn=model_fn,
    model_dir=save_directory,
    num_gpus=1,
    log_step=1,
    save_checkpoint_step=25000,
    max_steps=300_000,
    train_hooks=train_hooks,
)
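
Example #3 plugs an accuracy metric into eval_metric_ops. The snippet does not show how accuracy is constructed; one common choice, assuming labels and logits tensors exist, is the TF 1.x streaming metric, which returns the (value, update_op) pair that eval_metric_ops expects:

# Hypothetical tensors; the real model_fn derives them from the model outputs.
predicted_classes = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(labels=labels, predictions=predicted_classes)

# The update op can also be named and logged during training, which would
# match the 'train_accuracy' name used by the logging hook below.
tf.identity(accuracy[1], name='train_accuracy')
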
Example #4
            loss = loss,
            eval_metric_ops = {
                'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
                    logits, targets_int32, seq_lens
                )
            },
        )

    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(
        ['train_accuracy', 'train_loss'], every_n_iter = 1
    )
]
train_dataset = get_dataset('training-librispeech/data/librispeech-train-*')
dev_dataset = get_dataset('training-librispeech/data/librispeech-dev-*')

train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = 'asr-quartznet-librispeech-adam',
    num_gpus = 2,
    log_step = 1,
    save_checkpoint_step = parameters['lr_policy_params']['warmup_steps'],
    max_steps = parameters['lr_policy_params']['decay_steps'],
    eval_fn = dev_dataset,
    train_hooks = train_hooks,
)
Example #5
    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(
        [
            'loss',
            'stop_token_loss',
            'mel_loss_before',
            'mel_loss_after',
            'loss_att',
        ],
        every_n_iter = 1,
    )
]

train_dataset = get_dataset(files['train'])
dev_dataset = get_dataset(files['test'])

train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = 'tacotron2-female-3',
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 5000,
    max_steps = num_train_steps,
    eval_fn = dev_dataset,
    train_hooks = train_hooks,
)
Example #6
        )

    elif mode == tf.estimator.ModeKeys.EVAL:

        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL, loss = loss
        )

    return estimator_spec


train_hooks = [tf.train.LoggingTensorHook(['train_loss'], every_n_iter = 1)]
train_dataset = get_dataset(
    '../speech-bahasa/bahasa-asr/data/bahasa-asr-train-*'
)
dev_dataset = get_dataset(
    '../speech-bahasa/bahasa-asr-test/data/bahasa-asr-dev-*'
)

train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = 'asr-base-conformer-transducer',
    num_gpus = 2,
    log_step = 1,
    save_checkpoint_step = 5000,
    max_steps = 500_000,
    eval_fn = dev_dataset,
    train_hooks = train_hooks,
)
Example #7
    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(
        [
            'loss',
            'stop_token_loss',
            'mel_loss_before',
            'mel_loss_after',
            'loss_att',
        ],
        every_n_iter = 1,
    )
]

train_dataset = get_dataset(files)

train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = 'tacotron2-case-haqkiem',
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 2000,
    max_steps = 100000,
    eval_fn = None,
    train_hooks = train_hooks,
)
Example #8
            eval_metric_ops={
                'accuracy':
                ctc.metrics.ctc_sequence_accuracy_estimator(
                    logits, targets_int32, seq_lens)
            },
        )

    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(['train_accuracy', 'train_loss'],
                               every_n_iter=1)
]
train_dataset = get_dataset(
    '../speech-bahasa/bahasa-asr/data/bahasa-asr-train-*')
dev_dataset = get_dataset(
    '../speech-bahasa/bahasa-asr-test/data/bahasa-asr-dev-*')

train.run_training(
    train_fn=train_dataset,
    model_fn=model_fn,
    model_dir='asr-mini-jasper-ctc',
    num_gpus=2,
    log_step=1,
    save_checkpoint_step=5000,
    max_steps=parameters['lr_policy_params']['decay_steps'],
    eval_fn=dev_dataset,
    train_hooks=train_hooks,
)
Example #9
        )

    return estimator_spec


train_hooks = [
    tf.train.LoggingTensorHook(
        ['train_accuracy', 'train_loss'], every_n_iter = 1
    )
]

train_files = glob('vad2/data/vad-train-*') + glob('noise/data/vad-train-*')
train_dataset = get_dataset(train_files, batch_size = 32)

dev_files = glob('vad2/data/vad-dev-*') + glob('noise/data/vad-dev-*')
dev_dataset = get_dataset(dev_files, batch_size = 16)

save_directory = 'output-inception-v4-vad'

train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = save_directory,
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 25000,
    max_steps = epochs,
    eval_fn = dev_dataset,
    train_hooks = train_hooks,
)
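
None of the examples include get_dataset; each call produces something that is then handed to train.run_training as train_fn or eval_fn. A hypothetical sketch for sharded TFRecord files such as vad2/data/vad-train-* follows; the feature schema, the shapes, and the assumption that run_training expects a zero-argument function building a tf.data.Dataset are all guesses, not taken from the real project.

def get_dataset(files, batch_size=32, shuffle_size=256):
    # `files` may be a glob pattern or a list of paths, matching the calls above.
    def generate():
        d = tf.data.Dataset.list_files(files, shuffle=True)
        d = d.interleave(tf.data.TFRecordDataset, cycle_length=4)

        def parse(serialized):
            # Assumed schema; the real records hold model-specific features.
            parsed = tf.io.parse_single_example(
                serialized,
                features={
                    'inputs': tf.io.VarLenFeature(tf.float32),
                    'targets': tf.io.VarLenFeature(tf.int64),
                },
            )
            return {
                'inputs': tf.sparse.to_dense(parsed['inputs']),
                'targets': tf.sparse.to_dense(parsed['targets']),
            }

        d = d.map(parse)
        d = d.shuffle(shuffle_size)
        d = d.padded_batch(
            batch_size,
            padded_shapes={'inputs': [None], 'targets': [None]},
        )
        return d.repeat().prefetch(tf.data.experimental.AUTOTUNE)

    return generate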