Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
    parser.add_argument('--log_dir', default=os.path.expanduser('~/tacotron'))
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--variant', default='tacotron')
    parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
    parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
    parser.add_argument('--num_steps', type=int, default=100000, help='Maximum number of steps to run training for.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.variant
    log_dir = os.path.join(args.log_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    log('Initialised log file')
    hparams.parse(args.hparams)
    train(log_dir, args, args.input)
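All of the examples on this page call into the same small infolog helper module used by keithito's Tacotron implementation and its forks. As a reference point, here is a minimal sketch of the interface the snippets rely on; it is simplified (no Slack reporting, even though several snippets pass a slack_url) and is not the verbatim module:

import atexit
from datetime import datetime

_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None


def init(filename, run_name, slack_url=None):
    # Open the log file in append mode and record which run is starting.
    global _file
    _close_logfile()
    _file = open(filename, 'a', encoding='utf-8')
    _file.write('\n' + '-' * 50 + '\n')
    _file.write('Starting new training run: %s\n' % run_name)


def log(msg, slack=False):
    # Echo to stdout and mirror the message into the log file with a timestamp.
    print(msg)
    if _file is not None:
        _file.write('[%s]  %s\n' % (datetime.now().strftime(_format)[:-3], msg))


def _close_logfile():
    global _file
    if _file is not None:
        _file.close()
        _file = None


atexit.register(_close_logfile)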
Example #2
def setup_log(log_path, checkpoint_path, input_path):
    infolog.init(log_path, 'emt4_disc', None)
    log('hi')
    log('Checkpoint path: {}'.format(checkpoint_path))
    log('Loading training data from: {}'.format(input_path))
    log('Using model: {}'.format('emt4_disc'))
    log(hparams_debug_string())
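Example #2 finishes by logging hparams_debug_string(). In the Tacotron codebases these snippets come from, that helper simply pretty-prints the current hyperparameter values; a sketch of it, assuming a tf.contrib.training.HParams-style object named hparams, looks like:

def hparams_debug_string():
    # Render every hyperparameter as an indented 'name: value' line,
    # sorted by name, so the full configuration lands in the train log.
    values = hparams.values()
    hp = ['  %s: %s' % (name, values[name]) for name in sorted(values)]
    return 'Hyperparameters:\n' + '\n'.join(hp)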
Example #3
def prepare_run(args):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
	run_name = args.name or args.model
	log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
	os.makedirs(log_dir, exist_ok=True)
	infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name, args.slack_url)
	return log_dir, modified_hp
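Because Examples #3-#5 all take a parsed argparse result, prepare_run can be exercised without a full CLI by handing it a hand-built namespace. The attribute names below are the six the function body reads; the values are made up for illustration:

from argparse import Namespace

# Hypothetical arguments; prepare_run only touches these six attributes.
args = Namespace(hparams='', tf_log_level=1, name=None, model='tacotron',
                 base_dir='/tmp/tacotron', slack_url=None)
log_dir, modified_hp = prepare_run(args)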
Example #4
def prepare_run(args):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
	run_name = args.name or args.model
	log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
	os.makedirs(log_dir, exist_ok=True)
	infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name)
	return log_dir, modified_hp
Example #5
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)
    if args.hparams_json:
        import json
        with open(args.hparams_json) as hp_json_file:
            hp_json = json.dumps(json.load(hp_json_file)['hparams'])
            modified_hp = modified_hp.parse_json(hp_json)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name,
                 args.slack_url)
    return log_dir, modified_hp
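Example #5 layers a JSON file on top of the comma-separated overrides, and it reads the overrides from a top-level 'hparams' key. A file it could consume might be produced like this (the hyperparameter names are hypothetical):

import json

# Hypothetical override file for --hparams_json; the snippet above expects
# the actual overrides to be nested under an 'hparams' key.
overrides = {'hparams': {'batch_size': 32, 'initial_learning_rate': 0.001}}
with open('hparams_override.json', 'w') as f:
    json.dump(overrides, f, indent=2)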
Example #6
def main():
    # init log
    log_dir = os.path.join('checkpoint', data_configs.speaker)
    infolog.init(os.path.join(log_dir, 'Terminal_train_log'),
                 model_configs.model_name, None)
    # train model
    if model_configs.model_name == 'TDNN':
        model = TDNN(data_configs, model_configs, training_configs)
        model.initialize()
        model.train()
    elif model_configs.model_name == 'TDNN_LSTM':
        model = TDNN_LSTM(data_configs, model_configs, training_configs)
        model.initialize()
        model.train()
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
    parser.add_argument('--log_dir', default=os.path.expanduser('~/tacotron'))
    parser.add_argument('--tacotron_input', default='training/train.txt')
    parser.add_argument('--wavenet_input',
                        default='tacotron_output/gta/map.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument('--variant', default='tacotron')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hparams',
        default='',
        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--restore_step',
                        type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval',
                        type=int,
                        default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval',
                        type=int,
                        default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level',
                        type=int,
                        default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git',
                        action='store_true',
                        help='If set, verify that the client is clean.')
    parser.add_argument('--num_steps',
                        type=int,
                        default=100000,
                        help='Maximum number of steps to run training for.')
    parser.add_argument(
        '--wavenet_num_steps',
        type=int,
        default=500000,
        help='Maximum number of steps to run wavenet training for.')
    parser.add_argument('--eal_dir', default='')
    parser.add_argument('--eal_ckpt', default='')
    parser.add_argument('--eal_ft',
                        default=False,
                        action='store_true',
                        help='load the weights, not Adam / BatchNorm history')
    parser.add_argument('--eal_trainAlign',
                        default=False,
                        action='store_true',
                        help='train attention mechanism')
    parser.add_argument('--eal_trainJoint',
                        default=False,
                        action='store_true',
                        help='train all weights in EAL mode with joint cost')
    parser.add_argument('--eal_alignScale',
                        type=float,
                        default=1.0,
                        help='scale factor for loss_align')
    args = parser.parse_args()

    accepted_models = ['tacotron', 'wavenet']

    if args.model not in accepted_models:
        raise ValueError(
            'please enter a valid model to train: {}'.format(accepted_models))

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.variant
    log_dir = os.path.join(args.log_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    log('Initialised log file')
    hparams.parse(args.hparams)

    if args.model == 'tacotron':
        train_tacotron(log_dir, args, args.tacotron_input)
    elif args.model == 'wavenet':
        wavenet_train(log_dir, args, hparams, args.wavenet_input)
    else:
        raise ValueError('Provided model {} is unknown! Valid models: {}'.format(
            args.model, accepted_models))
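Assuming Example #7's main() lives in a train.py next to the two train functions it dispatches to, a WaveNet run can be simulated in-process; the argument values here are made up:

import sys

# Hypothetical invocation: patch argv before calling Example #7's main(),
# exactly as 'python train.py --model wavenet ...' would set it up.
sys.argv = ['train.py', '--model', 'wavenet', '--name', 'wavenet_test']
main()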
Example #8
import os

import torch
from torch.backends import cudnn

from tensorboardX import SummaryWriter
from matplotlib import cm
from warnings import warn

from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_raw, is_scalar_input
from wavenet_vocoder.mixture import discretized_mix_logistic_loss
from wavenet_vocoder.mixture import sample_from_discretized_mix_logistic, sample_from_gaussian

import audio
from hparams import hparams, hparams_debug_string
import infolog

log_dir = "log_wavenet_single"
run_name = "wavenet_single_gaussian"
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name)
log = infolog.log

global_step = 0
global_test_step = 0
global_epoch = 0
use_cuda = torch.cuda.is_available()
if use_cuda:
    cudnn.benchmark = False


def sanity_check(model, c, g):
    if model.has_speaker_embedding():
        if g is None:
            raise RuntimeError(
                "WaveNet expects speaker embedding, but speaker-id is not provided"
Example #9
        # Tail of an Estimator model_fn; everything above this point is
        # truncated in the original snippet. A LoggingTensorHook prints the
        # loss every 1000 steps while the Estimator trains.
        logging_hook = tf.train.LoggingTensorHook({'loss': loss},
                                                  every_n_iter=1000)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          train_op=train_op,
                                          loss=loss,
                                          training_hooks=[logging_hook])


#load_path = None  # start a new training run
load_path = 'hccho-ckpt\\DCT-2019-08-03_08-59-35'
#####

load_path, restore_path, checkpoint_path = prepare_dirs(hp, load_path)

#### write the train log file
log_path = os.path.join(load_path, 'train.log')
infolog.init(log_path, hp.model_name)

infolog.set_tf_log(load_path)  # route Estimator log output into the infolog file
tf.logging.set_verbosity(tf.logging.INFO)  # required for the training log to be printed

# load data
inputs, targets, word_to_index, index_to_word, VOCAB_SIZE, INPUT_LENGTH, OUTPUT_LENGTH = load_data(
    hp)  # (50000, 29), (50000, 12)
hp.add_hparam('VOCAB_SIZE', VOCAB_SIZE)
hp.add_hparam('INPUT_LENGTH', INPUT_LENGTH)  # 29
hp.add_hparam('OUTPUT_LENGTH', OUTPUT_LENGTH)  # 11

train_input, test_input, train_target, test_target = train_test_split(
    inputs, targets, test_size=0.1, random_state=13371447)

datfeeder = DataFeeder(train_input,
                       ...)  # remaining arguments truncated in the original snippet
Example #10
def prepare_run(run_name):
    log_dir = os.path.join(dirname(__file__), 'logs-{}'.format(run_name))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name)
    return log_dir
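Example #10 is the most compact variant: the run name is the only input, and the log directory is created next to the calling module. A usage sketch, with an arbitrarily chosen run name:

# Hypothetical usage: one run name in, one ready-to-use log directory out.
log_dir = prepare_run('tacotron_test')
log('Writing logs under: {}'.format(log_dir))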