Code example #1
File: eval.py  Project: keithito/tacotron
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--checkpoint', required=True, help='Path to model checkpoint')
  parser.add_argument('--hparams', default='',
    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
  args = parser.parse_args()
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
  hparams.parse(args.hparams)
  run_eval(args)
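All of these scripts funnel the --hparams flag into hparams.parse(), which expects overrides written as a comma-separated list of name=value pairs. As a point of reference only, a minimal hypothetical stand-in for that format (not the projects' actual implementation) could look like:

# Hypothetical stand-in illustrating the "name=value,name=value" override format.
def parse_overrides(overrides):
    result = {}
    for pair in filter(None, overrides.split(',')):
        name, _, value = pair.partition('=')
        result[name.strip()] = value.strip()
    return result

print(parse_overrides('max_iters=300,outputs_per_step=5'))
# -> {'max_iters': '300', 'outputs_per_step': '5'}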
Code example #2
def main():
    args = docopt(__doc__)
    print("Command line args:\n", args)
    checkpoint_dir = args["--checkpoint-dir"]
    data_root = args["--data-root"]
    dataset_name = args["--dataset"]
    assert dataset_name in ["jsut"]
    dataset = importlib.import_module("data." + dataset_name)
    dataset_instance = dataset.instantiate(in_dir="", out_dir=data_root)

    hparams.parse(args["--hparams"])
    print(hparams_debug_string())

    tf.logging.set_verbosity(tf.logging.INFO)
    train(hparams, checkpoint_dir, dataset_instance.source_files,
          dataset_instance.target_files)
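Here docopt() builds the args dictionary from the module docstring, so the options read above (--checkpoint-dir, --data-root, --dataset, --hparams) must be declared there. A hypothetical docstring compatible with this main() (not the project's actual one) might be:

"""Training entry point (hypothetical usage block matching the options read in main()).

Usage:
    train.py --checkpoint-dir=<dir> --data-root=<dir> --dataset=<name> [--hparams=<params>]

Options:
    --checkpoint-dir=<dir>  Directory in which checkpoints are written.
    --data-root=<dir>       Root directory of the preprocessed data.
    --dataset=<name>        Dataset module under data/ (only "jsut" is accepted here).
    --hparams=<params>      Comma-separated name=value hyperparameter overrides.
"""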
Code example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default='')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--model', default='Tacotron')
    parser.add_argument('--dataset', default='LJSpeech-1.1')
    parser.add_argument('--language', default='en_US')
    parser.add_argument('--voice', default='female')
    parser.add_argument('--reader', default='mary_ann')
    parser.add_argument('--merge_books', type=bool, default=False)
    parser.add_argument('--book', default='northandsouth')
    parser.add_argument('--n_jobs', type=int, default=cpu_count())
    args = parser.parse_args()

    accepted_models = ['Tacotron', 'WaveRNN']

    if args.model not in accepted_models:
        raise ValueError(
            'please enter a valid model to train: {}'.format(accepted_models))

    modified_hp = hparams.parse(args.hparams)

    if args.model == 'Tacotron':
        preprocess(args, modified_hp)
    else:
        wavernn_preprocess(args, modified_hp)
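One caveat in this example: argparse's type=bool converts any non-empty string, including 'False', to True, which is why the other preprocessing examples pass --merge_books as a string and assert it is 'False' or 'True'. A common workaround, shown here as a hypothetical converter rather than code from the project, is:

import argparse

def str2bool(value):
    # Map textual forms to booleans explicitly instead of relying on bool().
    if value.lower() in ('true', '1', 'yes'):
        return True
    if value.lower() in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got {!r}'.format(value))

# Usage: parser.add_argument('--merge_books', type=str2bool, default=False)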
Code example #4
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir',
                        default='tacotron_log',
                        help='folder to contain inputs sentences/targets')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--text_list', default='',
                        help='Text file containing the list of texts to be synthesized. Valid if mode=eval')
    parser.add_argument('--output_dir',
                        default='taco_output/',
                        help='folder to contain synthesized mel spectrograms')
    args = parser.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    modified_hp = hparams.parse(args.hparams)
    sentences = _get_sentences(args)

    #try:
    #	checkpoint_path = tf.train.get_checkpoint_state(os.path.join(args.input_dir,'taco_pretrained')).model_checkpoint_path
    #	log('loaded model at {}'.format(checkpoint_path))
    #except:
    #	raise RuntimeError('Failed to load checkpoint at {}'.format(args.checkpoint))
    checkpoint_path = "tacotron_log/taco_pretrained/tacotron_model.ckpt-7500"
    _run_eval(args, checkpoint_path, args.output_dir, modified_hp, sentences)
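The commented-out block shows the intended checkpoint lookup; the example falls back to a hard-coded path instead. A hypothetical variant that restores the lookup (assuming the same tf, os, and args as above) and fails with a clear error when nothing is found:

# tf.train.get_checkpoint_state() returns None if no checkpoint exists under the directory.
state = tf.train.get_checkpoint_state(os.path.join(args.input_dir, 'taco_pretrained'))
if state is None:
    raise RuntimeError('No checkpoint found under {}'.format(args.input_dir))
checkpoint_path = state.model_checkpoint_path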
Code example #5
File: synthesize.py  Project: yqlihust/Tacotron_VAE
def prepare_run(args, weight):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

	run_name = args.name or args.tacotron_name or args.model
	taco_checkpoint = os.path.join('Tacotron_VAE/logs-' + run_name + weight , 'taco_' + args.checkpoint)
	return taco_checkpoint, modified_hp
Code example #6
File: train.py  Project: zuoxiang95/CRNN
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('~/crnn'))
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--model', default='crnn')
    # --name and --hparams are read below (run_name, hparams.parse), so they must be defined here.
    parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--restore_step',
                        type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100)
    parser.add_argument('--checkpoint_interval', type=int, default=1000)
    args = parser.parse_args()
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    hparams.parse(args.hparams)
    train(log_dir, args)
Code example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint',
                        required=True,
                        help='Path to model checkpoint')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    hparams.max_iters = 100
    hparams.parse(args.hparams)
    run_eval(args)
Code example #8
File: preprocess.py  Project: shaun95/WaveRNN-TF
def main():
    print('initializing preprocessing..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', default='')
    parser.add_argument('--output_dir', default='')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--dataset', default='LJSpeech-1.1')
    parser.add_argument('--language', default='en_US')
    parser.add_argument('--voice', default='female')
    parser.add_argument('--reader', default='mary_ann')
    parser.add_argument('--merge_books', default='False')
    parser.add_argument('--book', default='northandsouth')
    parser.add_argument('--output', default='training_data_dual_channels')
    #parser.add_argument('--n_jobs', type=int, default=cpu_count())
    parser.add_argument('--n_jobs', type=int, default=1)
    args = parser.parse_args()

    modified_hp = hparams.parse(args.hparams)

    assert args.merge_books in ('False', 'True')

    run_preprocess(args, modified_hp)
    print('Warning: preprocessed format is audio [T], mel [T, C] linear[T, C]')
Code example #9
def main():
    print('initializing preprocessing..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default='')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--dataset', default='emt4')
    parser.add_argument('--language', default='en_US')
    parser.add_argument('--voice', default='female')
    parser.add_argument('--reader', default='mary_ann')
    parser.add_argument('--merge_books', default='False')
    parser.add_argument('--book', default='northandsouth')
    parser.add_argument('--output', default='training_data')
    parser.add_argument('--n_jobs', type=int, default=cpu_count())
    parser.add_argument('--folder_wav_dir', default='../../data/')
    parser.add_argument('--TEST', default=False, action='store_true')
    parser.add_argument('--philly', default=False, action='store_true')
    parser.add_argument('--db', type=int, default=hparams.trim_top_db)
    args = parser.parse_args()

    modified_hp = hparams.parse(args.hparams)
    modified_hp.trim_top_db = args.db

    assert args.merge_books in ('False', 'True')

    run_preprocess(args, modified_hp)
Code example #10
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    run_name = args.name or args.model
    cent_checkpoint = os.path.join('logs-' + run_name,
                                   'cent_' + args.checkpoint)
    return cent_checkpoint, modified_hp
Code example #11
File: eval.py  Project: lishoahua/Tacotron-Chinese
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint',
                        default='logs-tacotron',
                        help='Path to model checkpoint')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    hparams.parse(args.hparams)
    run_eval(args.checkpoint)
Code example #12
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    run_name = args.name or args.tacotron_name or args.model
    taco_checkpoint = args.output_model_path
    return taco_checkpoint, modified_hp
Code example #13
File: synthesize.py  Project: ming024/vae-Tacotron-2
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    taco_checkpoint = args.taco_checkpoint
    wave_checkpoint = args.wave_checkpoint
    return taco_checkpoint, wave_checkpoint, modified_hp
Code example #14
def init_tacotron2(args):
    # t2
    print('\n#####################################')
    if args.model == 'Tacotron':
        print('\nInitialising Tacotron Model...\n')
        t2_hparams = hparams.parse(args.hparams)
        try:
            checkpoint_path = tf.train.get_checkpoint_state(
                args.taco_checkpoint).model_checkpoint_path
            log('loaded model at {}'.format(checkpoint_path))
        except:
            raise RuntimeError('Failed to load checkpoint at {}'.format(
                args.taco_checkpoint))

        output_dir = 'tacotron_' + args.output_dir
        eval_dir = os.path.join(output_dir, 'eval')
        log_dir = os.path.join(output_dir, 'logs-eval')
        print('eval_dir:', eval_dir)
        print('args.mels_dir:', args.mels_dir)

        # Create output path if it doesn't exist
        os.makedirs(eval_dir, exist_ok=True)
        os.makedirs(log_dir, exist_ok=True)
        os.makedirs(os.path.join(log_dir, 'wavs'), exist_ok=True)
        os.makedirs(os.path.join(log_dir, 'plots'), exist_ok=True)
        log(hparams_debug_string())
        synth = Synthesizer()
        synth.load(checkpoint_path, t2_hparams)

        return synth, eval_dir, log_dir
    # Only the 'Tacotron' branch defines synth/eval_dir/log_dir; fail loudly for any other model.
    raise ValueError('init_tacotron2 only supports the Tacotron model, got: {}'.format(args.model))
Code example #15
def main():
    print('initializing preprocessing...')
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default='')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--dataset', default='SIWIS')
    parser.add_argument('--language', default='en_US')
    parser.add_argument('--voice', default='female')
    parser.add_argument('--reader', default='mary_ann')
    parser.add_argument('--merge_books', default='False')
    parser.add_argument('--book', default='northandsouth')
    parser.add_argument('--output', default='training_data')
    parser.add_argument('--n_jobs', type=int, default=cpu_count())
    args = parser.parse_args()

    modified_hp = hparams.parse(args.hparams)

    assert args.merge_books in ('False', 'True')

    run_preprocess(args, modified_hp)
Code example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint',
                        required=True,
                        help='Path to model checkpoint')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--text', default='黑熊闯进王明辉家后院觅食~铁砂掌爱好者张辉表演劈砖')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    hparams.parse(args.hparams)
    run_eval(args)
Code example #17
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)
    print(hparams_debug_string())
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
    os.makedirs(log_dir, exist_ok=True)
    return log_dir, modified_hp
Code example #18
def tacotron_synthesize(args):
	hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
	output_dir = 'tacotron_' + args.output_dir
	if args.text_list != '':
		with open(args.text_list, 'rb') as f:
			sentences = list(map(lambda l:l.decode("utf-8")[:-1], f.readlines()))
	else:
		sentences = hparams.sentences

	try:
		checkpoint_path = tf.train.get_checkpoint_state(args.checkpoint).model_checkpoint_path
		print('loaded model at {}'.format(checkpoint_path))
	except:
		raise AssertionError('Cannot restore checkpoint: {}, did you train a model?'.format(args.checkpoint))

	run_eval(args, checkpoint_path, output_dir, sentences)
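Note that l.decode("utf-8")[:-1] strips each trailing newline but would also drop the last character of a final line that has no newline. A hypothetical, more robust way to build the sentence list:

# Read in text mode and let splitlines() handle newlines; skip blank lines.
with open(args.text_list, encoding='utf-8') as f:
    sentences = [line for line in f.read().splitlines() if line.strip()]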
Code example #19
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    taco_checkpoint = os.path.join('logs-' + args.model,
                                   'taco_' + args.checkpoint)

    return taco_checkpoint, modified_hp
Code example #20
def main():
    args = docopt(__doc__)
    print("Command line args:\n", args)
    checkpoint_dir = args["--checkpoint-dir"]
    source_data_root = args["--source-data-root"]
    target_data_root = args["--target-data-root"]
    selected_list_dir = args["--selected-list-dir"]
    use_multi_gpu = args["--multi-gpus"]

    if args["--hparam-json-file"]:
        with open(args["--hparam-json-file"]) as f:
            json = "".join(f.readlines())
            hparams.parse_json(json)

    hparams.parse(args["--hparams"])

    training_list = list(load_key_list("train.csv", selected_list_dir))
    validation_list = list(load_key_list("validation.csv", selected_list_dir))

    training_source_files = [os.path.join(source_data_root, f"{key}.{hparams.source_file_extension}") for key in
                             training_list]
    training_target_files = [os.path.join(target_data_root, f"{key}.{hparams.target_file_extension}") for key in
                             training_list]
    validation_source_files = [os.path.join(source_data_root, f"{key}.{hparams.source_file_extension}") for key in
                               validation_list]
    validation_target_files = [os.path.join(target_data_root, f"{key}.{hparams.target_file_extension}") for key in
                               validation_list]

    log = logging.getLogger("tensorflow")
    log.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh = logging.FileHandler(hparams.logfile)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    log.addHandler(fh)
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.logging.info(hparams_debug_string())

    train_and_evaluate(hparams,
                       checkpoint_dir,
                       training_source_files,
                       training_target_files,
                       validation_source_files,
                       validation_target_files,
                       use_multi_gpu)
Code example #21
def main():
    args = docopt(__doc__)
    print("Command line args:\n", args)
    checkpoint_dir = args["--checkpoint-dir"]
    data_root = args["--data-root"]
    dataset_name = args["--dataset"]
    assert dataset_name in ["blizzard2012", "ljspeech"]
    corpus = importlib.import_module("datasets." + dataset_name)
    corpus_instance = corpus.instantiate(in_dir="", out_dir=data_root)

    hparams.parse(args["--hparams"])
    print(hparams_debug_string())

    tf.logging.set_verbosity(tf.logging.INFO)
    train_and_evaluate(hparams, checkpoint_dir,
                       corpus_instance.training_target_files,
                       corpus_instance.validation_target_files)
Code example #22
def gst_synthesize(args, checkpoint, sentences=None, reference_mel=None):
    output_dir = "gst_" + args.output_dir
    checkpoint_path = tf.train.get_checkpoint_state(
        checkpoint).model_checkpoint_path

    log('loaded model at {}'.format(checkpoint_path))

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    hparams.parse(args.hparams)
    if args.mode == 'eval':
        return run_eval(args, checkpoint_path, output_dir, sentences,
                        reference_mel)
    elif args.mode == 'synthesis':
        return run_synthesis(args, checkpoint_path, output_dir)
    else:
        run_live(args, checkpoint_path)
Code example #23
File: train.py  Project: davidtranno1/Tacotron-3
def prepare_run(args):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
	run_name = args.name or args.model
	log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
	os.makedirs(log_dir, exist_ok=True)
	infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name, args.slack_url)
	return log_dir, modified_hp
Code example #24
File: train.py  Project: duvtedudug/Tacotron-2
def prepare_run(args):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
	run_name = args.name or args.model
	log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
	os.makedirs(log_dir, exist_ok=True)
	infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name)
	return log_dir, modified_hp
Code example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_dir',
        default=os.path.expanduser(
            'C:\\Users\\blcdec\\project\\gst-tacotron\\tacotron_imu'))
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--restore_step',
                        type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval',
                        type=int,
                        default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval',
                        type=int,
                        default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level',
                        type=int,
                        default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git',
                        action='store_true',
                        help='If set, verify that the client is clean.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    train(log_dir, args)
Code example #26
def prepare_run(args):
    modified_hp = hparams.parse(args.hparams)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

    taco_checkpoint = os.path.join(args.base_dir, 'logs-' + args.model, 'taco_pretrained')
    wave_checkpoint = os.path.join(args.base_dir, 'logs-' + args.model, 'wavernn_pretrained', 'wavernn_model.pyt')

    return taco_checkpoint, wave_checkpoint, modified_hp
Code example #27
def tacotron_synthesize(args):
    hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    output_dir = 'tacotron_' + args.output_dir

    try:
        checkpoint_path = tf.train.get_checkpoint_state(
            args.checkpoint).model_checkpoint_path
        print('loaded model at {}'.format(checkpoint_path))
    except:
        raise AssertionError(
            'Cannot restore checkpoint: {}, did you train a model?'.format(
                args.checkpoint))

    if args.mode == 'eval':
        run_eval(args, checkpoint_path, output_dir)
    else:
        run_synthesis(args, checkpoint_path, output_dir)
Code example #28
File: eval.py  Project: zldzmfoq12/Tacotron2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint',
                        required=True,
                        help='Path to model checkpoint')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--gpu', default='1')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    hparams.parse(args.hparams)
    run_eval(args)
Code example #29
def prepare_run(args):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

	run_name = args.name or args.gst_name 
	gst_checkpoint = os.path.join('logs-' + run_name, 'gst_' + args.checkpoint)
	run_name = args.name or args.wavenet_name
	wave_checkpoint = os.path.join('logs-' + run_name, 'wave_' + args.checkpoint)
	return gst_checkpoint, wave_checkpoint, modified_hp
Code example #30
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.getcwd())

    #### read both positive metadata and negative metadata
    parser.add_argument('--input_pos', default='training/train-pos.txt')
    parser.add_argument('--input_neg', default='training/train-neg.txt')

    parser.add_argument('--model', default='ttsGAN')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument('--hparams', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--restore_step',
                        type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval',
                        type=int,
                        default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval',
                        type=int,
                        default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--tf_log_level',
                        type=int,
                        default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--slack_url', default=None)
    parser.add_argument('--git', default=False)

    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    train(log_dir, args)
Code example #31
File: train.py  Project: ArwenFeng/test
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='test', help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument('--hp', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--model', default='SED_MDD')
    parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100, help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000, help='Steps between writing checkpoints.')
    parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    #os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    run_name = args.name
    log_dir = hp.logdir
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name)
    hp.parse(args.hp)
    train(log_dir, args)
Code example #32
File: synthesis.py  Project: zhangxt/FFTNet
def main():
    args = get_args()
    if args.preset is not None:
        with open(args.preset) as f:
            hparams.parse_json(f.read())

    modified_hp = hparams.parse(args.hparams)
    print(hparams_debug_string())
    synthesis(args.checkpoint_path, args.local_path, args.global_id,
              args.output_dir, modified_hp)
Code example #33
File: synthesize.py  Project: duvtedudug/Tacotron-2
def prepare_run(args):
	modified_hp = hparams.parse(args.hparams)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

	run_name = args.name or args.tacotron_name or args.model
	taco_checkpoint = os.path.join('logs-' + run_name, 'taco_' + args.checkpoint)

	run_name = args.name or args.wavenet_name or args.model
	wave_checkpoint = os.path.join('logs-' + run_name, 'wave_' + args.checkpoint)
	return taco_checkpoint, wave_checkpoint, modified_hp
Code example #34
File: train.py  Project: keithito/tacotron
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
  parser.add_argument('--input', default='training/train.txt')
  parser.add_argument('--model', default='tacotron')
  parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
  parser.add_argument('--hparams', default='',
    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
  parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
  parser.add_argument('--summary_interval', type=int, default=100,
    help='Steps between running summary ops.')
  parser.add_argument('--checkpoint_interval', type=int, default=1000,
    help='Steps between writing checkpoints.')
  parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
  parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
  parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
  args = parser.parse_args()
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
  run_name = args.name or args.model
  log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
  os.makedirs(log_dir, exist_ok=True)
  infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
  hparams.parse(args.hparams)
  train(log_dir, args)
Code example #35
File: preprocess.py  Project: duvtedudug/Tacotron-2
def main():
	print('initializing preprocessing..')
	parser = argparse.ArgumentParser()
	parser.add_argument('--base_dir', default='')
	parser.add_argument('--hparams', default='', 
		help='Hyperparameter overrides as a comma-separated list of name=value pairs')
	parser.add_argument('--dataset', default='LJSpeech-1.1')
	parser.add_argument('--language', default='en_US')
	parser.add_argument('--voice', default='female')
	parser.add_argument('--reader', default='mary_ann')
	parser.add_argument('--merge_books', default='False')
	parser.add_argument('--book', default='northandsouth')
	parser.add_argument('--output', default='training_data')
	parser.add_argument('--n_jobs', type=int, default=cpu_count())
	args = parser.parse_args()

	modified_hp = hparams.parse(args.hparams)

	assert args.merge_books in ('False', 'True')

	run_preprocess(args, modified_hp)
Code example #36
File: demo_server.py  Project: keithito/tacotron
class SynthesisResource:
  def on_get(self, req, res):
    if not req.params.get('text'):
      raise falcon.HTTPBadRequest()
    res.data = synthesizer.synthesize(req.params.get('text'))
    res.content_type = 'audio/wav'


synthesizer = Synthesizer()
api = falcon.API()
api.add_route('/synthesize', SynthesisResource())
api.add_route('/', UIResource())


if __name__ == '__main__':
  from wsgiref import simple_server
  parser = argparse.ArgumentParser()
  parser.add_argument('--checkpoint', required=True, help='Full path to model checkpoint')
  parser.add_argument('--port', type=int, default=9000)
  parser.add_argument('--hparams', default='',
    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
  args = parser.parse_args()
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
  hparams.parse(args.hparams)
  print(hparams_debug_string())
  synthesizer.load(args.checkpoint)
  print('Serving on port %d' % args.port)
  simple_server.make_server('0.0.0.0', args.port, api).serve_forever()
else:
  synthesizer.load(os.environ['CHECKPOINT'])
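For completeness, a hypothetical client for this demo server (assuming it is running locally on the default port 9000 and that the requests package is installed, neither of which the server code requires) would send a GET with a text parameter and save the audio/wav response body:

import requests

resp = requests.get('http://localhost:9000/synthesize', params={'text': 'Hello, world.'})
resp.raise_for_status()
with open('output.wav', 'wb') as f:
    f.write(resp.content)  # the response body is the synthesized audio/wav bytes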