def main():
    """Evaluation entry point: parse CLI args, apply hyperparameter overrides,
    set up logging under hp.logdir, and run evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', default='', help='Path to model checkpoint')
    parser.add_argument(
        '--name',
        default='test',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hp',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    parser.add_argument('--model', default='SED_MDD')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    # BUG FIX: apply --hp overrides *before* reading hp.logdir. Previously the
    # log directory was created from the un-patched hyperparameters, so a
    # 'logdir=...' override was silently ignored.
    hp.parse(args.hp)
    run_name = args.name
    os.makedirs(hp.logdir, exist_ok=True)
    infolog.init(os.path.join(hp.logdir, 'eval_new.log'), run_name)
    # NOTE(review): `eval` shadows the builtin here — presumably a project-level
    # evaluation function imported elsewhere in this file; confirm.
    eval(args)
def main():
    """Training entry point: configure a TF session, parse CLI args and
    hyperparameters, then launch training."""
    # NOTE(review): this session is created but never used or closed; with
    # per_process_gpu_memory_fraction = 1.0 it reserves the whole GPU for the
    # lifetime of the process — confirm this is intentional before removing.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    session = tf.Session(config=config)

    def _str_to_bool(s):
        # argparse only applies `type` to values given on the command line;
        # the default (True) is passed through untouched.
        return s.lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default='E:\\SRP\\TS')
    parser.add_argument('--input', default='training\\train.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument('--name')
    parser.add_argument('--hparams', default='')
    # BUG FIX: was `type=bool`, which makes '--restore_step False' evaluate to
    # True (any non-empty string is truthy); parse the string explicitly.
    parser.add_argument('--restore_step', type=_str_to_bool, default=True)
    parser.add_argument('--summary_interval', type=int, default=1000,
                        help='每隔多少步进行一次总结')
    parser.add_argument('--checkpoint_interval', type=int, default=5000,
                        help='每隔多少步生成检查点')
    parser.add_argument('--slack_url')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow日志等级')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    train(log_dir, args)
def main():
    """Parse command-line options, set up the run's log directory under
    ./Ckpts_amused, and start training."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
    ap.add_argument('--input', default='training/train.txt')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=100,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    opts = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(opts.tf_log_level)

    # Fall back to the model name when no explicit run name was supplied.
    chosen_name = opts.name or opts.model
    out_dir = os.path.join('./Ckpts_amused', 'logs-%s' % chosen_name)
    os.makedirs(out_dir, exist_ok=True)
    infolog.init(os.path.join(out_dir, 'train.log'), chosen_name)
    hparams.parse(opts.hparams)
    train(out_dir, opts)
def main():
    """Entry point for CTC inference: parse options, prepare logging, and run
    the CTC model."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.getcwd())
    ap.add_argument('--input', default='training_data/train.txt')
    ap.add_argument('--model', default='first_ctc')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=1,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--sample_file', type=str)
    ap.add_argument('--label_file', type=str)
    ap.add_argument('--seg_file', type=str)
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)

    run_name = cli.model
    log_dir = os.path.join(cli.base_dir, 'predictors/logs_%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'inference.log'), run_name)
    # hparams.parse(cli.hparams)  # FIXME: hyperparameter overrides are disabled
    run_ctc(log_dir, cli)
def main():
    """Training entry point: select a GPU, apply hyperparameter overrides, and
    launch training in a log directory named after the run and attention type."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.path.expanduser('~/tacotron/Tacotron2/'))
    ap.add_argument('--input', default='training/train.txt')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--slack_url',
                    help='Slack webhook URL to get periodic reports.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--git', action='store_true',
                    help='If set, verify that the client is clean.')
    ap.add_argument('--gpu', default='1')
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = cli.gpu

    run_name = cli.name or cli.model
    hparams.parse(cli.hparams)
    # The attention type becomes part of the log-dir name so runs with
    # different attention mechanisms don't collide.
    attention_name = hparams.attention_type
    print(attention_name)
    log_dir = os.path.join(cli.base_dir, 'logs-%s-%s' % (run_name, attention_name))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    train(log_dir, cli)
def prepare_run(args):
    """Apply hyperparameter overrides, set the TF log level, and create the
    run's log directory.

    Returns:
        (log_dir, modified_hp): the log-directory path and the parsed
        hyperparameter object returned by ``hparams.parse``.
    """
    overridden = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_dir = os.path.join(args.base_dir, 'logs-{}'.format(args.name))
    os.makedirs(run_dir, exist_ok=True)
    infolog.init(os.path.join(run_dir, 'Terminal_train_log'), args.name)
    return run_dir, overridden
def main():
    """Training entry point with extra Chainer-style runtime flags (gpu, epoch,
    resume, out): parse options, set up logging, and start training."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
    ap.add_argument('--input', default='training/train.txt')  # metadata file
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--slack_url',
                    help='Slack webhook URL to get periodic reports.')
    ap.add_argument('--git', action='store_true',
                    help='If set, verify that the client is clean.')
    # Additional runtime flags.
    ap.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU ID (negative value indicates CPU)')
    ap.add_argument('--epoch', '-e', type=int, default=20,
                    help='Number of sweeps over the dataset to train')
    ap.add_argument('--resume', '-r', default='',
                    help='Resume the training from snapshot')
    ap.add_argument('--out', '-o', default='result',
                    help='Directory to output the result')
    cli = ap.parse_args()

    run_name = cli.name or cli.model
    log_dir = os.path.join(cli.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    parse_hparams(cli.hparams)
    train(log_dir, cli)
def main():
    """Training entry point: parse CLI options (including GPU list and dataset
    selection) and hand off to train().

    An unused nested ``_str_to_bool`` helper was removed — no argument
    definition referenced it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default='./logs/')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument(
        '--hparams',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    parser.add_argument('--restore_step', type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument(
        '--GPUs_id',
        default='[0]',
        help='The GPUs\' id list that will be used. Default is 0')
    parser.add_argument('--description', default=None,
                        help='description of the model')
    parser.add_argument('--train_data', type=str, default='THCHS',
                        help='training datas to be used, comma-separated')
    parser.add_argument('--data_type', type=str, default='npy',
                        help='tfrecord or npy')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    # NOTE(review): when --description is omitted the directory name becomes
    # 'logs-<model>-None' — confirm that is intended.
    log_dir = os.path.join(args.base_dir,
                           'logs-%s-%s' % (args.model, args.description))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'))
    hparams.parse(args.hparams)
    train(log_dir, args)
def main():
    """Training entry point with optional transfer learning: when a transfer
    directory is given, the checkpoint from that run is handed to train()."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('/mnt1/tacotron'))
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hparams',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    parser.add_argument(
        '--transfer_dir',
        default='',
        help='Directory from which to get checkpoint from transfer')
    parser.add_argument('--transfer_run', default='',
                        help='Name of the run from which to get checkpoint')
    parser.add_argument('--restore_step', type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git', action='store_true',
                        help='If set, verify that the client is clean.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    # BUG FIX: ckpt_dir was unbound whenever --transfer_dir was empty, yet it
    # was unconditionally passed to train(), raising NameError. Default to
    # None (no transfer). TODO(review): confirm train() accepts
    # trans_ckpt_dir=None for the no-transfer case.
    ckpt_dir = None
    if len(args.transfer_dir):
        ckpt_dir = os.path.join(args.transfer_dir, 'logs-%s' % args.transfer_run)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    train(log_dir, args, trans_ckpt_dir=ckpt_dir)
def main():
    """Training entry point for bznsyp/ljspeech datasets: parse CLI options,
    set up logging, and start training."""

    def _str_to_bool(s):
        # argparse only applies `type` to values given on the command line;
        # defaults are passed through untouched.
        return s.lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('.'))
    parser.add_argument('--dataset', default='bznsyp',
                        choices=['bznsyp', 'ljspeech'])
    parser.add_argument('--model', default='tacotron')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hparams',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    # BUG FIX: both flags used `type=bool`, so '--restore_step False' and
    # '--restore_decoder False' evaluated to True (any non-empty string is
    # truthy); parse the string explicitly instead.
    parser.add_argument('--restore_step', type=_str_to_bool, default=True,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--restore_decoder', type=_str_to_bool, default=False,
                        help='if set, restore the decoder weights')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git', action='store_true',
                        help='If set, verify that the client is clean.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    train(log_dir, args)
def main():
    """Minimal training entry point: parse a few options, create the log
    directory, and start training."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default='./')
    ap.add_argument('--input', default='training/train.txt')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--restore_step', type=int)
    ap.add_argument('--summary_interval', type=int, default=100)
    ap.add_argument('--checkpoint_interval', type=int, default=1000)
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    run_name = cli.model
    log_dir = os.path.join(cli.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name)
    train(log_dir, cli)
def main():
    """Training entry point (Songbird fork, with a VGG19 feature extractor):
    parse CLI options, set up logging, and start training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('~/Songbird'))
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument('--vgg19_pretrained_model',
                        default='training/vgg19/vgg19.npy')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hparams',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    parser.add_argument('--restore_step', type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    # SECURITY FIX: the default used to be a hard-coded Slack webhook URL —
    # a leaked credential checked into source control. The URL must now be
    # supplied on the command line (and the old webhook should be revoked).
    parser.add_argument(
        '--slack_url',
        help='Slack web-hook URL to get periodic reports.',
        default=None)
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git', action='store_true',
                        help='If set, verify that the client is clean.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    train(log_dir, args)
def main():
    """Training entry point for ttsGAN: reads both a positive and a negative
    metadata file, then starts adversarial training."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.getcwd())
    # Positive and negative example metadata are kept in separate files.
    ap.add_argument('--input_pos', default='training/train-pos.txt')
    ap.add_argument('--input_neg', default='training/train-neg.txt')
    ap.add_argument('--model', default='ttsGAN')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--slack_url', default=None)
    ap.add_argument('--git', default=False)
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)

    run_name = cli.name or cli.model
    log_dir = os.path.join(cli.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    hparams.parse(cli.hparams)
    train(log_dir, cli)
def main():
    """Training entry point for SED_MDD: parse CLI args, apply hyperparameter
    overrides, set up logging under hp.logdir, and start training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='test',
                        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument('--hp', default='',
                        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--model', default='SED_MDD')
    parser.add_argument('--restore_step', type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    #os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    run_name = args.name
    # BUG FIX: apply --hp overrides *before* reading hp.logdir. Previously the
    # log directory was taken from the un-patched hyperparameters, so a
    # 'logdir=...' override was silently ignored.
    hp.parse(args.hp)
    log_dir = hp.logdir
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name)
    train(log_dir, args)
def main():
    """Training entry point (keithito-tacotron fork): parse options, set up
    logging, and start training."""
    ap = argparse.ArgumentParser()
    # Runtime options.
    ap.add_argument('--base_dir',
                    default=os.path.expanduser('~/Work/Projects/keithito-tacotron'))
    ap.add_argument('--input', default='training/train.txt')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--slack_url',
                    help='Slack webhook URL to get periodic reports.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--git', action='store_true',
                    help='If set, verify that the client is clean.')
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)

    run_name = cli.name or cli.model
    log_dir = os.path.join(cli.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    hparams.parse(cli.hparams)
    train(log_dir, cli)
def main():
    """Standard training entry point: parse options, create the log directory,
    initialize logging, apply hyperparameter overrides, and train."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
    ap.add_argument('--input', default='training/train.txt')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--slack_url',
                    help='Slack webhook URL to get periodic reports.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--git', action='store_true',
                    help='If set, verify that the client is clean.')
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)

    run_name = cli.name or cli.model
    log_dir = os.path.join(cli.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    hparams.parse(cli.hparams)
    train(log_dir, cli)
def main():
    """Distributed-training entry point: standard options plus
    tf.train.ClusterSpec / Server flags (ps/worker hosts, job name, task index).
    Unknown flags are tolerated via parse_known_args."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
    ap.add_argument('--input', default='training/train.txt')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--slack_url',
                    help='Slack webhook URL to get periodic reports.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--git', action='store_true',
                    help='If set, verify that the client is clean.')
    # Registered converter so flags may declare type="bool".
    ap.register("type", "bool", lambda v: v.lower() == "true")
    # Flags for defining the tf.train.ClusterSpec
    ap.add_argument("--ps_hosts", type=str, default="",
                    help="Comma-separated list of hostname:port pairs")
    ap.add_argument("--worker_hosts", type=str, default="",
                    help="Comma-separated list of hostname:port pairs")
    ap.add_argument("--job_name", type=str, default="",
                    help="One of 'ps', 'worker'")
    # Flags for defining the tf.train.Server
    ap.add_argument("--task_index", type=int, default=0,
                    help="Index of task within the job")
    cli, _leftover = ap.parse_known_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)

    run_name = cli.name or cli.model
    log_dir = os.path.join(cli.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    hparams.parse(cli.hparams)
    train(log_dir, cli)
def main():
    """Multi-dataset training entry point: standard options plus a GPU id
    list, a model description, dataset selection, and a batch-size override."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--base_dir', default=os.path.expanduser('./'))
    ap.add_argument('--input', default='training')
    ap.add_argument('--model', default='tacotron')
    ap.add_argument('--name',
                    help='Name of the run. Used for logging. Defaults to model name.')
    ap.add_argument('--hparams', default='',
                    help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    ap.add_argument('--restore_step', type=int,
                    help='Global step to restore from checkpoint.')
    ap.add_argument('--summary_interval', type=int, default=100,
                    help='Steps between running summary ops.')
    ap.add_argument('--checkpoint_interval', type=int, default=1000,
                    help='Steps between writing checkpoints.')
    ap.add_argument('--slack_url',
                    help='Slack webhook URL to get periodic reports.')
    ap.add_argument('--tf_log_level', type=int, default=1,
                    help='Tensorflow C++ log level.')
    ap.add_argument('--git', action='store_true',
                    help='If set, verify that the client is clean.')
    ap.add_argument('--GPUs_id', default='[0]',
                    help='The GPUs\' id list that will be used. Default is 0')
    ap.add_argument('--description', default=None,
                    help='description of the model')
    # e.g. "['npy_vctk', 'npy_ljspeech']"
    ap.add_argument('--datasets', default="['npy_ljspeech','npy_vctk']",
                    help='the datasets used for training')
    ap.add_argument('--batch_size', default=None, type=int, help='batch_size')
    cli = ap.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(cli.tf_log_level)

    run_name = cli.name or cli.model
    log_dir = os.path.join(cli.base_dir,
                           'logs-%s-%s' % (run_name, cli.description))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, cli.slack_url)
    hparams.parse(cli.hparams)
    train(log_dir, cli)
def train(log_dir, args):
    """Run the training loop: build the model from metadata, optimize for
    args.max_iter steps, and periodically checkpoint plus dump audio/alignment.

    Args:
        log_dir: ignored/recomputed — see NOTE below.
        args: parsed CLI namespace; reads name, model, base_dir, slack_url,
            input, data_dir, max_iter and checkpoint_interval.
    """
    # NOTE(review): the log_dir parameter is immediately shadowed by a value
    # recomputed from args — the caller's argument is effectively ignored.
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    checkpoint_path = os.path.join(log_dir, 'model.ckpt')
    # Metadata rows are pipe-separated; sorting by the third field
    # (presumably a length/duration column — TODO confirm) groups
    # similarly-sized examples.
    with open(args.input, encoding='utf-8') as f:
        metadata = [row.strip().split('|') for row in f]
        metadata = sorted(metadata, key=lambda x: x[2])
    data_element = get_dataset(metadata, args.data_dir, hparams)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Build the model graph, its loss, and its optimizer under one scope.
    with tf.variable_scope('model') as scope:
        model = create_model(args.model, hparams)
        model.initialize(data_element['input'], data_element['input_lengths'],
                         data_element['mel_targets'],
                         data_element['linear_targets'])
        model.add_loss()
        model.add_optimizer(global_step)
    saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for _ in range(int(args.max_iter)):
        start_time = time.time()
        # One optimization step; fetching the losses alongside model.optimize
        # runs them in the same session call.
        step, mel_loss, lin_loss, loss, opt = sess.run([
            global_step, model.mel_loss, model.linear_loss, model.loss,
            model.optimize
        ])
        end_time = time.time()
        message = 'Step %7d [%.03f sec/step, loss = %.05f (mel : %.05f + lin : %.05f)]' % (
            step, end_time - start_time, loss, mel_loss, lin_loss)
        log(message)
        # Abort on divergence: an exploding or NaN loss cannot recover.
        if loss > 100 or math.isnan(loss):
            log('Loss exploded to %.05f at step %d!' % (loss, step))
            raise Exception('Loss Exploded')
        if step % args.checkpoint_interval == 0:
            log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
            saver.save(sess, checkpoint_path, global_step=step)
            log('Saving audio and alignment...')
            # Sample the first batch element for a qualitative snapshot.
            input_seq, spectrogram, alignment = sess.run([
                model.inputs[0], model.linear_outputs[0], model.alignments[0]
            ])
            waveform = audio.inv_spectrogram(spectrogram.T)
            audio.save_wav(waveform,
                           os.path.join(log_dir, 'step-%d-audio.wav' % step))
            plot.plot_alignment(
                alignment,
                os.path.join(log_dir, 'step-%d-align.png' % step),
                info='%s, %s, step=%d, loss=%.5f' %
                (args.model, time_string(), step, loss))
            log('Input: %s' % sequence_to_text(input_seq))
def main():
    """Training entry point that also snapshots hparams, CLI args, and the
    symbol table as JSON files into the log directory before training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser(r'E:\data\logs'))
    parser.add_argument('--input', default=r'E:\data\biaobei\specs\train-blank.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument(
        '--name',
        default='biaobei-blank',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hparams',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    parser.add_argument('--restore_step', default=6000, type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument(
        '--summary_interval',
        type=int,
        default=200,  # 100,
        help='Steps between running summary ops.')
    parser.add_argument(
        '--checkpoint_interval',
        type=int,
        default=2000,  # 1000,
        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git', action='store_true',
                        help='If set, verify that the client is clean.')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    run_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
    hparams.parse(args.hparams)
    # BUG FIX: the three file handles were opened inline and never closed,
    # leaking descriptors and risking unflushed output; manage them with
    # `with` blocks instead.
    with open(os.path.join(log_dir, 'hparams.json'), 'wt', encoding='utf8') as f:
        json.dump(hparams.values(), f, indent=4)
    with open(os.path.join(log_dir, 'args.json'), 'wt', encoding='utf8') as f:
        json.dump(args.__dict__, f, indent=4)
    with open(os.path.join(log_dir, 'symbols.json'), 'wt', encoding='utf8') as f:
        json.dump(symbols, f, indent=4)
    train(log_dir, args)
def main():
    """Training entry point with GPU-lock management: select a compute device,
    acquire a GPU lock when available, train, and always release the lock."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help="tacotron install-dir (Def: %(default)s)")
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--model', default='tacotron')
    parser.add_argument(
        '--name',
        help='Name of the run. Used for logging. Defaults to model name.')
    parser.add_argument(
        '--hparams',
        default='',
        help=
        'Hyperparameter overrides as a comma-separated list of name=value pairs'
    )
    parser.add_argument('--restore_step', type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval', type=int, default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval', type=int, default=1000,
                        help='Steps between writing checkpoints.')
    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument('--tf_log_level', type=int, default=1,
                        help='Tensorflow C++ log level.')
    parser.add_argument('--git', action='store_true',
                        help='If set, verify that the client is clean.')
    device_arg = parser.add_mutually_exclusive_group()
    device_arg.add_argument(
        "--cpu",
        action="store_true",
        help=
        'use cpu for calculations, this is the default on sytems without available gpu card (Def: %(default)s)'
    )
    device_arg.add_argument(
        '-d',
        "--gpu_device",
        default=None,
        nargs="?",
        const=-1,
        type=int,
        help=
        'use gpu device, use without argument for arbitrary gpu, this is the default for systems with gpu (Def: %(default)s)'
    )
    parser.add_argument(
        "--soft_lock",
        action="store_true",
        help='only request a soft lock on the GPU (Def: %(default)s)')
    args = parser.parse_args()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)

    comp_device = None
    gpu_device_id = None
    if not args.cpu:
        # gpu_ids will be None on systems without gpu nvidia card
        gpu_ids = gpl.board_ids()
        if gpu_ids is not None:
            if args.gpu_device is None or args.gpu_device == -1:
                gpu_device_id = -1  # arbitrary free GPU
            elif args.gpu_device in gpu_ids:
                gpu_device_id = args.gpu_device
            else:
                raise RuntimeError(
                    "train_onsets::error:: selected gpu device if {} is not free, select an id from {}"
                    .format(args.gpu_device, gpu_ids))
        elif args.gpu_device is not None:
            raise RuntimeError(
                "train_onsets::error:: no gpu devices available on thsi system, you cannot select a gpu"
            )
    print("gpu_device_id", gpu_device_id)
    # BUG FIX: gpu_id_locked was unbound if get_gpu_lock() raised, making the
    # `finally` clause fail with NameError and mask the original exception.
    gpu_id_locked = -1
    try:
        # now we lock a GPU because we will need one
        if gpu_device_id is not None:
            gpu_id_locked = get_gpu_lock(gpu_device_id=gpu_device_id,
                                         soft=args.soft_lock)
            # obtainlock positions CUDA_VISIBLE_DEVICES such that only the
            # selected GPU is visibale, therefore we need now select /GPU:0
            comp_device = "/GPU:0"
        else:
            comp_device = "/cpu:0"
            os.environ['CUDA_VISIBLE_DEVICES'] = ""
        run_name = args.name or args.model
        log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
        os.makedirs(log_dir, exist_ok=True)
        infolog.init(os.path.join(log_dir, 'train.log'), run_name,
                     args.slack_url)
        hparams.parse(args.hparams)
        train(log_dir, args)
    except Exception as ex:
        import traceback
        tb = traceback.format_exc()
        print("{0} received exception::".format(sys.argv[0]),
              str(ex), tb, file=sys.stderr)
    finally:
        # BUG FIX: `"GPU" in comp_device` raised TypeError when comp_device was
        # still None (e.g. the lock acquisition itself failed); guard it.
        if comp_device is not None and ("GPU" in comp_device) and (gpu_id_locked >= 0):
            gpl.free_lock(gpu_id_locked)