def test_melgan_trainable(dict_g, dict_d, dict_loss):
    batch_size = 4
    batch_length = 4096
    args_g = make_melgan_generator_args(**dict_g)
    args_d = make_melgan_discriminator_args(**dict_d)
    args_g = MelGANGeneratorConfig(**args_g)
    args_d = MelGANDiscriminatorConfig(**args_d)
    generator = TFMelGANGenerator(args_g)
    discriminator = TFMelGANMultiScaleDiscriminator(args_d)

def test_hifigan_trainable(dict_g, dict_d, dict_loss):
    batch_size = 4
    batch_length = 4096
    args_g = make_hifigan_generator_args(**dict_g)
    args_d_p, args_d_s = make_hifigan_discriminator_args(**dict_d)
    args_g = HifiGANGeneratorConfig(**args_g)
    args_d_p = HifiGANDiscriminatorConfig(**args_d_p)
    args_d_s = MelGANDiscriminatorConfig(**args_d_s)
    generator = TFHifiGANGenerator(args_g)
    discriminator_p = TFHifiGANMultiPeriodDiscriminator(args_d_p)
    discriminator_s = TFMelGANMultiScaleDiscriminator(args_d_s)
    discriminator = TFHifiGANDiscriminator(discriminator_p, discriminator_s)
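
# --- Illustrative continuation (not part of the original tests) -------------
# Both trainable tests above stop right after constructing the models. A
# minimal sketch of what such a test typically verifies is a single
# adversarial update on random data. The helper below is hypothetical: it
# assumes a generator mapping [batch, frames, 80] mels to [batch, samples, 1]
# audio with a hop size of 256, and a discriminator that returns a list of
# feature-map lists whose last entry is the score, as the TFMelGAN*/TFHifiGAN*
# models above do.
import tensorflow as tf  # normally imported once at the top of the test module


def run_one_adversarial_step(generator, discriminator, batch_size=4, batch_length=4096):
    """Run one generator and one discriminator update on random inputs."""
    gen_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
    dis_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
    mels = tf.random.uniform(shape=[batch_size, batch_length // 256, 80])
    y = tf.random.uniform(shape=[batch_size, batch_length, 1])

    # generator step: LSGAN-style adversarial loss pushing fake scores toward 1.
    with tf.GradientTape() as g_tape:
        y_hat = generator(mels)
        p_hat = discriminator(y_hat)
        adv_loss = tf.reduce_mean(
            [tf.reduce_mean(tf.square(p[-1] - 1.0)) for p in p_hat]
        )
    g_grads = g_tape.gradient(adv_loss, generator.trainable_variables)
    gen_optimizer.apply_gradients(zip(g_grads, generator.trainable_variables))

    # discriminator step: real scores toward 1, fake scores toward 0.
    with tf.GradientTape() as d_tape:
        p_real = discriminator(y)
        p_fake = discriminator(tf.stop_gradient(generator(mels)))
        real_loss = tf.reduce_mean(
            [tf.reduce_mean(tf.square(p[-1] - 1.0)) for p in p_real]
        )
        fake_loss = tf.reduce_mean(
            [tf.reduce_mean(tf.square(p[-1])) for p in p_fake]
        )
    d_grads = d_tape.gradient(real_loss + fake_loss, discriminator.trainable_variables)
    dis_optimizer.apply_gradients(zip(d_grads, discriminator.trainable_variables))
    return adv_loss, real_loss + fake_loss
# ---------------------------------------------------------------------------
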
def main():
    """Run training process."""
    parser = argparse.ArgumentParser(
        description="Train Hifigan (See detail in examples/hifigan/train_hifigan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data.",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data.",
    )
    parser.add_argument(
        "--use-norm",
        default=1,
        type=int,
        help="use norm mels for training or raw.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        raise ValueError("Please specify --dev-dir")

    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = (
            config["batch_max_steps"] // config["hop_size"]
            + 2 * config["hifigan_generator_params"].get("aux_context_window", 0)
        )
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps_valid"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = MultiSTFTMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFHifiGANGenerator(
            HifiGANGeneratorConfig(**config["hifigan_generator_params"]),
            name="hifigan_generator",
        )

        multiperiod_discriminator = TFHifiGANMultiPeriodDiscriminator(
            HifiGANDiscriminatorConfig(**config["hifigan_discriminator_params"]),
            name="hifigan_multiperiod_discriminator",
        )
        multiscale_discriminator = TFMelGANMultiScaleDiscriminator(
            MelGANDiscriminatorConfig(**config["melgan_discriminator_params"]),
            name="melgan_multiscale_discriminator",
        )
        discriminator = TFHifiGANDiscriminator(
            multiperiod_discriminator,
            multiscale_discriminator,
            name="hifigan_discriminator",
        )

        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)

        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        generator.summary()
        discriminator.summary()

        # define optimizer
        # NOTE: per-model learning-rate schedules, kept disabled for reference:
        # generator_lr_fn = getattr(
        #     tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        # )(**config["generator_optimizer_params"]["lr_params"])
        # discriminator_lr_fn = getattr(
        #     tf.keras.optimizers.schedules,
        #     config["discriminator_optimizer_params"]["lr_fn"],
        # )(**config["discriminator_optimizer_params"]["lr_params"])
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"] * config["optimizer_params"]["warmup_proportion"]
            ),
        )

        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=learning_rate_fn,
            beta_1=0.8,
            beta_2=0.99,
            amsgrad=config["optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=learning_rate_fn,
            beta_1=0.8,
            beta_2=0.99,
            amsgrad=config["optimizer_params"]["amsgrad"],
        )

    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps} steps.")
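
# The script is intended to be run directly, following the entry-point
# convention of the other TensorFlowTTS example scripts. The sample command
# below is illustrative only: the paths are placeholders, and the YAML config
# is assumed to define every key read above (hop_size, batch_max_steps,
# optimizer_params, hifigan_generator_params, ...).
#
#   python examples/hifigan/train_hifigan.py \
#       --train-dir ./dump/train \
#       --dev-dir ./dump/valid \
#       --outdir ./exp/train.hifigan.v1 \
#       --config ./examples/hifigan/conf/hifigan.v1.yaml \
#       --use-norm 1


if __name__ == "__main__":
    main()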