def main(): """Run training process.""" parser = argparse.ArgumentParser( description="Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)" ) parser.add_argument("--feature", '-f', required=True) parser.add_argument("--config", '-c', required=True) parser.add_argument("--restore_am", '-am', required=True) parser.add_argument("--restore_iam", '-iam', required=True) args = parser.parse_args() # return strategy STRATEGY = return_strategy() # load and save config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) with open(config['speech_config']) as f: speech_config = yaml.load(f, Loader=yaml.Loader) config.update(speech_config) config['n_mels'] = config['asr_features'] config['hop_size'] = config['asr_downsample'] * config['sample_rate'] * config['stride_ms'] // 1000 config.update(vars(args)) config["version"] = tensorflow_tts.__version__ for key, value in config.items(): logging.info(f"{key} = {value}") with STRATEGY.scope(): generator = MelGANGenerator( config=MultiBandMelGANGeneratorConfig( **config["multiband_melgan_generator_params"] ), name="multi_band_melgan_generator", ) generator.set_shape(config['n_mels']) pqmf = TFPQMF( MultiBandMelGANGeneratorConfig( **config["multiband_melgan_generator_params"] ), dtype=tf.float32, name="pqmf", ) # dummy input to build model. fake_mels = tf.random.uniform(shape=[1, 100, config['n_mels']], dtype=tf.float32) output = generator(mels=fake_mels, training=False) y_hat = pqmf.synthesis(output) print('y_hat', y_hat.shape) generator.load_weights(args.resume) elif args.feature.endswith('.wav'): signal, _ = librosa.load(args.feature, sr=config['sample_rate']) mels = speech_featurizer.tf_extract(signal) with tf.device('/cpu:0'): mels = conformer.encoder_inference(mels)
def load_mb_melgan(config_path, model_path):
    """Load a trained MB-MelGAN generator and its PQMF filter bank from config/weights."""
    with open(config_path) as f:
        raw_config = yaml.load(f, Loader=yaml.Loader)
    mb_melgan_config = MultiBandMelGANGeneratorConfig(**raw_config["generator_params"])
    mb_melgan = TFMelGANGenerator(config=mb_melgan_config, name="melgan_generator")
    mb_melgan._build()
    mb_melgan.load_weights(model_path)
    pqmf = TFPQMF(config=mb_melgan_config, name="pqmf")
    return (mb_melgan, pqmf)
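# A minimal usage sketch for load_mb_melgan (not part of the original scripts).
# The config/weight paths, the feature file, and the 22050 Hz rate below are
# placeholders; mels are assumed to be a [frames, n_mels] float32 array produced
# by the same feature pipeline used for training.
def _vocode_one_file_example():
    import numpy as np
    import soundfile as sf

    mb_melgan, pqmf = load_mb_melgan("config.yml", "generator.h5")
    mels = np.load("utt-norm-feats.npy").astype(np.float32)[np.newaxis, ...]
    subbands = mb_melgan(mels)        # generator predicts sub-band waveforms
    audio = pqmf.synthesis(subbands)  # PQMF recombines sub-bands into full-band audio
    sf.write("utt_generated.wav", audio.numpy().reshape([-1]), 22050, "PCM_16")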
def build_iam(self, config, model_path):
    """Build the 'iam' MB-MelGAN generator and its PQMF filter bank, then load weights."""
    generator = MelGANGenerator(
        config=MultiBandMelGANGeneratorConfig(
            **config["multiband_melgan_generator_params"]),
        name="multi_band_melgan_generator",
    )
    generator.set_shape(config['n_mels'])
    pqmf = TFPQMF(
        MultiBandMelGANGeneratorConfig(
            **config["multiband_melgan_generator_params"]),
        dtype=tf.float32,
        name="pqmf",
    )
    # dummy forward pass to build the model before loading weights.
    fake_mels = tf.random.uniform(shape=[1, 100, config['n_mels']], dtype=tf.float32)
    output = generator(mels=fake_mels, training=False)
    y_hat = pqmf.synthesis(output)
    print('loading iam...')
    generator.load_weights(model_path)
    return generator, pqmf
def _load_mb_melgan2(self, path='./model_files/multiband_melgan2'):
    # initialize melgan model for vocoding
    config = os.path.join(path, 'config.yml')
    with open(config) as f:
        melgan_config = yaml.load(f, Loader=yaml.Loader)
    melgan_config = MultiBandMelGANGeneratorConfig(
        **melgan_config["multiband_melgan_generator_params"])
    melgan = TFMBMelGANGenerator(config=melgan_config, name='melgan_generator')
    melgan._build()
    weights = os.path.join(path, 'libritts_24k.h5')
    melgan.load_weights(weights)
    return melgan
def test_multi_band_melgan(dict_g):
    args_g = make_multi_band_melgan_generator_args(**dict_g)
    args_g = MultiBandMelGANGeneratorConfig(**args_g)
    generator = TFMelGANGenerator(args_g, name="multi_band_melgan")
    generator._build()
    pqmf = TFPQMF(args_g, name="pqmf")

    fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
    fake_y = tf.random.uniform(shape=[1, 100 * 256, 1], dtype=tf.float32)

    y_hat_subbands = generator(fake_mels)
    y_hat = pqmf.synthesis(y_hat_subbands)
    y_subbands = pqmf.analysis(fake_y)

    assert np.shape(y_subbands) == np.shape(y_hat_subbands)
    assert np.shape(fake_y) == np.shape(y_hat)
def build_vc(self, config, model_path):
    """Build the voice-conversion MB-MelGAN generator (with VQ encoder) and load weights."""
    encoder = Encoder(**config['encoder'])
    generator = MelGANGeneratorVQ(
        encoder=encoder,
        config=MultiBandMelGANGeneratorConfig(
            **config["multiband_melgan_generator_params"]),
        name="multi_band_melgan_generator",
    )
    generator.set_shape(config['n_mels'], config['gc_channels'])
    # dummy forward pass to build the model before loading weights.
    fake_mels = tf.random.uniform(shape=[1, 100, config['n_mels']], dtype=tf.float32)
    fake_gc = tf.random.uniform(shape=[1, config['gc_channels']], dtype=tf.float32)
    y_mb_hat = generator(mels=fake_mels, gc=fake_gc, training=False)['y_mb_hat']
    print('loading vc...')
    generator.load_weights(model_path)
    return generator
def main(): """Run training process.""" parser = argparse.ArgumentParser( description="Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)" ) parser.add_argument( "--train-dir", default=None, type=str, help="directory including training data. ", ) parser.add_argument( "--dev-dir", default=None, type=str, help="directory including development data. ", ) parser.add_argument( "--use-norm", default=1, type=int, help="use norm mels for training or raw." ) parser.add_argument( "--outdir", type=str, required=True, help="directory to save checkpoints." ) parser.add_argument( "--config", type=str, required=True, help="yaml format configuration file." ) parser.add_argument( "--resume", default="", type=str, nargs="?", help='checkpoint file path to resume training. (default="")', ) parser.add_argument( "--verbose", type=int, default=1, help="logging level. higher is more logging. (default=1)", ) parser.add_argument( "--generator_mixed_precision", default=0, type=int, help="using mixed precision for generator or not.", ) parser.add_argument( "--discriminator_mixed_precision", default=0, type=int, help="using mixed precision for discriminator or not.", ) parser.add_argument( "--pretrained", default="", type=str, nargs="?", help='path of .h5 mb-melgan generator to load weights from', ) args = parser.parse_args() # return strategy STRATEGY = return_strategy() # set mixed precision config if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1: tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True}) args.generator_mixed_precision = bool(args.generator_mixed_precision) args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision) args.use_norm = bool(args.use_norm) # set logger if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, stream=sys.stdout, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) elif args.verbose > 0: logging.basicConfig( level=logging.INFO, stream=sys.stdout, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) else: logging.basicConfig( level=logging.WARN, stream=sys.stdout, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) logging.warning("Skip DEBUG/INFO messages") # check directory existence if not os.path.exists(args.outdir): os.makedirs(args.outdir) # check arguments if args.train_dir is None: raise ValueError("Please specify --train-dir") if args.dev_dir is None: raise ValueError("Please specify either --valid-dir") # load and save config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) config.update(vars(args)) config["version"] = tensorflow_tts.__version__ with open(os.path.join(args.outdir, "config.yml"), "w") as f: yaml.dump(config, f, Dumper=yaml.Dumper) for key, value in config.items(): logging.info(f"{key} = {value}") # get dataset if config["remove_short_samples"]: mel_length_threshold = config["batch_max_steps"] // config[ "hop_size" ] + 2 * config["multiband_melgan_generator_params"].get("aux_context_window", 0) else: mel_length_threshold = None if config["format"] == "npy": audio_query = "*-wave.npy" mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy" audio_load_fn = np.load mel_load_fn = np.load else: raise ValueError("Only npy are supported.") # define train/valid dataset train_dataset = AudioMelDataset( root_dir=args.train_dir, audio_query=audio_query, mel_query=mel_query, audio_load_fn=audio_load_fn, mel_load_fn=mel_load_fn, 
mel_length_threshold=mel_length_threshold, ).create( is_shuffle=config["is_shuffle"], map_fn=lambda items: collater( items, batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32), hop_size=tf.constant(config["hop_size"], dtype=tf.int32), ), allow_cache=config["allow_cache"], batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync, ) valid_dataset = AudioMelDataset( root_dir=args.dev_dir, audio_query=audio_query, mel_query=mel_query, audio_load_fn=audio_load_fn, mel_load_fn=mel_load_fn, mel_length_threshold=mel_length_threshold, ).create( is_shuffle=config["is_shuffle"], map_fn=lambda items: collater( items, batch_max_steps=tf.constant( config["batch_max_steps_valid"], dtype=tf.int32 ), hop_size=tf.constant(config["hop_size"], dtype=tf.int32), ), allow_cache=config["allow_cache"], batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync, ) # define trainer trainer = MultiBandMelganTrainer( steps=0, epochs=0, config=config, strategy=STRATEGY, is_generator_mixed_precision=args.generator_mixed_precision, is_discriminator_mixed_precision=args.discriminator_mixed_precision, ) with STRATEGY.scope(): # define generator and discriminator generator = TFMelGANGenerator( MultiBandMelGANGeneratorConfig(**config["multiband_melgan_generator_params"]), name="multi_band_melgan_generator", ) discriminator = TFParallelWaveGANDiscriminator( ParallelWaveGANDiscriminatorConfig( **config["parallel_wavegan_discriminator_params"] ), name="parallel_wavegan_discriminator", ) pqmf = TFPQMF( MultiBandMelGANGeneratorConfig(**config["multiband_melgan_generator_params"]), name="pqmf" ) # dummy input to build model. fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32) y_mb_hat = generator(fake_mels) y_hat = pqmf.synthesis(y_mb_hat) discriminator(y_hat) if len(args.pretrained) > 2: print("Loading pretrained weights...") generator.load_weights(args.pretrained) generator.summary() discriminator.summary() # define optimizer generator_lr_fn = getattr( tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"] )(**config["generator_optimizer_params"]["lr_params"]) discriminator_lr_fn = getattr( tf.keras.optimizers.schedules, config["discriminator_optimizer_params"]["lr_fn"], )(**config["discriminator_optimizer_params"]["lr_params"]) gen_optimizer = tf.keras.optimizers.Adam( learning_rate=generator_lr_fn, amsgrad=config["generator_optimizer_params"]["amsgrad"], ) dis_optimizer = RectifiedAdam( learning_rate=discriminator_lr_fn, amsgrad=False ) trainer.compile( gen_model=generator, dis_model=discriminator, gen_optimizer=gen_optimizer, dis_optimizer=dis_optimizer, pqmf=pqmf, ) # start training try: trainer.fit( train_dataset, valid_dataset, saved_path=os.path.join(config["outdir"], "checkpoints/"), resume=args.resume, ) except KeyboardInterrupt: trainer.save_checkpoint() logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
def main(): """Run training process.""" parser = argparse.ArgumentParser( description= "Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)" ) parser.add_argument("--feature", '-f', required=True) parser.add_argument("--speaker", '-s', required=True) parser.add_argument("--config", '-c', required=True) parser.add_argument("--resume", '-r', required=True) args = parser.parse_args() # return strategy STRATEGY = return_strategy() # load and save config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) with open(config['speech_config']) as f: speech_config = yaml.load(f, Loader=yaml.Loader) config.update(speech_config) config['hop_size'] = config['sample_rate'] * config['stride_ms'] // 1000 config['sampling_rate'] = config['sample_rate'] config.update(vars(args)) config["version"] = tensorflow_tts.__version__ for key, value in config.items(): logging.info(f"{key} = {value}") with STRATEGY.scope(): encoder = Encoder(**config['encoder']) generator = MelGANGeneratorVQ( encoder=encoder, config=MultiBandMelGANGeneratorConfig( **config["multiband_melgan_generator_params"]), name="multi_band_melgan_generator", ) generator.set_shape(config['n_mels'], config['gc_channels']) pqmf = TFPQMF( MultiBandMelGANGeneratorConfig( **config["multiband_melgan_generator_params"]), dtype=tf.float32, name="pqmf", ) # dummy input to build model. fake_mels = tf.random.uniform(shape=[1, 100, config['n_mels']], dtype=tf.float32) fake_gc = tf.random.uniform(shape=[1, config['gc_channels']], dtype=tf.float32) y_mb_hat = generator(mels=fake_mels, gc=fake_gc, training=False)['y_mb_hat'] y_hat = pqmf.synthesis(y_mb_hat) generator.load_weights(args.resume) generator.summary() speech_featurizer = TFSpeechFeaturizer(speech_config) if args.feature.endswith('_mel.npy'): mels = tf.constant(np.load(args.feature), tf.float32) else: signal, _ = librosa.load(args.feature, sr=config['sample_rate']) mels = speech_featurizer.tf_extract(signal) mels = tf.reshape(mels, [1, -1, config['n_mels']]) gc = tf.constant( np.load(args.speaker).reshape([1, config['gc_channels']]), tf.float32) # gc = tf.constant(np.zeros(256).reshape([1, config['gc_channels']]), tf.float32) output = generator(mels=mels, gc=gc, training=False)['y_mb_hat'] y_hat = pqmf.synthesis(output).numpy().reshape([-1]) print('output:', y_hat.shape) save_name = args.feature.replace('.wav', '_gen_vc.wav') save_name = args.feature.replace('_mel.npy', '_gen_vc.wav') save_name = save_name.split('/')[-1] wavfile.write(save_name, config['sample_rate'], y_hat) def depreemphasis(signal: np.ndarray, coeff=0.97): if not coeff or coeff <= 0.0: return signal x = np.zeros(signal.shape[0], dtype=np.float32) x[0] = signal[0] for n in range(1, signal.shape[0], 1): x[n] = coeff * x[n - 1] + signal[n] return x y_hat = depreemphasis(y_hat) wavfile.write(save_name.replace('.wav', '_depre.wav'), config['sample_rate'], y_hat)
def main(): """Run melgan decoding from folder.""" parser = argparse.ArgumentParser( description="Generate Audio from melspectrogram with trained melgan " "(See detail in example/melgan/decode_melgan.py).") parser.add_argument("--rootdir", default=None, type=str, required=True, help="directory including ids/durations files.") parser.add_argument("--outdir", type=str, required=True, help="directory to save generated speech.") parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file to be loaded.") parser.add_argument("--use-norm", type=int, default=1, help="Use norm or raw melspectrogram.") parser.add_argument("--batch-size", type=int, default=8, help="batch_size.") parser.add_argument( "--config", default=None, type=str, required=True, help="yaml format configuration file. if not explicitly provided, " "it will be searched in the checkpoint directory. (default=None)") parser.add_argument( "--verbose", type=int, default=1, help="logging level. higher is more logging. (default=1)") args = parser.parse_args() # set logger if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s") elif args.verbose > 0: logging.basicConfig( level=logging.INFO, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s") else: logging.basicConfig( level=logging.WARN, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s") logging.warning("Skip DEBUG/INFO messages") # check directory existence if not os.path.exists(args.outdir): os.makedirs(args.outdir) # load config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) config.update(vars(args)) if config["format"] == "npy": mel_query = "*-norm-feats.npy" if args.use_norm == 1 else "*-raw-feats.npy" mel_load_fn = np.load else: raise ValueError("Only npy is supported.") # define data-loader dataset = MelDataset(root_dir=args.rootdir, mel_query=mel_query, mel_load_fn=mel_load_fn, return_utt_id=True) dataset = dataset.create(batch_size=args.batch_size) # define model and load checkpoint mb_melgan = TFMelGANGenerator( config=MultiBandMelGANGeneratorConfig(**config["generator_params"]), name='melgan') mb_melgan._build() mb_melgan.load_weights(args.checkpoint) pqmf = TFPQMF( config=MultiBandMelGANGeneratorConfig(**config["generator_params"]), name='pqmf') for data in tqdm(dataset, desc="[Decoding]"): utt_ids, mels, mel_lengths = data # melgan inference. generated_subbands = mb_melgan(mels) generated_audios = pqmf.synthesis(generated_subbands) # convert to numpy. generated_audios = generated_audios.numpy() # [B, T] # save to outdir for i, audio in enumerate(generated_audios): utt_id = utt_ids[i].numpy().decode("utf-8") sf.write(os.path.join(args.outdir, f"{utt_id}.wav"), audio[:mel_lengths[i].numpy() * config["hop_size"]], config["sampling_rate"], "PCM_16")
def main(): """Run training process.""" parser = argparse.ArgumentParser( description= "Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)" ) parser.add_argument( "--train-dir", '-td', default=None, type=str, help="directory including training data. ", ) parser.add_argument( "--dev-dir", '-dd', default=None, type=str, help="directory including development data. ", ) parser.add_argument( "--audio-query", '-aq', default='*_wav.npy', type=str, help="suffix of audio file", ) parser.add_argument( "--mel-query", '-mq', default='*_mel.npy', type=str, help="suffix of mel file", ) parser.add_argument("--outdir", '-od', type=str, required=True, help="directory to save checkpoints.") parser.add_argument("--config", '-c', type=str, required=True, help="yaml format configuration file.") parser.add_argument( "--resume", '-r', default="", type=str, nargs="?", help='checkpoint file path to resume training. (default="")', ) parser.add_argument( "--verbose", '-v', type=int, default=1, help="logging level. higher is more logging. (default=1)", ) parser.add_argument( "--generator_mixed_precision", '-gmxp', default=0, type=int, help="using mixed precision for generator or not.", ) parser.add_argument( "--discriminator_mixed_precision", '-dmxp', default=0, type=int, help="using mixed precision for discriminator or not.", ) parser.add_argument( "--pretrained", '-p', default="", type=str, nargs="?", help="path of .h5 mb-melgan generator to load weights from", ) args = parser.parse_args() # return strategy STRATEGY = return_strategy() # set mixed precision config if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1: tf.config.optimizer.set_experimental_options( {"auto_mixed_precision": True}) args.generator_mixed_precision = bool(args.generator_mixed_precision) args.discriminator_mixed_precision = bool( args.discriminator_mixed_precision) # set logger if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, stream=sys.stdout, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) elif args.verbose > 0: logging.basicConfig( level=logging.INFO, stream=sys.stdout, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) else: logging.basicConfig( level=logging.WARN, stream=sys.stdout, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) logging.warning("Skip DEBUG/INFO messages") # check directory existence if not os.path.exists(args.outdir): os.makedirs(args.outdir) # check arguments if args.train_dir is None: raise ValueError("Please specify --train-dir") if args.dev_dir is None: raise ValueError("Please specify either --valid-dir") # load and save config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) with open(config['speech_config']) as f: mel_config = yaml.load(f, Loader=yaml.Loader) config.update(mel_config) config['hop_size'] = config['sample_rate'] * config['stride_ms'] // 1000 config['sampling_rate'] = config['sample_rate'] config.update(vars(args)) config["version"] = tensorflow_tts.__version__ with open(os.path.join(args.outdir, "config.yml"), "w") as f: yaml.dump(config, f, Dumper=yaml.Dumper) for key, value in config.items(): logging.info(f"{key} = {value}") # get dataset if config["remove_short_samples"]: mel_length_threshold = config["batch_max_steps"] // config["hop_size"] \ + 2 * config["multiband_melgan_generator_params"].get("aux_context_window", 0) else: mel_length_threshold = None audio_query = args.audio_query mel_query = args.mel_query 
audio_load_fn = np.load mel_load_fn = np.load # include global condition def collater_gc(items, **kwargs): gc = items['gc'] items = collater(items, **kwargs) items['gc'] = gc return items # define train/valid dataset train_dataset = MelGC( training=True, n_mels=config['n_mels'], gc_channels=config['gc_channels'], root_dir=args.train_dir, audio_query=audio_query, mel_query=mel_query, audio_load_fn=audio_load_fn, mel_load_fn=mel_load_fn, mel_length_threshold=mel_length_threshold, ).create( is_shuffle=config["is_shuffle"], map_fn=lambda items: collater_gc( items, batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32), hop_size=tf.constant(config["hop_size"], dtype=tf.int32), ), allow_cache=config["allow_cache"], batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync * config["gradient_accumulation_steps"], ) valid_dataset = MelGC( training=False, n_mels=config['n_mels'], gc_channels=config['gc_channels'], root_dir=args.dev_dir, audio_query=audio_query, mel_query=mel_query, audio_load_fn=audio_load_fn, mel_load_fn=mel_load_fn, mel_length_threshold=mel_length_threshold, ).create( is_shuffle=config["is_shuffle"], map_fn=lambda items: collater_gc( items, batch_max_steps=tf.constant(config["batch_max_steps_valid"], dtype=tf.int32), hop_size=tf.constant(config["hop_size"], dtype=tf.int32), ), allow_cache=config["allow_cache"], batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync, ) # define trainer trainer = MultiBandMelganVQTrainer( steps=0, epochs=0, config=config, strategy=STRATEGY, is_generator_mixed_precision=args.generator_mixed_precision, is_discriminator_mixed_precision=args.discriminator_mixed_precision, ) with STRATEGY.scope(): encoder = Encoder(**config['encoder']) generator = MelGANGeneratorVQ( encoder=encoder, config=MultiBandMelGANGeneratorConfig( **config["multiband_melgan_generator_params"]), name="multi_band_melgan_generator", ) generator.set_shape(config['n_mels'], config['gc_channels']) discriminator = TFMelGANMultiScaleDiscriminator( MultiBandMelGANDiscriminatorConfig( **config["multiband_melgan_discriminator_params"]), name="multi_band_melgan_discriminator", ) pqmf = TFPQMF( MultiBandMelGANGeneratorConfig( **config["multiband_melgan_generator_params"]), dtype=tf.float32, name="pqmf", ) # dummy input to build model. fake_mels = tf.random.uniform(shape=[1, 100, config['n_mels']], dtype=tf.float32) fake_gc = tf.random.uniform(shape=[1, config['gc_channels']], dtype=tf.float32) y_mb_hat = generator(mels=fake_mels, gc=fake_gc, training=True) for k in y_mb_hat: print(k, y_mb_hat[k].shape) y_hat = pqmf.synthesis(y_mb_hat['y_mb_hat']) print('y_hat:', y_hat.shape) discriminator(y_hat) if len(args.pretrained) > 1: generator.load_weights(args.pretrained) logging.info( f"Successfully loaded pretrained weight from {args.pretrained}." 
) encoder.summary() generator.summary() discriminator.summary() # define optimizer generator_lr_fn = getattr( tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"])( **config["generator_optimizer_params"]["lr_params"]) discriminator_lr_fn = getattr( tf.keras.optimizers.schedules, config["discriminator_optimizer_params"]["lr_fn"], )(**config["discriminator_optimizer_params"]["lr_params"]) gen_optimizer = tf.keras.optimizers.Adam( learning_rate=generator_lr_fn, amsgrad=config["generator_optimizer_params"]["amsgrad"], ) dis_optimizer = tf.keras.optimizers.Adam( learning_rate=discriminator_lr_fn, amsgrad=config["discriminator_optimizer_params"]["amsgrad"], ) trainer.compile( gen_model=generator, dis_model=discriminator, gen_optimizer=gen_optimizer, dis_optimizer=dis_optimizer, pqmf=pqmf, ) # start training try: trainer.fit( train_dataset, valid_dataset, saved_path=os.path.join(config["outdir"], "checkpoints/"), resume=args.resume, ) except KeyboardInterrupt: trainer.save_checkpoint() logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
)
parser.add_argument(
    "--restore", '-r',
    default=None,
    type=str,
)
args = parser.parse_args()

with open(args.config) as f:
    config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))

encoder = Encoder()
generator = TFMelGANGeneratorGC(
    config=MultiBandMelGANGeneratorConfig(
        **config["multiband_melgan_generator_params"]),
    encoder=encoder,
    name="multi_band_melgan_generator",
)
pqmf = TFPQMF(
    MultiBandMelGANGeneratorConfig(
        **config["multiband_melgan_generator_params"]),
    dtype=tf.float32,
    name="pqmf",
)


class Model(tf.keras.Model):
    def __init__(self, generator, pqmf, **kwargs):
        super().__init__(**kwargs)
        generator._build()