def __init__(self, hparams: ExtendedHParams, id_list: List[str]):
    """Construct the trainer, falling back to default hyper-parameters.

    :param hparams: Hyper-parameter container; when ``None`` a default set
                    is created and its output directory points to the CWD.
    :param id_list: Utterance ids handled by this trainer.
    """
    if hparams is None:
        hparams = self.create_hparams()
        hparams.out_dir = os.path.curdir

    # Call ModelTrainer base class init directly.
    super().__init__(id_list=id_list, hparams=hparams)

    # Resolve the placeholder scheduler type to a concrete choice.
    if hparams.scheduler_type == "default":
        hparams.scheduler_type = "Plateau"
        hparams.scheduler_args["verbose"] = True
def __init__(self, hparams: ExtendedHParams, id_list: List[str],
             data_reader_configs: List[DataReaderConfig] = None):
    """Initialise the trainer and wire up batch (de)collate hooks.

    :param hparams:             Hyper-parameter container; when ``None`` a
                                default set is created with the CWD as its
                                output directory.
    :param id_list:             Utterance ids handled by this trainer.
    :param data_reader_configs: Optional reader configurations forwarded to
                                the base class.
    """
    if hparams is None:
        hparams = self.create_hparams()
        hparams.out_dir = os.path.curdir

    super().__init__(data_reader_configs=data_reader_configs,
                     id_list=id_list,
                     hparams=hparams)

    # Resolve the placeholder scheduler type to a concrete choice.
    if hparams.scheduler_type == "default":
        hparams.scheduler_type = "Noam"
        # NOTE(review): key spelling kept as-is because the scheduler
        # implementation is not visible here; it looks like a typo of
        # "warmup_steps" -- confirm against the Noam scheduler before
        # renaming, since renaming would silently drop the setting.
        hparams.scheduler_args['wormup_steps'] = 4000

    # Override the collate and decollate methods of batches.
    self.batch_collate_fn = partial(self.prepare_batch,
                                    use_cond=hparams.use_cond,
                                    one_hot_target=True)
    self.batch_decollate_fn = self.decollate_network_output
def __init__(self, hparams: ExtendedHParams, id_list: List[str],
             data_reader_configs: List[DataReader.Config] = None):
    """Construct the acoustic model trainer.

    :param hparams:             Hyper-parameter container; when ``None`` a
                                default set is created with the CWD as its
                                output directory.
    :param id_list:             Utterance ids handled by this trainer.
    :param data_reader_configs: Optional reader configurations forwarded to
                                the base class.
    """
    if hparams is None:
        hparams = self.create_hparams()
        hparams.out_dir = os.path.curdir

    # NOTE(review): explicit MRO anchor kept verbatim -- if this method is
    # inherited by a subclass, plain super() would resolve differently.
    super(AcousticModelTrainer, self).__init__(
        data_reader_configs=data_reader_configs,
        id_list=id_list,
        hparams=hparams)

    # Resolve the placeholder scheduler type to a concrete choice.
    if hparams.scheduler_type == "default":
        hparams.scheduler_type = "Plateau"
        hparams.add_hparams(plateau_verbose=True)