def build_train_evaluate_model(self, limit_train_batches: int, limit_val_batches: int, epoch: int, batch_size: int, model_config: BiDirectionalConfig, precision: int, gpus: int, folders: bool):
    """Run an end-to-end DeepSpeech smoke test: download data, train, then evaluate.

    Downloads the dataset, builds a training config, trains the model,
    verifies the final checkpoint exists, and runs both greedy and
    beam-decoder inference against the test split.

    Args:
        limit_train_batches: Cap on training batches per epoch (smoke-test sizing).
        limit_val_batches: Cap on validation batches per epoch.
        epoch: Maximum number of training epochs.
        batch_size: Batch size for training.
        model_config: Bidirectional model architecture configuration.
        precision: Numerical precision to train/evaluate with (e.g. 16 or 32).
        gpus: Number of GPUs; any positive value enables CUDA paths.
        folders: Passed through to ``download_data`` to control dataset layout.

    Raises:
        AssertionError: If the expected checkpoint is missing after training.
    """
    cuda = gpus > 0
    train_path, val_path, test_path = self.download_data(
        DatasetConfig(
            target_dir=self.target_dir,
            manifest_dir=self.manifest_dir
        ),
        folders=folders
    )
    train_cfg = self.create_training_config(
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        max_epochs=epoch,
        batch_size=batch_size,
        train_path=train_path,
        val_path=val_path,
        model_config=model_config,
        precision=precision,
        gpus=gpus
    )
    print("Running Training DeepSpeech Model Smoke Test")
    train(train_cfg)

    # Expected final model path after training.
    # Listing the directory aids debugging when the checkpoint is absent.
    print(os.listdir(self.model_dir))
    # os.path.join instead of string concatenation for portable path handling.
    model_path = os.path.join(self.model_dir, 'last.ckpt')
    assert os.path.exists(model_path), f"Expected checkpoint not found: {model_path}"

    lm_configs = [
        LMConfig(),  # Test Greedy
        LMConfig(
            decoder_type=DecoderType.beam
        )  # Test Beam Decoder
    ]

    print("Running Inference Smoke Tests")
    for lm_config in lm_configs:
        self.eval_model(
            model_path=model_path,
            test_path=test_path,
            cuda=cuda,
            precision=precision,
            lm_config=lm_config
        )
        self.inference(
            test_path=test_path,
            model_path=model_path,
            cuda=cuda,
            precision=precision,
            lm_config=lm_config
        )
def build_train_evaluate_model(self, epoch: int, batch_size: int, model_config: BiDirectionalConfig, use_half: bool, cuda: bool):
    """Run an end-to-end DeepSpeech smoke test: download data, train, then evaluate.

    Downloads the dataset manifests, builds a training config, trains the
    model, verifies the final serialized model exists, and runs both greedy
    and beam-decoder inference against the test manifest.

    Args:
        epoch: Maximum number of training epochs.
        batch_size: Batch size for training.
        model_config: Bidirectional model architecture configuration.
        use_half: Whether to evaluate/infer in half precision.
        cuda: Whether to run on GPU.

    Raises:
        AssertionError: If the expected model file is missing after training.
    """
    train_manifest, val_manifest, test_manifest = self.download_data(
        DatasetConfig(target_dir=self.target_dir, manifest_dir=self.manifest_dir))
    train_cfg = self.create_training_config(
        epoch=epoch,
        batch_size=batch_size,
        train_manifest=train_manifest,
        val_manifest=val_manifest,
        model_config=model_config,
        cuda=cuda)
    print("Running Training DeepSpeech Model Smoke Test")
    train(train_cfg)

    # Expected final model path after training.
    # os.path.join instead of string concatenation for portable path handling.
    model_path = os.path.join(self.model_dir, 'deepspeech_final.pth')
    assert os.path.exists(model_path), f"Expected model not found: {model_path}"

    lm_configs = [
        LMConfig(),  # Test Greedy
        LMConfig(decoder_type=DecoderType.beam)  # Test Beam Decoder
    ]

    print("Running Inference Smoke Tests")
    for lm_config in lm_configs:
        self.eval_model(
            model_path=model_path,
            test_manifest=test_manifest,
            cuda=cuda,
            use_half=use_half,
            lm_config=lm_config)
        self.inference(
            test_manifest=test_manifest,
            model_path=model_path,
            cuda=cuda,
            use_half=use_half,
            lm_config=lm_config)
def hydra_main(cfg: DeepSpeechConfig):
    """Hydra entry point: forward the composed DeepSpeech config to ``train``."""
    train(cfg=cfg)
def hydra_main(cfg):
    """Hydra entry point: forward the composed config to ``train``."""
    train(cfg=cfg)
def hydra_main(cfg: DictConfig):
    """Hydra entry point: forward the composed ``DictConfig`` to ``train``."""
    train(cfg=cfg)
def main(args):
    """Script entry point: delegate parsed arguments to ``training.train``."""
    training.train(args)