"model_dir": model_dir, "model_blueprint": model_blueprint, "exist_model": exist_model, "start_epoch": train_stage, "epochs": epochs, "use_gpu": use_gpu, "gpu_id": gpu_id, "max_change": 10., "benchmark": benchmark, "suffix": suffix, "report_times_every_epoch": report_times_every_epoch, "report_interval_iters": report_interval_iters, "record_file": "train.csv" }) trainer = trainer.SimpleTrainer(package) if run_lr_finder and utils.is_main_training(): trainer.run_lr_finder("lr_finder.csv", init_lr=1e-8, final_lr=10., num_iters=2000, beta=0.98) endstage = 3 # Do not start extractor. else: trainer.run() #### Extract xvector if stage <= 4 <= endstage and utils.is_main_training(): # There are some params for xvector extracting. data_root = "data" # It contains all dataset just like Kaldi recipe.
}, {
    "model_dir": model_dir,
    "model_blueprint": model_blueprint,
    "exist_model": exist_model,
    "start_epoch": train_stage,
    "epochs": epochs,
    "use_gpu": use_gpu,
    "gpu_id": gpu_id,
    "benchmark": benchmark,
    "suffix": suffix,
    "report_times_every_epoch": report_times_every_epoch,
    "report_interval_iters": report_interval_iters,
    "record_file": "train.csv"
})

trainer = trainer.SimpleTrainer(package, stop_early=stop_early)

if run_lr_finder and utils.is_main_training():
    trainer.run_lr_finder("lr_finder.csv", init_lr=1e-8, final_lr=10., num_iters=2000, beta=0.98)
    endstage = 3  # Do not start the extractor.
else:
    trainer.run()

#### Extract xvector
if stage <= 4 <= endstage and utils.is_main_training():
    # There are some params for xvector extraction.
    data_root = "data"  # It contains all datasets, just like a Kaldi recipe.