##--------------------------------------------------##
## Other options
# Path to a previously trained model checkpoint; empty string means train
# from scratch.
exist_model = ""  # Use it in transfer learning.
##--------------------------------------------------##
## Main params
# Kaldi-style data directory of training features (23-dim MFCC + pitch,
# VoxCeleb1 train set with augmentation).
traindata = "data/mfcc_23_pitch/voxceleb1_train_aug"
# Directory for training examples (egs). `sample_type` is defined earlier in
# the launcher (outside this chunk) -- presumably a chunk-sampling mode name
# that is encoded into the directory name; confirm against the full launcher.
egs_dir = "exp/egs/mfcc_23_pitch_voxceleb1_train_aug" + "_" + sample_type

# Python file describing the network architecture to instantiate.
model_blueprint = "subtools/pytorch/model/snowdar-xvector.py"
# Output directory for this experiment's checkpoints and logs.
model_dir = "exp/standard_voxceleb1"
##--------------------------------------------------##
##
######################################################### START #########################################################
##
#### Set seed
# NOTE(review): exact scope of seeding (python/numpy/torch) depends on
# utils.set_all_seed -- confirm in subtools' utils module.
utils.set_all_seed(1024)
##
#### Set sleep time for a rest
# Use it to run a launcher with a countdown function when there are no extra GPU memory
# but you really want to go to bed and know when the GPU memory will be free.
if args.sleep > 0: time.sleep(args.sleep)
##
#### Init environment
# It is used for multi-gpu training if used (number of gpu-id > 1).
# And it will do nothing for single-GPU training.
utils.init_multi_gpu_training(args.gpu_id, args.multi_gpu_solution, args.port)
##
#### Auto-config params
# If multi-GPU used, it will auto-scale learning rate by multiplying number of processes.
optimizer_params["learn_rate"] = utils.auto_scale_lr(
    optimizer_params["learn_rate"])
##--------------------------------------------------##
## Example #2: alternative launcher configuration
# Suffix appended to saved model files (e.g. "*.params").
suffix = "params"  # Used in saved model file.
##--------------------------------------------------##
## Other options
# Path to a previously trained model checkpoint for transfer learning;
# empty string means train from scratch.
exist_model = ""  # Use it in transfer learning.
##--------------------------------------------------##
## Main params
# Kaldi-style data directory of training features (23-dim MFCC + pitch).
traindata = "data/mfcc_23_pitch/voxceleb1_train_aug"
# Egs directory name encodes the sampling configuration. `sample_type` is
# defined earlier in the launcher (outside this chunk); the "_max" suffix
# distinguishes this configuration from Example #1's egs directory.
egs_dir = "exp/egs/mfcc_23_pitch_voxceleb1_train_aug" + "_" + sample_type + "_max"

# Python file describing the network architecture to instantiate.
model_blueprint = "subtools/pytorch/model/xvector.py"
# Output directory for this experiment's checkpoints and logs.
model_dir = "exp/standard_xv_baseline_warmR_voxceleb1"
##--------------------------------------------------##
##
#### Set seed
utils.set_all_seed(
    1024
)  # Note that on a different machine random results may still differ even with the same seed,
# so you could always get slightly different results from this launcher compared to mine.

#### Preprocess
if stage <= 2 and endstage >= 0:
    # Here only give limited options because it is not convenient.
    # Suggest to pre-execute this shell script to make it freedom and then continue to run this launcher.
    kaldi_common.execute_command(
        "bash subtools/pytorch/pipeline/preprocess_to_egs.sh "
        "--stage {stage} --endstage {endstage} --valid-split-type {valid_split_type} "
        "--nj {nj} --cmn {cmn} --limit-utts {limit_utts} --min-chunk {chunk_size} --overlap {overlap} "
        "--sample-type {sample_type} --chunk-num {chunk_num} --scale {scale} --force-clear {force_clear} "
        "--valid-num-utts {valid_utts} --valid-chunk-num {valid_chunk_num_every_utt} --compress {compress} "
        "{traindata} {egs_dir}".format(
            stage=stage,