import tensorflow as tf
from datetime import datetime
from time import time

# NOTE: project-local helpers (ASR_align_DataSet, TFData, TextDataSet, read_ngram,
# ngram2kernel, P_Ngram, PhoneClassifier, PhoneDiscriminator2/3, train_step,
# train_D, train_G, train_G_supervised, evaluation, evaluate, decode, monitor,
# and the global `args`) are assumed to be imported from this repo's modules.


def train(Model):
    """Semi-supervised EODM training: n-gram matching loss on unlabeled
    speech plus a supervised loss on one fixed batch."""
    # load data
    with tf.device("/cpu:0"):
        dataset_dev = ASR_align_DataSet(
            file=[args.dirs.dev.data],
            args=args,
            _shuffle=False,
            transform=True)
        tfdata_train = TFData(dataset=None,
                              dataAttr=['feature', 'label', 'align'],
                              dir_save=args.dirs.train.tfdata,
                              args=args).read(_shuffle=False)
        tfdata_dev = TFData(dataset=None,
                            dataAttr=['feature', 'label', 'align'],
                            dir_save=args.dirs.dev.tfdata,
                            args=args).read(_shuffle=False)

        # one fixed supervised batch (features, labels, alignments)
        x_0, y_0, aligns_0 = next(iter(tfdata_train.take(args.num_supervised).padded_batch(
            args.num_supervised, ([None, args.dim_input], [None], [None]))))
        iter_train = iter(tfdata_train.cache().repeat().shuffle(3000).padded_batch(
            args.batch_size, ([None, args.dim_input], [None], [None])).prefetch(buffer_size=3))
        tfdata_dev = tfdata_dev.padded_batch(
            args.batch_size, ([None, args.dim_input], [None], [None]))

    # get dataset n-gram statistics
    ngram_py, total_num = read_ngram(args.data.k, args.dirs.ngram, args.token2idx, type='list')
    kernel, py = ngram2kernel(ngram_py, args)

    # create model parameters
    model = Model(args)
    compute_p_ngram = P_Ngram(kernel, args)
    model.summary()
    compute_p_ngram.summary()

    # build optimizer
    if args.opti.type == 'adam':
        optimizer = tf.keras.optimizers.Adam(args.opti.lr, beta_1=0.5, beta_2=0.9)
        # optimizer = tf.keras.optimizers.Adam(args.opti.lr*0.1, beta_1=0.5, beta_2=0.9)
    elif args.opti.type == 'sgd':
        optimizer = tf.keras.optimizers.SGD(learning_rate=args.opti.lr, momentum=0.9, decay=0.98)

    writer = tf.summary.create_file_writer(str(args.dir_log))
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, args.dir_checkpoint, max_to_keep=5)

    step = 0
    # if a checkpoint exists, restore the latest checkpoint
    if args.dirs.checkpoint:
        _ckpt_manager = tf.train.CheckpointManager(ckpt, args.dirs.checkpoint, max_to_keep=1)
        ckpt.restore(_ckpt_manager.latest_checkpoint)
        print('checkpoint {} restored!!'.format(_ckpt_manager.latest_checkpoint))
        step = int(_ckpt_manager.latest_checkpoint.split('-')[-1])

    start_time = datetime.now()
    num_processed = 0
    progress = 0
    # step = 1600

    while step < 99999999:
        start = time()

        x, _, aligns = next(iter_train)
        loss_EODM, loss_fs = train_step(x, aligns, py, model, compute_p_ngram,
                                        optimizer, args.lambda_fs)
        loss_supervise = train_G_supervised(x_0, y_0, model, optimizer, args.dim_output)
        num_processed += len(x)
        progress = num_processed / args.data.train_size
        if step % 10 == 0:
            print('EODM loss: {:.2f}\tloss_fs: {:.3f} * {}\tloss_supervise: {:.3f} * {}\t'
                  'batch: {} time: {:.2f} s {:.3f}% step: {}'.format(
                      loss_EODM, loss_fs, args.lambda_fs, loss_supervise,
                      args.lambda_supervision, x.shape, time() - start,
                      progress * 100.0, step))
            with writer.as_default():
                tf.summary.scalar("costs/loss_EODM", loss_EODM, step=step)
                tf.summary.scalar("costs/loss_fs", loss_fs, step=step)
                tf.summary.scalar("costs/loss_supervise", loss_supervise, step=step)
        if step % args.dev_step == 0:
            fer, cer = evaluation(tfdata_dev, args.data.dev_size, model)
            with writer.as_default():
                tf.summary.scalar("performance/fer", fer, step=step)
                tf.summary.scalar("performance/cer", cer, step=step)
        if step % args.decode_step == 0:
            decode(dataset_dev[0], model)
        if step % args.save_step == 0:
            save_path = ckpt_manager.save(step)
            print('save model {}'.format(save_path))

        step += 1

    print('training duration: {:.2f}h'.format(
        (datetime.now() - start_time).total_seconds() / 3600))
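# The helper `train_G_supervised` used above is defined elsewhere in the repo;
# below is a minimal sketch of what such a step plausibly looks like, assuming
# frame-level cross-entropy between the classifier's per-frame logits and the
# targets, with padded frames (label 0) masked out. The name
# `_train_G_supervised_sketch` and its internals are assumptions for
# illustration, not the repo's actual implementation.
def _train_G_supervised_sketch(x, aligns, model, optimizer, dim_output):
    with tf.GradientTape() as tape:
        logits = model(x, training=True)                   # [B, T, dim_output]
        mask = tf.cast(aligns > 0, tf.float32)             # ignore padding frames
        ce = tf.nn.softmax_cross_entropy_with_logits(
            labels=tf.one_hot(aligns, dim_output), logits=logits)  # [B, T]
        loss = tf.reduce_sum(ce * mask) / tf.reduce_sum(mask)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss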
def train():
    """Adversarial training: PhoneClassifier G vs. PhoneDiscriminator3 D on
    unpaired text, with a supervised loss on one fixed batch."""
    with tf.device("/cpu:0"):
        dataset_train = ASR_align_DataSet(
            trans_file=args.dirs.train.trans,
            align_file=args.dirs.train.align,
            uttid2wav=args.dirs.train.wav_scp,
            feat_len_file=args.dirs.train.feat_len,
            args=args,
            _shuffle=True,
            transform=True)
        dataset_dev = ASR_align_DataSet(
            trans_file=args.dirs.dev.trans,
            align_file=args.dirs.dev.align,
            uttid2wav=args.dirs.dev.wav_scp,
            feat_len_file=args.dirs.dev.feat_len,
            args=args,
            _shuffle=False,
            transform=True)
        dataset_train_supervise = ASR_align_DataSet(
            trans_file=args.dirs.train_supervise.trans,
            align_file=args.dirs.train_supervise.align,
            uttid2wav=args.dirs.train_supervise.wav_scp,
            feat_len_file=args.dirs.train_supervise.feat_len,
            args=args,
            _shuffle=False,
            transform=True)
        feature_train_supervise = TFData(
            dataset=dataset_train_supervise,
            dir_save=args.dirs.train_supervise.tfdata,
            args=args).read()
        feature_train = TFData(dataset=dataset_train,
                               dir_save=args.dirs.train.tfdata,
                               args=args).read()
        feature_dev = TFData(dataset=dataset_dev,
                             dir_save=args.dirs.dev.tfdata,
                             args=args).read()

        # one fixed supervised batch with its alignments and boundaries
        supervise_uttids, supervise_x = next(iter(feature_train_supervise.take(
            args.num_supervised).padded_batch(
                args.num_supervised, ((), [None, args.dim_input]))))
        supervise_aligns = dataset_train_supervise.get_attrs('align', supervise_uttids.numpy())
        supervise_bounds = dataset_train_supervise.get_attrs('bounds', supervise_uttids.numpy())

        iter_feature_train = iter(feature_train.cache().repeat().shuffle(500).padded_batch(
            args.batch_size, ((), [None, args.dim_input])).prefetch(buffer_size=5))
        feature_dev = feature_dev.padded_batch(args.batch_size, ((), [None, args.dim_input]))

        # unpaired text for the discriminator
        dataset_text = TextDataSet(list_files=[args.dirs.lm.data], args=args, _shuffle=True)
        tfdata_train = tf.data.Dataset.from_generator(
            dataset_text, (tf.int32), (tf.TensorShape([None])))
        iter_text = iter(tfdata_train.cache().repeat().shuffle(1000).map(
            lambda x: x[:args.model.D.max_label_len]).padded_batch(
                args.batch_size, ([args.model.D.max_label_len])).prefetch(buffer_size=5))

    # create model parameters
    G = PhoneClassifier(args)
    D = PhoneDiscriminator3(args)
    G.summary()
    D.summary()

    optimizer_G = tf.keras.optimizers.Adam(args.opti.G.lr, beta_1=0.5, beta_2=0.9)
    optimizer_D = tf.keras.optimizers.Adam(args.opti.D.lr, beta_1=0.5, beta_2=0.9)

    writer = tf.summary.create_file_writer(str(args.dir_log))
    ckpt = tf.train.Checkpoint(G=G, optimizer_G=optimizer_G)
    ckpt_manager = tf.train.CheckpointManager(ckpt, args.dir_checkpoint, max_to_keep=20)

    step = 0
    # if a checkpoint exists, restore the latest checkpoint
    if args.dirs.checkpoint:
        _ckpt_manager = tf.train.CheckpointManager(ckpt, args.dirs.checkpoint, max_to_keep=1)
        ckpt.restore(_ckpt_manager.latest_checkpoint)
        print('checkpoint {} restored!!'.format(_ckpt_manager.latest_checkpoint))
        step = int(_ckpt_manager.latest_checkpoint.split('-')[-1])

    start_time = datetime.now()
    num_processed = 0
    progress = 0

    while step < 99999999:
        start = time()

        # several D updates per G update
        for _ in range(args.opti.D_G_rate):
            uttids, x = next(iter_feature_train)
            stamps = dataset_train.get_attrs('stamps', uttids.numpy())
            text = next(iter_text)
            P_Real = tf.one_hot(text, args.dim_output)
            cost_D, gp = train_D(x, stamps, P_Real, text > 0, G, D, optimizer_D,
                                 args.lambda_gp, args.model.D.max_label_len)
            # alternate stamp-free signature kept from development:
            # cost_D, gp = train_D(x, P_Real, text > 0, G, D, optimizer_D,
            #                      args.lambda_gp, args.model.G.len_seq, args.model.D.max_label_len)

        uttids, x = next(iter_feature_train)
        stamps = dataset_train.get_attrs('stamps', uttids.numpy())
        cost_G, fs = train_G(x, stamps, G, D, optimizer_G, args.lambda_fs)
        # cost_G, fs = train_G(x, G, D, optimizer_G,
        #                      args.lambda_fs, args.model.G.len_seq, args.model.D.max_label_len)
        loss_supervise = train_G_supervised(supervise_x, supervise_aligns, G,
                                            optimizer_G, args.dim_output,
                                            args.lambda_supervision)
        # loss_supervise, bounds_loss = train_G_bounds_supervised(
        #     supervise_x, supervise_bounds, supervise_aligns, G, optimizer_G, args.dim_output)

        num_processed += len(x)
        progress = num_processed / args.data.train_size
        if step % 10 == 0:
            print('cost_G: {:.3f}|{:.3f}\tcost_D: {:.3f}|{:.3f}\tloss_supervise: {:.3f}\t'
                  'batch: {}|{}\tused: {:.3f}\t {:.3f}% iter: {}'.format(
                      cost_G, fs, cost_D, gp, loss_supervise, x.shape, text.shape,
                      time() - start, progress * 100.0, step))
            with writer.as_default():
                tf.summary.scalar("costs/cost_G", cost_G, step=step)
                tf.summary.scalar("costs/cost_D", cost_D, step=step)
                tf.summary.scalar("costs/gp", gp, step=step)
                tf.summary.scalar("costs/fs", fs, step=step)
                tf.summary.scalar("costs/loss_supervise", loss_supervise, step=step)
        if step % args.dev_step == 0:
            fer, cer_0 = evaluate(feature_dev, dataset_dev, args.data.dev_size, G,
                                  beam_size=0, with_stamp=True)
            fer, cer = evaluate(feature_dev, dataset_dev, args.data.dev_size, G,
                                beam_size=0, with_stamp=False)
            with writer.as_default():
                tf.summary.scalar("performance/fer", fer, step=step)
                tf.summary.scalar("performance/cer_0", cer_0, step=step)
                tf.summary.scalar("performance/cer", cer, step=step)
        if step % args.decode_step == 0:
            monitor(dataset_dev[0], G)
        if step % args.save_step == 0:
            save_path = ckpt_manager.save(step)
            print('save model {}'.format(save_path))

        step += 1

    print('training duration: {:.2f}h'.format(
        (datetime.now() - start_time).total_seconds() / 3600))
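# `train_D` is defined elsewhere in the repo. Given its `gp` output and the
# `lambda_gp` weight, it is presumably a WGAN-GP style critic update; the
# sketch below is a minimal illustration of that technique, not the repo's
# actual code (it omits the stamps/length handling of the real helper).
def _train_D_sketch(P_G, P_Real, D, optimizer_D, lambda_gp):
    with tf.GradientTape() as tape:
        d_real = D(P_Real, training=True)
        d_fake = D(P_G, training=True)
        # gradient penalty on random interpolations between real and fake
        eps = tf.random.uniform([tf.shape(P_Real)[0], 1, 1], 0.0, 1.0)
        inter = eps * P_Real + (1.0 - eps) * P_G
        with tf.GradientTape() as tape_gp:
            tape_gp.watch(inter)
            d_inter = D(inter, training=True)
        grad = tape_gp.gradient(d_inter, inter)
        slopes = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=-1)
        gp = tf.reduce_mean((slopes - 1.0) ** 2)
        cost_D = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real) + lambda_gp * gp
    gradients = tape.gradient(cost_D, D.trainable_variables)
    optimizer_D.apply_gradients(zip(gradients, D.trainable_variables))
    return cost_D, gp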
def train():
    """Semi-supervised EODM training driven by uttid lookups: n-gram matching
    on unlabeled speech plus a supervised loss on one fixed batch."""
    # load data
    with tf.device("/cpu:0"):
        dataset_train = ASR_align_DataSet(
            trans_file=args.dirs.train.trans,
            align_file=args.dirs.train.align,
            uttid2wav=args.dirs.train.wav_scp,
            feat_len_file=args.dirs.train.feat_len,
            args=args,
            _shuffle=True,
            transform=True)
        dataset_dev = ASR_align_DataSet(
            trans_file=args.dirs.dev.trans,
            align_file=args.dirs.dev.align,
            uttid2wav=args.dirs.dev.wav_scp,
            feat_len_file=args.dirs.dev.feat_len,
            args=args,
            _shuffle=False,
            transform=True)
        # the supervised batch is drawn from the training features below, so
        # this dataset reuses the train wav_scp / feat_len files
        dataset_train_supervise = ASR_align_DataSet(
            trans_file=args.dirs.train_supervise.trans,
            align_file=args.dirs.train_supervise.align,
            uttid2wav=args.dirs.train.wav_scp,
            feat_len_file=args.dirs.train.feat_len,
            args=args,
            _shuffle=False,
            transform=True)
        feature_train = TFData(dataset=dataset_train,
                               dir_save=args.dirs.train.tfdata,
                               args=args).read()
        feature_dev = TFData(dataset=dataset_dev,
                             dir_save=args.dirs.dev.tfdata,
                             args=args).read()

        supervise_uttids, supervise_x = next(iter(feature_train.take(
            args.num_supervised).padded_batch(
                args.num_supervised, ((), [None, args.dim_input]))))
        supervise_aligns = dataset_train_supervise.get_attrs('align', supervise_uttids.numpy())

        iter_feature_train = iter(feature_train.cache().repeat().shuffle(500).padded_batch(
            args.batch_size, ((), [None, args.dim_input])).prefetch(buffer_size=5))
        feature_dev = feature_dev.padded_batch(args.batch_size, ((), [None, args.dim_input]))

    # get dataset n-gram statistics
    ngram_py, total_num = read_ngram(args.data.k, args.dirs.ngram, args.token2idx, type='list')
    kernel, py = ngram2kernel(ngram_py, args)

    # create model parameters
    G = PhoneClassifier(args)
    compute_p_ngram = P_Ngram(kernel, args)
    G.summary()
    compute_p_ngram.summary()

    # build optimizer
    if args.opti.type == 'adam':
        optimizer = tf.keras.optimizers.Adam(args.opti.lr, beta_1=0.5, beta_2=0.9)
    elif args.opti.type == 'sgd':
        optimizer = tf.keras.optimizers.SGD(learning_rate=args.opti.lr, momentum=0.9, decay=0.98)

    writer = tf.summary.create_file_writer(str(args.dir_log))
    ckpt = tf.train.Checkpoint(G=G, optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, args.dir_checkpoint, max_to_keep=5)

    step = 0
    # if a checkpoint exists, restore the latest checkpoint
    if args.dirs.checkpoint:
        _ckpt_manager = tf.train.CheckpointManager(ckpt, args.dirs.checkpoint, max_to_keep=1)
        ckpt.restore(_ckpt_manager.latest_checkpoint)
        print('checkpoint {} restored!!'.format(_ckpt_manager.latest_checkpoint))
        step = int(_ckpt_manager.latest_checkpoint.split('-')[-1])

    start_time = datetime.now()
    num_processed = 0
    progress = 0

    while step < 99999999:
        start = time()

        uttids, x = next(iter_feature_train)
        stamps = dataset_train.get_attrs('stamps', uttids.numpy())
        loss_EODM, loss_fs = train_step(x, stamps, py, G, compute_p_ngram,
                                        optimizer, args.lambda_fs)
        # loss_EODM = loss_fs = 0  # debugging toggle: supervised loss only
        loss_supervise = train_G_supervised(supervise_x, supervise_aligns, G,
                                            optimizer, args.dim_output,
                                            args.lambda_supervision)
        num_processed += len(x)
        progress = num_processed / args.data.train_size
        if step % 10 == 0:
            print('EODM loss: {:.2f}\tloss_fs: {:.3f} * {}\tloss_supervise: {:.3f} * {}\t'
                  'batch: {} time: {:.2f} s {:.3f}% step: {}'.format(
                      loss_EODM, loss_fs, args.lambda_fs, loss_supervise,
                      args.lambda_supervision, x.shape, time() - start,
                      progress * 100.0, step))
            with writer.as_default():
                tf.summary.scalar("costs/loss_EODM", loss_EODM, step=step)
                tf.summary.scalar("costs/loss_fs", loss_fs, step=step)
                tf.summary.scalar("costs/loss_supervise", loss_supervise, step=step)
        if step % args.dev_step == 0:
            fer, cer = evaluate(feature_dev, dataset_dev, args.data.dev_size, G)
            with writer.as_default():
                tf.summary.scalar("performance/fer", fer, step=step)
                tf.summary.scalar("performance/cer", cer, step=step)
        if step % args.decode_step == 0:
            monitor(dataset_dev[0], G)
        if step % args.save_step == 0:
            save_path = ckpt_manager.save(step)
            print('save model {}'.format(save_path))

        step += 1

    print('training duration: {:.2f}h'.format(
        (datetime.now() - start_time).total_seconds() / 3600))
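# `train_step` and `P_Ngram` are defined elsewhere in the repo. The sketch
# below illustrates the EODM idea they appear to implement -- matching the
# model's expected n-gram distribution to the phone LM's probabilities (`py`)
# with a cross-entropy -- under simplifying assumptions (static time
# dimension, n-grams given as Python tuples). Treat every name here as an
# illustration, not the repo's actual code.
def _eodm_loss_sketch(probs, ngrams, py):
    # probs: [B, T, V] per-frame posteriors; ngrams: list of k-tuples of
    # token ids; py: tensor with the phone LM's probability of each n-gram
    T = probs.shape[1]
    k = len(ngrams[0])
    counts = []
    for gram in ngrams:
        # expected count of this n-gram: sum over start positions of the
        # product of the frame posteriors of its tokens
        p = tf.ones_like(probs[:, :T - k + 1, 0])
        for i, tok in enumerate(gram):
            p = p * probs[:, i:T - k + 1 + i, tok]
        counts.append(tf.reduce_sum(p))
    counts = tf.stack(counts)
    p_model = counts / (tf.reduce_sum(counts) + 1e-9)  # normalize to a distribution
    return -tf.reduce_sum(py * tf.math.log(p_model + 1e-9))  # CE against the LM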
def train():
    """Adversarial training with PhoneDiscriminator2 on sequences truncated
    to args.max_seq_len, plus a supervised loss on one fixed batch."""
    dataset_dev = ASR_align_DataSet(
        file=[args.dirs.dev.data],
        args=args,
        _shuffle=False,
        transform=True)
    with tf.device("/cpu:0"):
        # wav data
        tfdata_train = TFData(dataset=None,
                              dataAttr=['feature', 'label', 'align'],
                              dir_save=args.dirs.train.tfdata,
                              args=args).read(_shuffle=False)
        tfdata_dev = TFData(dataset=None,
                            dataAttr=['feature', 'label', 'align'],
                            dir_save=args.dirs.dev.tfdata,
                            args=args).read(_shuffle=False)
        x_0, y_0, _ = next(iter(tfdata_train.take(args.num_supervised).map(
            lambda x, y, z: (x, y, z[:args.max_seq_len])).padded_batch(
                args.num_supervised, ([None, args.dim_input], [None], [None]))))
        iter_train = iter(tfdata_train.cache().repeat().shuffle(3000).map(
            lambda x, y, z: (x, y, z[:args.max_seq_len])).padded_batch(
                args.batch_size,
                ([None, args.dim_input], [None], [args.max_seq_len])).prefetch(buffer_size=3))
        tfdata_dev = tfdata_dev.padded_batch(
            args.batch_size, ([None, args.dim_input], [None], [None]))

        # text data
        dataset_text = TextDataSet(list_files=[args.dirs.lm.data], args=args, _shuffle=True)
        tfdata_train_text = tf.data.Dataset.from_generator(
            dataset_text, (tf.int32), (tf.TensorShape([None])))
        iter_text = iter(tfdata_train_text.cache().repeat().shuffle(100).map(
            lambda x: x[:args.max_seq_len]).padded_batch(
                args.batch_size, ([args.max_seq_len])).prefetch(buffer_size=5))

    # create model parameters
    G = PhoneClassifier(args)
    D = PhoneDiscriminator2(args)
    G.summary()
    D.summary()

    optimizer_G = tf.keras.optimizers.Adam(args.opti.G.lr, beta_1=0.5, beta_2=0.9)
    optimizer_D = tf.keras.optimizers.Adam(args.opti.D.lr, beta_1=0.5, beta_2=0.9)

    writer = tf.summary.create_file_writer(str(args.dir_log))
    ckpt = tf.train.Checkpoint(G=G, optimizer_G=optimizer_G)
    ckpt_manager = tf.train.CheckpointManager(ckpt, args.dir_checkpoint, max_to_keep=5)

    step = 0
    # if a checkpoint exists, restore the latest checkpoint
    if args.dirs.checkpoint:
        _ckpt_manager = tf.train.CheckpointManager(ckpt, args.dirs.checkpoint, max_to_keep=1)
        ckpt.restore(_ckpt_manager.latest_checkpoint)
        print('checkpoint {} restored!!'.format(_ckpt_manager.latest_checkpoint))
        step = int(_ckpt_manager.latest_checkpoint.split('-')[-1])

    start_time = datetime.now()
    num_processed = 0
    progress = 0

    while step < 99999999:
        start = time()

        # several D updates per G update
        for _ in range(args.opti.D_G_rate):
            x, _, aligns = next(iter_train)
            text = next(iter_text)
            P_Real = tf.one_hot(text, args.dim_output)
            cost_D, gp = train_D(x, aligns, P_Real, text > 0, G, D,
                                 optimizer_D, args.lambda_gp)

        x, _, aligns = next(iter_train)
        cost_G, fs = train_G(x, aligns, G, D, optimizer_G, args.lambda_fs)
        loss_supervise = train_G_supervised(x_0, y_0, G, optimizer_G, args.dim_output)

        num_processed += len(x)
        progress = num_processed / args.data.train_size
        if step % 10 == 0:
            print('cost_G: {:.3f}|{:.3f}\tcost_D: {:.3f}|{:.3f}\tloss_supervise: {:.3f}\t'
                  'batch: {}|{}\tused: {:.3f}\t {:.3f}% iter: {}'.format(
                      cost_G, fs, cost_D, gp, loss_supervise, x.shape, text.shape,
                      time() - start, progress * 100.0, step))
            with writer.as_default():
                tf.summary.scalar("costs/cost_G", cost_G, step=step)
                tf.summary.scalar("costs/cost_D", cost_D, step=step)
                tf.summary.scalar("costs/gp", gp, step=step)
                tf.summary.scalar("costs/fs", fs, step=step)
                tf.summary.scalar("costs/loss_supervise", loss_supervise, step=step)
        if step % args.dev_step == 0:
            fer, cer = evaluation(tfdata_dev, args.data.dev_size, G)
            with writer.as_default():
                tf.summary.scalar("performance/fer", fer, step=step)
                tf.summary.scalar("performance/cer", cer, step=step)
        if step % args.decode_step == 0:
            decode(dataset_dev[0], G)
        if step % args.save_step == 0:
            save_path = ckpt_manager.save(step)
            print('save model {}'.format(save_path))

        step += 1

    print('training duration: {:.2f}h'.format(
        (datetime.now() - start_time).total_seconds() / 3600))
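# `train_G` is defined elsewhere in the repo. Below is a minimal sketch of a
# generator update consistent with its outputs: a WGAN-style adversarial term
# from D plus an `fs` regularizer weighted by lambda_fs (assumed here to be a
# frame-smoothness penalty; the actual definition may differ).
def _train_G_sketch(x, G, D, optimizer_G, lambda_fs):
    with tf.GradientTape() as tape:
        P_G = tf.nn.softmax(G(x, training=True))       # [B, T, V] posteriors
        adv = -tf.reduce_mean(D(P_G, training=True))   # push D's score on fakes up
        # penalize abrupt frame-to-frame changes in the posteriors
        fs = tf.reduce_mean(tf.abs(P_G[:, 1:] - P_G[:, :-1]))
        cost_G = adv + lambda_fs * fs
    gradients = tape.gradient(cost_G, G.trainable_variables)
    optimizer_G.apply_gradients(zip(gradients, G.trainable_variables))
    return cost_G, fs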
def Train():
    """Fully supervised training of PhoneClassifier on frame alignments
    (restricted to one fixed batch when args.num_supervised is set)."""
    dataset_train = ASR_align_DataSet(
        trans_file=args.dirs.train.trans,
        align_file=args.dirs.train.align,
        uttid2wav=args.dirs.train.wav_scp,
        feat_len_file=args.dirs.train.feat_len,
        args=args,
        _shuffle=True,
        transform=True)
    dataset_dev = ASR_align_DataSet(
        trans_file=args.dirs.dev.trans,
        align_file=args.dirs.dev.align,
        uttid2wav=args.dirs.dev.wav_scp,
        feat_len_file=args.dirs.dev.feat_len,
        args=args,
        _shuffle=False,
        transform=True)
    with tf.device("/cpu:0"):
        # wav data
        feature_train = TFData(dataset=dataset_train,
                               dir_save=args.dirs.train.tfdata,
                               args=args).read()
        feature_dev = TFData(dataset=dataset_dev,
                             dir_save=args.dirs.dev.tfdata,
                             args=args).read()
        if args.num_supervised:
            dataset_train_supervise = ASR_align_DataSet(
                trans_file=args.dirs.train_supervise.trans,
                align_file=args.dirs.train_supervise.align,
                uttid2wav=args.dirs.train_supervise.wav_scp,
                feat_len_file=args.dirs.train_supervise.feat_len,
                args=args,
                _shuffle=False,
                transform=True)
            feature_train_supervise = TFData(
                dataset=dataset_train_supervise,
                dir_save=args.dirs.train_supervise.tfdata,
                args=args).read()
            supervise_uttids, supervise_x = next(iter(feature_train_supervise.take(
                args.num_supervised).padded_batch(
                    args.num_supervised, ((), [None, args.dim_input]))))
            supervise_aligns = dataset_train_supervise.get_attrs('align', supervise_uttids.numpy())
            # supervise_bounds = dataset_train_supervise.get_attrs('bounds', supervise_uttids.numpy())
        iter_feature_train = iter(feature_train.repeat().shuffle(500).padded_batch(
            args.batch_size, ((), [None, args.dim_input])).prefetch(buffer_size=5))
        feature_dev = feature_dev.padded_batch(args.batch_size, ((), [None, args.dim_input]))

    # create model parameters
    model = PhoneClassifier(args)
    model.summary()
    optimizer_G = tf.keras.optimizers.Adam(args.opti.lr, beta_1=0.5, beta_2=0.9)

    writer = tf.summary.create_file_writer(str(args.dir_log))
    ckpt = tf.train.Checkpoint(model=model, optimizer_G=optimizer_G)
    ckpt_manager = tf.train.CheckpointManager(ckpt, args.dir_checkpoint, max_to_keep=20)

    step = 0
    # if a checkpoint exists, restore the latest checkpoint
    if args.dirs.checkpoint:
        _ckpt_manager = tf.train.CheckpointManager(ckpt, args.dirs.checkpoint, max_to_keep=1)
        ckpt.restore(_ckpt_manager.latest_checkpoint)
        print('checkpoint {} restored!!'.format(_ckpt_manager.latest_checkpoint))
        step = int(_ckpt_manager.latest_checkpoint.split('-')[-1])

    start_time = datetime.now()
    while step < 99999999:
        start = time()

        if args.num_supervised:
            x = supervise_x
            loss_supervise = train_G_supervised(supervise_x, supervise_aligns, model,
                                                optimizer_G, args.dim_output)
            # loss_supervise, bounds_loss = train_G_bounds_supervised(
            #     x, supervise_bounds, supervise_aligns, model, optimizer_G, args.dim_output)
        else:
            uttids, x = next(iter_feature_train)
            aligns = dataset_train.get_attrs('align', uttids.numpy())
            loss_supervise = train_G_supervised(x, aligns, model, optimizer_G, args.dim_output)
            # alternate objectives kept from development:
            # loss_supervise = train_G_TBTT_supervised(x, aligns, model, optimizer_G, args.dim_output)
            # bounds = dataset_train.get_attrs('bounds', uttids.numpy())
            # loss_supervise, bounds_loss = train_G_bounds_supervised(
            #     x, bounds, aligns, model, optimizer_G, args.dim_output)
            # trans = dataset_train.get_attrs('trans', uttids.numpy())
            # loss_supervise = train_G_CTC_supervised(x, trans, model, optimizer_G)

        if step % 10 == 0:
            print('loss_supervise: {:.3f}\tbatch: {}\tused: {:.3f}\tstep: {}'.format(
                loss_supervise, x.shape, time() - start, step))
            # paired with the bounds variant above:
            # print('loss_supervise: {:.3f}\tloss_bounds: {:.3f}\tbatch: {}\tused: {:.3f}\tstep: {}'.format(
            #     loss_supervise, bounds_loss, x.shape, time() - start, step))
            with writer.as_default():
                tf.summary.scalar("costs/loss_supervise", loss_supervise, step=step)
        if step % args.dev_step == 0:
            fer, cer_0 = evaluate(feature_dev, dataset_dev, args.data.dev_size, model,
                                  beam_size=0, with_stamp=True)
            fer, cer = evaluate(feature_dev, dataset_dev, args.data.dev_size, model,
                                beam_size=0, with_stamp=False)
            with writer.as_default():
                tf.summary.scalar("performance/fer", fer, step=step)
                tf.summary.scalar("performance/cer_0", cer_0, step=step)
                tf.summary.scalar("performance/cer", cer, step=step)
        if step % args.decode_step == 0:
            monitor(dataset_dev[0], model)
        if step % args.save_step == 0:
            save_path = ckpt_manager.save(step)
            print('save model {}'.format(save_path))

        step += 1

    print('training duration: {:.2f}h'.format(
        (datetime.now() - start_time).total_seconds() / 3600))
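# A minimal sketch of how entry points like these are typically dispatched
# from the command line. The flags, the config loader, and the mode names
# below are assumptions for illustration, not the repo's confirmed interface;
# `args` used throughout this file would be built from the config here.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str,
                        help='path to the YAML config that builds `args`')
    parser.add_argument('-m', '--mode', type=str, default='train',
                        help="'train' for the semi/unsupervised variants, "
                             "'train_supervised' for Train()")
    cli = parser.parse_args()
    # e.g. args = load_config(cli.config)  # hypothetical helper
    if cli.mode == 'train':
        train()
    elif cli.mode == 'train_supervised':
        Train()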