import os
import time
import tensorflow as tf  # TF1-style graph API; under TF2, use tensorflow.compat.v1
# SpeechLoader and Model are project-local modules assumed to be importable

def train():
	# training hyperparameters
	batch_size = 32
	n_epoch = 100
	n_mfcc = 60

	# load speech data
	wav_path = os.path.join(os.getcwd(),'data','wav','train')
	label_file = os.path.join(os.getcwd(),'data','doc','trans','train.word.txt')
	speech_loader = SpeechLoader(wav_path, label_file, batch_size, n_mfcc)
	n_out = speech_loader.vocab_size

	# load model
	model = Model(n_out, batch_size=batch_size, n_mfcc=n_mfcc)

	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())
		
		saver = tf.train.Saver(tf.global_variables())

		for epoch in range(n_epoch):
			speech_loader.create_batches() # random shuffle data
			speech_loader.reset_batch_pointer()
			for batch in range(speech_loader.n_batches):
				start = time.time()
				batches_wav, batches_label = speech_loader.next_batch()
				feed = {model.input_data: batches_wav, model.targets: batches_label}
				train_loss, _ = sess.run([model.cost, model.optimizer_op], feed_dict=feed)
				end = time.time()
				print("epoch: %d/%d, batch: %d/%d, loss: %s, time: %.3f."%(epoch, n_epoch, batch, speech_loader.n_batches, train_loss, end-start))

			# save models
			if epoch % 5 == 0:
				save_dir = os.path.join(os.getcwd(), 'model')
				os.makedirs(save_dir, exist_ok=True)  # saver.save() does not create the directory
				saver.save(sess, os.path.join(save_dir, 'speech.module'), global_step=epoch)
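A minimal entry point for running the snippet above directly (a sketch; it assumes the imports listed at the top):

if __name__ == '__main__':
	train()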
Example #2
def train():
    # training hyperparameters
    batch_size = 2
    n_epoch = 100
    n_mfcc = 60

    # load speech data
    wav_path = os.path.join(os.getcwd(), 'data', 'wav', 'train')
    label_file = os.path.join(os.getcwd(), 'data', 'doc', 'trans',
                              'train.word.txt')
    speech_loader = SpeechLoader(wav_path, label_file, batch_size, n_mfcc)
    n_out = speech_loader.vocab_size

    # load model
    model = Model(n_out, batch_size=batch_size, n_mfcc=n_mfcc)

    tf.summary.scalar('loss', model.cost)
    merged = tf.summary.merge_all()
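    # merge_all() bundles every summary registered above (here just the
    # 'loss' scalar) into a single op, so one sess.run() fetch evaluates them all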

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(tf.global_variables())
        # restore the most recent checkpoint if one exists;
        # latest_checkpoint() returns None when nothing has been saved yet
        checkpoint = tf.train.latest_checkpoint('./model')
        if checkpoint:
            print("loading model from checkpoint")
            saver.restore(sess, checkpoint)

        tf.train.write_graph(sess.graph_def, './model', 'model.pbtxt')
        summary_writer = tf.summary.FileWriter('./model', graph=sess.graph)

        for epoch in range(n_epoch):
            speech_loader.create_batches()  # random shuffle data
            speech_loader.reset_batch_pointer()
            for batch in range(speech_loader.n_batches):
                start = time.time()
                batches_wav, batches_label = speech_loader.next_batch()
                feed = {
                    model.input_data: batches_wav,
                    model.targets: batches_label
                }
                result, train_loss, _ = sess.run(
                    [merged, model.cost, model.optimizer_op], feed_dict=feed)
                end = time.time()
                print("epoch: %d/%d, batch: %d/%d, loss: %s, time: %.3f." %
                      (epoch, n_epoch, batch, speech_loader.n_batches,
                       train_loss, end - start))
                # log against a global step so per-batch summaries within an
                # epoch do not collapse onto the same x-coordinate
                summary_writer.add_summary(result, epoch * speech_loader.n_batches + batch)

            # save models
            if epoch % 5 == 0:
                saver.save(sess,
                           os.path.join(os.getcwd(), 'model', 'speech.module'),
                           global_step=epoch)
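With the loss scalar being written to ./model, the training curve from this example can be inspected with the standard TensorBoard CLI:

    tensorboard --logdir ./model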
Example #3
def train():
    '''Train the WaveNet speech model on the THCHS-30 dataset.'''

    batch_size = 8
    n_mfcc = 60
    n_epoch = 100

    source_file = '/home/ydf_micro/datasets/data_thchs30'  # adjust to your local dataset path
    speech_loader = SpeechLoader(os.path.join(source_file, 'train'),
                                 batch_size, n_mfcc)

    n_out = speech_loader.vocab_size

    # load model

    model = WaveNet(n_out, batch_size=batch_size, n_mfcc=n_mfcc)

    saver = tf.train.Saver(tf.global_variables())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # sess.graph.finalize() # Graph is read-only after this statement

        for epoch in range(n_epoch):
            speech_loader.create_batches()  # random shuffle data
            speech_loader.reset_batch_pointer()
            for batch in range(speech_loader.n_batches):
                batch_start = time.time()
                batches_wav, batches_label = speech_loader.next_batch()
                feed = {
                    model.input_data: batches_wav,
                    model.targets: batches_label
                }
                train_loss, _ = sess.run([model.cost, model.optimizer_op],
                                         feed_dict=feed)
                batch_end = time.time()
                print(
                    f'epoch: {epoch+1}/{n_epoch}, batch: {batch+1}/{speech_loader.n_batches}, '
                    f'loss: {train_loss:.2f}, time: {(batch_end-batch_start):.2f}s'
                )

            # save models
            if epoch % 5 == 0:
                save_dir = os.path.join(os.path.dirname(os.getcwd()), 'model')
                os.makedirs(save_dir, exist_ok=True)  # saver.save() does not create the directory
                saver.save(sess,
                           os.path.join(save_dir, 'speech.module'),
                           global_step=epoch)
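All three snippets drive the same SpeechLoader interface (Example #3's variant drops the label_file argument). The real class lives elsewhere in the project; the skeleton below is only a sketch reconstructed from the calls above, with every body left as an assumption:

class SpeechLoader:
    '''Sketch of the loader interface the train() functions above rely on.'''

    def __init__(self, wav_path, label_file, batch_size, n_mfcc):
        # assumed behaviour: scan wav_path for audio, read label_file,
        # extract n_mfcc MFCC features per utterance, build the vocabulary
        self.vocab_size = 0  # number of output symbols (set by the real loader)
        self.n_batches = 0   # number of batches per epoch
        self.pointer = 0

    def create_batches(self):
        # assumed behaviour: shuffle the data and regroup it into
        # (wav, label) batches of batch_size
        raise NotImplementedError

    def reset_batch_pointer(self):
        self.pointer = 0

    def next_batch(self):
        # assumed behaviour: return (batches_wav, batches_label) for the
        # current pointer position and advance the pointer
        raise NotImplementedError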