def main():
    data_loader = InputHelper()
    data_loader.create_dictionary(FLAGS.data_dir + '/' + FLAGS.train_file,
                                  FLAGS.data_dir + '/')
    FLAGS.vocab_size = data_loader.vocab_size
    FLAGS.n_classes = data_loader.n_classes

    model = BiRNN(FLAGS.rnn_size, FLAGS.layer_size, FLAGS.vocab_size,
                  FLAGS.batch_size, FLAGS.sequence_length, FLAGS.n_classes,
                  FLAGS.grad_clip)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        while True:
            x = input('Please enter an address:\n')  # raw_input under Python 2
            x = [data_loader.transform_raw(x, FLAGS.sequence_length)]

            labels = model.inference(sess, data_loader.labels, x)
            print(labels)
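InputHelper.transform_raw is not shown in this listing; from its call sites it evidently maps a raw string to a fixed-length list of vocabulary indices. A minimal sketch under that assumption (word2idx, the padding/unknown indices, and whitespace tokenization are illustrative, not taken from the source):

def transform_raw(text, sequence_length, word2idx, pad_idx=0, unk_idx=1):
    # Tokenize, map tokens to indices, then truncate or pad to sequence_length.
    idxs = [word2idx.get(tok, unk_idx) for tok in text.split()]
    idxs = idxs[:sequence_length]
    return idxs + [pad_idx] * (sequence_length - len(idxs))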
def main():
    data_loader = InputHelper(log=log)
    data_loader.load_embedding(FLAGS.embedding_file, FLAGS.embedding_size)
    data_loader.load_label_dictionary(FLAGS.label_dic)
    x, y, x_w_p, x_s_p = data_loader.load_valid(FLAGS.valid_file,
                                                FLAGS.interaction_rounds,
                                                FLAGS.sequence_length)

    FLAGS.embeddings = data_loader.embeddings
    FLAGS.vocab_size = len(data_loader.word2idx)
    FLAGS.n_classes = len(data_loader.label_dictionary)

    model = BiRNN(embedding_size=FLAGS.embedding_size,
                  rnn_size=FLAGS.rnn_size,
                  layer_size=FLAGS.layer_size,
                  vocab_size=FLAGS.vocab_size,
                  attn_size=FLAGS.attn_size,
                  sequence_length=FLAGS.sequence_length,
                  n_classes=FLAGS.n_classes,
                  interaction_rounds=FLAGS.interaction_rounds,
                  batch_size=FLAGS.batch_size,
                  embeddings=FLAGS.embeddings,
                  grad_clip=FLAGS.grad_clip,
                  learning_rate=FLAGS.learning_rate)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
        # Restore a specific checkpoint rather than the latest one.
        model_path = FLAGS.save_dir + '/model.ckpt-45'
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, model_path)

        labels = model.inference(sess, y, x, x_w_p, x_s_p)

        correct_num = 0
        for i in range(len(labels)):
            if labels[i] == y[i]:
                correct_num += 1
        print('eval_acc = {:.3f}'.format(correct_num * 1.0 / len(labels)))

        data_loader.output_result(labels, FLAGS.valid_file, FLAGS.result_file)
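The counting loop above reduces to one vectorized expression; an equivalent sketch, assuming numpy is imported as np and labels and y are equal-length sequences:

eval_acc = float(np.mean(np.asarray(labels) == np.asarray(y)))
print('eval_acc = {:.3f}'.format(eval_acc))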
Example #3
def main():
    data_loader = InputHelper()
    data_loader.create_dictionary(FLAGS.data_dir + '/' + FLAGS.train_file,
                                  FLAGS.data_dir + '/')
    FLAGS.vocab_size = data_loader.vocab_size
    FLAGS.n_classes = data_loader.n_classes
    wl = load_wl()
    # Define specified Model
    model = BiRNN(embedding_size=FLAGS.embedding_size,
                  rnn_size=FLAGS.rnn_size,
                  layer_size=FLAGS.layer_size,
                  vocab_size=FLAGS.vocab_size,
                  attn_size=FLAGS.attn_size,
                  sequence_length=FLAGS.sequence_length,
                  n_classes=FLAGS.n_classes,
                  grad_clip=FLAGS.grad_clip,
                  learning_rate=FLAGS.learning_rate)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        now = 0
        for file in open('./data/total_txt_img_cat.list'):
            word_list = {}
            arr = np.zeros([len(wl), 50])
            lab = file.split('\t')[2]
            # Accumulate the LSTM output feature of every word in the document.
            for line in open('./data/text_seqs/' + file.split()[0] + '.xml'):
                seq = line.split('\t')[0]
                x, w = data_loader.transform_raw(seq, FLAGS.sequence_length)
                _, out_features = model.inference(sess, data_loader.labels,
                                                  [x])
                for i, j in enumerate(w):
                    # Strip punctuation ('-' escaped so it is a literal, not a range).
                    punc = r"[,.!'%*+\-/=><]"
                    j = re.sub(punc, '', j)
                    if j in word_list:
                        word_list[j] += out_features[i]
                    else:
                        word_list[j] = out_features[i]
            # Copy the features of in-vocabulary words into their fixed rows.
            count = 0
            for w in word_list:
                if w in wl:
                    arr[wl[w]] = word_list[w]
                    count += 1
            print('now:', now, 'count:', count, 'shape:', arr.shape)
            s = str(now).zfill(4)  # zero-pad the file index to four digits
            np.save('./text_lstm/text_' + s + '_' + lab.strip() + '.npy', arr)
            now += 1
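load_wl is also not part of this listing; from its use above (len(wl) rows, arr[wl[w]]) it presumably maps each word of a fixed word list to a row index. A minimal sketch under that assumption, with a hypothetical one-word-per-line file:

def load_wl(path='./data/word_list.txt'):  # hypothetical path and format
    # Row index = line number in the word-list file.
    with open(path) as f:
        return {line.strip(): i for i, line in enumerate(f)}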
def train():
    data_loader = InputHelper()
    data_loader.create_dictionary(FLAGS.data_dir + '/' + FLAGS.train_file,
                                  FLAGS.data_dir + '/')
    data_loader.create_batches(FLAGS.data_dir + '/' + FLAGS.train_file,
                               FLAGS.batch_size, FLAGS.sequence_length)
    FLAGS.vocab_size = data_loader.vocab_size
    FLAGS.n_classes = data_loader.n_classes

    test_data_loader = InputHelper()
    test_data_loader.load_dictionary(FLAGS.data_dir + '/dictionary')
    test_data_loader.create_batches(FLAGS.data_dir + '/' + FLAGS.test_file,
                                    1000, FLAGS.sequence_length)

    model = BiRNN(FLAGS.rnn_size, FLAGS.layer_size, FLAGS.vocab_size,
                  FLAGS.batch_size, FLAGS.sequence_length, FLAGS.n_classes,
                  FLAGS.grad_clip)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        for e in range(FLAGS.num_epochs):
            data_loader.reset_batch()
            sess.run(
                tf.assign(model.lr,
                          FLAGS.learning_rate * (FLAGS.decay_rate**e)))
            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {
                    model.input_data: x,
                    model.targets: y,
                    model.output_keep_prob: FLAGS.dropout_keep_prob
                }
                train_loss, _ = sess.run([model.cost, model.train_op],
                                         feed_dict=feed)
                end = time.time()
                print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
                                .format(e * data_loader.num_batches + b,
                                        FLAGS.num_epochs * data_loader.num_batches,
                                        e, train_loss, end - start))

            test_data_loader.reset_batch()
            for i in range(test_data_loader.num_batches):
                test_x, test_y = test_data_loader.next_batch()
                feed = {
                    model.input_data: test_x,
                    model.targets: test_y,
                    model.output_keep_prob: 1.0
                }
                accuracy = sess.run(model.accuracy, feed_dict=feed)
                print('accuracy:{0}'.format(accuracy))

            checkpoint_path = os.path.join(FLAGS.save_dir, 'model.ckpt')
            saver.save(sess,
                       checkpoint_path,
                       global_step=e * data_loader.num_batches)
            print('model saved to {}'.format(checkpoint_path))
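The tf.assign call at the top of each epoch implements plain exponential learning-rate decay, lr_e = learning_rate * decay_rate ** e; with learning_rate = 0.002 and decay_rate = 0.97, for instance, epoch 0 trains at 0.002 and epoch 10 at 0.002 * 0.97 ** 10 ≈ 0.00147.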
def train():
    data_loader = InputHelper()
    data_loader.create_dictionary(FLAGS.data_dir + '/' + FLAGS.train_file,
                                  FLAGS.data_dir + '/')
    data_loader.create_batches(FLAGS.data_dir + '/' + FLAGS.train_file,
                               FLAGS.batch_size, FLAGS.sequence_length)
    FLAGS.vocab_size = data_loader.vocab_size
    FLAGS.n_classes = data_loader.n_classes
    FLAGS.num_batches = data_loader.num_batches

    test_data_loader = InputHelper()
    test_data_loader.load_dictionary(FLAGS.data_dir + '/dictionary')
    test_data_loader.create_batches(FLAGS.data_dir + '/' + FLAGS.test_file,
                                    100, FLAGS.sequence_length)

    if FLAGS.pre_trained_vec:
        embeddings = np.load(FLAGS.pre_trained_vec)
        print(embeddings.shape)
        FLAGS.vocab_size = embeddings.shape[0]
        FLAGS.embedding_size = embeddings.shape[1]

    if FLAGS.init_from is not None:
        assert os.path.isdir(FLAGS.init_from), '{} must be a directory'.format(
            FLAGS.init_from)
        ckpt = tf.train.get_checkpoint_state(FLAGS.init_from)
        assert ckpt, 'No checkpoint found'
        assert ckpt.model_checkpoint_path, 'No model path found in checkpoint'

    # Define specified Model
    model = BiRNN(embedding_size=FLAGS.embedding_size,
                  rnn_size=FLAGS.rnn_size,
                  layer_size=FLAGS.layer_size,
                  vocab_size=FLAGS.vocab_size,
                  attn_size=FLAGS.attn_size,
                  sequence_length=FLAGS.sequence_length,
                  n_classes=FLAGS.n_classes,
                  grad_clip=FLAGS.grad_clip,
                  learning_rate=FLAGS.learning_rate)

    # define value for tensorboard
    tf.summary.scalar('train_loss', model.cost)
    tf.summary.scalar('accuracy', model.accuracy)
    merged = tf.summary.merge_all()

    # Let GPU memory allocation grow on demand
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Session(config=tf_config) as sess:
        train_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        # using pre trained embeddings
        if FLAGS.pre_trained_vec:
            sess.run(model.embedding.assign(embeddings))
            del embeddings

        # restore model
        if FLAGS.init_from is not None:
            saver.restore(sess, ckpt.model_checkpoint_path)

        total_steps = FLAGS.num_epochs * FLAGS.num_batches
        for e in range(FLAGS.num_epochs):
            data_loader.reset_batch()
            for b in range(FLAGS.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {
                    model.input_data: x,
                    model.targets: y,
                    model.output_keep_prob: FLAGS.dropout_keep_prob
                }
                train_loss, summary, _ = sess.run(
                    [model.cost, merged, model.train_op], feed_dict=feed)
                end = time.time()

                global_step = e * FLAGS.num_batches + b

                print(
                    '{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'
                    .format(global_step, total_steps, e, train_loss,
                            end - start))

                if global_step % 20 == 0:
                    train_writer.add_summary(summary,
                                             e * FLAGS.num_batches + b)

                if global_step % FLAGS.save_steps == 0:
                    checkpoint_path = os.path.join(FLAGS.save_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=global_step)
                    print('model saved to {}'.format(checkpoint_path))

            test_data_loader.reset_batch()
            test_accuracy = []
            for i in range(test_data_loader.num_batches):
                test_x, test_y = test_data_loader.next_batch()
                feed = {
                    model.input_data: test_x,
                    model.targets: test_y,
                    model.output_keep_prob: 1.0
                }
                accuracy = sess.run(model.accuracy, feed_dict=feed)
                test_accuracy.append(accuracy)
            print('test accuracy:{0}'.format(np.average(test_accuracy)))
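The BiRNN class itself is not included in this listing; the model.accuracy tensor averaged above is conventionally defined inside the model along these lines (a sketch, assuming self.logits and integer self.targets, neither of which is shown here):

# Inside the model (sketch): fraction of argmax predictions matching the targets.
correct = tf.equal(tf.argmax(self.logits, 1), tf.cast(self.targets, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))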
Example #6
# -*- coding: utf-8 -*-

import os

import utils
from config import Config
from model import BiRNN

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

conf = Config()

wav_files, text_labels = utils.get_wavs_lables()

words_size, words, word_num_map = utils.create_dict(text_labels)


bi_rnn = BiRNN(wav_files, text_labels, words_size, words, word_num_map)
bi_rnn.build_train()
Example #7
train = tf.data.Dataset.from_tensor_slices(train).shuffle(BUFFER_SIZE)
train = train.batch(BATCH_SIZE, drop_remainder=True)

# Load embeddings matrix
embedding_matrix = load_embeddings(embedding_path=EMBEDDING_PATH,
                                   tokenizer=lang,
                                   vocab_size=vocab_size,
                                   embedding_dim=EMBEDDING_DIM,
                                   unk_token=UNK_TOKEN,
                                   start_token=START_TOKEN,
                                   end_token=END_TOKEN)

##------------------------------ Setup training  ------------------------------##

birnn = BiRNN(UNITS, PROJECTION_UNITS, MAX_SEQ_LEN, vocab_size, EMBEDDING_DIM,
              embedding_matrix)
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()

train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
ckpt = tf.train.Checkpoint(birnn=birnn)
ckpt_manager = tf.train.CheckpointManager(ckpt, CHECKPOINT_PATH, max_to_keep=2)
train_summary_writer = tf.summary.create_file_writer(LOG_DIR)

# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print('Latest checkpoint restored')

##------------------------------ Training  ------------------------------##
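The training loop this header announces is cut off in the listing; for the setup above, a typical TF2 step would look roughly like the following sketch (the birnn call signature is an assumption):

@tf.function
def train_step(inp, tar):
    # Forward pass and loss under a gradient tape.
    with tf.GradientTape() as tape:
        predictions = birnn(inp)  # assumed signature
        loss = loss_object(tar, predictions)
    # Backpropagate, update the model, and track the running mean loss.
    gradients = tape.gradient(loss, birnn.trainable_variables)
    optimizer.apply_gradients(zip(gradients, birnn.trainable_variables))
    train_loss(loss)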
Example #8
    FLAGS.vocab_size = embeddings.shape[0]
    FLAGS.embedding_size = embeddings.shape[1]
    # Build or restore the model
    if FLAGS.init_from is not None:
        assert os.path.isdir(FLAGS.init_from), '{} must be a directory'.format(
            FLAGS.init_from)
        ckpt = tf.train.get_checkpoint_state(FLAGS.init_from)
        assert ckpt, 'No checkpoint found'
        assert ckpt.model_checkpoint_path, 'No model path found in checkpoint'

    # Define specified Model
    model = BiRNN(embedding_size=FLAGS.embedding_size,
                  rnn_size=FLAGS.rnn_size,
                  layer_size=FLAGS.layer_size,
                  vocab_size=FLAGS.vocab_size,
                  attn_size=FLAGS.attn_size,
                  sequence_length=FLAGS.sequence_length,
                  n_classes=FLAGS.n_classes,
                  grad_clip=FLAGS.grad_clip,
                  learning_rate=FLAGS.learning_rate)

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

        # using pre trained embeddings
        if FLAGS.pre_trained_vec:
            sess.run(model.embedding.assign(embeddings))
            del embeddings
Example #9
def main(_):

    '''
    test_data_loader = InputHelper()
    test_data_loader.load_dictionary(FLAGS.data_dir+'/dictionary')
    test_data_loader.create_batches(FLAGS.data_dir+'/'+FLAGS.test_file, 100, FLAGS.sequence_length)
    '''
    if FLAGS.pre_trained_vec:
        embeddings = np.load(FLAGS.pre_trained_vec)
        print(embeddings.shape)
        FLAGS.vocab_size = embeddings.shape[0]
        FLAGS.embedding_size = embeddings.shape[1]

    if FLAGS.init_from is not None:
        assert os.path.isdir(FLAGS.init_from), '{} must be a directory'.format(FLAGS.init_from)
        ckpt = tf.train.get_checkpoint_state(FLAGS.init_from)
        assert ckpt, 'No checkpoint found'
        assert ckpt.model_checkpoint_path, 'No model path found in checkpoint'

    # Define specified Model
    model = BiRNN(embedding_size=FLAGS.embedding_size, rnn_size=FLAGS.rnn_size, layer_size=FLAGS.layer_size,
                  vocab_size=FLAGS.vocab_size, attn_size=FLAGS.attn_size, sequence_length=FLAGS.sequence_length,
                  n_classes=FLAGS.n_classes, grad_clip=FLAGS.grad_clip, learning_rate=FLAGS.learning_rate)

    # define value for tensorboard
    tf.summary.scalar('train_loss', model.cost)
    tf.summary.scalar('accuracy', model.accuracy)
    merged = tf.summary.merge_all()

    # Let GPU memory allocation grow on demand
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Session(config=tf_config) as sess:
        train_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        # using pre trained embeddings
        if FLAGS.pre_trained_vec:
            sess.run(model.embedding.assign(embeddings))
            del embeddings

        # restore model
        if FLAGS.init_from is not None:
            saver.restore(sess, ckpt.model_checkpoint_path)

        count = patchlength
        for e in range(FLAGS.num_batches):
            total_loss = 0
            start = time.time()
            print('start')
            count, inputs, pads, answers = list_tags(count, FLAGS.batch_size)
            if count >= len(resp):
                count = patchlength
                continue
            feed = {model.input_data: inputs, model.targets: answers,
                    model.output_keep_prob: FLAGS.dropout_keep_prob,
                    model.pad: pads}
            train_loss, summary, _ = sess.run([model.cost, merged, model.train_op],
                                              feed_dict=feed)
            end = time.time()

            print('{}/{} , train_loss = {:.3f}, time/batch = {:.3f}'.format(
                global_step.eval(), FLAGS.num_batches, train_loss, end - start))
            total_loss += train_loss

            if global_step.eval() % 20 == 0:
                train_writer.add_summary(summary, e)

            if acc > max_acc:
                max_acc = acc
                checkpoint_path = os.path.join(FLAGS.save_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=global_step)
                print('model saved to {}'.format(checkpoint_path))

            print(' loss:', total_loss / FLAGS.num_batches)
Example #10
# -*- coding: utf-8 -*-

import os

import utils
from config import Config
from model import BiRNN

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

print('# Config')
conf = Config()
print('# Load waves and labels')
wav_files, text_labels = utils.get_wavs_lables()
print('# Create dictionary')
words_size, words, word_num_map = utils.create_dict(text_labels)
print('# Build Model')
bi_rnn = BiRNN(wav_files, text_labels, words_size, words, word_num_map)
print('# Begin to test')
# wav_file = ['前进.wav']
wav_file = ['后退.wav']
# wav_file = ['左转.wav']
# wav_file = ['右转.wav']
text_label = ['前进 后退 左转 右转']
# print(type(wav_file))
bi_rnn.build_target_wav_file_test(wav_file, text_label)
Example #11
# -*- coding: utf-8 -*-

import os

import utils
from config import Config
from model import BiRNN

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

conf = Config()

wav_files, text_labels = utils.get_wavs_lables()

words_size, words, word_num_map = utils.create_dict(text_labels)

bi_rnn = BiRNN(wav_files, text_labels, words_size, words, word_num_map)
bi_rnn.build_test()

# wav_files = ['E:\\wav\\train\\A2\\A2_11.wav']
# txt_labels = ['北京 丰台区 农民 自己 花钱 筹办 万 佛 延寿 寺 迎春 庙会 吸引 了 区内 六十 支 秧歌队 参赛']
# bi_rnn = BiRNN(wav_files, txt_labels, words_size, words, word_num_map)
# bi_rnn.build_target_wav_file_test(wav_files, txt_labels)
Example #12
def m2c_generator(max_num_sample):
    '''
        m2c generator
        Input  : a testing sample index
        Output : Chord Label (n, 16)
                 Monophonic Melody Label (n, 2)
                 BPM float
        Average elapsed time for one sample : 0.16 sec
    '''
    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cpu_device = torch.device('cpu')

    # Load Data
    chord_dic = pd.read_pickle(CONFIG_ALL['data']['chord_dic'])

    # prepare features
    all_files = find_files(CONFIG_ALL['data']['test_dir'], '*.mid')
    input_dic = []
    for i_file in all_files:
        _ = midi_feature(i_file, sampling_fac=2)
        _ = np.reshape(_, (1, _.shape[0], _.shape[1]))
        input_dic.append({'midi': i_file, 'm_embed': _})
    print('Total number of files :', len(input_dic))

    # training
    model = BiRNN(CONFIG_ALL['model']['input_size'],
                  CONFIG_ALL['model']['lstm_hidden_size'],
                  CONFIG_ALL['model']['fc_hidden_size'],
                  CONFIG_ALL['model']['num_layers'],
                  CONFIG_ALL['model']['num_classes_cf'],
                  CONFIG_ALL['model']['num_classes_c'], device).to(device)

    # Load Model
    path = os.path.join(CONFIG_ALL['model']['log_dir'],
                        CONFIG_ALL['model']['exp_name'], 'models/',
                        CONFIG_ALL['model']['eval_model'])
    model.load_state_dict(torch.load(path))

    # Test the model
    with torch.no_grad():
        while True:
            test_idx = yield

            if test_idx >= max_num_sample or test_idx < 0:
                print("Invalid sample index")
                continue
            m_embedding = input_dic[test_idx]['m_embed']
            out_cf, out_c = model(
                torch.tensor(m_embedding, dtype=torch.float).to(device))

            out_c = out_c.data.cpu().numpy()

            _, pred_cf = torch.max(out_cf.data, 1)
            pred_cf = pred_cf.data.cpu().numpy()

            i_out_tn1 = -1
            i_out_tn2 = -1
            i_out_tn3 = -1
            i_out_t = -1

            predicted = []
            c_threshold = 0.825
            f_threshold = 0.35
            #ochord_threshold = 1.0

            for idx, i_out in enumerate(out_c):
                # Seventh chord
                #T_chord_label = [0, 1, 2, 3, 4, 5, 102, 103, 104]
                #D_chord_label = [77, 78, 79, 55, 56, 57]
                #R_chord_label = [132]

                # Triad Chord
                T_chord_label = [0, 1, 37]
                D_chord_label = [20, 28]
                R_chord_label = [48]

                # "Other" chords: everything outside the T/D/R sets.
                O_chord_label = [
                    i for i in range(0, 48)
                    if i not in T_chord_label + D_chord_label + R_chord_label
                ]

                # Beam-style tie-break: avoid emitting the same chord three times in a row
                if pred_cf[idx] == 0:
                    L = np.argsort(
                        -np.asarray([i_out[i] for i in T_chord_label]))
                    if i_out_tn1 == T_chord_label[
                            L[0]] and i_out_tn2 == T_chord_label[L[0]]:
                        i_out_t = T_chord_label[L[1]]
                    else:
                        i_out_t = T_chord_label[L[0]]

                elif pred_cf[idx] == 1:
                    i_out_t = D_chord_label[np.argmax(
                        [i_out[i] for i in D_chord_label])]

                elif pred_cf[idx] == 3:
                    L = np.argsort(
                        -np.asarray([i_out[i] for i in O_chord_label]))
                    if i_out_tn1 == O_chord_label[
                            L[0]] and i_out_tn2 == O_chord_label[L[0]]:
                        i_out_t = O_chord_label[L[1]]
                    else:
                        i_out_t = O_chord_label[L[0]]

                else:
                    i_out_t = 48

                predicted.append(i_out_t)
                i_out_tn2 = i_out_tn1
                i_out_tn1 = i_out_t
                i_out_last = i_out

            # Write file to midi
            midi_original = pretty_midi.PrettyMIDI(input_dic[test_idx]['midi'])
            midi_chord = pro_chordlabel_to_midi(
                predicted,
                chord_dic,
                inv_beat_resolution=CONFIG_ALL['data']['chord_resolution'],
                constant_tempo=midi_original.get_tempo_changes()[1])
            midi_chord.instruments[0].name = "Predicted_w_func"
            midi_original.instruments.append(midi_chord.instruments[0])

            out_path = os.path.join('eval_test/', str(test_idx) + '.mid')
            ensure_dir(out_path)
            midi_original.write(out_path)
            print "Write Files to : ", out_path

            out_mc = midi_to_list(midi_original, predicted)

            yield {
                'melody': out_mc['melody'],
                'chord': out_mc['chord'],
                'BPM': float(midi_original.get_tempo_changes()[1])
            }
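Because the loop body alternates a bare test_idx = yield with a yield of the result dict, a caller has to prime the generator with next() and re-arm it between send() calls; a usage sketch with an illustrative sample count:

gen = m2c_generator(max_num_sample=100)  # count is illustrative
next(gen)             # prime: run up to the first bare `yield`
result = gen.send(0)  # {'melody': ..., 'chord': ..., 'BPM': ...}
next(gen)             # advance past the result yield, back to `test_idx = yield`
result = gen.send(1)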
Example #13
import torch
from torch import nn as nn
from model import BiRNN
from data import vocab
import torchtext.vocab as Vocab
import os
from data import DATA_ROOT
from data import train_iter, test_iter
import time

# Use the GPU if one is available
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Word-embedding dimension, hidden units, and number of hidden layers
embed_size, num_hiddens, num_layers = 300, 100, 2
net = BiRNN(vocab, embed_size, num_hiddens, num_layers)
# Load Wikipedia pre-trained word vectors (fastText); `cache` is the save directory
fasttext_vocab = Vocab.FastText(cache=os.path.join(DATA_ROOT, "fasttext"))


def load_pretrained_embedding(words, pretrained_vocab):
    """Extract the vectors for `words` from a pre-trained vocab."""
    # Initialize to zeros; out-of-vocabulary words keep the zero vector.
    embed = torch.zeros(len(words), pretrained_vocab.vectors[0].shape[0])
    oov_count = 0  # out of vocabulary
    for i, word in enumerate(words):
        try:
            idx = pretrained_vocab.stoi[word]
            embed[i, :] = pretrained_vocab.vectors[idx]
        except KeyError:
            oov_count += 1
    return embed
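A typical way to apply the helper, mirroring Example #14 below and assuming vocab.itos holds the word list as in torchtext, is to copy the extracted matrix into the model's embedding layer and freeze it:

net.embedding.weight.data.copy_(
    load_pretrained_embedding(vocab.itos, fasttext_vocab))
net.embedding.weight.requires_grad = False  # keep the pre-trained vectors fixed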
Example #14
print("索引化后的训练数据:", len(train_features))
print("索引化后的测试数据:", len(test_features))
print("训练集标签:", len(labels))

from model import generator, BiRNN, train, evaluate_accuracy
from torch import nn, optim
import torch
import numpy as np
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(np.array(train_features),
                                                    np.array(labels),
                                                    test_size=0.2)

x_train = torch.tensor(x_train, dtype=torch.long)
y_train = torch.tensor(y_train)
x_test = torch.tensor(x_test, dtype=torch.long)
y_test = torch.tensor(y_test)

test_features = torch.tensor(np.array(test_features), dtype=torch.long)
embeddings_matrix = torch.tensor(embeddings_matrix)

net = BiRNN(word_index, 100, 100, 2)
net.embedding.weight.data.copy_(embeddings_matrix)
net.embedding.weight.requires_grad = False
loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),
                       lr=0.0001)
num_epochs = 10
train_iter = generator(x_train, y_train, 10, train=True)
test_iter = generator(x_test, y_test, 10, train=False)

train(train_iter, test_iter, net, loss, optimizer, num_epochs)
Example #15
def train():
    # Load training data
    data_loader = InputHelper(log=log)
    data_loader.load_embedding(FLAGS.embedding_file, FLAGS.embedding_size)
    train_data = data_loader.load_data(FLAGS.data_dir + '/' + FLAGS.train_file, FLAGS.data_dir + '/',
                                       FLAGS.interaction_rounds, FLAGS.sequence_length)
    x_batch, y_batch, train_interaction_point, train_word_point = data_loader.generate_batches(
        train_data, FLAGS.batch_size, FLAGS.interaction_rounds)
    FLAGS.vocab_size = len(data_loader.word2idx)
    FLAGS.n_classes = len(data_loader.label_dictionary)
    print(FLAGS.n_classes)
    FLAGS.num_batches = data_loader.num_batches
    FLAGS.embeddings = data_loader.embeddings
    # Load test data
    test_data_loader = InputHelper(log=log)
    test_data_loader.load_info(embeddings=FLAGS.embeddings, word2idx=data_loader.word2idx,
                               idx2word=data_loader.idx2word,
                               label_dictionary=data_loader.label_dictionary)
    test_data = test_data_loader.load_data(FLAGS.data_dir + '/' + FLAGS.test_file, FLAGS.data_dir + '/',
                                           FLAGS.interaction_rounds, FLAGS.sequence_length)
    test_x_batch, test_y_batch, test_interaction_point, test_word_point = test_data_loader.generate_batches(
        test_data, FLAGS.batch_size, FLAGS.interaction_rounds)
    # Define specified Model
    model = BiRNN(embedding_size=FLAGS.embedding_size, rnn_size=FLAGS.rnn_size, layer_size=FLAGS.layer_size,
                  vocab_size=FLAGS.vocab_size, attn_size=FLAGS.attn_size, sequence_length=FLAGS.sequence_length,
                  n_classes=FLAGS.n_classes, interaction_rounds=FLAGS.interaction_rounds,
                  batch_size=FLAGS.batch_size, embeddings=FLAGS.embeddings,
                  grad_clip=FLAGS.grad_clip, learning_rate=FLAGS.learning_rate)
    # define value for tensorboard
    tf.summary.scalar('train_loss', model.cost)
    tf.summary.scalar('accuracy', model.accuracy)
    merged = tf.summary.merge_all()

    # Let GPU memory allocation grow on demand
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Session(config=tf_config) as sess:
        train_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=1000)
        
        total_steps = FLAGS.num_epochs * FLAGS.num_batches
        for e in range(FLAGS.num_epochs):
            data_loader.reset_batch()
            e_avg_loss = []
            t_acc = []
            start = time.time()
            num_tt = []
            # w = open('temp/pre' + str(e) + '.txt', 'w')
            for b in range(FLAGS.num_batches):
                x, y, z, m = data_loader.next_batch(x_batch, y_batch,
                                                    train_interaction_point, train_word_point)
                feed = {model.input_data: x, model.targets: y,
                        model.output_keep_prob: FLAGS.dropout_keep_prob,
                        model.word_point: m, model.sentence_point: z}
                train_loss, t_accs, yy, yyy, summary, _ = sess.run(
                    [model.cost, model.accuracy, model.y_results, model.y_tr,
                     merged, model.train_op], feed_dict=feed)
                e_avg_loss.append(train_loss)
                t_acc.append(t_accs)
                global_step = e * FLAGS.num_batches + b
                if global_step % 20 == 0:
                    train_writer.add_summary(summary, global_step)
                # Count predictions that are correct and not class 4 (presumably the null label).
                num_t = 0
                for i in range(len(yy)):
                    if yy[i] == yyy[i] and yy[i] != 4:
                        num_t += 1
                num_tt.append(num_t * 1.0 / len(yy))
                # w.write('predict ' + str(len(yy)) + '\n')
                # for y in yy:
                #     w.write(str(y) + '\t')
                # w.write('\ntrue ' + str(len(yyy)) + '\n')
                # for ys in yyy:
                #     w.write(str(ys) + '\t')
                # w.write('\n')
            # w.close()


            # model test
            test_data_loader.reset_batch()
            test_accuracy = []
            test_a = []
            for i in range(test_data_loader.num_batches):
                test_x, test_y, test_z, test_m = test_data_loader.next_batch(
                    test_x_batch, test_y_batch, test_interaction_point, test_word_point)
                feed = {model.input_data: test_x, model.targets: test_y,
                        model.output_keep_prob: 1.0,
                        model.word_point: test_m, model.sentence_point: test_z}
                accuracy, y_p, y_r = sess.run(
                    [model.accuracy, model.y_results, model.y_tr], feed_dict=feed)
                test_accuracy.append(accuracy)
                num_test = 0
                for j in range(len(y_p)):
                    if y_p[j] == y_r[j] and y_p[j] != 4:
                        num_test += 1
                test_a.append(num_test * 1.0 / len(y_p))
            end = time.time()
            num_tt_acc = np.average(num_tt)
            num_test_acc = np.average(test_a)
            avg_loss = np.average(e_avg_loss)
            print('e{}, loss = {:.3f}, train_acc = {:.3f}, test_acc = {:.3f}, time/epoch = {:.3f}'.format(
                e, avg_loss, num_tt_acc, num_test_acc, end - start))
            # print and save
            # avg_loss = np.average(e_avg_loss)
            # t_avg_acc = np.average(t_acc)
            # log.info('epoch {}, train_loss = {:.3f}, train_acc = {:.3f}, test_accuracy: {:.3f}, '
            #          'time/epoch = {:.3f}'.format(e, avg_loss, t_avg_acc, np.average(test_accuracy), end - start))
            checkpoint_path = os.path.join(FLAGS.save_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=e)
Example #16
def main(_):
    assert FLAGS.source_train_path, ("--source_train_path is required.")
    assert FLAGS.target_train_path, ("--target_train_path is required.")

    # Create vocabularies.
    source_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
                                     "vocabulary.source")
    target_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
                                     "vocabulary.target")
    utils.create_vocabulary(source_vocab_path, FLAGS.source_train_path, FLAGS.source_vocab_size)
    utils.create_vocabulary(target_vocab_path, FLAGS.target_train_path, FLAGS.target_vocab_size)

    # Read vocabularies.
    source_vocab, rev_source_vocab = utils.initialize_vocabulary(source_vocab_path)
    target_vocab, rev_target_vocab = utils.initialize_vocabulary(target_vocab_path)

    # Read parallel sentences.
    parallel_data = utils.read_data(FLAGS.source_train_path, FLAGS.target_train_path,
                                    source_vocab, target_vocab)

    # Read validation data set.
    if FLAGS.source_valid_path and FLAGS.target_valid_path:
        valid_data = utils.read_data(FLAGS.source_valid_path, FLAGS.target_valid_path,
                                    source_vocab, target_vocab)

    # Initialize BiRNN.
    config = Config(len(source_vocab),
                    len(target_vocab),
                    FLAGS.embedding_size,
                    FLAGS.state_size,
                    FLAGS.hidden_size,
                    FLAGS.num_layers,
                    FLAGS.learning_rate,
                    FLAGS.max_gradient_norm,
                    FLAGS.use_lstm,
                    FLAGS.use_mean_pooling,
                    FLAGS.use_max_pooling,
                    FLAGS.source_embeddings_path,
                    FLAGS.target_embeddings_path,
                    FLAGS.fix_pretrained)

    model = BiRNN(config)

    # Build graph.
    model.build_graph()

    # Train  model.
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        train_iterator = utils.TrainingIteratorRandom(parallel_data, FLAGS.num_negative)
        train_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "train"), sess.graph)

        if FLAGS.source_valid_path and FLAGS.target_valid_path:
            valid_iterator = utils.EvalIterator(valid_data)
            valid_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "valid"), sess.graph)

        epoch_loss = 0
        epoch_completed = 0
        batch_completed = 0

        num_iter = int(np.ceil(train_iterator.size / FLAGS.batch_size * FLAGS.num_epochs))
        start_time = time.time()
        print("Training model on {} sentence pairs per epoch:".
            format(train_iterator.size, valid_iterator.size))

        for step in range(num_iter):
            source, target, label = train_iterator.next_batch(FLAGS.batch_size)
            source_len = utils.sequence_length(source)
            target_len = utils.sequence_length(target)
            feed_dict = {model.x_source: source,
                         model.x_target: target,
                         model.labels: label,
                         model.source_seq_length: source_len,
                         model.target_seq_length: target_len,
                         model.input_dropout: FLAGS.keep_prob_input,
                         model.output_dropout: FLAGS.keep_prob_output,
                         model.decision_threshold: FLAGS.decision_threshold}

            _, loss_value, epoch_accuracy,\
            epoch_precision, epoch_recall = sess.run([model.train_op,
                                                      model.mean_loss,
                                                      model.accuracy[1],
                                                      model.precision[1],
                                                      model.recall[1]],
                                                      feed_dict=feed_dict)
            epoch_loss += loss_value
            batch_completed += 1
            # Write the model's training summaries.
            if step % FLAGS.steps_per_checkpoint == 0:
                summary = sess.run(model.summaries, feed_dict=feed_dict)
                train_summary_writer.add_summary(summary, global_step=step)
            # End of current epoch.
            if train_iterator.epoch_completed > epoch_completed:
                epoch_time = time.time() - start_time
                epoch_loss /= batch_completed
                epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
                epoch_completed += 1
                print("Epoch {} in {:.0f} sec\n"
                      "  Training: Loss = {:.6f}, Accuracy = {:.4f}, "
                      "Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
                      .format(epoch_completed, epoch_time,
                              epoch_loss, epoch_accuracy,
                              epoch_precision, epoch_recall, epoch_f1))
                # Save a model checkpoint.
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir, "model.ckpt")
                model.saver.save(sess, checkpoint_path, global_step=step)
                # Evaluate model on the validation set.
                if FLAGS.source_valid_path and FLAGS.target_valid_path:
                    eval_epoch(sess, model, valid_iterator, valid_summary_writer)
                # Initialize local variables for new epoch.
                batch_completed = 0
                epoch_loss = 0
                sess.run(tf.local_variables_initializer())
                start_time = time.time()

        print("Training done with {} steps.".format(num_iter))
        train_summary_writer.close()
        if FLAGS.source_valid_path and FLAGS.target_valid_path:
            valid_summary_writer.close()
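utils.f1_score is not shown in this listing; given the running precision and recall above, it is presumably the usual harmonic mean, along these lines (a sketch that guards the zero case):

def f1_score(precision, recall):
    # Harmonic mean of precision and recall; defined as 0 when both are 0.
    if precision + recall == 0.0:
        return 0.0
    return 2.0 * precision * recall / (precision + recall)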
Example #17
from model import BiRNN
from options import options

import tensorflow as tf
import numpy as np

from train import train, train_step

options = options()
opts = options.parse()

biLSTM = BiRNN(opts)

# load the data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# reshape to [batch, 1, 784] so the images can be fed into the LSTM module
x_train = x_train.reshape([-1, 1, 28 * 28]).astype(np.float32)
x_test = x_test.reshape([-1, 1, 28 * 28]).astype(np.float32)

# transform the input so that the values lie between 0 and 1
x_train = x_train / 255.0
x_test = x_test / 255.0

# make the classes integers
y_train = tf.cast(y_train, tf.int64)
y_test = tf.cast(y_test, tf.int64)

# set the buffer / batch sizes and shuffle the data
BUFFER_SIZE = 60000
BATCH_SIZE = opts.batch_size