Beispiel #1
0
def main(_):
    """Load train/test reviews, build embeddings, and train a TC-LSTM model.

    All hyper-parameters and file paths come from the module-level FLAGS.
    """
    print('loading data...')
    # Embeddings are fit on the training corpus only; the test set reuses
    # the same word_idx_map so unseen words share one vocabulary.
    train_reviews, train_contents = read_data_file(FLAGS.train_file_path)
    word_idx_map, w2v = creat_word_embedding(train_contents,
                                             FLAGS.embedding_dim)
    train_data = data_parse(train_reviews, word_idx_map,
                            FLAGS.max_sentence_len)

    test_reviews, _ = read_data_file(FLAGS.test_file_path)
    test_data = data_parse(test_reviews, word_idx_map,
                           FLAGS.max_sentence_len)

    model = TC_LSTM(n_hidden=FLAGS.n_hidden,
                    n_class=FLAGS.n_class,
                    max_sentence_len=FLAGS.max_sentence_len,
                    l2_reg=FLAGS.l2_reg)

    print('start training...')
    model.learn(word_idx_map, w2v, train_data, test_data,
                n_iter=FLAGS.n_iter,
                batch_size=FLAGS.batch_size,
                learning_rate=FLAGS.learning_rate)
Beispiel #2
0
def main(_):
    """Load train/test reviews, build embeddings, and train a Text-CNN model.

    All hyper-parameters and file paths come from the module-level FLAGS.
    """
    print('loading data...')
    # Vocabulary and embeddings are derived from the training corpus only.
    train_reviews, train_contents = read_data_file(FLAGS.train_file_path)
    word_idx_map, w2v = creat_word_embedding(train_contents,
                                             FLAGS.embedding_dim)

    train_x, _, train_y = data_parse_one_direction(
        train_reviews, word_idx_map, FLAGS.max_sentence_len)

    test_reviews, _ = read_data_file(FLAGS.test_file_path)
    test_x, _, test_y = data_parse_one_direction(
        test_reviews, word_idx_map, FLAGS.max_sentence_len)

    # Input length covers both context directions plus the target token,
    # hence 2 * max_sentence_len + 1.
    model = Text_CNN(max_len=2 * FLAGS.max_sentence_len + 1,
                     n_classes=FLAGS.n_classes)

    print('start training...')
    model.learn(word_idx_map, w2v,
                (train_x, train_y),
                (test_x, test_y),
                n_iters=FLAGS.n_iters,
                batch_size=FLAGS.batch_size,
                learning_rate=FLAGS.learning_rate)
Beispiel #3
0
import os
import random

import numpy as np
import torch
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, classification_report
from torch import nn
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR

import config
import utils
from model import DependencyBLSTM

# Resolve the run configuration and the directory where model artifacts
# for this simulation are written.
args = config.args
# NOTE(review): assumes args.project_dir already ends with a path
# separator — confirm against config.
output_dir = f"{args.project_dir}Models/{args.sim_name}/"

# Build the global word-embedding table before any model code runs.
utils.creat_word_embedding()


def run():
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    save_path = args.project_dir + 'Models/' + args.sim_name + '/model.pt'

    with open(output_dir + 'config', 'w') as config_file:
        argss = (str(args).split('(')[1].split(')')[0].split(','))
        for a in argss:
            config_file.write("{}\n".format(a))
    if os.path.exists(save_path):
        model = torch.load(save_path)
        model_loaded = True
        print('Great!!! Pre-Trained Model Loaded !!!')