Example no. 1
def main():
    print('NLI problem')
    train_data_path = './data/snli_1.0/snli_1.0_train.txt'
    dev_data_path = './data/snli_1.0/snli_1.0_dev.txt'
    vocab_path = './data/vocabList.txt'

    labels, premise, hypothesis = loadData(train_data_path)
    dev_labels, dev_premise, dev_hypothesis = loadData(dev_data_path)

    vocabList = readVocabList(vocab_path)
    print(len(vocabList))

    torch.manual_seed(64)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    batch = 64
    epoch = 48
    embed_size = 100
    hidden_size = 100

    model = RNN(embed_size, hidden_size, vocabList, device).to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

    s = time.time()
    train(premise, hypothesis, labels, model, optimizer, epoch, batch, device)
    e = time.time()
    print('the train time is : ', 1. * (e - s) / 60)
    # model.load_state_dict(torch.load('./data/rnn_params.pth'))

    test(premise, hypothesis, labels, model, device, batch)
    test(dev_premise, dev_hypothesis, dev_labels, model, device, batch)
Example no. 2
def selectModel():  # Model
    if args.model == "RNN":
        model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
    elif args.model == "LSTM":
        model = BiLSTM(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM,
                       N_LAYERS, BIDIRECTIONAL, DROPOUT)
    else:
        raise ValueError('Unknown model: {}'.format(args.model))
    return model
Example no. 3
def train(args):
    data = Loader(args.data_path, args.batch_size, args.seq_length)
    args.vocab_size = data.vocab_size

    # resume training from existing model (if it exists)
    if args.load_from is not None:
        ckpt = tf.train.get_checkpoint_state(args.load_from)

    # create save directory if it does not already exist
    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    # save configurations of current model
    with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
        pickle.dump(args, f)
    # save all characters and mapping from characters to index
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
        pickle.dump((data.chars, data.vocab), f)

    # instantiate and train RNN model
    print('Instantiating model...')
    model = RNN(args)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        # restore model
        if args.load_from is not None:
            saver.restore(sess, ckpt.model_checkpoint_path)

        print('Starting training...')
        for ep in range(args.num_epochs):
            # decreasing learning rate
            sess.run(tf.assign(model.learning_rate, args.learning_rate * (args.decay_rate ** ep)))
            state = sess.run(model.initial_state)

            for i in range(data.n_batches):
                start = time.time()
                x, y = data.next_batch()
                feed = {model.input_data: x, model.targets: y}

                # assign initial state from previous time step
                for j in range(len(state)):
                    feed[model.initial_state[j]] = state[j]

                loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
                end = time.time()
                print('{}/{} (epoch {}), train_loss={:.3f}, time/batch={:.3f}'
                      .format(ep * data.n_batches + i, args.num_epochs * data.n_batches, ep, loss, end - start))

            # save model
            checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=ep * data.n_batches + i)
            print("model saved to {}".format(checkpoint_path))
Example no. 4
def main():
    name2id = {'START':0, 'I-MISC':1, 'B-MISC':2, 'I-LOC':3, 'B-LOC':4,
               'I-ORG':5, 'B-ORG':6, 'I-PER':7, 'O':8, 'END':9}

    train_data_path = './data/conll2003/eng.train'
    params_path = './data/rnn_params.pth'

    data, labels = loadData(train_data_path)
    labels = [[name2id[name] for name in sents] for sents in labels]

    vocabList = createVocabList(data)

    train_x, test_x, train_y, test_y = data_split(data, labels, 0.1, 42)

    torch.manual_seed(64)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    batch = 32
    epoch = 4
    embed_size = 100
    hidden_size = 50
    flag_load_model = 1
    n_label = len(name2id)
    corate = -1

    model = RNN(embed_size, hidden_size, n_label, vocabList, device).to(device)
    if flag_load_model:
        checkpoint = torch.load(params_path)
        model.load_state_dict(checkpoint['model_dict'])
        corate = checkpoint['corate']

    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)


    for key, value in model.named_parameters():
        print(key, value.shape)

    train(train_x, train_y, test_x, test_y,
          model, optimizer, device, epoch, batch, params_path, corate=corate)
    print(model.transition)

    # test_x = test_x[:5]
    # test_y = test_y[:5]
    test(test_x, test_y, model, device)
Example no. 5
def get_model(conf, data_helper, model_name):
    # retrieve model configurations
    max_epoch = conf.max_epoch
    num_negatives = conf.num_negatives
    batch_size_p = conf.batch_size_p
    eval_topk = conf.eval_topk
    optimizer = conf.optimizer
    loss = conf.loss
    user_dim = conf.user_dim
    item_dim = conf.item_dim
    data_spec = data_helper.data_spec
    user_count = data_spec.user_count
    item_count = data_spec.item_count
    word_count = data_spec.word_count
    emb_normalization = conf.emb_normalization
    max_content_len = data_spec.max_content_len
    support_groupping_for_all = True  # provide general speed-up

    # standard input & output of the model
    input_dtype = 'int32'
    row_cidx_prefx = tf.Variable(np.arange(batch_size_p, \
        dtype=input_dtype).reshape((batch_size_p, 1)))
    uid = Input(shape=(1,), dtype=input_dtype)
    cid = Input(shape=(1,), dtype=input_dtype)
    U_emb_given = Input(shape=(user_dim,), dtype='float32')
    C_emb_given = Input(shape=(item_dim,), dtype='float32')
    # unique cid: tf.unique returns (values, indices), so two separate Lambda layers are needed
    cid_u = Lambda(lambda x: tf.reshape(tf.unique(x)[0], (-1, 1)), 
                   output_shape=(1,))(Reshape(())(cid))
    cid_x = Lambda(lambda x: tf.reshape(tf.unique(x)[1], (-1, 1)),
                   output_shape=(1,))(Reshape(())(cid))

    # retrieve content
    with tf.device('/cpu:0'):
        C = tf.Variable(data_helper.data['C'])
        get_content = lambda x: tf.reshape(tf.gather(C, x), 
                                           (-1, max_content_len))
        content = Lambda(get_content, output_shape=(max_content_len, ))(cid)
        content_u = Lambda(get_content, output_shape=(max_content_len, ))(cid_u)

    # user embedding: U_emb, U_emb_front (first batch_size_p)
    Emb_U = Embedding(user_count, user_dim, name='user_embedding',
                      activity_regularizer=activity_l2(conf.u_reg))
    U_emb = Reshape((user_dim, ))(Emb_U(uid))
    if emb_normalization:
        U_emb = Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(U_emb)
    uid_front = Lambda(lambda x: x[:batch_size_p])(uid)  # slicing has to go through a Lambda layer in Keras
    U_emb_front = Reshape((user_dim, ))(Emb_U(uid_front))

    # item embedding: C_emb_compact (no duplication), C_emb
    get_item_emb_combined_pretrain = ItemCombination().get_model()
    if model_name == 'pretrained':
        if conf.evaluation_mode:
            Emb_U = Embedding(user_count, user_dim, trainable=False,
                              weights=[conf.pretrain['user_emb']])
            U_emb = Reshape((user_dim, ))(Emb_U(uid))
            Emb_C = Embedding(item_count, item_dim, trainable=False,
                              weights=[conf.pretrain['item_emb']])
            C_emb = Reshape((item_dim, ))(Emb_C(cid))
        else:
            if conf.pretrain['transform']:
                C_emb = get_item_emb_combined_pretrain(None, cid, conf, data_spec)
            else:
                Emb_C = Embedding(item_count, item_dim, trainable=False,
                                  weights=[data_spec.C_pretrain])
                C_emb = Reshape((item_dim, ))(Emb_C(cid))
        C_emb_compact = C_emb
    elif model_name == 'mf':
        Emb_C = Embedding(item_count, item_dim, name='item_embedding')
        C_emb = Reshape((item_dim, ))(Emb_C(cid))
        C_emb_compact = C_emb
    else:
        if model_name == 'basic_embedding':
            Content_model = MeanPool(data_spec, conf).get_model()
        elif model_name == 'cnn_embedding':
            Content_model = CNN(data_spec, conf).get_model()
        elif model_name == 'rnn_embedding':
            Content_model = RNN(data_spec, conf).get_model()
        else:
            assert False, '[ERROR] Model name {} unknown'.format(model_name)
        C_emb_compact = Content_model([content_u, cid_u])  # (None, item_dim)
        C_emb_compact = get_item_emb_combined_pretrain(C_emb_compact, cid_u, \
            conf, data_spec) # (None, item_dim)
        # C_emb_u only computes unique set of items, no duplication
        C_emb_u = Lambda( \
            lambda x: tf.reshape(tf.gather(x[0], x[1]), (-1, item_dim)), \
            output_shape=(item_dim, ))([C_emb_compact, cid_x])
        if support_groupping_for_all:
            C_emb = C_emb_u
        else:  # otherwise only support groupping for group_neg_shared
            C_emb = Content_model([content, cid])  # (None, item_dim)
        if emb_normalization:
            C_emb_compact = Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(C_emb_compact)
            C_emb = Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(C_emb)
    
    # item embedding more: C_emb_front, C_emb_back
    cid_front = Lambda(lambda x: x[:batch_size_p])(cid)
    cid_back = Lambda(lambda x: x[batch_size_p:])(cid)
    C_emb_front = Lambda(lambda x: x[:batch_size_p])(C_emb)
    C_emb_back = Lambda(lambda x: x[batch_size_p:])(C_emb)

    # interact (with or without bias)
    Interact = InteractionDot(bias=conf.interaction_bias, 
                              user_count=user_count, item_count=item_count)

    pred_score = Interact.set_form('mul')([U_emb, C_emb, uid, cid])

    pred_score_with_given = Interact.set_form('mul')([U_emb_given, C_emb_given,
                                                      uid, cid])

    pred_score_neg_shared = Interact.set_form('matmul')([U_emb, C_emb, 
                                                         uid, cid])

    pred_score_neg_shared_comp = Interact.set_form('matmul')([ \
        U_emb, C_emb_compact, uid, cid_u])
    pos_idxs = tf.concat([row_cidx_prefx, \
        tf.reshape(cid_x, (-1, 1))], 1)  # (batch_size_p, 2)
    loss_neg_shared_comp = get_group_neg_shared_loss( \
        pred_score_neg_shared_comp, pos_idxs, loss, batch_size_p, conf)

    pred_pos_sampled_neg_shared = Interact.set_form('mul')([ \
        U_emb_front, C_emb_front, uid_front, cid_front])  # (batch_size_p, 1)
    pred_neg_sampled_neg_shared = Interact.set_form('matmul')([ \
        U_emb_front, C_emb_back, uid_front, cid_back])  # (batch_size_p, num_negatives)
    pred_score_sampled_neg_shared = Lambda(lambda x: tf.concat([x[0], x[1]], 1))( \
        [pred_pos_sampled_neg_shared, pred_neg_sampled_neg_shared])

    # uid-cid element-wise interaction
    # during training, first batch_size_p assumed positive
    model = Model(input=[uid, cid], output=[pred_score])
    model.compile(optimizer=optimizer, \
        loss=get_original_loss(loss, batch_size_p, num_negatives, conf))

    # uid-cid complete pairwise interaction (produce prediction matrix)
    # during training, diag is assumed positive
    model_neg_shared = Model(input=[uid, cid], output=[pred_score_neg_shared])
    model_neg_shared.compile(optimizer=optimizer, \
        loss=get_neg_shared_loss(loss, batch_size_p, conf))

    # uid and compacted cid complete pairwise interactions
    model_group_neg_shared = Model(input=[uid, cid], \
        output=[pred_score_neg_shared_comp])
    model_group_neg_shared.compile(optimizer=optimizer, \
        loss=lambda y_true, y_pred: loss_neg_shared_comp)  # dummy
    
    # sampled negatives are shared
    # first batch_size_p pairs are positive ones, 
    # uid[:batch_size_p] and cid[batch_size_p:] are negative links
    model_sampled_neg_shared = Model(input=[uid, cid], \
        output=[pred_score_sampled_neg_shared])
    model_sampled_neg_shared.compile(optimizer=optimizer, \
        loss=get_sampled_neg_shared_loss(loss, batch_size_p, 
                                         num_negatives, conf))

    # test efficient methods with given (uid, cid) pairs
    model_user_emb = Model(input=[uid], output=[U_emb])
    model_item_emb = Model(input=[cid], output=[C_emb])
    model_pred_pairs = Model(input=[U_emb_given, C_emb_given, uid, cid], \
        output=[pred_score_with_given])

    # construct models for monitoring all types of losses during training
    def get_all_losses(input, output, loss):
        model_all_loss = {'skip-gram': None, 'mse': None,
                          'log-loss': None, 'max-margin': None}
        for lname in model_all_loss:
            from keras.optimizers import SGD
            m = Model(input=input, output=output)
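            # SGD with a learning rate of 0.0 performs no weight updates; these compiled models are used only to read out loss values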
            m.compile(optimizer=SGD(0.), loss=loss)
            model_all_loss[lname] = m
        return model_all_loss

    model_all_loss = get_all_losses([uid, cid], [pred_score], \
        get_original_loss(loss, batch_size_p, num_negatives, conf))
    model_neg_shared_all_loss = get_all_losses([uid, cid], \
        [pred_score_neg_shared], \
        get_neg_shared_loss(loss, batch_size_p, conf))

    model_dict = {'model': model,
                  'model_neg_shared': model_neg_shared,
                  'model_group_neg_shared': model_group_neg_shared,
                  'model_sampled_neg_shared': model_sampled_neg_shared,
                  'model_user_emb': model_user_emb,
                  'model_item_emb': model_item_emb,
                  'model_pred_pairs': model_pred_pairs,
                  'model_all_loss': model_all_loss,
                  'model_neg_shared_all_loss': model_neg_shared_all_loss
                  }

    return model_dict
Example no. 6
import os

import tensorflow as tf
from rnn_model import RNN  # assumed: same module as in Example no. 8; Data_Formatter is assumed imported elsewhere

# hyperparameters
file_name = 'testtraining.txt'
epochs = 20  # number of times the training data is completely sent through
learning_rate = 0.002
batch_size = 200  # batch size for minibatch gradient descent
layer_size = 128  # number of neurons for each LSTM hidden layer
num_layers = 2  # number of LSTM layers
num_steps = 50  # number of time steps in the RNN

num_notes = 100
music_book_name = 'collection1.abc'

data = Data_Formatter(file_name, batch_size, num_steps)
num_classes = len(data.unique_notes)

rnn = RNN(batch_size, num_steps, num_classes, num_layers, layer_size, learning_rate, training=False)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

checkpoint = tf.train.get_checkpoint_state('saved_models')
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, checkpoint.model_checkpoint_path)
print('restored session!')

save_dir = os.path.join('songs', music_book_name)

with open(save_dir, 'w') as file:
    file.write(rnn.generate_notes(sess, num_notes, data.notes_to_ind, data.ind_to_notes))
Example no. 7
        training_params['seed'] = random.randint(0, 9999)
    else:
        print('Using supplied seed')
    seed = training_params['seed']

    # Preprocess training dataset
    train_dataset.remove_nans()  # remove nans from data
    train_dataset.unbound(
        config['transform'])  # apply transform to coherence values
    train_dataset.create_test_set(seed=seed)  # create test set

    # Load model
    model_params = (config['model_hyperparameters']
                    if 'model_hyperparameters' in config else DEFAULT_MODEL_PARAMS)
    model_params['data_dim'] = train_dataset.data_dim
    model = RNN(model_params).to(device)  # move model onto device

    # Check if best_model.pth already exists from previous training, or if we're passing a model via the command line

    # If we're passing a previous model from the command line, use that
    if args.best_model is not None:
        best_model_path = args.best_model
        print("Loading a pre-existing model from {}".format(args.best_model))

    # If we can't find a model from the command line, look for one saved in save_dir
    elif os.path.isfile(os.path.join(save_dir, 'best_model.pth')):
        best_model_path = os.path.join(save_dir, 'best_model.pth')
        print('Found a best model from previous training: {}'.format(
            best_model_path))

    # If we don't have a model yet, we'll have to train one
Example no. 8
import time, os

import tensorflow as tf
from rnn_model import RNN

# hyperparameters
file_name = 'testtraining.txt'
epochs = 20  # number of times the training data is completely sent through
learning_rate = 0.002
batch_size = 200  # batch size for minibatch gradient descent
layer_size = 128  # number of neurons for each LSTM hidden layer
num_layers = 2  # number of LSTM layers
num_steps = 50  # number of time steps in the RNN

data = Data_Formatter(file_name, batch_size, num_steps)
num_classes = len(data.unique_notes)

rnn = RNN(batch_size, num_steps, num_classes, num_layers, layer_size, learning_rate)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
saver = tf.train.Saver()

for epoch in range(epochs):
    data.reset()
    for batch in range(data.num_batches):
        stime = time.time()
        x, y = data.next_batch()
        input_feed = {rnn.x: x, rnn.targets: y}
        # run one training step; the second fetch is the optimizer op, whose return value is discarded
        train_loss, _ = sess.run([rnn.cost, rnn.optimizer], input_feed)
        ''' The following code checks for overfitting (make sure to switch to the testing data):
        loss = sess.run([rnn.cost], input_feed)
        print('testing loss = %s' % loss) '''
Example no. 9
    if FLAGS.is_train:
        print('Training ...')

        # Batch size x time steps x features.
        #if FLAGS.single_time_step:
        #    x = tf.placeholder(tf.float32, [None, data_sampler.x_dim, 1] , name='x')
        #else:
        x = tf.placeholder(tf.float32, [None, 1, data_sampler.x_dim], name='x')
        y = tf.placeholder(tf.int32, [None, data_sampler.n_classes], name='y')
        x_len = tf.placeholder(tf.int32, [None], name='x_len')

        net = RNN(x=x,
                  x_len=x_len,
                  cell_size=FLAGS.cell_size,
                  num_classes=FLAGS.num_classes,
                  num_layers=FLAGS.num_layers,
                  use_cos=FLAGS.use_cos)

        logits, _ = net()
        prob_op = tf.nn.softmax(logits)

        # loss function
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        correct_prediction = tf.equal(tf.argmax(prob_op, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        confusion_matrix = tf.contrib.metrics.confusion_matrix(
            labels=tf.argmax(y, 1), predictions=tf.argmax(logits, 1))

        # Solver
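The snippet is cut off right after the "# Solver" comment; a generic TF1-style training op, not taken from the original code (the 1e-3 learning rate is a placeholder), might look like:

        train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)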
Example no. 10
def run_RNN(x_train, y_train_rnn, x_test, y_test_rnn, output_path, data_name,
            configs):
    # Check the configurations
    w_hours = configs['w_hours']
    input_dim = configs['input_dim']
    output_dim = configs['output_dim']
    hidden_dim = configs['hidden_dim']
    num_layers = configs['num_layers']
    learning_rate = configs['learning_rate']
    dropout = configs['dropout']
    model_type = configs['model']

    if model_type[:5] == 'GARCH':
        garch = True
    else:
        garch = False

    if w_hours:
        input_dim += 1

    if garch:
        input_dim += 1

    # Set the model
    model = RNN(input_dim=input_dim,
                hidden_dim=hidden_dim,
                output_dim=output_dim,
                num_layers=num_layers,
                device=device,
                dropout=dropout,
                model=model_type).to(device)
    criterion = torch.nn.MSELoss(reduction='mean')
    criterion_mae = torch.nn.L1Loss()

    optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Train model
    num_epochs = 1200

    hist = np.zeros(num_epochs)
    start_time = time.time()
    for t in range(num_epochs):
        y_train_pred = model(x_train)
        loss = criterion(y_train_pred, y_train_rnn)
        loss_mae = criterion_mae(y_train_pred, y_train_rnn)
        if t % 100 == 0 and t != 0:
            print("Epoch ", t, "MSE: ", loss.item(), "MAE: ", loss_mae.item())
        hist[t] = loss.item()
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

    training_time = time.time() - start_time
    print("Training time: {}".format(training_time))
    """
    fig, ax = plt.subplots()
    start = 0
    end = 400
    x = np.arange(y_train_pred.shape[0])[start:end]
    y_pred = y_train_pred.cpu().detach().numpy()[:, 0][start:end]
    y_real = y_train_rnn.cpu().detach().numpy()[:, 0][start:end]
    ax.plot(x, y_pred, label='pred')
    ax.plot(x, y_real, label='real')
    ax.legend()
    plt.savefig('plot_out/' + output_path + '/' + data_name + '.jpg')
    plt.show()
    """

    # Test the result
    print('x_test', x_test.shape)
    x_temp = x_train[:x_test.shape[0]]
    # Warm-up forward pass on training data to generate the hidden and cell states (hn, cn) before testing.
    model(x_temp)

    # Run to check the result
    y_test_pred = model(x_test)
    loss = criterion(y_test_pred, y_test_rnn)
    loss_mae = criterion_mae(y_test_pred, y_test_rnn)
    MSE = round(loss.item(), 6)
    MAE = round(loss_mae.item(), 6)
    print("Test MSE: ", MSE)
    print("Test MAE: ", MAE)

    # Save the test results
    """
    x = np.arange(y_test_pred.shape[0]).tolist()
    y_test_pred = y_test_pred.cpu().detach().numpy()[:, 0].tolist()
    y_test_rnn = y_test_rnn.cpu().detach().numpy()[:, 0].tolist()

    df = pd.DataFrame(
        {"real": y_test_rnn,
         "pred": y_test_pred
         })

    df.to_csv(
        './data_out/' + output_path + '/' + data_name + '_MSE_' + str(MSE) + '.csv')

    del df
    """

    with open('result_final.txt', 'a') as outfile:
        outfile.write(data_name + '_' + output_path + ' -> MSE: ' + str(MSE) +
                      ', MAE: ' + str(MAE) + '\n')

    return MSE
Example no. 11
 repeat=False,shuffle=True,
 batch_size=32,device=DEVICE)

# grab a single batch from the loader (e.g. to inspect it) and stop
for batch in train_loader:
    break

EPOCH = 5
BATCH_SIZE = 32
EMBED = 300
KERNEL_SIZES = [3,4,5]
KERNEL_DIM = 100
LR = 0.001

# model = CNNClassifier(len(TEXT.vocab), EMBED, 1, KERNEL_DIM, KERNEL_SIZES)
model = RNN(len(TEXT.vocab), EMBED, KERNEL_DIM, 1, bidirec=False)

loss_function = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3], gamma=0.1)

model.load_state_dict(torch.load('./model/rnn_text.pth'))

if USE_CUDA:
    model = model.cuda()

# ### evaluate
# model.eval()
# num_hit=0
# for i,batch in enumerate(test_loader):
#     inputs, targets = batch.text, batch.label.float()
def _get_model(data_dict, config):
    n_hidden = config["n_hidden"]
    n_letters = config["n_letters"]
    n_categories = data_dict["n_categories"]
    model = RNN(n_letters, n_hidden, n_categories).to(config["device"])
    return model
Example no. 13
import time
import math

import pandas as pd
import matplotlib.pyplot as plt
from rnn_model import RNN  # assumed: the RNN class comes from the same module as in Example no. 16

# read the data
train_data_path = "train_data.csv"
# the file has no header row; fields are tab ("\t") separated
train_data = pd.read_csv(train_data_path, header=None, sep="\t")

# convert the data to a list of rows
train_data = train_data.values.tolist()

input_size = 768
hidden_size = 128
n_categories = 2
rnn = RNN(input_size, hidden_size, n_categories)


def timeSince(since):
    "Return the elapsed training time for each progress printout; since is the training start time"
    # get the current time
    now = time.time()
    # the time difference is the elapsed training time
    s = now - since
    # convert seconds to whole minutes
    m = math.floor(s / 60)
    # seconds left over that do not make up a full minute
    s -= m * 60
    # return the elapsed time in the "Xm Ys" format
    return '%dm %ds' % (m, s)
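As a quick illustration (not part of the original file), timeSince is meant to be called with the training start time:

start = time.time()
# ... training iterations ...
print('elapsed:', timeSince(start))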
Example no. 14
        model.xs: x_test,
        model.ys: y_test,
        # create initial state
    }

    state, pred = sess.run([model.cell_final_state, model.pred],
                           feed_dict=feed_dict)

    # plotting
    # plt.figure(v)
    plt.plot(x_test.flatten(), 'r', y_test.flatten(), 'b--', state.flatten(),
             'k-.')
    plt.ylim((-16, 16))
    plt.draw()
    plt.pause(0.3)
    os.system("pause")


if __name__ == '__main__':
    if len(sys.argv) != 2 or sys.argv[1] not in ['train', 'test']:
        raise ValueError("""usage: python run_rnn.py [train / test]""")

    print('Config RNN model...')
    config = RNNConfig()
    model = RNN(config)
    # train()
    if sys.argv[1] == 'train':
        train()
    else:
        test()
Example no. 15
from rnn_model import RNN
import torch
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

input_size = 54
hidden_size = 64
num_layers = 1
num_classes = 4

model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
model.load_state_dict(torch.load('./keytrack.pt'))
dummy_input = torch.randn(3, 32, 54).to(device)
traced_script_module = torch.jit.trace(model, dummy_input)

traced_script_module.save('keytrack_trace.pt')
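The traced module saved above can be reloaded with torch.jit.load (from Python, or from C++ via libtorch); a minimal Python check, reusing the same device, might be:

loaded = torch.jit.load('keytrack_trace.pt')
output = loaded(torch.randn(3, 32, 54).to(device))
print(output.shape)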
Example no. 16
import torch

# import the RNN model structure
from rnn_model import RNN
# import the BERT pre-trained encoding function
from bert_chinese_encode import get_bert_encode_for_single

# path to the saved model parameters
MODEL_PATH = 'BERT_RNN.pth'

# the hidden size, input size and number of classes just need to match the values used during training
n_hidden = 128
input_size = 768
n_categories = 2

# instantiate the RNN model and load the saved parameters
rnn = RNN(input_size, n_hidden, n_categories)
rnn.load_state_dict(torch.load(MODEL_PATH))


def _test(line_tensor):
    """Model test function, used inside the prediction function to run the RNN model and return its output. The argument line_tensor is the tensor representation of the input text."""
    # initialize the hidden-state tensor
    hidden = rnn.initHidden()
    # as during training, iterate over every character of the input text
    for i in range(line_tensor.size()[0]):
        # feed each character encoding to the rnn model in turn
        output, hidden = rnn(line_tensor[i].unsqueeze(0), hidden)
    # return the final output of the rnn model
    return output
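For reference, a call into _test could look like the sketch below; the call signature of get_bert_encode_for_single and the shape of its return value are assumptions here, not shown in this snippet:

# assumed: get_bert_encode_for_single(text) returns a (seq_len, 768) tensor of per-character encodings
line_tensor = get_bert_encode_for_single("some input text")
output = _test(line_tensor)
predicted_category = torch.argmax(output, dim=-1).item()  # index of the predicted class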