Example #1
#			mode=theano.compile.MonitorMode(post_func=detect_nan)
		)
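	# Compile the update function: applies param_update and clear_update, and
	# returns the L2 norm of each delta so training can be monitored.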
	update = theano.function(
			inputs=[lr],
			updates=param_update + clear_update,
			outputs=[T.sqrt(T.sum(T.sqr(w))) for w in deltas],
			on_unused_input='warn',
#			mode=theano.compile.MonitorMode(post_func=detect_nan)
		)
	return acc, update

if __name__ == "__main__":
	training_file = sys.argv[1]
	compute_tree_exists = False

	vocab_in = vocab.load("qa2.pkl")
	vocab_size = len(vocab_in)
	print "Vocab size is:", vocab_size
	evidence_count = 2
	if compute_tree_exists:
		inputs, outputs, params, grads = pickle.load(open("compute_tree.pkl"))
	else:
		print "Creating compute tree...",
		P = Parameters()
		story = T.ivector('story')
		idxs  = T.ivector('idxs')
		qstn  = T.ivector('qstn')
		ans_evds = T.ivector('ans_evds')
		ans_lbl = T.iscalar('ans_lbl')

		attention = model.build(P,
Example #2
File: train.py Project: musyoku/NLP
# -*- coding: utf-8 -*-
import os, sys, time
import numpy as np
import model
import vocab
from config import config

data_dir = "text"
model_dir = "model"
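# vocab.load returns the encoded dataset together with the vocabulary size and
# the dataset size, stored directly on the shared config.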
dataset, config.n_vocab, config.n_dataset = vocab.load(data_dir)
lm = model.build()
lm.load(model_dir)

n_epoch = 1000
n_train = 5000
batchsize = 64
total_time = 0

# Sequences that are too long may not fit in memory.
max_length_of_chars = 100

# Early in training, learn only short sentences and gradually raise the limit.
# If this behavior is not needed, set a large value from the start.
current_length_limit = 15

def make_batch():
	target_batch_array = []
	max_length_in_batch = 0
	for b in xrange(batchsize):
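		# Rejection sampling: keep redrawing until the sequence length fits
		# under current_length_limit (the curriculum limit above).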
		length = current_length_limit + 1
		while length > current_length_limit:
Example #3
    tmp_file = args.temporary_file

    max_epochs = args.max_epochs
    batch_size = args.batch_size
    improvement_threshold = args.improvement_threshold
    validation_percent = args.validation_percent
    patience = args.patience
    checkpoint = args.checkpoint

    embedding_size = args.embedding_size
    hidden_size = args.hidden_size

    l2_coefficient = args.l2

    id2char = pickle.load(open(vocab_file, 'r'))
    char2id = vocab.load(vocab_file)
    P = Parameters()
    lang_model = model.build(P,
                             character_count=len(char2id) + 1,
                             embedding_size=embedding_size,
                             hidden_size=hidden_size
                             )

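    # Per-character cross-entropy: score the model's probability of each true
    # next character over a batch of sequences.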
    def cost(X, P): # batch_size x time
        eps = 1e-3
        X = X.T                                         # time x batch_size
        char_prob_dist = lang_model(X[:-1])                # time x batch_size x output_size
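        # Squash probabilities into [eps, 1 - eps] so the log-likelihood never
        # sees an exact 0 or 1.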
        char_prob_dist = (1 - 2 * eps) * char_prob_dist + eps
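        # Advanced indexing: select the probability assigned to the true next
        # character at each (time, batch) position.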
        label_prob = char_prob_dist[
            T.arange(X.shape[0] - 1).dimshuffle(0, 'x'),
            T.arange(X.shape[1]).dimshuffle('x', 0),
Example #4
import cPickle as pickle

import numpy as np
import theano
import theano.tensor as T

import vocab
import model
# Assumed import: the excerpt reads args.* below but omits where args comes from.
from args import args
from theano_toolkit.parameters import Parameters

if __name__ == "__main__":
    model_file = args.model_file
    temp_input = args.temperature
    id2char = pickle.load(args.vocab_file)
    char2id = vocab.load(args.vocab_file.name)
    prime_str = args.prime

    P = Parameters()
    sampler = model.build_sampler(P,
                                  character_count=len(char2id) + 1,
                                  embedding_size=20,
                                  hidden_size=100
                                  )
    P.load(model_file)
    temp = T.scalar('temp')
    char = T.iscalar('char')
    p_cell_1, p_hidden_1, p_cell_2, p_hidden_2 = T.vector("p_cell_1"), T.vector("p_hidden_1"), T.vector("p_cell_2"), T.vector("p_hidden_2")

    output, cell_1, hidden_1, cell_2, hidden_2 = sampler(temp, char, p_cell_1, p_hidden_1, p_cell_2, p_hidden_2)
    sample = theano.function(
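
The excerpt is truncated above, midway through compiling sample. Purely as a hedged sketch (not code from this project), the compiled function could be driven one character at a time, assuming it takes the temperature, the current character id, and the four state vectors, and returns the sampled character id plus the four updated states:

# Hypothetical driver loop; the zero-state sizes follow the hidden_size=100 used above.
c1 = h1 = c2 = h2 = np.zeros((100,), dtype=np.float32)
cur = char2id[prime_str[0]] if prime_str else 0
generated = []
for _ in xrange(200):
    cur, c1, h1, c2, h2 = sample(temp_input, cur, c1, h1, c2, h2)
    generated.append(id2char[cur])
print "".join(generated)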
Example #5
File: env.py Project: musyoku/NLP
# -*- coding: utf-8 -*-
import sys, os
from args import args
sys.path.append(os.path.split(os.getcwd())[0])
import vocab
from model import Conf, Model

dataset, n_vocab, n_dataset = vocab.load(args.text_dir)
conf = Conf()
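# args.gpu_enabled == -1 means run on the CPU; any other value enables the GPU.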
conf.gpu_enabled = args.gpu_enabled != -1
conf.n_vocab = n_vocab

# Embed
conf.char_embed_size = 50
conf.word_embed_size = 200

# Encoder
conf.word_encoder_lstm_units = [500]
conf.word_encoder_lstm_apply_batchnorm = False
conf.word_encoder_fc_hidden_units = []
conf.word_encoder_fc_apply_batchnorm = False
conf.word_encoder_fc_apply_dropout = False
conf.word_encoder_fc_nonlinear = "elu"

# Decoder
conf.word_decoder_lstm_units = [500]
conf.word_decoder_lstm_apply_batchnorm = False
conf.word_decoder_merge_type = "concat"

# Discriminator
conf.discriminator_hidden_units = [200, 200]
Example #6
# -*- coding: utf-8 -*-
import os, sys, time, codecs
import numpy as np
import model
import vocab
from config import config

# For printing to the console on Windows
sys.stdout = codecs.getwriter(sys.stdout.encoding)(sys.stdout,
                                                   errors="xmlcharrefreplace")

data_dir = "text"
model_dir = "model"
dataset, config.n_vocab, config.n_dataset = vocab.load(data_dir)
lm = model.build()
lm.load(model_dir)


def sample_seq():
    k = np.random.randint(0, config.n_dataset)
    target_seq = dataset[k]
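    # The encoder input is the target sequence reversed; feeding the source
    # backwards is a common seq2seq trick that shortens early dependencies.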
    source_seq = target_seq[::-1]
    return source_seq, target_seq


for phrase in xrange(50):
    source_seq, target_seq = sample_seq()
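    # Clear the recurrent state before decoding each new phrase.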
    lm.reset_state()
    y_seq = lm.decode(source_seq, sampling_y=True, test=True)