Example #1
                print err
                #print "Analytic %.9f, Numerical %.9f, Relative Error %.9f" % (dL[j][i], numGrad, err)
                err2 += err
                count += 1

        if err2 < 0.001:
            print "Grad Check Passed for dL"
        else:
            print "Grad Check Failed for dL: Mean Relative Error = %.9f" % (err2 / count)


if __name__ == '__main__':
    import loadTree as tree

    train = tree.load_trees('./data/train.json', tree.aspect_label)
    training_word_map = tree.load_word_map()
    numW = len(training_word_map)
    tree.convert_trees(train, training_word_map)

    wvecDim = 10
    outputDim = 5
    memDim = 15

    rnn = TreeLSTM(wvecDim, memDim, outputDim, numW, mb_size=4)
    rnn.init_params()

    mbData = train[:10]

    print "Numerical gradient check..."
    rnn.check_grad(mbData)
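
The fragment above reports the relative error between the analytic gradient dL and a numerical estimate. For reference, a minimal sketch of the centered-difference check it relies on; the cost callable, the epsilon value, and the helper names are assumptions for illustration, not taken from the source:

import numpy as np

def numerical_grad(cost, params, epsilon=1e-6):
    # centered-difference estimate of d(cost)/d(params), one entry at a time
    grad = np.zeros_like(params)
    it = np.nditer(params, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        orig = params[idx]
        params[idx] = orig + epsilon
        cost_plus = cost()
        params[idx] = orig - epsilon
        cost_minus = cost()
        params[idx] = orig  # restore the parameter
        grad[idx] = (cost_plus - cost_minus) / (2 * epsilon)
        it.iternext()
    return grad

def relative_error(analytic, numerical):
    # the same style of per-entry error the check above accumulates into err2
    return abs(analytic - numerical) / max(1e-12, abs(analytic) + abs(numerical))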
Example #2
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test", action="store_true", dest="test", default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer", dest="optimizer", type="string", default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=5e-2)
    parser.add_option("--rho", dest="rho", type="float", default=1e-3)

    # Dimension
    parser.add_option("--wvecDim", dest="wvec_dim", type="int", default=30)
    parser.add_option("--memDim", dest="mem_dim", type="int", default=30)

    parser.add_option("--outFile", dest="out_file", type="string", default="models/test.bin")
    parser.add_option("--inFile", dest="in_file", type="string", default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")

    parser.add_option("--model", dest="model", type="string", default="RNN")
    parser.add_option("--label", dest="label_method", type="string", default="rating")

    (opts, args) = parser.parse_args(args)

    evaluate_accuracy_while_training = True

    if opts.label_method == 'rating':
        label_method = tree.rating_label
        opts.output_dim = 5
    elif opts.label_method == 'aspect':
        label_method = tree.aspect_label
        opts.output_dim = 5
    elif opts.label_method == 'pair':
        label_method = tree.pair_label
        opts.output_dim = 25
    else:
        raise ValueError('%s is not a valid labelling method.' % opts.label_method)

    # Testing
    if opts.test:
        test(opts.in_file, opts.data, label_method, opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tree.load_trees('./data/train.json', label_method)
    training_word_map = tree.load_word_map()
    opts.num_words = len(training_word_map)
    tree.convert_trees(trees, training_word_map)
    labels = [each.label for each in trees]
    count = np.zeros(opts.output_dim)
    for label in labels:
        count[label] += 1
    # uniform class weights; the commented alternative down-weights frequent labels
    # weight = 10 / (count ** 0.1)
    weight = np.ones(opts.output_dim)

    if opts.model == 'RNTN':
        nn = RNTN(opts.wvec_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho)
    elif opts.model == 'RNN':
        nn = RNN(opts.wvec_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho, weight=weight)
    elif opts.model == 'TreeLSTM':
        nn = TreeLSTM(opts.wvec_dim, opts.mem_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho)
    elif opts.model == 'TreeTLSTM':
        nn = TreeTLSTM(opts.wvec_dim, opts.mem_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho)
    else:
        raise ValueError('%s is not a valid model; supported models are RNTN, RNN, TreeLSTM, and TreeTLSTM.' % opts.model)

    nn.init_params()

    sgd = optimizer.SGD(nn, alpha=opts.step, minibatch=opts.minibatch, optimizer=opts.optimizer)

    dev_trees = tree.load_trees('./data/dev.json', label_method)
    tree.convert_trees(dev_trees, training_word_map)
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees, e)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.out_file, 'wb') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.to_file(fid)
        if evaluate_accuracy_while_training:
            # pdb.set_trace()
            print "testing on training set real quick"
            train_accuracies.append(test(opts.out_file, "train", label_method, opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.out_file, "dev", label_method, opts.model, dev_trees))

    if evaluate_accuracy_while_training:
        print train_accuracies
        print dev_accuracies
        plt.plot(train_accuracies, label='train')
        plt.plot(dev_accuracies, label='dev')
        plt.legend(loc=2)
        plt.axvline(x=np.argmax(dev_accuracies), linestyle='--')
        plt.show()
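
Since run() parses its own argument list with optparse, it can be driven either from the command line or programmatically. A hypothetical invocation, with flag values chosen purely for illustration:

# train a TreeLSTM on rating labels for 10 epochs, checkpointing to the default out_file
run(['--model', 'TreeLSTM', '--label', 'rating', '--epochs', '10',
     '--wvecDim', '30', '--memDim', '30'])

# evaluate a previously saved model on the dev split
run(['--test', '--inFile', 'models/test.bin', '--data', 'dev'])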
Example #3
File: LSTM.py  Project: zxsted/DRM
                print "Analytic %.9f, Numerical %.9f, Relative Error %.9f" % (
                    dL[j][i], numGrad, err)
                err2 += err
                count += 1

        if err2 < 0.001:
            print "Grad Check Passed for dL"
        else:
            print "Grad Check Failed for dL: Mean Relative Error = %.9f" % (err2 / count)

if __name__ == '__main__':
    import loadTree as tree

    train = tree.load_trees('./data/train.json', tree.aspect_label)
    training_word_map = tree.load_word_map()
    numW = len(training_word_map)
    tree.convert_trees(train, training_word_map)

    wvecDim = 10
    outputDim = 5
    memDim = 25

    lstm = LSTM(wvecDim, memDim, outputDim, numW, mb_size=4)
    lstm.init_params()

    mbData = train[:4]

    print "Numerical gradient check..."
    lstm.check_grad(mbData, 1e-7)  # 1e-7 is presumably the finite-difference epsilon
Example #4
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test", action="store_true", dest="test", default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer", dest="optimizer", type="string", default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=5e-2)
    parser.add_option("--rho", dest="rho", type="float", default=1e-3)

    # Dimension
    parser.add_option("--wvecDim", dest="wvec_dim", type="int", default=30)
    parser.add_option("--memDim", dest="mem_dim", type="int", default=30)

    parser.add_option("--outFile", dest="out_file", type="string", default="models/test.bin")
    parser.add_option("--inFile", dest="in_file", type="string", default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")

    parser.add_option("--model", dest="model", type="string", default="RNN")
    parser.add_option("--label", dest="label_method", type="string", default="rating")

    (opts, args) = parser.parse_args(args)

    evaluate_accuracy_while_training = True

    if opts.label_method == 'rating':
        label_method = tree.rating_label
        opts.output_dim = 5
    elif opts.label_method == 'aspect':
        label_method = tree.aspect_label
        opts.output_dim = 5
    elif opts.label_method == 'pair':
        label_method = tree.pair_label
        opts.output_dim = 25
    else:
        raise ValueError('%s is not a valid labelling method.' % opts.label_method)

    # Testing
    if opts.test:
        test(opts.in_file, opts.data, label_method, opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tree.load_trees('./data/train.json', label_method)
    training_word_map = tree.load_word_map()
    opts.num_words = len(training_word_map)
    tree.convert_trees(trees, training_word_map)
    labels = [each.label for each in trees]
    count = np.zeros(opts.output_dim)
    for label in labels:
        count[label] += 1
    # weight = 10 / (count ** 0.1)
    weight = np.ones(opts.output_dim)

    if opts.model == 'RNTN':
        nn = RNTN(opts.wvec_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho)
    elif opts.model == 'RNN':
        nn = RNN(opts.wvec_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho, weight=weight)
    elif opts.model == 'TreeLSTM':
        nn = TreeLSTM(opts.wvec_dim, opts.mem_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho)
    elif opts.model == 'TreeTLSTM':
        nn = TreeTLSTM(opts.wvec_dim, opts.mem_dim, opts.output_dim, opts.num_words, opts.minibatch, rho=opts.rho)
    else:
        raise ValueError('%s is not a valid model; supported models are RNTN, RNN, TreeLSTM, and TreeTLSTM.' % opts.model)

    nn.init_params()

    sgd = optimizer.SGD(nn, alpha=opts.step, minibatch=opts.minibatch, optimizer=opts.optimizer)

    dev_trees = tree.load_trees('./data/dev.json', label_method)
    tree.convert_trees(dev_trees, training_word_map)
    # epochs: number of full passes over the training data
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees, e)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.out_file, 'wb') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.to_file(fid)
        if evaluate_accuracy_while_training:
            # pdb.set_trace()
            print "testing on training set real quick"
            train_accuracies.append(test(opts.out_file, "train", label_method, opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.out_file, "dev", label_method, opts.model, dev_trees))

    if evaluate_accuracy_while_training:
        print train_accuracies
        print dev_accuracies
        plt.plot(train_accuracies, label='train')
        plt.plot(dev_accuracies, label='dev')
        plt.legend(loc=2)
        plt.axvline(x=np.argmax(dev_accuracies), linestyle='--')
        plt.show()
Example #5
import loadTree
import numpy as np
import cPickle as pickle

training_word_map = loadTree.load_word_map()
training_word_lst = loadTree.load_word_list()

glove_dic = 'data/glove.42B.300d.bin'
glove_vec = 'data/glove.42B.300d.npy'

with open(glove_dic, 'rb') as fid:
    glove_word_map = pickle.load(fid)
glove_word_vec = np.load(glove_vec)

# build the embedding matrix L: one 300-d GloVe vector per training word
L = np.empty((300, len(training_word_lst)))
for word in training_word_lst:
    if word in glove_word_map:
        L[:, training_word_map[word]] = glove_word_vec[glove_word_map[word]]
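
Because L is allocated with np.empty, columns for training words that never match a GloVe entry are left uninitialized. One way to finish the initialization; the random scale and the output path are assumptions, not part of the source:

# give out-of-vocabulary words a small random vector and persist the matrix
for word in training_word_lst:
    if word not in glove_word_map:
        L[:, training_word_map[word]] = 0.01 * np.random.randn(300)

np.save('data/L_glove_init.npy', L)  # hypothetical output path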