Beispiel #1
0
def test(netFile, dataSet, model='RNN', trees=None):
    if trees == None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile
    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if (model == 'RNTN'):
            nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords,
                      opts.minibatch)
        elif (model == 'RNN'):
            nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords,
                     opts.minibatch)
        elif (model == 'RNN2'):
            nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim,
                      opts.numWords, opts.minibatch)
        elif (opts.model == 'RNN3'):
            nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim,
                      opts.numWords, opts.minibatch)
        elif (model == 'DCNN'):
            nn = DCNN(opts.wvecDim,
                      opts.ktop,
                      opts.m1,
                      opts.m2,
                      opts.n1,
                      opts.n2,
                      0,
                      opts.outputDim,
                      opts.numWords,
                      2,
                      opts.minibatch,
                      rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN' % opts.model

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total = nn.costAndGrad(trees, test=True)
    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += (guess[i] == correct[i])

    # TODO
    # Plot the confusion matrix?
    conf_arr = np.zeros((5, 5))
    for i in xrange(0, len(correct)):
        current_correct = correct[i]
        current_guess = guess[i]
        conf_arr[current_correct][current_guess] += 1.0

    makeconf(conf_arr, model, dataSet)

    print "Cost %f, Acc %f" % (cost, correct_sum / float(total))
    return correct_sum / float(total)
Beispiel #2
0
def validate(rnn, results_dir):
    """Run the model on the dev split and report validation metrics.

    Returns:
        (df, cost, accuracy) — per-example frame, loss, and fraction correct.
    """
    dev_trees = tr.loadTrees("dev")
    log.info("Validation...")
    cost, correct, total, df = rnn.costAndGrad(dev_trees, test=True)
    accuracy = correct / float(total)
    log.info("Validation: Cost %f, Correct %d/%d, Acc %f" % (cost, correct, total, accuracy))
    return df, cost, accuracy
Beispiel #3
0
def test(netFile, dataSet, model='RNN', trees=None):
    if trees == None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile
    opts = None
    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if (model == 'RNTN'):
            nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords,
                      opts.minibatch)
        elif (model == 'RNN'):
            nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords,
                     opts.minibatch)
        elif (model == 'RNN2'):
            nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim,
                      opts.numWords, opts.minibatch)
        elif (opts.model == 'RNN3'):
            nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim,
                      opts.numWords, opts.minibatch)
        elif (model == 'DCNN'):
            nn = DCNN(opts.wvecDim,
                      opts.ktop,
                      opts.m1,
                      opts.m2,
                      opts.n1,
                      opts.n2,
                      0,
                      opts.outputDim,
                      opts.numWords,
                      2,
                      opts.minibatch,
                      rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN' % opts.model

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total = nn.costAndGrad(trees, test=True)

    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += (guess[i] == correct[i])

    cm = confusion_matrix(correct, guess)
    makeconf(cm)
    plt.savefig("plots/" + opts.model + "/confusion_matrix_" + model +
                "wvecDim_" + str(opts.wvecDim) + "_middleDim_" +
                str(opts.middleDim) + ".png")

    print "Cost %f, Acc %f" % (cost, correct_sum / float(total))
    return correct_sum / float(total)
Beispiel #4
0
def test(netFile,
         dataSet,
         model='RNN',
         trees=None,
         confusion_matrix_file=None,
         acti=None):
    if trees == None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile
    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if (model == 'RNTN'):
            nn = RNTN(wvecDim=opts.wvecDim,
                      outputDim=opts.outputDim,
                      numWords=opts.numWords,
                      mbSize=opts.minibatch,
                      rho=opts.rho,
                      acti=acti)
        elif (model == 'RNN'):
            nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords,
                     opts.minibatch)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN' % opts.model

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total = nn.costAndGrad(trees, test=True)
    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += (guess[i] == correct[i])

    correctSent = 0
    for tree in trees:
        sentLabel = tree.root.label
        sentPrediction = tree.root.prediction
        if sentLabel == sentPrediction:
            correctSent += 1

    # Generate confusion matrix
    #if confusion_matrix_file is not None:
    #    cm = confusion_matrix(correct, guess)
    #    makeconf(cm, confusion_matrix_file)

    print "%s: Cost %f, Acc %f, Sentence-Level: Acc %f" % (
        dataSet, cost, correct_sum / float(total),
        correctSent / float(len(trees)))
    return (correct_sum / float(total), correctSent / float(len(trees)))
Beispiel #5
0
def test(netFile,dataSet):
    trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)
        rnn = nnet.RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        rnn.initParams()
        rnn.fromFile(fid)
    print "Testing..."
    cost,correct,total = rnn.costAndGrad(trees,test=True)
    print "Cost %f, Correct %d/%d, Acc %f"%(cost,correct,total,correct/float(total))
Beispiel #6
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",action="store_true",dest="test",default=False)

    # Optimizer
    parser.add_option("--minibatch",dest="minibatch",type="int",default=30)
    parser.add_option("--optimizer",dest="optimizer",type="string",
        default="adagrad")
    parser.add_option("--epochs",dest="epochs",type="int",default=50)
    parser.add_option("--step",dest="step",type="float",default=1e-2)

    parser.add_option("--outputDim",dest="outputDim",type="int",default=5)
    parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)
    parser.add_option("--outFile",dest="outFile",type="string",
        default="models/test.bin")
    parser.add_option("--inFile",dest="inFile",type="string",
        default="models/test.bin")
    parser.add_option("--data",dest="data",type="string",default="train")


    (opts,args)=parser.parse_args(args)

    # Testing
    if opts.test:
        test(opts.inFile,opts.data)
        return
    
    print "Loading data..."
    # load training data
    trees = tr.loadTrees()
    opts.numWords = len(tr.loadWordMap())

    rnn = nnet.RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
    rnn.initParams()

    sgd = optimizer.SGD(rnn,alpha=opts.step,minibatch=opts.minibatch,
        optimizer=opts.optimizer)

    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d"%e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f"%(end-start)

        with open(opts.outFile,'w') as fid:
            pickle.dump(opts,fid)
            pickle.dump(sgd.costt,fid)
            rnn.toFile(fid)
Beispiel #7
0
def test(netFile, dataSet, model="RNN", trees=None):
    if trees == None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile
    with open(netFile, "r") as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if model == "RNTN":
            nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == "RNN":
            nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == "RNN2":
            nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif opts.model == "RNN3":
            nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == "DCNN":
            nn = DCNN(
                opts.wvecDim,
                opts.ktop,
                opts.m1,
                opts.m2,
                opts.n1,
                opts.n2,
                0,
                opts.outputDim,
                opts.numWords,
                2,
                opts.minibatch,
                rho=1e-4,
            )
            trees = cnn.tree2matrix(trees)
        else:
            raise "%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN" % opts.model

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total = nn.costAndGrad(trees, test=True)
    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += guess[i] == correct[i]

    # TODO
    # Plot the confusion matrix?

    print "Cost %f, Acc %f" % (cost, correct_sum / float(total))
    return correct_sum / float(total)
Beispiel #8
0
def test(netFile, dataSet):
    trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)
        rnn = nnet.RNN(opts.wvecDim, opts.outputDim, opts.numWords,
                       opts.minibatch)
        rnn.initParams()
        rnn.fromFile(fid)
    print "Testing..."
    cost, correct, total = rnn.costAndGrad(trees, test=True)
    print "Cost %f, Correct %d/%d, Acc %f" % (cost, correct, total,
                                              correct / float(total))
Beispiel #9
0
def test(netFile,dataSet, model='RNN', trees=None,e=100):
    if trees==None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s"%netFile
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)
        
        if (model=='RNTN'):
            nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout)
        elif(model=='RNN'):
            nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout)
        elif(model=='RNN2'):
            nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout)
        elif(model=='RNN2TANH'):
            nn = RNN2TANH(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout)
        elif(model=='RNN3'):
            nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout)
        elif(model=='DCNN'):
            nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, and DCNN'%opts.model
        
        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..."%model
    cost,correct, guess, total = nn.costAndGrad(trees,test=True)
    correct_sum = 0
    for i in xrange(0,len(correct)):        
        correct_sum+=(guess[i]==correct[i])

    if e%10==0:
        labels = range(max(set(correct))+1)
        correct = np.array(correct)
        guess = np.array(guess)
        conf_arr = []
        for i in labels:
            sub_arr = []
            for j in labels:   
                sub_arr.append(sum((correct == i) & (guess==j)))
            conf_arr.append(sub_arr)
        makeconf(conf_arr,'temp/'+model+'_conf_mat_'+dataSet+'_'+str(e)+'.')
    print "Cost %f, Acc %f"%(cost,correct_sum/float(total)), 
    print "Pos F1 %f"%(evaluateF1(correct, guess, 2)), "Neg F1 %f"%(evaluateF1(correct, guess, 0))
    return correct_sum/float(total)
Beispiel #10
0
def test(netFile,dataSet, model='RNN', trees=None):
    if trees==None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s"%netFile
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)
        
        if (model=='RNTN'):
            nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN'):
            nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN2'):
            nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(opts.model=='RNN3'):
            nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='DCNN'):
            nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN'%opts.model
        
        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..."%model

    cost,correct, guess, total = nn.costAndGrad(trees,test=True)
    correct_sum = 0
    for i in xrange(0,len(correct)):        
        correct_sum+=(guess[i]==correct[i])
    
    # TODO
    # Plot the confusion matrix?
    conf_arr = np.zeros((5,5))
    for i in xrange(0,len(correct)):
        current_correct = correct[i]
        current_guess = guess[i]
        conf_arr[current_correct][current_guess] += 1.0

    makeconf(conf_arr, model, dataSet)
    
    
    print "Cost %f, Acc %f"%(cost,correct_sum/float(total))
    return correct_sum/float(total)
Beispiel #11
0
def test(model_dir, dataSet):
    """Evaluate the checkpointed RNN in model_dir on dataSet, writing
    per-example predictions and summary metrics to CSV files alongside it."""
    trees = tr.loadTrees(dataSet)
    total_df = pd.DataFrame()
    assert model_dir is not None, "Must give model to test"
    checkpoint_path = model_dir + "/checkpoint.bin"
    with open(checkpoint_path, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)  # discard saved cost history
        rnn = nnet.RNN(opts.wvecDim, opts.outputDim, opts.numWords,
                       opts.optimizer_settings['minibatch'])
        rnn.initParams()
        rnn.fromFile(fid)
    log.info("Testing...")
    cost, correct, total, df = rnn.costAndGrad(trees, test=True)
    total_df = total_df.append(df, ignore_index=True)
    total_df.to_csv(model_dir + "/test_preds.csv", header=True, index=False)
    accuracy = correct / float(total)
    summary = {"Cost": cost, "Correct": correct, "Total": total,
               "Accuracy": accuracy}
    test_performance = pd.DataFrame().append(summary, ignore_index=True)
    test_performance.to_csv(model_dir + "/test_performance.csv", header=True,
                            index=False)
    log.info("Cost %f, Correct %d/%d, Acc %f" % (cost, correct, total, accuracy))
Beispiel #12
0
def test(netFile,dataSet, model='RNN', trees=None):
    if trees==None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s"%netFile
    opts = None
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if (model=='RNTN'):
            nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN'):
            nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN2'):
            nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(opts.model=='RNN3'):
            nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='DCNN'):
            nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN'%opts.model

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..."%model

    cost,correct, guess, total = nn.costAndGrad(trees,test=True)

    correct_sum = 0
    for i in xrange(0,len(correct)):
        correct_sum+=(guess[i]==correct[i])

    cm = confusion_matrix(correct, guess)
    makeconf(cm)
    plt.savefig("plots/" + opts.model + "/confusion_matrix_" + model + "wvecDim_" + str(opts.wvecDim) + "_middleDim_" + str(opts.middleDim) + ".png")

    print "Cost %f, Acc %f"%(cost,correct_sum/float(total))
    return correct_sum/float(total)
Beispiel #13
0
def test(netFile,dataSet, model='RNN', trees=None, confusion_matrix_file=None, acti=None):
    if trees==None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s"%netFile
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)
        
        if (model=='RNTN'):
            nn = RNTN(wvecDim=opts.wvecDim,outputDim=opts.outputDim,numWords=opts.numWords,mbSize=opts.minibatch,rho=opts.rho, acti=acti)
        elif(model=='RNN'):
            nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        else:
            raise '%s is not a valid neural network so far only RNTN, RNN'%opts.model
        
        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..."%model

    cost,correct, guess, total = nn.costAndGrad(trees,test=True)
    correct_sum = 0
    for i in xrange(0,len(correct)):
        correct_sum+=(guess[i]==correct[i])

    correctSent = 0
    for tree in trees:
        sentLabel = tree.root.label
        sentPrediction = tree.root.prediction
        if sentLabel == sentPrediction:
            correctSent += 1


    # Generate confusion matrix
    #if confusion_matrix_file is not None:
    #    cm = confusion_matrix(correct, guess)
    #    makeconf(cm, confusion_matrix_file)

    print "%s: Cost %f, Acc %f, Sentence-Level: Acc %f"%(dataSet,cost,correct_sum/float(total),correctSent/float(len(trees)))
    return (correct_sum/float(total), correctSent/float(len(trees)))
Beispiel #14
0
def test_RNN():
    """Test RVNN model implementation.

    Loads a trained model's weights, predicts labels for the revised test
    trees, and writes one integer prediction per line to FLAGS.outputfile_name.
    """
    config = Config()
    model = RNN_Model(config, LOAD_DATA=False)

    test_trees = tr.loadTrees(FLAGS.revised_tree, predicting_test=True)

    predictions, _ = model.predict(
        test_trees,
        './weights/rnn_embed=200_l2=0.020000_lr=0.010000_epoch=50.weights')

    # bug fix: use a context manager so the output file is flushed and
    # closed (the original left the handle open after writing).
    with open(FLAGS.outputfile_name, "w") as f:
        for prediction in predictions:
            f.write(str(int(prediction)))
            f.write("\n")
def test(netFile, dataSet, L, model='RNN', trees=None, confusion_matrix_file=None, full=False):
    if trees==None:
        trees = tr.loadTrees(dataSet)
    if L is None:
        L = tr.loadWordEmbedding()
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s"%netFile
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if(model=='RNN2'):
            nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        else:
            raise '%s is not a valid neural network , only RNN2'%opts.model

        nn.initParams(L)
        nn.fromFile(fid)

    print "Testing %s..."%model

    cost, correct, guess, total, actss = nn.costAndGrad(trees,test=True)
    if full:
        #pass
        import pickle as pkl
        with open('{}_actss_{}.pkl'.format(netFile, dataSet),'w') as fid:
            pkl.dump(actss,fid)

    correct_sum = 0
    for i in xrange(0,len(correct)):
        correct_sum+=(guess[i]==correct[i])

    # Generate confusion matrix
    if confusion_matrix_file is not None:
        cm = confusion_matrix(correct, guess)
        makeconf(cm, confusion_matrix_file)

    print "Cost %f, Acc %f"%(cost,correct_sum/float(total))
    return correct_sum/float(total)
Beispiel #16
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",
                      action="store_true",
                      dest="test",
                      default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer",
                      dest="optimizer",
                      type="string",
                      default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)
    parser.add_option("--init", dest="init", type="float", default=0.01)

    parser.add_option("--outputDim", dest="outputDim", type="int", default=5)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)

    parser.add_option("--rho", dest="rho", type="float", default=1e-6)

    parser.add_option("--outFile",
                      dest="outFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--inFile",
                      dest="inFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")

    parser.add_option("--model", dest="model", type="string", default="RNTN")

    parser.add_option("--maxTrain", dest="maxTrain", type="int", default=-1)
    parser.add_option("--activation",
                      dest="acti",
                      type="string",
                      default="tanh")

    parser.add_option("--partial",
                      action="store_true",
                      dest="partial",
                      default=False)
    parser.add_option("--w2v", dest="w2vmodel", type="string")

    (opts, args) = parser.parse_args(args)

    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        cmfile = opts.inFile + ".confusion_matrix-" + opts.data + ".png"
        test(opts.inFile, opts.data, opts.model, acti=opts.acti)
        return

    print "Loading data..."

    embedding = None
    wordMap = None
    if opts.w2vmodel is not None:
        print "Loading pre-trained word2vec model from %s" % opts.w2vmodel
        w2v = models.Word2Vec.load(opts.w2vmodel)
        embedding, wordMap = readW2v(w2v, opts.wvecDim)

    train_accuracies = []
    train_rootAccuracies = []
    dev_accuracies = []
    dev_rootAccuracies = []
    # load training data
    trees = tr.loadTrees('train',
                         wordMap=wordMap)[:opts.maxTrain]  #train.full.15
    if opts.maxTrain > -1:
        print "Training only on %d trees" % opts.maxTrain
    opts.numWords = len(tr.loadWordMap())

    if opts.partial == True:
        print "Only partial feedback"

    if (opts.model == 'RNTN'):
        nn = RNTN(wvecDim=opts.wvecDim,
                  outputDim=opts.outputDim,
                  numWords=opts.numWords,
                  mbSize=opts.minibatch,
                  rho=opts.rho,
                  acti=opts.acti,
                  init=opts.init,
                  partial=opts.partial)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN' % opts.model

    nn.initParams(embedding=embedding)

    sgd = optimizer.SGD(nn,
                        alpha=opts.step,
                        minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    dev_trees = tr.loadTrees("dev")  #dev.full.15
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set"
            acc, sacc = test(opts.outFile,
                             "train",
                             opts.model,
                             trees,
                             acti=opts.acti)
            train_accuracies.append(acc)
            train_rootAccuracies.append(sacc)
            print "testing on dev set"
            dacc, dsacc = test(opts.outFile,
                               "dev",
                               opts.model,
                               dev_trees,
                               acti=opts.acti)
            dev_accuracies.append(dacc)
            dev_rootAccuracies.append(dsacc)
            # clear the fprop flags and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

    if evaluate_accuracy_while_training:
        pdb.set_trace()
        print train_accuracies
        print dev_accuracies

        print "on sentence-level:"
        print train_rootAccuracies
        print dev_rootAccuracies

        # Plot train/dev_accuracies
        plt.figure()
        plt.plot(range(len(train_accuracies)), train_accuracies, label='Train')
        plt.plot(range(len(dev_accuracies)), dev_accuracies, label='Dev')
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()
        # plot.show()
        plt.savefig(opts.outFile + ".accuracy_plot.png")

        # Plot train/dev_accuracies
        plt.figure()
        plt.plot(range(len(train_rootAccuracies)),
                 train_rootAccuracies,
                 label='Train')
        plt.plot(range(len(dev_rootAccuracies)),
                 dev_rootAccuracies,
                 label='Dev')
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()
        # plot.show()
        plt.savefig(opts.outFile + ".sent.accuracy_plot.png")
Beispiel #17
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",
                      action="store_true",
                      dest="test",
                      default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer",
                      dest="optimizer",
                      type="string",
                      default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)

    parser.add_option("--middleDim", dest="middleDim", type="int", default=10)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=5)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)

    # By @tiagokv, just to ease the first assignment test
    parser.add_option("--wvecDimBatch",
                      dest="wvecDimBatch",
                      type="string",
                      default="")

    # for DCNN only
    parser.add_option("--ktop", dest="ktop", type="int", default=5)
    parser.add_option("--m1", dest="m1", type="int", default=10)
    parser.add_option("--m2", dest="m2", type="int", default=7)
    parser.add_option("--n1", dest="n1", type="int", default=6)
    parser.add_option("--n2", dest="n2", type="int", default=12)

    parser.add_option("--outFile",
                      dest="outFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--inFile",
                      dest="inFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")

    parser.add_option("--model", dest="model", type="string", default="RNN")

    (opts, args) = parser.parse_args(args)

    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile, opts.data, opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if (opts.model == 'RNTN'):
        nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif (opts.model == 'RNN'):
        nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif (opts.model == 'RNN2'):
        nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords,
                  opts.minibatch)
    elif (opts.model == 'RNN3'):
        nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords,
                  opts.minibatch)
    elif (opts.model == 'DCNN'):
        nn = DCNN(opts.wvecDim,
                  opts.ktop,
                  opts.m1,
                  opts.m2,
                  opts.n1,
                  opts.n2,
                  0,
                  opts.outputDim,
                  opts.numWords,
                  2,
                  opts.minibatch,
                  rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN' % opts.model

    nn.initParams()

    sgd = optimizer.SGD(nn,
                        alpha=opts.step,
                        minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    # assuring folder for plots exists
    if (os.path.isdir('plots') == False): os.makedirs('test')
    if (os.path.isdir('plots/' + opts.model) == False):
        os.makedirs('plots/' + opts.model)

    dev_trees = tr.loadTrees("dev")
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(
                test(opts.outFile, "train", opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(
                test(opts.outFile, "dev", opts.model, dev_trees))
            # clear the fprop flags in trees and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

    if evaluate_accuracy_while_training:
        #pdb.set_trace()

        plt.figure()
        #Lets set up the plot
        plt.title('Accuracy in set per epochs')
        plt.plot(range(opts.epochs), train_accuracies, label='train')
        plt.plot(range(opts.epochs), dev_accuracies, label='dev')

        with open('dev_accu' + opts.model, 'a') as fid:
            fid.write(
                str(opts.wvecDim) + ',' + str(opts.middleDim) + ',' +
                str(dev_accuracies[-1]) + ';')

        #plt.axis([0,opts.epochs,0,1])
        plt.xlabel('epochs')
        plt.ylabel('accuracy')
        plt.legend(loc=2, borderaxespad=0.)

        #always save with middleDim, even if it's a one-layer RNN
        plt.savefig('plots/' + opts.model + '/accuracy_wvec_' +
                    str(opts.wvecDim) + '_middleDim_' + str(opts.middleDim) +
                    ' .png')

        print 'image saved at %s' % os.getcwd()
Beispiel #18
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",
                      action="store_true",
                      dest="test",
                      default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer",
                      dest="optimizer",
                      type="string",
                      default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)

    parser.add_option("--outputDim", dest="outputDim", type="int", default=5)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)
    parser.add_option("--outFile",
                      dest="outFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--inFile",
                      dest="inFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")

    (opts, args) = parser.parse_args(args)

    # Testing
    if opts.test:
        test(opts.inFile, opts.data)
        return

    print "Loading data..."
    # load training data
    trees = tr.loadTrees()
    opts.numWords = len(tr.loadWordMap())

    rnn = nnet.RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    rnn.initParams()

    sgd = optimizer.SGD(rnn,
                        alpha=opts.step,
                        minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            rnn.toFile(fid)
Beispiel #19
0
        default="sgd")
parser.add_option("--epochs",dest="epochs",type="int",default=50)
parser.add_option("--step",dest="step",type="float",default=1e-2)
parser.add_option("--outputDim",dest="outputDim",type="int",default=5)
parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)
parser.add_option("--outFile",dest="outFile",type="string",
        default="models/distrntn.bin")
parser.add_option("--inFile",dest="inFile",type="string",
        default="models/distrntn.bin")
parser.add_option("--data",dest="data",type="string",default="train")
(opts,args)=parser.parse_args(None)


print "Loading data..."
# load training data
trees = tr.loadTrees()
opts.numWords = len(tr.loadWordMap())


#setup the rntn
rnn = nnet.RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
rnn.initParams()
sgd = optimizer.SGD(rnn,alpha=opts.step,minibatch=opts.minibatch,
    optimizer=opts.optimizer)

#setup spark
if mode == "local":
   # Set heap space size for java
   #os.environ["_JAVA_OPTIONS"] = "-Xmx1g"
   conf = (SparkConf()
           .setMaster("local[*]")
Beispiel #20
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",action="store_true",dest="test",default=False)

    # Optimizer
    parser.add_option("--minibatch",dest="minibatch",type="int",default=30)
    parser.add_option("--optimizer",dest="optimizer",type="string",
        default="adagrad")
    parser.add_option("--epochs",dest="epochs",type="int",default=50)
    parser.add_option("--step",dest="step",type="float",default=1e-2)


    parser.add_option("--middleDim",dest="middleDim",type="int",default=10)
    parser.add_option("--outputDim",dest="outputDim",type="int",default=5)
    parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)

    # By @tiagokv, just to ease the first assignment test
    parser.add_option("--wvecDimBatch",dest="wvecDimBatch",type="string",default="")

    # for DCNN only
    parser.add_option("--ktop",dest="ktop",type="int",default=5)
    parser.add_option("--m1",dest="m1",type="int",default=10)
    parser.add_option("--m2",dest="m2",type="int",default=7)
    parser.add_option("--n1",dest="n1",type="int",default=6)
    parser.add_option("--n2",dest="n2",type="int",default=12)

    parser.add_option("--outFile",dest="outFile",type="string",
        default="models/test.bin")
    parser.add_option("--inFile",dest="inFile",type="string",
        default="models/test.bin")
    parser.add_option("--data",dest="data",type="string",default="train")

    parser.add_option("--model",dest="model",type="string",default="RNN")

    (opts,args)=parser.parse_args(args)


    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile,opts.data,opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if (opts.model=='RNTN'):
        nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='RNN'):
        nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='RNN2'):
        nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='RNN3'):
        nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='DCNN'):
        nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN'%opts.model

    nn.initParams()

    sgd = optimizer.SGD(nn,alpha=opts.step,minibatch=opts.minibatch,
        optimizer=opts.optimizer)

    # assuring folder for plots exists
    if( os.path.isdir('plots') == False ): os.makedirs('test')
    if( os.path.isdir('plots/' + opts.model ) == False ): os.makedirs('plots/' + opts.model)

    dev_trees = tr.loadTrees("dev")
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d"%e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f"%(end-start)

        with open(opts.outFile,'w') as fid:
            pickle.dump(opts,fid)
            pickle.dump(sgd.costt,fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile,"train",opts.model,trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile,"dev",opts.model,dev_trees))
            # clear the fprop flags in trees and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            print "fprop in trees cleared"


    if evaluate_accuracy_while_training:
        #pdb.set_trace()

        plt.figure()
        #Lets set up the plot
        plt.title('Accuracy in set per epochs')
        plt.plot(range(opts.epochs),train_accuracies,label='train')
        plt.plot(range(opts.epochs),dev_accuracies,label='dev')

        with open('dev_accu' + opts.model,'a') as fid:
            fid.write(str(opts.wvecDim) + ',' + str(opts.middleDim) + ',' + str(dev_accuracies[-1]) + ';')

        #plt.axis([0,opts.epochs,0,1])
        plt.xlabel('epochs')
        plt.ylabel('accuracy')
        plt.legend(loc=2, borderaxespad=0.)

        #always save with middleDim, even if it's a one-layer RNN
        plt.savefig('plots/' + opts.model + '/accuracy_wvec_' + str(opts.wvecDim) + '_middleDim_' + str(opts.middleDim) + ' .png')

        print 'image saved at %s' % os.getcwd()
Beispiel #21
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",action="store_true",dest="test",default=False)

    # Optimizer
    parser.add_option("--minibatch",dest="minibatch",type="int",default=30)
    parser.add_option("--optimizer",dest="optimizer",type="string",
        default="adagrad")
    parser.add_option("--epochs",dest="epochs",type="int",default=50)
    parser.add_option("--step",dest="step",type="float",default=1e-2)
    parser.add_option("--init",dest="init",type="float",default=0.01)

    parser.add_option("--outputDim",dest="outputDim",type="int",default=5)
    parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)

    parser.add_option("--rho",dest="rho",type="float",default=1e-6)

    parser.add_option("--outFile",dest="outFile",type="string",
        default="models/test.bin")
    parser.add_option("--inFile",dest="inFile",type="string",
        default="models/test.bin")
    parser.add_option("--data",dest="data",type="string",default="train")

    parser.add_option("--model",dest="model",type="string",default="RNTN")

    parser.add_option("--maxTrain",dest="maxTrain", type="int", default=-1)
    parser.add_option("--activation",dest="acti", type="string", default="tanh")

    parser.add_option("--partial",action="store_true",dest="partial",default=False)
    parser.add_option("--w2v",dest="w2vmodel", type="string")

    (opts,args)=parser.parse_args(args)


    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        cmfile = opts.inFile + ".confusion_matrix-" + opts.data + ".png"
        test(opts.inFile,opts.data,opts.model,acti=opts.acti)
        return
    
    print "Loading data..."

    embedding = None
    wordMap = None
    if opts.w2vmodel is not None:
        print "Loading pre-trained word2vec model from %s" % opts.w2vmodel
        w2v = models.Word2Vec.load(opts.w2vmodel)
        embedding, wordMap = readW2v(w2v,opts.wvecDim)

    train_accuracies = []
    train_rootAccuracies = []
    dev_accuracies = []
    dev_rootAccuracies = []
    # load training data
    trees = tr.loadTrees('train',wordMap=wordMap)[:opts.maxTrain] #train.full.15
    if opts.maxTrain > -1:
        print "Training only on %d trees" % opts.maxTrain
    opts.numWords = len(tr.loadWordMap())


    if opts.partial==True:
        print "Only partial feedback"

    if (opts.model=='RNTN'):
        nn = RNTN(wvecDim=opts.wvecDim,outputDim=opts.outputDim,numWords=opts.numWords,
                  mbSize=opts.minibatch,rho=opts.rho, acti=opts.acti, init=opts.init, partial=opts.partial)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN'%opts.model
    
    nn.initParams(embedding=embedding)

    sgd = optimizer.SGD(nn,alpha=opts.step,minibatch=opts.minibatch,
        optimizer=opts.optimizer)


    dev_trees = tr.loadTrees("dev") #dev.full.15
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d"%e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f"%(end-start)

        with open(opts.outFile,'w') as fid:
            pickle.dump(opts,fid)
            pickle.dump(sgd.costt,fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set"
            acc, sacc = test(opts.outFile,"train",opts.model,trees, acti=opts.acti)
            train_accuracies.append(acc)
            train_rootAccuracies.append(sacc)
            print "testing on dev set"
            dacc, dsacc = test(opts.outFile,"dev",opts.model,dev_trees, acti=opts.acti)
            dev_accuracies.append(dacc)
            dev_rootAccuracies.append(dsacc)
            # clear the fprop flags and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            print "fprop in trees cleared"


    if evaluate_accuracy_while_training:
        pdb.set_trace()
        print train_accuracies
        print dev_accuracies

        print "on sentence-level:"
        print train_rootAccuracies
        print dev_rootAccuracies

        # Plot train/dev_accuracies
        plt.figure()
        plt.plot(range(len(train_accuracies)), train_accuracies, label='Train')
        plt.plot(range(len(dev_accuracies)), dev_accuracies, label='Dev')
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()
        # plot.show()
        plt.savefig(opts.outFile + ".accuracy_plot.png")

          # Plot train/dev_accuracies
        plt.figure()
        plt.plot(range(len(train_rootAccuracies)), train_rootAccuracies, label='Train')
        plt.plot(range(len(dev_rootAccuracies)), dev_rootAccuracies, label='Dev')
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()
        # plot.show()
        plt.savefig(opts.outFile + ".sent.accuracy_plot.png")
Beispiel #22
0
def run():
    print "Loading data..."
    model = "RNN"
    trees = tr.loadTrees('train')
    dev_trees = tr.loadTrees('dev')
    wvecDimList = [5, 15, 25, 35, 45]
    #wvecDimList = [10,20,40]
    accuracy_per_wvecDim = []
    epochs = 100
    outFileText = "./param/%s/%s_cost_and_acc" % (model, model)
    f = open(outFileText, 'w')
    for wvecDim in wvecDimList:
        nn = RNN(wvecDim, 5, len(tr.loadWordMap()), 30)
        nn.initParams()
        sgd = optimizer.SGD(nn, alpha=0.01, minibatch=30, optimizer="adagrad")
        outFile = "./param/%s/%s_wvecDim_%d_epochs_%d_step_001.bin" % (
            model, model, wvecDim, epochs)

        train_cost = []
        train_acc = []
        dev_cost = []
        dev_acc = []
        cost = 0
        accuracy = 0
        for e in range(epochs):
            start = time.time()
            sgd.run(trees)
            end = time.time()
            print "Time per epoch : %f" % (end - start)
            with open(outFile, 'w') as fid:
                hyperparam = {}
                hyperparam['alpha'] = 0.01
                hyperparam['minibatch'] = 30
                hyperparam['wvecDim'] = wvecDim
                pickle.dump(hyperparam, fid)
                nn.toFile(fid)

            cost, accuracy = test(nn, trees)
            train_cost.append(cost)
            train_acc.append(accuracy)

            cost, accuracy = test(nn, dev_trees)
            dev_cost.append(cost)
            dev_acc.append(accuracy)

            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

        plot_cost_acc(
            train_cost, dev_cost,
            "./figures/%s/%s_Cost_Figure_%d" % (model, model, wvecDim), epochs)
        plot_cost_acc(
            train_acc, dev_acc,
            "./figures/%s/%s_Accuracy_Figure_%d" % (model, model, wvecDim),
            epochs)

        anwser = "Cost = %f, Acc= %f" % (cost, accuracy)
        f.write(anwser)
        accuracy_per_wvecDim.append(accuracy)

    f.close()
    plt.figure(figsize=(6, 4))
    plt.title(r"Accuracies and vector Dimension")
    plt.xlabel("vector Dimension")
    plt.ylabel(r"Accuracy")
    plt.ylim(ymin=min(accuracy_per_wvecDim) * 0.8,
             ymax=max(accuracy_per_wvecDim) * 1.2)
    plt.plot(wvecDimList,
             accuracy_per_wvecDim,
             color='b',
             marker='o',
             linestyle='-')
    plt.savefig("./figures/%s/%s_Accuracy_and_vectorDimsension.png" %
                (model, model))
    plt.close()
Beispiel #23
0
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",
                      action="store_true",
                      dest="test",
                      default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer",
                      dest="optimizer",
                      type="string",
                      default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)

    parser.add_option("--middleDim", dest="middleDim", type="int", default=10)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=5)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)

    # for DCNN only
    parser.add_option("--ktop", dest="ktop", type="int", default=5)
    parser.add_option("--m1", dest="m1", type="int", default=10)
    parser.add_option("--m2", dest="m2", type="int", default=7)
    parser.add_option("--n1", dest="n1", type="int", default=6)
    parser.add_option("--n2", dest="n2", type="int", default=12)

    parser.add_option("--outFile",
                      dest="outFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--inFile",
                      dest="inFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")

    parser.add_option("--model", dest="model", type="string", default="RNN")

    (opts, args) = parser.parse_args(args)

    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile, opts.data, opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if (opts.model == 'RNTN'):
        nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif (opts.model == 'RNN'):
        nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif (opts.model == 'RNN2'):
        nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords,
                  opts.minibatch)
    elif (opts.model == 'RNN3'):
        nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords,
                  opts.minibatch)
    elif (opts.model == 'DCNN'):
        nn = DCNN(opts.wvecDim,
                  opts.ktop,
                  opts.m1,
                  opts.m2,
                  opts.n1,
                  opts.n2,
                  0,
                  opts.outputDim,
                  opts.numWords,
                  2,
                  opts.minibatch,
                  rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN' % opts.model

    nn.initParams()

    sgd = optimizer.SGD(nn,
                        alpha=opts.step,
                        minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    dev_trees = tr.loadTrees("dev")
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(
                test(opts.outFile, "train", opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(
                test(opts.outFile, "dev", opts.model, dev_trees))
            # clear the fprop flags in trees and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

    if evaluate_accuracy_while_training:
        pdb.set_trace()
        print train_accuracies
        print dev_accuracies
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",action="store_true",dest="test",default=False)

    # Optimizer
    parser.add_option("--minibatch",dest="minibatch",type="int",default=30)
    parser.add_option("--optimizer",dest="optimizer",type="string",
        default="adagrad")
    parser.add_option("--epochs",dest="epochs",type="int",default=50)
    parser.add_option("--step",dest="step",type="float",default=1e-2)


    parser.add_option("--middleDim",dest="middleDim",type="int",default=10)
    parser.add_option("--outputDim",dest="outputDim",type="int",default=3)
    parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)

    # for DCNN only
    parser.add_option("--ktop",dest="ktop",type="int",default=5)
    parser.add_option("--m1",dest="m1",type="int",default=10)
    parser.add_option("--m2",dest="m2",type="int",default=7)
    parser.add_option("--n1",dest="n1",type="int",default=6)
    parser.add_option("--n2",dest="n2",type="int",default=12)

    parser.add_option("--outFile",dest="outFile",type="string",
        default="models/test.bin")
    parser.add_option("--inFile",dest="inFile",type="string",
        default="models/test.bin")
    parser.add_option("--data",dest="data",type="string",default="train")

    parser.add_option("--model",dest="model",type="string",default="RNN")

    (opts,args)=parser.parse_args(args)

    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        cmfile = opts.inFile + ".confusion_matrix-" + opts.data
        test(opts.inFile,opts.data,None,opts.model,confusion_matrix_file=cmfile,full=True)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    #Load word embeddings
    L = tr.loadWordEmbedding()

    if(opts.model=='RNN2'):
        nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
    else:
        raise '%s is not a valid neural network, only RNN2'%opts.model

    nn.initParams(L)

    sgd = optimizer.SGD(nn,alpha=opts.step,minibatch=opts.minibatch,
        optimizer=opts.optimizer)


    dev_trees = tr.loadTrees("dev")
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d"%e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f"%(end-start)

        with open(opts.outFile,'w') as fid:
            pickle.dump(opts,fid)
            pickle.dump(sgd.costt,fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile,"train",L,opts.model,trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile,"dev",L,opts.model,dev_trees))
            # clear the fprop flags in trees and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            print "fprop in trees cleared"


    if evaluate_accuracy_while_training:
        # pdb.set_trace()
        print train_accuracies
        print dev_accuracies
        # Plot train/dev_accuracies here?
        plt.figure()
        plt.plot(range(len(train_accuracies)), train_accuracies, label='Train')
        plt.plot(range(len(dev_accuracies)), dev_accuracies, label='Dev')
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()
        # plot.show()
        plt.savefig(opts.outFile + ".accuracy_plot.png")
Beispiel #25
0
def run( args = None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",action="store_true",dest="test",default=False)

    # Optimizer
    parser.add_option("--minibatch",dest="minibatch",type="int",default=30)
    parser.add_option("--optimizer",dest="optimizer",type="string",default="adagrad")
    parser.add_option("--epochs",dest="epochs",type="int",default=50)
    parser.add_option("--step",dest="step",type="float",default=1e-2)


    parser.add_option("--middleDim",dest="middleDim",type="int",default=10)
    parser.add_option("--outputDim",dest="outputDim",type="int",default=5)
    parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)

    # for DCNN only
    parser.add_option("--ktop",dest="ktop",type="int",default=5)
    parser.add_option("--m1",dest="m1",type="int",default=10)
    parser.add_option("--m2",dest="m2",type="int",default=7)
    parser.add_option("--n1",dest="n1",type="int",default=6)
    parser.add_option("--n2",dest="n2",type="int",default=12)
    
    parser.add_option("--outFile",dest="outFile",type="string",default="models/test.bin")
    parser.add_option("--inFile",dest="inFile",type="string",default="models/test.bin")
    parser.add_option("--data",dest="data",type="string",default="train")

    parser.add_option("--model",dest="model",type="string",default="RNN")

    (opts,args)=parser.parse_args(args)


    # make this false if you dont care about your accuracies per epoch, makes things faster!
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile, opts.data, opts.model)
        return
    
    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if (opts.model=='RNTN'):
        nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='RNN'):
        nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='RNN2'):
        nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='RNN3'):
        nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
    elif(opts.model=='DCNN'):
        nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN'%opts.model
    
    nn.initParams()

    sgd = optimizer.SGD(nn, alpha=opts.step, minibatch=opts.minibatch, optimizer=opts.optimizer)


    dev_trees = tr.loadTrees("dev")
    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d"%e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" %(end-start)

        with open(opts.outFile,'w') as fid:
            pickle.dump(opts,fid)
            pickle.dump(sgd.costt,fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile,"train",opts.model,trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile,"dev",opts.model,dev_trees))
            # clear the fprop flags in trees and dev_trees
            for tree in trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
            print "fprop in trees cleared"


    if evaluate_accuracy_while_training:
        pdb.set_trace()
        print train_accuracies
        print dev_accuracies
Beispiel #26
0
                costP, _ = self.costAndGrad(data)
                L[i, j] -= epsilon
                numGrad = (costP - cost) / epsilon
                err = np.abs(dL[j][i] - numGrad)
                err2 += err
                count += 1

        if 0.001 > err2 / count:
            print "Grad Check Passed for dL"
        else:
            print "Grad Check Failed for dL: Sum of Error = %.9f" % (err2 /
                                                                     count)


if __name__ == '__main__':

    # Smoke test: numerically verify RNN gradients on a tiny minibatch.
    import tree as treeM
    train = treeM.loadTrees()
    numW = len(treeM.loadWordMap())  # vocabulary size for the embedding matrix

    # Small dimensions keep the finite-difference check fast.
    wvecDim = 10
    outputDim = 5

    rnn = RNN(wvecDim, outputDim, numW, mbSize=4)
    rnn.initParams()

    # Use just the first 4 trees as the check minibatch (matches mbSize).
    mbData = train[:4]

    print "Numerical gradient check..."
    rnn.check_grad(mbData)
Beispiel #27
0
def run(args=None):
    """Train (default) or evaluate an RNN sentiment model from the command line.

    Testing mode (--test): loads the model found in --model_directory and
    evaluates it on the dataset named by --data.

    Training mode: reads hyperparameters from --params_file, trains for
    params.num_epochs epochs with early stopping, and writes logs,
    per-epoch prediction CSVs, a pickled checkpoint of the best model
    (lowest validation cost) and a train/val cost summary into a fresh
    timestamp-named directory under ./results.

    Args:
        args: optional argument list for optparse (defaults to sys.argv).
    """
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage = usage)

    parser.add_option("--test", action = "store_true", dest = "test", default = False)

    # Paramsfile includes hyperparameters for training
    parser.add_option('--params_file', dest = "params_file", default = './params/exp_params.json',
                      help = "Path to the file  containing the training settings")
    parser.add_option('--data_dir', dest = "data_dir", default = './trees',
                      help = "Directory containing the trees")

    # Directory containing the model to test
    parser.add_option("--model_directory", dest = "test_dir", type = "string")
    parser.add_option("--data", dest = "data", type = "string", default = "train")

    (opts, args) = parser.parse_args(args)

    results_dir = "./results"
    if not opts.test:
        # Create a unique timestamp-named directory for this training job.
        results_dir_current_job = os.path.join(results_dir, utils.now_as_str_f())
        while os.path.isdir(results_dir_current_job):  # generate a new timestamp if the current one already exists
            results_dir_current_job = os.path.join(results_dir, utils.now_as_str_f())
        os.makedirs(results_dir_current_job)

    # Load training settings (e.g. hyperparameters)
    params = utils.Params(opts.params_file)

    if not opts.test:
        # Copy the settings file into the results directory for reproducibility
        copyfile(opts.params_file, os.path.join(results_dir_current_job, os.path.basename(opts.params_file)))

    # Get the logger; log level may optionally be overridden via the params file
    if opts.test:
        log_path = os.path.join(opts.test_dir, 'testing.log')
    else:
        log_path = os.path.join(results_dir_current_job, 'training.log')
    log_level = params.log_level if hasattr(params, 'log_level') else logging.DEBUG
    log = utils.get_logger(log_path, log_level)

    if opts.test:
        log.info("Testing directory: " + opts.test_dir)
        log.info("Dataset used for testing: " + opts.data)
    else:
        log.info("Results directory: " + results_dir_current_job)
        log.info("Minibatch: " + str(params.optimizer_settings['minibatch']))
        log.info("Optimizer: " + params.optimizer)
        log.info("Epsilon: " + str(params.optimizer_settings['epsilon']))
        log.info("Alpha: " + str(params.optimizer_settings['alpha']))
        log.info("Number of samples used: " + str(params.sample_size))

    # Testing
    if opts.test:
        test(opts.test_dir, opts.data)
        return

    log.info("Loading data...")
    # load training data
    trees = tr.loadTrees(sample_size = params.sample_size)
    params.numWords = len(tr.loadWordMap())
    overall_performance = pd.DataFrame()

    rnn = nnet.RNN(params.wvecDim, params.outputDim, params.numWords, params.optimizer_settings['minibatch'])
    rnn.initParams()

    sgd = optimizer.SGD(rnn, alpha = params.optimizer_settings['alpha'],
                        minibatch = params.optimizer_settings['minibatch'],
                        optimizer = params.optimizer, epsilon = params.optimizer_settings['epsilon'])

    best_val_cost = float('inf')
    best_epoch = 0
    # FIX: initialise so the final append cannot raise NameError when
    # params.num_epochs is 0 (previously best_epoch_row was only bound
    # inside the improvement branch).
    best_epoch_row = None

    for e in range(params.num_epochs):
        start = time.time()
        log.info("Running epoch %d" % e)
        df, updated_model, train_cost, train_acc = sgd.run(trees)
        end = time.time()
        log.info("Time per epoch : %f" % (end - start))
        log.info("Training accuracy : %f" % train_acc)
        # VALIDATION
        val_df, val_cost, val_acc = validate(updated_model, results_dir_current_job)

        if val_cost < best_val_cost:
            # best validation cost we have seen so far: checkpoint the model
            log.info("Validation score improved, saving model")
            best_val_cost = val_cost
            best_epoch = e
            best_epoch_row = {"epoch": e, "train_cost": train_cost, "val_cost": val_cost, "train_acc": train_acc,
                              "val_acc": val_acc}
            # NOTE(review): pickle output is binary; 'wb' would be required on
            # Python 3 -- confirm against the interpreter this project targets.
            with open(results_dir_current_job + "/checkpoint.bin", 'w') as fid:
                pickle.dump(params, fid)
                pickle.dump(sgd.costt, fid)
                rnn.toFile(fid)

        # FIX: removed the stray space that was embedded in the validation CSV
        # filename ("..._epoch_ 3.csv"), making it consistent with the training CSV.
        val_df.to_csv(results_dir_current_job + "/validation_preds_epoch_" + str(e) + ".csv", header = True, index = False)
        df.to_csv(results_dir_current_job + "/training_preds_epoch_" + str(e) + ".csv", header = True, index = False)

        row = {"epoch": e, "train_cost": train_cost, "val_cost": val_cost, "train_acc": train_acc, "val_acc": val_acc}
        overall_performance = overall_performance.append(row, ignore_index = True)

        # break if no val loss improvement in the last epochs
        if (e - best_epoch) >= params.num_epochs_early_stop:
            # FIX: was log.tinfo, which raises AttributeError on standard loggers.
            log.info("No improvement in the last {num_epochs_early_stop} epochs, stop training.".format(num_epochs_early_stop=params.num_epochs_early_stop))
            break

    if best_epoch_row is not None:
        # Repeat the best epoch's row at the end of the summary for convenience.
        overall_performance = overall_performance.append(best_epoch_row, ignore_index = True)
    overall_performance.to_csv(results_dir_current_job + "/train_val_costs.csv", header = True, index = False)
    log.info("Experiment end")
                L[i,j] -= epsilon
                numGrad = (costP - cost)/epsilon
                err = np.abs(dL[j][i] - numGrad)
                #print "Analytic %.9f, Numerical %.9f, Relative Error %.9f"%(dL[j][i],numGrad,err)
                err2+=err
                count+=1

        if 0.001 > err2/count:
            print "Passed :)"
        else:
            print "Failed : Sum of Error = %.9f" % (err2/count)

if __name__ == '__main__':

    import tree as treeM
    train = treeM.loadTrees()
    numW = len(treeM.loadWordMap())

    wvecDim = 10
    outputDim = 5

    nn = RNTN(wvecDim,outputDim,numW,mbSize=4)
    nn.initParams()

    mbData = train[:1]
    #cost, grad = nn.costAndGrad(mbData)

    print "Numerical gradient check..."
    nn.check_grad(mbData)

# Beispiel #29
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",action="store_true",dest="test",default=False)

    # Optimizer
    parser.add_option("--minibatch",dest="minibatch",type="int",default=30)
    parser.add_option("--optimizer",dest="optimizer",type="string",
        default="adagrad")
    parser.add_option("--epochs",dest="epochs",type="int",default=50)
    parser.add_option("--step",dest="step",type="float",default=1e-2)
    parser.add_option("--rho",dest="rho",type="float",default=1e-4)

    parser.add_option("--middleDim",dest="middleDim",type="int",default=10)
    parser.add_option("--outputDim",dest="outputDim",type="int",default=3)
    parser.add_option("--wvecDim",dest="wvecDim",type="int",default=30)

    # for DCNN only
    parser.add_option("--ktop",dest="ktop",type="int",default=5)
    parser.add_option("--m1",dest="m1",type="int",default=10)
    parser.add_option("--m2",dest="m2",type="int",default=7)
    parser.add_option("--n1",dest="n1",type="int",default=6)
    parser.add_option("--n2",dest="n2",type="int",default=12)
    
    parser.add_option("--outFile",dest="outFile",type="string",
        default="models/test.bin")
    parser.add_option("--inFile",dest="inFile",type="string",
        default="models/test.bin")
    parser.add_option("--data",dest="data",type="string",default="train")

    parser.add_option("--model",dest="model",type="string",default="RNN")

    parser.add_option("--pretrain",dest="pretrain",default=False)
    parser.add_option("--dropout",dest="dropout",default=False)

    (opts,args)=parser.parse_args(args)


    # make this false if you dont care about your accuracies per epoch
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile,opts.data,opts.model,e=1000)
        return
    
    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []
    # load training data
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if (opts.model=='RNTN'):
        nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout,opts.rho)
    elif(opts.model=='RNN'):
        nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout,opts.rho)
    elif(opts.model=='RNN2'):
        nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout,opts.rho)
    elif(opts.model=='RNN2TANH'):
        nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout,opts.rho)
    elif(opts.model=='RNN3'):
        nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch,opts.pretrain,opts.dropout,opts.rho)
    elif(opts.model=='DCNN'):
        nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, and DCNN'%opts.model
    
    nn.initParams()

    sgd = optimizer.SGD(nn,alpha=opts.step,minibatch=opts.minibatch,
        optimizer=opts.optimizer)

    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d"%e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f"%(end-start)

        with open(opts.outFile,'w') as fid:
            pickle.dump(opts,fid)
            pickle.dump(sgd.costt,fid)
            nn.toFile(fid)
        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile,"train",opts.model,trees,e))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile,"dev",opts.model,e=e))
        if e%10==0:
            if evaluate_accuracy_while_training:
                print train_accuracies
                print dev_accuracies
                plt.figure()
                plt.plot(train_accuracies,label = 'Train')
                plt.plot(dev_accuracies,label = 'Dev')
                plt.legend()
                plt.savefig('temp/train_dev_accuracies_'+str(opts.model)+'_middle_'+str(opts.middleDim)+'.png')

    if evaluate_accuracy_while_training:
        print train_accuracies
        print dev_accuracies
        plt.figure()
        plt.plot(train_accuracies,label = 'Train')
        plt.plot(dev_accuracies,label = 'Dev')
        plt.legend()
        plt.savefig('temp/train_dev_accuracies_'+str(opts.model)+'_middle_'+str(opts.middleDim)+'.png')