def test(netFile, dataSet, model='RNN', trees=None):
    if trees is None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile

    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if model == 'RNTN':
            nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'RNN':
            nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'RNN2':
            nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'RNN3':
            nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'DCNN':
            nn = DCNN(opts.wvecDim, opts.ktop, opts.m1, opts.m2, opts.n1, opts.n2, 0,
                      opts.outputDim, opts.numWords, 2, opts.minibatch, rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise ValueError('%s is not a valid neural network; so far only RNTN, RNN, RNN2, RNN3, and DCNN are supported' % model)

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total = nn.costAndGrad(trees, test=True)
    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += (guess[i] == correct[i])

    # Build the 5x5 confusion matrix (true label x predicted label) and plot it.
    conf_arr = np.zeros((5, 5))
    for i in xrange(0, len(correct)):
        conf_arr[correct[i]][guess[i]] += 1.0
    makeconf(conf_arr, model, dataSet)

    print "Cost %f, Acc %f" % (cost, correct_sum / float(total))
    return correct_sum / float(total)
def test(netFile, dataSet, model='RNN', trees=None):
    if trees is None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile

    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if model == 'RNTN':
            nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'RNN':
            nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'RNN2':
            nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'RNN3':
            nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        elif model == 'DCNN':
            nn = DCNN(opts.wvecDim, opts.ktop, opts.m1, opts.m2, opts.n1, opts.n2, 0,
                      opts.outputDim, opts.numWords, 2, opts.minibatch, rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise ValueError('%s is not a valid neural network; so far only RNTN, RNN, RNN2, RNN3, and DCNN are supported' % model)

        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total = nn.costAndGrad(trees, test=True)
    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += (guess[i] == correct[i])

    # Plot the confusion matrix and save it alongside the other plots.
    cm = confusion_matrix(correct, guess)
    makeconf(cm)
    plt.savefig("plots/" + opts.model + "/confusion_matrix_" + model +
                "_wvecDim_" + str(opts.wvecDim) +
                "_middleDim_" + str(opts.middleDim) + ".png")

    print "Cost %f, Acc %f" % (cost, correct_sum / float(total))
    return correct_sum / float(total)
def test(netFile, dataSet, L, model='RNN', trees=None, confusion_matrix_file=None, full=False):
    if trees is None:
        trees = tr.loadTrees(dataSet)
    if L is None:
        L = tr.loadWordEmbedding()
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s" % netFile

    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        if model == 'RNN2':
            nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
        else:
            raise ValueError('%s is not a valid neural network; only RNN2 is supported' % model)

        nn.initParams(L)
        nn.fromFile(fid)

    print "Testing %s..." % model

    cost, correct, guess, total, actss = nn.costAndGrad(trees, test=True)

    if full:
        # Dump the collected activations for later analysis.
        import pickle as pkl
        with open('{}_actss_{}.pkl'.format(netFile, dataSet), 'w') as fid:
            pkl.dump(actss, fid)

    correct_sum = 0
    for i in xrange(0, len(correct)):
        correct_sum += (guess[i] == correct[i])

    # Generate the confusion matrix if an output file was given.
    if confusion_matrix_file is not None:
        cm = confusion_matrix(correct, guess)
        makeconf(cm, confusion_matrix_file)

    print "Cost %f, Acc %f" % (cost, correct_sum / float(total))
    return correct_sum / float(total)
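# The makeconf helper called by the test() variants above is not shown in this
# section. Below is a minimal sketch, assuming the signature of the last
# variant, makeconf(conf_arr, outFile), and that the helper just saves a
# matplotlib heatmap of the row-normalized counts. The name makeconf_sketch
# and the exact layout are assumptions, not the repo's actual implementation.
import numpy as np
import matplotlib.pyplot as plt

def makeconf_sketch(conf_arr, outFile):
    # Row-normalize so each true class sums to 1 (guard against empty rows).
    norm = conf_arr / np.maximum(conf_arr.sum(axis=1, keepdims=True), 1.0)

    fig, ax = plt.subplots()
    im = ax.imshow(norm, cmap=plt.cm.jet, interpolation='nearest')

    # Annotate each cell with the raw count.
    for i in xrange(conf_arr.shape[0]):
        for j in xrange(conf_arr.shape[1]):
            ax.annotate(str(int(conf_arr[i, j])), xy=(j, i),
                        horizontalalignment='center', verticalalignment='center')

    fig.colorbar(im)
    ax.set_xlabel('predicted label')
    ax.set_ylabel('true label')
    fig.savefig(outFile, format='png')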
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test", action="store_true", dest="test", default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer", dest="optimizer", type="string", default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)

    parser.add_option("--middleDim", dest="middleDim", type="int", default=10)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=5)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)

    # for DCNN only
    parser.add_option("--ktop", dest="ktop", type="int", default=5)
    parser.add_option("--m1", dest="m1", type="int", default=10)
    parser.add_option("--m2", dest="m2", type="int", default=7)
    parser.add_option("--n1", dest="n1", type="int", default=6)
    parser.add_option("--n2", dest="n2", type="int", default=12)

    parser.add_option("--outFile", dest="outFile", type="string", default="models/test.bin")
    parser.add_option("--inFile", dest="inFile", type="string", default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")
    parser.add_option("--model", dest="model", type="string", default="RNN")

    (opts, args) = parser.parse_args(args)

    # Set this to False if you don't need per-epoch accuracies; it makes training faster.
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile, opts.data, opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []

    # Load training data.
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if opts.model == 'RNTN':
        nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'RNN':
        nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'RNN2':
        nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'RNN3':
        nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'DCNN':
        nn = DCNN(opts.wvecDim, opts.ktop, opts.m1, opts.m2, opts.n1, opts.n2, 0,
                  opts.outputDim, opts.numWords, 2, opts.minibatch, rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise ValueError('%s is not a valid neural network; so far only RNTN, RNN, RNN2, RNN3, and DCNN are supported' % opts.model)

    nn.initParams()

    sgd = optimizer.SGD(nn, alpha=opts.step, minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    dev_trees = tr.loadTrees("dev")

    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.toFile(fid)

        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile, "train", opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile, "dev", opts.model, dev_trees))

            # Clear the fprop flags in trees and dev_trees so the next epoch recomputes them.
            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

    if evaluate_accuracy_while_training:
        pdb.set_trace()
        print train_accuracies
        print dev_accuracies
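# Sketch of the saved-model layout implied by run() and test() above: the
# output file holds the pickled training options, then the cost history from
# the optimizer, and finally the network parameters appended by nn.toFile.
# Reading the first two back by hand would look roughly like this; the helper
# name loadSavedOpts is hypothetical, not part of the repo.
import pickle

def loadSavedOpts(netFile):
    with open(netFile, 'r') as fid:
        opts = pickle.load(fid)    # training options (optparse Values)
        costs = pickle.load(fid)   # cost history saved as sgd.costt
        # The remaining bytes are the network parameters, consumed by
        # nn.fromFile(fid) once the matching model class has been built.
        return opts, costs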
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test", action="store_true", dest="test", default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer", dest="optimizer", type="string", default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)

    parser.add_option("--middleDim", dest="middleDim", type="int", default=10)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=5)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)

    # By @tiagokv, just to ease the first assignment test
    parser.add_option("--wvecDimBatch", dest="wvecDimBatch", type="string", default="")

    # for DCNN only
    parser.add_option("--ktop", dest="ktop", type="int", default=5)
    parser.add_option("--m1", dest="m1", type="int", default=10)
    parser.add_option("--m2", dest="m2", type="int", default=7)
    parser.add_option("--n1", dest="n1", type="int", default=6)
    parser.add_option("--n2", dest="n2", type="int", default=12)

    parser.add_option("--outFile", dest="outFile", type="string", default="models/test.bin")
    parser.add_option("--inFile", dest="inFile", type="string", default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")
    parser.add_option("--model", dest="model", type="string", default="RNN")

    (opts, args) = parser.parse_args(args)

    # Set this to False if you don't need per-epoch accuracies; it makes training faster.
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        test(opts.inFile, opts.data, opts.model)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []

    # Load training data.
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    if opts.model == 'RNTN':
        nn = RNTN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'RNN':
        nn = RNN(opts.wvecDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'RNN2':
        nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'RNN3':
        nn = RNN3(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
    elif opts.model == 'DCNN':
        nn = DCNN(opts.wvecDim, opts.ktop, opts.m1, opts.m2, opts.n1, opts.n2, 0,
                  opts.outputDim, opts.numWords, 2, opts.minibatch, rho=1e-4)
        trees = cnn.tree2matrix(trees)
    else:
        raise ValueError('%s is not a valid neural network; so far only RNTN, RNN, RNN2, RNN3, and DCNN are supported' % opts.model)

    nn.initParams()

    sgd = optimizer.SGD(nn, alpha=opts.step, minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    # Make sure the folder for plots exists.
    if not os.path.isdir('plots'):
        os.makedirs('plots')
    if not os.path.isdir('plots/' + opts.model):
        os.makedirs('plots/' + opts.model)

    dev_trees = tr.loadTrees("dev")

    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.toFile(fid)

        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile, "train", opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile, "dev", opts.model, dev_trees))

            # Clear the fprop flags in trees and dev_trees so the next epoch recomputes them.
            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

    if evaluate_accuracy_while_training:
        # Plot train/dev accuracy per epoch and append the final dev accuracy to a log file.
        plt.figure()
        plt.title('Accuracy per epoch')
        plt.plot(range(opts.epochs), train_accuracies, label='train')
        plt.plot(range(opts.epochs), dev_accuracies, label='dev')

        with open('dev_accu' + opts.model, 'a') as fid:
            fid.write(str(opts.wvecDim) + ',' + str(opts.middleDim) + ',' +
                      str(dev_accuracies[-1]) + ';')

        # plt.axis([0, opts.epochs, 0, 1])
        plt.xlabel('epochs')
        plt.ylabel('accuracy')
        plt.legend(loc=2, borderaxespad=0.)

        # Always save with middleDim in the name, even for a one-layer RNN.
        plt.savefig('plots/' + opts.model + '/accuracy_wvec_' + str(opts.wvecDim) +
                    '_middleDim_' + str(opts.middleDim) + '.png')
        print 'image saved at %s' % os.getcwd()
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test", action="store_true", dest="test", default=False)

    # Optimizer
    parser.add_option("--minibatch", dest="minibatch", type="int", default=30)
    parser.add_option("--optimizer", dest="optimizer", type="string", default="adagrad")
    parser.add_option("--epochs", dest="epochs", type="int", default=50)
    parser.add_option("--step", dest="step", type="float", default=1e-2)

    parser.add_option("--middleDim", dest="middleDim", type="int", default=10)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=3)
    parser.add_option("--wvecDim", dest="wvecDim", type="int", default=30)

    # for DCNN only
    parser.add_option("--ktop", dest="ktop", type="int", default=5)
    parser.add_option("--m1", dest="m1", type="int", default=10)
    parser.add_option("--m2", dest="m2", type="int", default=7)
    parser.add_option("--n1", dest="n1", type="int", default=6)
    parser.add_option("--n2", dest="n2", type="int", default=12)

    parser.add_option("--outFile", dest="outFile", type="string", default="models/test.bin")
    parser.add_option("--inFile", dest="inFile", type="string", default="models/test.bin")
    parser.add_option("--data", dest="data", type="string", default="train")
    parser.add_option("--model", dest="model", type="string", default="RNN")

    (opts, args) = parser.parse_args(args)

    # Set this to False if you don't need per-epoch accuracies; it makes training faster.
    evaluate_accuracy_while_training = True

    # Testing
    if opts.test:
        cmfile = opts.inFile + ".confusion_matrix-" + opts.data
        test(opts.inFile, opts.data, None, opts.model, confusion_matrix_file=cmfile, full=True)
        return

    print "Loading data..."
    train_accuracies = []
    dev_accuracies = []

    # Load training data.
    trees = tr.loadTrees('train')
    opts.numWords = len(tr.loadWordMap())

    # Load word embeddings.
    L = tr.loadWordEmbedding()

    if opts.model == 'RNN2':
        nn = RNN2(opts.wvecDim, opts.middleDim, opts.outputDim, opts.numWords, opts.minibatch)
    else:
        raise ValueError('%s is not a valid neural network; only RNN2 is supported' % opts.model)

    nn.initParams(L)

    sgd = optimizer.SGD(nn, alpha=opts.step, minibatch=opts.minibatch,
                        optimizer=opts.optimizer)

    dev_trees = tr.loadTrees("dev")

    for e in range(opts.epochs):
        start = time.time()
        print "Running epoch %d" % e
        sgd.run(trees)
        end = time.time()
        print "Time per epoch : %f" % (end - start)

        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(sgd.costt, fid)
            nn.toFile(fid)

        if evaluate_accuracy_while_training:
            print "testing on training set real quick"
            train_accuracies.append(test(opts.outFile, "train", L, opts.model, trees))
            print "testing on dev set real quick"
            dev_accuracies.append(test(opts.outFile, "dev", L, opts.model, dev_trees))

            # Clear the fprop flags in trees and dev_trees so the next epoch recomputes them.
            for tree in trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            for tree in dev_trees:
                tr.leftTraverse(tree.root, nodeFn=tr.clearFprop)
            print "fprop in trees cleared"

    if evaluate_accuracy_while_training:
        print train_accuracies
        print dev_accuracies

        # Plot train/dev accuracies per epoch.
        plt.figure()
        plt.plot(range(len(train_accuracies)), train_accuracies, label='Train')
        plt.plot(range(len(dev_accuracies)), dev_accuracies, label='Dev')
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()
        # plt.show()
        plt.savefig(opts.outFile + ".accuracy_plot.png")
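# Usage sketch (an assumption, not shown in this section): driver scripts of
# this kind usually expose run() as the module entry point, so training and
# testing would look roughly like the commands below. The script name
# runNNet.py is hypothetical.
#
#   python runNNet.py --model RNN2 --wvecDim 30 --middleDim 10 \
#       --epochs 50 --outFile models/rnn2.bin
#   python runNNet.py --test --inFile models/rnn2.bin --data dev --model RNN2
if __name__ == '__main__':
    run()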