"""Train a small CNN (three conv/pool stages + FC + softmax) with mini-batch SGD.

Reads the default dataset, builds the network layer by layer, then runs
100 epochs of mini-batch training, checkpointing to 'cnn_params.pkl'.
"""
from layer.core import *
from algorithm.SGD import Mini_Batch
from data.process import loadData
from layer.model import Model

if __name__ == '__main__':
    dataset = loadData()
    net = Model(batch_size=100, lr=0.0001, dataSet=dataset, weight_decay=0.004)

    # Feature-map counts for the three conv stages, then the FC-layer width.
    widths = [32, 32, 64, 64]
    bsz = 100

    net.add(DataLayer(bsz, (32, 32, 3)))
    net.add(ConvolutionLayer((bsz, 3, 32, 32), (widths[0], 3, 3, 3),
                             'relu', 'Gaussian', 0.0001))
    net.add(PoolingLayer())
    net.add(ConvolutionLayer((bsz, widths[0], 15, 15),
                             (widths[1], widths[0], 4, 4),
                             'relu', 'Gaussian', 0.01))
    net.add(PoolingLayer())
    net.add(ConvolutionLayer((bsz, widths[1], 6, 6),
                             (widths[2], widths[1], 5, 5),
                             'relu', 'Gaussian', 0.01))
    net.add(PoolingLayer())
    # Final pooled maps are 1x1, so the FC input is widths[2] features.
    net.add(FullyConnectedLayer(widths[2] * 1 * 1, widths[3],
                                'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.5))
    net.add(SoftmaxLayer(widths[3], 5, 'Gaussian', 0.1))

    net.build_train_fn()
    net.build_vaild_fn()  # NOTE: "vaild" is the Model API's own spelling
    trainer = Mini_Batch(model=net, n_epochs=100,
                         load_param='cnn_params.pkl',
                         save_param='cnn_params.pkl')
    trainer.run()
"""Train a 3-hidden-layer MLP (1000 units each) with pretraining, 10 classes.

Loads the scaled dataset from 'data.pkl', runs Model.pretrain(), then
fine-tunes with mini-batch SGD, checkpointing to 'mlp_params.pkl'.
"""
from layer.core import *
from algorithm.SGD import Mini_Batch
from data.process import loadData, loadScaleData
from layer.model import Model

if __name__ == '__main__':
    dataset = loadScaleData('data.pkl')
    net = Model(batch_size=100, lr=0.01, dataSet=dataset, weight_decay=0.0)

    hidden = [1000, 1000, 1000]  # units per hidden layer
    bsz = 100
    n_in = 32 * 32 * 3           # flattened 32x32 RGB input

    net.add(DataLayer(bsz, n_in))
    net.add(FullyConnectedLayer(n_in, hidden[0], 'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.2))
    net.add(FullyConnectedLayer(hidden[0], hidden[1], 'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.2))
    net.add(FullyConnectedLayer(hidden[1], hidden[2], 'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.2))
    net.add(SoftmaxLayer(hidden[2], 10))

    net.pretrain()
    net.build_train_fn()
    net.build_vaild_fn()  # NOTE: "vaild" is the Model API's own spelling
    trainer = Mini_Batch(model=net, n_epochs=100,
                         load_param='mlp_params.pkl',
                         save_param='mlp_params.pkl')
    trainer.run()
"""Train a 3-hidden-layer MLP on 32x32 grayscale inputs, 5 output classes.

Three dataset variants exist (ROT / ROI / NORMAL); the ROI loader is the
active one. Checkpoints go to 'mlp_params.pkl'.
"""
from layer.core import *
from algorithm.SGD import Mini_Batch
from data.process import loadTrainData
from layer.model import Model

if __name__ == '__main__':
    # load ROI+ROTATION dataset
    #dataset = loadTrainData("dataset/ROT/data.pkl", "dataset/ROT/mean.pkl", scale=128.0)
    # load normal dataset
    #dataset = loadTrainData("dataset/NORMAL/data.pkl", "dataset/NORMAL/mean.pkl", scale=128.0)
    # load ROI dataset (active)
    dataset = loadTrainData("dataset/ROI/data.pkl", "dataset/ROI/mean.pkl", scale=128.0)

    net = Model(batch_size=100, lr=0.001, dataSet=dataset, weight_decay=0.0)

    # Hidden-layer widths. Other configurations tried: [2000], [2000, 2000, 2000].
    hidden = [1000, 1000, 1000]
    bsz = 100
    n_in = 32 * 32  # flattened 32x32 grayscale input

    net.add(DataLayer(bsz, n_in))
    net.add(FullyConnectedLayer(n_in, hidden[0], 'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.2))
    net.add(FullyConnectedLayer(hidden[0], hidden[1], 'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.2))
    net.add(FullyConnectedLayer(hidden[1], hidden[2], 'relu', 'Gaussian', 0.1))
    net.add(DropoutLayer(0.2))
    net.add(SoftmaxLayer(hidden[2], 5))

    net.build_train_fn()
    net.build_vaild_fn()  # NOTE: "vaild" is the Model API's own spelling
    trainer = Mini_Batch(model=net, n_epochs=200,
                         load_param='mlp_params.pkl',
                         save_param='mlp_params.pkl')
    trainer.run()
# NOTE(review): whitespace-mangled Python 2 chunk, preserved verbatim below.
# Visible content: test_knn_stat() loops test_knn over the five ROI test
# batches, prints an aggregate error rate from the module-level test_hit /
# test_count counters, then resets them; a __main__ block then starts building
# a batch_size=1 CNN (conv 3x3 -> pool -> conv 4x4 -> pool -> ...) for
# single-image inference.
# The chunk is TRUNCATED mid-statement at the final "Test_Single.add(" —
# the remaining layers lie outside this view, so the code is left unreformatted.
# Presumably test_knn, Model, T (Theano tensor module) and the layer classes
# are defined/imported earlier in the file — TODO confirm against full source.
def test_knn_stat(): print "\nNow tesing with KNN-ROI.....\n" global test_hit, test_count for idx in xrange(5): test_knn(("dataset/ROI_TEST/%d.bin") % idx, "dataset/ROI/mean.pkl", idx) print "total error rate: %f%%\n" % (float(test_count - test_hit) / test_count * 100) # clear test_hit = test_count = 0 if __name__ == '__main__': Test_Single = Model(batch_size=1, lr=0.01, dataSet=None) meta_num = 100 neure = [meta_num, meta_num, meta_num, meta_num] batch_size = 1 x = T.matrix('x') index = T.lscalar() Test_Single.add(DataLayer(batch_size, (32, 32, 1))) Test_Single.add( ConvolutionLayer((batch_size, 1, 32, 32), (neure[0], 1, 3, 3), 'relu', 'Gaussian', 0.0001)) Test_Single.add(PoolingLayer()) Test_Single.add( ConvolutionLayer((batch_size, neure[0], 15, 15), (neure[1], neure[0], 4, 4), 'relu', 'Gaussian', 0.01)) Test_Single.add(PoolingLayer()) Test_Single.add(
# NOTE(review): whitespace-mangled Python 2 chunk, preserved verbatim below.
# It BEGINS mid-function: the leading print statements (per-expression hit
# counts y/ans/examples) belong to a function whose header is outside this
# view, so the chunk is left unreformatted rather than guessing its structure.
# Visible content after that: test_knn_stat() (aggregate KNN-ROI error rate,
# then resets the global test_hit/test_count counters) and a __main__ block
# that builds a batch_size=1 MLP (FC 1024->100 x3 with dropout, softmax over
# 5 classes) and loads weights from 'params/DNN2000_ROI.pkl' for inference.
print "facial expression %d: test result %d/%d" %(y,ans,examples) print " error rate %f%%" %(float(examples-ans)/examples*100) global test_hit,test_count; test_hit+=ans test_count+=examples def test_knn_stat(): print "\nNow tesing with KNN-ROI.....\n" global test_hit,test_count; for idx in xrange(5): test_knn(("dataset/ROI_TEST/%d.bin")%idx,"dataset/ROI/mean.pkl",idx) print "total error rate: %f%%\n"%(float(test_count-test_hit)/test_count*100) # clear test_hit=test_count=0 if __name__ == '__main__': Test_Single=Model(batch_size=1,lr=0.01,dataSet=None) meta_num=100 neure=[meta_num,meta_num,meta_num,meta_num] batch_size=1 x=T.matrix('x') index=T.lscalar() Test_Single.add(DataLayer(batch_size,32*32)) Test_Single.add(FullyConnectedLayer(32*32,neure[0],'relu','Gaussian',0.1)) Test_Single.add(DropoutLayer(0.2)) Test_Single.add(FullyConnectedLayer(neure[0],neure[1],'relu','Gaussian',0.1)) Test_Single.add(DropoutLayer(0.2)) Test_Single.add(FullyConnectedLayer(neure[1],neure[2],'relu','Gaussian',0.1)) Test_Single.add(DropoutLayer(0.2)) Test_Single.add(SoftmaxLayer(neure[2],5)) Test_Single.build_test_fn() Test_Single.load_params('params/DNN2000_ROI.pkl')
def test_knn_stat(): print "\nNow tesing with KNN-ROI.....\n" global test_hit, test_count for idx in xrange(5): test_knn(("dataset/ROI_TEST/%d.bin") % idx, "dataset/ROI/mean.pkl", idx) print "total error rate: %f%%\n" % (float(test_count - test_hit) / test_count * 100) # clear test_hit = test_count = 0 if __name__ == '__main__': Test_Single = Model(batch_size=1, lr=0.01, dataSet=None) meta_num = 100 neure = [meta_num, meta_num, meta_num, meta_num] batch_size = 1 x = T.matrix('x') index = T.lscalar() Test_Single.add(DataLayer(batch_size, 32 * 32)) Test_Single.add( FullyConnectedLayer(32 * 32, neure[0], 'relu', 'Gaussian', 0.1)) Test_Single.add(DropoutLayer(0.2)) Test_Single.add(SoftmaxLayer(neure[0], 5)) Test_Single.build_test_fn() Test_Single.load_params('params/1NN2000_ROI.pkl') test_pred = Test_Single.test_pred test_belief = Test_Single.test_belief test_single_stat()
# NOTE(review): whitespace-mangled Python 2 chunk, preserved verbatim below.
# Visible content: a CNN training script — loads the ROT (ROI+ROTATION)
# dataset (ROI and NORMAL loaders are commented alternatives), builds a
# Model with lr=0.0001 and three conv/pool stages sized [64, 64, 128] with a
# 300-unit FC stage (commented alternatives: [32,32,64,64] / [48,48,96,200]).
# The chunk is TRUNCATED mid-statement inside the final
# "FullyConnectedLayer(neure[2] * 1 * 1, neure[3], 'relu', 'Gaussian'," call —
# the trailing arguments and the rest of the script lie outside this view,
# so the code is left unreformatted rather than guessing the missing tail.
from layer.core import * from algorithm.SGD import Mini_Batch from data.process import loadTrainData from layer.model import Model if __name__ == '__main__': # load ROI+ROTATION dataset dataSet = loadTrainData("dataset/ROT/data.pkl", "dataset/ROT/mean.pkl") # load ROI dataset #dataSet=loadTrainData("dataset/ROI/data.pkl","dataset/ROI/mean.pkl"); # load normal dataset #dataSet=loadTrainData("dataset/NORMAL/data.pkl","dataset/NORMAL/mean.pkl"); cifar = Model(batch_size=100, lr=0.0001, dataSet=dataSet, weight_decay=0) neure = [64, 64, 128, 300] #neure=[32,32,64,64] #neure=[48,48,96,200] batch_size = 100 cifar.add(DataLayer(batch_size, (32, 32, 1))) cifar.add( ConvolutionLayer((batch_size, 1, 32, 32), (neure[0], 1, 3, 3), 'relu', 'Gaussian', 0.0001)) cifar.add(PoolingLayer()) cifar.add( ConvolutionLayer((batch_size, neure[0], 15, 15), (neure[1], neure[0], 4, 4), 'relu', 'Gaussian', 0.01)) cifar.add(PoolingLayer()) cifar.add( ConvolutionLayer((batch_size, neure[1], 6, 6), (neure[2], neure[1], 5, 5), 'relu', 'Gaussian', 0.01)) cifar.add(PoolingLayer()) cifar.add( FullyConnectedLayer(neure[2] * 1 * 1, neure[3], 'relu', 'Gaussian',
# NOTE(review): whitespace-mangled Python 2 chunk, preserved verbatim below.
# It BEGINS mid-function (the leading per-expression print statements belong
# to a function whose header is outside this view) and appears to continue
# past the last visible statement, so it is left unreformatted.
# Visible content: test_knn_stat() (aggregate KNN-ROI error rate over five
# test batches, then resets global test_hit/test_count) and a __main__ block
# that assembles a batch_size=1 CNN — conv 3x3 -> pool -> conv 4x4 -> pool ->
# conv 5x5 -> pool -> FC -> dropout -> 5-way softmax — presumably followed by
# build_test_fn()/load_params() beyond this view (TODO confirm).
print "facial expression %d: test result %d/%d" %(y,ans,examples) print " error rate %f%%" %(float(examples-ans)/examples*100) global test_hit,test_count; test_hit+=ans test_count+=examples def test_knn_stat(): print "\nNow tesing with KNN-ROI.....\n" global test_hit,test_count; for idx in xrange(5): test_knn(("dataset/ROI_TEST/%d.bin")%idx,"dataset/ROI/mean.pkl",idx) print "total error rate: %f%%\n"%(float(test_count-test_hit)/test_count*100) # clear test_hit=test_count=0 if __name__ == '__main__': Test_Single=Model(batch_size=1,lr=0.01,dataSet=None) meta_num=100 neure=[meta_num,meta_num,meta_num,meta_num] batch_size=1 x=T.matrix('x') index=T.lscalar() Test_Single.add(DataLayer(batch_size,(32,32,1))) Test_Single.add(ConvolutionLayer((batch_size,1,32,32),(neure[0],1,3,3),'relu','Gaussian',0.0001)) Test_Single.add(PoolingLayer()) Test_Single.add(ConvolutionLayer((batch_size,neure[0],15,15),(neure[1],neure[0],4,4),'relu','Gaussian',0.01)) Test_Single.add(PoolingLayer()) Test_Single.add(ConvolutionLayer((batch_size,neure[1],6,6),(neure[2],neure[1],5,5),'relu','Gaussian',0.01)) Test_Single.add(PoolingLayer()) Test_Single.add(FullyConnectedLayer(neure[2]*1*1,neure[3],'relu','Gaussian',0.1)) Test_Single.add(DropoutLayer(0.5)) Test_Single.add(SoftmaxLayer(neure[3],5,'Gaussian',0.1))
"""Train an autoencoder + softmax classifier with greedy pretraining.

Loads the scaled dataset from 'data.pkl', pretrains the AutoEncodeLayer,
then fine-tunes with mini-batch SGD, saving to 'mlp_params.pkl' (no
parameters are loaded at start: load_param='').
"""
from layer.core import *
from algorithm.SGD import Mini_Batch
from data.process import loadData, loadScaleData
from layer.model import Model

if __name__ == '__main__':
    dataset = loadScaleData('data.pkl')
    net = Model(batch_size=100, lr=0.005, dataSet=dataset, weight_decay=0.0)

    hidden = [1000, 1000, 1000]  # only hidden[0] is used by this stack
    bsz = 100
    n_in = 32 * 32 * 3           # flattened 32x32 RGB input

    net.add(DataLayer(bsz, n_in))
    # cost='squre' is passed through as-is — presumably the spelling the
    # AutoEncodeLayer API expects; verify against layer.core before changing.
    net.add(AutoEncodeLayer(n_in, hidden[0], 'relu', 'softplus',
                            cost='squre', weight_init='Gaussian',
                            gauss_std=0.1, level=0.3))
    net.add(DropoutLayer(0.2))
    net.add(SoftmaxLayer(hidden[0], 10))

    # Unsupervised pretraining before supervised fine-tuning.
    net.pretrain(batch_size=20, n_epoches=15)
    net.build_train_fn()
    net.build_vaild_fn()  # NOTE: "vaild" is the Model API's own spelling
    trainer = Mini_Batch(model=net, n_epochs=100,
                         load_param='', save_param='mlp_params.pkl')
    trainer.run()