# NOTE(review): script fragment — Skeleton_matrix, njoints, Mean1 and Std1 must be
# bound by earlier (unseen) code, and the second dbn built below is presumably
# evaluated by later unseen code; do not rename these top-level bindings.

# Build the real-time skeleton feature matrix and normalise it with the
# pre-computed training-set statistics.
Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
Feature_normalized = normalize(Feature, Mean1, Std1)

### Feed into DBN
# The compiled Theano function below takes no explicit inputs; it reads the
# features through this shared variable instead (borrow=True avoids a copy).
shared_x = theano.shared(numpy.asarray(Feature_normalized, dtype=theano.config.floatX), borrow=True)

# Fixed seed so the randomly initialised weights are reproducible before the
# pre-trained parameters are loaded over them.
numpy_rng = numpy.random.RandomState(123)

### model 1 ##########################
dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201)
dbn.load('dbn_2014-05-23-20-07-28.npy')  # pre-trained parameters for model 1

# Feed-forward pass: `givens` substitutes shared_x for the symbolic network
# input, and the output is the logistic layer's class posteriors p(y|x).
validate_model = theano.function(inputs=[], outputs=dbn.logLayer.p_y_given_x, givens={ dbn.x: shared_x})
observ_likelihood_1 = validate_model()  # per-frame class posteriors from model 1
del dbn  # release model 1 before building model 2

### model 2 ##########################
dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201)
dbn.load('dbn_2014-05-24-05-53-17.npy')  # pre-trained parameters for model 2
# (model 2 is evaluated by code beyond this fragment — TODO confirm)
Feature_normalized = normalize(Feature, Mean1, Std1) ### Feed into DBN shared_x = theano.shared(numpy.asarray(Feature_normalized, dtype=theano.config.floatX), borrow=True) numpy_rng = numpy.random.RandomState(123) ### model 1 ########################## dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201) dbn.load('dbn_2014-05-23-20-07-28.npy') validate_model = theano.function(inputs=[], outputs=dbn.logLayer.p_y_given_x, givens={dbn.x: shared_x}) observ_likelihood_1 = validate_model() del dbn ### model 2 ########################## dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201) dbn.load('dbn_2014-05-24-05-53-17.npy')
class RunDbn(object):
    """Real-time gesture recogniser.

    A pre-trained GRBM-DBN maps 528-dim skeleton features to per-frame class
    posteriors (201 states), which a Viterbi pass then decodes into gesture
    labels. Construction loads normalisation constants, the Viterbi prior /
    transition matrix, and the pre-trained network from files in the working
    directory.
    """

    def __init__(self):
        ####################################
        ### Some initialization ############
        ####################################
        # The 11 skeleton joints used to build the feature vector.
        self.used_joints = ['ElbowLeft', 'WristLeft', 'ShoulderLeft','HandLeft', 'ElbowRight', 'WristRight','ShoulderRight','HandRight', 'Head','Spine','HipCenter']
        self.njoints = len(self.used_joints)

        ### Load the pre-stored normalization constants (computed offline on
        ### the training set; cPickle deserialises the saved dict).
        f = open('SK_normalization.pkl','rb')
        SK_normalization = cPickle.load(f)
        self.Mean1 = SK_normalization['Mean1']  # feature mean, used by normalize() on live data
        self.Std1 = SK_normalization['Std1']    # feature std,  used by normalize() on live data

        ## Load prior and transition matrix for the Viterbi decoder
        ## (pre-computed, stored as MATLAB .mat; scipy.io reads it).
        dic = sio.loadmat('Transition_matrix.mat')
        # Keep both in log space so decoding avoids numeric underflow.
        self.Transition_matrix = log(dic['Transition_matrix'])
        self.Prior = log(dic['Prior'])

        ##########################
        ### model 1 (network architecture variant 1) #
        ##########################
        self.numpy_rng = numpy.random.RandomState(123)  # fixed seed for reproducible init
        self.dbn = GRBM_DBN(numpy_rng=self.numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201)
        self.dbn.load('dbn_2014-05-23-20-07-28.npy')  # pre-trained parameters

        # Compile the feed-forward pass once: the call argument z is
        # substituted for the symbolic network input via `givens`, and the
        # output is the logistic layer's class posteriors p(y|x).
        z = theano.tensor.dmatrix('z')
        self.validate_model = theano.function(inputs=[z], outputs=self.dbn.logLayer.p_y_given_x, givens={ self.dbn.x: z})

    def myBuildDBNtest(self):
        """Offline smoke test: run DBN scoring + Viterbi decoding on a pickled
        skeleton matrix ("testSkeleton_matrix") and print the decoded labels."""
        # Extract the raw skeleton joints of all frames into Skeleton_matrix;
        # in the online path this would come from the sample reader:
        #Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, 1, smp.getNumFrames())
        time_tic = time.time()
        import cPickle
        Skeleton_matrix = cPickle.load(open("testSkeleton_matrix","rb"))
        #print Skeleton_matrix
        Feature = Extract_feature_Realtime(Skeleton_matrix, self.njoints)
        Feature_normalized = normalize(Feature, self.Mean1, self.Std1)
        '''
        ##########################
        ### model 1 (superseded by the compiled function built in __init__) #
        ##########################
        dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201)
        dbn.load('dbn_2014-05-23-20-07-28.npy')
        validate_model = theano.function(inputs=[], outputs=dbn.logLayer.p_y_given_x, givens={ dbn.x: shared_x})
        '''
        # Per-frame class posteriors from the pre-compiled network.
        observ_likelihood_1 = self.validate_model(Feature_normalized)

        ##########################
        # viterbi path decoding
        #####################
        # Only the first 50 frames are decoded in this test (single network
        # variant only).
        observ_likelihood_1 = observ_likelihood_1[0:50,:]
        # Transpose to [n_states x n_frames] and take logs; decoding is done
        # entirely in log space to avoid numeric underflow.
        log_observ_likelihood = log(observ_likelihood_1.T)
        print "处理时间 %d sec" % int(time.time() - time_tic)
        time_tic = time.time()
        print("\t Viterbi path decoding " )
        # do it in log space avoid numeric underflow
        [path, predecessor_state_index, global_score] = viterbi_path_log( self.Prior, self.Transition_matrix, log_observ_likelihood )
        label = viterbi_endframe(path,5,30)
        # Some gestures are not within the vocabulary; alternative clean-up
        # decoder kept for reference:
        #[pred_label, begin_frame, end_frame, Individual_score, frame_length] = viterbi_colab_clean(
        #    path, global_score, threshold=-100, mini_frame=19)
        print "标记是:"
        print label
        print "viterbi处理时间 %d sec" % int(time.time() - time_tic)

    def myBuildDBN(self, Skeleton_matrix):
        """Score a skeleton matrix with the DBN.

        :param Skeleton_matrix: raw per-frame skeleton joints for all frames
        :return: log posteriors, shape [n_states x n_frames] (transposed)
        """
        # In the online path the caller supplies Skeleton_matrix directly:
        #Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, 1, smp.getNumFrames())
        time_tic = time.time()
        Feature = Extract_feature_Realtime(Skeleton_matrix, self.njoints)
        Feature_normalized = normalize(Feature, self.Mean1, self.Std1)
        # Per-frame class posteriors from the pre-compiled network.
        observ_likelihood_1 = self.validate_model(Feature_normalized)

        ##########################
        # viterbi path decoding
        #####################
        # (frame-limiting used by the test variant is disabled here)
        #observ_likelihood_1=observ_likelihood_1[0:50,:]
        # Transpose to [n_states x n_frames] and move to log space.
        log_observ_likelihood = log(observ_likelihood_1.T)
        print "处理时间 %d sec" % int(time.time() - time_tic)
        return log_observ_likelihood

    def myViterbi(self, log_observ_likelihood):
        """Viterbi-decode log posteriors into gesture labels.

        :param log_observ_likelihood: [n_states x n_frames] log posteriors
        :return: decoded label sequence from viterbi_endframe
        """
        time_tic = time.time()
        print("\t Viterbi path decoding " )
        # do it in log space avoid numeric underflow
        [path, predecessor_state_index, global_score] = viterbi_path_log( self.Prior, self.Transition_matrix, log_observ_likelihood )
        label = viterbi_endframe(path,5,30)
        # Some gestures are not within the vocabulary; alternative clean-up
        # decoder kept for reference:
        #[pred_label, begin_frame, end_frame, Individual_score, frame_length] = viterbi_colab_clean(
        #    path, global_score, threshold=-100, mini_frame=19)
        print "标记是:"
        print label
        print "viterbi处理时间 %d sec" % int(time.time() - time_tic)
        return label
def test_GRBM_DBN(finetune_lr=1, pretraining_epochs=100, pretrain_lr=0.01, k=1, training_epochs=500, batch_size=200, annealing_learning_rate=0.99999):
    """Pre-train and fine-tune a GRBM-DBN, then save it to a timestamped file.

    NOTE(review): the original docstring said "demonstrated on MNIST", but the
    code actually loads CodaLab skeleton data via load_CodaLab_skel.

    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage

    :type pretraining_epochs: int
    :param pretraining_epochs: number of epochs to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type k: int
    :param k: number of Gibbs steps in CD/PCD

    :type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer

    :type batch_size: int
    :param batch_size: the size of a minibatch

    :type annealing_learning_rate: float
    :param annealing_learning_rate: multiplicative decay applied to the
        finetune learning rate (handled inside build_finetune_functions)
    """
    # 90% train / 8% validation split; 'ration_valid' is the callee's spelling.
    datasets = load_CodaLab_skel(ratio_train=0.9, ration_valid=0.08)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    # (Python 2 integer division: any trailing partial batch is dropped)
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

    # numpy random generator, fixed seed for reproducible initialisation
    numpy_rng = numpy.random.RandomState(123)
    print '... building the model'
    # construct the Deep Belief Network
    dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[2000, 2000, 1000], n_outs=201, finetune_lr=finetune_lr)

    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size, k=k)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    for i in xrange(dbn.n_layers):
        if i==0:
            # for GRBM, the learning rate needs to be about one or two orders
            # of magnitude smaller than when using binary visible units, and
            # some of the failures reported in the literature are probably
            # due to using a larger rate
            pretrain_lr_new = pretrain_lr*0.1
        else:
            pretrain_lr_new = pretrain_lr
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            start_time_temp = time.clock()
            # go through the training set, collecting the per-batch cost
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index, lr=pretrain_lr_new))
            end_time_temp = time.clock()
            print 'Pre-training layer %i, epoch %d, cost %f ' % (i, epoch, numpy.mean(c)) + ' ran for %d sec' % ((end_time_temp - start_time_temp))

    end_time = time.clock()
    print >> sys.stderr, ('The pretraining code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.))

    ########################
    # FINETUNING THE MODEL #
    ########################
    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = dbn.build_finetune_functions(datasets=datasets, batch_size=batch_size, annealing_learning_rate=annealing_learning_rate)

    print '... finetunning the model'
    # early-stopping parameters
    patience = 4 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.          # wait this much longer when a new best is found
    improvement_threshold = 0.999   # a relative improvement of this much is
                                    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                    # go through this many minibatches before
                                    # checking the network on the validation
                                    # set; in this case we check every epoch

    best_params = None  # NOTE(review): never assigned afterwards — appears vestigial
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0
    while (epoch < training_epochs) and (not done_looping):
        start_time_temp = time.clock()
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            # global iteration counter across epochs
            iter = (epoch - 1) * n_train_batches + minibatch_index
            if (iter + 1) % validation_frequency == 0:
                import warnings
                warnings.filterwarnings("ignore")
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if (this_validation_loss < best_validation_loss * improvement_threshold):
                        patience = max(patience, iter * patience_increase)
                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    # test it on the test set (only when validation improves)
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                end_time_temp = time.clock()
                print(('epoch %i, minibatch %i/%i, validation error %f %%' \
                       'test error of best model %f %%, used time %d sec') % (epoch, minibatch_index + 1, n_train_batches,this_validation_loss * 100., test_score * 100., (end_time_temp - start_time_temp)))
            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%,' 'with test performance %f %%') % (best_validation_loss * 100., test_score * 100.))
    print >> sys.stderr, ('The fine tuning code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.))

    # Persist the trained model under a timestamped name, e.g. dbn_2014-...
    from time import gmtime, strftime
    filename = 'dbn_'+strftime("%Y-%m-%d-%H-%M-%S", gmtime())
    dbn.save(filename)

    if 0:  # dead branch kept for reference: standalone evaluation of a saved model
        ## Now for testing
        dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201)
        dbn.load('dbn_2014-05-22-18-39-37.npy')
        # compiling a Theano function that computes the mistakes that are made
        # by the model on a minibatch
        index = T.lscalar('index')
        validate_model = theano.function(inputs=[index], outputs=dbn.logLayer.p_y_given_x, givens={ dbn.x: valid_set_x[index * batch_size:(index + 1) * batch_size]})
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
        n_valid_batches /= batch_size
        temp = [validate_model(i) for i in xrange(n_valid_batches)]
# Demo script: train a GRBM-DBN on MNIST, reload it, and classify a few samples.
# NOTE(review): this test_GRBM_DBN variant takes per-layer lists and extra kwargs
# (weight_decay, momentum, datasets, ...) — a different GRBM_DBN module version
# than the skeleton-data one; confirm against the imported module.
from GRBM_DBN import test_GRBM_DBN
from GRBM_DBN import GRBM_DBN
from load_data_MNIST import load_data
from load_data_MNIST import load_raw_data

datasets = load_data()

#
# Train the network (original Polish comment: "UCZYMY SIEĆ")
# finetune=False: pretraining only; the model is saved under saveToDir.
#
test_score, val_score = test_GRBM_DBN(finetune_lr=0.1, pretraining_epochs=[1, 1], pretrain_lr=[0.002, 0.02], k=1, weight_decay=0.0002, momentum=0.9, batch_size=128, datasets=datasets, hidden_layers_sizes=[784,784], finetune = False, saveToDir = '../results/MNIST/', loadModelFromFile = '', verbose = True)

#
# Use the trained network (original Polish comment: "UŻYCIE WYUCZONEJ SIECI")
#
dbn = GRBM_DBN.load('../results/MNIST/pretrained_model')
train_set, valid_set, test_set = load_raw_data()
# predicted classes of the first 13 patterns
print dbn.classify(train_set[0][1:13])
# true classes of the first 13 patterns
print train_set[1][1:13]
# NOTE(review): script fragment — Skeleton_matrix, njoints, Mean1, Std1 and
# class_count must be bound by earlier (unseen) code; later unseen code
# presumably consumes the second validate_model. Do not rename these bindings.

# Build the real-time skeleton feature matrix and normalise it with the
# pre-computed training-set statistics.
Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
Feature_normalized = normalize(Feature, Mean1, Std1)
# Debug output: sanity-check the normalised feature range.
print Feature_normalized.max()
print Feature_normalized.min()

### Feed into DBN
# The compiled Theano functions below read their input through this shared
# variable (borrow=True avoids copying the numpy array).
shared_x = theano.shared(numpy.asarray(Feature_normalized, dtype=theano.config.floatX), borrow=True)

# Fixed seed for reproducible weight initialisation before loading.
numpy_rng = numpy.random.RandomState(123)

### model 1 ##########################
# Output layer size: 10 states per gesture class plus 1 rest/ergodic state.
dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 500], n_outs=class_count*10+1)
dbn.load('dbn_model1less_nonvar2017-05-11-08-19-51.npy')  # pre-trained parameters
# Feed-forward pass via `givens`; output is the logistic layer's p(y|x).
validate_model = theano.function(inputs=[], outputs=dbn.logLayer.p_y_given_x, givens={ dbn.x: shared_x})
observ_likelihood_1 = validate_model()  # per-frame class posteriors from model 1
del dbn  # release model 1 before building model 2

### model 2 ##########################
# NOTE(review): model 2 loads the SAME checkpoint file as model 1 — both
# "models" are identical; verify whether a second checkpoint was intended.
dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 500], n_outs=class_count*10+1)
dbn.load('dbn_model1less_nonvar2017-05-11-08-19-51.npy')
validate_model = theano.function(inputs=[], outputs=dbn.logLayer.p_y_given_x, givens={ dbn.x: shared_x})
Feature_normalized = normalize(Feature, Mean1, Std1) ### Feed into DBN, theano requires the shared tensor representation shared_x = theano.shared(numpy.asarray(Feature_normalized, dtype=theano.config.floatX), borrow=True) numpy_rng = numpy.random.RandomState(123) ########################## ### model 1 第一种网络构架模式 # ########################## dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528, hidden_layers_sizes=[1000, 1000, 500], n_outs=201) dbn.load('dbn_2014-05-23-20-07-28.npy')#预先训练好的构架 #这里就是theano的奇葩函数构架 validate_model = theano.function(inputs=[], outputs=dbn.logLayer.p_y_given_x,#输出是逻辑回归层的输出 givens={ dbn.x: shared_x}) observ_likelihood_1 = validate_model()#调用函数得到结果 del dbn """ ########################## ### model 2 ########################## dbn = GRBM_DBN(numpy_rng=numpy_rng, n_ins=528,