# Load a precomputed (video, skeleton-feature) pair for this sample from the
# on-disk cache, or recompute it from the raw sample and cache it, then begin
# computing the per-frame observation likelihoods in micro-batches.
# NOTE(review): relies on names defined elsewhere in this file (`data`, `file`,
# `GestureSample`, `used_joints`, `batch`, `x_`, `Mean_CNN`, `Std_CNN`,
# `STATE_NO`, `normalize`, `numpy`, `time`) — Python 2 code (print statement,
# cPickle, xrange).
save_path= os.path.join(data, file)
print file
time_start = time()  # start timing this sample's processing
# we load precomputed feature set or recompute the whole feature set
if os.path.isfile(save_path):
    print "loading exiting file"
    # cached pickle holds both the video tensor and the skeleton features
    data_dic = cPickle.load(open(save_path,'rb'))
    video = data_dic["video"]
    Feature_gesture = data_dic["Feature_gesture"]
    # both arrays must be aligned frame-for-frame along axis 0
    assert video.shape[0] == Feature_gesture.shape[0]
else:
    print("\t Processing file " + file)
    # Create the object to access the sample
    sample = GestureSample(os.path.join(data,file))
    print "finish loading samples"
    # extract video frames and skeleton-joint features for the test split
    video, Feature_gesture = sample.get_test_data_wudi_lio(used_joints)
    assert video.shape[0] == Feature_gesture.shape[0]
    print "finish preprocessing"
    # cache the extracted features so the next run takes the fast path above
    out_file = open(save_path, 'wb')
    cPickle.dump({"video":video, "Feature_gesture":Feature_gesture}, out_file, protocol=cPickle.HIGHEST_PROTOCOL)
    out_file.close()

print "start computing likelihood"
# one row per frame; 20 gesture classes * STATE_NO HMM states + 1 ergodic state
observ_likelihood = numpy.empty(shape=(video.shape[0],20*STATE_NO+1)) # 20 classed * 5 states + 1 ergodic state

# Python 2 integer division: any trailing frames beyond the last full
# micro-batch of `batch.micro` frames are dropped here.
for batchnumber in xrange(video.shape[0]/batch.micro):
    video_temp = video[batch.micro*batchnumber:batch.micro*(batchnumber+1),:]
    skel_temp = Feature_gesture[batch.micro*batchnumber:batch.micro*(batchnumber+1),:]
    # push the normalized micro-batch into the Theano shared input variable
    # (presumably `x_` is a theano.shared — TODO confirm)
    x_.set_value(normalize(video_temp, Mean_CNN, Std_CNN).astype("float32"),borrow=True)
# For every sample file: load its cached (video, skeleton-feature) pickle if
# one exists, otherwise extract the features from the raw sample and write the
# cache. NOTE(review): relies on names defined elsewhere (`samples`,
# `save_dst`, `data`, `GestureSample`, `used_joints`) — Python 2 code.
for file_count, file in enumerate(samples):
    # threshold of -1 means every sample is processed in this variant
    condition = (file_count > -1)
    if condition: #wudi only used first 650 for validation !!! Lio be careful!
        save_path = os.path.join(save_dst, file)
        if os.path.isfile(save_path):
            print "loading exiting file"
            # fast path: reuse the cached feature pickle
            data_dic = cPickle.load(open(save_path, 'rb'))
            video = data_dic["video"]
            Feature_gesture = data_dic["Feature_gesture"]
            # video frames and skeleton features must align along axis 0
            assert video.shape[0] == Feature_gesture.shape[0]
        else:
            print("\t Processing file " + file)
            # Create the object to access the sample
            sample = GestureSample(os.path.join(data, file))
            print "finish loading samples"
            # extract video frames plus skeleton-joint features for this sample
            video, Feature_gesture = sample.get_test_data_wudi_lio(used_joints)
            assert video.shape[0] == Feature_gesture.shape[0]
            print "finish preprocessing"
            # cache the result so subsequent runs take the fast path above
            out_file = open(save_path, 'wb')
            cPickle.dump({"video": video, "Feature_gesture": Feature_gesture}, out_file, protocol=cPickle.HIGHEST_PROTOCOL)
            out_file.close()
samples=glob("*") # because wudi unzipped all the files already! elif pc=="lio": samples=glob("*.zip") print len(samples), "samples found" for file_count, file in enumerate(samples): condition = (file_count > 650) if condition: #wudi only used first 650 for validation !!! Lio be careful! print("\t Processing file " + file) # Create the object to access the sample sample = GestureSample(os.path.join(data,file)) if not load_flag: video = sample.get_test_data_wudi_lio() save_path= os.path.join(save_dst, file) out_file = open(save_path, 'wb') cPickle.dump(video, out_file, protocol=cPickle.HIGHEST_PROTOCOL) out_file.close() else: save_path= os.path.join(data,file,'test') video = cPickle.load(open(save_path,"rb")) print video.shape print "start computing likelihood" observ_likelihood = numpy.empty(shape=(video.shape[0],20*5+1)) # 20 classed * 5 states + 1 ergodic state for batchnumber in xrange(video.shape[0]/batch.micro): video_temp = video[batch.micro*batchnumber:batch.micro*(batchnumber+1),:] x_.set_value(video_temp.astype("float32"),borrow=True) y_pred, p_y_given_x = evalu_model()