Std_skel = SK_normalization['Std1']

####################################################################
# DBN for skeleton modules
####################################################################
# ------------------------------------------------------------------------------
# symbolic variables
x_skeleton = ndtensor(len(tr._skeleon_in_shape))(name='x_skeleton')  # skeleton input
x_skeleton_ = _shared(empty(tr._skeleon_in_shape))

dbn = GRBM_DBN(numpy_rng=random.RandomState(123), n_ins=891,
               hidden_layers_sizes=[2000, 2000, 1000], n_outs=101,
               input_x=x_skeleton, label=y)
# we load the pretrained DBN skeleton parameters here
load_path = '/idiap/user/dwu/chalearn/result/try/36.7% 2015.07.09.17.53.10'
dbn.load_params_DBN(os.path.join(load_path, 'paramsbest.zip'))

test_model = function([], dbn.logLayer.p_y_given_x,
                      givens={x_skeleton: x_skeleton_},
                      on_unused_input='ignore')

for file_count, file in enumerate(samples):
    condition = (file_count > -1)
    if condition:  # wudi only used the first 650 for validation !!! Lio be careful!
        save_path = os.path.join(data, file)
        print file
        time_start = time()
        # we load the precomputed feature set or recompute the whole feature set
        if os.path.isfile(save_path):
            print "loading existing file"
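# A minimal usage sketch of the compiled test_model above: it takes no explicit
# inputs because `givens` rebinds x_skeleton to the shared variable x_skeleton_,
# so fresh data is supplied by overwriting that shared storage before each call.
# `skeleton_feature` (a float32 numpy array shaped like tr._skeleon_in_shape,
# already normalised with Std_skel) is an assumption for illustration.
x_skeleton_.set_value(skeleton_feature, borrow=True)
pred_prob = test_model()               # posteriors p(y|x), shape (batch, 101)
pred_label = pred_prob.argmax(axis=1)  # hard per-sample class decision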
####################################################################
# DBN for skeleton modules
####################################################################
# ------------------------------------------------------------------------------
# symbolic variables
x_skeleton = ndtensor(len(tr._skeleon_in_shape))(name='x_skeleton')  # skeleton input
x_skeleton_ = _shared(empty(tr._skeleon_in_shape))

# sample number: 39; hidden_layers_sizes=[2000, 1000] -> error rate 77.1%;
# [2000, 2000, 1000] -> 78.4%
dbn = GRBM_DBN(numpy_rng=random.RandomState(123), n_ins=891,
               hidden_layers_sizes=[2000, 2000, 1000], n_outs=101,
               input_x=x_skeleton, label=y)
# we load the pretrained DBN skeleton parameters here; currently pretraining is
# done unsupervised, we can load the supervised pretraining parameters later.
# When you pretrain the network, comment out the following line.
dbn.load_params_DBN("/home/zhiquan/fancy/meterials/chalearn2014_fancy_data/result_temp/dbn/try/63.9% 2018.05.06.19.54.43/paramsbest.zip")

cost = dbn.finetune_cost
# function computing the number of errors
errors = dbn.errors
# wudi added the mean and standard deviation of the activation values to examine the neural net
# Reference: "Understanding the difficulty of training deep feedforward neural networks",
# Xavier Glorot, Yoshua Bengio
out_mean = T.stack(dbn.out_mean)
out_std = T.stack(dbn.out_std)

gparams = T.grad(cost, dbn.params)
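# A sketch of how a finetuning step could be assembled from `cost` and `gparams`
# with plain mini-batch SGD: the usual Theano idiom pairs each parameter with its
# gradient in an updates list. `learning_rate` and `y_` (a shared variable
# holding the labels of the current mini-batch) are assumptions here.
updates = [(param, param - learning_rate * gparam)
           for param, gparam in zip(dbn.params, gparams)]
train_model = function([], [cost, errors],
                       updates=updates,
                       givens={x_skeleton: x_skeleton_, y: y_},
                       on_unused_input='ignore')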
####################################################################
# DBN for skeleton modules
####################################################################
# ------------------------------------------------------------------------------
# symbolic variables
x_skeleton = ndtensor(len(tr._skeleon_in_shape))(name='x_skeleton')  # skeleton input
x_skeleton_ = _shared(empty(tr._skeleon_in_shape))

dbn = GRBM_DBN(numpy_rng=random.RandomState(123), n_ins=891,
               hidden_layers_sizes=[2000, 2000, 1000], n_outs=101,
               input_x=x_skeleton, label=y)
# we load the pretrained DBN skeleton parameters here; currently pretraining is
# done unsupervised, we can load the supervised pretraining parameters later
dbn.load_params_DBN(
    "/idiap/user/dwu/chalearn/result/try/37.8% 2015.07.09.13.26.11/paramsbest.zip"
)

cost = dbn.finetune_cost
# function computing the number of errors
errors = dbn.errors
# wudi added the mean and standard deviation of the activation values to examine the neural net
# Reference: "Understanding the difficulty of training deep feedforward neural networks",
# Xavier Glorot, Yoshua Bengio
out_mean = T.stack(dbn.out_mean)
out_std = T.stack(dbn.out_std)

gparams = T.grad(cost, dbn.params)
params = dbn.params
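# A sketch of how the activation statistics could be inspected, following the
# Glorot & Bengio diagnostic referenced above: compile a function returning the
# stacked per-layer means and standard deviations and print them for the batch
# currently held in the shared variable x_skeleton_.
monitor_model = function([], [out_mean, out_std],
                         givens={x_skeleton: x_skeleton_},
                         on_unused_input='ignore')
layer_means, layer_stds = monitor_model()
for i, (m, s) in enumerate(zip(layer_means, layer_stds)):
    print 'layer %d: activation mean %.4f, std %.4f' % (i, m, s)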
loader = DataLoader_with_skeleton_normalisation(src, tr.batch_size, 0, 1,
                                                Mean1, Std1)  # Lio changed it to read from HDF5 files

####################################################################
# DBN for skeleton modules
####################################################################
# ------------------------------------------------------------------------------
# symbolic variables
x_skeleton = ndtensor(len(tr._skeleon_in_shape))(name='x_skeleton')  # skeleton input
x_skeleton_ = _shared(empty(tr._skeleon_in_shape))

dbn = GRBM_DBN(numpy_rng=random.RandomState(123), n_ins=891,
               hidden_layers_sizes=[2000, 2000, 1000], n_outs=101,
               input_x=x_skeleton, label=y)
# we load the pretrained DBN skeleton parameters here
dbn.load_params_DBN('/home/zhiquan/fancy/meterials/chalearn2014_fancy_data/result_temp/dbn/try/57.6% 2018.05.06.23.42.32/paramsbest.zip')

####################################################################
# 3DCNN for video module
####################################################################
# we load the CNN parameters here
use.load = True
load_path = '/home/zhiquan/fancy/meterials/chalearn2014_fancy_data/result_temp/3dcnn/try/55.0% 2018.05.07.21.06.05/'
video_cnn = conv3d_chalearn(x, use, lr, batch, net, reg, drop, mom, tr,
                            res_dir, load_path)

#####################################################################
# fuse the ConvNet output with skeleton output -- need to change here
######################################################################
out = T.concatenate([video_cnn.out, dbn.sigmoid_layers[-1].output], axis=1)
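# A toy numpy illustration of the early-fusion step above: T.concatenate with
# axis=1 joins the two representations row-wise, so a video feature of width
# d_v and the 1000-wide last DBN hidden layer give one fused row of width
# d_v + 1000 per sample. The video width of 1024 is an assumption here.
import numpy
video_feat = numpy.zeros((tr.batch_size, 1024), dtype='float32')  # stand-in for video_cnn.out
skel_feat = numpy.zeros((tr.batch_size, 1000), dtype='float32')   # stand-in for the last sigmoid layer
fused = numpy.concatenate([video_feat, skel_feat], axis=1)        # shape (batch, 2024)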
Mean_skel, Std_skel, Mean_CNN, Std_CNN = \
    net_convnet3d_grbm_early_fusion.load_normalisation_constant(load_path)

####################################################################
# DBN for skeleton modules
####################################################################
# ------------------------------------------------------------------------------
# symbolic variables
x_skeleton = ndtensor(len(tr._skeleon_in_shape))(name='x_skeleton')  # skeleton input
x_skeleton_ = _shared(empty(tr._skeleon_in_shape))

dbn = GRBM_DBN(numpy_rng=random.RandomState(123), n_ins=891,
               hidden_layers_sizes=[2000, 2000, 1000], n_outs=101,
               input_x=x_skeleton, label=y)
# we load the pretrained DBN skeleton parameters here
load_path = '/idiap/user/dwu/chalearn/result/try/36.7% 2015.07.09.17.53.10'
dbn.load_params_DBN(os.path.join(load_path, 'paramsbest.zip'))

test_model = function([], dbn.logLayer.p_y_given_x,
                      givens={x_skeleton: x_skeleton_},
                      on_unused_input='ignore')

for file_count, file in enumerate(samples):
    condition = (file_count > -1)
    if condition:  # wudi only used the first 650 for validation !!! Lio be careful!
        save_path = os.path.join(data, file)
        print file
        time_start = time()
        # we load the precomputed feature set or recompute the whole feature set
        if os.path.isfile(save_path):
            print "loading existing file"
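# A sketch of the load-or-recompute branch the loop above begins: if the
# per-file feature set was already extracted, unpickle it; otherwise extract
# and cache it for the next run. cPickle and the `extract_features` helper are
# assumptions for illustration, not names from this file.
import cPickle
if os.path.isfile(save_path):
    print "loading existing file"
    video_features, skeleton_features = cPickle.load(open(save_path, 'rb'))
else:
    video_features, skeleton_features = extract_features(os.path.join(data, file))
    cPickle.dump((video_features, skeleton_features), open(save_path, 'wb'),
                 protocol=cPickle.HIGHEST_PROTOCOL)
print 'elapsed: %.2f s' % (time() - time_start)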
loader = DataLoader_with_skeleton_normalisation(src, tr.batch_size, 0, 1,
                                                Mean1, Std1)  # Lio changed it to read from HDF5 files

####################################################################
# DBN for skeleton modules
####################################################################
# ------------------------------------------------------------------------------
# symbolic variables
x_skeleton = ndtensor(len(tr._skeleon_in_shape))(name='x_skeleton')  # skeleton input
x_skeleton_ = _shared(empty(tr._skeleon_in_shape))

dbn = GRBM_DBN(numpy_rng=random.RandomState(123), n_ins=891,
               hidden_layers_sizes=[2000, 2000, 1000], n_outs=101,
               input_x=x_skeleton, label=y)
# we load the pretrained DBN skeleton parameters here; currently pretraining is
# done unsupervised, we can load the supervised pretraining parameters later
dbn.load_params_DBN("/idiap/user/dwu/chalearn/result/try/37.8% 2015.07.09.13.26.11/paramsbest.zip")

cost = dbn.finetune_cost
# function computing the number of errors
errors = dbn.errors
# wudi added the mean and standard deviation of the activation values to examine the neural net
# Reference: "Understanding the difficulty of training deep feedforward neural networks",
# Xavier Glorot, Yoshua Bengio
out_mean = T.stack(dbn.out_mean)
out_std = T.stack(dbn.out_std)

gparams = T.grad(cost, dbn.params)
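# A sketch of the Gaussian normalisation the DataLoader applies with Mean1 and
# Std1: each 891-dimensional skeleton feature vector is centred and scaled with
# training-set statistics before it reaches the DBN. The random `raw_batch`
# array is an assumption for illustration.
import numpy
raw_batch = numpy.random.rand(tr.batch_size, 891).astype('float32')  # stand-in skeleton features
normalised_batch = (raw_batch - Mean1) / Std1                        # zero mean, unit variance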