import numpy as np
import theano
import iutils as iu
import dutils
import mio
import pylab as pl  # assumed import; only pl.show() is used below


def eval_mm_mpjpe(self):
    """ Use the score-net prediction as the baseline, perturb it with
    PCA-directed Gaussian noise to generate candidate poses, and report
    the MPJPE of the averaged top-K rescored candidates.
    """
    train = False
    feat_E = self.candidate_feat_E / self.train_dp.max_depth
    # Note that the noise magnitude has already been multiplied into feat_E
    net = self.feature_net
    output_layer_name = self.prediction_layer_name
    pred_outputs = net.get_layer_by_names([output_layer_name])[0].outputs
    pred_func = theano.function(inputs=net.inputs, outputs=pred_outputs,
                                on_unused_input='ignore')
    itm = iu.itimer()
    itm.restart()
    cur_data = self.get_next_batch(train)
    sample_num = 500
    topK = 10
    pred_mpjpe_l = []
    topK_mpjpe_l = []
    itm.tic()
    while True:
        self.print_iteration()
        ndata = cur_data[2][0].shape[-1]
        input_data = self.prepare_data(cur_data[2])
        gt_target = input_data[1].T
        print 'gt_target.shape = {}'.format(gt_target.shape)
        imgfeatures = self.calc_image_features([input_data[0]]).T
        print 'imgfeatures.shape = {}'.format(imgfeatures.shape)
        preds = pred_func(*[input_data[0]])[0].T
        print 'Prediction.shape = {}'.format(preds.shape)
        # (dim, sample_num, ndata) candidate poses around each prediction
        candidate_targets = self.add_pca_noise(preds, feat_E, sample_num, r=1)
        candidate_features = self.calc_target_feature_func(
            self.gpu_require(
                candidate_targets.reshape((-1, sample_num * ndata)).T))[0].T
        print 'candidate_features.shape = {}'.format(candidate_features.shape)
        # score every candidate by its dot product with the image feature
        scores = (candidate_features.reshape((-1, sample_num, ndata), order='F') *
                  imgfeatures.reshape((-1, 1, ndata), order='F')).sum(axis=0)
        print 'score.shape = {}'.format(scores.shape)
        # indexes of the topK highest-scoring candidates per datum
        sidx = np.argpartition(-scores, topK, axis=0)[:topK, ...]
        sidx_in_arr = sidx + np.array(range(ndata)) * sample_num  # flat indexes (unused)
        # average the topK candidates as the final estimate
        topK_target_list = [candidate_targets[:, sidx[..., k].flatten(), k].mean(
            axis=1, keepdims=True) for k in range(ndata)]
        topK_target = np.concatenate(topK_target_list, axis=1)
        topK_mpjpe = dutils.calc_mpjpe_from_residual(topK_target - gt_target, 17) * 1200
        pred_mpjpe = dutils.calc_mpjpe_from_residual(preds - gt_target, 17) * 1200
        print 'topK_mpjpe = {} pred_mpjpe = {}'.format(topK_mpjpe.mean(),
                                                       pred_mpjpe.mean())
        pred_mpjpe_l.append(pred_mpjpe.flatten())
        topK_mpjpe_l.append(topK_mpjpe.flatten())
        self.epoch, self.batchnum = self.test_dp.epoch, self.test_dp.batchnum
        if self.epoch == 1:  # stop after one pass over the test set
            break
        cur_data = self.get_next_batch(train)
    itm.toc()
    pred_mpjpe_arr = np.concatenate(pred_mpjpe_l)
    topK_mpjpe_arr = np.concatenate(topK_mpjpe_l)
    print 'Compute {} batch {} data in total'.format(len(pred_mpjpe_l),
                                                     pred_mpjpe_arr.shape)
    print 'in total pred_mpjpe = {} \t topK mpjpe = {}'.format(
        pred_mpjpe_arr.mean(), topK_mpjpe_arr.mean())
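# eval_mm_mpjpe relies on self.add_pca_noise to turn one prediction into
# sample_num candidate poses. Its real implementation is not part of this
# file; the sketch below is only a plausible reading of it, assuming feat_E
# is a (dim, n_components) matrix of PCA directions with the noise magnitude
# already folded in (as the comment in eval_mm_mpjpe states).
def add_pca_noise_sketch(preds, feat_E, sample_num, r=1):
    dim, ndata = preds.shape
    n_comp = feat_E.shape[1]
    # one Gaussian coefficient per PCA direction, per sample, per datum
    coeff = np.random.randn(n_comp, sample_num * ndata) * r
    noise = feat_E.dot(coeff).reshape((dim, sample_num, ndata))
    # broadcast the prediction over the sample axis and perturb it; the
    # (dim, sample_num, ndata) layout matches how eval_mm_mpjpe indexes
    # candidate_targets
    return preds.reshape((dim, 1, ndata)) + noise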
def get_next_batch(self):
    if self.data_dic is None or len(self.batch_range) > 1:
        self.data_dic = self.get_batch(self.curr_batchnum)
    epoch, batchnum = self.curr_epoch, self.curr_batchnum
    self.advance_batch()
    cur_ndata = len(self.data_dic['cur_batch_indexes'])
    cur_batch_indexes = self.data_dic['cur_batch_indexes']
    self.data_dic['cur_candidate_indexes'] = self.generate_candidate_index(
        cur_batch_indexes, self.random_prob)
    cur_candidate_indexes = self.data_dic['cur_candidate_indexes']
    gt_jt_rel = self.batch_meta['feature_list'][0][..., cur_batch_indexes]
    candidate_jt_rel = self.batch_meta['feature_list'][0][..., cur_candidate_indexes]
    img_feature = self.batch_meta['feature_list'][1][..., cur_batch_indexes]
    jt_candidate_feature = self.batch_meta['feature_list'][2][..., cur_candidate_indexes]
    # per-joint residual between ground-truth and candidate poses
    z = (gt_jt_rel - candidate_jt_rel).reshape((3, self.num_joints, cur_ndata),
                                               order='F')
    score = dutils.calc_RBF_score(z, self.rbf_sigma, 3)
    mpjpe = dutils.calc_mpjpe_from_residual(z, self.num_joints)
    # alldata layout: [gt pose, image feature, candidate pose feature,
    #                  RBF score, normalized MPJPE]
    alldata = [np.require(gt_jt_rel / self.max_depth,
                          dtype=np.single, requirements='C'),
               np.require(img_feature.reshape((-1, cur_ndata), order='F'),
                          dtype=np.single, requirements='C'),
               np.require(jt_candidate_feature.reshape((-1, cur_ndata), order='F'),
                          dtype=np.single, requirements='C'),
               np.require(score.reshape((-1, cur_ndata)),
                          dtype=np.single, requirements='C'),
               np.require(mpjpe.reshape((-1, cur_ndata)) / self.max_depth,
                          dtype=np.single, requirements='C')]
    # import iutils as iu
    # iu.print_common_statistics(alldata[0], 'gt')
    # iu.print_common_statistics(alldata[1], 'imgfeature')
    # iu.print_common_statistics(alldata[2], 'jtfeature')
    # iu.print_common_statistics(alldata[3], 'mpjpe')
    return epoch, batchnum, alldata
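# dutils.calc_RBF_score is used above to turn the gt-vs-candidate residual
# into a similarity score, but its definition is not shown in this file.
# A minimal sketch, assuming a Gaussian of the per-joint residual norm
# averaged over joints (the third argument taken as the per-joint group
# size, 3 for 3D coordinates):
def calc_rbf_score_sketch(z, sigma, group_size=3):
    # z: (group_size, num_joints, ndata) residuals, as built in get_next_batch
    sq_norm = (z ** 2).sum(axis=0)                              # (num_joints, ndata)
    return np.exp(-sq_norm / (2.0 * sigma ** 2)).mean(axis=0)   # (ndata,)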
def eval_mpjpe(self, op):
    """ This function will simply evaluate MPJPE and save the predictions. """
    train = False
    net = self.feature_net
    output_layer_name = self.prediction_layer_name
    pred_outputs = net.get_layer_by_names([output_layer_name])[0].outputs
    pred_func = theano.function(inputs=net.inputs, outputs=pred_outputs,
                                on_unused_input='ignore')
    itm = iu.itimer()
    itm.restart()
    cur_data = self.get_next_batch(train)
    pred_mpjpe_l = []
    pred_target_l = []
    save_folder = op.get_value('save_res_path')
    assert save_folder is not None
    iu.ensure_dir(save_folder)
    save_path = iu.fullfile(save_folder, 'pose_prediction')
    net.set_train_mode(False)  # added in Oct 27, 2015, after the ICCV submission
    while True:
        self.print_iteration()
        ndata = cur_data[2][0].shape[-1]
        input_data = self.prepare_data(cur_data[2])
        gt_target = input_data[1].T
        print 'gt_target.shape = {}'.format(gt_target.shape)
        preds = pred_func(*[input_data[0]])[0].T
        print 'Prediction.shape = {}'.format(preds.shape)
        pred_mpjpe = dutils.calc_mpjpe_from_residual(preds - gt_target, 17) * 1200
        print 'pred_mpjpe = {}'.format(pred_mpjpe.mean())
        self.epoch, self.batchnum = self.test_dp.epoch, self.test_dp.batchnum
        pred_mpjpe_l.append(pred_mpjpe)
        pred_target_l.append(preds)
        if self.epoch == 1:  # stop after one pass over the test set
            break
        cur_data = self.get_next_batch(train)
    preds = np.concatenate(pred_target_l, axis=1)
    mpjpe = np.concatenate(pred_mpjpe_l, axis=1)
    # save the concatenated predictions and per-sample errors
    mio.pickle(save_path, {'preds': preds, 'mpjpe': mpjpe})
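# MPJPE (mean per-joint position error) is the average Euclidean distance
# between predicted and ground-truth joints. dutils.calc_mpjpe_from_residual
# is not defined in this file; below is a sketch consistent with how it is
# called here, both on (3 * num_joints, ndata) stacks and on already-reshaped
# (3, num_joints, ndata) residuals, using the same F-order convention as the
# rest of this file.
def calc_mpjpe_from_residual_sketch(residual, num_joints):
    r = residual.reshape((3, num_joints, -1), order='F')
    per_joint_err = np.sqrt((r ** 2).sum(axis=0))        # (num_joints, ndata)
    # callers multiply the result by 1200, which appears to map the
    # normalized coordinates back to millimetres
    return per_joint_err.mean(axis=0).reshape((1, -1))   # (1, ndata)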
def read_inputs():
    d = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_12/batches.meta')
    info = d['info']
    print info.keys()
    indexes = info['indexes']
    Y = d['feature_list'][0]  # pose targets
    X = d['feature_list'][1]  # image features
    train_range = range(0, 76048)
    test_range = range(76048, 105368)
    print min(indexes[train_range]), max(indexes[train_range])
    print min(indexes[test_range]), max(indexes[test_range])
    print 'X '
    iu.print_common_statistics(X)
    X_train = X[..., train_range]
    Y_train = Y[..., train_range]
    feature_dim = X_train.shape[0]
    X_test = X[..., test_range]
    Y_test = Y[..., test_range]
    params = {'Sigma': np.ones(feature_dim + 1) * 0.0001}
    r = LinearRegression(params)
    r.fit(simpleDP(X_train, Y_train))
    Y_pred = r.apply(X_test)
    print Y_pred.shape
    print Y_test[:5, :5]
    print Y_pred[:5, :5]
    diff = Y_test - Y_pred
    print 'squared diff = {}'.format(np.sum(diff.flatten() ** 2))
    mpjpe = dutils.calc_mpjpe_from_residual(diff, 17)
    print 'average mpjpe {}'.format(np.mean(mpjpe.flatten()))
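# LinearRegression and simpleDP come from elsewhere in the repo. Given the
# params dict above (a 'Sigma' vector of length feature_dim + 1), a ridge
# regressor with an appended bias row and a per-dimension regularizer would
# fit the calling pattern; the classes below are only a hedged guess at
# that interface, not the repo's implementation.
class SimpleDPSketch(object):
    """Stand-in for simpleDP: bundles the design matrix and the targets."""
    def __init__(self, X, Y):
        self.X, self.Y = X, Y


class RidgeSketch(object):
    """Stand-in for LinearRegression: ridge with per-dimension Sigma."""
    def __init__(self, params):
        self.sigma = params['Sigma']  # length feature_dim + 1 (last entry: bias)
        self.W = None

    def fit(self, dp):
        # dp.X: (feature_dim, n), dp.Y: (target_dim, n)
        Xb = np.vstack([dp.X, np.ones((1, dp.X.shape[1]))])  # append bias row
        A = Xb.dot(Xb.T) + np.diag(self.sigma)
        self.W = np.linalg.solve(A, Xb.dot(dp.Y.T)).T        # (target_dim, feature_dim + 1)
        return self

    def apply(self, X):
        Xb = np.vstack([X, np.ones((1, X.shape[1]))])
        return self.W.dot(Xb)                                # (target_dim, n)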
def show_highest_score(train):
    """ Load a batch from the train or test set, find the most violated
    candidate, and compare its score against the ground truth and the
    best-matching candidate.
    """
    solver = resume_solver()
    # stat = solver.stat
    # print stat.keys()
    # mvc = stat['most_violated_counts']
    # scc = stat['sample_candidate_counts']
    # print 'mvc sum = {}, scc = {}'.format(mvc.sum(), scc.sum())
    # test_shared_weights_online(solver.train_net.layers)
    # print '<<<<<<<<<<<<<<<<{}'.format(solver.train_net.layers is solver.eval_net.layers)
    # print 'train net inputs {}'.format(solver.train_net.inputs)
    # print 'eval net inputs {}'.format(solver.eval_net.inputs)
    # print 'eval net outputs {}'.format(solver.eval_net.outputs)
    # GraphParser.print_graph_connections(solver.train_net.layers)
    # return
    dp = solver.train_dp if train else solver.test_dp
    data = dp.get_next_batch(train)
    ep, bn, alldata, ext_data = solver.find_most_violated_ext(data,
                                                              use_zero_margin=True,
                                                              train=train)
    print ep, bn, len(alldata)
    gt_target = alldata[0]
    gt_margin = alldata[4]
    img_features = alldata[1]
    mv_target = ext_data[0]  # most violated candidate pose
    batch_candidate_indexes = ext_data[1]
    print 'batch candidate indexes shape is {}'.format(batch_candidate_indexes.shape)
    mv_features = alldata[3]
    gt_features = alldata[2]
    # mv_margin = solver.calc_margin(gt_target - mv_target)
    mv_margin = alldata[5]
    print 'mv shape {}, gt shape {}'.format(mv_target.shape, gt_target.shape)
    fl = solver.train_dp.data_dic['feature_list']
    batch_candidate_targets = fl[0][..., batch_candidate_indexes]
    ndata = gt_target.shape[-1]
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(mv_features.T),
                    solver.gpu_require(mv_margin.T)]
    print 'Eval inputs are {}'.format([l.name for l in solver.eval_net.inputs])
    mv_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(gt_features.T),
                    solver.gpu_require(gt_margin.T)]
    gt_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    res_mpjpe, bmi = get_batch_best_match(batch_candidate_targets, gt_target, solver)
    print 'Current best match mpjpe is {}'.format(np.mean(res_mpjpe) * 1200)
    bmi_raw = batch_candidate_indexes[bmi]
    bm_features = fl[2][..., bmi_raw]
    bm_targets = fl[0][..., bmi_raw]
    residuals = bm_targets - gt_target
    mpjpe = dutils.calc_mpjpe_from_residual(residuals, 17)  # mpjpe for best match
    print 'Calc again mpjpe is {}'.format(np.mean(mpjpe.flatten()) * 1200)
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(bm_features.T),
                    solver.gpu_require(gt_margin.T)]
    bm_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    # for evaluation
    # inputs = [solver.train_net.inputs[0], solver.train_net.inputs[2], solver.train_net.inputs[4]]
    # print 'inputs = {}'.format(inputs)
    # ff = theano.function(inputs=inputs,
    #                      outputs=solver.eval_net.layers['net2_score'][2].outputs)
    # print solver.eval_net.layers['net1_score'][2].outputs
    # res = solver.call_func(ff, data_to_eval)
    # r = res[0]
    # diff = r - gt_score
    # print '=======The abs difference is {}==========='.format(np.abs(diff).sum())
    all_input_data = [solver.gpu_require(e.T) for e in alldata[1:]]
    solver.analyze_num_sv(all_input_data)
    # all_input_data = [all_input_data[0], all_input_data[2], all_input_data[1],
    #                   all_input_data[4], all_input_data[3]]
    # solver.print_layer_outputs(all_input_data)
    # Ignore the use_zero_margin flag
    whole_candidate_set = solver.train_dp.data_dic['feature_list'][0][..., solver.train_dp.data_range]
    # print 'Whole candidate_set shape is {}'.format(whole_candidate_set.shape)
    # what_is_the_best_match(whole_candidate_set, mv_target, solver)
    # show_what_is_best_all(solver.train_dp, solver.test_dp, solver)
    mv_margin = solver.calc_margin(gt_target - mv_target)  # MPJPE
    print 'gt_margin<======================'
    iu.print_common_statistics(gt_margin)
    print 'mv_margin<======================'
    iu.print_common_statistics(alldata[5])
    show_bm_cmp(ndata, gt_target, mv_target, bm_targets, mv_score, gt_score,
                bm_score, solver)
    show_masked_plot(ndata, mv_margin, mv_score, gt_score, bm_score)
    show_raw_plot(ndata, mv_margin, mv_score, gt_score)
    # print 'Strange Here: {:.6f}% is correct'.format()
    pl.show()
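# get_batch_best_match is defined elsewhere; from its use above it returns,
# for every ground-truth pose, the MPJPE of the closest candidate and that
# candidate's index. A sketch under the assumption that the candidates form
# one shared (dim, n_candidates) pool; the solver argument, unused here,
# may supply margin or GPU helpers in the real version.
def get_batch_best_match_sketch(candidates, gt, solver=None, num_joints=17):
    ndata = gt.shape[-1]
    best_err = np.zeros(ndata)
    best_idx = np.zeros(ndata, dtype=np.int64)
    for k in range(ndata):
        res = candidates - gt[:, k:k + 1]  # residual to every candidate
        err = dutils.calc_mpjpe_from_residual(res, num_joints).flatten()
        best_idx[k] = err.argmin()
        best_err[k] = err[best_idx[k]]
    return best_err, best_idx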
def show_highest_score(train, solver):
    """ Load a batch from the train or test set, find the most violated
    candidate, and compare its score against the ground truth and the
    best-matching candidate.
    """
    dp = solver.train_dp if train else solver.test_dp
    data = solver.get_next_batch(train)
    alldata, ext_data = solver.find_most_violated_ext(data[2],
                                                      use_zero_margin=True,
                                                      train=train)
    print len(alldata)
    gt_target = alldata[0]
    gt_margin = alldata[4]
    img_features = alldata[1]
    mv_target = ext_data[0]  # most violated candidate pose
    batch_candidate_indexes = ext_data[1]
    print 'batch candidate indexes shape is {}'.format(batch_candidate_indexes.shape)
    mv_features = alldata[3]
    gt_features = alldata[2]
    # mv_margin = solver.calc_margin(gt_target - mv_target)
    mv_margin = alldata[5]
    print 'mv shape {}, gt shape {}'.format(mv_target.shape, gt_target.shape)
    fl = solver.get_all_candidates(solver.train_dp)
    batch_candidate_targets = fl[0][..., batch_candidate_indexes]
    ndata = gt_target.shape[-1]
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(mv_features.T),
                    solver.gpu_require(mv_margin.T)]
    print 'Eval inputs are {}'.format([l.name for l in solver.eval_net.inputs])
    mv_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(gt_features.T),
                    solver.gpu_require(gt_margin.T)]
    gt_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    res_mpjpe, bmi = get_batch_best_match(batch_candidate_targets, gt_target, solver)
    print 'Current best match mpjpe is {}'.format(np.mean(res_mpjpe) * 1200)
    bmi_raw = batch_candidate_indexes[bmi]
    bm_features = fl[2][..., bmi_raw]
    bm_targets = fl[0][..., bmi_raw]
    residuals = bm_targets - gt_target
    mpjpe = dutils.calc_mpjpe_from_residual(residuals, 17)  # mpjpe for best match
    print 'mpjpe(bm_target, gt_target) is {}'.format(np.mean(mpjpe.flatten()) * 1200)
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(bm_features.T),
                    solver.gpu_require(gt_margin.T)]
    bm_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    all_input_data = [solver.gpu_require(e.T) for e in alldata[1:]]
    solver.analyze_num_sv(all_input_data)
    # all_input_data = [all_input_data[0], all_input_data[2], all_input_data[1],
    #                   all_input_data[4], all_input_data[3]]
    # solver.print_layer_outputs(all_input_data)
    # Ignore the use_zero_margin flag
    whole_candidate_set = solver.train_dp.data_dic['feature_list'][0][..., solver.train_dp.data_range]
    # print 'Whole candidate_set shape is {}'.format(whole_candidate_set.shape)
    # what_is_the_best_match(whole_candidate_set, mv_target, solver)
    # show_what_is_best_all(solver.train_dp, solver.test_dp, solver)
    mv_margin = solver.calc_margin(gt_target - mv_target)  # MPJPE
    print 'gt_margin<======================'
    iu.print_common_statistics(gt_margin)
    print 'mv_margin<======================'
    iu.print_common_statistics(alldata[5])
    show_bm_cmp(ndata, gt_target, mv_target, bm_targets, mv_score, gt_score,
                bm_score, solver)
    show_masked_plot(ndata, mv_margin, mv_score, gt_score, bm_score)
    show_raw_plot(ndata, mv_margin, mv_score, gt_score)
    # print 'Strange Here: {:.6f}% is correct'.format()
    pl.show()
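# Both eval_mm_mpjpe and the show_highest_score variants rank candidate
# poses with the same bilinear rule: the dot product between the image
# embedding and the pose embedding. eval_mm_mpjpe computes it explicitly
# in numpy, and the eval_net presumably computes a related score on the
# GPU. Restated on its own for clarity:
def dot_product_score_sketch(img_features, pose_features):
    # img_features:  (feat_dim, ndata)
    # pose_features: (feat_dim, ndata), one candidate per datum
    return (img_features * pose_features).sum(axis=0)  # (ndata,)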