def test_Embedding_ASM_act_14_exp_2_ACCV_fc_j0():
    """
    Just check the pose displaying.
    """
    from mpl_toolkits.mplot3d import Axes3D
    import imgproc
    import iread.h36m_hmlpe as h36m
    data_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_Embedding_ASM_act_14_exp_2_ACCV_fc_j0/batches.meta'
    data_path1 = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta'
    meta = mio.unpickle(data_path)
    meta1 = mio.unpickle(data_path1)
    print 'Len of feature list is {} \t dims = {}'.format(len(meta['feature_list']),
                                                          meta['feature_dim'])
    for i, e in enumerate(meta['feature_list']):
        print 'idx {}:\t shapes = {}'.format(i, e.shape)
    f0 = meta['feature_list'][0]
    f0_1 = meta1['feature_list'][0]
    ndata = f0.shape[-1]
    limbs, params = h36m.part_idx, {'elev': -89, 'azim': -107, 'linewidth': 3}
    fig = pl.figure()
    idx = 0
    fig.add_subplot(2, 1, 1, projection='3d')
    # Flatten the selected pose columns (Fortran order); the embedding pose is
    # rescaled before converting it from relative-skeleton to relative coordinates.
    p = f0[..., idx].reshape((-1, 1), order='F') * 1200
    p1 = f0_1[..., idx].reshape((-1, 1), order='F')
    print p
    pp = df.convert_relskel2rel(p).reshape((-1, 1), order='F')
    diff = pp.reshape((-1, 1), order='F') - p1
    imgproc.turn_off_axis()
    dutils.show_3d_skeleton(p1, limbs, params)
    pl.show()
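
# A minimal illustrative sketch, not part of the original pipeline: it shows how
# a flattened pose column like the ones loaded above can be viewed as per-joint
# (x, y, z) rows for quick inspection.  The 17-joint, column-major ('F') layout
# is assumed from the reshape calls used elsewhere in this file, and
# 'pose_to_joints' is a hypothetical helper name.
def pose_to_joints(p, njoints=17):
    import numpy as np
    # p is a (njoints * 3, 1) column in Fortran order -> (njoints, 3) rows.
    return np.asarray(p).reshape((3, njoints), order='F').T

# Example usage (inside the test above):
#   joints = pose_to_joints(p1)
#   print joints[0]   # root joint coordinates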
def show_the_most_violated_poses():
    from mpl_toolkits.mplot3d import Axes3D
    import imgproc
    import iread.h36m_hmlpe as h36m
    saved_model_path = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat'
    saved_model_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/FCJ0_act_14_graph_0029_test_norm_cumulate_update'
    data_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta'
    meta = mio.unpickle(data_path)
    all_pose = meta['feature_list'][0]
    ssolver = Solver.get_saved_model(saved_model_path)
    stat = ssolver['model_state']['stat']
    cnt_sample = stat['sample_candidate_counts']
    mvc = stat['most_violated_counts']
    ntrain = cnt_sample.size
    # Sort training samples by how often they were the most violated candidate.
    sorted_indexes = sorted(range(ntrain), key=lambda k: mvc[k], reverse=True)
    show_num = 144
    selected_indexes = sorted_indexes[:show_num]
    max_show_row = 16
    n_row = (show_num - 1) // max_show_row + 1
    nc = 0
    selected_pose = all_pose[:, selected_indexes]
    limbs = h36m.part_idx
    fig = pl.figure()
    params = {'elev': -89, 'azim': -107, 'linewidth': 3}
    selected_cnt = mvc[selected_indexes]
    print n_row, max_show_row
    for r in range(n_row):
        for c in range(max_show_row):
            if nc == show_num:
                break
            # Each pose is stored as a flattened (3 x 17) column in Fortran order.
            p = selected_pose[..., nc].reshape((3, 17), order='F').T
            nc = nc + 1
            # pl.subplot(n_row, max_show_row, c)
            ax = fig.add_subplot(n_row, max_show_row, nc, projection='3d')
            imgproc.turn_off_axis()
            dutils.show_3d_skeleton(p, limbs, params)
            pl.title('mvc={}'.format(selected_cnt[nc - 1]))
    pl.show()
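
# A minimal sketch for illustration only: it summarizes how concentrated the
# violations are over the training samples, assuming 'most_violated_counts' is
# a 1-D numpy array as loaded above.  'summarize_violation_counts' is a
# hypothetical helper, not part of the original code.
def summarize_violation_counts(mvc, top_k=144):
    import numpy as np
    mvc = np.asarray(mvc).ravel()
    order = np.argsort(mvc)[::-1]          # sample indexes sorted by count, descending
    top = mvc[order[:top_k]]
    total = float(mvc.sum()) if mvc.sum() > 0 else 1.0
    print 'top-%d samples cover %.2f%% of all violations' % (top_k, 100.0 * top.sum() / total)
    return order[:top_k]

# Example usage with the statistics loaded in show_the_most_violated_poses:
#   selected = summarize_violation_counts(stat['most_violated_counts'])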
def estimate_pose_main_process(self, input_dic, output_dic):
    import imgproc
    load_next = True
    data, raw_img = input_dic['next_data'], input_dic['raw_img']
    ndata = data[0].shape[-1]
    # output_dic['num_cases'] += [data[0].shape[-1]]
    buf = np.require(np.zeros((data[0].shape[-1], input_dic['data_dim']),
                              dtype=np.single),
                     requirements='C')
    data += [buf]
    start_t = time()
    self.libmodel.startFeatureWriter(data, input_dic['output_layer_idx'])
    if load_next:
        # Grab the next frame from the camera while the GPU batch is running.
        input_dic['next_data'], input_dic['raw_img'], input_dic['bnd'] = \
            self.get_hmlpe_posedata_from_camera(input_dic['camera'],
                                                self.test_data_provider)
    self.finish_batch()
    print '[%.6f seconds]' % (time() - start_t)
    if input_dic['target_type'] in input_dic['convert_dic']:
        output_dic['est'] = input_dic['convert_dic'][input_dic['target_type']](buf.T)
    else:
        output_dic['est'] = buf.T
    if not load_next:
        return
    sys.stdout.flush()
    tmp = input_dic['raw_img']
    input_dic['camera_im'].set_data(tmp)
    input_dic['camera_fig'].canvas.draw()
    s = int(np.sqrt(data[0].shape[0]))
    if input_dic['target_type'] == 'hmlpe_2d':
        img = Image.fromarray(np.require(input_dic['raw_img'], dtype=np.uint8))
        sx, sy = data[0].shape[1], data[0].shape[0]
        output_dic['est'] = output_dic['est'].reshape((2, -1, ndata), order='F')
        njoints = output_dic['est'].shape[1]
        cur_bnd = input_dic['bnd']
        # Map the normalized joint coordinates back into image coordinates;
        # each bounding box is (x, y, width, height).
        bnd_sx = np.tile(np.asarray([(v[2] + 1.0) for v in cur_bnd]).reshape((1, ndata)),
                         (njoints, 1)).reshape((1, njoints, ndata), order='F')
        bnd_sy = np.tile(np.asarray([(v[3] + 1.0) for v in cur_bnd]).reshape((1, ndata)),
                         (njoints, 1)).reshape((1, njoints, ndata), order='F')
        bnd_s = np.concatenate((bnd_sx, bnd_sy), axis=0)
        bnd_ax = np.tile(np.asarray([v[0] for v in cur_bnd]).reshape((1, ndata)),
                         (njoints, 1)).reshape((1, njoints, ndata), order='F')
        bnd_ay = np.tile(np.asarray([v[1] for v in cur_bnd]).reshape((1, ndata)),
                         (njoints, 1)).reshape((1, njoints, ndata), order='F')
        bnd_a = np.concatenate((bnd_ax, bnd_ay), axis=0)
        output_dic['est'] = output_dic['est'] * bnd_s + bnd_a
        draw = ImageDraw.Draw(img)
        # Draw the detection bounding boxes.
        for b in cur_bnd:
            draw.rectangle((b[0], b[1], b[0] + b[2], b[1] + b[3]))
        # Draw the center rectangle.
        hx, hy = img.size
        draw.rectangle((hx / 2 - hy / 2, 0, hx / 2 + hy / 2, hy), outline=(255, 0, 0))
        self.draw2d_skelenton(output_dic['est'], hmlpe.part_idx, draw)
        input_dic['pose_ax'].set_data(np.asarray(img))
        input_dic['pose_fig'].canvas.draw()
    else:
        # Plot the 3-D pose.
        input_dic['pose_ax'].cla()
        # input_dic['pose_ax'].view_init(azim=-92, elev=-46)
        vlim = 0.4
        input_dic['pose_ax'].set_xlim([-vlim, vlim])
        input_dic['pose_ax'].set_ylim([-vlim, vlim])
        input_dic['pose_ax'].set_zlim([-vlim, vlim])
        self.plot_skelenton(output_dic['est'], h36m.part_idx, input_dic['pose_ax'])
        imgproc.turn_off_axis(input_dic['pose_ax'])
        input_dic['pose_fig'].canvas.draw()
    if 'outputdir' in input_dic:
        outputdir = input_dic['outputdir']
        savecnt = input_dic['savecnt']
        print outputdir
        for i in range(2):
            plt.figure(i)
            plt.savefig(iu.fullfile(outputdir, 'fig_%02d_%06d.jpg' % (i, savecnt)))
        input_dic['savecnt'] = savecnt + 1
    return input_dic['camera_im'], input_dic['pose_ax']
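
# A minimal, self-contained sketch of the 2-D denormalization step above, written
# for clarity rather than speed.  It assumes est has shape (2, njoints, ndata)
# with coordinates expressed relative to each bounding box, and that each box is
# an (x, y, width, height) tuple, matching how 'bnd' is used in
# estimate_pose_main_process.  'denormalize_joints' is a hypothetical helper name.
def denormalize_joints(est, bnd):
    import numpy as np
    est = np.asarray(est)
    out = est.copy()
    for k, (x, y, w, h) in enumerate(bnd):
        out[0, :, k] = est[0, :, k] * (w + 1.0) + x   # x coordinates
        out[1, :, k] = est[1, :, k] * (h + 1.0) + y   # y coordinates
    return out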