def estimate_real_time(self, gender, filename):
        pyRender = libRender.pyRenderMesh()
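        # pressure mat resolution in taxels (rows x cols)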
        mat_size = (64, 27)

        if torch.cuda.is_available():
            # Use for GPU
            GPU = True
            dtype = torch.cuda.FloatTensor
            print '######################### CUDA is available! #############################'
        else:
            # Use for CPU
            GPU = False
            dtype = torch.FloatTensor
            print '############################## USING CPU #################################'

        import convnet as convnet
        from torch.autograd import Variable

        if GPU == True:
            model = torch.load(filename)
            model = model.cuda()
        else:
            model = torch.load(filename, map_location='cpu')

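        # Real-time loop: take the latest pressure frame (self.pressure, presumably filled by a
        # ROS subscriber callback elsewhere in this class), preprocess it the same way as the
        # training data, run the network, and render the resulting SMPL pose on the bed model.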
        while not rospy.is_shutdown():


            pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*5.0, a_min=0, a_max=100)))

            pmat = gaussian_filter(pmat, sigma= 0.5)


            pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, self.bedangle, mat_size)
            pmat_stack = torch.Tensor(pmat_stack)

            images_up_non_tensor = np.array(PreprocessingLib().preprocessing_pressure_map_upsample(pmat_stack.numpy(), multiple=2))
            images_up = Variable(torch.Tensor(images_up_non_tensor).type(dtype), requires_grad=False)

            betas_est, root_shift_est, angles_est = model.forward_kinematic_angles_realtime(images_up)

            print betas_est, root_shift_est, angles_est
            angles_est = angles_est.reshape(72)

            for idx in range(10):
                #print shape_pose_vol[0][idx]
                self.m.betas[idx] = betas_est[idx]


            for idx in range(72):
                self.m.pose[idx] = angles_est[idx]


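            # Re-express the estimated root joint orientation in the render/bed frame:
            # axis-angle -> rotation matrix, pre-multiply by a fixed frame rotation built
            # from Euler angles [pi, 0, pi/2], then convert back to axis-angle.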
            init_root = np.array(self.m.pose[0:3])+0.000001
            init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
            root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi/2])
            #print root_rot
            trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

            self.m.pose[0] = trans_root[0]
            self.m.pose[1] = trans_root[1]
            self.m.pose[2] = trans_root[2]

            #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
            # self.m.pose[51] = selection_r


            pyRender.mesh_render_pose_bed(self.m, root_shift_est, self.point_cloud_array, self.pc_isnew, pmat, self.markers, self.bedangle)
            self.point_cloud_array = None
    def __init__(self, testing_database_file_f, testing_database_file_m, opt):
        '''Opens the specified pickle files to get the combined dataset:
        This dataset is a dictionary of pressure maps with the corresponding
        3d position and orientation of the markers associated with it.'''

        # change this to 'direct' when you are doing baseline methods
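        # CTRL_PNL is a 'control panel' dict of flags and hyperparameters that gets threaded
        # through the preprocessing and network code.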


        self.CTRL_PNL = {}
        self.CTRL_PNL['batch_size'] = 1
        self.CTRL_PNL['loss_vector_type'] = opt.losstype
        self.CTRL_PNL['verbose'] = opt.verbose
        self.opt = opt
        self.CTRL_PNL['num_epochs'] = 101
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = False
        self.CTRL_PNL['incl_ht_wt_channels'] = True
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = GPU
        self.CTRL_PNL['dtype'] = dtype
        self.CTRL_PNL['repeat_real_data_ct'] = 1
        self.CTRL_PNL['regr_angles'] = 1
        self.CTRL_PNL['depth_map_labels'] = False
        self.CTRL_PNL['depth_map_labels_test'] = False
        self.CTRL_PNL['depth_map_output'] = True
        self.CTRL_PNL['depth_map_input_est'] = False #do this if we're working in a two-part regression
        self.CTRL_PNL['precomp_net1'] = False

        if self.CTRL_PNL['precomp_net1'] == True:
            self.CTRL_PNL['depth_map_input_est'] = True

        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est'] #holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        if opt.losstype == 'direct':
            self.CTRL_PNL['depth_map_labels'] = False
            self.CTRL_PNL['depth_map_output'] = False
        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est'] == True: #for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'
        self.CTRL_PNL['aws'] = False
        self.CTRL_PNL['lock_root'] = False




        # CTRL_PNL_COR: a copy of the control panel configured for the second (correction) network pass
        self.CTRL_PNL_COR = self.CTRL_PNL.copy()
        self.CTRL_PNL_COR['depth_map_output'] = True
        self.CTRL_PNL_COR['depth_map_input_est'] = True
        self.CTRL_PNL_COR['adjust_ang_from_est'] = True
        self.CTRL_PNL_COR['incl_ht_wt_channels'] = True
        self.CTRL_PNL_COR['incl_pmat_cntct_input'] = True
        self.CTRL_PNL_COR['num_input_channels'] = 3
        if self.CTRL_PNL_COR['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL_COR['num_input_channels'] += 1
        if self.CTRL_PNL_COR['depth_map_input_est'] == True: #for a two part regression
            self.CTRL_PNL_COR['num_input_channels'] += 3
        self.CTRL_PNL_COR['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL_COR['incl_ht_wt_channels'] == True:
            self.CTRL_PNL_COR['num_input_channels'] += 2


        self.mat_size = (NUMOFTAXELS_X, NUMOFTAXELS_Y)
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)
        self.output_size_val = (NUMOFOUTPUTNODES_TEST, NUMOFOUTPUTDIMS)
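        # SMPL kinematic tree: parents[i] is the parent joint of joint i
        # (4294967295 is -1 cast to uint32, i.e. the root has no parent).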
        self.parents = np.array([4294967295, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21]).astype(np.int32)



        #################################### PREP TESTING ##########################################
        #load testing synth data
        dat_f_synth = TensorPrepLib().load_files_to_database(testing_database_file_f, 'synth')
        dat_m_synth = TensorPrepLib().load_files_to_database(testing_database_file_m, 'synth')
        dat_f_real = TensorPrepLib().load_files_to_database(testing_database_file_f, 'real')
        dat_m_real = TensorPrepLib().load_files_to_database(testing_database_file_m, 'real')

        self.test_x_flat = []  # Initialize the testing pressure mat list
        self.test_x_flat = TensorPrepLib().prep_images(self.test_x_flat, dat_f_synth, dat_m_synth, num_repeats = 1)
        self.test_x_flat = list(np.clip(np.array(self.test_x_flat) * 5.0, a_min=0, a_max=100))

        self.test_x_flat = TensorPrepLib().prep_images(self.test_x_flat, dat_f_real, dat_m_real, num_repeats = self.CTRL_PNL['repeat_real_data_ct'])

        self.test_x_flat = PreprocessingLib().preprocessing_blur_images(self.test_x_flat, self.mat_size, sigma=0.5)

        if len(self.test_x_flat) == 0: print("NO TESTING DATA INCLUDED")

        self.test_a_flat = []  # Initialize the testing pressure mat angle list
        self.test_a_flat = TensorPrepLib().prep_angles(self.test_a_flat, dat_f_synth, dat_m_synth, num_repeats = 1)
        self.test_a_flat = TensorPrepLib().prep_angles(self.test_a_flat, dat_f_real, dat_m_real, num_repeats = self.CTRL_PNL['repeat_real_data_ct'])

        if self.CTRL_PNL['depth_map_labels'] == True:
            self.depth_contact_maps = [] #Initialize the precomputed depth and contact maps
            self.depth_contact_maps = TensorPrepLib().prep_depth_contact(self.depth_contact_maps, dat_f_synth, dat_m_synth, num_repeats = 1)
        else:
            self.depth_contact_maps = None

        test_xa = PreprocessingLib().preprocessing_create_pressure_angle_stack(self.test_x_flat,
                                                                                self.test_a_flat,
                                                                                self.CTRL_PNL['incl_inter'], self.mat_size,
                                                                                self.CTRL_PNL['clip_sobel'],
                                                                                self.CTRL_PNL['verbose'])

        test_xa = TensorPrepLib().append_input_depth_contact(np.array(test_xa),
                                                              include_pmat_contact = self.CTRL_PNL_COR['incl_pmat_cntct_input'],
                                                              mesh_depth_contact_maps = self.depth_contact_maps,
                                                              include_mesh_depth_contact = self.CTRL_PNL['depth_map_labels'])

        self.test_x_tensor = torch.Tensor(test_xa)

        self.test_y_flat = []  # Initialize the testing ground truth list
        self.test_y_flat = TensorPrepLib().prep_labels(self.test_y_flat, dat_f_synth, num_repeats = 1,
                                                        z_adj = -0.075, gender = "f", is_synth = True,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'])
        self.test_y_flat = TensorPrepLib().prep_labels(self.test_y_flat, dat_m_synth, num_repeats = 1,
                                                        z_adj = -0.075, gender = "m", is_synth = True,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'])

        self.test_y_flat = TensorPrepLib().prep_labels(self.test_y_flat, dat_f_real, num_repeats = self.CTRL_PNL['repeat_real_data_ct'],
                                                        z_adj = 0.0, gender = "f", is_synth = False,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'])
        self.test_y_flat = TensorPrepLib().prep_labels(self.test_y_flat, dat_m_real, num_repeats = self.CTRL_PNL['repeat_real_data_ct'],
                                                        z_adj = 0.0, gender = "m", is_synth = False,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'])
        self.test_y_tensor = torch.Tensor(self.test_y_flat)

        print self.test_x_tensor.shape, 'Input testing tensor shape'
        print self.test_y_tensor.shape, 'Output testing tensor shape'
    def __init__(self, testing_database_file_f, testing_database_file_m, opt, filename):
        '''Opens the specified pickle files to get the combined dataset:
        This dataset is a dictionary of pressure maps with the corresponding
        3d position and orientation of the markers associated with it.'''

        # change this to 'direct' when you are doing baseline methods
        self.CTRL_PNL = {}
        self.CTRL_PNL['batch_size'] = 64
        self.CTRL_PNL['loss_vector_type'] = opt.losstype
        self.CTRL_PNL['verbose'] = opt.verbose
        self.opt = opt
        self.CTRL_PNL['num_epochs'] = 101
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = False
        self.CTRL_PNL['incl_ht_wt_channels'] = True
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['lock_root'] = False
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = GPU
        self.CTRL_PNL['dtype'] = dtype
        self.CTRL_PNL['repeat_real_data_ct'] = 1
        self.CTRL_PNL['regr_angles'] = 1
        self.CTRL_PNL['depth_map_labels'] = False
        self.CTRL_PNL['dropout'] = False
        self.CTRL_PNL['depth_map_labels_test'] = True #can only be true if we have 100% synth for testing
        self.CTRL_PNL['depth_map_output'] = True
        self.CTRL_PNL['depth_map_input_est'] = False #do this if we're working in a two-part regression
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est'] #holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True
        self.CTRL_PNL['all_tanh_activ'] = True
        self.CTRL_PNL['normalize_input'] = True
        self.CTRL_PNL['L2_contact'] = True
        self.CTRL_PNL['pmat_mult'] = int(5)
        self.CTRL_PNL['cal_noise'] = True
        self.CTRL_PNL['double_network_size'] = False
        self.CTRL_PNL['first_pass'] = True

        self.filename = filename

        if opt.losstype == 'direct':
            self.CTRL_PNL['depth_map_labels'] = False
            self.CTRL_PNL['depth_map_output'] = False

        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['incl_pmat_cntct_input'] = False #if there's calibration noise we need to recompute this every batch
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est'] == True: #for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['num_input_channels'] += 1

        pmat_std_from_mult = ['N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369, 0.0, 25.50538629767412]
        if self.CTRL_PNL['cal_noise'] == False:
            sobel_std_from_mult = ['N/A', 29.80360490415032, 33.33532963163579, 34.14427844692501, 0.0, 34.86393494050921]
        else:
            sobel_std_from_mult = ['N/A', 45.61635847182483, 77.74920396659292, 88.89398421073700, 0.0, 97.90075708182506]
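        # Reciprocal per-channel standard deviations used to normalize the network input;
        # the pmat and sobel entries are indexed by the pmat_mult scaling chosen above.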

        self.CTRL_PNL['norm_std_coeffs'] =  [1./41.80684362163343,  #contact
                                             1./16.69545796387731,  #pos est depth
                                             1./45.08513083167194,  #neg est depth
                                             1./43.55800622930469,  #cm est
                                             1./pmat_std_from_mult[int(self.CTRL_PNL['pmat_mult'])], #pmat x5
                                             1./sobel_std_from_mult[int(self.CTRL_PNL['pmat_mult'])], #pmat sobel
                                             1./1.0,                #bed height mat
                                             1./1.0,                #OUTPUT DO NOTHING
                                             1./1.0,                #OUTPUT DO NOTHING
                                             1. / 30.216647403350,  #weight
                                             1. / 14.629298141231]  #height



        if self.opt.aws == True:
            self.CTRL_PNL['filepath_prefix'] = '/home/ubuntu/'
        else:
            self.CTRL_PNL['filepath_prefix'] = '/home/henry/'
            #self.CTRL_PNL['filepath_prefix'] = '/media/henry/multimodal_data_2/'

        if self.CTRL_PNL['depth_map_output'] == True: #we need all the vertices if we're going to regress the depth maps
            self.verts_list = "all"
        else:
            self.verts_list = [1325, 336, 1032, 4515, 1374, 4848, 1739, 5209, 1960, 5423]

        print self.CTRL_PNL['num_epochs'], 'NUM EPOCHS!'
        # Entire pressure dataset with coordinates in world frame

        self.save_name = '_' + opt.losstype + \
                         '_synth_32000' + \
                         '_' + str(self.CTRL_PNL['batch_size']) + 'b' + \
                         '_' + str(self.CTRL_PNL['num_epochs']) + 'e' + \
                         '_x' + str(self.CTRL_PNL['pmat_mult']) + 'pmult'


        if self.CTRL_PNL['depth_map_labels'] == True:
            self.save_name += '_' + str(self.opt.j_d_ratio) + 'rtojtdpth'
        if self.CTRL_PNL['depth_map_input_est'] == True:
            self.save_name += '_depthestin'
        if self.CTRL_PNL['adjust_ang_from_est'] == True:
            self.save_name += '_angleadj'
        if self.CTRL_PNL['all_tanh_activ'] == True:
            self.save_name += '_alltanh'
        if self.CTRL_PNL['L2_contact'] == True:
            self.save_name += '_l2cnt'
        if self.CTRL_PNL['cal_noise'] == True:
            self.save_name += '_calnoise'


        # self.save_name = '_' + opt.losstype+'_real_s9_alltest_' + str(self.CTRL_PNL['batch_size']) + 'b_'# + str(self.CTRL_PNL['num_epochs']) + 'e'

        print 'appending to', 'train' + self.save_name
        self.train_val_losses = {}
        self.train_val_losses['train' + self.save_name] = []
        self.train_val_losses['val' + self.save_name] = []
        self.train_val_losses['epoch' + self.save_name] = []

        self.mat_size = (NUMOFTAXELS_X, NUMOFTAXELS_Y)
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)
        self.output_size_val = (NUMOFOUTPUTNODES_TEST, NUMOFOUTPUTDIMS)
        self.parents = np.array([4294967295, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21]).astype(np.int32)




        #################################### PREP TESTING DATA ##########################################
        # load in the test file
        test_dat_f_synth = TensorPrepLib().load_files_to_database(testing_database_file_f, 'synth')
        test_dat_m_synth = TensorPrepLib().load_files_to_database(testing_database_file_m, 'synth')
        test_dat_f_real = TensorPrepLib().load_files_to_database(testing_database_file_f, 'real')
        test_dat_m_real = TensorPrepLib().load_files_to_database(testing_database_file_m, 'real')



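        # load_files_to_database returns None when no files of the given type ('synth'/'real')
        # were passed in, so only the datasets that actually exist get the estimate fields below.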
        for possible_dat in [test_dat_f_synth, test_dat_m_synth, test_dat_f_real, test_dat_m_real]:
            if possible_dat is not None:
                self.dat = possible_dat
                self.dat['mdm_est'] = []
                self.dat['cm_est'] = []
                self.dat['angles_est'] = []
                self.dat['root_xyz_est'] = []
                self.dat['betas_est'] = []
                self.dat['root_atan2_est'] = []



        self.test_x_flat = []  # Initialize the testing pressure mat list
        self.test_x_flat = TensorPrepLib().prep_images(self.test_x_flat, test_dat_f_synth, test_dat_m_synth, num_repeats = 1)
        self.test_x_flat = list(np.clip(np.array(self.test_x_flat) * float(self.CTRL_PNL['pmat_mult']), a_min=0, a_max=100))
        self.test_x_flat = TensorPrepLib().prep_images(self.test_x_flat, test_dat_f_real, test_dat_m_real, num_repeats = 1)

        if self.CTRL_PNL['cal_noise'] == False:
            self.test_x_flat = PreprocessingLib().preprocessing_blur_images(self.test_x_flat, self.mat_size, sigma=0.5)

        if len(self.test_x_flat) == 0: print("NO TESTING DATA INCLUDED")

        self.test_a_flat = []  # Initialize the testing pressure mat angle list
        self.test_a_flat = TensorPrepLib().prep_angles(self.test_a_flat, test_dat_f_synth, test_dat_m_synth, num_repeats = 1)
        self.test_a_flat = TensorPrepLib().prep_angles(self.test_a_flat, test_dat_f_real, test_dat_m_real, num_repeats = 1)


        if self.CTRL_PNL['depth_map_labels_test'] == True:
            self.depth_contact_maps = [] #Initialize the precomputed depth and contact maps. only synth has this label.
            self.depth_contact_maps = TensorPrepLib().prep_depth_contact(self.depth_contact_maps, test_dat_f_synth, test_dat_m_synth, num_repeats = 1)
        else:
            self.depth_contact_maps = None

        if self.CTRL_PNL['depth_map_input_est'] == True:
            self.depth_contact_maps_input_est = [] #Initialize the precomputed depth and contact map input estimates
            self.depth_contact_maps_input_est = TensorPrepLib().prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                                                                             test_dat_f_synth, test_dat_m_synth, num_repeats = 1)
            self.depth_contact_maps_input_est = TensorPrepLib().prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                                                                             test_dat_f_real, test_dat_m_real, num_repeats = 1)
        else:
            self.depth_contact_maps_input_est = None

        print np.shape(self.test_x_flat), np.shape(self.test_a_flat)

        test_xa = PreprocessingLib().preprocessing_create_pressure_angle_stack(self.test_x_flat,
                                                                               self.test_a_flat,
                                                                                self.mat_size,
                                                                                self.CTRL_PNL)


        test_xa = TensorPrepLib().append_input_depth_contact(np.array(test_xa),
                                                              CTRL_PNL = self.CTRL_PNL,
                                                              mesh_depth_contact_maps_input_est = self.depth_contact_maps_input_est,
                                                              mesh_depth_contact_maps = self.depth_contact_maps)

        #normalize the input
        if self.CTRL_PNL['normalize_input'] == True:
            test_xa = TensorPrepLib().normalize_network_input(test_xa, self.CTRL_PNL)

        self.test_x_tensor = torch.Tensor(test_xa)

        test_y_flat = []  # Initialize the ground truth list

        test_y_flat = TensorPrepLib().prep_labels(test_y_flat, test_dat_f_synth, num_repeats = 1,
                                                    z_adj = -0.075, gender = "f", is_synth = True,
                                                    loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                    initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot = self.CTRL_PNL['full_body_rot'])
        test_y_flat = TensorPrepLib().prep_labels(test_y_flat, test_dat_m_synth, num_repeats = 1,
                                                    z_adj = -0.075, gender = "m", is_synth = True,
                                                    loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                    initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot = self.CTRL_PNL['full_body_rot'])

        test_y_flat = TensorPrepLib().prep_labels(test_y_flat, test_dat_f_real, num_repeats = 1,
                                                    z_adj = 0.0, gender = "f", is_synth = False,
                                                    loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                    initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot = self.CTRL_PNL['full_body_rot'])
        test_y_flat = TensorPrepLib().prep_labels(test_y_flat, test_dat_m_real, num_repeats = 1,
                                                    z_adj = 0.0, gender = "m", is_synth = False,
                                                    loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                    initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot = self.CTRL_PNL['full_body_rot'])

        if self.CTRL_PNL['normalize_input'] == True:
            test_y_flat = TensorPrepLib().normalize_wt_ht(test_y_flat, self.CTRL_PNL)

        self.test_y_tensor = torch.Tensor(test_y_flat)


        print self.test_x_tensor.shape, 'Input testing tensor shape'
        print self.test_y_tensor.shape, 'Output testing tensor shape'
    def unpackage_batch_kin_pass(self, batch, is_training, model, CTRL_PNL):

        #print batch[0].size(), 'batch 0 input'
        #print torch.max(batch[0][0, 0, :]), torch.max(batch[0][0, 1, :]), torch.max(batch[0][0, 2, :]), torch.max(batch[0][0, 3, :])

        INPUT_DICT = {}
        adj_ext_idx = 0
        # 0:72: positions.
        batch.append(batch[1][:, 72:82])  # betas
        batch.append(batch[1][:, 82:154])  # angles
        batch.append(batch[1][:, 154:157])  # root pos
        batch.append(batch[1][:, 157:159])  # gender switch
        batch.append(batch[1][:, 159])  # synth vs real switch
        batch.append(batch[1][:, 160:161])  # mass, kg
        batch.append(batch[1][:, 161:162])  # height

        if CTRL_PNL['adjust_ang_from_est'] == True:
            adj_ext_idx += 3
            batch.append(batch[1][:, 162:172])  #betas est
            batch.append(batch[1][:, 172:244])  #angles est
            batch.append(batch[1][:, 244:247])  #root pos est
            if CTRL_PNL['full_body_rot'] == True:
                adj_ext_idx += 1
                batch.append(batch[1][:, 247:253])  #root atan2 est
                #print "appended root", batch[-1], batch[12]

            extra_smpl_angles = batch[10]
            extra_targets = batch[11]
        else:
            extra_smpl_angles = None
            extra_targets = None

        if CTRL_PNL['depth_map_labels'] == True:
            if CTRL_PNL['depth_map_labels_test'] == True or is_training == True:
                batch.append(
                    batch[0][:, CTRL_PNL['num_input_channels_batch0'], :, :]
                )  #mesh depth matrix
                batch.append(batch[0][:,
                                      CTRL_PNL['num_input_channels_batch0'] +
                                      1, :, :])  #mesh contact matrix

                #cut off batch 0 so we don't have depth or contact on the input
                batch[0] = batch[
                    0][:, 0:CTRL_PNL['num_input_channels_batch0'], :, :]
                print "CLIPPING BATCH 0"

        #print batch[0].size(), 'batch 0 shape'

        # cut it off so batch[1] is only the xyz marker targets
        batch[1] = batch[1][:, 0:72]

        #if is_training == True: #only do augmentation for real data that is in training mode
        #    batch[0], batch[1], extra_targets, extra_smpl_angles = SyntheticLib().synthetic_master(batch[0], batch[1], batch[6],
        #                                                            num_images_manip = (CTRL_PNL['num_input_channels_batch0']-1),
        #                                                            flip=True, shift=True, scale=False,
        #                                                            include_inter=CTRL_PNL['incl_inter'],
        #                                                            loss_vector_type=CTRL_PNL['loss_vector_type'],
        #                                                            extra_targets=extra_targets,
        #                                                            extra_smpl_angles = extra_smpl_angles)

        images_up_non_tensor = np.array(batch[0].numpy())

        INPUT_DICT['batch_images'] = np.copy(images_up_non_tensor)

        #here perform synthetic calibration noise over pmat and sobel filtered pmat.
        if CTRL_PNL['cal_noise'] == True:
            images_up_non_tensor = PreprocessingLib(
            ).preprocessing_add_calibration_noise(
                images_up_non_tensor,
                pmat_chan_idx=(CTRL_PNL['num_input_channels_batch0'] - 3),
                norm_std_coeffs=CTRL_PNL['norm_std_coeffs'],
                is_training=is_training)

        #print np.shape(images_up_non_tensor)

        images_up_non_tensor = PreprocessingLib(
        ).preprocessing_pressure_map_upsample(images_up_non_tensor, multiple=2)

        if is_training == True:  #only add noise to training images
            if CTRL_PNL['cal_noise'] == False:
                images_up_non_tensor = PreprocessingLib(
                ).preprocessing_add_image_noise(
                    np.array(images_up_non_tensor),
                    pmat_chan_idx=(CTRL_PNL['num_input_channels_batch0'] - 3),
                    norm_std_coeffs=CTRL_PNL['norm_std_coeffs'])
            else:
                images_up_non_tensor = PreprocessingLib(
                ).preprocessing_add_image_noise(
                    np.array(images_up_non_tensor),
                    pmat_chan_idx=(CTRL_PNL['num_input_channels_batch0'] - 2),
                    norm_std_coeffs=CTRL_PNL['norm_std_coeffs'])

        images_up = Variable(torch.Tensor(images_up_non_tensor).type(
            CTRL_PNL['dtype']),
                             requires_grad=False)

        if CTRL_PNL['incl_ht_wt_channels'] == True:  #broadcast scalar weight and height into constant-valued image channels
            weight_input = torch.ones(
                (images_up.size()[0], images_up.size()[2] *
                 images_up.size()[3])).type(CTRL_PNL['dtype'])
            weight_input *= batch[7].type(CTRL_PNL['dtype'])
            weight_input = weight_input.view(
                (images_up.size()[0], 1, images_up.size()[2],
                 images_up.size()[3]))
            height_input = torch.ones(
                (images_up.size()[0], images_up.size()[2] *
                 images_up.size()[3])).type(CTRL_PNL['dtype'])
            height_input *= batch[8].type(CTRL_PNL['dtype'])
            height_input = height_input.view(
                (images_up.size()[0], 1, images_up.size()[2],
                 images_up.size()[3]))
            images_up = torch.cat((images_up, weight_input, height_input), 1)


        targets, betas = Variable(batch[1].type(CTRL_PNL['dtype']), requires_grad=False), \
                         Variable(batch[2].type(CTRL_PNL['dtype']), requires_grad=False)

        angles_gt = Variable(batch[3].type(CTRL_PNL['dtype']),
                             requires_grad=is_training)
        root_shift = Variable(batch[4].type(CTRL_PNL['dtype']),
                              requires_grad=is_training)
        gender_switch = Variable(batch[5].type(CTRL_PNL['dtype']),
                                 requires_grad=is_training)
        synth_real_switch = Variable(batch[6].type(CTRL_PNL['dtype']),
                                     requires_grad=is_training)

        OUTPUT_EST_DICT = {}
        if CTRL_PNL['adjust_ang_from_est'] == True:
            OUTPUT_EST_DICT['betas'] = Variable(batch[9].type(
                CTRL_PNL['dtype']),
                                                requires_grad=is_training)
            OUTPUT_EST_DICT['angles'] = Variable(extra_smpl_angles.type(
                CTRL_PNL['dtype']),
                                                 requires_grad=is_training)
            OUTPUT_EST_DICT['root_shift'] = Variable(extra_targets.type(
                CTRL_PNL['dtype']),
                                                     requires_grad=is_training)
            if CTRL_PNL['full_body_rot'] == True:
                OUTPUT_EST_DICT['root_atan2'] = Variable(
                    batch[12].type(CTRL_PNL['dtype']),
                    requires_grad=is_training)

        if CTRL_PNL['depth_map_labels'] == True:
            if CTRL_PNL['depth_map_labels_test'] == True or is_training == True:
                INPUT_DICT['batch_mdm'] = batch[9 + adj_ext_idx].type(
                    CTRL_PNL['dtype'])
                INPUT_DICT['batch_cm'] = batch[10 + adj_ext_idx].type(
                    CTRL_PNL['dtype'])
        else:
            INPUT_DICT['batch_mdm'] = None
            INPUT_DICT['batch_cm'] = None

        print images_up.size()

        #print torch.max(images_up[0, 0, :]), torch.max(images_up[0, 1, :]), torch.max(images_up[0, 2, :]), torch.max(images_up[0, 3, :]), torch.max(images_up[0, 4, :]), torch.max(images_up[0, 5, :]),

        scores, OUTPUT_DICT = model.forward_kinematic_angles(
            images=images_up,
            gender_switch=gender_switch,
            synth_real_switch=synth_real_switch,
            CTRL_PNL=CTRL_PNL,
            OUTPUT_EST_DICT=OUTPUT_EST_DICT,
            targets=targets,
            is_training=is_training,
            betas=betas,
            angles_gt=angles_gt,
            root_shift=root_shift,
        )  # scores is a variable with 27 for 10 euclidean errors and 17 lengths in meters. targets est is a numpy array in mm.

        #scores, targets_est_np, targets_est_reduced_np, betas_est_np = model.forward_kinematic_angles(images_up,
        #                                                                                              gender_switch,
        #                                                                                              synth_real_switch,
        #                                                                                              targets,
        #                                                                                              is_training,
        #                                                                                              betas,
        #                                                                                              angles_gt,
        #                                                                                              root_shift,
        #                                                                                              False)
        #OUTPUT_DICT = {}
        #OUTPUT_DICT['batch_targets_est'] = targets_est_np
        #OUTPUT_DICT['batch_betas_est'] = betas_est_np

        #INPUT_DICT['batch_images'] = images_up.data
        INPUT_DICT['batch_targets'] = targets.data

        return scores, INPUT_DICT, OUTPUT_DICT
    def estimate_real_time(self, filename1, filename2 = None):
        pyRender = libRender.pyRenderMesh()
        mat_size = (64, 27)
        from unpack_batch_lib import UnpackBatchLib

        if torch.cuda.is_available():
            # Use for GPU
            GPU = True
            dtype = torch.cuda.FloatTensor
            print '######################### CUDA is available! #############################'
        else:
            # Use for CPU
            GPU = False
            dtype = torch.FloatTensor
            print '############################## USING CPU #################################'

        from torch.autograd import Variable

        if GPU == True:
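            # The checkpoint may have been saved from any GPU, so try remapping each possible
            # 'cuda:i' device to 'cuda:0' until torch.load succeeds.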
            for i in range(0, 8):
                try:
                    model = torch.load(filename1, map_location={'cuda:'+str(i):'cuda:0'})
                    model = model.cuda().eval()
                    break
                except:
                    pass
            if filename2 is not None:
                for i in range(0, 8):
                    try:
                        model2 = torch.load(filename2, map_location={'cuda:'+str(i):'cuda:0'})
                        model2 = model2.cuda().eval()
                        break
                    except:
                        pass
            else:
                model2 = None

        else:
            model = torch.load(filename1, map_location='cpu').eval()
            if filename2 is not None:
                model2 = torch.load(filename2, map_location='cpu').eval()
            else:
                model2 = None


        pub = rospy.Publisher('meshTopic', MeshAttr)
        #rospy.init_node('talker', anonymous=False)
        while not rospy.is_shutdown():


            pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(self.CTRL_PNL['pmat_mult']*4), a_min=0, a_max=100)))
            #pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(1), a_min=0, a_max=100)))
            #print "max is : ", np.max(pmat)
            #print "sum is : ", np.sum(pmat)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat = gaussian_filter(pmat, sigma= 0.5)


            pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, self.bedangle, mat_size)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

            pmat_stack = np.array(pmat_stack)
            if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
                pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
                pmat_contact[pmat_contact > 0] = 100
                pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis = 1)

            weight_input = WEIGHT_LBS/2.20462
            height_input = (HEIGHT_IN*0.0254 - 1)*100

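            # Dummy 162-wide label vector: only the gender one-hot (cols 157/158), weight in kg
            # (col 160) and the height term (col 161) are filled in; the layout matches the
            # slicing in unpackage_batch_kin_pass.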
            batch1 = np.zeros((1, 162))
            if GENDER == 'f':
                batch1[:, 157] += 1
            elif GENDER == 'm':
                batch1[:, 158] += 1
            batch1[:, 160] += weight_input
            batch1[:, 161] += height_input

            if self.CTRL_PNL['normalize_input'] == True:
                self.CTRL_PNL['depth_map_input_est'] = False
                pmat_stack = self.TPL.normalize_network_input(pmat_stack, self.CTRL_PNL)
                batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)


            pmat_stack = torch.Tensor(pmat_stack)
            batch1 = torch.Tensor(batch1)


            batch = []
            batch.append(pmat_stack)
            batch.append(batch1)

            self.CTRL_PNL['adjust_ang_from_est'] = False
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, model, self.CTRL_PNL)

            self.CTRL_PNL['first_pass'] = False

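            # Split the estimated mesh depth map into positive and negative parts and undo the
            # per-channel normalization (same constants as norm_std_coeffs) so the estimates can
            # be fed back as extra input channels to the correction network (model2) below.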
            mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
            mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
            mdm_est_pos[mdm_est_pos < 0] = 0
            mdm_est_neg[mdm_est_neg > 0] = 0
            mdm_est_neg *= -1
            cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

            #1. / 16.69545796387731,  # pos est depth
            #1. / 45.08513083167194,  # neg est depth
            #1. / 43.55800622930469,  # cm est

            if model2 is not None:
                batch_cor = []
                batch_cor.append(torch.cat((pmat_stack[:, 0:1, :, :],
                                          mdm_est_pos.type(torch.FloatTensor),
                                          mdm_est_neg.type(torch.FloatTensor),
                                          cm_est.type(torch.FloatTensor),
                                          pmat_stack[:, 1:, :, :]), dim=1))

                if self.CTRL_PNL['full_body_rot'] == False:
                    batch_cor.append(torch.cat((batch1,
                                      OUTPUT_DICT['batch_betas_est'].cpu(),
                                      OUTPUT_DICT['batch_angles_est'].cpu(),
                                      OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim = 1))
                elif self.CTRL_PNL['full_body_rot'] == True:
                    batch_cor.append(torch.cat((batch1,
                                      OUTPUT_DICT['batch_betas_est'].cpu(),
                                      OUTPUT_DICT['batch_angles_est'].cpu(),
                                      OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                      OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim = 1))


                self.CTRL_PNL['adjust_ang_from_est'] = True
                scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False, model2, self.CTRL_PNL)

            betas_est = np.squeeze(OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy())
            angles_est = np.squeeze(OUTPUT_DICT['batch_angles_est_post_clip'])
            root_shift_est = np.squeeze(OUTPUT_DICT['batch_root_xyz_est_post_clip'].cpu().numpy())


            #print betas_est.shape, root_shift_est.shape, angles_est.shape

            #print betas_est, root_shift_est, angles_est
            angles_est = angles_est.reshape(72)

            for idx in range(10):
                #print shape_pose_vol[0][idx]
                self.m.betas[idx] = betas_est[idx]


            for idx in range(72):
                self.m.pose[idx] = angles_est[idx]


            init_root = np.array(self.m.pose[0:3])+0.000001
            init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
            root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi/2])
            #print root_rot
            trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

            self.m.pose[0] = trans_root[0]
            self.m.pose[1] = trans_root[1]
            self.m.pose[2] = trans_root[2]

            #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
            # self.m.pose[51] = selection_r

            print self.m.r
            #print OUTPUT_DICT['verts']

            pyRender.mesh_render_pose_bed_orig(self.m, root_shift_est, self.point_cloud_array, self.pc_isnew, pmat, self.markers, self.bedangle)
            self.point_cloud_array = None
    def __init__(self, training_database_file_f, training_database_file_m):
        #if this is your first time looking at the code don't pay much attention to this __init__ function.
        #it's all just boilerplate for loading in the synthetic data

        self.CTRL_PNL = {}
        self.CTRL_PNL['loss_vector_type'] = 'anglesDC'
        self.CTRL_PNL['verbose'] = False
        self.CTRL_PNL['batch_size'] = 1
        self.CTRL_PNL['num_epochs'] = 100
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = False
        self.CTRL_PNL['incl_ht_wt_channels'] = False
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['dropout'] = False
        self.CTRL_PNL['lock_root'] = False
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = False
        repeat_real_data_ct = 3
        self.CTRL_PNL['regr_angles'] = False
        self.CTRL_PNL['aws'] = False
        self.CTRL_PNL[
            'depth_map_labels'] = True  #can only be true if we have 100% synthetic data for training
        self.CTRL_PNL[
            'depth_map_labels_test'] = True  #can only be true if we have 100% synth for testing
        self.CTRL_PNL['depth_map_output'] = self.CTRL_PNL['depth_map_labels']
        self.CTRL_PNL[
            'depth_map_input_est'] = False  #do this if we're working in a two-part regression
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL[
            'depth_map_input_est']  #holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True
        self.CTRL_PNL['normalize_input'] = True
        self.CTRL_PNL['all_tanh_activ'] = True
        self.CTRL_PNL['L2_contact'] = True
        self.CTRL_PNL['pmat_mult'] = int(1)
        self.CTRL_PNL['cal_noise'] = False
        self.CTRL_PNL['double_network_size'] = False
        self.CTRL_PNL['first_pass'] = True

        self.weight_joints = 1.0  #self.opt.j_d_ratio*2
        self.weight_depth_planes = (1 - 0.5)  #*2

        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL[
                'incl_pmat_cntct_input'] = False  #if there's calibration noise we need to recompute this every batch
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL[
                'depth_map_input_est'] == True:  #for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(
            self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['num_input_channels'] += 1

        pmat_std_from_mult = [
            'N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369,
            0.0, 25.50538629767412
        ]
        if self.CTRL_PNL['cal_noise'] == False:
            sobel_std_from_mult = [
                'N/A', 29.80360490415032, 33.33532963163579, 34.14427844692501,
                0.0, 34.86393494050921
            ]
        else:
            sobel_std_from_mult = [
                'N/A', 45.61635847182483, 77.74920396659292, 88.89398421073700,
                0.0, 97.90075708182506
            ]

        self.CTRL_PNL['norm_std_coeffs'] = [
            1. / 41.80684362163343,  #contact
            1. / 16.69545796387731,  #pos est depth
            1. / 45.08513083167194,  #neg est depth
            1. / 43.55800622930469,  #cm est
            1. / pmat_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  #pmat x5
            1. /
            sobel_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  #pmat sobel
            1. / 1.0,  #bed height mat
            1. / 1.0,  #OUTPUT DO NOTHING
            1. / 1.0,  #OUTPUT DO NOTHING
            1. / 30.216647403350,  #weight
            1. / 14.629298141231
        ]  #height

        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'
        #self.CTRL_PNL['filepath_prefix'] = '/media/henry/multimodal_data_2/'

        if self.CTRL_PNL[
                'depth_map_output'] == True:  #we need all the vertices if we're going to regress the depth maps
            self.verts_list = "all"
        else:
            self.verts_list = [
                1325, 336, 1032, 4515, 1374, 4848, 1739, 5209, 1960, 5423
            ]

        print self.CTRL_PNL['num_epochs'], 'NUM EPOCHS!'
        # Entire pressure dataset with coordinates in world frame

        self.mat_size = (NUMOFTAXELS_X, NUMOFTAXELS_Y)

        #################################### PREP TRAINING DATA ##########################################
        #load training synth data
        dat_f_synth = TensorPrepLib().load_files_to_database(
            training_database_file_f, 'synth')
        dat_m_synth = TensorPrepLib().load_files_to_database(
            training_database_file_m, 'synth')

        self.train_x_flat = []  # Initialize the training pressure mat list
        self.train_x_flat = TensorPrepLib().prep_images(self.train_x_flat,
                                                        dat_f_synth,
                                                        dat_m_synth,
                                                        num_repeats=1)
        self.train_x_flat = list(
            np.clip(np.array(self.train_x_flat) *
                    float(self.CTRL_PNL['pmat_mult']),
                    a_min=0,
                    a_max=100))

        if self.CTRL_PNL['cal_noise'] == False:
            self.train_x_flat = PreprocessingLib().preprocessing_blur_images(
                self.train_x_flat, self.mat_size, sigma=0.5)

        if len(self.train_x_flat) == 0: print("NO TRAINING DATA INCLUDED")

        self.train_a_flat = [
        ]  # Initialize the training pressure mat angle list
        self.train_a_flat = TensorPrepLib().prep_angles(self.train_a_flat,
                                                        dat_f_synth,
                                                        dat_m_synth,
                                                        num_repeats=1)

        if self.CTRL_PNL['depth_map_labels'] == True:
            self.depth_contact_maps = [
            ]  #Initialize the precomputed depth and contact maps. only synth has this label.
            self.depth_contact_maps = TensorPrepLib().prep_depth_contact(
                self.depth_contact_maps,
                dat_f_synth,
                dat_m_synth,
                num_repeats=1)
        else:
            self.depth_contact_maps = None

        if self.CTRL_PNL['depth_map_input_est'] == True:
            self.depth_contact_maps_input_est = [
            ]  #Initialize the precomputed depth and contact map input estimates
            self.depth_contact_maps_input_est = TensorPrepLib(
            ).prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                           dat_f_synth,
                                           dat_m_synth,
                                           num_repeats=1)

        else:
            self.depth_contact_maps_input_est = None

        #stack the bed height array on the pressure image as well as a sobel filtered image
        train_xa = PreprocessingLib(
        ).preprocessing_create_pressure_angle_stack(self.train_x_flat,
                                                    self.train_a_flat,
                                                    self.mat_size,
                                                    self.CTRL_PNL)

        #stack the depth and contact mesh images (and possibly a pmat contact image) together
        train_xa = TensorPrepLib().append_input_depth_contact(
            np.array(train_xa),
            CTRL_PNL=self.CTRL_PNL,
            mesh_depth_contact_maps_input_est=self.
            depth_contact_maps_input_est,
            mesh_depth_contact_maps=self.depth_contact_maps)

        #normalize the input
        if self.CTRL_PNL['normalize_input'] == True:
            train_xa = TensorPrepLib().normalize_network_input(
                train_xa, self.CTRL_PNL)

        self.train_x_tensor = torch.Tensor(train_xa)

        train_y_flat = []  # Initialize the training ground truth list
        train_y_flat = TensorPrepLib().prep_labels(
            train_y_flat,
            dat_f_synth,
            num_repeats=1,
            z_adj=-0.075,
            gender="f",
            is_synth=True,
            loss_vector_type=self.CTRL_PNL['loss_vector_type'],
            initial_angle_est=self.CTRL_PNL['adjust_ang_from_est'],
            full_body_rot=self.CTRL_PNL['full_body_rot'])
        train_y_flat = TensorPrepLib().prep_labels(
            train_y_flat,
            dat_m_synth,
            num_repeats=1,
            z_adj=-0.075,
            gender="m",
            is_synth=True,
            loss_vector_type=self.CTRL_PNL['loss_vector_type'],
            initial_angle_est=self.CTRL_PNL['adjust_ang_from_est'],
            full_body_rot=self.CTRL_PNL['full_body_rot'])

        # normalize the height and weight
        if self.CTRL_PNL['normalize_input'] == True:
            train_y_flat = TensorPrepLib().normalize_wt_ht(
                train_y_flat, self.CTRL_PNL)

        self.train_y_tensor = torch.Tensor(train_y_flat)

        self.train_dataset = torch.utils.data.TensorDataset(
            self.train_x_tensor, self.train_y_tensor)
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            self.CTRL_PNL['batch_size'],
            shuffle=self.CTRL_PNL['shuffle'])
    def unpackage_batch_dir_pass(self, batch, is_training, model, CTRL_PNL):
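        # 'Direct' baseline pass: regress the 72 xyz marker targets (24 joints x 3) straight from
        # the pressure image with model.forward_direct, without the SMPL kinematic layers.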

        batch.append(batch[1][:, 159])  # synth vs real switch
        batch.append(batch[1][:, 160:161])  # mass, kg
        batch.append(batch[1][:, 161:162])  # height

        # cut it off so batch[1] is only the xyz marker targets
        batch[1] = batch[1][:, 0:72]

        if is_training == True:
            batch[0], batch[1], _, _ = SyntheticLib().synthetic_master(
                batch[0],
                batch[1],
                batch[2],
                num_images_manip=(CTRL_PNL['num_input_channels_batch0'] - 1),
                flip=False,
                shift=True,
                scale=False,
                include_inter=CTRL_PNL['incl_inter'],
                loss_vector_type=CTRL_PNL['loss_vector_type'])

        images_up_non_tensor = PreprocessingLib(
        ).preprocessing_pressure_map_upsample(batch[0].numpy(), multiple=2)
        if is_training == True:  #only add noise to training images
            images_up_non_tensor = PreprocessingLib(
            ).preprocessing_add_image_noise(
                np.array(images_up_non_tensor),
                pmat_chan_idx=(CTRL_PNL['num_input_channels_batch0'] - 3))
        images_up = Variable(torch.Tensor(images_up_non_tensor).type(
            CTRL_PNL['dtype']),
                             requires_grad=False)

        if CTRL_PNL['incl_ht_wt_channels'] == True:  #broadcast scalar weight and height into constant-valued image channels
            weight_input = torch.ones(
                (images_up.size()[0], images_up.size()[2] *
                 images_up.size()[3])).type(CTRL_PNL['dtype'])
            weight_input *= batch[3].type(CTRL_PNL['dtype'])
            weight_input = weight_input.view(
                (images_up.size()[0], 1, images_up.size()[2],
                 images_up.size()[3]))
            height_input = torch.ones(
                (images_up.size()[0], images_up.size()[2] *
                 images_up.size()[3])).type(CTRL_PNL['dtype'])
            height_input *= batch[4].type(CTRL_PNL['dtype'])
            height_input = height_input.view(
                (images_up.size()[0], 1, images_up.size()[2],
                 images_up.size()[3]))
            images_up = torch.cat((images_up, weight_input, height_input), 1)

        images, targets = Variable(batch[0].type(CTRL_PNL['dtype']), requires_grad=False), \
                          Variable(batch[1].type(CTRL_PNL['dtype']), requires_grad=False)

        scores, OUTPUT_DICT = model.forward_direct(images_up,
                                                   targets,
                                                   is_training=is_training)

        INPUT_DICT = {}
        INPUT_DICT['batch_images'] = images.data
        INPUT_DICT['batch_targets'] = targets.data

        return scores, INPUT_DICT, OUTPUT_DICT
    def estimate_pose(self, pmat, bedangle, markers_c, model, model2):
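        # Single-frame pose estimation: preprocess the pressure image, run the first network
        # (model), optionally refine with the correction network (model2), then recover SMPL
        # vertices for visualization against the point cloud.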
        mat_size = (64, 27)


        pmat = np.fliplr(np.flipud(np.clip(pmat.reshape(MAT_SIZE)*float(self.CTRL_PNL['pmat_mult']), a_min=0, a_max=100)))

        if self.CTRL_PNL['cal_noise'] == False:
            pmat = gaussian_filter(pmat, sigma=1.0)

        pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, 0.0, mat_size)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

        pmat_stack = np.array(pmat_stack)
        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
            pmat_contact[pmat_contact > 0] = 100
            pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

        weight_input = self.weight_lbs / 2.20462
        height_input = (self.height_in * 0.0254 - 1) * 100

        batch1 = np.zeros((1, 162))
        if self.gender == 'f':
            batch1[:, 157] += 1
        elif self.gender == 'm':
            batch1[:, 158] += 1
        batch1[:, 160] += weight_input
        batch1[:, 161] += height_input

        if self.CTRL_PNL['normalize_input'] == True:
            self.CTRL_PNL['depth_map_input_est'] = False
            pmat_stack = self.TPL.normalize_network_input(pmat_stack, self.CTRL_PNL)
            batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

        pmat_stack = torch.Tensor(pmat_stack)
        batch1 = torch.Tensor(batch1)


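        # Monte Carlo dropout: replicate the input 25 times so that, with dropout left on, a
        # per-vertex mean and standard deviation can be computed from the forward passes below.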
        if DROPOUT == True:
            pmat_stack = pmat_stack.repeat(25, 1, 1, 1)
            batch1 = batch1.repeat(25, 1)


        batch = []
        batch.append(pmat_stack)
        batch.append(batch1)

        NUMOFOUTPUTDIMS = 3
        NUMOFOUTPUTNODES_TRAIN = 24
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)


        self.CTRL_PNL['adjust_ang_from_est'] = False
        scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, model, self.CTRL_PNL)

        mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
        mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
        mdm_est_pos[mdm_est_pos < 0] = 0
        mdm_est_neg[mdm_est_neg > 0] = 0
        mdm_est_neg *= -1
        cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

        # 1. / 16.69545796387731,  # pos est depth
        # 1. / 45.08513083167194,  # neg est depth
        # 1. / 43.55800622930469,  # cm est
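        # The estimated mesh depth map is split into its positive and negative parts (with the
        # negative part sign-flipped) and the contact map is rescaled; these become extra input
        # channels for the optional second-pass network below. The commented-out constants
        # appear to be the per-channel normalization factors used during training.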

        sc_sample1 = OUTPUT_DICT['batch_targets_est'].clone()
        sc_sample1 = sc_sample1[0, :].squeeze() / 1000
        sc_sample1 = sc_sample1.view(self.output_size_train)
        #print sc_sample1

        if model2 is not None:
            print "Using model 2"
            batch_cor = []
            batch_cor.append(torch.cat((pmat_stack[:, 0:1, :, :],
                                        mdm_est_pos.type(torch.FloatTensor),
                                        mdm_est_neg.type(torch.FloatTensor),
                                        cm_est.type(torch.FloatTensor),
                                        pmat_stack[:, 1:, :, :]), dim=1))

            if self.CTRL_PNL['full_body_rot'] == False:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim=1))
            elif self.CTRL_PNL['full_body_rot'] == True:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                            OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim=1))

            self.CTRL_PNL['adjust_ang_from_est'] = True
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False, model2,
                                                                                        self.CTRL_PNL)
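            # Second pass: model2 re-estimates the pose with the first-pass depth/contact map
            # estimates (and the first-pass betas, joint angles and root position) concatenated
            # onto its input; setting 'adjust_ang_from_est' flags that the inputs now carry a
            # prior estimate to be adjusted rather than regressed from scratch.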



        # print betas_est, root_shift_est, angles_est
        if self.CTRL_PNL['dropout'] == True:
            print OUTPUT_DICT['verts'].shape
            smpl_verts = np.mean(OUTPUT_DICT['verts'], axis = 0)
            dropout_variance = np.std(OUTPUT_DICT['verts'], axis=0)
            dropout_variance = np.linalg.norm(dropout_variance, axis = 1)
        else:
            smpl_verts = OUTPUT_DICT['verts'][0, :, :]
            dropout_variance = None


        smpl_verts = np.concatenate((smpl_verts[:, 1:2] - 0.286 + 0.0143,
                                     smpl_verts[:, 0:1] - 0.286 + 0.0143,
                                     2 * 0.075 - smpl_verts[:, 2:3]), axis=1)
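        # The concatenation above swaps the x/y axes of the SMPL vertices, applies fixed
        # bed-frame offsets, and mirrors z about a fixed height (the 2*0.075 term, presumably
        # the mat surface) so the mesh lines up with the pressure mat and camera frame.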

        smpl_faces = np.array(self.m.f)

        pc_autofil_red = self.trim_pc_sides() #this is the point cloud

        q = OUTPUT_DICT['batch_mdm_est'].data.numpy().reshape(OUTPUT_DICT['batch_mdm_est'].size()[0], 64, 27) * -1
        q = np.mean(q, axis = 0)

        camera_point = [1.09898028, 0.46441343, -1.53]

        if SHOW_SMPL_EST == False:
            smpl_verts *= 0.001

        #print smpl_verts

        viz_type = "3D"

        if viz_type == "2D":
            from visualization_lib import VisualizationLib
            if model2 is not None:
                self.im_sample = INPUT_DICT['batch_images'][0, 4:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
            else:
                self.im_sample = INPUT_DICT['batch_images'][0, 1:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
            self.im_sample_ext = INPUT_DICT['batch_images'][0, 0:, :].squeeze() * 20.  # normalizing_std_constants[0]  #pmat contact
            # self.im_sample_ext2 = INPUT_DICT['batch_images'][im_display_idx, 2:, :].squeeze()*20.#normalizing_std_constants[4]  #sobel
            self.im_sample_ext3 = OUTPUT_DICT['batch_mdm_est'][0, :, :].squeeze().unsqueeze(0) * -1  # est depth output

            # print scores[0, 10:16], 'scores of body rot'

            # print self.im_sample.size(), self.im_sample_ext.size(), self.im_sample_ext2.size(), self.im_sample_ext3.size()

            # self.publish_depth_marker_array(self.im_sample_ext3)



            self.tar_sample = INPUT_DICT['batch_targets']
            self.tar_sample = self.tar_sample[0, :].squeeze() / 1000
            sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
            sc_sample = sc_sample[0, :].squeeze() / 1000


            sc_sample = sc_sample.view(self.output_size_train)

            VisualizationLib().visualize_pressure_map(self.im_sample, sc_sample1, sc_sample,
                                                         # self.im_sample_ext, None, None,
                                                          self.im_sample_ext3, None, None, #, self.tar_sample_val, self.sc_sample_val,
                                                          block=False)

            time.sleep(4)

        elif viz_type == "3D":


            #render everything
            #self.pyRender.render_mesh_pc_bed_pyrender_everything(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                                      pc = pc_autofil_red, pmat = pmat, smpl_render_points = False,
            #                                                      markers = None, dropout_variance = dropout_variance)


            #render in 3D pyrender with pressure mat
            #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                          pc = None, pmat = pmat, smpl_render_points = False,
            #                                          facing_cam_only=False, viz_type = None,
            #                                          markers = None, segment_limbs=False)

            #render in 3D pyrender with segmented limbs
            #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                          pc = None, pmat = None, smpl_render_points = False,
            #                                          facing_cam_only=False, viz_type = None,
            #                                          markers = None, segment_limbs=True)

            #render the error of point cloud points relative to verts
            #self.Render.eval_dist_render_open3d(smpl_verts, smpl_faces, pc_autofil_red, viz_type = 'pc_error',
            #                                      camera_point = camera_point, segment_limbs=False)
            self.Render.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
                                                      pc = pc_autofil_red, pmat = None, smpl_render_points = False,
                                                      facing_cam_only=True, viz_type = 'pc_error',
                                                      markers = None, segment_limbs=False)

            #render the error of verts relative to point cloud points
            #self.Render.eval_dist_render_open3d(smpl_verts, smpl_faces, pc_autofil_red, viz_type = 'mesh_error',
            #                                      camera_point = camera_point, segment_limbs=False)
            #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                          pc = pc_autofil_red, pmat = None, smpl_render_points = False,
            #                                          facing_cam_only=True, viz_type = 'mesh_error',
            #                                          markers = None, segment_limbs=False)

            time.sleep(1)
            self.point_cloud_array = None
    def estimate_pose(self, pmat, bedangle, markers_c, tf_corners,
                      camera_alpha_vert, camera_alpha_horiz, h, kinect_rot_cw):
        mat_size = (64, 27)

        pmat = np.fliplr(
            np.flipud(
                np.clip(pmat.reshape(mat_size) * float(5), a_min=0,
                        a_max=100)))

        if self.CTRL_PNL['cal_noise'] == False:
            pmat = gaussian_filter(pmat, sigma=0.5)

        pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(
            pmat, 0.0, mat_size)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

        pmat_stack = np.array(pmat_stack)
        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
            pmat_contact[pmat_contact > 0] = 100
            pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

        weight_input = self.weight_lbs / 2.20462
        height_input = (self.height_in * 0.0254 - 1) * 100

        batch1 = np.zeros((1, 162))
        if self.gender == 'f':
            batch1[:, 157] += 1
        elif self.gender == 'm':
            batch1[:, 158] += 1
        batch1[:, 160] += weight_input
        batch1[:, 161] += height_input

        if self.CTRL_PNL['normalize_input'] == True:
            self.CTRL_PNL['depth_map_input_est'] = False
            pmat_stack = self.TPL.normalize_network_input(
                pmat_stack, self.CTRL_PNL)
            batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

        pmat_stack = torch.Tensor(pmat_stack)
        batch1 = torch.Tensor(batch1)

        if DROPOUT == True:
            pmat_stack = pmat_stack.repeat(25, 1, 1, 1)
            batch1 = batch1.repeat(25, 1)

        batch = []
        batch.append(pmat_stack)
        batch.append(batch1)

        NUMOFOUTPUTDIMS = 3
        NUMOFOUTPUTNODES_TRAIN = 24
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)

        dropout_variance = None

        smpl_verts = np.concatenate(
            (self.ALL_VERTS[:, 1:2] + 0.0143 + 32 * 0.0286 + .286,
             self.ALL_VERTS[:, 0:1] + 0.0143 + 13.5 * 0.0286,
             -self.ALL_VERTS[:, 2:3]),
            axis=1)

        smpl_faces = np.array(self.m.f)

        pc_autofil_red = self.trim_pc_sides(
            tf_corners, camera_alpha_vert, camera_alpha_horiz, h,
            kinect_rot_cw)  #this is the point cloud

        camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

        if SHOW_SMPL_EST == False:
            smpl_verts *= 0.001

        #print smpl_verts

        print np.min(smpl_verts[:, 0]), np.max(smpl_verts[:, 0])
        print np.min(smpl_verts[:, 1]), np.max(smpl_verts[:, 1])
        print np.min(smpl_verts[:, 2]), np.max(smpl_verts[:, 2])

        #render everything
        self.RESULTS_DICT = self.pyRender.render_mesh_pc_bed_pyrender_everything(
            smpl_verts,
            smpl_faces,
            camera_point,
            bedangle,
            self.RESULTS_DICT,
            pc=pc_autofil_red,
            pmat=pmat,
            smpl_render_points=False,
            markers=[[0.0, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, 0.0],
                     [0.0, 0.0, 0.0]],
            dropout_variance=dropout_variance)

        #render in 3D pyrender with pressure mat
        #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
        #                                          pc = None, pmat = pmat, smpl_render_points = False,
        #                                          facing_cam_only=False, viz_type = None,
        #                                          markers = None, segment_limbs=False)

        #render in 3D pyrender with segmented limbs
        #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
        #                                          pc = None, pmat = None, smpl_render_points = False,
        #                                          facing_cam_only=False, viz_type = None,
        #                                          markers = None, segment_limbs=True)

        #render the error of point cloud points relative to verts
        #self.Render.eval_dist_render_open3d(smpl_verts, smpl_faces, pc_autofil_red, viz_type = 'pc_error',
        #                                      camera_point = camera_point, segment_limbs=False)
        #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
        #                                          pc = pc_autofil_red, pmat = None, smpl_render_points = False,
        #                                          facing_cam_only=True, viz_type = 'pc_error',
        #                                          markers = None, segment_limbs=False)

        #render the error of verts relative to point cloud points
        #self.Render.eval_dist_render_open3d(smpl_verts, smpl_faces, pc_autofil_red, viz_type = 'mesh_error',
        #                                      camera_point = camera_point, segment_limbs=False)
        #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
        #                                          pc = pc_autofil_red, pmat = None, smpl_render_points = False,
        #                                          facing_cam_only=True, viz_type = 'mesh_error',
        #                                          markers = None, segment_limbs=False)

        time.sleep(1)
        self.point_cloud_array = None
    def estimate_real_time(self, filename1, filename2=None):

        pyRender = libRender.pyRenderMesh()
        mat_size = (64, 27)
        import sys
        # insert at 1, 0 is the script path (or '' in REPL)
        sys.path.insert(1, '../sim_camera_resting_scene/lib_py')
        sys.path.insert(1, '../sim_camera_resting_scene/DPNet')
        from unpack_batch_lib_br import UnpackBatchLib
        import convnet_br as convnet

        if torch.cuda.is_available():
            # Use for GPU
            GPU = True
            dtype = torch.cuda.FloatTensor
            print '######################### CUDA is available! #############################'
        else:
            # Use for CPU
            GPU = False
            dtype = torch.FloatTensor
            print '############################## USING CPU #################################'

        from torch.autograd import Variable
        # Load the checkpoints with a device-appropriate map_location: remap from whichever
        # GPU index they were saved on to cuda:0, otherwise fall back to the CPU branch below.
        if GPU == True:
            for i in range(0, 8):
                try:
                    model = torch.load(
                        filename1, map_location={'cuda:' + str(i): 'cuda:0'})
                    model = model.cuda().eval()
                    break
                except Exception:
                    pass
            if filename2 is not None:
                for i in range(0, 8):
                    try:
                        model2 = torch.load(
                            filename2,
                            map_location={'cuda:' + str(i): 'cuda:0'})
                        model2 = model2.cuda().eval()
                        break
                    except Exception:
                        pass
            else:
                model2 = None

        else:
            model = torch.load(filename1, map_location='cpu').eval()
            if filename2 is not None:
                model2 = torch.load(filename2, map_location='cpu').eval()
            else:
                model2 = None

        pub = rospy.Publisher('meshTopic', MeshAttr, queue_size=10)
        #rospy.init_node('talker', anonymous=False)
        while not rospy.is_shutdown():

            #pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(self.CTRL_PNL['pmat_mult']*4), a_min=0, a_max=100)))
            pmat = np.fliplr(
                np.flipud(
                    np.clip(self.pressure.reshape(mat_size) * float(4),
                            a_min=0,
                            a_max=100)))
            #print "max is : ", np.max(pmat)
            #print "sum is : ", np.sum(pmat)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat = gaussian_filter(pmat, sigma=0.5)

            pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(
                pmat, self.bedangle, mat_size, return_height=False)
            print np.shape(pmat_stack)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

            pmat_stack = np.array(pmat_stack)
            if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
                pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
                pmat_contact[pmat_contact > 0] = 100
                pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

            weight_input = WEIGHT_LBS / 2.20462
            height_input = (HEIGHT_IN * 0.0254 - 1) * 100

            batch1 = np.zeros((1, 162))
            if GENDER == 'f':
                batch1[:, 157] += 1
            elif GENDER == 'm':
                batch1[:, 158] += 1
            batch1[:, 160] += weight_input
            batch1[:, 161] += height_input

            if self.CTRL_PNL['normalize_std'] == True:
                self.CTRL_PNL['depth_map_input_est'] = False
                pmat_stack = self.TPL.normalize_network_input(
                    pmat_stack, self.CTRL_PNL)
                batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

            pmat_std_from_mult = [
                'N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369,
                0.0, 25.50538629767412
            ]
            if self.CTRL_PNL['cal_noise'] == False:
                sobel_std_from_mult = [
                    'N/A', 29.80360490415032, 33.33532963163579,
                    34.14427844692501, 0.0, 34.86393494050921
                ]
            else:
                sobel_std_from_mult = [
                    'N/A', 45.61635847182483, 77.74920396659292,
                    88.89398421073700, 0.0, 97.90075708182506
                ]

            self.CTRL_PNL['norm_std_coeffs'] = [
                1. / 41.80684362163343,  # contact
                1. / 45.08513083167194,  # neg est depth
                1. / 43.55800622930469,  # cm est
                1. / pmat_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  # pmat x5
                1. / sobel_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  # pmat sobel
                1. / 1.0,  # OUTPUT DO NOTHING
                1. / 1.0,  # OUTPUT DO NOTHING
                1. / 30.216647403350,  # weight
                1. / 14.629298141231  # height
            ]

            if self.CTRL_PNL['normalize_std'] == False:
                for i in range(9):
                    self.CTRL_PNL['norm_std_coeffs'][i] *= 0.
                    self.CTRL_PNL['norm_std_coeffs'][i] += 1.
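            # norm_std_coeffs holds one 1/std scaling factor per input channel (plus the weight
            # and height entries); when 'normalize_std' is off, the loop above overwrites every
            # factor with 1.0 so the inputs pass through unscaled.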

            pmat_stack = torch.Tensor(pmat_stack)
            batch1 = torch.Tensor(batch1)

            batch = []
            batch.append(pmat_stack)
            batch.append(batch1)

            self.CTRL_PNL['adjust_ang_from_est'] = False
            self.CTRL_PNL['recon_map_output'] = True

            print self.CTRL_PNL['num_input_channels'], batch[0].size(), 'inputs and batch size'

            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpack_batch(
                batch, False, model, self.CTRL_PNL)

            self.CTRL_PNL['first_pass'] = False

            mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)  #/ 16.69545796387731
            mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)  #/ 45.08513083167194
            mdm_est_pos[mdm_est_pos < 0] = 0
            mdm_est_neg[mdm_est_neg > 0] = 0
            mdm_est_neg *= -1
            cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100  #/ 43.55800622930469

            #1. / 16.69545796387731,  # pos est depth
            #1. / 45.08513083167194,  # neg est depth
            #1. / 43.55800622930469,  # cm est

            if model2 is not None:
                batch_cor = []
                batch_cor.append(
                    torch.cat((pmat_stack[:, 0:1, :, :],
                               mdm_est_neg.type(torch.FloatTensor),
                               cm_est.type(torch.FloatTensor),
                               pmat_stack[:, 1:, :, :]), dim=1))

                if self.CTRL_PNL['full_body_rot'] == False:
                    batch_cor.append(
                        torch.cat(
                            (batch1, OUTPUT_DICT['batch_betas_est'].cpu(),
                             OUTPUT_DICT['batch_angles_est'].cpu(),
                             OUTPUT_DICT['batch_root_xyz_est'].cpu()),
                            dim=1))
                elif self.CTRL_PNL['full_body_rot'] == True:
                    batch_cor.append(
                        torch.cat(
                            (batch1, OUTPUT_DICT['batch_betas_est'].cpu(),
                             OUTPUT_DICT['batch_angles_est'].cpu(),
                             OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                             OUTPUT_DICT['batch_root_atan2_est'].cpu()),
                            dim=1))

                self.CTRL_PNL['adjust_ang_from_est'] = True
                self.CTRL_PNL['recon_map_output'] = True
                scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpack_batch(
                    batch_cor, False, model2, self.CTRL_PNL)

            betas_est = np.squeeze(
                OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy())
            angles_est = np.squeeze(OUTPUT_DICT['batch_angles_est_post_clip'])
            root_shift_est = np.squeeze(
                OUTPUT_DICT['batch_root_xyz_est_post_clip'].cpu().numpy())

            #print betas_est.shape, root_shift_est.shape, angles_est.shape

            #print betas_est, root_shift_est, angles_est
            angles_est = angles_est.reshape(72)

            for idx in range(10):
                #print shape_pose_vol[0][idx]
                self.m.betas[idx] = betas_est[idx]

            for idx in range(72):
                self.m.pose[idx] = angles_est[idx]

            init_root = np.array(self.m.pose[0:3]) + 0.000001
            init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
            root_rot = libKinematics.eulerAnglesToRotationMatrix(
                [np.pi, 0.0, np.pi / 2])
            #print root_rot
            trans_root = libKinematics.dir_cos_angles_from_matrix(
                np.matmul(root_rot, init_rootR))

            self.m.pose[0] = trans_root[0]
            self.m.pose[1] = trans_root[1]
            self.m.pose[2] = trans_root[2]

            #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
            # self.m.pose[51] = selection_r

            print self.m.r
            #print OUTPUT_DICT['verts']

            pyRender.mesh_render_pose_bed_orig(self.m, root_shift_est,
                                               self.point_cloud_array,
                                               self.pc_isnew, pmat * 4,
                                               self.markers, self.bedangle)
            self.point_cloud_array = None
    def __init__(self, training_database_file_f, training_database_file_m, opt):
        '''Opens the specified pickle files to get the combined dataset:
        This dataset is a dictionary of pressure maps with the corresponding
        3d position and orientation of the markers associated with it.'''

        # change this to 'direct' when you are doing baseline methods

        self.CTRL_PNL = {}
        self.CTRL_PNL['loss_vector_type'] = opt.losstype

        self.CTRL_PNL['verbose'] = opt.verbose
        self.opt = opt
        self.CTRL_PNL['batch_size'] = 128
        self.CTRL_PNL['num_epochs'] = 201
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = True
        self.CTRL_PNL['incl_ht_wt_channels'] = True
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['lock_root'] = False
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = GPU
        self.CTRL_PNL['dtype'] = dtype
        repeat_real_data_ct = 3
        self.CTRL_PNL['regr_angles'] = False
        self.CTRL_PNL['aws'] = False
        self.CTRL_PNL['depth_map_labels'] = True #can only be true if we have 100% synthetic data for training
        self.CTRL_PNL['depth_map_labels_test'] = True #can only be true if we have 100% synth for testing
        self.CTRL_PNL['depth_map_output'] = self.CTRL_PNL['depth_map_labels']
        self.CTRL_PNL['depth_map_input_est'] = False #do this if we're working in a two-part regression
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est'] #holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True
        self.CTRL_PNL['normalize_input'] = True
        self.CTRL_PNL['all_tanh_activ'] = False
        self.CTRL_PNL['L2_contact'] = False
        self.CTRL_PNL['pmat_mult'] = int(3)
        self.CTRL_PNL['cal_noise'] = True


        if opt.losstype == 'direct':
            self.CTRL_PNL['depth_map_labels'] = False
            self.CTRL_PNL['depth_map_output'] = False

        if self.CTRL_PNL['cal_noise'] == True:
            #self.CTRL_PNL['pmat_mult'] = int(1)
            #self.CTRL_PNL['incl_pmat_cntct_input'] = False #if there's calibration noise we need to recompute this every batch
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est'] == True: #for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['num_input_channels'] += 1


        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'

        if self.CTRL_PNL['depth_map_output'] == True: #we need all the vertices if we're going to regress the depth maps
            self.verts_list = "all"
        else:
            self.verts_list = [1325, 336, 1032, 4515, 1374, 4848, 1739, 5209, 1960, 5423]

        print self.CTRL_PNL['num_epochs'], 'NUM EPOCHS!'
        # Entire pressure dataset with coordinates in world frame

        self.save_name = '_' + opt.losstype + \
                         '_synth_32000' + \
                         '_' + str(self.CTRL_PNL['batch_size']) + 'b' + \
                         '_' + str(self.CTRL_PNL['num_epochs']) + 'e'



        self.mat_size = (NUMOFTAXELS_X, NUMOFTAXELS_Y)
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)
        self.output_size_val = (NUMOFOUTPUTNODES_TEST, NUMOFOUTPUTDIMS)
        self.parents = np.array([4294967295, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21]).astype(np.int32)



        #################################### PREP TRAINING DATA ##########################################
        #load training synth data
        dat_f_synth = TensorPrepLib().load_files_to_database(training_database_file_f, 'synth')
        dat_m_synth = TensorPrepLib().load_files_to_database(training_database_file_m, 'synth')
        dat_f_real = TensorPrepLib().load_files_to_database(training_database_file_f, 'real')
        dat_m_real = TensorPrepLib().load_files_to_database(training_database_file_m, 'real')


        self.train_x_flat = []  # Initialize the training pressure mat list
        self.train_x_flat = TensorPrepLib().prep_images(self.train_x_flat, dat_f_synth, dat_m_synth, num_repeats = 1)
        self.train_x_flat = list(np.clip(np.array(self.train_x_flat) * float(self.CTRL_PNL['pmat_mult']), a_min=0, a_max=100))
        self.train_x_flat = TensorPrepLib().prep_images(self.train_x_flat, dat_f_real, dat_m_real, num_repeats = repeat_real_data_ct)


        if self.CTRL_PNL['cal_noise'] == False:
            self.train_x_flat = PreprocessingLib().preprocessing_blur_images(self.train_x_flat, self.mat_size, sigma=0.5)

        if len(self.train_x_flat) == 0: print("NO TRAINING DATA INCLUDED")

        self.train_a_flat = []  # Initialize the training pressure mat angle list
        self.train_a_flat = TensorPrepLib().prep_angles(self.train_a_flat, dat_f_synth, dat_m_synth, num_repeats = 1)
        self.train_a_flat = TensorPrepLib().prep_angles(self.train_a_flat, dat_f_real, dat_m_real, num_repeats = repeat_real_data_ct)

        if self.CTRL_PNL['depth_map_labels'] == True:
            self.depth_contact_maps = [] #Initialize the precomputed depth and contact maps. only synth has this label.
            self.depth_contact_maps = TensorPrepLib().prep_depth_contact(self.depth_contact_maps, dat_f_synth, dat_m_synth, num_repeats = 1)
        else:
            self.depth_contact_maps = None

        if self.CTRL_PNL['depth_map_input_est'] == True:
            self.depth_contact_maps_input_est = [] #Initialize the precomputed depth and contact map input estimates
            self.depth_contact_maps_input_est = TensorPrepLib().prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                                                                             dat_f_synth, dat_m_synth, num_repeats = 1)
            self.depth_contact_maps_input_est = TensorPrepLib().prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                                                                             dat_f_real, dat_m_real, num_repeats = repeat_real_data_ct)
        else:
            self.depth_contact_maps_input_est = None

        #self.CTRL_PNL['clip_sobel'] = False
        #stack the bed height array on the pressure image as well as a sobel filtered image
        train_xa = PreprocessingLib().preprocessing_create_pressure_angle_stack(self.train_x_flat,
                                                                                self.train_a_flat,
                                                                                self.mat_size,
                                                                                self.CTRL_PNL)


        #print np.shape(train_xa), 'shape@'
        train_xa = np.array(train_xa)

        if self.CTRL_PNL['cal_noise'] == True:
            train_xa[:, 0, :, :] = np.array(PreprocessingLib().preprocessing_blur_images(list(np.array(train_xa)[:, 0, :, :]), self.mat_size, sigma=0.5)).reshape(-1, self.mat_size[0], self.mat_size[1])
            train_xa[:, 1, :, :] = np.array(PreprocessingLib().preprocessing_blur_images(list(np.array(train_xa)[:, 1, :, :]), self.mat_size, sigma=0.5)).reshape(-1, self.mat_size[0], self.mat_size[1])


        #stack the depth and contact mesh images (and possibly a pmat contact image) together
        train_xa = TensorPrepLib().append_input_depth_contact(np.array(train_xa),
                                                              CTRL_PNL = self.CTRL_PNL,
                                                              mesh_depth_contact_maps_input_est = self.depth_contact_maps_input_est,
                                                              mesh_depth_contact_maps = self.depth_contact_maps)


        self.train_x = train_xa


        self.train_y_flat = []  # Initialize the training ground truth list
        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_f_synth, num_repeats = 1,
                                                        z_adj = -0.075, gender = "f", is_synth = True,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])
        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_m_synth, num_repeats = 1,
                                                        z_adj = -0.075, gender = "m", is_synth = True,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])

        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_f_real, num_repeats = repeat_real_data_ct,
                                                        z_adj = 0.0, gender = "m", is_synth = False,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])
        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_m_real, num_repeats = repeat_real_data_ct,
                                                        z_adj = 0.0, gender = "m", is_synth = False,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])
class PhysicalTrainer():
    '''Gets the dictionary of pressure maps from the training database,
    and will have API to do all sorts of training with it.'''
    def __init__(self, training_database_file_f, training_database_file_m, opt):
        '''Opens the specified pickle files to get the combined dataset:
        This dataset is a dictionary of pressure maps with the corresponding
        3d position and orientation of the markers associated with it.'''

        # change this to 'direct' when you are doing baseline methods

        self.CTRL_PNL = {}
        self.CTRL_PNL['loss_vector_type'] = opt.losstype

        self.CTRL_PNL['verbose'] = opt.verbose
        self.opt = opt
        self.CTRL_PNL['batch_size'] = 128
        self.CTRL_PNL['num_epochs'] = 201
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = True
        self.CTRL_PNL['incl_ht_wt_channels'] = True
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['lock_root'] = False
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = GPU
        self.CTRL_PNL['dtype'] = dtype
        repeat_real_data_ct = 3
        self.CTRL_PNL['regr_angles'] = False
        self.CTRL_PNL['aws'] = False
        self.CTRL_PNL['depth_map_labels'] = True #can only be true if we have 100% synthetic data for training
        self.CTRL_PNL['depth_map_labels_test'] = True #can only be true if we have 100% synth for testing
        self.CTRL_PNL['depth_map_output'] = self.CTRL_PNL['depth_map_labels']
        self.CTRL_PNL['depth_map_input_est'] = False #do this if we're working in a two-part regression
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est'] #holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True
        self.CTRL_PNL['normalize_input'] = True
        self.CTRL_PNL['all_tanh_activ'] = False
        self.CTRL_PNL['L2_contact'] = False
        self.CTRL_PNL['pmat_mult'] = int(3)
        self.CTRL_PNL['cal_noise'] = True


        if opt.losstype == 'direct':
            self.CTRL_PNL['depth_map_labels'] = False
            self.CTRL_PNL['depth_map_output'] = False

        if self.CTRL_PNL['cal_noise'] == True:
            #self.CTRL_PNL['pmat_mult'] = int(1)
            #self.CTRL_PNL['incl_pmat_cntct_input'] = False #if there's calibration noise we need to recompute this every batch
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est'] == True: #for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['num_input_channels'] += 1


        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'

        if self.CTRL_PNL['depth_map_output'] == True: #we need all the vertices if we're going to regress the depth maps
            self.verts_list = "all"
        else:
            self.verts_list = [1325, 336, 1032, 4515, 1374, 4848, 1739, 5209, 1960, 5423]

        print self.CTRL_PNL['num_epochs'], 'NUM EPOCHS!'
        # Entire pressure dataset with coordinates in world frame

        self.save_name = '_' + opt.losstype + \
                         '_synth_32000' + \
                         '_' + str(self.CTRL_PNL['batch_size']) + 'b' + \
                         '_' + str(self.CTRL_PNL['num_epochs']) + 'e'



        self.mat_size = (NUMOFTAXELS_X, NUMOFTAXELS_Y)
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)
        self.output_size_val = (NUMOFOUTPUTNODES_TEST, NUMOFOUTPUTDIMS)
        self.parents = np.array([4294967295, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21]).astype(np.int32)



        #################################### PREP TRAINING DATA ##########################################
        #load training synth data
        dat_f_synth = TensorPrepLib().load_files_to_database(training_database_file_f, 'synth')
        dat_m_synth = TensorPrepLib().load_files_to_database(training_database_file_m, 'synth')
        dat_f_real = TensorPrepLib().load_files_to_database(training_database_file_f, 'real')
        dat_m_real = TensorPrepLib().load_files_to_database(training_database_file_m, 'real')


        self.train_x_flat = []  # Initialize the training pressure mat list
        self.train_x_flat = TensorPrepLib().prep_images(self.train_x_flat, dat_f_synth, dat_m_synth, num_repeats = 1)
        self.train_x_flat = list(np.clip(np.array(self.train_x_flat) * float(self.CTRL_PNL['pmat_mult']), a_min=0, a_max=100))
        self.train_x_flat = TensorPrepLib().prep_images(self.train_x_flat, dat_f_real, dat_m_real, num_repeats = repeat_real_data_ct)


        if self.CTRL_PNL['cal_noise'] == False:
            self.train_x_flat = PreprocessingLib().preprocessing_blur_images(self.train_x_flat, self.mat_size, sigma=0.5)

        if len(self.train_x_flat) == 0: print("NO TRAINING DATA INCLUDED")

        self.train_a_flat = []  # Initialize the training pressure mat angle list
        self.train_a_flat = TensorPrepLib().prep_angles(self.train_a_flat, dat_f_synth, dat_m_synth, num_repeats = 1)
        self.train_a_flat = TensorPrepLib().prep_angles(self.train_a_flat, dat_f_real, dat_m_real, num_repeats = repeat_real_data_ct)

        if self.CTRL_PNL['depth_map_labels'] == True:
            self.depth_contact_maps = [] #Initialize the precomputed depth and contact maps. only synth has this label.
            self.depth_contact_maps = TensorPrepLib().prep_depth_contact(self.depth_contact_maps, dat_f_synth, dat_m_synth, num_repeats = 1)
        else:
            self.depth_contact_maps = None

        if self.CTRL_PNL['depth_map_input_est'] == True:
            self.depth_contact_maps_input_est = [] #Initialize the precomputed depth and contact map input estimates
            self.depth_contact_maps_input_est = TensorPrepLib().prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                                                                             dat_f_synth, dat_m_synth, num_repeats = 1)
            self.depth_contact_maps_input_est = TensorPrepLib().prep_depth_contact_input_est(self.depth_contact_maps_input_est,
                                                                                             dat_f_real, dat_m_real, num_repeats = repeat_real_data_ct)
        else:
            self.depth_contact_maps_input_est = None

        #self.CTRL_PNL['clip_sobel'] = False
        #stack the bed height array on the pressure image as well as a sobel filtered image
        train_xa = PreprocessingLib().preprocessing_create_pressure_angle_stack(self.train_x_flat,
                                                                                self.train_a_flat,
                                                                                self.mat_size,
                                                                                self.CTRL_PNL)


        #print np.shape(train_xa), 'shape@'
        train_xa = np.array(train_xa)

        if self.CTRL_PNL['cal_noise'] == True:
            train_xa[:, 0, :, :] = np.array(PreprocessingLib().preprocessing_blur_images(list(np.array(train_xa)[:, 0, :, :]), self.mat_size, sigma=0.5)).reshape(-1, self.mat_size[0], self.mat_size[1])
            train_xa[:, 1, :, :] = np.array(PreprocessingLib().preprocessing_blur_images(list(np.array(train_xa)[:, 1, :, :]), self.mat_size, sigma=0.5)).reshape(-1, self.mat_size[0], self.mat_size[1])


        #stack the depth and contact mesh images (and possibly a pmat contact image) together
        train_xa = TensorPrepLib().append_input_depth_contact(np.array(train_xa),
                                                              CTRL_PNL = self.CTRL_PNL,
                                                              mesh_depth_contact_maps_input_est = self.depth_contact_maps_input_est,
                                                              mesh_depth_contact_maps = self.depth_contact_maps)


        self.train_x = train_xa


        self.train_y_flat = []  # Initialize the training ground truth list
        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_f_synth, num_repeats = 1,
                                                        z_adj = -0.075, gender = "f", is_synth = True,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])
        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_m_synth, num_repeats = 1,
                                                        z_adj = -0.075, gender = "m", is_synth = True,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])

        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_f_real, num_repeats = repeat_real_data_ct,
                                                        z_adj = 0.0, gender = "f", is_synth = False,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])
        self.train_y_flat = TensorPrepLib().prep_labels(self.train_y_flat, dat_m_real, num_repeats = repeat_real_data_ct,
                                                        z_adj = 0.0, gender = "m", is_synth = False,
                                                        loss_vector_type = self.CTRL_PNL['loss_vector_type'],
                                                        initial_angle_est = self.CTRL_PNL['adjust_ang_from_est'],
                                                        full_body_rot=self.CTRL_PNL['full_body_rot'])



    def get_std_of_types(self):

        self.train_y_flat = np.array(self.train_y_flat)

        print(np.shape(self.train_y_flat), 'size of the training database output')


        joint_positions = self.train_y_flat[:, 0:72].reshape(-1, 24, 3)/1000
        #print joint_positions.shape
        mean_joint_positions = np.mean(joint_positions, axis = 0)
        joint_positions_rel_mean = joint_positions - mean_joint_positions
        euclidean = np.linalg.norm(joint_positions_rel_mean, axis = 2)
        #print euclidean.shape
        std_of_euclidean = np.std(euclidean)
        print std_of_euclidean, 'std of euclidean'
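        # Joint positions are converted from millimeters to meters, centered on the per-joint
        # mean, reduced to a per-joint Euclidean deviation, and a single std is then taken
        # over all joints and samples.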

        #print joint_positions[0, :, :]
        #print joint_positions_rel_mean[0, :, :]
        #print euclidean[0, :]
        #print betas[0, :]

        #print joint_angles[0, :]
        #print mean_joint_angles

        betas = self.train_y_flat[:, 72:82]
        std_of_betas = np.std(betas)
        print std_of_betas, 'std of betas'


        #now get the std of the root joint in atan2
        joints = self.train_y_flat[:, 82:82+72]
        mean_joints = np.mean(joints, axis = 0)
        joints_rel_mean = joints - mean_joints
        std_of_joints = np.std(joints_rel_mean)
        print std_of_joints, 'std joints'


        #now get the std of the root joint in atan2
        joint_atan2x = np.cos(self.train_y_flat[:, 82:85])
        joint_atan2y = np.sin(self.train_y_flat[:, 82:85])
        joint_atan2 = np.concatenate((joint_atan2x, joint_atan2y), axis = 1)
        mean_joint_atan2 = np.mean(joint_atan2, axis = 0)
        joint_atan2_rel_mean = joint_atan2 - mean_joint_atan2
        std_of_joint_atan2 = np.std(joint_atan2_rel_mean)
        print std_of_joint_atan2, 'std of atan2 full body rot'
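        # The three root-rotation angles are mapped to (cos, sin) pairs before taking the std,
        # presumably to avoid the 2*pi wrap-around that would distort a std of raw angles.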

        #for i in range(72):
        #    print i, np.min(self.train_y_flat[:, 82+i]), np.max(self.train_y_flat[:, 82+i])


        #now get the std of the depth matrix
        depth_matrix_down = np.copy(self.train_x[:, 4])
        depth_matrix_down[depth_matrix_down > 0] = 0
        depth_matrix_down = np.abs(depth_matrix_down)
        mean_depth_mats = np.mean(depth_matrix_down, axis = 0)
        depth_matrix_rel_mean = depth_matrix_down - mean_depth_mats
        std_of_depth_matrix_down = np.std(depth_matrix_rel_mean)
        print std_of_depth_matrix_down, 'std of depth matrix'

        #now get the std of the contact matrix
        cntct_matrix_down = np.copy(self.train_x[:, 5])
        cntct_matrix_down = np.abs(cntct_matrix_down)
        mean_cntct_mats = np.mean(cntct_matrix_down, axis = 0)
        cntct_matrix_rel_mean = cntct_matrix_down - mean_cntct_mats
        std_of_cntct_matrix_down = np.std(cntct_matrix_rel_mean)
        print std_of_cntct_matrix_down, 'std of cntct matrix'
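        # Channels 4 and 5 of train_x are treated here as the mesh depth map and the contact
        # map (presumably the maps appended by append_input_depth_contact); for the depth map,
        # positive values are zeroed and the remaining negative, into-the-bed values are made
        # positive before the std is computed.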


        #####################################################################
        ########DO NOT SUBTRACT THE MEAN##############
        print "now computing standard dev of the input"
        print np.shape(self.train_x)

        weight_matrix = np.repeat(self.train_y_flat[:, 160:161], 64*27, axis=1).reshape(np.shape(self.train_y_flat)[0], 1, 64, 27)
        height_matrix = np.repeat(self.train_y_flat[:, 161:162], 64*27, axis=1).reshape(np.shape(self.train_y_flat)[0], 1, 64, 27)

        print 'got here'
        print weight_matrix.shape, 'weight mat'
        print height_matrix.shape, 'height mat'
        print weight_matrix[0, :, 0:2, 0:2]
        print height_matrix[0, :, 0:2, 0:2]

        input_array = np.concatenate((self.train_x, weight_matrix, height_matrix), axis = 1)
        print input_array[0, 6, 0:2, 0:2]
        print input_array[0, 7, 0:2, 0:2]
        print input_array.shape

        if self.CTRL_PNL['depth_map_input_est'] == True:
            input_types = ['pmat_contact   ',
                           'mdm est pos    ', 'mdm est neg    ', 'cm est         ',
                           'pmat x5 clipped', 'pmat sobel     ', 'bed angle      ',
                           'depth output   ', 'contact output ',
                           'weight_matrix  ', 'height_matrix ']
        else:
            input_types = ['pmat_contact   ',
                           'pmat x5 clipped', 'pmat sobel     ', 'bed angle      ',
                           'depth output   ', 'contact output ',
                           'weight_matrix  ', 'height_matrix  ']


        for i in range(len(input_types)):
            some_layer = np.copy(input_array[:, i, :, :])
            mean_some_layer = np.mean(some_layer, axis = 0)
            some_layer_rel_mean = some_layer - mean_some_layer
            std_some_layer = np.std(some_layer_rel_mean, axis = 0)
            mean_some_layer = np.mean(mean_some_layer)
            std_some_layer = np.mean(std_some_layer)
            print i, input_types[i], '  mean is: %.3f  \t  std is: %.14f \t  min/max: %.3f, \t %.3f' %(mean_some_layer, std_some_layer, np.min(some_layer), np.max(some_layer))


        num_epochs = self.CTRL_PNL['num_epochs']
        hidden_dim = 12
        kernel_size = 10

        print "GOT HERE!!"

        #synthetic joints STD: 0.1282715100608753
        #synthetic betas STD: 1.7312621950698526
        #synthetic angles STD: 0.2130542427733348

    def get_bincount_ints_images(self):

        self.train_x_flat = np.array(self.train_x_flat).astype(int)

        print np.shape(self.train_x_flat), 'size of the training database'

        num_images = np.shape(self.train_x_flat)[0]

        self.train_x_flat = self.train_x_flat.flatten()
        self.train_x_flat[self.train_x_flat > 100] = 100

        #print self.train_x_flat[0:100]


        bin_count = []

        for i in range(0, 101):
            bin_count.append(np.count_nonzero(self.train_x_flat == i))

        bin_count = np.array(bin_count)
        print bin_count

        bin_count = bin_count/float(num_images)
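        # bin_count is now the average number of taxels per force value per image, which is
        # intended to be on the same scale as the hard-coded 'real' histogram below.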


        import matplotlib.pyplot as plt


        real = [9.32182891e+02, 2.41353293e+01, 2.22440547e+01, 2.01024808e+01,
                1.84882806e+01, 1.74491018e+01, 1.66834902e+01, 1.54379812e+01,
                1.45082121e+01, 1.36798118e+01, 1.33175364e+01, 1.25483319e+01,
                1.20975192e+01, 1.20109495e+01, 1.15692900e+01, 1.09368691e+01,
                1.09317365e+01, 1.05028229e+01, 1.02795552e+01, 9.92412318e+00,
                8.99520958e+00, 8.80025663e+00, 8.40556031e+00, 8.03772455e+00,
                7.69281437e+00, 7.30863986e+00, 6.95970915e+00, 6.62763045e+00,
                6.35337896e+00, 6.11274594e+00, 5.87134303e+00, 5.63250642e+00,
                5.39905902e+00, 5.11334474e+00, 4.94893071e+00, 4.92908469e+00,
                4.62728828e+00, 4.45457656e+00, 4.35637297e+00, 4.24901625e+00,
                3.91548332e+00, 3.83036784e+00, 3.63336185e+00, 3.66638152e+00,
                3.56971771e+00, 3.41120616e+00, 3.39136014e+00, 3.25355004e+00,
                3.23199316e+00, 3.17416595e+00, 3.11591104e+00, 3.09546621e+00,
                3.02053037e+00, 2.89461078e+00, 2.95816938e+00, 2.94200171e+00,
                2.85312233e+00, 2.82566296e+00, 2.68169376e+00, 2.67639008e+00,
                2.62412318e+00, 2.51035073e+00, 2.48083832e+00, 2.38879384e+00,
                2.39315654e+00, 2.35825492e+00, 2.32557742e+00, 2.23798118e+00,
                2.26236099e+00, 2.23028229e+00, 2.20923867e+00, 2.13840890e+00,
                2.14388366e+00 ,2.11608212e+00, 2.09324209e+00, 2.02095808e+00,
                1.92814371e+00, 1.98785287e+00, 1.91676647e+00, 1.79811805e+00,
                1.73370402e+00, 1.70384944e+00, 1.70940975e+00, 1.65089820e+00,
                1.55705731e+00, 1.47305389e+00, 1.47596236e+00, 1.40795552e+00,
                1.32814371e+00, 1.26595381e+00, 1.22309666e+00, 1.16133447e+00,
                1.06261762e+00, 9.98374679e-01, 9.68434559e-01, 9.25919589e-01,
                7.92044482e-01, 7.42857143e-01, 7.16595381e-01, 6.48759624e-01,
                2.61242857e+02]


        print bin_count

        plt.plot(np.arange(0, 99), real[1:100], 'r.', label='real')
        plt.plot(np.arange(0, 99), bin_count[1:100], 'b.', label='synth')
        plt.ylabel('Taxel Force Count \n (force rounded to nearest whole #)')
        plt.xlabel('Force (scale of 0 to 100)')
        plt.legend()
        plt.show()



    def plot_bincounts(self):
        # pyplot is imported locally in get_bincount_ints_images, so import it here as well.
        import matplotlib.pyplot as plt

        real = [9.32182891e+02, 2.41353293e+01, 2.22440547e+01, 2.01024808e+01,
                1.84882806e+01, 1.74491018e+01, 1.66834902e+01, 1.54379812e+01,
                1.45082121e+01, 1.36798118e+01, 1.33175364e+01, 1.25483319e+01,
                1.20975192e+01, 1.20109495e+01, 1.15692900e+01, 1.09368691e+01,
                1.09317365e+01, 1.05028229e+01, 1.02795552e+01, 9.92412318e+00,
                8.99520958e+00, 8.80025663e+00, 8.40556031e+00, 8.03772455e+00,
                7.69281437e+00, 7.30863986e+00, 6.95970915e+00, 6.62763045e+00,
                6.35337896e+00, 6.11274594e+00, 5.87134303e+00, 5.63250642e+00,
                5.39905902e+00, 5.11334474e+00, 4.94893071e+00, 4.92908469e+00,
                4.62728828e+00, 4.45457656e+00, 4.35637297e+00, 4.24901625e+00,
                3.91548332e+00, 3.83036784e+00, 3.63336185e+00, 3.66638152e+00,
                3.56971771e+00, 3.41120616e+00, 3.39136014e+00, 3.25355004e+00,
                3.23199316e+00, 3.17416595e+00, 3.11591104e+00, 3.09546621e+00,
                3.02053037e+00, 2.89461078e+00, 2.95816938e+00, 2.94200171e+00,
                2.85312233e+00, 2.82566296e+00, 2.68169376e+00, 2.67639008e+00,
                2.62412318e+00, 2.51035073e+00, 2.48083832e+00, 2.38879384e+00,
                2.39315654e+00, 2.35825492e+00, 2.32557742e+00, 2.23798118e+00,
                2.26236099e+00, 2.23028229e+00, 2.20923867e+00, 2.13840890e+00,
                2.14388366e+00 ,2.11608212e+00, 2.09324209e+00, 2.02095808e+00,
                1.92814371e+00, 1.98785287e+00, 1.91676647e+00, 1.79811805e+00,
                1.73370402e+00, 1.70384944e+00, 1.70940975e+00, 1.65089820e+00,
                1.55705731e+00, 1.47305389e+00, 1.47596236e+00, 1.40795552e+00,
                1.32814371e+00, 1.26595381e+00, 1.22309666e+00, 1.16133447e+00,
                1.06261762e+00, 9.98374679e-01, 9.68434559e-01, 9.25919589e-01,
                7.92044482e-01, 7.42857143e-01, 7.16595381e-01, 6.48759624e-01,
                2.61242857e+02]

        synth = [1343.22232192,    4.07216977,    3.87526332,    3.73279629,    3.56068113,
                3.46481236,    3.41259655,    3.32685106,    3.2357806 ,    3.21613872,
                3.15151752,    3.10880081,    3.08726691,    3.03938129,    3.03341266,
                3.03470001,    2.98244519,    2.95613248,    2.90116642,    2.87887181,
                2.87982757,    2.85770851,    2.84108996,    2.80812593,    2.75975267,
                2.69887649,    2.64839666,    2.63983381,    2.59725365,    2.56867832,
                2.53407584,    2.52746353,    2.47682765,    2.46282281,    2.4540064,
                2.4299173 ,    2.41727783,    2.38047125,    2.38825388,    2.34370367,
                2.34918468,    2.31224155,    2.3065655 ,    2.30047983,    2.27030506,
                2.26591636,    2.25466178,    2.23172349,    2.22421393,    2.23084575,
                2.18783647,    2.17234922,    2.19435125,    2.15159554,    2.14320824,
                2.16341578,    2.12971054,    2.11356012,    2.12680424,    2.0678786,
                2.10441211,    2.05186471,    2.08133729,    2.06109074,    2.0262932,
                2.0886713 ,    1.99847858,    1.97450651,    1.97667161,    1.96077475,
                1.94228369,    1.92377311,    1.93130218,    1.91366935,    1.88341656,
                1.87058204,    1.86783179,    1.85429508,    1.83785207,    1.8299134,
                1.83151283,    1.81274869,    1.79724194,    1.7861434 ,    1.7733479,
                1.75464227,    1.74748381,    1.73125536,    1.74069595,    1.71299836,
                1.69971522,    1.68054147,    1.65883202,    1.65775923,    1.6430717,
                1.63571819,    1.60712335,    1.59345011,    1.59040727,    1.57058984,
                138.61369665]



        plt.plot(np.arange(0, 99), real[1:100], '-r', label='real')
        plt.plot(np.arange(0, 99), synth[1:100], '-b', label='synth')
        plt.ylabel('Taxel Force Count \n (force rounded to nearest whole #)')
        plt.xlabel('Force (scale of 0 to 100)')
        plt.legend()
        plt.show()
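
# A minimal usage sketch (not part of the original file): assuming the module-level
# constants referenced above (GPU, dtype, NUMOFTAXELS_X/Y, NUMOFOUTPUTNODES_TRAIN/TEST,
# NUMOFOUTPUTDIMS, etc.) are defined and that `opt` provides .losstype and .verbose, the
# dataset statistics above could be produced roughly like this. The option defaults and
# pickle paths are hypothetical placeholders, kept commented out so they do not run on import.
#
#     import optparse
#     p = optparse.OptionParser()
#     p.add_option('--losstype', action='store', type='string', dest='losstype', default='anglesDC')
#     p.add_option('--verbose', action='store_true', dest='verbose', default=False)
#     opt, args = p.parse_args()
#
#     train_f = ['/home/henry/data/synth/train_f_example.p']
#     train_m = ['/home/henry/data/synth/train_m_example.p']
#     trainer = PhysicalTrainer(train_f, train_m, opt)
#     trainer.get_std_of_types()
#     trainer.get_bincount_ints_images()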