# Example #1
    def validate_convnet(self, verbose=False, n_batches=None):
        """Begin a validation pass over self.test_loader with the kinematic
        unpacking path.

        verbose:   unused in this visible body.
        n_batches: unused in this visible body (this example appears
                   truncated after the first forward pass).
        """
        # With Monte-Carlo dropout enabled, keep the network in train mode
        # so dropout stays active at inference time; otherwise eval mode.
        if DROPOUT == True:
            self.model.train()
        else:
            self.model.eval()
        loss = 0.
        n_examples = 0

        error_list = []

        for batch_i, batch in enumerate(self.test_loader):

            if DROPOUT == True:
                # Replicate each sample 25x so dropout yields 25 stochastic
                # forward passes per example.
                batch[0] = batch[0].repeat(25, 1, 1, 1)
                batch[1] = batch[1].repeat(25, 1)
            #self.model.train()
            # Forward pass through the kinematic model; returns raw scores
            # plus input/output dictionaries of batch tensors.
            scores, INPUT_DICT, OUTPUT_DICT = \
                UnpackBatchLib().unpackage_batch_kin_pass(batch, is_training=False, model=self.model,
                                                          CTRL_PNL=self.CTRL_PNL)
# Example #2
    def validate_convnet(self, verbose=False, n_batches=None):
        """Run one pass over self.test_loader, accumulate an L1 validation
        loss, archive per-item estimates into self.dat, optionally
        visualize each batch, then pickle self.dat to disk.

        verbose:   unused in this body.
        n_batches: if truthy, stop after this many batches.
        """
        # With Monte-Carlo dropout enabled, keep the network in train mode
        # so dropout stays active at inference time; otherwise eval mode.
        if DROPOUT == True:
            self.model.train()
        else:
            self.model.eval()
        loss = 0.
        n_examples = 0

        for batch_i, batch in enumerate(self.test_loader):

            if DROPOUT == True:
                # Replicate each sample 25x so dropout yields 25 stochastic
                # forward passes per example.
                batch[0] = batch[0].repeat(25, 1, 1, 1)
                batch[1] = batch[1].repeat(25, 1)
            #self.model.train()

            if self.CTRL_PNL['loss_vector_type'] == 'direct':
                # Direct-regression network path.
                scores, INPUT_DICT, OUTPUT_DICT = \
                    UnpackBatchLib().unpackage_batch_dir_pass(batch, is_training=False, model=self.model,
                                                              CTRL_PNL=self.CTRL_PNL)
                self.criterion = nn.L1Loss()
                # Scores are regressed toward zero, so the loss target is a
                # zero tensor of matching shape.
                scores_zeros = Variable(torch.Tensor(np.zeros((batch[1].shape[0], scores.size()[1]))).type(dtype),
                                        requires_grad=False)
                loss += self.criterion(scores, scores_zeros).data.item()



            elif self.CTRL_PNL['loss_vector_type'] == 'anglesR' or self.CTRL_PNL['loss_vector_type'] == 'anglesDC' or self.CTRL_PNL['loss_vector_type'] == 'anglesEU':
                # Kinematic (angle-parameterized) network path.
                # NOTE(review): is_training=True during validation — confirm
                # this is intentional (the sibling variants pass False).
                scores, INPUT_DICT, OUTPUT_DICT = \
                    UnpackBatchLib().unpackage_batch_kin_pass(batch, is_training=True, model=self.model,
                                                              CTRL_PNL=self.CTRL_PNL)

                self.CTRL_PNL['first_pass'] = False

                self.criterion = nn.L1Loss()
                scores_zeros = Variable(torch.Tensor(np.zeros((batch[0].shape[0], scores.size()[1]))).type(dtype),
                                        requires_grad=False)

                # Only score columns 10:34 contribute to the reported loss;
                # the /10. rescale is a magic constant — TODO confirm units.
                loss_curr = self.criterion(scores[:, 10:34], scores_zeros[:, 10:34]).data.item() / 10.

                print loss_curr, 'loss'

                loss += loss_curr

                print scores[0, :], "SCORES!!"


                print OUTPUT_DICT['batch_angles_est'].shape, n_examples
                # Archive per-item network estimates (depth map, contact map,
                # SMPL angles/betas/root) for the pickle dump at the end.
                for item in range(OUTPUT_DICT['batch_angles_est'].shape[0]):
                    self.dat['mdm_est'].append(OUTPUT_DICT['batch_mdm_est'][item].cpu().numpy().astype(float32))
                    self.dat['cm_est'].append(OUTPUT_DICT['batch_cm_est'][item].cpu().numpy().astype(int16))
                    self.dat['angles_est'].append(OUTPUT_DICT['batch_angles_est'][item].cpu().numpy().astype(float32))
                    self.dat['root_xyz_est'].append(OUTPUT_DICT['batch_root_xyz_est'][item].cpu().numpy().astype(float32))
                    self.dat['betas_est'].append(OUTPUT_DICT['batch_betas_est'][item].cpu().numpy().astype(float32))
                    if self.CTRL_PNL['full_body_rot'] == True:
                        self.dat['root_atan2_est'].append(OUTPUT_DICT['batch_root_atan2_est'][item].cpu().numpy().astype(float32))

            n_examples += self.CTRL_PNL['batch_size']
            #print n_examples

            if n_batches and (batch_i >= n_batches):
                break


            # First iteration: targets_print/targets_est_print don't exist
            # yet, so the torch.cat raises NameError and the except branch
            # initializes them. NOTE(review): bare except also swallows any
            # other error here — consider narrowing to NameError.
            try:
                targets_print = torch.cat([targets_print, torch.mean(INPUT_DICT['batch_targets'], dim = 0).unsqueeze(0)], dim=0)
                targets_est_print = torch.cat([targets_est_print, torch.mean(OUTPUT_DICT['batch_targets_est'], dim = 0).unsqueeze(0)], dim=0)
            except:

                targets_print = torch.mean(INPUT_DICT['batch_targets'], dim = 0).unsqueeze(0)
                targets_est_print = torch.mean(OUTPUT_DICT['batch_targets_est'], dim = 0).unsqueeze(0)


            print targets_print.shape, INPUT_DICT['batch_targets'].shape
            print targets_est_print.shape, OUTPUT_DICT['batch_targets_est'].shape


            # Print the per-batch error for the most recent batch-mean row.
            if GPU == True:
                error_norm, error_avg, _ = VisualizationLib().print_error_val(targets_print[-2:-1,:].cpu(),
                                                                                   targets_est_print[-2:-1,:].cpu(),
                                                                                   self.output_size_val,
                                                                                   self.CTRL_PNL['loss_vector_type'],
                                                                                   data='validate')
            else:
                error_norm, error_avg, _ = VisualizationLib().print_error_val(targets_print[-2:-1,:],
                                                                              targets_est_print[-2:-1,:],
                                                                                   self.output_size_val,
                                                                                   self.CTRL_PNL['loss_vector_type'],
                                                                                   data='validate')

            for item in self.dat:
                print item, len(self.dat[item])

            if self.opt.visualize == True:
                # NOTE(review): `loss` is rescaled in place every visualized
                # batch (÷n_examples, ×100, ×1000) — it is not a pure running
                # sum after this point; confirm intended.
                loss /= n_examples
                loss *= 100
                loss *= 1000

                print INPUT_DICT['batch_images'].size()
                NUM_IMAGES = INPUT_DICT['batch_images'].size()[0]

                for image_ct in range(NUM_IMAGES):
                    # #self.im_sampleval = self.im_sampleval[:,0,:,:]
                    self.im_sampleval = INPUT_DICT['batch_images'][image_ct, :].squeeze()
                    # Targets/estimates are divided by 1000 — presumably mm
                    # to meters; confirm against the data pipeline.
                    self.tar_sampleval = INPUT_DICT['batch_targets'][image_ct, :].squeeze() / 1000
                    self.sc_sampleval = OUTPUT_DICT['batch_targets_est'][image_ct, :].squeeze() / 1000
                    self.sc_sampleval = self.sc_sampleval.view(24, 3)

                    self.im_sample2 = OUTPUT_DICT['batch_mdm_est'].data[image_ct, :].squeeze()


                    if GPU == True:
                        VisualizationLib().visualize_pressure_map(self.im_sampleval.cpu(), self.tar_sampleval.cpu(), self.sc_sampleval.cpu(), block=False)
                    else:
                        VisualizationLib().visualize_pressure_map(self.im_sampleval, self.tar_sampleval, self.sc_sampleval, self.im_sample2+50, block=False)
                    time.sleep(1)


        #pkl.dump(self.dat,open('/media/henry/multimodal_data_2/'+self.filename+'_output0p7.p', 'wb'))
        # NOTE(review): hard-coded absolute output path; the file handle is
        # never closed explicitly.
        pkl.dump(self.dat,open('/home/henry/'+self.filename+'_output_46k_FIX_100e_htwt_clns0p1_V2.p', 'wb'))
    def validate_convnet(self, verbose=False, n_batches=None):
        """Two-stage validation: run the base kinematic network, feed its
        depth/contact estimates into a correction network (self.model_cor),
        and report aggregate joint errors over the whole run.

        verbose:   unused in this body.
        n_batches: if truthy, stop after this many batches (a hard cap of
                   100 batches is also applied below).
        """
        self.model.eval()
        self.model_cor.eval()
        #self.model_cor2.eval()
        loss = 0.
        n_examples = 0

        for batch_i, batch in enumerate(self.test_loader):
            # Hard cap: never process more than 100 batches.
            if batch_i == 100: break

            if self.CTRL_PNL['loss_vector_type'] == 'direct':
                # Direct-regression path (no correction network).
                scores, INPUT_DICT, OUTPUT_DICT = \
                    UnpackBatchLib().unpackage_batch_dir_pass(batch, is_training=False, model=self.model,
                                                              CTRL_PNL=self.CTRL_PNL)
                self.criterion = nn.L1Loss()
                scores_zeros = Variable(torch.Tensor(np.zeros((batch[1].shape[0], scores.size()[1]))).type(dtype),
                                        requires_grad=False)
                loss += self.criterion(scores, scores_zeros).data.item()



            elif self.CTRL_PNL['loss_vector_type'] == 'anglesR' or self.CTRL_PNL['loss_vector_type'] == 'anglesDC' or self.CTRL_PNL['loss_vector_type'] == 'anglesEU':

                # Keep an unmodified copy of the batch for the correction
                # network's input (batch_cor shares tensors with batch).
                batch_cor = []
                batch_cor.append(batch[0])
                batch_cor.append(batch[1])
                # If only the correction net expects the pmat-contact
                # channel, strip channel 0 from the first-stage input.
                if self.CTRL_PNL_COR['incl_pmat_cntct_input'] == True and self.CTRL_PNL['incl_pmat_cntct_input'] == False: batch[0] = batch[0][:, 1:, :, :]

                print batch[0].size(), 'batch 0 size'
                print batch[1].size(), 'batch 1 size'
                print batch_cor[0].size(), 'batch cor 0 size A'
                print batch_cor[1].size(), 'batch cor 1 size A'
                self.CTRL_PNL['adjust_ang_from_est'] = False

                # Stage 1: base kinematic network.
                scores, INPUT_DICT, OUTPUT_DICT = \
                    UnpackBatchLib().unpackage_batch_kin_pass(batch, is_training=False, model=self.model,
                                                              CTRL_PNL=self.CTRL_PNL)


                self.criterion = nn.L1Loss()
                scores_zeros = Variable(torch.Tensor(np.zeros((batch[0].shape[0], scores.size()[1]))).type(dtype),
                                        requires_grad=False)

                # Only score columns 10:34 contribute; /10. is a magic
                # rescale — TODO confirm units.
                loss_curr = self.criterion(scores[:, 10:34], scores_zeros[:, 10:34]).data.item() / 10.

                print loss_curr, 'loss'

                #########change output dict to be the input dict here if we're testing on the precomputed labels#######


                # Split the estimated depth map into non-negative positive
                # and (sign-flipped) negative parts; scale the contact map.
                mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)
                mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)
                mdm_est_pos[mdm_est_pos < 0] = 0
                mdm_est_neg[mdm_est_neg > 0] = 0
                mdm_est_neg *= -1
                cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100
                # depth_contact_input_est_list.append([dat['mdm_est'][entry], dat['cm_est'][entry], ])

                print batch_cor[0].type()

                # Insert the estimated depth/contact channels into the
                # correction network's image stack (after the contact
                # channel if present, else at the front).
                if self.CTRL_PNL_COR['incl_pmat_cntct_input'] == True:
                    batch_cor[0] = torch.cat((batch_cor[0][:, 0:1, :, :],
                                                mdm_est_pos.type(torch.FloatTensor),
                                                mdm_est_neg.type(torch.FloatTensor),
                                                cm_est.type(torch.FloatTensor),
                                                batch_cor[0][:, 1:, :, :]), dim = 1)
                else:
                    batch_cor[0] = torch.cat((mdm_est_pos.type(torch.FloatTensor),
                                                mdm_est_neg.type(torch.FloatTensor),
                                                cm_est.type(torch.FloatTensor),
                                                batch_cor[0]), dim = 1)

                # Append stage-1 SMPL estimates (betas/angles/root) to the
                # label vector. With precomputed labels, the last 85 entries
                # are replaced instead — 85 presumably = 10 betas + 72
                # angles + 3 root; confirm.
                if self.CTRL_PNL['precomp_net1'] == True:
                    batch_cor[1] = torch.cat((batch_cor[1][:, :-85],
                                              OUTPUT_DICT['batch_betas_est'].clone().type(torch.FloatTensor),
                                              OUTPUT_DICT['batch_angles_est'].clone().type(torch.FloatTensor),
                                              OUTPUT_DICT['batch_root_xyz_est'].clone().type(torch.FloatTensor)), dim = 1)
                else:
                    batch_cor[1] = torch.cat((batch_cor[1],
                                              OUTPUT_DICT['batch_betas_est'].clone().type(torch.FloatTensor),
                                              OUTPUT_DICT['batch_angles_est'].clone().type(torch.FloatTensor),
                                              OUTPUT_DICT['batch_root_xyz_est'].clone().type(torch.FloatTensor)), dim = 1)

                print batch_cor[0].size(), 'batch cor 0 size B'
                print batch_cor[1].size(), 'batch cor 1 size B'

                #batch_cor2 = []
                #batch_cor2.append(torch.cat((batch_cor[0][:, 0:1, :, :],
                #                                batch_cor[0][:, 4:, :, :]), dim=1))
                #batch_cor2.append(batch_cor[1].clone())

                #VisualizationLib().visualize_pressure_map(batch_cor[0][0, 4:].squeeze(), None, None,
                #                                          batch_cor[0][0, 5:].squeeze(), None, None,
                #                                          batch_cor[0][0, 6:].squeeze(), None, None,
                #                                          block=False)
                #for i in range(7):
                #    print i, torch.min(batch_cor[0][0, i]), torch.max(batch_cor[0][0, i])


                # Stage 2: correction network on the augmented batch.
                # NOTE(review): is_training=True during validation — confirm
                # intentional.
                scores, INPUT_DICT_COR, OUTPUT_DICT_COR = \
                    UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, is_training=True, model=self.model_cor,
                                                              CTRL_PNL=self.CTRL_PNL_COR)


                #print batch_cor2[0].size(), 'batch cor 0 size'
                #print batch_cor2[1].size(), 'batch cor 1 size'

                #scores2, INPUT_DICT_COR2, OUTPUT_DICT_COR2 = \
                #    UnpackBatchLib().unpackage_batch_kin_pass(batch_cor2, is_training=False, model=self.model_cor2,
                #                                              CTRL_PNL=self.CTRL_PNL_COR)

                loss += loss_curr



            n_examples += self.CTRL_PNL['batch_size']
            #print n_examples

            if n_batches and (batch_i >= n_batches):
                break


            # First iteration: the running tensors don't exist yet, so the
            # torch.cat raises NameError and the except initializes them.
            # NOTE(review): bare except swallows other errors too.
            try:
                targets_print = torch.cat([targets_print, torch.mean(INPUT_DICT_COR['batch_targets'], dim = 0).unsqueeze(0)], dim=0)
                targets_est_print = torch.cat([targets_est_print, torch.mean(OUTPUT_DICT_COR['batch_targets_est'], dim = 0).unsqueeze(0)], dim=0)
            except:

                targets_print = torch.mean(INPUT_DICT_COR['batch_targets'], dim = 0).unsqueeze(0)
                targets_est_print = torch.mean(OUTPUT_DICT_COR['batch_targets_est'], dim = 0).unsqueeze(0)


            print targets_print.shape, INPUT_DICT_COR['batch_targets'].shape
            print targets_est_print.shape, OUTPUT_DICT_COR['batch_targets_est'].shape


            # Per-batch error for the most recent batch-mean row.
            if GPU == True:
                error_norm, error_avg, _ = VisualizationLib().print_error_val(targets_print[-2:-1,:].cpu(),
                                                                                   targets_est_print[-2:-1,:].cpu(),
                                                                                   self.output_size_val,
                                                                                   self.CTRL_PNL['loss_vector_type'],
                                                                                   data='validate')
            else:
                error_norm, error_avg, _ = VisualizationLib().print_error_val(targets_print[-2:-1,:],
                                                                              targets_est_print[-2:-1,:],
                                                                                   self.output_size_val,
                                                                                   self.CTRL_PNL['loss_vector_type'],
                                                                                   data='validate')

            if self.opt.visualize == True:
                # NOTE(review): `loss` is rescaled in place every visualized
                # batch — not a pure running sum after this; confirm.
                loss /= n_examples
                loss *= 100
                loss *= 1000

                print INPUT_DICT['batch_images'].size()
                NUM_IMAGES = INPUT_DICT['batch_images'].size()[0]

                for image_ct in range(NUM_IMAGES):
                    # #self.im_sample = self.im_sample[:,0,:,:]
                    self.im_sample = INPUT_DICT['batch_images'][image_ct, 1:].squeeze()
                    # /1000 presumably converts mm to meters — confirm.
                    self.tar_sample = INPUT_DICT['batch_targets'][image_ct, :].squeeze() / 1000

                    self.sc_sample = OUTPUT_DICT['batch_targets_est'][image_ct, :].squeeze() / 1000
                    self.im_sample_cor = INPUT_DICT['batch_images'][image_ct, 1:].squeeze()
                    self.sc_sample_cor = OUTPUT_DICT_COR['batch_targets_est'][image_ct, :].squeeze() / 1000
                    #self.im_sample_cor2 = INPUT_DICT['batch_images'][image_ct, :].squeeze()
                    #self.sc_sample_cor2 = OUTPUT_DICT_COR2['batch_targets_est'][image_ct, :].squeeze() / 1000

                    #self.im_sample2 = GaussFitLib().get_pressure_under_legs(self.im_sample[0, :, :], np.copy(self.sc_sample))


                    self.im_sample2 = OUTPUT_DICT['batch_mdm_est'].data[image_ct, :].squeeze()*-1


                    if GPU == True:
                        VisualizationLib().visualize_pressure_map(self.im_sample.cpu(), self.tar_sample.cpu(), self.sc_sample.cpu(), block=False)
                    else:
                        VisualizationLib().visualize_pressure_map(self.im_sample, self.tar_sample, self.sc_sample,
                                                                  self.im_sample2, self.tar_sample, self.sc_sample,
                                                                  #self.im_sample_cor2, self.tar_sample, self.sc_sample_cor2,
                                                                  self.im_sample_cor, self.tar_sample, self.sc_sample_cor,
                                                                  block=False)
                    time.sleep(1)

        # Aggregate error over the full run (all accumulated batch means).
        # NOTE(review): raises NameError if the loader was empty, since
        # targets_print is only created inside the loop.
        if GPU == True:
            error_norm, error_avg, _ = VisualizationLib().print_error_iros2018(targets_print.cpu(), targets_est_print.cpu(), self.output_size_val, self.CTRL_PNL['loss_vector_type'], data='validate')
        else:
            error_norm, error_avg, _ = VisualizationLib().print_error_iros2018(targets_print, targets_est_print, self.output_size_val, self.CTRL_PNL['loss_vector_type'], data='validate')

        print "MEAN IS: ", np.mean(error_norm, axis=0)
        print error_avg, np.mean(error_avg)*10
    def estimate_real_time(self, filename1, filename2 = None):
        """Real-time estimation loop: load one or two trained models from
        disk, then repeatedly read the live pressure mat, run the (optionally
        two-stage) network, pose the SMPL body model from the estimates, and
        render it until ROS shuts down.

        filename1: path of the first-stage model checkpoint (torch.load).
        filename2: optional path of the correction-stage model; when None,
                   only the first stage runs.
        """



        pyRender = libRender.pyRenderMesh()
        # Pressure mat resolution (rows, cols).
        mat_size = (64, 27)
        from unpack_batch_lib import UnpackBatchLib

        # NOTE(review): GPU and dtype are assigned as locals here, shadowing
        # any module-level globals of the same name — confirm intended.
        if torch.cuda.is_available():
            # Use for GPU
            GPU = True
            dtype = torch.cuda.FloatTensor
            print '######################### CUDA is available! #############################'
        else:
            # Use for CPU
            GPU = False
            dtype = torch.FloatTensor
            print '############################## USING CPU #################################'

        from torch.autograd import Variable

        # The checkpoint may have been saved on any of cuda:0..7; try each
        # remap until torch.load succeeds. NOTE(review): bare except hides
        # real load errors, and `model` is undefined if all attempts fail.
        if GPU == True:
            for i in range(0, 8):
                try:
                    model = torch.load(filename1, map_location={'cuda:'+str(i):'cuda:0'})
                    model = model.cuda().eval()
                    break
                except:
                    pass
            if filename2 is not None:
                for i in range(0, 8):
                    try:
                        model2 = torch.load(filename2, map_location={'cuda:'+str(i):'cuda:0'})
                        model2 = model2.cuda().eval()
                        break
                    except:
                        pass
            else:
                model2 = None

        else:
            model = torch.load(filename1, map_location='cpu').eval()
            if filename2 is not None:
                model2 = torch.load(filename2, map_location='cpu').eval()
            else:
                model2 = None


        pub = rospy.Publisher('meshTopic', MeshAttr)
        #rospy.init_node('talker', anonymous=False)
        while not rospy.is_shutdown():


            # Reorient the live pressure reading to the mat frame and clip
            # to [0, 100] after applying the calibration multiplier (*4).
            pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(self.CTRL_PNL['pmat_mult']*4), a_min=0, a_max=100)))
            #pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(1), a_min=0, a_max=100)))
            #print "max is : ", np.max(pmat)
            #print "sum is : ", np.sum(pmat)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat = gaussian_filter(pmat, sigma= 0.5)


            # Build the (image-channel) input stack from pressure + bed angle.
            pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, self.bedangle, mat_size)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

            pmat_stack = np.array(pmat_stack)
            if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
                # Binary contact channel: any nonzero pressure -> 100.
                pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
                pmat_contact[pmat_contact > 0] = 100
                pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis = 1)

            # lbs -> kg; inches -> (m - 1) * 100, as expected by the network
            # input encoding — confirm against training-side preprocessing.
            weight_input = WEIGHT_LBS/2.20462
            height_input = (HEIGHT_IN*0.0254 - 1)*100

            # 162-dim label vector: indices 157/158 one-hot gender,
            # 160 weight, 161 height (other entries stay zero here).
            batch1 = np.zeros((1, 162))
            if GENDER == 'f':
                batch1[:, 157] += 1
            elif GENDER == 'm':
                batch1[:, 158] += 1
            batch1[:, 160] += weight_input
            batch1[:, 161] += height_input

            if self.CTRL_PNL['normalize_input'] == True:
                self.CTRL_PNL['depth_map_input_est'] = False
                pmat_stack = self.TPL.normalize_network_input(pmat_stack, self.CTRL_PNL)
                batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)


            pmat_stack = torch.Tensor(pmat_stack)
            batch1 = torch.Tensor(batch1)


            batch = []
            batch.append(pmat_stack)
            batch.append(batch1)

            # Stage 1 forward pass.
            self.CTRL_PNL['adjust_ang_from_est'] = False
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, model, self.CTRL_PNL)

            self.CTRL_PNL['first_pass'] = False

            # Split the estimated depth map into positive / sign-flipped
            # negative parts and rescale; divisors are precomputed
            # normalization std constants (see comment block below).
            mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
            mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
            mdm_est_pos[mdm_est_pos < 0] = 0
            mdm_est_neg[mdm_est_neg > 0] = 0
            mdm_est_neg *= -1
            cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

            #1. / 16.69545796387731,  # pos est depth
            #1. / 45.08513083167194,  # neg est depth
            #1. / 43.55800622930469,  # cm est

            if model2 is not None:
                # Stage 2: augment the input with stage-1 depth/contact
                # estimates and the estimated SMPL parameters, then run the
                # correction model.
                batch_cor = []
                batch_cor.append(torch.cat((pmat_stack[:, 0:1, :, :],
                                          mdm_est_pos.type(torch.FloatTensor),
                                          mdm_est_neg.type(torch.FloatTensor),
                                          cm_est.type(torch.FloatTensor),
                                          pmat_stack[:, 1:, :, :]), dim=1))

                if self.CTRL_PNL['full_body_rot'] == False:
                    batch_cor.append(torch.cat((batch1,
                                      OUTPUT_DICT['batch_betas_est'].cpu(),
                                      OUTPUT_DICT['batch_angles_est'].cpu(),
                                      OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim = 1))
                elif self.CTRL_PNL['full_body_rot'] == True:
                    batch_cor.append(torch.cat((batch1,
                                      OUTPUT_DICT['batch_betas_est'].cpu(),
                                      OUTPUT_DICT['batch_angles_est'].cpu(),
                                      OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                      OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim = 1))


                self.CTRL_PNL['adjust_ang_from_est'] = True
                scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False, model2, self.CTRL_PNL)

            # Pull the clipped SMPL parameter estimates back to numpy.
            betas_est = np.squeeze(OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy())
            angles_est = np.squeeze(OUTPUT_DICT['batch_angles_est_post_clip'])
            root_shift_est = np.squeeze(OUTPUT_DICT['batch_root_xyz_est_post_clip'].cpu().numpy())


            #print betas_est.shape, root_shift_est.shape, angles_est.shape

            #print betas_est, root_shift_est, angles_est
            angles_est = angles_est.reshape(72)

            # Write the 10 shape and 72 pose parameters into the SMPL model.
            for idx in range(10):
                #print shape_pose_vol[0][idx]
                self.m.betas[idx] = betas_est[idx]


            for idx in range(72):
                self.m.pose[idx] = angles_est[idx]


            # Re-express the estimated global (root) rotation in the render
            # frame: rotate by [pi, 0, pi/2]. The +1e-6 avoids a degenerate
            # zero-angle axis-angle vector.
            init_root = np.array(self.m.pose[0:3])+0.000001
            init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
            root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi/2])
            #print root_rot
            trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

            self.m.pose[0] = trans_root[0]
            self.m.pose[1] = trans_root[1]
            self.m.pose[2] = trans_root[2]

            #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
            # self.m.pose[51] = selection_r

            print self.m.r
            #print OUTPUT_DICT['verts']

            # Render the posed mesh over the bed/pressure mat, then mark the
            # point cloud consumed so the next loop waits for fresh data.
            pyRender.mesh_render_pose_bed_orig(self.m, root_shift_est, self.point_cloud_array, self.pc_isnew, pmat, self.markers, self.bedangle)
            self.point_cloud_array = None
    def estimate_pose(self, pmat, bedangle, markers_c, model, model2):
        mat_size = (64, 27)


        pmat = np.fliplr(np.flipud(np.clip(pmat.reshape(MAT_SIZE)*float(self.CTRL_PNL['pmat_mult']), a_min=0, a_max=100)))

        if self.CTRL_PNL['cal_noise'] == False:
            pmat = gaussian_filter(pmat, sigma=1.0)

        pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, 0.0, mat_size)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

        pmat_stack = np.array(pmat_stack)
        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
            pmat_contact[pmat_contact > 0] = 100
            pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

        weight_input = self.weight_lbs / 2.20462
        height_input = (self.height_in * 0.0254 - 1) * 100

        batch1 = np.zeros((1, 162))
        if self.gender == 'f':
            batch1[:, 157] += 1
        elif self.gender == 'm':
            batch1[:, 158] += 1
        batch1[:, 160] += weight_input
        batch1[:, 161] += height_input

        if self.CTRL_PNL['normalize_input'] == True:
            self.CTRL_PNL['depth_map_input_est'] = False
            pmat_stack = self.TPL.normalize_network_input(pmat_stack, self.CTRL_PNL)
            batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

        pmat_stack = torch.Tensor(pmat_stack)
        batch1 = torch.Tensor(batch1)


        if DROPOUT == True:
            pmat_stack = pmat_stack.repeat(25, 1, 1, 1)
            batch1 = batch1.repeat(25, 1)


        batch = []
        batch.append(pmat_stack)
        batch.append(batch1)

        NUMOFOUTPUTDIMS = 3
        NUMOFOUTPUTNODES_TRAIN = 24
        self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)


        self.CTRL_PNL['adjust_ang_from_est'] = False
        scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, model, self.CTRL_PNL)

        mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
        mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
        mdm_est_pos[mdm_est_pos < 0] = 0
        mdm_est_neg[mdm_est_neg > 0] = 0
        mdm_est_neg *= -1
        cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

        # 1. / 16.69545796387731,  # pos est depth
        # 1. / 45.08513083167194,  # neg est depth
        # 1. / 43.55800622930469,  # cm est

        sc_sample1 = OUTPUT_DICT['batch_targets_est'].clone()
        sc_sample1 = sc_sample1[0, :].squeeze() / 1000
        sc_sample1 = sc_sample1.view(self.output_size_train)
        #print sc_sample1

        if model2 is not None:
            print "Using model 2"
            batch_cor = []
            batch_cor.append(torch.cat((pmat_stack[:, 0:1, :, :],
                                        mdm_est_pos.type(torch.FloatTensor),
                                        mdm_est_neg.type(torch.FloatTensor),
                                        cm_est.type(torch.FloatTensor),
                                        pmat_stack[:, 1:, :, :]), dim=1))

            if self.CTRL_PNL['full_body_rot'] == False:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim=1))
            elif self.CTRL_PNL['full_body_rot'] == True:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                            OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim=1))

            self.CTRL_PNL['adjust_ang_from_est'] = True
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False, model2,
                                                                                        self.CTRL_PNL)



        # print betas_est, root_shift_est, angles_est
        if self.CTRL_PNL['dropout'] == True:
            print OUTPUT_DICT['verts'].shape
            smpl_verts = np.mean(OUTPUT_DICT['verts'], axis = 0)
            dropout_variance = np.std(OUTPUT_DICT['verts'], axis=0)
            dropout_variance = np.linalg.norm(dropout_variance, axis = 1)
        else:
            smpl_verts = OUTPUT_DICT['verts'][0, :, :]
            dropout_variance = None


        smpl_verts = np.concatenate((smpl_verts[:, 1:2] - 0.286 + 0.0143, smpl_verts[:, 0:1] - 0.286 + 0.0143, 2*0.075 -smpl_verts[:, 2:3]), axis = 1)

        smpl_faces = np.array(self.m.f)

        pc_autofil_red = self.trim_pc_sides() #this is the point cloud

        q = OUTPUT_DICT['batch_mdm_est'].data.numpy().reshape(OUTPUT_DICT['batch_mdm_est'].size()[0], 64, 27) * -1
        q = np.mean(q, axis = 0)

        camera_point = [1.09898028, 0.46441343, -1.53]

        if SHOW_SMPL_EST == False:
            smpl_verts *= 0.001

        #print smpl_verts

        viz_type = "3D"

        if viz_type == "2D":
            from visualization_lib import VisualizationLib
            if model2 is not None:
                self.im_sample = INPUT_DICT['batch_images'][0, 4:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
            else:
                self.im_sample = INPUT_DICT['batch_images'][0, 1:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
            self.im_sample_ext = INPUT_DICT['batch_images'][0, 0:, :].squeeze() * 20.  # normalizing_std_constants[0]  #pmat contact
            # self.im_sample_ext2 = INPUT_DICT['batch_images'][im_display_idx, 2:, :].squeeze()*20.#normalizing_std_constants[4]  #sobel
            self.im_sample_ext3 = OUTPUT_DICT['batch_mdm_est'][0, :, :].squeeze().unsqueeze(0) * -1  # est depth output

            # print scores[0, 10:16], 'scores of body rot'

            # print self.im_sample.size(), self.im_sample_ext.size(), self.im_sample_ext2.size(), self.im_sample_ext3.size()

            # self.publish_depth_marker_array(self.im_sample_ext3)



            self.tar_sample = INPUT_DICT['batch_targets']
            self.tar_sample = self.tar_sample[0, :].squeeze() / 1000
            sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
            sc_sample = sc_sample[0, :].squeeze() / 1000


            sc_sample = sc_sample.view(self.output_size_train)

            VisualizationLib().visualize_pressure_map(self.im_sample, sc_sample1, sc_sample,
                                                         # self.im_sample_ext, None, None,
                                                          self.im_sample_ext3, None, None, #, self.tar_sample_val, self.sc_sample_val,
                                                          block=False)

            time.sleep(4)

        elif viz_type == "3D":


            #render everything
            #self.pyRender.render_mesh_pc_bed_pyrender_everything(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                                      pc = pc_autofil_red, pmat = pmat, smpl_render_points = False,
            #                                                      markers = None, dropout_variance = dropout_variance)


            #render in 3D pyrender with pressure mat
            #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                          pc = None, pmat = pmat, smpl_render_points = False,
            #                                          facing_cam_only=False, viz_type = None,
            #                                          markers = None, segment_limbs=False)

            #render in 3D pyrender with segmented limbs
            #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                          pc = None, pmat = None, smpl_render_points = False,
            #                                          facing_cam_only=False, viz_type = None,
            #                                          markers = None, segment_limbs=True)

            #render the error of point cloud points relative to verts
            #self.Render.eval_dist_render_open3d(smpl_verts, smpl_faces, pc_autofil_red, viz_type = 'pc_error',
            #                                      camera_point = camera_point, segment_limbs=False)
            self.Render.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
                                                      pc = pc_autofil_red, pmat = None, smpl_render_points = False,
                                                      facing_cam_only=True, viz_type = 'pc_error',
                                                      markers = None, segment_limbs=False)

            #render the error of verts relative to point cloud points
            #self.Render.eval_dist_render_open3d(smpl_verts, smpl_faces, pc_autofil_red, viz_type = 'mesh_error',
            #                                      camera_point = camera_point, segment_limbs=False)
            #self.pyRender.render_mesh_pc_bed_pyrender(smpl_verts, smpl_faces, camera_point, bedangle,
            #                                          pc = pc_autofil_red, pmat = None, smpl_render_points = False,
            #                                          facing_cam_only=True, viz_type = 'mesh_error',
            #                                          markers = None, segment_limbs=False)

            time.sleep(1)
            self.point_cloud_array = None
# 예제 #6  ("Example #6" — separator artifact from the code-scrape source, kept as a comment so the file parses)
# 0  (vote/score count from the scrape listing; not code)
    def val_convnet_general(self, epoch):
        """Run one evaluation pass over self.train_loader on synthetic data.

        For each batch: runs a kinematic forward pass through self.model,
        optionally refines it with a second pass through self.model2 (feeding
        the first pass's depth/contact-map and pose estimates back in as extra
        inputs), poses the SMPL body model with the ground-truth labels,
        computes per-joint error against the estimate, renders estimate vs.
        ground truth with pyrender, and accumulates metrics in RESULTS_DICT,
        which is pickled to disk after the loop.

        NOTE(review): `epoch` is accepted but never used in this body.
        """

        # Gender is hard-coded to female here, so the "m" branch below is
        # currently dead; any gender set by the caller is overwritten.
        # NOTE(review): confirm this hard-coding is intentional.
        self.gender = "f"
        if self.gender == "m":
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        # SMPL body model; later re-posed in place via self.m.betas / self.m.pose.
        self.m = load_model(model_path)


        self.pyRender = libPyRender.pyRenderMesh(render = True)

        '''
        Train the model for one epoch.
        '''
        # Some models use slightly different forward passes and train and test
        # time (e.g., any model with Dropout). This puts the model in train mode
        # (as opposed to eval mode) so it knows which one to use.
        self.model.eval()#train()
        self.model2.eval()#train()

        # Per-batch metric accumulators; several of these (precision, recall,
        # overlap_d_err, ...) are filled inside the renderer call below, not here.
        RESULTS_DICT = {}
        RESULTS_DICT['j_err'] = []
        RESULTS_DICT['betas'] = []
        RESULTS_DICT['dir_v_err'] = []
        RESULTS_DICT['dir_v_limb_err'] = []
        RESULTS_DICT['v_to_gt_err'] = []
        RESULTS_DICT['v_limb_to_gt_err'] = []
        RESULTS_DICT['gt_to_v_err'] = []
        RESULTS_DICT['precision'] = []
        RESULTS_DICT['recall'] = []
        RESULTS_DICT['overlap_d_err'] = []
        RESULTS_DICT['all_d_err'] = []
        init_time = time.time()

        # Anomaly detection is expensive; presumably left on here for debugging.
        with torch.autograd.set_detect_anomaly(True):

            # This will loop a total = training_images/batch_size times
            for batch_idx, batch in enumerate(self.train_loader):

                # Keep an unmodified copy of the label tensor; the model2 path
                # below concatenates estimates onto it to build batch_cor.
                batch1 = batch[1].clone()

                # Ground-truth label slices, averaged over the batch dimension.
                # Assumed layout: [:, 72:82] betas, [:, 82:154] joint angles,
                # [:, 154:157] root shift — TODO confirm against the data loader.
                betas_gt = torch.mean(batch[1][:, 72:82], dim = 0).numpy()
                angles_gt = torch.mean(batch[1][:, 82:154], dim = 0).numpy()
                root_shift_est_gt = torch.mean(batch[1][:, 154:157], dim = 0).numpy()

                # 24 joints x 3 coordinates per skeleton estimate.
                NUMOFOUTPUTDIMS = 3
                NUMOFOUTPUTNODES_TRAIN = 24
                self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)

                # First pass: raw estimate (no angle adjustment), with depth-map labels.
                self.CTRL_PNL['adjust_ang_from_est'] = False
                self.CTRL_PNL['depth_map_labels'] = True
                print batch[0].size(), "batch 0 shape"
                scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, self.model,
                                                                                            self.CTRL_PNL)
                print OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy()[0], 'betas init'
                # Split the estimated depth map into positive/negative parts and
                # rescale each by its normalization constant (listed below).
                mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
                mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
                mdm_est_pos[mdm_est_pos < 0] = 0
                mdm_est_neg[mdm_est_neg > 0] = 0
                mdm_est_neg *= -1
                cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

                # 1. / 16.69545796387731,  # pos est depth
                # 1. / 45.08513083167194,  # neg est depth
                # 1. / 43.55800622930469,  # cm est

                # First-pass joint estimate (mm -> m), reshaped to (24, 3); kept
                # for the 2D visualization branch below.
                sc_sample1 = OUTPUT_DICT['batch_targets_est'].clone()
                sc_sample1 = sc_sample1[0, :].squeeze() / 1000
                sc_sample1 = sc_sample1.view(self.output_size_train)
                # print sc_sample1

                if self.model2 is not None:
                    print "Using model 2"
                    # Build the corrected batch: insert the first pass's
                    # pos/neg depth and contact-map estimates as extra input
                    # channels between channel 0 and the rest.
                    batch_cor = []
                    batch_cor.append(torch.cat((batch[0][:, 0:1, :, :],
                                                mdm_est_pos.type(torch.FloatTensor),
                                                mdm_est_neg.type(torch.FloatTensor),
                                                cm_est.type(torch.FloatTensor),
                                                batch[0][:, 1:, :, :]), dim=1))


                    # Append the first pass's pose estimates to the labels so the
                    # second pass can adjust angles from them; full_body_rot adds
                    # the root atan2 estimate as well.
                    if self.CTRL_PNL['full_body_rot'] == False:
                        batch_cor.append(torch.cat((batch1,
                                                    OUTPUT_DICT['batch_betas_est'].cpu(),
                                                    OUTPUT_DICT['batch_angles_est'].cpu(),
                                                    OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim=1))
                    elif self.CTRL_PNL['full_body_rot'] == True:
                        batch_cor.append(torch.cat((batch1,
                                                    OUTPUT_DICT['batch_betas_est'].cpu(),
                                                    OUTPUT_DICT['batch_angles_est'].cpu(),
                                                    OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                                    OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim=1))



                    # Second pass: refine angles from the first-pass estimate;
                    # OUTPUT_DICT is overwritten with model2's results.
                    self.CTRL_PNL['adjust_ang_from_est'] = True
                    self.CTRL_PNL['depth_map_labels'] = False
                    scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False,
                                                                                                self.model2,
                                                                                                self.CTRL_PNL)

                # print betas_est, root_shift_est, angles_est
                # With dropout enabled, 'verts' holds multiple stochastic
                # samples: average them for the mesh and use the per-vertex
                # std-norm as a dropout-variance measure for visualization.
                if self.CTRL_PNL['dropout'] == True:
                    #print OUTPUT_DICT['verts'].shape
                    smpl_verts = np.mean(OUTPUT_DICT['verts'], axis=0)
                    dropout_variance = np.std(OUTPUT_DICT['verts'], axis=0)
                    dropout_variance = np.linalg.norm(dropout_variance, axis=1)
                else:
                    smpl_verts = OUTPUT_DICT['verts'][0, :, :]
                    dropout_variance = None




                # Swap x/y, apply a fixed bed-frame offset, and flip z to move
                # the estimated verts into the rendering coordinate frame.
                smpl_verts = np.concatenate((smpl_verts[:, 1:2] - 0.286 + 0.0143, smpl_verts[:, 0:1] - 0.286 + 0.0143,
                                             - smpl_verts[:, 2:3]), axis=1)

                smpl_faces = np.array(self.m.f)


                # Estimated depth map reshaped to the 64x27 pressure-mat grid,
                # sign-flipped and averaged over the batch. NOTE(review): q is
                # computed but not used below — confirm it is still needed.
                q = OUTPUT_DICT['batch_mdm_est'].data.numpy().reshape(OUTPUT_DICT['batch_mdm_est'].size()[0], 64, 27) * -1
                q = np.mean(q, axis=0)

                camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                bedangle = 0.0
                # print smpl_verts

                RESULTS_DICT['betas'].append(OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy()[0])
                print RESULTS_DICT['betas'][-1], "BETAS"

                # Hard-coded to the 3D pyrender path; the 2D branch is kept for debugging.
                viz_type = "3D"

                if viz_type == "2D":
                    from visualization_lib import VisualizationLib
                    # Channel offset differs with model2 because the corrected
                    # batch inserted 3 extra channels before the pmat channel.
                    if self.model2 is not None:
                        self.im_sample = INPUT_DICT['batch_images'][0, 4:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
                    else:
                        self.im_sample = INPUT_DICT['batch_images'][0, 1:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
                    self.im_sample_ext = INPUT_DICT['batch_images'][0, 0:,:].squeeze() * 20.  # normalizing_std_constants[0]  #pmat contact
                    # self.im_sample_ext2 = INPUT_DICT['batch_images'][im_display_idx, 2:, :].squeeze()*20.#normalizing_std_constants[4]  #sobel
                    self.im_sample_ext3 = OUTPUT_DICT['batch_mdm_est'][0, :, :].squeeze().unsqueeze(0) * -1  # est depth output

                    # print scores[0, 10:16], 'scores of body rot'

                    # print self.im_sample.size(), self.im_sample_ext.size(), self.im_sample_ext2.size(), self.im_sample_ext3.size()

                    # self.publish_depth_marker_array(self.im_sample_ext3)

                    # Target and estimated joints for sample 0, converted mm -> m.
                    self.tar_sample = INPUT_DICT['batch_targets']
                    self.tar_sample = self.tar_sample[0, :].squeeze() / 1000
                    sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
                    sc_sample = sc_sample[0, :].squeeze() / 1000

                    sc_sample = sc_sample.view(self.output_size_train)

                    VisualizationLib().visualize_pressure_map(self.im_sample, sc_sample1, sc_sample,
                                                              # self.im_sample_ext, None, None,
                                                              self.im_sample_ext3, None, None,
                                                              # , self.tar_sample_val, self.sc_sample_val,
                                                              block=False)


                elif viz_type == "3D":
                    # Pressure mat (channel 1 of sample 0), un-normalized by its
                    # std constant for rendering.
                    pmat = batch[0][0, 1, :, :].clone().numpy()*25.50538629767412
                    #print pmat.shape

                    # Re-pose the SMPL model in place with the ground-truth
                    # betas and joint angles.
                    for beta in range(betas_gt.shape[0]):
                        self.m.betas[beta] = betas_gt[beta]
                    for angle in range(angles_gt.shape[0]):
                        self.m.pose[angle] = angles_gt[angle]

                    # Posed GT verts, translated so the root joint sits at the
                    # GT root shift.
                    smpl_verts_gt = np.array(self.m.r)
                    for s in range(root_shift_est_gt.shape[0]):
                        smpl_verts_gt[:, s] += (root_shift_est_gt[s] - float(self.m.J_transformed[0, s]))

                    # Same frame change as the estimated verts above, but
                    # without the bed-frame x/y offsets applied there —
                    # NOTE(review): confirm the offset asymmetry is intended.
                    smpl_verts_gt = np.concatenate(
                        (smpl_verts_gt[:, 1:2] - 0.286 + 0.0143, smpl_verts_gt[:, 0:1] - 0.286 + 0.0143,
                          0.0 - smpl_verts_gt[:, 2:3]), axis=1)



                    # GT joint positions (24 x 3), shifted like the GT verts.
                    joint_cart_gt = np.array(self.m.J_transformed).reshape(24, 3)
                    for s in range(root_shift_est_gt.shape[0]):
                        joint_cart_gt[:, s] += (root_shift_est_gt[s] - float(self.m.J_transformed[0, s]))

                    #print joint_cart_gt, 'gt'

                    # Estimated joints for sample 0 (mm -> m), then per-joint
                    # Euclidean error against GT.
                    sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
                    sc_sample = (sc_sample[0, :].squeeze().numpy() / 1000).reshape(24, 3)

                    #print sc_sample, 'estimate'
                    joint_error = np.linalg.norm(joint_cart_gt-sc_sample, axis = 1)
                    #print joint_error
                    RESULTS_DICT['j_err'].append(joint_error)


                    camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                    # render everything
                    # The renderer also appends its own metrics (precision,
                    # recall, vertex errors, ...) into RESULTS_DICT.
                    RESULTS_DICT = self.pyRender.render_mesh_pc_bed_pyrender_everything_synth(smpl_verts, smpl_faces,
                                                                            camera_point, bedangle, RESULTS_DICT,
                                                                            smpl_verts_gt=smpl_verts_gt, pmat=pmat,
                                                                            markers=None,
                                                                            dropout_variance=dropout_variance)

                #time.sleep(300)

                # Running means of the accumulated metrics, printed every batch.
                #print RESULTS_DICT['j_err']
                print np.mean(np.array(RESULTS_DICT['j_err']), axis = 0)
                #print RESULTS_DICT['precision']
                print np.mean(RESULTS_DICT['precision'])
                print time.time() - init_time
                #break

        #save here

        # Persist all metrics; output filename is keyed by the module-level
        # TESTING_FILENAME and NETWORK_2 constants.
        pkl.dump(RESULTS_DICT, open('/media/henry/multimodal_data_2/data/final_results/results_synth_'+TESTING_FILENAME+'_'+NETWORK_2+'.p', 'wb'))