Code Example #1
    def generate_prechecked_pose(self, posture, stiffness, filename):

        prechecked_pose_list = np.load(filename, allow_pickle=True).tolist()

        print len(prechecked_pose_list)
        shuffle(prechecked_pose_list)

        pyRender = libRender.pyRenderMesh()

        for shape_pose_vol in prechecked_pose_list[6:]:
            #print shape_pose_vol
            #print shape_pose_vol[0]
            #print shape_pose_vol[1]
            #print shape_pose_vol[2]
            for idx in range(len(shape_pose_vol[0])):
                #print shape_pose_vol[0][idx]
                self.m.betas[idx] = shape_pose_vol[0][idx]

            print 'init'
            print shape_pose_vol[2][0], shape_pose_vol[2][1], shape_pose_vol[
                2][2]

            self.m.pose[:] = np.array(72 * [0.])

            for idx in range(len(shape_pose_vol[1])):
                #print shape_pose_vol[1][idx]
                #print self.m.pose[shape_pose_vol[1][idx]]
                #print shape_pose_vol[2][idx]
                pose_index = shape_pose_vol[1][idx] * 1

                self.m.pose[pose_index] = shape_pose_vol[2][idx] * 1.

                #print idx, pose_index, self.m.pose[pose_index], shape_pose_vol[2][idx]

            print self.m.pose[0:3]
            init_root = np.array(self.m.pose[0:3]) + 0.000001
            init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
            root_rot = libKinematics.eulerAnglesToRotationMatrix(
                [np.pi, 0.0, np.pi / 2])
            #print root_rot
            trans_root = libKinematics.dir_cos_angles_from_matrix(
                np.matmul(root_rot, init_rootR))

            self.m.pose[0] = trans_root[0]
            self.m.pose[1] = trans_root[1]
            self.m.pose[2] = trans_root[2]

            print root_rot
            print init_rootR
            print trans_root
            print init_root, trans_root

            #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
            # self.m.pose[51] = selection_r
            pyRender.mesh_render_pose_bed(self.m, self.point_cloud_array,
                                          self.pc_isnew, self.pressure,
                                          self.markers)
            self.point_cloud_array = None
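
The root re-orientation above composes a fixed bed-frame rotation with the SMPL root axis-angle pose. Below is a minimal, self-contained sketch of the same composition using scipy.spatial.transform in place of the libKinematics helpers; the 'xyz' Euler order is an assumption about eulerAnglesToRotationMatrix, and the function name is hypothetical.

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    def reorient_root(root_aa, euler_xyz=(np.pi, 0.0, np.pi / 2)):
        # Compose a fixed world rotation with an axis-angle root pose; the
        # small epsilon avoids the zero-angle singularity, as in the code above.
        init_rootR = R.from_rotvec(np.asarray(root_aa, dtype=float) + 1e-6)
        root_rot = R.from_euler('xyz', euler_xyz)
        return (root_rot * init_rootR).as_rotvec()

    print(reorient_root([0.0, 0.0, 0.0]))  # a flat pose flipped onto its back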
Code Example #2
        m.pose[48:51] = capsule_angles[41:44]
        m.pose[51:54] = capsule_angles[44:47]
        m.pose[55] = capsule_angles[47]
        m.pose[58] = capsule_angles[48]
        m.pose[60:63] = capsule_angles[49:52]
        m.pose[63:66] = capsule_angles[52:55]

        return m


if __name__ == '__main__':

    from smpl.smpl_webuser.serialization import load_model
    import time
    import lib_pyrender as libPyRender
    pyRender = libPyRender.pyRenderMesh(render=True)

    gender = "f"
    posture = "lay"
    num_data = 2000
    stiffness = "none"

    resting_pose_data_list = np.load(
        "/home/henry/data/resting_poses/resting_pose_roll0_plo_hbh_f_lay_set2_1769_of_2601_none_stiff_fix.npy",
        allow_pickle=True)
    model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_' + gender + '_lbs_10_207_0_v1.0.0.pkl'
    m = load_model(model_path)

    PERSON_SCALE = 50.0

    # first get the offsets
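
Example #2 maps optimizer capsule angles into fixed SMPL pose slots with hard-coded slices. Here is a table-driven sketch of the same mapping; the index pairs are copied from the snippet, while the names and the function itself are hypothetical.

    import numpy as np

    # (m.pose indices, capsule_angles indices), taken from the snippet above.
    POSE_FROM_CAPSULE = [
        (slice(48, 51), slice(41, 44)),
        (slice(51, 54), slice(44, 47)),
        (slice(55, 56), slice(47, 48)),
        (slice(58, 59), slice(48, 49)),
        (slice(60, 63), slice(49, 52)),
        (slice(63, 66), slice(52, 55)),
    ]

    def apply_capsule_angles(pose, capsule_angles):
        pose = np.asarray(pose, dtype=float)
        capsule_angles = np.asarray(capsule_angles, dtype=float)
        for pose_sl, cap_sl in POSE_FROM_CAPSULE:
            pose[pose_sl] = capsule_angles[cap_sl]
        return pose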
Code Example #3
    def evaluate_data(self, filename1, filename2=None):


        self.Render = libRender.pyRenderMesh()
        self.pyRender = libPyRender.pyRenderMesh()

        #model = torch.load(filename1, map_location={'cuda:5': 'cuda:0'})
        if GPU == True:
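            # Try each CUDA device index so a checkpoint saved on any GPU can
            # be remapped onto cuda:0. Note: if every attempt fails, 'model'
            # is left undefined and the code below raises a NameError.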
            for i in range(0, 8):
                try:
                    model = torch.load(filename1, map_location={'cuda:'+str(i):'cuda:0'})
                    if self.CTRL_PNL['dropout'] == True:
                        model = model.cuda().train()
                    else:
                        model = model.cuda().eval()
                    break
                except Exception:
                    pass
            if filename2 is not None:
                for i in range(0, 8):
                    try:
                        model2 = torch.load(filename2, map_location={'cuda:'+str(i):'cuda:0'})
                        if self.CTRL_PNL['dropout'] == True:
                            model2 = model2.cuda().train()
                        else:
                            model2 = model2.cuda().eval()
                        break
                    except Exception:
                        pass
            else:
                model2 = None
        else:
            model = torch.load(filename1, map_location='cpu')
            if self.CTRL_PNL['dropout'] == True:
                model = model.train()
            else:
                model = model.eval()
            if filename2 is not None:
                model2 = torch.load(filename2, map_location='cpu')
                if self.CTRL_PNL['dropout'] == True:
                    model2 = model2.train()
                else:
                    model2 = model2.eval()
            else:
                model2 = None

        #function_input = np.array(function_input)*np.array([10, 10, 10, 10, 10, 10, 0.1, 0.1, 0.1, 0.1, 1])
        #function_input += np.array([2.2, 32, -1, 1.2, 32, -5, 1.0, 1.0, 0.96, 0.95, 0.8])
        function_input = np.array(self.calibration_optim_values)*np.array([10, 10, 10, 0.1, 0.1, 0.1, 0.1])
        function_input += np.array([1.2, 32, -5, 1.0, 1.0, 0.96, 0.95])


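        # The 'n-3' indexing below apparently preserves positions from the
        # older 11-element calibration layout shown in the comments above.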
        kinect_rotate_angle = function_input[3-3]
        kinect_shift_up = int(function_input[4-3])
        kinect_shift_right = int(function_input[5-3])
        camera_alpha_vert = function_input[6-3]
        camera_alpha_horiz = function_input[7-3]
        pressure_horiz_scale = function_input[8-3]
        pressure_vert_scale = function_input[9-3]
        #head_angle_multiplier = function_input[10-3]


        #file_dir = "/media/henry/multimodal_data_1/all_hevans_data/0905_2_Evening/0255"
        #file_dir_list = ["/media/henry/multimodal_data_2/test_data/data_072019_0001/"]
        blah = True

        #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0007"
        #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0006"
        #file_dir = "/home/henry/ivy_test_data/data_102019_kneeup_0000"
        #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/P000/data_102019_kneeup_0000"

        if PARTICIPANT == "P106":
            file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_00"
            #file_dir = "/home/henry/Desktop/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_000"
        else:
            file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_00"
            #file_dir = "/home/henry/Desktop/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_00"
        file_dir_nums = ["00","01","02","03","04","05","06","07","08","09"]#,"10"]#,"11","12"]
        overall_counter = 1
        overall_counter_disp = 1

        bedstatenpy = []
        colornpy = []
        config_codenpy = []
        date_stampnpy = []
        depth_rnpy = []
        markersnpy = []
        point_cloudnpy = []
        pressurenpy = []
        time_stampnpy = []

        SAVE = True

        for file_dir_num in file_dir_nums:
            file_dir_curr = file_dir + file_dir_num

            print "LOADING", file_dir_curr
            V3D.load_next_file(file_dir_curr)

            start_num = 0
            print self.color_all.shape

            #for im_num in range(29, 100):
            for im_num in range(start_num, self.color_all.shape[0]):

                # Frames without usable data for each participant; matching
                # counters are skipped. (S104 keeps everything but the last two.)
                SKIP_COUNTS = {
                    "S103": [26, 27, 28, 45, 53, 54, 55],
                    "S104": [49, 50],
                    "S107": [25, 50],
                    "S114": [42, 50],
                    "S118": [11, 50],
                    "S121": [7, 47],
                    "S130": [30, 31, 34, 52, 53, 54, 55],
                    "S134": [49, 50],
                    "S140": [49, 50],
                    "S141": [49, 50],
                    "S145": [23, 49, 50, 51],
                    "S151": [9, 48],
                    "S163": [46, 50],
                    "S165": [19, 45],
                    "S170": [49, 50],
                    "S179": [42, 50],
                    "S184": [49, 50],
                    "S187": [39, 50],
                    "S188": [47, 50],
                    "S196": [20, 36],
                }
                if overall_counter in SKIP_COUNTS.get(PARTICIPANT, []):
                    overall_counter += 1
                #elif overall_counter < 41:# and im_num > 0:
                #    overall_counter += 1
                #    overall_counter_disp += 1
                #    pass

                else:
                    print file_dir_curr, "    subset count: ", im_num, "    overall ct: ", overall_counter_disp, overall_counter
                    overall_counter += 1
                    overall_counter_disp += 1
                    self.overall_image_scale_amount = 0.85

                    half_w_half_l = [0.4, 0.4, 1.1, 1.1]

                    all_image_list = []
                    self.label_single_image = []

                    self.label_index = 0

                    self.color = self.color_all[im_num]
                    self.depth_r = self.depth_r_all[im_num]
                    self.pressure = self.pressure_all[im_num]
                    self.bed_state = self.bedstate_all[im_num]
                    self.point_cloud_autofil = self.point_cloud_autofil_all[im_num] + [0.0, 0.0, 0.1]
                    print self.point_cloud_autofil.shape

                    self.bed_state[0] = self.bed_state[0]#*head_angle_multiplier
                    self.bed_state *= 0
                    #self.bed_state += 60.
                    print self.bed_state, np.shape(self.pressure)

                    bedstatenpy.append(self.bedstate_all[im_num])
                    colornpy.append(self.color_all[im_num])
                    config_codenpy.append(self.config_code_all[im_num])
                    date_stampnpy.append(self.date_stamp_all[im_num])
                    depth_rnpy.append(self.depth_r_all[im_num])
                    markersnpy.append(list(self.markers_all[im_num]))
                    point_cloudnpy.append(self.point_cloud_autofil_all[im_num])
                    pressurenpy.append(self.pressure_all[im_num])
                    time_stampnpy.append(self.time_stamp_all[im_num])

                    if im_num == start_num and blah == True:
                        markers_c = []
                        markers_c.append(self.markers_all[im_num][0])
                        markers_c.append(self.markers_all[im_num][1])
                        markers_c.append(self.markers_all[im_num][2])
                        markers_c.append(self.markers_all[im_num][3])
                        for idx in range(4):
                            if markers_c[idx] is not None:
                                markers_c[idx] = np.array(markers_c[idx])*213./228.
                    blah = False



                    # Get the marker points in 2D on the color image
                    u_c, v_c = ArTagLib().color_2D_markers(markers_c, self.new_K_kin)

                    # Get the marker points dropped to the height of the pressure mat
                    u_c_drop, v_c_drop, markers_c_drop = ArTagLib().color_2D_markers_drop(markers_c, self.new_K_kin)


                    # Get the geometry for sizing the pressure mat
                    pmat_ArTagLib = ArTagLib()
                    self.pressure_im_size_required, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend, half_w_half_l = \
                        pmat_ArTagLib.p_mat_geom(markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l)

                    tf_corners = np.zeros((8, 2))
                    tf_corners[0:8,:] = np.copy(self.tf_corners)


                    #COLOR
                    #if self.color is not 0:
                    color_reshaped, color_size = VizLib().color_image(self.color, self.kcam, self.new_K_kin,
                                                                      u_c, v_c, u_c_drop, v_c_drop, u_c_pmat, v_c_pmat, camera_alpha_vert, camera_alpha_horiz)
                    color_reshaped = imutils.rotate(color_reshaped, kinect_rotate_angle)
                    color_reshaped = color_reshaped[pre_VERT_CUT+kinect_shift_up:-pre_VERT_CUT+kinect_shift_up,  HORIZ_CUT+kinect_shift_right : 540 - HORIZ_CUT+kinect_shift_right, :]
                    tf_corners[0:4, :], color_reshaped = self.transform_selected_points(color_reshaped,
                                                                                                 camera_alpha_vert,
                                                                                                 camera_alpha_horiz,
                                                                                                 kinect_rotate_angle,
                                                                                                 kinect_shift_right,
                                                                                                 kinect_shift_up, [1.0, 0],
                                                                                                 [1.0, 0],
                                                                                                 np.copy(self.tf_corners[0:4][:]))

                    all_image_list.append(color_reshaped)


                    #DEPTH
                    h = VizLib().get_new_K_kin_homography(camera_alpha_vert, camera_alpha_horiz, self.new_K_kin)
                    depth_r_orig = cv2.warpPerspective(self.depth_r, h, (self.depth_r.shape[1], self.depth_r.shape[0]))
                    depth_r_orig = imutils.rotate(depth_r_orig, kinect_rotate_angle)
                    depth_r_orig = depth_r_orig[HORIZ_CUT + kinect_shift_right: 540 - HORIZ_CUT + kinect_shift_right, pre_VERT_CUT - kinect_shift_up:-pre_VERT_CUT - kinect_shift_up]
                    depth_r_reshaped, depth_r_size, depth_r_orig = VizLib().depth_image(depth_r_orig, u_c, v_c)
                    self.depth_r_orig = depth_r_orig
                    self.depthcam_midpixel = [self.new_K_kin[1, 2] - HORIZ_CUT - kinect_shift_right, (960-self.new_K_kin[0, 2]) - pre_VERT_CUT - kinect_shift_up]

                    all_image_list.append(depth_r_reshaped)


                    self.get_pc_from_depthmap(self.bed_state[0], tf_corners[2, :])

                    #PRESSURE
                    self.pressure = np.clip(self.pressure*4, 0, 100)
                    pressure_reshaped, pressure_size, coords_from_top_left = VizLib().pressure_image(self.pressure, self.pressure_im_size,
                                                                               self.pressure_im_size_required, color_size,
                                                                               u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                                               u_p_bend, v_p_bend)
                    pressure_shape = pressure_reshaped.shape
                    pressure_reshaped = cv2.resize(pressure_reshaped, None, fx=pressure_horiz_scale,
                                                  fy=pressure_vert_scale)[0:pressure_shape[0],
                                                  0:pressure_shape[1], :]

                    if pressure_horiz_scale < 1.0 or pressure_vert_scale < 1.0:
                        pressure_reshaped_padded = np.zeros(pressure_shape).astype(np.uint8)
                        pressure_reshaped_padded[0:pressure_reshaped.shape[0], 0:pressure_reshaped.shape[1], :] += pressure_reshaped
                        pressure_reshaped = np.copy(pressure_reshaped_padded)

                    coords_from_top_left[0] -= coords_from_top_left[0]*(1-pressure_horiz_scale)
                    coords_from_top_left[1] += (960 - coords_from_top_left[1])*(1-pressure_vert_scale)

                    pressure_reshaped = pressure_reshaped[pre_VERT_CUT:-pre_VERT_CUT,  HORIZ_CUT : 540 - HORIZ_CUT, :]


                    all_image_list.append(pressure_reshaped)



                    self.all_images = np.zeros((960-np.abs(pre_VERT_CUT)*2, 1, 3)).astype(np.uint8)
                    for image in all_image_list:
                        print image.shape
                        self.all_images = np.concatenate((self.all_images, image), axis = 1)

                    self.all_images = self.all_images[VERT_CUT : 960 - VERT_CUT, :, :]



                    # Nudge the scale factor upward until the resized width is
                    # a multiple of 4 (assumed to be required downstream).
                    while cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                     fy=self.overall_image_scale_amount).shape[1] % 4 != 0:
                        self.overall_image_scale_amount += 0.001

                    coords_from_top_left[0] -= (HORIZ_CUT)
                    coords_from_top_left[1] = 960 - pre_VERT_CUT - coords_from_top_left[1]
                    self.coords_from_top_left = (np.array(coords_from_top_left) * self.overall_image_scale_amount)
                    #print self.coords_from_top_left

                    self.all_images = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount, fy=self.overall_image_scale_amount)
                    self.cursor_shift = self.all_images.shape[1]/4


                    self.all_images_clone = self.all_images.copy()


                    cv2.imshow('all_images', self.all_images)
                    k = cv2.waitKey(1)
                    if SAVE == False:
                        time.sleep(5)
                    #cv2.waitKey(0)


                    #self.estimate_pose(self.pressure, self.bed_state[0], markers_c, model, model2)
        if SAVE == True:
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/color.npy", colornpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/depth_r.npy", depth_rnpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/pressure.npy", pressurenpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/bedstate.npy", bedstatenpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/markers.npy", np.array(markersnpy), allow_pickle=True)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/time_stamp.npy", time_stampnpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/point_cloud.npy", point_cloudnpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/config_code.npy", config_codenpy)
            np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/date_stamp.npy", date_stampnpy)
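
The device-remapping loops at the top of evaluate_data are a reusable pattern. Below is a hedged sketch of them as a standalone helper; the name and defaults are hypothetical, and unlike the snippet it falls back to CPU rather than leaving the model undefined.

    import torch

    def load_checkpoint(path, use_gpu=True, train_mode=False, num_devices=8):
        # Try remapping 'cuda:i' -> 'cuda:0' for each device index the
        # checkpoint might have been saved on; fall back to CPU otherwise.
        model = None
        if use_gpu:
            for i in range(num_devices):
                try:
                    model = torch.load(path, map_location={'cuda:%d' % i: 'cuda:0'})
                    model = model.cuda()
                    break
                except Exception:
                    model = None
        if model is None:
            model = torch.load(path, map_location='cpu')
        return model.train() if train_mode else model.eval()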
Code Example #4
    def evaluate_data(self, filename1):

        #self.Render = libRender.pyRenderMesh(render = False)
        self.pyRender = libPyRender.pyRenderMesh(render=False)

        #function_input = np.array(function_input)*np.array([10, 10, 10, 10, 10, 10, 0.1, 0.1, 0.1, 0.1, 1])
        #function_input += np.array([2.2, 32, -1, 1.2, 32, -5, 1.0, 1.0, 0.96, 0.95, 0.8])
        function_input = np.array(self.calibration_optim_values) * np.array(
            [10, 10, 10, 0.1, 0.1, 0.1, 0.1])
        function_input += np.array([1.2, 32, -5, 1.0, 1.0, 0.96, 0.95])

        kinect_rotate_angle = function_input[3 - 3]
        kinect_shift_up = int(function_input[4 - 3])  # - 40
        kinect_shift_right = int(function_input[5 - 3])  # - 20
        camera_alpha_vert = function_input[6 - 3]
        camera_alpha_horiz = function_input[7 - 3]
        pressure_horiz_scale = function_input[8 - 3]
        pressure_vert_scale = function_input[9 - 3]
        #head_angle_multiplier = function_input[10-3]

        #print kinect_shift_up, kinect_shift_right, "SHIFT UP RIGHT"
        #print pressure_horiz_scale, pressure_vert_scale, "PRESSURE SCALES" #1.04 for one too far to left

        #file_dir = "/media/henry/multimodal_data_1/all_hevans_data/0905_2_Evening/0255"
        #file_dir_list = ["/media/henry/multimodal_data_2/test_data/data_072019_0001/"]
        blah = True

        #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0007"
        #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0006"
        #file_dir = "/home/henry/ivy_test_data/data_102019_kneeup_0000"
        #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/P000/data_102019_kneeup_0000"

        if PARTICIPANT == "P106":
            #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_000"
            file_dir = "/home/henry/Desktop/CVPR2020_study/" + PARTICIPANT + "/data_" + PARTICIPANT + "_000"
            file_dirs = [  #file_dir+str(0),
                file_dir + str(1), file_dir + str(2), file_dir + str(3),
                file_dir + str(4), file_dir + str(5)
            ]
        else:
            #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_000"
            file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/" + PARTICIPANT + "/data_checked_" + PARTICIPANT + "-" + POSE_TYPE
            file_dirs = [file_dir]
            #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_000"
            #file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-C_0000"
            #file_dirs = [file_dir]

        self.RESULTS_DICT = {}
        self.RESULTS_DICT['body_roll_rad'] = []
        self.RESULTS_DICT['v_to_gt_err'] = []
        self.RESULTS_DICT['v_limb_to_gt_err'] = []
        self.RESULTS_DICT['gt_to_v_err'] = []
        self.RESULTS_DICT['precision'] = []
        self.RESULTS_DICT['recall'] = []
        self.RESULTS_DICT['overlap_d_err'] = []
        self.RESULTS_DICT['all_d_err'] = []
        self.RESULTS_DICT['betas'] = []

        init_time = time.time()

        for file_dir in file_dirs:
            V3D.load_next_file(file_dir)

            start_num = 0
            #print self.color_all.shape

            #for im_num in range(29, 100):
            for im_num in range(start_num, self.color_all.shape[0]):

                #For P188: skip 5. 13 good cross legs

                print "NEXT IM!", im_num, " ", time.time() - init_time

                if PARTICIPANT == "S114" and POSE_TYPE == "2" and im_num in [
                        26, 29
                ]:
                    continue  #these don't have point clouds
                if PARTICIPANT == "S165" and POSE_TYPE == "2" and im_num in [
                        1, 3, 15
                ]:
                    continue  #these don't have point clouds
                if PARTICIPANT == "S188" and POSE_TYPE == "2" and im_num in [
                        5, 17, 21
                ]:
                    continue

                #good picks: 103 - 6 good for what info is there
                #151 11 is  good
                #179 - 7 is great
                #187 natural poses very good
                #196 - 11 has great smile :)

                self.overall_image_scale_amount = 0.85

                half_w_half_l = [0.4, 0.4, 1.1, 1.1]

                all_image_list = []
                self.label_single_image = []

                self.label_index = 0

                self.color = self.color_all[im_num]
                self.depth_r = self.depth_r_all[im_num]
                self.pressure = self.pressure_all[im_num]
                self.bed_state = self.bedstate_all[im_num]

                if self.point_cloud_autofil_all[im_num].shape[0] == 0:
                    self.point_cloud_autofil_all[im_num] = np.array(
                        [[0.0, 0.0, 0.0]])
                self.point_cloud_autofil = self.point_cloud_autofil_all[
                    im_num] + self.markers_all[im_num][
                        2]  #[0.0, 0.0, 0.0]#0.1]
                #print self.markers_all[im_num]
                #print self.point_cloud_autofil.shape, 'PC AUTOFIL ORIG'

                self.bed_state[
                    0] = self.bed_state[0] * 0.0  #*head_angle_multiplier
                self.bed_state *= 0
                #self.bed_state += 60.
                #print self.bed_state, np.shape(self.pressure)

                if im_num == start_num and blah == True:
                    markers_c = []
                    markers_c.append(self.markers_all[im_num][0])
                    markers_c.append(self.markers_all[im_num][1])
                    markers_c.append(self.markers_all[im_num][2])
                    markers_c.append(self.markers_all[im_num][3])
                    #for idx in range(4):
                    #if markers_c[idx] is not None:
                    #markers_c[idx] = np.array(markers_c[idx])*213./228.
                blah = False

                #print markers_c, 'Markers C'

                # Get the marker points in 2D on the color image
                u_c, v_c = ArTagLib().color_2D_markers(markers_c,
                                                       self.new_K_kin)

                # Get the marker points dropped to the height of the pressure mat
                u_c_drop, v_c_drop, markers_c_drop = ArTagLib(
                ).color_2D_markers_drop(markers_c, self.new_K_kin)

                #print markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l
                # Get the geometry for sizing the pressure mat
                pmat_ArTagLib = ArTagLib()
                self.pressure_im_size_required, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend, half_w_half_l = \
                    pmat_ArTagLib.p_mat_geom(markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l)

                tf_corners = np.zeros((8, 2))
                tf_corners[0:8, :] = np.copy(self.tf_corners)

                #COLOR
                #if self.color is not 0:
                color_reshaped, color_size = VizLib().color_image(
                    self.color, self.kcam, self.new_K_kin, u_c, v_c, u_c_drop,
                    v_c_drop, u_c_pmat, v_c_pmat, camera_alpha_vert,
                    camera_alpha_horiz)
                color_reshaped = imutils.rotate(color_reshaped,
                                                kinect_rotate_angle)
                color_reshaped = color_reshaped[pre_VERT_CUT +
                                                kinect_shift_up:-pre_VERT_CUT +
                                                kinect_shift_up, HORIZ_CUT +
                                                kinect_shift_right:540 -
                                                HORIZ_CUT +
                                                kinect_shift_right, :]
                tf_corners[
                    0:4, :], color_reshaped = self.transform_selected_points(
                        color_reshaped, camera_alpha_vert, camera_alpha_horiz,
                        kinect_rotate_angle, kinect_shift_right,
                        kinect_shift_up, [1.0, 0], [1.0, 0],
                        np.copy(self.tf_corners[0:4][:]))

                all_image_list.append(color_reshaped)

                #DEPTH
                h = VizLib().get_new_K_kin_homography(camera_alpha_vert,
                                                      camera_alpha_horiz,
                                                      self.new_K_kin)

                depth_r_orig = cv2.warpPerspective(
                    self.depth_r, h,
                    (self.depth_r.shape[1], self.depth_r.shape[0]))
                depth_r_orig = imutils.rotate(depth_r_orig,
                                              kinect_rotate_angle)
                depth_r_orig = depth_r_orig[HORIZ_CUT +
                                            kinect_shift_right:540 -
                                            HORIZ_CUT + kinect_shift_right,
                                            pre_VERT_CUT -
                                            kinect_shift_up:-pre_VERT_CUT -
                                            kinect_shift_up]
                depth_r_reshaped, depth_r_size, depth_r_orig = VizLib(
                ).depth_image(depth_r_orig, u_c, v_c)
                self.depth_r_orig = depth_r_orig
                self.depthcam_midpixel = [
                    self.new_K_kin[1, 2] - HORIZ_CUT - kinect_shift_right,
                    (960 - self.new_K_kin[0, 2]) - pre_VERT_CUT -
                    kinect_shift_up
                ]
                self.depthcam_midpixel2 = [
                    self.new_K_kin[1, 2] - HORIZ_CUT,
                    (960 - self.new_K_kin[0, 2]) - pre_VERT_CUT
                ]

                #print h, "H" #warping perspective
                #print kinect_rotate_angle #the amount to rotate counterclockwise about normal vector to the bed
                #print kinect_shift_right, kinect_shift_up #pixel shift of depth im. convert this to meters based on depth of

                depth_r_orig_nowarp = imutils.rotate(self.depth_r, 0)
                depth_r_orig_nowarp = depth_r_orig_nowarp[HORIZ_CUT + 0:540 -
                                                          HORIZ_CUT + 0,
                                                          pre_VERT_CUT -
                                                          0:-pre_VERT_CUT - 0]
                depth_r_reshaped_nowarp, depth_r_size, depth_r_orig_nowarp = VizLib(
                ).depth_image(depth_r_orig_nowarp, u_c,
                              v_c)  #this just does two rotations

                all_image_list.append(depth_r_reshaped)
                all_image_list.append(depth_r_reshaped_nowarp)

                X, Y, Z = self.get_pc_from_depthmap(self.bed_state[0],
                                                    tf_corners[2, :])

                #print self.pressure_im_size_required, color_size, u_c_drop, v_c_drop, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend

                #PRESSURE
                #pressure_vert_scale = 1.0
                #pressure_horiz_scale = 1.0
                self.pressure = np.clip(self.pressure * 4, 0, 100)
                pressure_reshaped, pressure_size, coords_from_top_left = VizLib(
                ).pressure_image(self.pressure, self.pressure_im_size,
                                 self.pressure_im_size_required, color_size,
                                 u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                 u_p_bend, v_p_bend)
                pressure_shape = pressure_reshaped.shape
                pressure_reshaped = cv2.resize(
                    pressure_reshaped,
                    None,
                    fx=pressure_horiz_scale,
                    fy=pressure_vert_scale)[0:pressure_shape[0],
                                            0:pressure_shape[1], :]

                if pressure_horiz_scale < 1.0 or pressure_vert_scale < 1.0:
                    pressure_reshaped_padded = np.zeros(pressure_shape).astype(
                        np.uint8)
                    pressure_reshaped_padded[
                        0:pressure_reshaped.shape[0],
                        0:pressure_reshaped.shape[1], :] += pressure_reshaped
                    pressure_reshaped = np.copy(pressure_reshaped_padded)

                coords_from_top_left[0] -= coords_from_top_left[0] * (
                    1 - pressure_horiz_scale)
                coords_from_top_left[1] += (960 - coords_from_top_left[1]) * (
                    1 - pressure_vert_scale)

                pressure_reshaped = pressure_reshaped[
                    pre_VERT_CUT:-pre_VERT_CUT, HORIZ_CUT:540 - HORIZ_CUT, :]

                all_image_list.append(pressure_reshaped)

                self.all_images = np.zeros(
                    (960 - np.abs(pre_VERT_CUT) * 2, 1, 3)).astype(np.uint8)
                for image in all_image_list:
                    #print image.shape
                    self.all_images = np.concatenate((self.all_images, image),
                                                     axis=1)

                self.all_images = self.all_images[VERT_CUT:960 -
                                                  VERT_CUT, :, :]

                # Nudge the scale factor upward until the resized width is
                # a multiple of 4 (assumed to be required downstream).
                while cv2.resize(
                        self.all_images, (0, 0),
                        fx=self.overall_image_scale_amount,
                        fy=self.overall_image_scale_amount).shape[1] % 4 != 0:
                    self.overall_image_scale_amount += 0.001

                coords_from_top_left[0] -= (HORIZ_CUT)
                coords_from_top_left[
                    1] = 960 - pre_VERT_CUT - coords_from_top_left[1]
                self.coords_from_top_left = (np.array(coords_from_top_left) *
                                             self.overall_image_scale_amount)
                #print self.coords_from_top_left

                self.all_images = cv2.resize(
                    self.all_images, (0, 0),
                    fx=self.overall_image_scale_amount,
                    fy=self.overall_image_scale_amount)
                self.cursor_shift = self.all_images.shape[1] / 4

                self.all_images_clone = self.all_images.copy()

                cv2.imshow('all_images', self.all_images)
                k = cv2.waitKey(1)
                #cv2.waitKey(0)

                self.pc_all = [Y, X, -Z]
                #print np.shape(self.pc_all), "PC ALL SHAPE"

                self.estimate_pose(self.pressure, self.bed_state[0], markers_c,
                                   tf_corners, camera_alpha_vert,
                                   camera_alpha_horiz, h, kinect_rotate_angle)

        pkl.dump(
            self.RESULTS_DICT,
            open(
                '/media/henry/multimodal_data_2/data/final_results/results_real_'
                + PARTICIPANT + '_' + POSE_TYPE + '_' + NETWORK_2 + '.p',
                'wb'))
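
Each run pickles RESULTS_DICT for offline analysis. A minimal sketch of reading one of these files back and averaging its per-frame metrics; the key names come from the code above, while the path and the assumption of one scalar per frame are mine.

    import numpy as np
    import pickle as pkl

    def summarize_results(path):
        with open(path, 'rb') as f:
            results = pkl.load(f)
        for key in ['v_to_gt_err', 'gt_to_v_err', 'precision', 'recall']:
            vals = np.asarray(results.get(key, []), dtype=float)  # one scalar per frame
            if vals.size:
                print('%s: mean %.4f over %d frames' % (key, vals.mean(), vals.size))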
Code Example #5
    def estimate_real_time(self, filename1, filename2=None):

        pyRender = libRender.pyRenderMesh()
        mat_size = (64, 27)
        from unpack_batch_lib import UnpackBatchLib

        if torch.cuda.is_available():
            # Use for GPU
            GPU = True
            dtype = torch.cuda.FloatTensor
            print '######################### CUDA is available! #############################'
        else:
            # Use for CPU
            GPU = False
            dtype = torch.FloatTensor
            print '############################## USING CPU #################################'

        import convnet as convnet
        from torch.autograd import Variable

        if GPU == True:
            for i in range(0, 8):
                try:
                    model = torch.load(
                        filename1, map_location={'cuda:' + str(i): 'cuda:0'})
                    model = model.cuda().eval()
                    break
                except Exception:
                    pass
            if filename2 is not None:
                for i in range(0, 8):
                    try:
                        model2 = torch.load(
                            filename2,
                            map_location={'cuda:' + str(i): 'cuda:0'})
                        model2 = model2.cuda().eval()
                        break
                    except Exception:
                        pass
            else:
                model2 = None

        else:
            model = torch.load(filename1, map_location='cpu').eval()
            if filename2 is not None:
                model2 = torch.load(filename2, map_location='cpu').eval()
            else:
                model2 = None

        pub = rospy.Publisher('meshTopic', MeshAttr, queue_size=10)
        #rospy.init_node('talker', anonymous=False)
        while not rospy.is_shutdown():

            pmat = np.fliplr(
                np.flipud(
                    np.clip(self.pressure.reshape(mat_size) *
                            float(self.CTRL_PNL['pmat_mult']),
                            a_min=0,
                            a_max=100)))

            if self.CTRL_PNL['cal_noise'] == False:
                pmat = gaussian_filter(pmat, sigma=0.5)

            pmat_stack = PreprocessingLib(
            ).preprocessing_create_pressure_angle_stack_realtime(
                pmat, self.bedangle, mat_size)

            if self.CTRL_PNL['cal_noise'] == False:
                pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

            pmat_stack = np.array(pmat_stack)
            if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
                pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
                pmat_contact[pmat_contact > 0] = 100
                pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

            weight_input = WEIGHT_LBS / 2.20462
            height_input = (HEIGHT_IN * 0.0254 - 1) * 100

            batch1 = np.zeros((1, 162))
            if GENDER == 'f':
                batch1[:, 157] += 1
            elif GENDER == 'm':
                batch1[:, 158] += 1
            batch1[:, 160] += weight_input
            batch1[:, 161] += height_input

            if self.CTRL_PNL['normalize_input'] == True:
                self.CTRL_PNL['depth_map_input_est'] = False
                pmat_stack = self.TPL.normalize_network_input(
                    pmat_stack, self.CTRL_PNL)
                batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

            pmat_stack = torch.Tensor(pmat_stack)
            batch1 = torch.Tensor(batch1)

            batch = []
            batch.append(pmat_stack)
            batch.append(batch1)

            self.CTRL_PNL['adjust_ang_from_est'] = False
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib(
            ).unpackage_batch_kin_pass(batch, False, model, self.CTRL_PNL)

            mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(
                1) / 16.69545796387731
            mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(
                1) / 45.08513083167194
            mdm_est_pos[mdm_est_pos < 0] = 0
            mdm_est_neg[mdm_est_neg > 0] = 0
            mdm_est_neg *= -1
            cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(
                1) * 100 / 43.55800622930469

            #1. / 16.69545796387731,  # pos est depth
            #1. / 45.08513083167194,  # neg est depth
            #1. / 43.55800622930469,  # cm est

            if model2 is not None:
                batch_cor = []
                batch_cor.append(
                    torch.cat(
                        (pmat_stack[:, 0:1, :, :],
                         mdm_est_pos.type(torch.FloatTensor),
                         mdm_est_neg.type(
                             torch.FloatTensor), cm_est.type(
                                 torch.FloatTensor), pmat_stack[:, 1:, :, :]),
                        dim=1))

                if self.CTRL_PNL['full_body_rot'] == False:
                    batch_cor.append(
                        torch.cat(
                            (batch1, OUTPUT_DICT['batch_betas_est'].cpu(),
                             OUTPUT_DICT['batch_angles_est'].cpu(),
                             OUTPUT_DICT['batch_root_xyz_est'].cpu()),
                            dim=1))
                elif self.CTRL_PNL['full_body_rot'] == True:
                    batch_cor.append(
                        torch.cat(
                            (batch1, OUTPUT_DICT['batch_betas_est'].cpu(),
                             OUTPUT_DICT['batch_angles_est'].cpu(),
                             OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                             OUTPUT_DICT['batch_root_atan2_est'].cpu()),
                            dim=1))

                self.CTRL_PNL['adjust_ang_from_est'] = True
                scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib(
                ).unpackage_batch_kin_pass(batch_cor, False, model2,
                                           self.CTRL_PNL)

            betas_est = np.squeeze(
                OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy())
            angles_est = np.squeeze(OUTPUT_DICT['batch_angles_est_post_clip'])
            root_shift_est = np.squeeze(
                OUTPUT_DICT['batch_root_xyz_est_post_clip'].cpu().numpy())

            #print betas_est.shape, root_shift_est.shape, angles_est.shape

            #print betas_est, root_shift_est, angles_est
            angles_est = angles_est.reshape(72)

            for idx in range(10):
                #print shape_pose_vol[0][idx]
                self.m.betas[idx] = betas_est[idx]

            for idx in range(72):
                self.m.pose[idx] = angles_est[idx]

            init_root = np.array(self.m.pose[0:3]) + 0.000001
            init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
            root_rot = libKinematics.eulerAnglesToRotationMatrix(
                [np.pi, 0.0, np.pi / 2])
            #print root_rot
            trans_root = libKinematics.dir_cos_angles_from_matrix(
                np.matmul(root_rot, init_rootR))

            self.m.pose[0] = trans_root[0]
            self.m.pose[1] = trans_root[1]
            self.m.pose[2] = trans_root[2]

            #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
            # self.m.pose[51] = selection_r

            print self.m.r
            #print OUTPUT_DICT['verts']

            pyRender.mesh_render_pose_bed_orig(
                self.m, root_shift_est, self.point_cloud_array, self.pc_isnew,
                pmat * 5. / float(self.CTRL_PNL['pmat_mult']), self.markers,
                self.bedangle)
            self.point_cloud_array = None
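
The real-time loop assembles a 162-entry metadata vector by hand. A sketch that factors it out, under the index assumptions visible in the snippet (one-hot gender at 157/158, mass at 160, height term at 161); the function name is hypothetical.

    import numpy as np

    def make_body_batch(gender, weight_lbs, height_in):
        # Indices follow the snippet above: one-hot gender at 157/158,
        # body mass in kg at 160, height term in cm at 161.
        batch1 = np.zeros((1, 162))
        if gender == 'f':
            batch1[:, 157] = 1
        elif gender == 'm':
            batch1[:, 158] = 1
        batch1[:, 160] = weight_lbs / 2.20462
        batch1[:, 161] = (height_in * 0.0254 - 1) * 100
        return batch1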
Code Example #6
    def val_convnet_general(self, epoch):

        self.gender = "f"
        if self.gender == "m":
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.m = load_model(model_path)


        self.pyRender = libPyRender.pyRenderMesh(render = True)

        # Validate the model for one epoch. Models with Dropout or BatchNorm
        # use different forward passes at train and test time; eval mode
        # selects the deterministic test-time pass for both networks.
        self.model.eval()
        self.model2.eval()

        RESULTS_DICT = {}
        RESULTS_DICT['j_err'] = []
        RESULTS_DICT['betas'] = []
        RESULTS_DICT['dir_v_err'] = []
        RESULTS_DICT['dir_v_limb_err'] = []
        RESULTS_DICT['v_to_gt_err'] = []
        RESULTS_DICT['v_limb_to_gt_err'] = []
        RESULTS_DICT['gt_to_v_err'] = []
        RESULTS_DICT['precision'] = []
        RESULTS_DICT['recall'] = []
        RESULTS_DICT['overlap_d_err'] = []
        RESULTS_DICT['all_d_err'] = []
        init_time = time.time()

        with torch.autograd.set_detect_anomaly(True):

            # This will loop a total = training_images/batch_size times
            for batch_idx, batch in enumerate(self.train_loader):

                batch1 = batch[1].clone()

                betas_gt = torch.mean(batch[1][:, 72:82], dim = 0).numpy()
                angles_gt = torch.mean(batch[1][:, 82:154], dim = 0).numpy()
                root_shift_est_gt = torch.mean(batch[1][:, 154:157], dim = 0).numpy()

                NUMOFOUTPUTDIMS = 3
                NUMOFOUTPUTNODES_TRAIN = 24
                self.output_size_train = (NUMOFOUTPUTNODES_TRAIN, NUMOFOUTPUTDIMS)

                self.CTRL_PNL['adjust_ang_from_est'] = False
                self.CTRL_PNL['depth_map_labels'] = True
                print batch[0].size(), "batch 0 shape"
                scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, self.model,
                                                                                            self.CTRL_PNL)
                print OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy()[0], 'betas init'
                mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
                mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
                mdm_est_pos[mdm_est_pos < 0] = 0
                mdm_est_neg[mdm_est_neg > 0] = 0
                mdm_est_neg *= -1
                cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

                # 1. / 16.69545796387731,  # pos est depth
                # 1. / 45.08513083167194,  # neg est depth
                # 1. / 43.55800622930469,  # cm est

                sc_sample1 = OUTPUT_DICT['batch_targets_est'].clone()
                sc_sample1 = sc_sample1[0, :].squeeze() / 1000
                sc_sample1 = sc_sample1.view(self.output_size_train)
                # print sc_sample1

                if self.model2 is not None:
                    print "Using model 2"
                    batch_cor = []
                    batch_cor.append(torch.cat((batch[0][:, 0:1, :, :],
                                                mdm_est_pos.type(torch.FloatTensor),
                                                mdm_est_neg.type(torch.FloatTensor),
                                                cm_est.type(torch.FloatTensor),
                                                batch[0][:, 1:, :, :]), dim=1))


                    if self.CTRL_PNL['full_body_rot'] == False:
                        batch_cor.append(torch.cat((batch1,
                                                    OUTPUT_DICT['batch_betas_est'].cpu(),
                                                    OUTPUT_DICT['batch_angles_est'].cpu(),
                                                    OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim=1))
                    elif self.CTRL_PNL['full_body_rot'] == True:
                        batch_cor.append(torch.cat((batch1,
                                                    OUTPUT_DICT['batch_betas_est'].cpu(),
                                                    OUTPUT_DICT['batch_angles_est'].cpu(),
                                                    OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                                    OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim=1))



                    self.CTRL_PNL['adjust_ang_from_est'] = True
                    self.CTRL_PNL['depth_map_labels'] = False
                    scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False,
                                                                                                self.model2,
                                                                                                self.CTRL_PNL)

                # print betas_est, root_shift_est, angles_est
                if self.CTRL_PNL['dropout'] == True:
                    #print OUTPUT_DICT['verts'].shape
                    smpl_verts = np.mean(OUTPUT_DICT['verts'], axis=0)
                    dropout_variance = np.std(OUTPUT_DICT['verts'], axis=0)
                    dropout_variance = np.linalg.norm(dropout_variance, axis=1)
                else:
                    smpl_verts = OUTPUT_DICT['verts'][0, :, :]
                    dropout_variance = None




                smpl_verts = np.concatenate((smpl_verts[:, 1:2] - 0.286 + 0.0143, smpl_verts[:, 0:1] - 0.286 + 0.0143,
                                             - smpl_verts[:, 2:3]), axis=1)

                smpl_faces = np.array(self.m.f)


                q = OUTPUT_DICT['batch_mdm_est'].data.numpy().reshape(OUTPUT_DICT['batch_mdm_est'].size()[0], 64, 27) * -1
                q = np.mean(q, axis=0)

                camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                bedangle = 0.0
                # print smpl_verts

                RESULTS_DICT['betas'].append(OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy()[0])
                print RESULTS_DICT['betas'][-1], "BETAS"

                viz_type = "3D"

                if viz_type == "2D":
                    from visualization_lib import VisualizationLib
                    if self.model2 is not None:
                        self.im_sample = INPUT_DICT['batch_images'][0, 4:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
                    else:
                        self.im_sample = INPUT_DICT['batch_images'][0, 1:,:].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
                    self.im_sample_ext = INPUT_DICT['batch_images'][0, 0:,:].squeeze() * 20.  # normalizing_std_constants[0]  #pmat contact
                    # self.im_sample_ext2 = INPUT_DICT['batch_images'][im_display_idx, 2:, :].squeeze()*20.#normalizing_std_constants[4]  #sobel
                    self.im_sample_ext3 = OUTPUT_DICT['batch_mdm_est'][0, :, :].squeeze().unsqueeze(0) * -1  # est depth output

                    # print scores[0, 10:16], 'scores of body rot'

                    # print self.im_sample.size(), self.im_sample_ext.size(), self.im_sample_ext2.size(), self.im_sample_ext3.size()

                    # self.publish_depth_marker_array(self.im_sample_ext3)

                    self.tar_sample = INPUT_DICT['batch_targets']
                    self.tar_sample = self.tar_sample[0, :].squeeze() / 1000
                    sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
                    sc_sample = sc_sample[0, :].squeeze() / 1000

                    sc_sample = sc_sample.view(self.output_size_train)

                    VisualizationLib().visualize_pressure_map(self.im_sample, sc_sample1, sc_sample,
                                                              # self.im_sample_ext, None, None,
                                                              self.im_sample_ext3, None, None,
                                                              # , self.tar_sample_val, self.sc_sample_val,
                                                              block=False)


                elif viz_type == "3D":
                    pmat = batch[0][0, 1, :, :].clone().numpy()*25.50538629767412
                    #print pmat.shape

                    for beta in range(betas_gt.shape[0]):
                        self.m.betas[beta] = betas_gt[beta]
                    for angle in range(angles_gt.shape[0]):
                        self.m.pose[angle] = angles_gt[angle]

                    smpl_verts_gt = np.array(self.m.r)
                    for s in range(root_shift_est_gt.shape[0]):
                        smpl_verts_gt[:, s] += (root_shift_est_gt[s] - float(self.m.J_transformed[0, s]))

                    smpl_verts_gt = np.concatenate(
                        (smpl_verts_gt[:, 1:2] - 0.286 + 0.0143, smpl_verts_gt[:, 0:1] - 0.286 + 0.0143,
                          0.0 - smpl_verts_gt[:, 2:3]), axis=1)



                    joint_cart_gt = np.array(self.m.J_transformed).reshape(24, 3)
                    for s in range(root_shift_est_gt.shape[0]):
                        joint_cart_gt[:, s] += (root_shift_est_gt[s] - float(self.m.J_transformed[0, s]))

                    #print joint_cart_gt, 'gt'

                    sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
                    sc_sample = (sc_sample[0, :].squeeze().numpy() / 1000).reshape(24, 3)

                    #print sc_sample, 'estimate'
                    joint_error = np.linalg.norm(joint_cart_gt-sc_sample, axis = 1)
                    #print joint_error
                    RESULTS_DICT['j_err'].append(joint_error)


                    camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                    # render everything
                    RESULTS_DICT = self.pyRender.render_mesh_pc_bed_pyrender_everything_synth(smpl_verts, smpl_faces,
                                                                            camera_point, bedangle, RESULTS_DICT,
                                                                            smpl_verts_gt=smpl_verts_gt, pmat=pmat,
                                                                            markers=None,
                                                                            dropout_variance=dropout_variance)

                #time.sleep(300)

                #print RESULTS_DICT['j_err']
                print np.mean(np.array(RESULTS_DICT['j_err']), axis = 0)
                #print RESULTS_DICT['precision']
                print np.mean(RESULTS_DICT['precision'])
                print time.time() - init_time
                #break

        #save here

        pkl.dump(RESULTS_DICT, open('/media/henry/multimodal_data_2/data/final_results/results_synth_'+TESTING_FILENAME+'_'+NETWORK_2+'.p', 'wb'))
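
The joint error logged in the 3D branch reduces to a per-joint Euclidean distance between matched ground-truth and estimated joints. As a standalone sketch (24 SMPL joints, meters; the function name is hypothetical):

    import numpy as np

    def per_joint_error(joints_gt, joints_est):
        # Both arguments are flattened or (24, 3) joint arrays in meters;
        # returns a length-24 vector of Euclidean errors.
        joints_gt = np.asarray(joints_gt, dtype=float).reshape(24, 3)
        joints_est = np.asarray(joints_est, dtype=float).reshape(24, 3)
        return np.linalg.norm(joints_gt - joints_est, axis=1)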