def transform_selected_points(self, image, camera_alpha_vert, camera_alpha_horiz, angle, right, up, h_scale_cut, v_scale_cut, coords_subset):
    h_scale = h_scale_cut[0]
    h_cut = h_scale_cut[1]
    v_scale = v_scale_cut[0]
    v_cut = v_scale_cut[1]
    tf_coords_subset = np.copy(coords_subset)
    print camera_alpha_vert, camera_alpha_horiz, HORIZ_CUT, VERT_CUT, pre_VERT_CUT, right

    h = VizLib().get_new_K_kin_homography(camera_alpha_vert, camera_alpha_horiz, self.new_K_kin, flip_vert=-1)

    for i in range(4):
        new_coords = np.matmul(h, np.array([tf_coords_subset[i, 1] + pre_VERT_CUT, tf_coords_subset[i, 0] + HORIZ_CUT, 1]))
        new_coords = new_coords / new_coords[2]
        tf_coords_subset[i, 0] = new_coords[1] - HORIZ_CUT
        tf_coords_subset[i, 1] = new_coords[0] - pre_VERT_CUT

        tf_coords_subset[i, 1] = (tf_coords_subset[i, 1] - image.shape[0] / 2) * np.cos(np.deg2rad(angle)) \
                                 - (tf_coords_subset[i, 0] - image.shape[1] / 2) * np.sin(np.deg2rad(angle)) \
                                 + image.shape[0] / 2 - up
        tf_coords_subset[i, 0] = (tf_coords_subset[i, 1] - image.shape[0] / 2) * np.sin(np.deg2rad(angle)) \
                                 + (tf_coords_subset[i, 0] - image.shape[1] / 2) * np.cos(np.deg2rad(angle)) \
                                 + image.shape[1] / 2 - right

        tf_coords_subset[i, 0] = h_scale * (tf_coords_subset[i][0] + h_cut) - h_cut
        tf_coords_subset[i, 1] = v_scale * (tf_coords_subset[i][1] + v_cut) - v_cut

        image[int(tf_coords_subset[i][1] + 0.5) - 2:int(tf_coords_subset[i][1] + 0.5) + 2,
              int(tf_coords_subset[i][0] + 0.5) - 2:int(tf_coords_subset[i][0] + 0.5) + 2, :] = 255

    return tf_coords_subset, image
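# ---------------------------------------------------------------------------
# A minimal standalone sketch (not part of the original pipeline) of the
# rotate-about-the-image-center transform that the per-corner math above is
# built on, applied to a single (x, y) point. The helper name and the example
# values below are illustrative assumptions only.
def _rotate_point_about_center_sketch(x, y, angle_deg, img_w, img_h, shift_right=0, shift_up=0):
    import numpy as np
    # Rotate (x, y) about the image center by angle_deg, then apply pixel
    # shifts analogous to the kinect_shift_right / kinect_shift_up offsets
    # used elsewhere in this file.
    cx, cy = img_w / 2.0, img_h / 2.0
    c, s = np.cos(np.deg2rad(angle_deg)), np.sin(np.deg2rad(angle_deg))
    x_new = (y - cy) * s + (x - cx) * c + cx - shift_right
    y_new = (y - cy) * c - (x - cx) * s + cy - shift_up
    return x_new, y_new

# Example usage with made-up values (540 x 960 crop, as used above):
# print _rotate_point_about_center_sketch(100., 200., 1.2, 540, 960, shift_right=-5, shift_up=32)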
def evaluate_data(self, filename1, filename2=None):
    self.Render = libRender.pyRenderMesh()
    self.pyRender = libPyRender.pyRenderMesh()
    #model = torch.load(filename1, map_location={'cuda:5': 'cuda:0'})

    if GPU == True:
        for i in range(0, 8):
            try:
                model = torch.load(filename1, map_location={'cuda:'+str(i): 'cuda:0'})
                if self.CTRL_PNL['dropout'] == True:
                    model = model.cuda().train()
                else:
                    model = model.cuda().eval()
                break
            except:
                pass
        if filename2 is not None:
            for i in range(0, 8):
                try:
                    model2 = torch.load(filename2, map_location={'cuda:'+str(i): 'cuda:0'})
                    if self.CTRL_PNL['dropout'] == True:
                        model2 = model2.cuda().train()
                    else:
                        model2 = model2.cuda().eval()
                    break
                except:
                    pass
        else:
            model2 = None
    else:
        model = torch.load(filename1, map_location='cpu')
        if self.CTRL_PNL['dropout'] == True:
            model = model.train()
        else:
            model = model.eval()
        if filename2 is not None:
            model2 = torch.load(filename2, map_location='cpu')
            if self.CTRL_PNL['dropout'] == True:
                model2 = model2.train()
            else:
                model2 = model2.eval()
        else:
            model2 = None

    #function_input = np.array(function_input)*np.array([10, 10, 10, 10, 10, 10, 0.1, 0.1, 0.1, 0.1, 1])
    #function_input += np.array([2.2, 32, -1, 1.2, 32, -5, 1.0, 1.0, 0.96, 0.95, 0.8])
    function_input = np.array(self.calibration_optim_values)*np.array([10, 10, 10, 0.1, 0.1, 0.1, 0.1])
    function_input += np.array([1.2, 32, -5, 1.0, 1.0, 0.96, 0.95])

    kinect_rotate_angle = function_input[3-3]
    kinect_shift_up = int(function_input[4-3])
    kinect_shift_right = int(function_input[5-3])
    camera_alpha_vert = function_input[6-3]
    camera_alpha_horiz = function_input[7-3]
    pressure_horiz_scale = function_input[8-3]
    pressure_vert_scale = function_input[9-3]
    #head_angle_multiplier = function_input[10-3]

    #file_dir = "/media/henry/multimodal_data_1/all_hevans_data/0905_2_Evening/0255"
    #file_dir_list = ["/media/henry/multimodal_data_2/test_data/data_072019_0001/"]
    blah = True

    #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0007"
    #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0006"
    #file_dir = "/home/henry/ivy_test_data/data_102019_kneeup_0000"
    #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/P000/data_102019_kneeup_0000"
    if PARTICIPANT == "P106":
        file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_00"
        #file_dir = "/home/henry/Desktop/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_000"
    else:
        file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_00"
        #file_dir = "/home/henry/Desktop/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_00"

    file_dir_nums = ["00","01","02","03","04","05","06","07","08","09"]#,"10"]#,"11","12"]

    overall_counter = 1
    overall_counter_disp = 1

    bedstatenpy = []
    colornpy = []
    config_codenpy = []
    date_stampnpy = []
    depth_rnpy = []
    markersnpy = []
    point_cloudnpy = []
    pressurenpy = []
    time_stampnpy = []

    SAVE = True

    for file_dir_num in file_dir_nums:
        file_dir_curr = file_dir + file_dir_num
        print "LOADING", file_dir_curr
        V3D.load_next_file(file_dir_curr)

        start_num = 0
        print self.color_all.shape

        #for im_num in range(29, 100):
        for im_num in range(start_num, self.color_all.shape[0]):
            if PARTICIPANT == "S103" and overall_counter in [26, 27, 28, 45, 53, 54, 55]:#, 52, 53]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S104" and overall_counter in [49, 50]: #S104 is everything but the last two
                overall_counter += 1
                pass
            elif PARTICIPANT == "S107" and overall_counter in [25, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S114" and overall_counter in [42, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S118" and overall_counter in [11, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S121" and overall_counter in [7, 47]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S130" and overall_counter in [30, 31, 34, 52, 53, 54, 55]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S134" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S140" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S141" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S145" and overall_counter in [23, 49, 50, 51]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S151" and overall_counter in [9, 48]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S163" and overall_counter in [46, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S165" and overall_counter in [19, 45]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S170" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S179" and overall_counter in [42, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S184" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S187" and overall_counter in [39, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S188" and overall_counter in [47, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S196" and overall_counter in [20, 36]:
                overall_counter += 1
                pass
            #elif overall_counter < 41:# and im_num > 0:
            #    overall_counter += 1
            #    overall_counter_disp += 1
            #    pass
            else:
                print file_dir_curr, " subset count: ", im_num, " overall ct: ", overall_counter_disp, overall_counter
                overall_counter += 1
                overall_counter_disp += 1

                self.overall_image_scale_amount = 0.85

                half_w_half_l = [0.4, 0.4, 1.1, 1.1]

                all_image_list = []
                self.label_single_image = []

                self.label_index = 0

                self.color = self.color_all[im_num]
                self.depth_r = self.depth_r_all[im_num]
                self.pressure = self.pressure_all[im_num]
                self.bed_state = self.bedstate_all[im_num]
                self.point_cloud_autofil = self.point_cloud_autofil_all[im_num] + [0.0, 0.0, 0.1]
                print self.point_cloud_autofil.shape

                self.bed_state[0] = self.bed_state[0]#*head_angle_multiplier
                self.bed_state *= 0
                #self.bed_state += 60.
                print self.bed_state, np.shape(self.pressure)

                bedstatenpy.append(self.bedstate_all[im_num])
                colornpy.append(self.color_all[im_num])
                config_codenpy.append(self.config_code_all[im_num])
                date_stampnpy.append(self.date_stamp_all[im_num])
                depth_rnpy.append(self.depth_r_all[im_num])
                markersnpy.append(list(self.markers_all[im_num]))
                point_cloudnpy.append(self.point_cloud_autofil_all[im_num])
                pressurenpy.append(self.pressure_all[im_num])
                time_stampnpy.append(self.time_stamp_all[im_num])

                if im_num == start_num and blah == True:
                    markers_c = []
                    markers_c.append(self.markers_all[im_num][0])
                    markers_c.append(self.markers_all[im_num][1])
                    markers_c.append(self.markers_all[im_num][2])
                    markers_c.append(self.markers_all[im_num][3])
                    for idx in range(4):
                        if markers_c[idx] is not None:
                            markers_c[idx] = np.array(markers_c[idx])*213./228.
                    blah = False

                # Get the marker points in 2D on the color image
                u_c, v_c = ArTagLib().color_2D_markers(markers_c, self.new_K_kin)

                # Get the marker points dropped to the height of the pressure mat
                u_c_drop, v_c_drop, markers_c_drop = ArTagLib().color_2D_markers_drop(markers_c, self.new_K_kin)

                # Get the geometry for sizing the pressure mat
                pmat_ArTagLib = ArTagLib()
                self.pressure_im_size_required, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend, half_w_half_l = \
                    pmat_ArTagLib.p_mat_geom(markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l)

                tf_corners = np.zeros((8, 2))
                tf_corners[0:8, :] = np.copy(self.tf_corners)

                #COLOR
                #if self.color is not 0:
                color_reshaped, color_size = VizLib().color_image(self.color, self.kcam, self.new_K_kin,
                                                                  u_c, v_c, u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                                  camera_alpha_vert, camera_alpha_horiz)
                color_reshaped = imutils.rotate(color_reshaped, kinect_rotate_angle)
                color_reshaped = color_reshaped[pre_VERT_CUT+kinect_shift_up:-pre_VERT_CUT+kinect_shift_up,
                                                HORIZ_CUT+kinect_shift_right:540 - HORIZ_CUT+kinect_shift_right, :]
                tf_corners[0:4, :], color_reshaped = self.transform_selected_points(color_reshaped, camera_alpha_vert,
                                                                                    camera_alpha_horiz, kinect_rotate_angle,
                                                                                    kinect_shift_right, kinect_shift_up,
                                                                                    [1.0, 0], [1.0, 0],
                                                                                    np.copy(self.tf_corners[0:4][:]))

                all_image_list.append(color_reshaped)

                #DEPTH
                h = VizLib().get_new_K_kin_homography(camera_alpha_vert, camera_alpha_horiz, self.new_K_kin)
                depth_r_orig = cv2.warpPerspective(self.depth_r, h, (self.depth_r.shape[1], self.depth_r.shape[0]))
                depth_r_orig = imutils.rotate(depth_r_orig, kinect_rotate_angle)
                depth_r_orig = depth_r_orig[HORIZ_CUT + kinect_shift_right: 540 - HORIZ_CUT + kinect_shift_right,
                                            pre_VERT_CUT - kinect_shift_up:-pre_VERT_CUT - kinect_shift_up]
                depth_r_reshaped, depth_r_size, depth_r_orig = VizLib().depth_image(depth_r_orig, u_c, v_c)
                self.depth_r_orig = depth_r_orig
                self.depthcam_midpixel = [self.new_K_kin[1, 2] - HORIZ_CUT - kinect_shift_right,
                                          (960-self.new_K_kin[0, 2]) - pre_VERT_CUT - kinect_shift_up]

                all_image_list.append(depth_r_reshaped)

                self.get_pc_from_depthmap(self.bed_state[0], tf_corners[2, :])

                #PRESSURE
                self.pressure = np.clip(self.pressure*4, 0, 100)
                pressure_reshaped, pressure_size, coords_from_top_left = VizLib().pressure_image(self.pressure, self.pressure_im_size,
                                                                                                 self.pressure_im_size_required, color_size,
                                                                                                 u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                                                                 u_p_bend, v_p_bend)
                pressure_shape = pressure_reshaped.shape
                pressure_reshaped = cv2.resize(pressure_reshaped, None, fx=pressure_horiz_scale,
                                               fy=pressure_vert_scale)[0:pressure_shape[0], 0:pressure_shape[1], :]

                if pressure_horiz_scale < 1.0 or pressure_vert_scale < 1.0:
                    pressure_reshaped_padded = np.zeros(pressure_shape).astype(np.uint8)
                    pressure_reshaped_padded[0:pressure_reshaped.shape[0], 0:pressure_reshaped.shape[1], :] += pressure_reshaped
                    pressure_reshaped = np.copy(pressure_reshaped_padded)

                coords_from_top_left[0] -= coords_from_top_left[0]*(1-pressure_horiz_scale)
                coords_from_top_left[1] += (960 - coords_from_top_left[1])*(1-pressure_vert_scale)

                pressure_reshaped = pressure_reshaped[pre_VERT_CUT:-pre_VERT_CUT, HORIZ_CUT: 540 - HORIZ_CUT, :]

                all_image_list.append(pressure_reshaped)

                self.all_images = np.zeros((960-np.abs(pre_VERT_CUT)*2, 1, 3)).astype(np.uint8)
                for image in all_image_list:
                    print image.shape
                    self.all_images = np.concatenate((self.all_images, image), axis=1)

                self.all_images = self.all_images[VERT_CUT: 960 - VERT_CUT, :, :]

                is_not_mult_4 = True
                while is_not_mult_4 == True:
                    is_not_mult_4 = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                               fy=self.overall_image_scale_amount).shape[1] % 4
                    self.overall_image_scale_amount += 0.001

                coords_from_top_left[0] -= (HORIZ_CUT)
                coords_from_top_left[1] = 960 - pre_VERT_CUT - coords_from_top_left[1]
                self.coords_from_top_left = (np.array(coords_from_top_left) * self.overall_image_scale_amount)
                #print self.coords_from_top_left

                self.all_images = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                             fy=self.overall_image_scale_amount)
                self.cursor_shift = self.all_images.shape[1]/4

                self.all_images_clone = self.all_images.copy()

                cv2.imshow('all_images', self.all_images)
                k = cv2.waitKey(1)
                if SAVE == False:
                    time.sleep(5)
                #cv2.waitKey(0)

                #self.estimate_pose(self.pressure, self.bed_state[0], markers_c, model, model2)

    if SAVE == True:
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/color.npy", colornpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/depth_r.npy", depth_rnpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/pressure.npy", pressurenpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/bedstate.npy", bedstatenpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/markers.npy", np.array(markersnpy), allow_pickle=True)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/time_stamp.npy", time_stampnpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/point_cloud.npy", point_cloudnpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/config_code.npy", config_codenpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/date_stamp.npy", date_stampnpy)
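# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original script) of how the .npy files
# written by evaluate_data() above could be loaded back for inspection. The
# base path mirrors the np.save calls above and is an assumption; adjust it
# for your own machine. markers and point_cloud hold ragged per-frame lists,
# so they come back as object arrays and need allow_pickle=True.
def load_checked_data_sketch(base_dir):
    import numpy as np
    data = {}
    for name in ['color', 'depth_r', 'pressure', 'bedstate', 'markers',
                 'time_stamp', 'point_cloud', 'config_code', 'date_stamp']:
        data[name] = np.load(base_dir + '/' + name + '.npy', allow_pickle=True)
    return data

# Example usage (assumed path layout, matching the save calls above):
# data = load_checked_data_sketch("/media/henry/multimodal_data_2/CVPR2020_study/"
#                                 + PARTICIPANT + "/data_checked_" + PARTICIPANT + "-2")
# print data['pressure'].shape, data['color'].shape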
def evaluate_data(self, filename1):
    #self.Render = libRender.pyRenderMesh(render = False)
    self.pyRender = libPyRender.pyRenderMesh(render=False)

    #function_input = np.array(function_input)*np.array([10, 10, 10, 10, 10, 10, 0.1, 0.1, 0.1, 0.1, 1])
    #function_input += np.array([2.2, 32, -1, 1.2, 32, -5, 1.0, 1.0, 0.96, 0.95, 0.8])
    function_input = np.array(self.calibration_optim_values) * np.array([10, 10, 10, 0.1, 0.1, 0.1, 0.1])
    function_input += np.array([1.2, 32, -5, 1.0, 1.0, 0.96, 0.95])

    kinect_rotate_angle = function_input[3 - 3]
    kinect_shift_up = int(function_input[4 - 3])  # - 40
    kinect_shift_right = int(function_input[5 - 3])  # - 20
    camera_alpha_vert = function_input[6 - 3]
    camera_alpha_horiz = function_input[7 - 3]
    pressure_horiz_scale = function_input[8 - 3]
    pressure_vert_scale = function_input[9 - 3]
    #head_angle_multiplier = function_input[10-3]

    #print kinect_shift_up, kinect_shift_right, "SHIFT UP RIGHT"
    #print pressure_horiz_scale, pressure_vert_scale, "PRESSURE SCALES" #1.04 for one too far to left

    #file_dir = "/media/henry/multimodal_data_1/all_hevans_data/0905_2_Evening/0255"
    #file_dir_list = ["/media/henry/multimodal_data_2/test_data/data_072019_0001/"]
    blah = True

    #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0007"
    #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0006"
    #file_dir = "/home/henry/ivy_test_data/data_102019_kneeup_0000"
    #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/P000/data_102019_kneeup_0000"
    if PARTICIPANT == "P106":
        #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_000"
        file_dir = "/home/henry/Desktop/CVPR2020_study/" + PARTICIPANT + "/data_" + PARTICIPANT + "_000"
        file_dirs = [#file_dir+str(0),
                     file_dir + str(1),
                     file_dir + str(2),
                     file_dir + str(3),
                     file_dir + str(4),
                     file_dir + str(5)]
    else:
        #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_000"
        file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/" + PARTICIPANT + "/data_checked_" + PARTICIPANT + "-" + POSE_TYPE
        file_dirs = [file_dir]
        #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_000"
        #file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-C_0000"
        #file_dirs = [file_dir]

    self.RESULTS_DICT = {}
    self.RESULTS_DICT['body_roll_rad'] = []
    self.RESULTS_DICT['v_to_gt_err'] = []
    self.RESULTS_DICT['v_limb_to_gt_err'] = []
    self.RESULTS_DICT['gt_to_v_err'] = []
    self.RESULTS_DICT['precision'] = []
    self.RESULTS_DICT['recall'] = []
    self.RESULTS_DICT['overlap_d_err'] = []
    self.RESULTS_DICT['all_d_err'] = []
    self.RESULTS_DICT['betas'] = []

    init_time = time.time()

    for file_dir in file_dirs:
        V3D.load_next_file(file_dir)

        start_num = 0
        #print self.color_all.shape

        #for im_num in range(29, 100):
        for im_num in range(start_num, self.color_all.shape[0]):
            #For P188: skip 5. 13 good cross legs

            print "NEXT IM!", im_num, " ", time.time() - init_time

            if PARTICIPANT == "S114" and POSE_TYPE == "2" and im_num in [26, 29]:
                continue  #these don't have point clouds
            if PARTICIPANT == "S165" and POSE_TYPE == "2" and im_num in [1, 3, 15]:
                continue  #these don't have point clouds
            if PARTICIPANT == "S188" and POSE_TYPE == "2" and im_num in [5, 17, 21]:
                continue

            #good picks: 103 - 6 good for what info is there
            #151 11 is good
            #179 - 7 is great
            #187 natural poses very good
            #196 - 11 has great smile :)

            self.overall_image_scale_amount = 0.85

            half_w_half_l = [0.4, 0.4, 1.1, 1.1]

            all_image_list = []
            self.label_single_image = []

            self.label_index = 0

            self.color = self.color_all[im_num]
            self.depth_r = self.depth_r_all[im_num]
            self.pressure = self.pressure_all[im_num]
            self.bed_state = self.bedstate_all[im_num]

            if self.point_cloud_autofil_all[im_num].shape[0] == 0:
                self.point_cloud_autofil_all[im_num] = np.array([[0.0, 0.0, 0.0]])
            self.point_cloud_autofil = self.point_cloud_autofil_all[im_num] + self.markers_all[im_num][2]  #[0.0, 0.0, 0.0]#0.1]
            #print self.markers_all[im_num]
            #print self.point_cloud_autofil.shape, 'PC AUTOFIL ORIG'

            self.bed_state[0] = self.bed_state[0] * 0.0  #*head_angle_multiplier
            self.bed_state *= 0
            #self.bed_state += 60.
            #print self.bed_state, np.shape(self.pressure)

            if im_num == start_num and blah == True:
                markers_c = []
                markers_c.append(self.markers_all[im_num][0])
                markers_c.append(self.markers_all[im_num][1])
                markers_c.append(self.markers_all[im_num][2])
                markers_c.append(self.markers_all[im_num][3])
                #for idx in range(4):
                    #if markers_c[idx] is not None:
                        #markers_c[idx] = np.array(markers_c[idx])*213./228.
                blah = False

            #print markers_c, 'Markers C'

            # Get the marker points in 2D on the color image
            u_c, v_c = ArTagLib().color_2D_markers(markers_c, self.new_K_kin)

            # Get the marker points dropped to the height of the pressure mat
            u_c_drop, v_c_drop, markers_c_drop = ArTagLib().color_2D_markers_drop(markers_c, self.new_K_kin)
            #print markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l

            # Get the geometry for sizing the pressure mat
            pmat_ArTagLib = ArTagLib()
            self.pressure_im_size_required, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend, half_w_half_l = \
                pmat_ArTagLib.p_mat_geom(markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l)

            tf_corners = np.zeros((8, 2))
            tf_corners[0:8, :] = np.copy(self.tf_corners)

            #COLOR
            #if self.color is not 0:
            color_reshaped, color_size = VizLib().color_image(self.color, self.kcam, self.new_K_kin, u_c, v_c,
                                                              u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                              camera_alpha_vert, camera_alpha_horiz)
            color_reshaped = imutils.rotate(color_reshaped, kinect_rotate_angle)
            color_reshaped = color_reshaped[pre_VERT_CUT + kinect_shift_up:-pre_VERT_CUT + kinect_shift_up,
                                            HORIZ_CUT + kinect_shift_right:540 - HORIZ_CUT + kinect_shift_right, :]
            tf_corners[0:4, :], color_reshaped = self.transform_selected_points(color_reshaped, camera_alpha_vert,
                                                                                camera_alpha_horiz, kinect_rotate_angle,
                                                                                kinect_shift_right, kinect_shift_up,
                                                                                [1.0, 0], [1.0, 0],
                                                                                np.copy(self.tf_corners[0:4][:]))

            all_image_list.append(color_reshaped)

            #DEPTH
            h = VizLib().get_new_K_kin_homography(camera_alpha_vert, camera_alpha_horiz, self.new_K_kin)
            depth_r_orig = cv2.warpPerspective(self.depth_r, h, (self.depth_r.shape[1], self.depth_r.shape[0]))
            depth_r_orig = imutils.rotate(depth_r_orig, kinect_rotate_angle)
            depth_r_orig = depth_r_orig[HORIZ_CUT + kinect_shift_right:540 - HORIZ_CUT + kinect_shift_right,
                                        pre_VERT_CUT - kinect_shift_up:-pre_VERT_CUT - kinect_shift_up]
            depth_r_reshaped, depth_r_size, depth_r_orig = VizLib().depth_image(depth_r_orig, u_c, v_c)
            self.depth_r_orig = depth_r_orig
            self.depthcam_midpixel = [self.new_K_kin[1, 2] - HORIZ_CUT - kinect_shift_right,
                                      (960 - self.new_K_kin[0, 2]) - pre_VERT_CUT - kinect_shift_up]
            self.depthcam_midpixel2 = [self.new_K_kin[1, 2] - HORIZ_CUT,
                                       (960 - self.new_K_kin[0, 2]) - pre_VERT_CUT]

            #print h, "H"  #warping perspective
            #print kinect_rotate_angle  #the amount to rotate counterclockwise about normal vector to the bed
            #print kinect_shift_right, kinect_shift_up  #pixel shift of depth im. convert this to meters based on depth of

            depth_r_orig_nowarp = imutils.rotate(self.depth_r, 0)
            depth_r_orig_nowarp = depth_r_orig_nowarp[HORIZ_CUT + 0:540 - HORIZ_CUT + 0, pre_VERT_CUT - 0:-pre_VERT_CUT - 0]
            depth_r_reshaped_nowarp, depth_r_size, depth_r_orig_nowarp = VizLib().depth_image(depth_r_orig_nowarp, u_c, v_c)  #this just does two rotations

            all_image_list.append(depth_r_reshaped)
            all_image_list.append(depth_r_reshaped_nowarp)

            X, Y, Z = self.get_pc_from_depthmap(self.bed_state[0], tf_corners[2, :])

            #print self.pressure_im_size_required, color_size, u_c_drop, v_c_drop, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend

            #PRESSURE
            #pressure_vert_scale = 1.0
            #pressure_horiz_scale = 1.0
            self.pressure = np.clip(self.pressure * 4, 0, 100)
            pressure_reshaped, pressure_size, coords_from_top_left = VizLib().pressure_image(self.pressure, self.pressure_im_size,
                                                                                             self.pressure_im_size_required, color_size,
                                                                                             u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                                                             u_p_bend, v_p_bend)
            pressure_shape = pressure_reshaped.shape
            pressure_reshaped = cv2.resize(pressure_reshaped, None, fx=pressure_horiz_scale,
                                           fy=pressure_vert_scale)[0:pressure_shape[0], 0:pressure_shape[1], :]

            if pressure_horiz_scale < 1.0 or pressure_vert_scale < 1.0:
                pressure_reshaped_padded = np.zeros(pressure_shape).astype(np.uint8)
                pressure_reshaped_padded[0:pressure_reshaped.shape[0], 0:pressure_reshaped.shape[1], :] += pressure_reshaped
                pressure_reshaped = np.copy(pressure_reshaped_padded)

            coords_from_top_left[0] -= coords_from_top_left[0] * (1 - pressure_horiz_scale)
            coords_from_top_left[1] += (960 - coords_from_top_left[1]) * (1 - pressure_vert_scale)

            pressure_reshaped = pressure_reshaped[pre_VERT_CUT:-pre_VERT_CUT, HORIZ_CUT:540 - HORIZ_CUT, :]

            all_image_list.append(pressure_reshaped)

            self.all_images = np.zeros((960 - np.abs(pre_VERT_CUT) * 2, 1, 3)).astype(np.uint8)
            for image in all_image_list:
                #print image.shape
                self.all_images = np.concatenate((self.all_images, image), axis=1)

            self.all_images = self.all_images[VERT_CUT:960 - VERT_CUT, :, :]

            is_not_mult_4 = True
            while is_not_mult_4 == True:
                is_not_mult_4 = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                           fy=self.overall_image_scale_amount).shape[1] % 4
                self.overall_image_scale_amount += 0.001

            coords_from_top_left[0] -= (HORIZ_CUT)
            coords_from_top_left[1] = 960 - pre_VERT_CUT - coords_from_top_left[1]
            self.coords_from_top_left = (np.array(coords_from_top_left) * self.overall_image_scale_amount)
            #print self.coords_from_top_left

            self.all_images = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                         fy=self.overall_image_scale_amount)
            self.cursor_shift = self.all_images.shape[1] / 4

            self.all_images_clone = self.all_images.copy()

            cv2.imshow('all_images', self.all_images)
            k = cv2.waitKey(1)
            #cv2.waitKey(0)

            self.pc_all = [Y, X, -Z]
            #print np.shape(self.pc_all), "PC ALL SHAPE"

            self.estimate_pose(self.pressure, self.bed_state[0], markers_c, tf_corners,
                               camera_alpha_vert, camera_alpha_horiz, h, kinect_rotate_angle)

    pkl.dump(self.RESULTS_DICT,
             open('/media/henry/multimodal_data_2/data/final_results/results_real_'
                  + PARTICIPANT + '_' + POSE_TYPE + '_' + NETWORK_2 + '.p', 'wb'))
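# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original script) of how the RESULTS_DICT
# pickle written above could be read back and summarized. The path pattern
# mirrors the pkl.dump call above; PARTICIPANT, POSE_TYPE, and NETWORK_2 are
# the same globals this script already assumes.
def summarize_results_sketch(results_path):
    import pickle as pkl
    import numpy as np
    results = pkl.load(open(results_path, 'rb'))
    # Each entry is a per-image list; report the mean of the numeric metrics.
    for key in ['v_to_gt_err', 'gt_to_v_err', 'precision', 'recall', 'overlap_d_err', 'all_d_err']:
        if len(results[key]) > 0:
            print key, np.mean(results[key])

# Example usage (assumed path, same pattern as the dump above):
# summarize_results_sketch('/media/henry/multimodal_data_2/data/final_results/results_real_'
#                          + PARTICIPANT + '_' + POSE_TYPE + '_' + NETWORK_2 + '.p')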
def evaluate_data(self):
    self.depthcam_midpixel2 = [self.new_K_kin[1, 2] - HORIZ_CUT,
                               (960 - self.new_K_kin[0, 2]) - pre_VERT_CUT]

    #function_input = np.array(function_input)*np.array([10, 10, 10, 10, 10, 10, 0.1, 0.1, 0.1, 0.1, 1])
    #function_input += np.array([2.2, 32, -1, 1.2, 32, -5, 1.0, 1.0, 0.96, 0.95, 0.8])
    function_input = np.array(self.calibration_optim_values) * np.array([10, 10, 10, 0.1, 0.1, 0.1, 0.1])
    function_input += np.array([1.2, 32, -5, 1.0, 1.0, 0.96, 0.95])

    kinect_rotate_angle = function_input[3 - 3]
    kinect_shift_up = int(function_input[4 - 3])
    kinect_shift_right = int(function_input[5 - 3])
    camera_alpha_vert = function_input[6 - 3]
    camera_alpha_horiz = function_input[7 - 3]

    blah = True

    file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/" + PARTICIPANT + "/data_checked_" + PARTICIPANT + "-" + POSE_TYPE
    file_dirs = [file_dir]

    init_time = time.time()

    RESAVE_DICT = {}
    RESAVE_DICT['images'] = []
    RESAVE_DICT['RGB'] = []
    RESAVE_DICT['depth'] = []
    RESAVE_DICT['pc'] = []
    RESAVE_DICT['pmat_corners'] = []
    RESAVE_DICT['pose_type'] = []

    for file_dir in file_dirs:
        V3D.load_next_file(file_dir)

        start_num = 0
        #print self.color_all.shape

        #for im_num in range(29, 100):
        for im_num in range(start_num, self.color_all.shape[0]):
            #For P188: skip 5. 13 good cross legs

            if PARTICIPANT == "S114" and POSE_TYPE == "2" and im_num in [26, 29]:
                continue  #these don't have point clouds
            if PARTICIPANT == "S165" and POSE_TYPE == "2" and im_num in [1, 3, 15]:
                continue  #these don't have point clouds
            if PARTICIPANT == "S188" and POSE_TYPE == "2" and im_num in [5, 17, 21]:
                continue  #these don't have point clouds

            print "NEXT IM!", im_num, " ", time.time() - init_time, self.pose_type_2_list[im_num]

            if POSE_TYPE == "2":
                RESAVE_DICT['pose_type'].append(self.pose_type_2_list[im_num])
            elif POSE_TYPE == "1":
                if im_num == 0:
                    if PARTICIPANT == "S145":
                        RESAVE_DICT['pose_type'].append('p_sel_sup')
                    elif PARTICIPANT == "S188":
                        RESAVE_DICT['pose_type'].append('p_sel_ll')
                    else:
                        RESAVE_DICT['pose_type'].append('p_sel_any')
                if im_num == 1:
                    if PARTICIPANT == "S140" or PARTICIPANT == "S145":
                        RESAVE_DICT['pose_type'].append('p_sel_ll')
                    elif PARTICIPANT == "S188":
                        RESAVE_DICT['pose_type'].append('p_sel_rl')
                    else:
                        RESAVE_DICT['pose_type'].append('p_sel_sup')
                if im_num == 2:
                    if PARTICIPANT == "S140" or PARTICIPANT == "S145":
                        RESAVE_DICT['pose_type'].append('p_sel_rl')
                    elif PARTICIPANT == "S188":
                        RESAVE_DICT['pose_type'].append('p_sel_prn')
                    else:
                        RESAVE_DICT['pose_type'].append('p_sel_ll')
                if im_num == 3:
                    if PARTICIPANT == "S140" or PARTICIPANT == "S145":
                        RESAVE_DICT['pose_type'].append('p_sel_prn')
                    elif PARTICIPANT == "S188":
                        RESAVE_DICT['pose_type'].append('p_sel_any')
                    else:
                        RESAVE_DICT['pose_type'].append('p_sel_rl')
                if im_num == 4:
                    if PARTICIPANT == "S140" or PARTICIPANT == "S188":
                        RESAVE_DICT['pose_type'].append('p_sel_sup')
                    else:
                        RESAVE_DICT['pose_type'].append('p_sel_prn')

            print RESAVE_DICT['pose_type'][-1]

            self.overall_image_scale_amount = 0.85

            half_w_half_l = [0.4, 0.4, 1.1, 1.1]

            all_image_list = []
            self.label_single_image = []

            self.label_index = 0

            self.color = self.color_all[im_num]
            self.depth_r = self.depth_r_all[im_num]
            self.pressure = self.pressure_all[im_num]
            self.bed_state = self.bedstate_all[im_num]

            if self.point_cloud_autofil_all[im_num].shape[0] == 0:
                self.point_cloud_autofil_all[im_num] = np.array([[0.0, 0.0, 0.0]])
            self.point_cloud_autofil = self.point_cloud_autofil_all[im_num] + self.markers_all[im_num][2]  #[0.0, 0.0, 0.0]#0.1]
            #print self.markers_all[im_num]
            #print self.point_cloud_autofil.shape, 'PC AUTOFIL ORIG'

            self.bed_state[0] = self.bed_state[0] * 0.0  #*head_angle_multiplier
            self.bed_state *= 0
            #self.bed_state += 60.
            #print self.bed_state, np.shape(self.pressure)

            if im_num == start_num and blah == True:
                markers_c = []
                markers_c.append(self.markers_all[im_num][0])
                markers_c.append(self.markers_all[im_num][1])
                markers_c.append(self.markers_all[im_num][2])
                markers_c.append(self.markers_all[im_num][3])
                #for idx in range(4):
                    #if markers_c[idx] is not None:
                        #markers_c[idx] = np.array(markers_c[idx])*213./228.
                blah = False

            #print markers_c, 'Markers C'

            # Get the marker points in 2D on the color image
            u_c, v_c = ArTagLib().color_2D_markers(markers_c, self.new_K_kin)

            # Get the marker points dropped to the height of the pressure mat
            u_c_drop, v_c_drop, markers_c_drop = ArTagLib().color_2D_markers_drop(markers_c, self.new_K_kin)
            #print markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l

            # Get the geometry for sizing the pressure mat
            pmat_ArTagLib = ArTagLib()
            self.pressure_im_size_required, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend, half_w_half_l = \
                pmat_ArTagLib.p_mat_geom(markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l)

            tf_corners = np.zeros((8, 2))
            tf_corners[0:8, :] = np.copy(self.tf_corners)

            #COLOR
            color_reshaped, color_size = VizLib().color_image(self.color, self.kcam, self.new_K_kin, u_c, v_c,
                                                              u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                              camera_alpha_vert, camera_alpha_horiz)
            color_reshaped = imutils.rotate(color_reshaped, kinect_rotate_angle)
            color_reshaped = color_reshaped[pre_VERT_CUT + kinect_shift_up:-pre_VERT_CUT + kinect_shift_up,
                                            HORIZ_CUT + kinect_shift_right:540 - HORIZ_CUT + kinect_shift_right, :]
            #all_image_list.append(color_reshaped)

            tf_corners[0:4, :], _ = self.transform_selected_points(color_reshaped, camera_alpha_vert,
                                                                   camera_alpha_horiz, kinect_rotate_angle,
                                                                   kinect_shift_right, kinect_shift_up,
                                                                   [1.0, 0], [1.0, 0],
                                                                   np.copy(self.tf_corners[0:4][:]))

            #should blur face here
            #color_reshaped = self.blur_face(color_reshaped)

            RESAVE_DICT['RGB'].append(color_reshaped)
            RESAVE_DICT['pmat_corners'].append(tf_corners[0:4, :])
            #SAVE CALIBRATED COLOR HERE, color_reshaped
            #SAVE CALIBRATED TF CORNERS HERE, tf_corners[0:4, :]

            all_image_list.append(color_reshaped)

            #DEPTH
            h = VizLib().get_new_K_kin_homography(camera_alpha_vert, camera_alpha_horiz, self.new_K_kin)
            depth_r_orig = cv2.warpPerspective(self.depth_r, h, (self.depth_r.shape[1], self.depth_r.shape[0]))
            depth_r_orig = imutils.rotate(depth_r_orig, kinect_rotate_angle)
            depth_r_orig = depth_r_orig[HORIZ_CUT + kinect_shift_right:540 - HORIZ_CUT + kinect_shift_right,
                                        pre_VERT_CUT - kinect_shift_up:-pre_VERT_CUT - kinect_shift_up]
            #SAVE CALIBRATED DEPTH HERE, depth_r_orig
            RESAVE_DICT['depth'].append(depth_r_orig)

            depth_r_reshaped, depth_r_size, depth_r_orig = self.depth_image(depth_r_orig)
            all_image_list.append(depth_r_reshaped)

            #PRESSURE
            self.pressure = np.clip(self.pressure * 1, 0, 100)
            pressure_reshaped, pressure_size = self.pressure_image(self.pressure, color_size, tf_corners)
            pressure_reshaped = pressure_reshaped[pre_VERT_CUT:-pre_VERT_CUT, HORIZ_CUT:540 - HORIZ_CUT, :]

            all_image_list.append(pressure_reshaped)

            self.all_images = np.zeros((960 - np.abs(pre_VERT_CUT) * 2, 1, 3)).astype(np.uint8)
            for image in all_image_list:
                #print image.shape
                self.all_images = np.concatenate((self.all_images, image), axis=1)

            self.all_images = self.all_images[VERT_CUT:960 - VERT_CUT, :, :]
            is_not_mult_4 = True
            while is_not_mult_4 == True:
                is_not_mult_4 = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                           fy=self.overall_image_scale_amount).shape[1] % 4
                self.overall_image_scale_amount += 0.001

            self.all_images = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                         fy=self.overall_image_scale_amount)
            self.cursor_shift = self.all_images.shape[1] / 4

            self.all_images_clone = self.all_images.copy()

            cv2.imshow('all_images', self.all_images)
            k = cv2.waitKey(1)
            #cv2.waitKey(0)

            #now do 3D rendering
            pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(MAT_SIZE) * float(1), a_min=0, a_max=100)))
            #SAVE PRESSURE HERE, self.pressure
            RESAVE_DICT['images'].append(pmat)

            pc_autofil_red = self.trim_pc_sides(camera_alpha_vert, camera_alpha_horiz, h, kinect_rotate_angle)  #this is the point cloud
            #SAVE POINT CLOUD HERE, pc_autofil_red
            RESAVE_DICT['pc'].append(pc_autofil_red)

            camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]  #[dist from foot of bed, dist from left side of mat, dist normal]

            # self.pyRender.render_3D_data(camera_point, pmat=pmat, pc=pc_autofil_red)

            self.point_cloud_array = None

    if POSE_TYPE == "2":
        save_name = '/prescribed'
    elif POSE_TYPE == "1":
        save_name = '/p_select'

    pkl.dump(RESAVE_DICT, open(participant_directory + save_name + '.p', 'wb'))
    print "SAVED."
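# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original script) of how the RESAVE_DICT
# pickle written above ('/prescribed.p' or '/p_select.p') could be reloaded
# and iterated. participant_directory is the same global the dump above uses;
# the helper name is an assumption for illustration.
def load_resave_dict_sketch(participant_directory, pose_type):
    import pickle as pkl
    save_name = '/prescribed' if pose_type == "2" else '/p_select'
    resave = pkl.load(open(participant_directory + save_name + '.p', 'rb'))
    # Parallel per-frame lists: pressure-mat image, calibrated RGB, calibrated
    # depth, trimmed point cloud, pressure-mat corners in the RGB frame, and
    # the pose-type label appended above.
    for i in range(len(resave['images'])):
        print i, resave['pose_type'][i], resave['images'][i].shape, resave['RGB'][i].shape
    return resave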