def generate_prechecked_pose(self, gender, posture, stiffness, filename):

    prechecked_pose_list = np.load(filename, allow_pickle=True).tolist()

    print len(prechecked_pose_list)
    shuffle(prechecked_pose_list)
    pyRender = libRender.pyRenderMesh()

    for shape_pose_vol in prechecked_pose_list[6:]:
        #print shape_pose_vol
        #print shape_pose_vol[0]
        #print shape_pose_vol[1]
        #print shape_pose_vol[2]
        for idx in range(len(shape_pose_vol[0])):
            #print shape_pose_vol[0][idx]
            self.m.betas[idx] = shape_pose_vol[0][idx]

        print 'init'
        print shape_pose_vol[2][0], shape_pose_vol[2][1], shape_pose_vol[2][2]

        self.m.pose[:] = np.array(72 * [0.])

        for idx in range(len(shape_pose_vol[1])):
            #print shape_pose_vol[1][idx]
            #print self.m.pose[shape_pose_vol[1][idx]]
            #print shape_pose_vol[2][idx]
            pose_index = shape_pose_vol[1][idx]*1
            self.m.pose[pose_index] = shape_pose_vol[2][idx]*1.
            #print idx, pose_index, self.m.pose[pose_index], shape_pose_vol[2][idx]

        print self.m.pose[0:3]

        init_root = np.array(self.m.pose[0:3]) + 0.000001
        init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
        root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi/2])
        #print root_rot
        trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

        self.m.pose[0] = trans_root[0]
        self.m.pose[1] = trans_root[1]
        self.m.pose[2] = trans_root[2]

        print root_rot
        print init_rootR
        print trans_root
        print init_root, trans_root

        #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
        # self.m.pose[51] = selection_r

        pyRender.mesh_render_pose_bed(self.m, self.point_cloud_array, self.pc_isnew, self.pressure, self.markers)
        self.point_cloud_array = None
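# The block above re-expresses the SMPL root joint after a fixed [pi, 0, pi/2] rotation so the
# rendered body lies on the bed, and the same four-line pattern repeats in the real-time
# estimators below. A minimal standalone sketch of that step, using only the libKinematics calls
# already present in this file (matrix_from_dir_cos_angles, eulerAnglesToRotationMatrix,
# dir_cos_angles_from_matrix); the helper name reorient_root_to_bed is hypothetical and not part
# of the original code.
def reorient_root_to_bed(root_axis_angle, bed_rot_euler=(np.pi, 0.0, np.pi/2)):
    # the small offset avoids a degenerate all-zero rotation vector before the matrix conversion
    init_root = np.array(root_axis_angle, dtype=float) + 0.000001
    init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
    root_rot = libKinematics.eulerAnglesToRotationMatrix(list(bed_rot_euler))
    # compose the fixed bed rotation with the estimated root rotation, then convert back
    return libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

# usage sketch: self.m.pose[0:3] would be overwritten elementwise with
#   trans_root = reorient_root_to_bed(self.m.pose[0:3])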
def estimate_real_time(self, gender, filename):

    pyRender = libRender.pyRenderMesh()
    mat_size = (64, 27)

    if torch.cuda.is_available():
        # Use for GPU
        GPU = True
        dtype = torch.cuda.FloatTensor
        print '######################### CUDA is available! #############################'
    else:
        # Use for CPU
        GPU = False
        dtype = torch.FloatTensor
        print '############################## USING CPU #################################'

    import convnet as convnet
    from torch.autograd import Variable

    if GPU == True:
        model = torch.load(filename)
        model = model.cuda()
    else:
        model = torch.load(filename, map_location='cpu')

    while not rospy.is_shutdown():

        pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*5.0, a_min=0, a_max=100)))
        pmat = gaussian_filter(pmat, sigma=0.5)

        pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, self.bedangle, mat_size)
        pmat_stack = torch.Tensor(pmat_stack)

        images_up_non_tensor = np.array(PreprocessingLib().preprocessing_pressure_map_upsample(pmat_stack.numpy(), multiple=2))
        images_up = Variable(torch.Tensor(images_up_non_tensor).type(dtype), requires_grad=False)

        betas_est, root_shift_est, angles_est = model.forward_kinematic_angles_realtime(images_up)

        print betas_est, root_shift_est, angles_est
        angles_est = angles_est.reshape(72)

        for idx in range(10):
            #print shape_pose_vol[0][idx]
            self.m.betas[idx] = betas_est[idx]

        for idx in range(72):
            self.m.pose[idx] = angles_est[idx]

        init_root = np.array(self.m.pose[0:3]) + 0.000001
        init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
        root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi/2])
        #print root_rot
        trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

        self.m.pose[0] = trans_root[0]
        self.m.pose[1] = trans_root[1]
        self.m.pose[2] = trans_root[2]

        #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
        # self.m.pose[51] = selection_r

        pyRender.mesh_render_pose_bed(self.m, root_shift_est, self.point_cloud_array, self.pc_isnew, pmat, self.markers, self.bedangle)
        self.point_cloud_array = None
def estimate_real_time(self, filename1, filename2=None):

    pyRender = libRender.pyRenderMesh()
    mat_size = (64, 27)

    from unpack_batch_lib import UnpackBatchLib

    if torch.cuda.is_available():
        # Use for GPU
        GPU = True
        dtype = torch.cuda.FloatTensor
        print '######################### CUDA is available! #############################'
    else:
        # Use for CPU
        GPU = False
        dtype = torch.FloatTensor
        print '############################## USING CPU #################################'

    from torch.autograd import Variable

    if GPU == True:
        for i in range(0, 8):
            try:
                model = torch.load(filename1, map_location={'cuda:'+str(i): 'cuda:0'})
                model = model.cuda().eval()
                break
            except:
                pass
        if filename2 is not None:
            for i in range(0, 8):
                try:
                    model2 = torch.load(filename2, map_location={'cuda:'+str(i): 'cuda:0'})
                    model2 = model2.cuda().eval()
                    break
                except:
                    pass
        else:
            model2 = None
    else:
        model = torch.load(filename1, map_location='cpu').eval()
        if filename2 is not None:
            model2 = torch.load(filename2, map_location='cpu').eval()
        else:
            model2 = None

    pub = rospy.Publisher('meshTopic', MeshAttr)
    #rospy.init_node('talker', anonymous=False)

    while not rospy.is_shutdown():

        pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(self.CTRL_PNL['pmat_mult']*4), a_min=0, a_max=100)))
        #pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(1), a_min=0, a_max=100)))

        #print "max is : ", np.max(pmat)
        #print "sum is : ", np.sum(pmat)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat = gaussian_filter(pmat, sigma=0.5)

        pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, self.bedangle, mat_size)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

        pmat_stack = np.array(pmat_stack)
        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
            pmat_contact[pmat_contact > 0] = 100
            pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

        weight_input = WEIGHT_LBS/2.20462
        height_input = (HEIGHT_IN*0.0254 - 1)*100

        batch1 = np.zeros((1, 162))
        if GENDER == 'f':
            batch1[:, 157] += 1
        elif GENDER == 'm':
            batch1[:, 158] += 1
        batch1[:, 160] += weight_input
        batch1[:, 161] += height_input

        if self.CTRL_PNL['normalize_input'] == True:
            self.CTRL_PNL['depth_map_input_est'] = False
            pmat_stack = self.TPL.normalize_network_input(pmat_stack, self.CTRL_PNL)
            batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

        pmat_stack = torch.Tensor(pmat_stack)
        batch1 = torch.Tensor(batch1)

        batch = []
        batch.append(pmat_stack)
        batch.append(batch1)

        self.CTRL_PNL['adjust_ang_from_est'] = False
        scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch, False, model, self.CTRL_PNL)

        self.CTRL_PNL['first_pass'] = False

        mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 16.69545796387731
        mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1) / 45.08513083167194
        mdm_est_pos[mdm_est_pos < 0] = 0
        mdm_est_neg[mdm_est_neg > 0] = 0
        mdm_est_neg *= -1
        cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100 / 43.55800622930469

        #1. / 16.69545796387731,  # pos est depth
        #1. / 45.08513083167194,  # neg est depth
        #1. / 43.55800622930469,  # cm est
        if model2 is not None:
            batch_cor = []
            batch_cor.append(torch.cat((pmat_stack[:, 0:1, :, :],
                                        mdm_est_pos.type(torch.FloatTensor),
                                        mdm_est_neg.type(torch.FloatTensor),
                                        cm_est.type(torch.FloatTensor),
                                        pmat_stack[:, 1:, :, :]), dim=1))

            if self.CTRL_PNL['full_body_rot'] == False:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim=1))
            elif self.CTRL_PNL['full_body_rot'] == True:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                            OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim=1))

            self.CTRL_PNL['adjust_ang_from_est'] = True
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpackage_batch_kin_pass(batch_cor, False, model2, self.CTRL_PNL)

        betas_est = np.squeeze(OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy())
        angles_est = np.squeeze(OUTPUT_DICT['batch_angles_est_post_clip'])
        root_shift_est = np.squeeze(OUTPUT_DICT['batch_root_xyz_est_post_clip'].cpu().numpy())

        #print betas_est.shape, root_shift_est.shape, angles_est.shape
        #print betas_est, root_shift_est, angles_est

        angles_est = angles_est.reshape(72)

        for idx in range(10):
            #print shape_pose_vol[0][idx]
            self.m.betas[idx] = betas_est[idx]

        for idx in range(72):
            self.m.pose[idx] = angles_est[idx]

        init_root = np.array(self.m.pose[0:3]) + 0.000001
        init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
        root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi/2])
        #print root_rot
        trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

        self.m.pose[0] = trans_root[0]
        self.m.pose[1] = trans_root[1]
        self.m.pose[2] = trans_root[2]

        #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
        # self.m.pose[51] = selection_r

        print self.m.r
        #print OUTPUT_DICT['verts']

        pyRender.mesh_render_pose_bed_orig(self.m, root_shift_est, self.point_cloud_array, self.pc_isnew, pmat, self.markers, self.bedangle)
        self.point_cloud_array = None
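# The loop above runs a two-pass estimate when a second network is given: pass 1 (model) predicts
# betas/angles/root plus estimated depth ("mdm") and contact ("cm") maps, and those maps together
# with the pass-1 SMPL parameters are concatenated onto the input for pass 2 (model2). A condensed
# sketch of that wiring, reusing the tensor names from the loop above; the helper name
# build_second_pass_batch is hypothetical and not part of the original code.
def build_second_pass_batch(pmat_stack, batch1, OUTPUT_DICT, mdm_est_pos, mdm_est_neg, cm_est, full_body_rot):
    # image-like input: contact channel, estimated depth maps, estimated contact map, remaining pmat channels
    images = torch.cat((pmat_stack[:, 0:1, :, :],
                        mdm_est_pos.type(torch.FloatTensor),
                        mdm_est_neg.type(torch.FloatTensor),
                        cm_est.type(torch.FloatTensor),
                        pmat_stack[:, 1:, :, :]), dim=1)
    # vector input: weight/height/gender vector plus the pass-1 SMPL parameter estimates
    extras = [batch1,
              OUTPUT_DICT['batch_betas_est'].cpu(),
              OUTPUT_DICT['batch_angles_est'].cpu(),
              OUTPUT_DICT['batch_root_xyz_est'].cpu()]
    if full_body_rot:
        extras.append(OUTPUT_DICT['batch_root_atan2_est'].cpu())
    return [images, torch.cat(extras, dim=1)]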
def generate_prechecked_pose(self, gender, posture, stiffness, filename):

    import multiprocessing
    pool = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 3))

    prechecked_pose_list = np.load(filename).tolist()

    #import trimesh
    #import pyrender
    #self.human_mat = pyrender.MetallicRoughnessMaterial(baseColorFactor=[0.3, 0.3, 1.0, 0.5])

    print len(prechecked_pose_list)
    shuffle(prechecked_pose_list)
    pyRender = libRender.pyRenderMesh()

    for shape_pose_vol in prechecked_pose_list[6:]:
        #print shape_pose_vol
        #print shape_pose_vol[0]
        #print shape_pose_vol[1]
        #print shape_pose_vol[2]
        for idx in range(len(shape_pose_vol[0])):
            #print shape_pose_vol[0][idx]
            self.m.betas[idx] = shape_pose_vol[0][idx]

        for idx in range(len(shape_pose_vol[1])):
            #print shape_pose_vol[1][idx]
            #print self.m.pose[shape_pose_vol[1][idx]]
            #print shape_pose_vol[2][idx]
            self.m.pose[shape_pose_vol[1][idx]] = shape_pose_vol[2][idx]

        print "shift up down", shape_pose_vol[5]

        #self.m.pose[3] = -np.pi/10
        #self.m.pose[5] = np.pi/12
        #self.m.pose[8] = np.pi/6
        #self.m.pose[12] = np.pi/4
        #self.m.pose[44] = np.pi/6
        #self.m.pose[53] = np.pi/4
        #self.m.pose[41] = -np.pi/10
        #self.m.pose[50] = -np.pi/8
        #self.m.pose[48] = -np.pi/6
        #self.m.pose[58] = np.pi/6
        #self.m.pose[55] = -np.pi/6

        ## Write to an .obj file
        #outmesh_path = "./data/person.obj"
        #with open(outmesh_path, 'w') as fp:
        #    for v in self.m.r:
        #        fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
        #
        #    for f in self.m.f + 1:  # Faces are 1-based, not 0-based in obj files
        #        fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))

        #rospy.init_node("smpl_viz")
        #while not rospy.is_shutdown():
        #    libVisualization.rviz_publish_output(np.array(self.m.J_transformed))
        #    libVisualization.rviz_publish_output_limbs_direct(np.array(self.m.J_transformed))

        #self.m.pose[0] = np.pi/6

        #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]

        #R_l_hip_rod = libKinematics.matrix_from_dir_cos_angles([float(self.m.pose[3]), float(self.m.pose[4]), float(self.m.pose[5])])
        #R_r_hip_rod = libKinematics.matrix_from_dir_cos_angles([float(self.m.pose[6]), float(self.m.pose[7]), float(self.m.pose[8])])

        #R_root = libKinematics.eulerAnglesToRotationMatrix([-float(self.m.pose[0]), 0.0, 0.0])

        #R_l = np.matmul(R_root, R_l_hip_rod)
        #R_r = np.matmul(R_root, R_r_hip_rod)

        #new_left_hip = libKinematics.dir_cos_angles_from_matrix(R_l)
        #new_right_hip = libKinematics.dir_cos_angles_from_matrix(R_r)

        #self.m.pose[3] = new_left_hip[0]
        #self.m.pose[4] = new_left_hip[1]
        #self.m.pose[5] = new_left_hip[2]
        #self.m.pose[6] = new_right_hip[0]
        #self.m.pose[7] = new_right_hip[1]
        #self.m.pose[8] = new_right_hip[2]

        #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
        # self.m.pose[51] = selection_r

        #pyRender.mesh_render(self.m)

        #verts = np.array(self.m.r)
        #faces = np.array(self.m.f)
        #tm = trimesh.base.Trimesh(vertices=verts, faces=faces)
        #smpl_mesh = pyrender.Mesh.from_trimesh(tm, material=self.human_mat, wireframe=True, smooth=False)  # smoothing doesn't do anything to wireframe

        dss = dart_skel_sim.DartSkelSim(render=True, m=self.m, gender=gender, posture=posture, stiffness=stiffness,
                                        shiftSIDE=shape_pose_vol[4], shiftUD=shape_pose_vol[5],
                                        filepath_prefix=self.filepath_prefix, add_floor=False)

        dss.run_simulation(10000)
def evaluate_data(self, filename1, filename2=None):

    self.Render = libRender.pyRenderMesh()
    self.pyRender = libPyRender.pyRenderMesh()

    #model = torch.load(filename1, map_location={'cuda:5': 'cuda:0'})
    if GPU == True:
        for i in range(0, 8):
            try:
                model = torch.load(filename1, map_location={'cuda:'+str(i): 'cuda:0'})
                if self.CTRL_PNL['dropout'] == True:
                    model = model.cuda().train()
                else:
                    model = model.cuda().eval()
                break
            except:
                pass
        if filename2 is not None:
            for i in range(0, 8):
                try:
                    model2 = torch.load(filename2, map_location={'cuda:'+str(i): 'cuda:0'})
                    if self.CTRL_PNL['dropout'] == True:
                        model2 = model2.cuda().train()
                    else:
                        model2 = model2.cuda().eval()
                    break
                except:
                    pass
        else:
            model2 = None
    else:
        model = torch.load(filename1, map_location='cpu')
        if self.CTRL_PNL['dropout'] == True:
            model = model.train()
        else:
            model = model.eval()
        if filename2 is not None:
            model2 = torch.load(filename2, map_location='cpu')
            if self.CTRL_PNL['dropout'] == True:
                model2 = model2.train()
            else:
                model2 = model2.eval()
        else:
            model2 = None

    #function_input = np.array(function_input)*np.array([10, 10, 10, 10, 10, 10, 0.1, 0.1, 0.1, 0.1, 1])
    #function_input += np.array([2.2, 32, -1, 1.2, 32, -5, 1.0, 1.0, 0.96, 0.95, 0.8])
    function_input = np.array(self.calibration_optim_values)*np.array([10, 10, 10, 0.1, 0.1, 0.1, 0.1])
    function_input += np.array([1.2, 32, -5, 1.0, 1.0, 0.96, 0.95])

    kinect_rotate_angle = function_input[3-3]
    kinect_shift_up = int(function_input[4-3])
    kinect_shift_right = int(function_input[5-3])
    camera_alpha_vert = function_input[6-3]
    camera_alpha_horiz = function_input[7-3]
    pressure_horiz_scale = function_input[8-3]
    pressure_vert_scale = function_input[9-3]
    #head_angle_multiplier = function_input[10-3]

    #file_dir = "/media/henry/multimodal_data_1/all_hevans_data/0905_2_Evening/0255"
    #file_dir_list = ["/media/henry/multimodal_data_2/test_data/data_072019_0001/"]

    blah = True

    #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0007"
    #file_dir = "/media/henry/multimodal_data_2/test_data/data_072019_0006"
    #file_dir = "/home/henry/ivy_test_data/data_102019_kneeup_0000"
    #file_dir = "/media/henry/multimodal_data_1/CVPR2020_study/P000/data_102019_kneeup_0000"

    if PARTICIPANT == "P106":
        file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_00"
        #file_dir = "/home/henry/Desktop/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"_000"
    else:
        file_dir = "/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_00"
        #file_dir = "/home/henry/Desktop/CVPR2020_study/"+PARTICIPANT+"/data_"+PARTICIPANT+"-2_00"

    file_dir_nums = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09"]  #,"10"]#,"11","12"]

    overall_counter = 1
    overall_counter_disp = 1

    bedstatenpy = []
    colornpy = []
    config_codenpy = []
    date_stampnpy = []
    depth_rnpy = []
    markersnpy = []
    point_cloudnpy = []
    pressurenpy = []
    time_stampnpy = []

    SAVE = True

    for file_dir_num in file_dir_nums:
        file_dir_curr = file_dir + file_dir_num
        print "LOADING", file_dir_curr
        V3D.load_next_file(file_dir_curr)

        start_num = 0
        print self.color_all.shape

        #for im_num in range(29, 100):
        for im_num in range(start_num, self.color_all.shape[0]):

            if PARTICIPANT == "S103" and overall_counter in [26, 27, 28, 45, 53, 54, 55]:  #, 52, 53]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S104" and overall_counter in [49, 50]:  # S104 is everything but the last two
                overall_counter += 1
                pass
            elif PARTICIPANT == "S107" and overall_counter in [25, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S114" and overall_counter in [42, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S118" and overall_counter in [11, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S121" and overall_counter in [7, 47]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S130" and overall_counter in [30, 31, 34, 52, 53, 54, 55]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S134" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S140" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S141" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S145" and overall_counter in [23, 49, 50, 51]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S151" and overall_counter in [9, 48]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S163" and overall_counter in [46, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S165" and overall_counter in [19, 45]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S170" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S179" and overall_counter in [42, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S184" and overall_counter in [49, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S187" and overall_counter in [39, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S188" and overall_counter in [47, 50]:
                overall_counter += 1
                pass
            elif PARTICIPANT == "S196" and overall_counter in [20, 36]:
                overall_counter += 1
                pass
            #elif overall_counter < 41:  # and im_num > 0:
            #    overall_counter += 1
            #    overall_counter_disp += 1
            #    pass
            else:
                print file_dir_curr, " subset count: ", im_num, " overall ct: ", overall_counter_disp, overall_counter
                overall_counter += 1
                overall_counter_disp += 1

                self.overall_image_scale_amount = 0.85

                half_w_half_l = [0.4, 0.4, 1.1, 1.1]

                all_image_list = []
                self.label_single_image = []

                self.label_index = 0

                self.color = self.color_all[im_num]
                self.depth_r = self.depth_r_all[im_num]
                self.pressure = self.pressure_all[im_num]
                self.bed_state = self.bedstate_all[im_num]
                self.point_cloud_autofil = self.point_cloud_autofil_all[im_num] + [0.0, 0.0, 0.1]
                print self.point_cloud_autofil.shape

                self.bed_state[0] = self.bed_state[0]  #*head_angle_multiplier
                self.bed_state *= 0
                #self.bed_state += 60.
                print self.bed_state, np.shape(self.pressure)

                bedstatenpy.append(self.bedstate_all[im_num])
                colornpy.append(self.color_all[im_num])
                config_codenpy.append(self.config_code_all[im_num])
                date_stampnpy.append(self.date_stamp_all[im_num])
                depth_rnpy.append(self.depth_r_all[im_num])
                markersnpy.append(list(self.markers_all[im_num]))
                point_cloudnpy.append(self.point_cloud_autofil_all[im_num])
                pressurenpy.append(self.pressure_all[im_num])
                time_stampnpy.append(self.time_stamp_all[im_num])

                if im_num == start_num and blah == True:
                    markers_c = []
                    markers_c.append(self.markers_all[im_num][0])
                    markers_c.append(self.markers_all[im_num][1])
                    markers_c.append(self.markers_all[im_num][2])
                    markers_c.append(self.markers_all[im_num][3])
                    for idx in range(4):
                        if markers_c[idx] is not None:
                            markers_c[idx] = np.array(markers_c[idx])*213./228.
                    blah = False

                # Get the marker points in 2D on the color image
                u_c, v_c = ArTagLib().color_2D_markers(markers_c, self.new_K_kin)

                # Get the marker points dropped to the height of the pressure mat
                u_c_drop, v_c_drop, markers_c_drop = ArTagLib().color_2D_markers_drop(markers_c, self.new_K_kin)

                # Get the geometry for sizing the pressure mat
                pmat_ArTagLib = ArTagLib()
                self.pressure_im_size_required, u_c_pmat, v_c_pmat, u_p_bend, v_p_bend, half_w_half_l = \
                    pmat_ArTagLib.p_mat_geom(markers_c_drop, self.new_K_kin, self.pressure_im_size_required, self.bed_state, half_w_half_l)

                tf_corners = np.zeros((8, 2))
                tf_corners[0:8, :] = np.copy(self.tf_corners)

                # COLOR
                #if self.color is not 0:
                color_reshaped, color_size = VizLib().color_image(self.color, self.kcam, self.new_K_kin,
                                                                  u_c, v_c, u_c_drop, v_c_drop, u_c_pmat, v_c_pmat,
                                                                  camera_alpha_vert, camera_alpha_horiz)
                color_reshaped = imutils.rotate(color_reshaped, kinect_rotate_angle)
                color_reshaped = color_reshaped[pre_VERT_CUT+kinect_shift_up:-pre_VERT_CUT+kinect_shift_up,
                                                HORIZ_CUT+kinect_shift_right:540 - HORIZ_CUT+kinect_shift_right, :]

                tf_corners[0:4, :], color_reshaped = self.transform_selected_points(color_reshaped,
                                                                                    camera_alpha_vert, camera_alpha_horiz,
                                                                                    kinect_rotate_angle,
                                                                                    kinect_shift_right, kinect_shift_up,
                                                                                    [1.0, 0], [1.0, 0],
                                                                                    np.copy(self.tf_corners[0:4][:]))

                all_image_list.append(color_reshaped)

                # DEPTH
                h = VizLib().get_new_K_kin_homography(camera_alpha_vert, camera_alpha_horiz, self.new_K_kin)
                depth_r_orig = cv2.warpPerspective(self.depth_r, h, (self.depth_r.shape[1], self.depth_r.shape[0]))
                depth_r_orig = imutils.rotate(depth_r_orig, kinect_rotate_angle)
                depth_r_orig = depth_r_orig[HORIZ_CUT + kinect_shift_right:540 - HORIZ_CUT + kinect_shift_right,
                                            pre_VERT_CUT - kinect_shift_up:-pre_VERT_CUT - kinect_shift_up]
                depth_r_reshaped, depth_r_size, depth_r_orig = VizLib().depth_image(depth_r_orig, u_c, v_c)
                self.depth_r_orig = depth_r_orig
                self.depthcam_midpixel = [self.new_K_kin[1, 2] - HORIZ_CUT - kinect_shift_right,
                                          (960 - self.new_K_kin[0, 2]) - pre_VERT_CUT - kinect_shift_up]

                all_image_list.append(depth_r_reshaped)

                self.get_pc_from_depthmap(self.bed_state[0], tf_corners[2, :])

                # PRESSURE
                self.pressure = np.clip(self.pressure*4, 0, 100)
                pressure_reshaped, pressure_size, coords_from_top_left = VizLib().pressure_image(self.pressure,
                                                                                                 self.pressure_im_size,
                                                                                                 self.pressure_im_size_required,
                                                                                                 color_size,
                                                                                                 u_c_drop, v_c_drop,
                                                                                                 u_c_pmat, v_c_pmat,
                                                                                                 u_p_bend, v_p_bend)
                pressure_shape = pressure_reshaped.shape
                pressure_reshaped = cv2.resize(pressure_reshaped, None, fx=pressure_horiz_scale,
                                               fy=pressure_vert_scale)[0:pressure_shape[0], 0:pressure_shape[1], :]

                if pressure_horiz_scale < 1.0 or pressure_vert_scale < 1.0:
                    pressure_reshaped_padded = np.zeros(pressure_shape).astype(np.uint8)
                    pressure_reshaped_padded[0:pressure_reshaped.shape[0], 0:pressure_reshaped.shape[1], :] += pressure_reshaped
                    pressure_reshaped = np.copy(pressure_reshaped_padded)

                coords_from_top_left[0] -= coords_from_top_left[0]*(1 - pressure_horiz_scale)
                coords_from_top_left[1] += (960 - coords_from_top_left[1])*(1 - pressure_vert_scale)

                pressure_reshaped = pressure_reshaped[pre_VERT_CUT:-pre_VERT_CUT, HORIZ_CUT:540 - HORIZ_CUT, :]

                all_image_list.append(pressure_reshaped)

                self.all_images = np.zeros((960 - np.abs(pre_VERT_CUT)*2, 1, 3)).astype(np.uint8)
                for image in all_image_list:
                    print image.shape
                    self.all_images = np.concatenate((self.all_images, image), axis=1)

                self.all_images = self.all_images[VERT_CUT:960 - VERT_CUT, :, :]

                is_not_mult_4 = True
                while is_not_mult_4 == True:
                    is_not_mult_4 = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                               fy=self.overall_image_scale_amount).shape[1] % 4
                    self.overall_image_scale_amount += 0.001

                coords_from_top_left[0] -= (HORIZ_CUT)
                coords_from_top_left[1] = 960 - pre_VERT_CUT - coords_from_top_left[1]
                self.coords_from_top_left = (np.array(coords_from_top_left) * self.overall_image_scale_amount)
                #print self.coords_from_top_left

                self.all_images = cv2.resize(self.all_images, (0, 0), fx=self.overall_image_scale_amount,
                                             fy=self.overall_image_scale_amount)
                self.cursor_shift = self.all_images.shape[1]/4

                self.all_images_clone = self.all_images.copy()

                cv2.imshow('all_images', self.all_images)
                k = cv2.waitKey(1)
                if SAVE == False:
                    time.sleep(5)
                #cv2.waitKey(0)
                #self.estimate_pose(self.pressure, self.bed_state[0], markers_c, model, model2)

    if SAVE == True:
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/color.npy", colornpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/depth_r.npy", depth_rnpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/pressure.npy", pressurenpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/bedstate.npy", bedstatenpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/markers.npy", np.array(markersnpy), allow_pickle=True)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/time_stamp.npy", time_stampnpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/point_cloud.npy", point_cloudnpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/config_code.npy", config_codenpy)
        np.save("/media/henry/multimodal_data_2/CVPR2020_study/"+PARTICIPANT+"/data_checked_"+PARTICIPANT+"-2/date_stamp.npy", date_stampnpy)
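# The nine np.save calls above differ only in the array name; an equivalent loop is sketched
# below as a comment, assuming the same fixed output directory (the dict keys are just the
# existing file basenames). This is an alternative layout, not part of the original code.
#
#   out_dir = "/media/henry/multimodal_data_2/CVPR2020_study/" + PARTICIPANT + "/data_checked_" + PARTICIPANT + "-2/"
#   arrays = {"color": colornpy, "depth_r": depth_rnpy, "pressure": pressurenpy,
#             "bedstate": bedstatenpy, "markers": np.array(markersnpy),
#             "time_stamp": time_stampnpy, "point_cloud": point_cloudnpy,
#             "config_code": config_codenpy, "date_stamp": date_stampnpy}
#   for name, arr in arrays.items():
#       np.save(out_dir + name + ".npy", arr, allow_pickle=True)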
def estimate_real_time(self, filename1, filename2=None):

    pyRender = libRender.pyRenderMesh()
    mat_size = (64, 27)

    import sys
    # insert at 1, 0 is the script path (or '' in REPL)
    sys.path.insert(1, '../sim_camera_resting_scene/lib_py')
    sys.path.insert(1, '../sim_camera_resting_scene/DPNet')
    from unpack_batch_lib_br import UnpackBatchLib
    import convnet_br as convnet

    if torch.cuda.is_available():
        # Use for GPU
        GPU = True
        dtype = torch.cuda.FloatTensor
        print '######################### CUDA is available! #############################'
    else:
        # Use for CPU
        GPU = False
        dtype = torch.FloatTensor
        print '############################## USING CPU #################################'

    from torch.autograd import Variable

    model = torch.load(filename1)
    if GPU == True:
        for i in range(0, 8):
            try:
                model = torch.load(filename1, map_location={'cuda:' + str(i): 'cuda:0'})
                model = model.cuda().eval()
                break
            except:
                pass
        if filename2 is not None:
            for i in range(0, 8):
                try:
                    model2 = torch.load(filename2, map_location={'cuda:' + str(i): 'cuda:0'})
                    model2 = model2.cuda().eval()
                    break
                except:
                    pass
        else:
            model2 = None
    else:
        model = torch.load(filename1, map_location='cpu').eval()
        if filename2 is not None:
            model2 = torch.load(filename2, map_location='cpu').eval()
        else:
            model2 = None

    pub = rospy.Publisher('meshTopic', MeshAttr)
    #rospy.init_node('talker', anonymous=False)

    while not rospy.is_shutdown():

        #pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(self.CTRL_PNL['pmat_mult']*4), a_min=0, a_max=100)))
        pmat = np.fliplr(np.flipud(np.clip(self.pressure.reshape(mat_size)*float(4), a_min=0, a_max=100)))

        #print "max is : ", np.max(pmat)
        #print "sum is : ", np.sum(pmat)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat = gaussian_filter(pmat, sigma=0.5)

        pmat_stack = PreprocessingLib().preprocessing_create_pressure_angle_stack_realtime(pmat, self.bedangle, mat_size, return_height=False)
        print np.shape(pmat_stack)

        if self.CTRL_PNL['cal_noise'] == False:
            pmat_stack = np.clip(pmat_stack, a_min=0, a_max=100)

        pmat_stack = np.array(pmat_stack)
        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            pmat_contact = np.copy(pmat_stack[:, 0:1, :, :])
            pmat_contact[pmat_contact > 0] = 100
            pmat_stack = np.concatenate((pmat_contact, pmat_stack), axis=1)

        weight_input = WEIGHT_LBS / 2.20462
        height_input = (HEIGHT_IN * 0.0254 - 1) * 100

        batch1 = np.zeros((1, 162))
        if GENDER == 'f':
            batch1[:, 157] += 1
        elif GENDER == 'm':
            batch1[:, 158] += 1
        batch1[:, 160] += weight_input
        batch1[:, 161] += height_input

        if self.CTRL_PNL['normalize_std'] == True:
            self.CTRL_PNL['depth_map_input_est'] = False
            pmat_stack = self.TPL.normalize_network_input(pmat_stack, self.CTRL_PNL)
            batch1 = self.TPL.normalize_wt_ht(batch1, self.CTRL_PNL)

        pmat_std_from_mult = ['N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369, 0.0, 25.50538629767412]

        if self.CTRL_PNL['cal_noise'] == False:
            sobel_std_from_mult = ['N/A', 29.80360490415032, 33.33532963163579, 34.14427844692501, 0.0, 34.86393494050921]
        else:
            sobel_std_from_mult = ['N/A', 45.61635847182483, 77.74920396659292, 88.89398421073700, 0.0, 97.90075708182506]

        self.CTRL_PNL['norm_std_coeffs'] = [1. / 41.80684362163343,  # contact
                                            1. / 45.08513083167194,  # neg est depth
                                            1. / 43.55800622930469,  # cm est
                                            1. / pmat_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  # pmat x5
                                            1. / sobel_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  # pmat sobel
                                            1. / 1.0,  # OUTPUT DO NOTHING
                                            1. / 1.0,  # OUTPUT DO NOTHING
                                            1. / 30.216647403350,  # weight
                                            1. / 14.629298141231]  # height
        if self.CTRL_PNL['normalize_std'] == False:
            for i in range(9):
                self.CTRL_PNL['norm_std_coeffs'][i] *= 0.
                self.CTRL_PNL['norm_std_coeffs'][i] += 1.

        pmat_stack = torch.Tensor(pmat_stack)
        batch1 = torch.Tensor(batch1)

        batch = []
        batch.append(pmat_stack)
        batch.append(batch1)

        self.CTRL_PNL['adjust_ang_from_est'] = False
        self.CTRL_PNL['recon_map_output'] = True

        print self.CTRL_PNL['num_input_channels'], batch[0].size(), 'inputs and batch size'

        scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpack_batch(batch, False, model, self.CTRL_PNL)

        self.CTRL_PNL['first_pass'] = False

        mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)  #/ 16.69545796387731
        mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)  #/ 45.08513083167194
        mdm_est_pos[mdm_est_pos < 0] = 0
        mdm_est_neg[mdm_est_neg > 0] = 0
        mdm_est_neg *= -1

        cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100  #/ 43.55800622930469

        #1. / 16.69545796387731,  # pos est depth
        #1. / 45.08513083167194,  # neg est depth
        #1. / 43.55800622930469,  # cm est

        if model2 is not None:
            batch_cor = []
            batch_cor.append(torch.cat((pmat_stack[:, 0:1, :, :],
                                        mdm_est_neg.type(torch.FloatTensor),
                                        cm_est.type(torch.FloatTensor),
                                        pmat_stack[:, 1:, :, :]), dim=1))

            if self.CTRL_PNL['full_body_rot'] == False:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu()), dim=1))
            elif self.CTRL_PNL['full_body_rot'] == True:
                batch_cor.append(torch.cat((batch1,
                                            OUTPUT_DICT['batch_betas_est'].cpu(),
                                            OUTPUT_DICT['batch_angles_est'].cpu(),
                                            OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                            OUTPUT_DICT['batch_root_atan2_est'].cpu()), dim=1))

            self.CTRL_PNL['adjust_ang_from_est'] = True
            self.CTRL_PNL['recon_map_output'] = True
            scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpack_batch(batch_cor, False, model2, self.CTRL_PNL)

        betas_est = np.squeeze(OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy())
        angles_est = np.squeeze(OUTPUT_DICT['batch_angles_est_post_clip'])
        root_shift_est = np.squeeze(OUTPUT_DICT['batch_root_xyz_est_post_clip'].cpu().numpy())

        #print betas_est.shape, root_shift_est.shape, angles_est.shape
        #print betas_est, root_shift_est, angles_est

        angles_est = angles_est.reshape(72)

        for idx in range(10):
            #print shape_pose_vol[0][idx]
            self.m.betas[idx] = betas_est[idx]

        for idx in range(72):
            self.m.pose[idx] = angles_est[idx]

        init_root = np.array(self.m.pose[0:3]) + 0.000001
        init_rootR = libKinematics.matrix_from_dir_cos_angles(init_root)
        root_rot = libKinematics.eulerAnglesToRotationMatrix([np.pi, 0.0, np.pi / 2])
        #print root_rot
        trans_root = libKinematics.dir_cos_angles_from_matrix(np.matmul(root_rot, init_rootR))

        self.m.pose[0] = trans_root[0]
        self.m.pose[1] = trans_root[1]
        self.m.pose[2] = trans_root[2]

        #print self.m.J_transformed[1, :], self.m.J_transformed[4, :]
        # self.m.pose[51] = selection_r

        print self.m.r
        #print OUTPUT_DICT['verts']

        pyRender.mesh_render_pose_bed_orig(self.m, root_shift_est, self.point_cloud_array, self.pc_isnew,
                                           pmat * 4, self.markers, self.bedangle)
        self.point_cloud_array = None
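# Both real-time estimators build the 162-wide "batch1" vector the same way: a gender one-hot at
# indices 157/158, weight in kilograms at index 160, and height (meters above 1 m, expressed in
# cm) at index 161, with the remaining entries left zero here. A minimal sketch of that
# construction, assuming those index conventions from the code above; the helper name
# build_body_info_vector is hypothetical and not part of the original code.
def build_body_info_vector(gender, weight_lbs, height_in):
    body_info = np.zeros((1, 162))
    if gender == 'f':
        body_info[:, 157] = 1
    elif gender == 'm':
        body_info[:, 158] = 1
    body_info[:, 160] = weight_lbs / 2.20462             # pounds to kilograms
    body_info[:, 161] = (height_in * 0.0254 - 1) * 100   # inches to meters, minus 1 m, times 100
    return body_info

# usage sketch:
#   batch1 = build_body_info_vector(GENDER, WEIGHT_LBS, HEIGHT_IN)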