def main(scene_idx=0):
    mapper_scene2z = get_mapper()
    mapper_scene2points = get_mapper_scene2points()
    Train_Scenes, Test_Scenes = get_train_test_scenes()
    scene_name = Test_Scenes[scene_idx]
    num_startPoints = len(mapper_scene2points[scene_name])
    num_steps = 35

    ## create test folder
    test_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/test_IBVS'
    '''
    approach_folder = '{}/SIFT_interMatrix_gtDepth_Vz_OmegaY'.format(test_folder)
    create_folder(approach_folder)
    scene_folder = '{}/{}'.format(approach_folder, scene_name)
    create_folder(scene_folder)
    '''
    ## perception_rep and depth_method are module-level globals set elsewhere in the script
    f = open('{}/{}_{}.txt'.format(test_folder, perception_rep, depth_method), 'a')
    f.write('scene_name = {}\n'.format(scene_name))

    list_count_correct = []
    list_count_runs = []
    list_count_steps = []

    ## rrt functions
    ## first figure out how to sample points from rrt graph
    rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(scene_name)
    path_finder = rrt.PathFinder(rrt_directory)
    path_finder.load()
    num_nodes = len(path_finder.nodes_x)
    free = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 0)

    ## GibsonEnv setup
    import gym, logging
    from mpi4py import MPI
    from gibson.envs.husky_env import HuskyNavigateEnv
    from baselines import logger
    import skimage.io
    from transforms3d.euler import euler2quat

    config_file = os.path.join('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop', 'env_yamls', '{}_navigate.yaml'.format(scene_name))
    env = HuskyNavigateEnv(config=config_file, gpu_count=1)
    ## this reset is important, otherwise env.step() fails with
    ## "AttributeError: 'HuskyNavigateEnv' object has no attribute 'potential'"
    obs = env.reset()

    def get_obs(current_pose):
        pos, orn = func_pose2posAndorn(current_pose, mapper_scene2z[scene_name])
        env.robot.reset_new_pose(pos, orn)
        obs, _, _, _ = env.step(4)
        obs_rgb = obs['rgb_filled']
        obs_depth = obs['depth']
        return obs_rgb.copy(), obs_depth.copy()

    base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('test')

    ## go through each point folder
    for point_idx in range(0, num_startPoints):
        print('point_idx = {}'.format(point_idx))
        #point_folder = '{}/point_{}'.format(scene_folder, point_idx)
        #create_folder(point_folder)

        ## read in start img and start pose
        point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)
        point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))
        start_img = cv2.imread('{}/{}.png'.format(point_image_folder, point_pose_npy_file[0]['img_name']))[:, :, ::-1]
        start_pose = point_pose_npy_file[0]['pose']

        ## index 0 is the left image, so right_img_idx starts from index 1
        count_correct = 0
        list_correct_img_names = []
        list_whole_stat = []
        list_steps = []
        for right_img_idx in range(1, len(point_pose_npy_file)):
            flag_correct = False
            print('right_img_idx = {}'.format(right_img_idx))
            count_steps = 0
            current_pose = start_pose

            right_img_name = point_pose_npy_file[right_img_idx]['img_name']
            goal_pose = point_pose_npy_file[right_img_idx]['pose']
            goal_img, goal_depth = get_obs(goal_pose)

            list_result_poses = [current_pose]
            num_matches = 0
            flag_broken = False
            while count_steps < num_steps:
                current_img, current_depth = get_obs(current_pose)
                ## perturb the ground-truth depth channel with Gaussian noise (std = 0.5)
                noise = np.random.normal(0.0, 0.5, (256, 256))
                current_depth[:, :, 0] += noise
                try:
                    kp1, kp2 = detect_correspondences(current_img, goal_img)
                    if count_steps == 0:
                        start_depth = current_depth.copy()
                except:
                    print('run into error')
                    break
                num_matches = kp1.shape[1]
                vx, vz, omegay, flag_stop = compute_velocity_through_correspondences_and_depth(kp1, kp2, current_depth)
                previous_pose = current_pose
                current_pose, _, _, flag_stop_goToPose = goToPose_one_step(current_pose, vx, vz, omegay)

                ## check if there is collision during the action
                left_pixel = path_finder.point_to_pixel((previous_pose[0], previous_pose[1]))
                right_pixel = path_finder.point_to_pixel((current_pose[0], current_pose[1]))
                ## rrt.line_check returns True when there is no obstacle
                if not rrt.line_check(left_pixel, right_pixel, free):
                    flag_broken = True
                    print('run into an obstacle ...')
                    break

                ## check if we should stop or not
                if flag_stop or flag_stop_goToPose:
                    print('flag_stop = {}, flag_stop_goToPose = {}'.format(flag_stop, flag_stop_goToPose))
                    print('break')
                    break

                count_steps += 1
                list_result_poses.append(current_pose)

            ## sample current_img again to save in list_obs
            current_img, current_depth = get_obs(current_pose)

            ## decide if this run is successful or not
            flag_correct, dist, theta_change = similar_location_under_certainThreshold(goal_pose, list_result_poses[count_steps])
            print('dist = {}, theta = {}'.format(dist, theta_change))
            if flag_correct:
                count_correct += 1
                list_correct_img_names.append(right_img_name[10:])
                str_succ = 'Success'
            else:
                str_succ = 'Failure'
            print('str_succ = {}'.format(str_succ))
            list_steps.append(len(list_result_poses))

            ## =========================================================================================
            ## plot the pose graph and save per-run stats (currently disabled)
            '''
            img_name = 'goTo_{}.jpg'.format(right_img_name[10:])
            print('img_name = {}'.format(img_name))
            ## plot the poses
            free2 = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 1)
            rows, cols, _ = free2.shape
            plt.imshow(free2)
            for m in range(len(list_result_poses)):
                pose = list_result_poses[m]
                x, y = path_finder.point_to_pixel((pose[0], pose[1]))
                theta = pose[2]
                plt.arrow(x, y, cos(theta), sin(theta), color='y',
                    overhang=1, head_width=0.1, head_length=0.15, width=0.001)
            ## draw goal pose
            x, y = path_finder.point_to_pixel((goal_pose[0], goal_pose[1]))
            theta = goal_pose[2]
            plt.arrow(x, y, cos(theta), sin(theta), color='r',
                overhang=1, head_width=0.1, head_length=0.15, width=0.001)
            plt.axis([0, cols, 0, rows])
            plt.xticks([])
            plt.yticks([])
            plt.title('{}, start point_{}, goal viewpoint {}, {}\n dist = {:.4f} meter, theta = {:.4f} degree\n'.format(scene_name, point_idx, right_img_name[10:], str_succ, dist, theta_change))
            plt.savefig('{}/{}'.format(point_folder, img_name), bbox_inches='tight', dpi=400)
            plt.close()

            ## save stats
            current_test_dict = {}
            current_test_dict['img_name'] = right_img_name
            current_test_dict['success_flag'] = flag_correct
            current_test_dict['dist'] = dist
            current_test_dict['theta'] = theta_change
            current_test_dict['steps'] = count_steps
            current_test_dict['collision'] = flag_broken
            list_whole_stat.append(current_test_dict)
            np.save('{}/runs_statistics.npy'.format(point_folder), list_whole_stat)

            success_rate = 1.0 * count_correct / (len(point_pose_npy_file) - 1)
            print('count_correct/num_right_images = {}/{} = {}'.format(count_correct, len(point_pose_npy_file) - 1, success_rate))
            ## write correctly run target image names to file
            f = open('{}/successful_runs.txt'.format(point_folder), 'w')
            f.write('count_correct/num_right_images = {}/{} = {}\n'.format(count_correct, len(point_pose_npy_file) - 1, success_rate))
            for i in range(len(list_correct_img_names)):
                f.write('{}\n'.format(list_correct_img_names[i]))
            f.close()
            print('writing correct run image names to txt ...')
            '''

        avg_steps = 1.0 * sum(list_steps) / len(list_steps)
        f.write('point {} : {}/{}, {}\n'.format(point_idx, count_correct, len(point_pose_npy_file) - 1, avg_steps))
        list_count_correct.append(count_correct)
        list_count_runs.append(len(point_pose_npy_file) - 1)
        list_count_steps.append(avg_steps)
        f.flush()

    avg_count_steps = 1.0 * sum(list_count_steps) / len(list_count_steps)
    f.write('In total : {}/{}, {}\n'.format(sum(list_count_correct), sum(list_count_runs), avg_count_steps))
    f.write('-------------------------------------------------------------------------------------\n')
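## ---------------------------------------------------------------------------
## For reference: compute_velocity_through_correspondences_and_depth above is
## used as a black box. The sketch below illustrates the classic IBVS control
## law such a function is typically built around, restricted to the
## (vz, omega_y) degrees of freedom this script commands. It is a minimal
## illustration under assumed conventions (2 x N keypoint arrays in
## normalized image coordinates, one depth value per keypoint), NOT the
## repo's actual implementation.
import numpy as np

def ibvs_vz_omegay_sketch(kp_cur, kp_goal, Z, lam=0.5):
    ## stack the (vz, omega_y) columns of the point-feature interaction matrix
    N = kp_cur.shape[1]
    L = np.zeros((2 * N, 2))
    err = (kp_cur - kp_goal).T.reshape(-1)        ## feature error s - s*
    for i in range(N):
        x, y = kp_cur[0, i], kp_cur[1, i]
        L[2 * i] = [x / Z[i], -(1.0 + x * x)]     ## x-row for point i
        L[2 * i + 1] = [y / Z[i], -x * y]         ## y-row for point i
    ## v = -lam * pinv(L) @ err, solved here as a least-squares problem
    v, _, _, _ = np.linalg.lstsq(L, -lam * err, rcond=None)
    return v[0], v[1]                             ## (vz, omega_y)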
def main(scene_idx=0, point_a=0, right_a=0):
    Train_Scenes, Test_Scenes = get_train_test_scenes()
    mapper_scene2points = get_mapper_scene2points()
    scene_name = Test_Scenes[scene_idx]
    num_startPoints = len(mapper_scene2points[scene_name])

    ## the move-forward distance is 0.1 and the assumed velocity is 0.01,
    ## so each segment between consecutive waypoints needs 10 interpolation steps
    def pose_interpolation(start_pose, end_pose, num=10, include_endpoint=False):
        x0, y0, theta0 = start_pose
        x1, y1, theta1 = end_pose
        x = np.linspace(x0, x1, num=num, endpoint=include_endpoint)
        y = np.linspace(y0, y1, num=num, endpoint=include_endpoint)
        ## convert the headings to quaternions and slerp between them
        q0 = Quaternion(axis=[0, -1, 0], angle=theta0)
        q1 = Quaternion(axis=[0, -1, 0], angle=theta1)
        pose_list = []
        v = np.array([1, 0, 0])
        for idx, q in enumerate(Quaternion.intermediates(q0, q1, num - 1, include_endpoints=True)):
            if idx < num:
                e, d, f = q.rotate(v)
                theta = atan2(f, e)
                current_pose = [x[idx], y[idx], theta]
                pose_list.append(current_pose)
        return pose_list

    ## rrt functions
    ## first figure out how to sample points from rrt graph
    rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(scene_name)
    path_finder = rrt.PathFinder(rrt_directory)
    path_finder.load()
    num_nodes = len(path_finder.nodes_x)
    free = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 0)

    ## draw the observations
    ## setup environment
    import gym, logging
    from mpi4py import MPI
    from gibson.envs.husky_env import HuskyNavigateEnv
    from baselines import logger
    import skimage.io
    from transforms3d.euler import euler2quat

    config_file = os.path.join('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop', 'env_yamls', '{}_navigate.yaml'.format(scene_name))
    env = HuskyNavigateEnv(config=config_file, gpu_count=1)
    ## this reset is important, otherwise env.step() fails with
    ## "AttributeError: 'HuskyNavigateEnv' object has no attribute 'potential'"
    obs = env.reset()
    mapper_scene2z = get_mapper()

    def get_obs(current_pose):
        pos, orn = func_pose2posAndorn(current_pose, mapper_scene2z[scene_name])
        env.robot.reset_new_pose(pos, orn)
        obs, _, _, _ = env.step(4)
        obs_rgb = obs['rgb_filled']
        obs_depth = obs['depth']
        return obs_rgb, obs_depth

    def close_to_goal(pose1, pose2, thresh=0.20):
        L2_dist = math.sqrt((pose1[0] - pose2[0])**2 + (pose1[1] - pose2[1])**2)
        thresh_L2_dist = thresh
        theta_change = abs(pose1[2] - pose2[2]) / math.pi * 180
        return (L2_dist <= thresh_L2_dist)  #and (theta_change <= 30)

    base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('test')

    for point_idx in range(point_a, point_a + 1):
        print('point_idx = {}'.format(point_idx))

        ## read in start img and start pose
        point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)
        point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))
        save_folder = '{}/{}/point_{}'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/for_video', scene_name, point_idx)

        ## index 0 is the left image, so right_img_idx starts from index 1
        #for right_img_idx in range(1, len(point_pose_npy_file)):
        for right_img_idx in range(right_a, right_a + 1):
            right_img_name = point_pose_npy_file[right_img_idx]['img_name']

            ## read in the pose npy file generated by the DQN controller
            dqn_pose_npy_file = np.load('{}/run_{}/{}_waypoint_pose_list.npy'.format(save_folder, right_img_idx, right_img_name[10:]))
            start_pose = dqn_pose_npy_file[0]
            goal_pose = dqn_pose_npy_file[1]
            dqn_pose_list = dqn_pose_npy_file[2]

            goal_img, goal_depth = get_obs(goal_pose)
            goal_img = goal_img[:, :, ::-1]
            cv2.imwrite('{}/run_{}/goal_img.jpg'.format(save_folder, right_img_idx), goal_img)

            ## build the interpolated subsequence between consecutive DQN waypoints
            interpolated_pose_list = []
            len_dqn_pose_list = len(dqn_pose_list)
            for i in range(len_dqn_pose_list - 1):
                first_pose = dqn_pose_list[i]
                second_pose = dqn_pose_list[i + 1]
                subseq_pose_list = pose_interpolation(first_pose, second_pose)
                interpolated_pose_list += subseq_pose_list
            interpolated_pose_list.append(dqn_pose_list[-1])

            img_name = 'goTo_{}.jpg'.format('current')
            print('img_name = {}'.format(img_name))

            ## plot the poses
            free2 = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 1)
            rows, cols, _ = free2.shape
            plt.imshow(free2)
            for m in range(len(interpolated_pose_list)):
                pose = interpolated_pose_list[m]
                x, y = path_finder.point_to_pixel((pose[0], pose[1]))
                theta = pose[2]
                plt.arrow(x, y, cos(theta), sin(theta), color='y',
                    overhang=1, head_width=0.1, head_length=0.15, width=0.001)
            ## draw goal pose
            x, y = path_finder.point_to_pixel((goal_pose[0], goal_pose[1]))
            theta = goal_pose[2]
            plt.arrow(x, y, cos(theta), sin(theta), color='r',
                overhang=1, head_width=0.1, head_length=0.15, width=0.001)
            ## draw start pose
            x, y = path_finder.point_to_pixel((start_pose[0], start_pose[1]))
            theta = start_pose[2]
            plt.arrow(x, y, cos(theta), sin(theta), color='b',
                overhang=1, head_width=0.1, head_length=0.15, width=0.001)
            plt.axis([0, cols, 0, rows])
            plt.xticks([])
            plt.yticks([])
            #plt.title('{}, start point_{}, goal viewpoint {}, {}'.format(scene_name, point_idx, right_img_name[10:], str_succ))
            plt.savefig('{}/run_{}/{}'.format(save_folder, right_img_idx, 'overview.jpg'), bbox_inches='tight', dpi=400)
            plt.close()

            ## render and save one frame per interpolated pose
            for i in range(len(interpolated_pose_list)):
                current_pose = interpolated_pose_list[i]
                obs_rgb, obs_depth = get_obs(current_pose)
                obs_rgb = obs_rgb[:, :, ::-1]
                cv2.imwrite('{}/run_{}/step_{}.jpg'.format(save_folder, right_img_idx, i), obs_rgb)
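## ---------------------------------------------------------------------------
## Why pose_interpolation slerps the heading through pyquaternion instead of
## running np.linspace on theta directly: linear interpolation breaks when the
## two headings straddle the +/-pi wrap-around. A minimal equivalent 2D
## shortest-arc interpolation (hypothetical helper, same radian convention):
from math import atan2, sin, cos

def interp_theta_sketch(theta0, theta1, t):
    ## wrap the difference into (-pi, pi] so we always rotate the short way
    d = atan2(sin(theta1 - theta0), cos(theta1 - theta0))
    return theta0 + t * d

## e.g. interp_theta_sketch(3.0, -3.0, 0.5) crosses pi instead of sweeping
## all the way back through 0.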
## this excerpt starts at the sys.path setup; sys and numpy (and os, math,
## cv2 used below) are assumed to be imported at the top of the original file
import sys
import numpy as np

sys.path.append('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop')
sys.path.append('/home/reza/Datasets/GibsonEnv/my_code/visual_servoing')
sys.path.append('/home/reza/Datasets/GibsonEnv/my_code/DDPG')

from util_visual_servoing import (get_train_test_scenes, get_mapper, get_mapper_scene2points,
    create_folder, get_mapper_dist_theta_heading, get_pose_from_name,
    sample_gt_random_dense_correspondences, sample_gt_dense_correspondences)
import rrt
import random
from util import action2pose, func_pose2posAndorn, similar_location_under_certainThreshold, plus_theta_fn, minus_theta_fn
from util_vscontroller import gt_goToPose, genOverlapAreaOnCurrentView, genOverlapAreaOnGoalView, genErrorOverlapAreaOnCurrentView
from utils_ddpg import buildActionMapper, update_current_pose
from dqn_vs import DQN_vs_triplet
import torch

np.set_printoptions(precision=2, suppress=True)

## necessary constants
mapper_scene2points = get_mapper_scene2points()
num_episodes = 200000
batch_size = 128
lambda_action = 0.25
action_table = buildActionMapper(flag_fewer_actions=True)
seq_len = 50

def main(scene_idx=0, actual_episodes=1):
    Train_Scenes, Test_Scenes = get_train_test_scenes()
    scene_name = Train_Scenes[scene_idx]
    num_startPoints = len(mapper_scene2points[scene_name])
    model_weights_save_path = '/home/reza/Datasets/GibsonEnv/my_code/vs_controller/trained_dqn'
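## ---------------------------------------------------------------------------
## The DQN training loop itself is not part of this excerpt. As a hedged
## illustration of how a discrete controller samples from a table like
## action_table (shape num_actions x 2, rows = (vz, omega_y)), here is a
## generic epsilon-greedy selection sketch; q_net is a hypothetical stand-in
## model, not the actual DQN_vs_triplet interface.
def select_action_sketch(q_net, state_tensor, num_actions, eps=0.1):
    ## explore uniformly with probability eps, otherwise exploit argmax Q
    if random.random() < eps:
        return random.randrange(num_actions)
    with torch.no_grad():
        return q_net(state_tensor).max(1)[1].item()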
def main(scene_idx=0, point_a=0):
    ## necessary constants
    mapper_scene2points = get_mapper_scene2points()
    num_episodes = 200000
    batch_size = 64
    lambda_action = 0.25
    action_table = buildActionMapper(flag_fewer_actions=True)
    seq_len = 50

    ## mode, approach and input_type are module-level globals set elsewhere
    ## in the script (e.g. from command-line arguments)
    Train_Scenes, Test_Scenes = get_train_test_scenes()
    if mode == 'Test':
        scene_name = Test_Scenes[scene_idx]
    elif mode == 'Train':
        scene_name = Train_Scenes[scene_idx]
    num_startPoints = len(mapper_scene2points[scene_name])
    model_weights_save_path = '{}/{}'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/trained_dqn', approach)
    action_space = action_table.shape[0]

    ##=========================================================================================================
    ## rrt functions
    ## first figure out how to sample points from rrt graph
    rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(scene_name)
    path_finder = rrt.PathFinder(rrt_directory)
    path_finder.load()
    num_nodes = len(path_finder.nodes_x)
    free = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 0)

    ##---------------------------------------------------------------------------------------------------------
    ## setup environment
    import gym, logging
    from mpi4py import MPI
    from gibson.envs.husky_env import HuskyNavigateEnv
    from baselines import logger
    import skimage.io
    from transforms3d.euler import euler2quat

    config_file = os.path.join('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop', 'env_yamls', '{}_navigate.yaml'.format(scene_name))
    env = HuskyNavigateEnv(config=config_file, gpu_count=1)
    ## this reset is important, otherwise env.step() fails with
    ## "AttributeError: 'HuskyNavigateEnv' object has no attribute 'potential'"
    obs = env.reset()
    mapper_scene2z = get_mapper()

    def get_obs(current_pose):
        pos, orn = func_pose2posAndorn(current_pose, mapper_scene2z[scene_name])
        env.robot.reset_new_pose(pos, orn)
        obs, _, _, _ = env.step(4)
        obs_rgb = obs['rgb_filled']
        obs_depth = obs['depth']
        return obs_rgb, obs_depth

    def close_to_goal(pose1, pose2, thresh=0.20):
        L2_dist = math.sqrt((pose1[0] - pose2[0])**2 + (pose1[1] - pose2[1])**2)
        thresh_L2_dist = thresh
        theta_change = abs(pose1[2] - pose2[2]) / math.pi * 180
        return (L2_dist <= thresh_L2_dist)  #and (theta_change <= 30)

    ##=========================================================================================================
    if mode == 'Test':
        base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('test')
    elif mode == 'Train':
        base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('train')

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    device = torch.device('cuda:0')  ## default CUDA device
    num_epochs = 200000  ## same as # of trajs sampled
    num_actions = action_table.shape[0]

    ## the Perception_* and DQN_*_Controller classes are imported at module level (not shown in this excerpt)
    if input_type == 'both':
        perception = Perception_overlap(4).to(device)
    elif input_type == 'siamese':
        perception = Perception_siamese(4).to(device)
    elif input_type == 'optical_flow':
        perception = Perception_overlap(2).to(device)
    elif input_type == 'optical_flow_depth':
        perception = Perception_overlap(3).to(device)
    elif input_type == 'optical_flow_depth_normalized':
        perception = Perception_overlap(3).to(device)
    elif input_type == 'optical_flow_depth_unnormalized_mask':
        perception = Perception_overlap(3).to(device)
    elif input_type == 'optical_flow_depth_siamese':
        perception = Perception_siamese_fusion_new(3).to(device)
    elif input_type == 'optical_flow_memory':
        perception = Preception_overlap_resnet(4).to(device)
    else:
        perception = Perception_overlap(2).to(device)

    if input_type == 'siamese':
        model = DQN_OVERLAP_Controller(perception, num_actions, input_size=512).to(device)
    elif input_type == 'optical_flow_memory':
        model = DQN_OVERLAP_RESNET_Controller(perception, num_actions, input_size=512).to(device)
    else:
        model = DQN_OVERLAP_Controller(perception, num_actions, input_size=256).to(device)
    model.load_state_dict(torch.load('{}/dqn_epoch_{}_Uvalda.pt'.format(model_weights_save_path, num_epochs)))
    #model.eval()

    list_succ = []
    list_collision = []

    ## go through each point folder
    if mode == 'Test':
        a, b = 0, 1
    elif mode == 'Train':
        a, b = 7, 8
    #for point_idx in range(0, num_startPoints):
    #for point_idx in range(a, b):
    for point_idx in range(point_a, point_a + 1):
        print('point_idx = {}'.format(point_idx))
        task_folder = '{}/{}/point_{}'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/for_video', scene_name, point_idx)
        create_folder(task_folder)

        ## read in start img and start pose
        point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)
        point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))
        start_pose = point_pose_npy_file[0]['pose']
        start_img, start_depth = get_obs(start_pose)
        start_depth = start_depth.copy()

        count_succ = 0
        count_collision = 0
        count_short_runs = 0
        count_short_runs_collision = 0
        count_short_runs_succ = 0

        ## index 0 is the left image, so right_img_idx starts from index 1
        #for right_img_idx in range(1, len(point_pose_npy_file)):
        for right_img_idx in range(1, 101):
            print('right_img_idx = {}'.format(right_img_idx))
            run_folder = '{}/run_{}'.format(task_folder, right_img_idx)
            create_folder(run_folder)

            current_pose = start_pose
            right_img_name = point_pose_npy_file[right_img_idx]['img_name']
            goal_pose = point_pose_npy_file[right_img_idx]['pose']
            goal_img, goal_depth = get_obs(goal_pose)
            goal_depth = goal_depth.copy()
            current_depth = start_depth.copy()

            episode_reward = 0
            flag_succ = False
            flag_broken = False  ## initialized before the loop so it is defined even when no step runs

            ## poses_list layout: [start_pose, goal_pose, [per-step poses]]
            poses_list = []
            poses_list.append(start_pose)
            poses_list.append(goal_pose)
            poses_list.append([current_pose])

            for i_step in range(seq_len):
                ## build the network input for the chosen input_type
                if input_type == 'both' or input_type == 'siamese':
                    overlapArea_currentView = genOverlapAreaOnCurrentView(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    overlapArea_goalView = genOverlapAreaOnGoalView(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    overlapArea = np.concatenate((overlapArea_currentView, overlapArea_goalView), axis=2)
                elif input_type == 'optical_flow':
                    overlapArea = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    overlapArea = removeCorrespondenceRandomly(overlapArea, keep_prob=1.0)
                elif input_type == 'optical_flow_depth':
                    opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    overlapArea = np.concatenate((opticalFlow, current_depth), axis=2)
                elif input_type == 'optical_flow_depth_normalized':
                    opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    normalized_opticalFlow = normalize_opticalFlow(opticalFlow)
                    normalized_depth = normalize_depth(current_depth)
                    #normalized_depth = np.ones((256, 256, 1), np.float32)
                    overlapArea = np.concatenate((normalized_opticalFlow, normalized_depth), axis=2)
                elif input_type == 'optical_flow_depth_unnormalized_mask':
                    opticalFlow, mask_flow = genGtDenseCorrespondenseFlowMapAndMask(current_depth, goal_depth, current_pose, goal_pose)
                    opticalFlow = opticalFlow[:, :, :2]
                    normalized_depth = current_depth * mask_flow
                    #normalized_opticalFlow = normalize_opticalFlow(opticalFlow)
                    normalized_depth = normalize_depth(normalized_depth)
                    overlapArea = np.concatenate((opticalFlow, normalized_depth), axis=2)
                elif input_type == 'optical_flow_depth_siamese':
                    opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    normalized_depth = normalize_depth(current_depth)
                    #normalized_depth = np.ones((256, 256, 1), np.float32)
                    overlapArea = np.concatenate((opticalFlow, normalized_depth), axis=2)
                elif input_type == 'optical_flow_memory':
                    opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]
                    ## note: old_opticalFlow is only defined if the commented-out
                    ## assignment at the bottom of this loop is re-enabled
                    if i_step == 0:
                        overlapArea = np.concatenate((opticalFlow, opticalFlow), axis=2)
                    else:
                        overlapArea = np.concatenate((old_opticalFlow, opticalFlow), axis=2)
                else:
                    overlapArea = genOverlapAreaOnCurrentView(current_depth, goal_depth, current_pose, goal_pose)[:, :, :2]

                tensor_left = torch.tensor(overlapArea, dtype=torch.float32).to(device).unsqueeze(0).permute(0, 3, 1, 2)
                Qvalue_table = model(tensor_left)  ## batch_size x num_actions
                pred = Qvalue_table.max(1)[1].view(1, 1).detach().cpu().numpy().item()

                ## update current_pose
                vz, omegay = action_table[pred]
                vx = 0.0
                vx = vx * lambda_action
                vz = vz * lambda_action
                omegay = omegay * pi * lambda_action
                previous_pose = current_pose
                current_pose = update_current_pose(current_pose, vx, vz, omegay)
                poses_list[2].append(current_pose)

                ## check if there is collision during the action
                left_pixel = path_finder.point_to_pixel((previous_pose[0], previous_pose[1]))
                right_pixel = path_finder.point_to_pixel((current_pose[0], current_pose[1]))
                ## rrt.line_check returns True when there is no obstacle
                if not rrt.line_check(left_pixel, right_pixel, free):
                    print('run into something')
                    flag_broken = True
                    break

                if close_to_goal(current_pose, goal_pose):
                    print('success run')
                    flag_succ = True
                    break

                ## compute new_state
                current_img, current_depth = get_obs(current_pose)
                current_depth = current_depth.copy()
                #old_opticalFlow = opticalFlow.copy()

            np.save('{}/{}_waypoint_pose_list.npy'.format(run_folder, right_img_name[10:]), poses_list)

            if flag_succ:
                count_succ += 1
                list_succ.append(point_pose_npy_file[right_img_idx]['img_name'])
                if findShortRangeImageName(right_img_name):
                    count_short_runs_succ += 1
            if flag_broken:
                count_collision += 1
                list_collision.append(point_pose_npy_file[right_img_idx]['img_name'])
                if findShortRangeImageName(right_img_name):
                    count_short_runs_collision += 1
            if findShortRangeImageName(right_img_name):
                count_short_runs += 1
            print('count_succ = {}'.format(count_succ))
            print('count_collision = {}'.format(count_collision))
            print('count_short_runs_succ = {}'.format(count_short_runs_succ))
            print('count_short_runs_collision = {}'.format(count_short_runs_collision))

        print('num_succ = {}, num_run = {}, count_short_runs_succ = {}, count_short_runs = {}'.format(count_succ, len(point_pose_npy_file), count_short_runs_succ, count_short_runs))
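## ---------------------------------------------------------------------------
## The poses_list saved above ({}_waypoint_pose_list.npy) is what the
## for_video script earlier in this file reads back: index 0 -> start_pose,
## index 1 -> goal_pose, index 2 -> the per-step pose list. A minimal
## round-trip sketch with made-up poses (allow_pickle is needed on newer
## numpy because the ragged list is stored as an object array):
import numpy as np

example = [[0.0, 0.0, 0.0],                        ## index 0: start_pose (x, y, theta)
           [1.0, 0.5, 0.3],                        ## index 1: goal_pose
           [[0.0, 0.0, 0.0], [0.1, 0.0, 0.05]]]    ## index 2: per-step poses
np.save('/tmp/example_waypoint_pose_list.npy', np.asarray(example, dtype=object))
loaded = np.load('/tmp/example_waypoint_pose_list.npy', allow_pickle=True)
start_pose, goal_pose, dqn_pose_list = loaded[0], loaded[1], loaded[2]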