import os, math
from math import pi, cos, sin
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import rrt  ## local RRT module providing PathFinder and line_check
## assumed: create_folder, get_mapper, get_mapper_scene2points, get_train_test_scenes,
## func_pose2posAndorn and sample_gt_dense_correspondences live in the project's util
## module (as suggested by the later examples in this file)
from util import plus_theta_fn, minus_theta_fn, create_folder, get_mapper, get_mapper_scene2points, get_train_test_scenes, func_pose2posAndorn, sample_gt_dense_correspondences
from util_vscontroller import gt_goToPose, genOverlapAreaOnCurrentView, genOverlapAreaOnGoalView, genErrorOverlapAreaOnCurrentView, genGtDenseCorrespondenseFlowMap, normalize_opticalFlow, normalize_depth, genGtDenseCorrespondenseFlowMapAndMask, removeCorrespondenceRandomly_withSmoothing
from utils_ddpg import buildActionMapper, update_current_pose
from dqn_vs import DQN_vs_overlap, DQN_vs_siamese
from model_vs import *
from util_short_range import findShortRangeImageName

np.set_printoptions(precision=2, suppress=True)
approach = 'thirtyfirst_try_rnn_no_perception_newerSeqReward'  #'twentyeighth_try_vanillaRNN_model' #'twentysixth_try_opticalFlow_rnn'
mode = 'Test'
input_type = 'optical_flow_rnn'
scene_idx = 0
approach_folder = '{}/{}_{}'.format(
    '/home/reza/Datasets/GibsonEnv/my_code/vs_controller/visualize_dqn',
    approach, mode)
create_folder(approach_folder)

def main(scene_idx=0, point_a=0):

	#scene_idx = 1

	## necessary constants
	mapper_scene2points = get_mapper_scene2points()
	num_episodes = 200000
	batch_size = 64
	lambda_action = 0.25
	action_table = buildActionMapper(flag_fewer_actions=True)
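	## action_table maps each discrete action index to a (vz, omegay) velocity command
	## (consumed below as `vz, omegay = action_table[pred]`)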
	seq_len = 50

	Train_Scenes, Test_Scenes = get_train_test_scenes()

	if mode == 'Test':
		scene_name = Test_Scenes[scene_idx]
	elif mode == 'Train':
		scene_name = Train_Scenes[scene_idx]


	num_startPoints = len(mapper_scene2points[scene_name])
	model_weights_save_path = '{}/{}'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/trained_dqn', approach)
	action_space = action_table.shape[0]

	##=============================================================================================================
	## rrt functions
	## first figure out how to sample points from rrt graph
	rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(scene_name)
	path_finder = rrt.PathFinder(rrt_directory)
	path_finder.load()
	num_nodes = len(path_finder.nodes_x)
	free = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 0)
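	## free.png is the scene's binary free-space map; rrt.line_check uses it below to
	## test whether the straight-line segment between two poses crosses an obstacle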

	##------------------------------------------------------------------------------------------------------------
	## setup environment
	import gym, logging
	from mpi4py import MPI
	from gibson.envs.husky_env import HuskyNavigateEnv
	from baselines import logger
	import skimage.io
	from transforms3d.euler import euler2quat
	config_file = os.path.join('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop', 'env_yamls', '{}_navigate.yaml'.format(scene_name))
	env = HuskyNavigateEnv(config=config_file, gpu_count=1)
	obs = env.reset()  ## important: skipping reset() leads to "AttributeError: 'HuskyNavigateEnv' object has no attribute 'potential'" later on
	mapper_scene2z = get_mapper()

	def get_obs(current_pose):
		pos, orn = func_pose2posAndorn(current_pose, mapper_scene2z[scene_name])
		env.robot.reset_new_pose(pos, orn)
		obs, _, _, _ = env.step(4)
		obs_rgb = obs['rgb_filled']
		obs_depth = obs['depth']
		#obs_normal = obs['normal']
		return obs_rgb, obs_depth#, obs_normal

	def close_to_goal(pose1, pose2, thresh=0.20):
		L2_dist = math.sqrt((pose1[0] - pose2[0])**2 + (pose1[1] - pose2[1])**2)
		thresh_L2_dist = thresh
		theta_change = abs(pose1[2] - pose2[2])/math.pi * 180
		return (L2_dist <= thresh_L2_dist) #and (theta_change <= 30)
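	## e.g. close_to_goal([0.0, 0.0, 0.0], [0.1, 0.1, 0.5]) is True, since
	## sqrt(0.1**2 + 0.1**2) ~= 0.14 <= 0.20 (the heading change is computed but currently ignored)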

	##============================================================================================================
	if mode == 'Test':
		base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('test')
	elif mode == 'Train':
		base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('train')


	device = torch.device('cuda:0')     ## Default CUDA device
	num_epochs = 200000 ## same as # of trajs sampled
	num_actions = action_table.shape[0]
	if input_type == 'both':
		perception = Perception_overlap(4).to(device)
	elif input_type == 'siamese':
		perception = Perception_siamese(4).to(device)
	elif input_type == 'optical_flow':
		perception = Perception_overlap(2).to(device)
	elif input_type == 'optical_flow_depth':
		perception = Perception_overlap(3).to(device)
	elif input_type == 'optical_flow_depth_normalized':
		perception = Perception_overlap(3).to(device)
	elif input_type == 'optical_flow_depth_unnormalized_mask':
		perception = Perception_overlap(3).to(device)
	elif input_type == 'optical_flow_depth_siamese':
		perception = Perception_siamese_fusion_new(3).to(device)
	elif input_type == 'optical_flow_memory':
		perception = Preception_overlap_resnet(4).to(device)
	else:
		perception = Perception_overlap(2).to(device)
	if input_type == 'siamese':
		model = DQN_OVERLAP_Controller(perception, num_actions, input_size=512).to(device)
	elif input_type == 'optical_flow_memory':
		model = DQN_OVERLAP_RESNET_Controller(perception, num_actions, input_size=512).to(device)
	else:
		model = DQN_OVERLAP_Controller(perception, num_actions, input_size=256).to(device)
	model.load_state_dict(torch.load('{}/dqn_epoch_{}_Uvalda.pt'.format(model_weights_save_path, num_epochs)))
	#model.eval()

	list_succ = []
	list_collision = []
	## go through each point folder
	if mode == 'Test':
		a, b = 0, 1
	elif mode == 'Train':
		a, b = 7, 8
		#a, b = 0, 1
	#for point_idx in range(0, num_startPoints):
	#for point_idx in range(a, b):
	for point_idx in range(point_a, point_a+1):
		print('point_idx = {}'.format(point_idx))

		task_folder = '{}/{}/point_{}'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/for_video', scene_name, point_idx)
		create_folder(task_folder)

		## read in start img and start pose
		point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)
		point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))

		#start_img = cv2.imread('{}/{}.png'.format(point_image_folder, point_pose_npy_file[0]['img_name']))[:, :, ::-1]
		start_pose = point_pose_npy_file[0]['pose']
		start_img, start_depth = get_obs(start_pose)
		start_depth = start_depth.copy()

		count_succ = 0
		count_collision = 0
		count_short_runs = 0
		count_short_runs_collision = 0
		count_short_runs_succ = 0
		## index 0 is the left image, so right_img_idx starts from index 1
		#for right_img_idx in range(1, len(point_pose_npy_file)):
		for right_img_idx in range(1, 101):
			print('right_img_idx = {}'.format(right_img_idx))

			run_folder = '{}/run_{}'.format(task_folder, right_img_idx)
			create_folder(run_folder)

			current_pose = start_pose
			
			right_img_name = point_pose_npy_file[right_img_idx]['img_name']
			
			goal_pose = point_pose_npy_file[right_img_idx]['pose']
			#goal_img = cv2.imread('{}/{}.png'.format(point_image_folder, right_img_name), 1)[:,:,::-1]
			goal_img, goal_depth = get_obs(goal_pose)
			goal_depth = goal_depth.copy()

			current_depth = start_depth.copy()

			episode_reward = 0

			flag_succ = False
			flag_broken = False  ## define here so it exists even if the step loop exits early

			poses_list = []
			poses_list.append(start_pose)
			poses_list.append(goal_pose)
			poses_list.append([current_pose])

			for i_step in range(seq_len):
				if input_type == 'both' or input_type == 'siamese':
					overlapArea_currentView = genOverlapAreaOnCurrentView(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					overlapArea_goalView = genOverlapAreaOnGoalView(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					overlapArea = np.concatenate((overlapArea_currentView, overlapArea_goalView), axis=2)
				elif input_type == 'optical_flow':
					overlapArea = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					overlapArea = removeCorrespondenceRandomly_withSmoothing(overlapArea, keep_prob=1.0)  ## use the imported name; keep_prob=1.0 keeps every correspondence
				elif input_type == 'optical_flow_depth':
					opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					overlapArea = np.concatenate((opticalFlow, current_depth), axis=2)
				elif input_type == 'optical_flow_depth_normalized':
					opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					normalized_opticalFlow = normalize_opticalFlow(opticalFlow)
					normalized_depth = normalize_depth(current_depth)
					#normalized_depth = np.ones((256, 256, 1), np.float32)
					overlapArea = np.concatenate((normalized_opticalFlow, normalized_depth), axis=2)
				elif input_type == 'optical_flow_depth_unnormalized_mask':
					opticalFlow, mask_flow = genGtDenseCorrespondenseFlowMapAndMask(current_depth, goal_depth, current_pose, goal_pose)
					opticalFlow = opticalFlow[:, :, :2]
					normalized_depth = current_depth * mask_flow
					#normalized_opticalFlow = normalize_opticalFlow(opticalFlow)
					normalized_depth = normalize_depth(normalized_depth)
					overlapArea = np.concatenate((opticalFlow, normalized_depth), axis=2)	
				elif input_type == 'optical_flow_depth_siamese':
					opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					normalized_depth = normalize_depth(current_depth)
					#normalized_depth = np.ones((256, 256, 1), np.float32)
					overlapArea = np.concatenate((opticalFlow, normalized_depth), axis=2)
					#print('overlapArea.shape = {}'.format(overlapArea.shape))
				elif input_type == 'optical_flow_memory':
					opticalFlow = genGtDenseCorrespondenseFlowMap(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
					if i_step == 0:
						overlapArea = np.concatenate((opticalFlow, opticalFlow), axis=2)
					else:
						overlapArea = np.concatenate((old_opticalFlow, opticalFlow), axis=2)
				else:
					overlapArea = genOverlapAreaOnCurrentView(current_depth, goal_depth, current_pose, goal_pose)[:,:,:2]

				
				tensor_left = torch.tensor(overlapArea, dtype=torch.float32).to(device).unsqueeze(0).permute(0, 3, 1, 2)
				Qvalue_table = model(tensor_left)
				pred = Qvalue_table.max(1)[1].view(1, 1).detach().cpu().numpy().item()  ## greedy action: argmax over the action_space Q-values
				#print('Qvalue_table: {}'.format(Qvalue_table))
				#print('pred = {}'.format(pred))
				
				## update current_pose
				vz, omegay = action_table[pred]
				#print('vz = {:.2f}, omegay = {:.2f}'.format(vz, omegay))
				vx = 0.0
				vx = vx * lambda_action
				vz = vz * lambda_action
				omegay = omegay * pi * lambda_action
				#print('actual velocity = {:.2f}, {:.2f}, {:.2f}'.format(vx, vz, omegay))
				previous_pose = current_pose
				current_pose = update_current_pose(current_pose, vx, vz, omegay)
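				## update_current_pose presumably integrates the scaled (vx, vz, omegay)
				## command over one control step to produce the next (x, y, theta) pose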
				poses_list[2].append(current_pose)

				left_pixel = path_finder.point_to_pixel((previous_pose[0], previous_pose[1]))
				right_pixel = path_finder.point_to_pixel((current_pose[0], current_pose[1]))
				## rrt.line_check returns True when there is no obstacle
				if not rrt.line_check(left_pixel, right_pixel, free):
					print('run into something')
					flag_broken = True
					break

				if close_to_goal(current_pose, goal_pose):
					print('success run')
					flag_succ = True
					break

				## compute new_state
				current_img, current_depth = get_obs(current_pose)
				current_depth = current_depth.copy()
				if input_type == 'optical_flow_memory':
					old_opticalFlow = opticalFlow.copy()  ## keep the previous flow for the two-frame memory input; otherwise old_opticalFlow is undefined after step 0

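			## right_img_name has the form 'right_img_dist_{}_theta_{}_heading_{}',
			## so right_img_name[10:] strips the fixed 'right_img_' prefix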
			np.save('{}/{}_waypoint_pose_list.npy'.format(run_folder, right_img_name[10:]), poses_list)
			#assert 1==2

			if flag_succ:
				count_succ += 1
				list_succ.append(point_pose_npy_file[right_img_idx]['img_name'])
				if findShortRangeImageName(right_img_name):
					count_short_runs_succ += 1
			if flag_broken:
				count_collision += 1
				list_collision.append(point_pose_npy_file[right_img_idx]['img_name'])
				if findShortRangeImageName(right_img_name):
					count_short_runs_collision += 1
			if findShortRangeImageName(right_img_name):
				count_short_runs += 1


			print('count_succ = {}'.format(count_succ))
			print('count_collision = {}'.format(count_collision))
			print('count_short_runs_succ = {}'.format(count_short_runs_succ))
			print('count_short_runs_collision = {}'.format(count_short_runs_collision))

		print('num_succ = {}, num_run = {}, count_short_runs_succ = {}, count_short_runs = {}'.format(count_succ, len(point_pose_npy_file), count_short_runs_succ, count_short_runs))
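## hypothetical entry point (the original snippet never calls main); with the
## defaults above this would evaluate start point 0 of the first test scene:
#main(scene_idx=0, point_a=0)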

scene_idx = 0

mapper_scene2z = get_mapper()
mapper_scene2points = get_mapper_scene2points()
Train_Scenes, Test_Scenes = get_train_test_scenes()
scene_name = Test_Scenes[scene_idx]
num_startPoints = len(mapper_scene2points[scene_name])
num_steps = 50

## create test folder
test_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/test_IBVS'
approach_folder = '{}/gtCorrespondence_interMatrix_gtDepth_Vz_OmegaY'.format(
    test_folder)
create_folder(approach_folder)

scene_folder = '{}/{}'.format(approach_folder, scene_name)
create_folder(scene_folder)

## rrt functions
## first figure out how to sample points from rrt graph
rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(
    scene_name)
path_finder = rrt.PathFinder(rrt_directory)
path_finder.load()
num_nodes = len(path_finder.nodes_x)
free = cv2.imread(
    '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.
    format(scene_name), 0)
## Example 4
from util import plus_theta_fn, minus_theta_fn
from util_vscontroller import genOverlapAreaOnCurrentView, gt_goToPose, genOverlapAreaOnGoalView

scene_idx = 1

mapper_scene2z = get_mapper()
mapper_scene2points = get_mapper_scene2points()
Train_Scenes, Test_Scenes = get_train_test_scenes()

scene_name = Test_Scenes[scene_idx]
num_startPoints = len(mapper_scene2points[scene_name])

base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_test'

save_base_folder = '/home/reza/Datasets/GibsonEnv/my_code/vs_controller/overlapping_area'
create_folder(save_base_folder)

for point_idx in range(0, 1):
    print('point_idx = {}'.format(point_idx))

    save_point_folder = '{}/{}_point_{}'.format(save_base_folder, scene_name,
                                                point_idx)
    create_folder(save_point_folder)

    ## read in start img and start pose
    point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name,
                                                 point_idx)
    point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(
        base_folder, scene_name, point_idx))

    start_img = cv2.imread('{}/{}.png'.format(
        point_image_folder,
        point_pose_npy_file[0]['img_name']))[:, :, ::-1]  ## completion inferred from the identical read in the first example; the rest of this example is truncated in the source

## Example 5
def main(scene_idx):
    scene_name = Test_Scenes[scene_idx]
    scene_file_addr = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_test/{}'.format(
        scene_name)

    #scene_name = Train_Scenes[scene_idx]
    #scene_file_addr = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_train/{}'.format(scene_name)
    create_folder(scene_file_addr)

    ## rrt functions
    ## first figure out how to sample points from rrt graph
    rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(
        scene_name)
    path_finder = rrt.PathFinder(rrt_directory)
    path_finder.load()
    num_nodes = len(path_finder.nodes_x)
    free = cv2.imread(
        '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'
        .format(scene_name), 0)

    ## GibsonEnv setup
    config_file = os.path.join(
        '/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop', 'env_yamls',
        '{}_navigate.yaml'.format(scene_name))
    env = HuskyNavigateEnv(config=config_file, gpu_count=1)
    obs = env.reset()  ## important: skipping reset() leads to "AttributeError: 'HuskyNavigateEnv' object has no attribute 'potential'" later on

    def get_obs(current_pose):
        pos, orn = func_pose2posAndorn(current_pose,
                                       mapper_scene2z[scene_name])
        env.robot.reset_new_pose(pos, orn)
        obs, _, _, _ = env.step(4)
        obs_rgb = obs['rgb_filled']
        obs_depth = obs['depth']
        return obs_rgb.copy(), obs_depth.copy()

    left_pose_list = mapper_scene2points[scene_name]
    right_pose_list = []
    for p_idx, p in enumerate(left_pose_list):
        #for p_idx in range(0, 1):
        #p = left_pose_list[p_idx]
        list_whole = []
        x0, y0, theta0 = p
        left_pose = [x0, y0, theta0]

        point_file_addr = '{}/point_{}'.format(scene_file_addr, p_idx)
        create_folder(point_file_addr)
        current_pose = left_pose
        left_rgb, left_depth = get_obs(current_pose)
        cv2.imwrite('{}/left_img.png'.format(point_file_addr),
                    left_rgb[:, :, ::-1])
        np.save('{}/left_img_depth.npy'.format(point_file_addr), left_depth)
        ## add left_img to list_whole
        current_dict = {}
        current_dict['img_name'] = 'left_img'
        current_dict['pose'] = left_pose
        list_whole.append(current_dict)

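        ## theta_list, dist_list, diff_theta_list, mapper_theta, and mapper_dist are
        ## assumed module-level sampling grids (view directions, distances, heading
        ## offsets) defined elsewhere in the original file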
        for i in range(len(theta_list)):
            if i == 0 or i == 6:
                len_dist_list = 2
            elif i == 1 or i == 5:
                len_dist_list = 3
            elif i == 2 or i == 4:
                len_dist_list = 4
            elif i == 3:
                len_dist_list = 6
            print('len_dist_list = {}'.format(len_dist_list))
            for j in range(len_dist_list):

                location_theta = plus_theta_fn(theta0, theta_list[i])
                location_dist = dist_list[j]
                x1 = x0 + location_dist * math.cos(location_theta)
                y1 = y0 + location_dist * math.sin(location_theta)
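                ## (x1, y1) is the candidate goal position: dist_list[j] meters from
                ## the start point along direction theta0 + theta_list[i]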

                left_pixel = path_finder.point_to_pixel((x0, y0))  ## point_to_pixel takes an (x, y) pair, not the full [x, y, theta] pose
                right_pixel = path_finder.point_to_pixel((x1, y1))

                # check the line
                flag = rrt.line_check(left_pixel, right_pixel, free)
                if not flag:
                    print('j = {}, obstacle'.format(j))
                else:
                    for diff_theta_idx in range(len(diff_theta_list)):
                        diff_theta = diff_theta_list[diff_theta_idx]
                        theta1 = plus_theta_fn(theta0, diff_theta)
                        right_pose = [x1, y1, theta1]

                        current_pose = right_pose
                        right_rgb, right_depth = get_obs(current_pose)

                        ## check if there is common space between left img and right img
                        kp1, kp2 = sample_gt_dense_correspondences(
                            left_depth,
                            right_depth,
                            left_pose,
                            right_pose,
                            gap=32,
                            focal_length=128,
                            resolution=256,
                            start_pixel=31)
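                        ## kp1/kp2 hold the sampled ground-truth correspondences;
                        ## keep this pose pair only if more than two points are
                        ## co-visible in both views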
                        if kp1.shape[1] > 2:
                            cv2.imwrite(
                                '{}/right_img_dist_{}_theta_{}_heading_{}.png'.
                                format(point_file_addr, mapper_dist[j],
                                       mapper_theta[i],
                                       mapper_theta[diff_theta_idx]),
                                right_rgb[:, :, ::-1])
                            np.save(
                                '{}/right_img_dist_{}_theta_{}_heading_{}_depth.npy'
                                .format(point_file_addr, mapper_dist[j],
                                        mapper_theta[i],
                                        mapper_theta[diff_theta_idx]),
                                right_depth)
                            right_pose_list.append(right_pose)
                            ## add right_img to list_whole
                            current_dict = {}
                            current_dict['img_name'] = 'right_img_dist_{}_theta_{}_heading_{}'.format(
                                mapper_dist[j], mapper_theta[i],
                                mapper_theta[diff_theta_idx])
                            current_dict['pose'] = right_pose
                            list_whole.append(current_dict)
                        else:
                            print('No common space')

        ## save list_whole
        np.save('{}/point_{}_poses.npy'.format(scene_file_addr, p_idx),
                list_whole)

    # plot the pose graph
    pose_file_addr = '{}'.format(scene_file_addr)
    img_name = '{}_sampled_poses.jpg'.format(scene_name)
    print('img_name = {}'.format(img_name))
    ## plot the poses
    free = cv2.imread(
        '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'
        .format(scene_name), 1)
    rows, cols, _ = free.shape
    plt.imshow(free)
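    ## red arrows mark the start (left) poses, blue arrows the accepted goal (right) poses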
    for m in range(len(left_pose_list)):
        pose = left_pose_list[m]
        x, y = path_finder.point_to_pixel((pose[0], pose[1]))
        theta = pose[2]
        plt.arrow(x, y, cos(theta), sin(theta), color='r', \
            overhang=1, head_width=0.1, head_length=0.15, width=0.001)
    for m in range(len(right_pose_list)):
        pose = right_pose_list[m]
        x, y = path_finder.point_to_pixel((pose[0], pose[1]))
        theta = pose[2]
        plt.arrow(x, y, cos(theta), sin(theta), color='b', \
            overhang=1, head_width=0.1, head_length=0.15, width=0.001)
    plt.axis([0, cols, 0, rows])
    plt.xticks([])
    plt.yticks([])
    plt.savefig('{}/{}'.format(pose_file_addr, img_name),
                bbox_inches='tight',
                dpi=(400))
    plt.close()
scene_idx = 0
category = 'Train'

if category == 'Test':
	scene_name = Test_Scenes[scene_idx]
elif category == 'Train':
	scene_name = Train_Scenes[scene_idx]
num_startPoints = len(mapper_scene2points[scene_name])

print('scene_name = {}'.format(scene_name))

approach = 'third_try'
approach_folder = '/home/reza/Datasets/GibsonEnv/my_code/vs_controller/test_goToPose/{}'.format(approach)
test_folder = '{}/{}'.format(approach_folder, scene_name)
create_folder(test_folder)

## rrt functions
## first figure out how to sample points from rrt graph
rrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(scene_name)
path_finder = rrt.PathFinder(rrt_directory)
path_finder.load()
num_nodes = len(path_finder.nodes_x)
free = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 0)

## setup environment
import gym, logging
from mpi4py import MPI
from gibson.envs.husky_env import HuskyNavigateEnv
from baselines import logger
import skimage.io