Example #1
    def __init__(self,
                 setting_file='search_quadcopter.json',
                 reset_type='random',        # random
                 action_type='discrete',     # 'discrete', 'continuous'
                 observation_type='color',   # 'color', 'depth', 'rgbd'
                 reward_type='distance',     # distance
                 docker=False,
                 resolution=(84, 84)
                 ):
        print(setting_file)
        setting = self.load_env_setting(setting_file)
        self.docker = docker
        self.reset_type = reset_type
        self.roll = 0
        self.pitch = 0

        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   env=self.unreal.path2env,
                                   resolution=resolution)

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(low=np.array(self.continous_actions['low']),
                                           high=np.array(self.continous_actions['high']))

        self.count_steps = 0

        # define observation space:
        # color, depth, rgbd, ...
        self.observation_type = observation_type
        assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd'
        self.observation_shape = self.unrealcv.define_observation(self.cam_id, self.observation_type)

        # define reward type
        # distance, bbox, bbox_distance, ...
        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

        self.rendering = False

        # init augment env
        if 'random' in self.reset_type or 'hide' in self.reset_type:
            self.show_list = self.objects_env
            self.hiden_list = random.sample(self.objects_env, min(15, len(self.objects_env)))
            for x in self.hiden_list:
                self.show_list.remove(x)
                self.unrealcv.hide_obj(x)
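The constructor above only launches the simulator binary, connects the UnrealCV client, and declares the Gym spaces; interaction then goes through the standard gym.Env API. A minimal usage sketch, assuming the project registers this environment with Gym (the environment id below is illustrative, not taken from the source):

import gym
import gym_unrealcv  # assumed to register the UnrealCV environments on import

env = gym.make('UnrealSearch-QuadcopterRandom-v0')  # hypothetical id
obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()          # random action from the space defined above
    obs, reward, done, info = env.step(action)  # classic 4-tuple Gym API
    if done:
        obs = env.reset()
env.close()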
Example #2
    def __init__(self,
                 setting_file='search_rr_plant78.json',
                 reset_type='waypoint',      # testpoint, waypoint
                 augment_env=None,           # texture, target, light
                 test=True,                  # if True will use the test_xy as start point
                 action_type='discrete',     # 'discrete', 'continuous'
                 observation_type='rgbd',    # 'color', 'depth', 'rgbd'
                 reward_type='bbox',         # distance, bbox, bbox_distance
                 docker=False,
                 resolution=(160, 120)
                 ):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0
        print('env_bin: ', setting['env_bin'])
        # run virtual environment in docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch
        self.startpose = self.unrealcv.get_pose(self.cam_id)
        print('start_pose: ', self.startpose)
        # try hardcode start pose
        self.startpose = [750.0, 295.0, 212.3, 356.5, 190.273, 0.0]
        # ACTION: (linear velocity, angular velocity)
        self.ACTION_LIST = [
            (20,   0),
            (20,  15),
            (20, -15),
            (20,  30),
            (20, -30),
        ]
        self.count_steps = 0
        self.max_steps = 100
        self.target_pos = (-60, 0, 50)
        self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        state = self.unrealcv.read_image(self.cam_id, 'lit')
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)
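ACTION_LIST pairs a linear velocity with an angular velocity for each discrete action; the step() methods shown further down decode an action index the same way before issuing a move command. A minimal sketch of that decoding, assuming an instantiated environment env and an integer action (both illustrative here):

velocity, angle = env.ACTION_LIST[action]                      # e.g. action=1 -> (20, 15)
collision = env.unrealcv.move_2d(env.cam_id, angle, velocity)  # same call the tracking env uses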
Example #3
    def __init__(
            self,
            setting_file='depth_fusion.json',
            reset_type='waypoint',  # testpoint, waypoint,
            augment_env=None,  #texture, target, light
            test=True,  # if True will use the test_xy as start point
            action_type='discrete',  # 'discrete', 'continuous'
            observation_type='rgbd',  # 'color', 'depth', 'rgbd'
            reward_type='bbox',  # distance, bbox, bbox_distance,
            docker=False,
            # resolution = (84,84)
            # resolution = (640,480),
            resolution=(640, 480),
            log_dir='log/'):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0
        # self.reset_type = 'random'
        self.reset_type = 'test'
        self.log_dir = log_dir
        # gt_pcl = pcl.load('house-000024-gt.ply')

        # run virtual environment in docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.observation_type = observation_type
        assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd' or self.observation_type == 'gray'
        self.observation_shape = self.unrealcv.define_observation(
            self.cam_id, self.observation_type)

        self.startpose = self.unrealcv.get_pose(self.cam_id)

        #try hardcode start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        # self.startpose = [0.0, 707.1068, 707.1067,0.0,270.0, -45.0] # [0,45,1000]
        # self.startpose = [0.0,99.6,8.72,0.0,270.0,-5.0] #[for depth fusion] [0,5,100]
        # self.startpose = [0.0,70.7,70.7,0.0,270.0,-45.0]
        azimuth, elevation, distance = self.start_pose_rel
        # print('start pose rel', azimuth,elevation,distance)
        self.startpose = poseRelToAbs(azimuth, elevation, distance)
        # print('start_pose: ', self.startpose)
        ''' create base frame '''
        poseOrigin(self.log_dir + 'frame-{:06}.pose.txt'.format(1000))
        # ACTION: (Azimuth, Elevation, Distance)

        self.count_steps = 0
        self.count_house_frames = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        self.gt_pclpose_prev = np.array(self.start_pose_rel)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        # self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)

        self.nn_distance_module = tf.load_op_library(
            '/home/daryl/gym-unrealcv/gym_unrealcv/envs/utils/tf_nndistance_so.so'
        )
        self.total_distance = 0

        objects = self.unrealcv.get_objects()
        # print('objects', objects)
        self.houses = [(obj) for obj in objects if obj.startswith('BAT6_')]
        # print('houses', self.houses)
        for house in self.houses:
            self.unrealcv.hide_obj(house)

        self.house_id = 0
        self.unrealcv.show_obj(self.houses[self.house_id])

        gt_dir = '/hdd/AIRSCAN/datasets/house38_44/groundtruth/'

        self.gt_pcl = []
        for i in range(len(self.houses)):
            gt_fn = gt_dir + self.houses[i] + '_sampled_10k.ply'
            # print('gt', gt_fn)
            # gt_pcl = pcl.load(gt_fn)
            gt_pcl = o3d.read_point_cloud(gt_fn)
            gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
            # gt_pcl = pcl.load('/home/daryl/datasets/BAT6_SETA_HOUSE8_WTR_sampled_10k.ply')
            # gt_pcl = np.asarray(gt_pcl)
            self.gt_pcl.append(np.expand_dims(gt_pcl, axis=0))
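o3d.read_point_cloud is the legacy top-level Open3D loader; newer Open3D releases expose the same function as o3d.io.read_point_cloud. A version-tolerant sketch of the ground-truth loading step above (the path in the usage comment is illustrative):

import numpy as np
import open3d as o3d

def load_gt_pointcloud(ply_path):
    # newer Open3D moved the I/O helpers into the o3d.io namespace;
    # fall back to the legacy top-level function for older installs
    reader = getattr(o3d, 'io', o3d).read_point_cloud
    pcd = reader(ply_path)
    points = np.asarray(pcd.points, dtype=np.float32)
    return np.expand_dims(points, axis=0)  # (1, N, 3), matching the shape built above

# gt = load_gt_pointcloud('/path/to/HOUSE_sampled_10k.ply')  # illustrative path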
Example #4
class depthFusion_keras_multHouse(gym.Env):
    # init the Unreal Gym Environment
    def __init__(
            self,
            setting_file='depth_fusion.json',
            reset_type='waypoint',  # testpoint, waypoint,
            augment_env=None,  #texture, target, light
            test=True,  # if True will use the test_xy as start point
            action_type='discrete',  # 'discrete', 'continuous'
            observation_type='rgbd',  # 'color', 'depth', 'rgbd'
            reward_type='bbox',  # distance, bbox, bbox_distance,
            docker=False,
            # resolution = (84,84)
            # resolution = (640,480),
            resolution=(640, 480),
            log_dir='log/'):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0
        # self.reset_type = 'random'
        self.reset_type = 'test'
        self.log_dir = log_dir
        # gt_pcl = pcl.load('house-000024-gt.ply')

        # run virtual environment in docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.observation_type = observation_type
        assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd' or self.observation_type == 'gray'
        self.observation_shape = self.unrealcv.define_observation(
            self.cam_id, self.observation_type)

        self.startpose = self.unrealcv.get_pose(self.cam_id)

        #try hardcode start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        # self.startpose = [0.0, 707.1068, 707.1067,0.0,270.0, -45.0] # [0,45,1000]
        # self.startpose = [0.0,99.6,8.72,0.0,270.0,-5.0] #[for depth fusion] [0,5,100]
        # self.startpose = [0.0,70.7,70.7,0.0,270.0,-45.0]
        azimuth, elevation, distance = self.start_pose_rel
        # print('start pose rel', azimuth,elevation,distance)
        self.startpose = poseRelToAbs(azimuth, elevation, distance)
        # print('start_pose: ', self.startpose)
        ''' create base frame '''
        poseOrigin(self.log_dir + 'frame-{:06}.pose.txt'.format(1000))
        # ACTION: (Azimuth, Elevation, Distance)

        self.count_steps = 0
        self.count_house_frames = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        self.gt_pclpose_prev = np.array(self.start_pose_rel)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        # self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)

        self.nn_distance_module = tf.load_op_library(
            '/home/daryl/gym-unrealcv/gym_unrealcv/envs/utils/tf_nndistance_so.so'
        )
        self.total_distance = 0

        objects = self.unrealcv.get_objects()
        # print('objects', objects)
        self.houses = [(obj) for obj in objects if obj.startswith('BAT6_')]
        # print('houses', self.houses)
        for house in self.houses:
            self.unrealcv.hide_obj(house)

        self.house_id = 0
        self.unrealcv.show_obj(self.houses[self.house_id])

        gt_dir = '/hdd/AIRSCAN/datasets/house38_44/groundtruth/'

        self.gt_pcl = []
        for i in range(len(self.houses)):
            gt_fn = gt_dir + self.houses[i] + '_sampled_10k.ply'
            # print('gt', gt_fn)
            # gt_pcl = pcl.load(gt_fn)
            gt_pcl = o3d.read_point_cloud(gt_fn)
            gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
            # gt_pcl = pcl.load('/home/daryl/datasets/BAT6_SETA_HOUSE8_WTR_sampled_10k.ply')
            # gt_pcl = np.asarray(gt_pcl)
            self.gt_pcl.append(np.expand_dims(gt_pcl, axis=0))

    def _step(self, action=0):
        # (velocity, angle) = self.ACTION_LIST[action]
        self.count_steps += 1
        self.count_house_frames += 1
        azimuth, elevation, distance = self.discrete_actions[action]
        change_pose = np.array((azimuth, elevation, distance))

        pose_prev = np.array(self.pose_prev)
        # print('pose prev', pose_prev)
        # print('action', change_pose)

        MIN_elevation = 20
        MAX_elevation = 70
        MIN_distance = 100
        # MAX_distance = 150
        MAX_distance = 125

        pose_new = pose_prev + change_pose
        # pose_new = pose_prev + np.array([30,0,0]) # to test ICM
        if pose_new[2] > MAX_distance:
            pose_new[2] = MAX_distance
        elif pose_new[2] < MIN_distance:
            pose_new[2] = MIN_distance
        if (pose_new[1] >= MAX_elevation):
            pose_new[1] = MAX_elevation
        elif (pose_new[1] <= MIN_elevation):
            pose_new[1] = MIN_elevation
        else:
            pose_new[1] = 45.0
        if (pose_new[0] < 0):
            pose_new[0] = 360 + pose_new[0]
        elif (pose_new[0] >= 359):
            pose_new[0] = pose_new[0] - 360

        # print('action ', action)
        # print('pose new', pose_new)
        # print(azimuth, elevation, distance )
        # collision, move_dist = self.unrealcv.move_rel2(self.cam_id, azimuth, elevation, distance)
        collision, move_dist = self.unrealcv.move_rel2(self.cam_id,
                                                       pose_new[0],
                                                       pose_new[1],
                                                       pose_new[2])
        # print('collision', collision)
        # print('distance:   ', move_dist)

        self.pose_prev = pose_new
        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        state = self.unrealcv.get_observation(self.cam_id,
                                              self.observation_type)
        # print('state shape', state.shape)
        depth_pt = self.unrealcv.read_depth(self.cam_id, mode='depthFusion')
        pose = self.unrealcv.get_pose(self.cam_id, 'soft')
        depth = depth_conversion(depth_pt, 320)
        # pose_filename = self.log_dir+'frame-{:06}.pose.txt'.format(self.count_steps)
        # depth_filename = self.log_dir+'frame-{:06}.depth.npy'.format(self.count_steps)
        pose_filename = self.log_dir + 'frame-{:06}.pose-{:06}.txt'.format(
            self.count_house_frames, self.house_id)
        depth_filename = self.log_dir + 'frame-{:06}.depth-{:06}.npy'.format(
            self.count_house_frames, self.house_id)
        write_pose(pose, pose_filename)
        np.save(depth_filename, depth)
        reward, done = self.reward(collision, move_dist)

        # limit max step per episode
        if self.count_steps > self.max_steps:
            done = True
            # print('Reach Max Steps')

        return state, reward, done, {}

    # reset the environment
    def _reset(self, start_pose_rel=None):

        x, y, z, _, yaw, _ = self.startpose
        self.house_id = 0

        for house in self.houses:
            self.unrealcv.hide_obj(house)

        self.unrealcv.show_obj(self.houses[self.house_id])

        if self.reset_type == 'random':
            distance = 1000
            azimuth = 0
            elevation = 45

            p = 90
            distance = distance + random.randint(-250, 250)
            azimuth = random.randint(0, 359)
            elevation = random.randint(35, 55)

            yaw_exp = 270 - azimuth
            pitch = -1 * elevation

            y = distance * sin(radians(p - elevation)) * cos(radians(azimuth))
            x = distance * sin(radians(p - elevation)) * sin(radians(azimuth))

            z = distance * cos(radians(p - elevation))

            self.unrealcv.set_pose(self.cam_id,
                                   [x, y, z, 0, yaw_exp, pitch
                                    ])  # pose = [x, y, z, roll, yaw, pitch]

        else:
            self.unrealcv.set_pose(
                self.cam_id,
                self.startpose)  # pose = [x, y, z, roll, yaw, pitch]

        state = self.unrealcv.get_observation(self.cam_id,
                                              self.observation_type)

        self.count_steps = 0
        self.count_house_frames = 0

        depth_pt = self.unrealcv.read_depth(self.cam_id, mode='depthFusion')
        pose = self.unrealcv.get_pose(self.cam_id, 'soft')
        depth = depth_conversion(depth_pt, 320)
        # depth_filename = self.log_dir+'frame-{:06}.depth-{:06}.npy'.format(self.count_steps)
        # pose_filename = self.log_dir+'frame-{:06}.pose-{:06}.txt'.format(self.count_steps)
        pose_filename = self.log_dir + 'frame-{:06}.pose-{:06}.txt'.format(
            self.count_house_frames, self.house_id)
        depth_filename = self.log_dir + 'frame-{:06}.depth-{:06}.npy'.format(
            self.count_house_frames, self.house_id)
        write_pose(pose, pose_filename)
        np.save(depth_filename, depth)

        out_pcl_np = depth_fusion_mult(self.log_dir,
                                       first_frame_idx=0,
                                       base_frame_idx=1000,
                                       num_frames=self.count_house_frames + 1,
                                       save_pcd=False,
                                       max_depth=1.0,
                                       house_id=self.house_id)
        # out_fn = 'log/house-' + '{:06}'.format(self.count_steps+1) + '.ply'
        # out_pcl = pcl.load(out_fn)
        # out_pcl_np = np.asarray(out_pcl)
        out_pcl_np = np.expand_dims(out_pcl_np, axis=0)
        self.cd_old = self.compute_chamfer(out_pcl_np)
        # print('cd old ', self.cd_old)
        self.pose_prev = np.array(self.start_pose_rel)

        return state

    # close docker while closing openai gym
    # def _close(self):
    # self.docker.close()

    # calculate reward according to your task
    def reward(self, collision, move_dist):

        done = False

        depth_start = time.time()

        out_pcl_np = depth_fusion_mult(self.log_dir,
                                       first_frame_idx=0,
                                       base_frame_idx=1000,
                                       num_frames=self.count_house_frames + 1,
                                       save_pcd=False,
                                       max_depth=1.0,
                                       house_id=self.house_id)
        # print('out_pcl_np', out_pcl_np.shape)
        if out_pcl_np.shape[0] != 0:
            out_pcl_np = np.expand_dims(out_pcl_np, axis=0)
            cd = self.compute_chamfer(out_pcl_np)
        else:
            cd = 0.0
        cd_delta = cd - self.cd_old

        depth_end = time.time()

        # print("Depth Fusion time: ", depth_end - depth_start)
        # print('coverage: ', cd)
        if cd > 96.0:

            self.unrealcv.hide_obj(self.houses[self.house_id])
            self.house_id += 1

            if (self.house_id == len(self.houses)):
                done = True
                # reward = 50
                reward = 50
            else:
                # print('covered', self.count_steps)
                # print('new house')
                self.unrealcv.show_obj(self.houses[self.house_id])
                self.count_house_frames = 0
                reward = 100

                self.unrealcv.set_pose(self.cam_id, self.startpose)
                depth_pt = self.unrealcv.read_depth(self.cam_id,
                                                    mode='depthFusion')
                pose = self.unrealcv.get_pose(self.cam_id, 'soft')
                depth = depth_conversion(depth_pt, 320)
                # depth_filename = self.log_dir+'frame-{:06}.depth-{:06}.npy'.format(self.count_steps)
                # pose_filename = self.log_dir+'frame-{:06}.pose-{:06}.txt'.format(self.count_steps)
                pose_filename = self.log_dir + 'frame-{:06}.pose-{:06}.txt'.format(
                    self.count_house_frames, self.house_id)
                depth_filename = self.log_dir + 'frame-{:06}.depth-{:06}.npy'.format(
                    self.count_house_frames, self.house_id)
                write_pose(pose, pose_filename)
                np.save(depth_filename, depth)

                out_pcl_np = depth_fusion_mult(
                    self.log_dir,
                    first_frame_idx=0,
                    base_frame_idx=1000,
                    num_frames=self.count_house_frames + 1,
                    save_pcd=False,
                    max_depth=1.0,
                    house_id=self.house_id)
                # out_fn = 'log/house-' + '{:06}'.format(self.count_steps+1) + '.ply'
                # out_pcl = pcl.load(out_fn)
                # out_pcl_np = np.asarray(out_pcl)
                out_pcl_np = np.expand_dims(out_pcl_np, axis=0)
                self.cd_old = self.compute_chamfer(out_pcl_np)
                self.pose_prev = np.array(self.start_pose_rel)

        else:
            # reward = cd_delta*0.2
            reward = cd_delta
            # reward = cd_delta*0.4
            reward += -2  # added to push minimization of steps

        self.cd_old = cd
        self.total_distance += move_dist

        return reward, done

    # calculate the 2D distance between the target and camera
    def cauculate_distance(self, target, current):
        error = abs(np.array(target) - np.array(current))[:2]  # only x and y
        distance = math.sqrt(sum(error * error))
        return distance

    def load_env_setting(self, filename):
        filetype = os.path.splitext(filename)[1]
        with open(self.get_settingpath(filename)) as f:
            if filetype == '.json':
                import json
                setting = json.load(f)
            elif filetype == '.yaml':
                import yaml
                setting = yaml.safe_load(f)
            else:
                raise ValueError('unknown setting file type: ' + filetype)

        self.cam_id = setting['cam_id']
        self.target_list = setting['targets']
        self.max_steps = setting['maxsteps']
        self.trigger_th = setting['trigger_th']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.start_pose_rel = setting['start_pose_rel']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']
        self.env_bin = setting['env_bin']
        self.env_name = setting['env_name']
        print('env name: ', self.env_name)
        print('env id: ', setting['env_bin'])
        return setting

    def get_settingpath(self, filename):
        import gym_unrealcv
        gympath = os.path.dirname(gym_unrealcv.__file__)
        return os.path.join(gympath, 'envs/setting', filename)

    def compute_chamfer(self, output):
        # with tf.Session('') as sess:
        # sess = K.get_session()
        # self.sess.run(tf.global_variables_initializer())
        # loss_out = self.sess.run(loss,feed_dict={inp_placeholder: output})
        with tf.device('/gpu:0'):
            sess = K.get_session()
            with sess.as_default():
                # with tf.Session('') as sess:

                # inp_placeholder = tf.placeholder(tf.float32)
                # reta,retb,retc,retd=self.nn_distance(inp_placeholder,self.gt_pcl)
                # with tf.name_scope('chamfer'):
                # reta,retb,retc,retd=self.nn_distance(output,self.gt_pcl)
                _, _, retc, _ = self.nn_distance(output,
                                                 self.gt_pcl[self.house_id])
                # loss=tf.reduce_sum(reta)+tf.reduce_sum(retc)

                # loss=tf.reduce_sum(retc)
                dist_thresh = tf.greater(0.0008, retc)
                dist_mean = tf.reduce_mean(tf.cast(dist_thresh, tf.float32))

                # loss_out = tf.Tensor.eval(loss)
                coverage = tf.Tensor.eval(dist_mean)
                # coverage2 = tf.Tensor.eval(dist_mean2)
                # print('coverage2 ', coverage2)
                # loss_out = self.sess.run(loss,feed_dict={inp_placeholder: output})
                # print('coverage ', coverage)
                return coverage * 100

    def nn_distance(self, xyz1, xyz2):
        '''
        Computes the distance of nearest neighbors for a pair of point clouds
        input:  xyz1: (batch_size, #points_1, 3)  the first point cloud
        input:  xyz2: (batch_size, #points_2, 3)  the second point cloud
        output: dist1: (batch_size, #points_1)  distance from first to second
        output: idx1:  (batch_size, #points_1)  nearest neighbor from first to second
        output: dist2: (batch_size, #points_2)  distance from second to first
        output: idx2:  (batch_size, #points_2)  nearest neighbor from second to first
        '''
        return self.nn_distance_module.nn_distance(xyz1, xyz2)
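compute_chamfer above reports coverage: the fraction of ground-truth points whose nearest neighbour in the reconstructed cloud lies under a fixed threshold, evaluated through the compiled tf_nndistance op. The same quantity can be sketched with a KD-tree and no custom op; this is only an illustration of the metric, and whether the 0.0008 threshold applies to squared or plain distances depends on that op's convention:

import numpy as np
from scipy.spatial import cKDTree

def coverage_percent(reconstructed_pts, gt_pts, dist_thresh=0.0008, squared=True):
    # reconstructed_pts: (N, 3) array, gt_pts: (M, 3) array.
    # Returns the percentage of ground-truth points that have a reconstructed
    # point closer than dist_thresh; squared=True mimics a threshold applied
    # to squared distances.
    if reconstructed_pts.shape[0] == 0:
        return 0.0
    dists, _ = cKDTree(reconstructed_pts).query(gt_pts)  # nearest-neighbour distances
    if squared:
        dists = dists ** 2
    return 100.0 * float(np.mean(dists < dist_thresh))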
Example #5
class UnrealCvTracking_spline(gym.Env):
    def __init__(
        self,
        setting_file,
        reset_type='Random',  # random
        action_type='Discrete',  # 'Discrete', 'Continuous'
        observation_type='Color',  # 'Color', 'Depth', 'Rgbd'
        reward_type='distance',  # distance
        docker=False,
        resolution=(80, 80)):
        setting = misc.load_env_setting(setting_file)
        self.cam_id = setting['cam_id']
        self.target_list = setting['targets']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']
        self.max_distance = setting['max_distance']
        self.max_direction = setting['max_direction']
        self.objects_env = setting['objects_list']

        self.docker = docker
        self.reset_type = reset_type
        self.roll = 0
        self.pitch = 0
        self.count_steps = 0

        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   env=self.unreal.path2env,
                                   resolution=resolution)

        # define action
        self.action_type = action_type
        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'Continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        # define observation space,
        #  color, depth, rgbd,...
        self.observation_type = observation_type
        assert self.observation_type == 'Color' or self.observation_type == 'Depth' or self.observation_type == 'Rgbd'
        self.observation_space = self.unrealcv.define_observation(
            self.cam_id, self.observation_type)

        # define reward
        self.reward_function = reward.Reward(setting)

        self.rendering = False

        # init augment env
        if 'Random' in self.reset_type:
            self.show_list = self.objects_env
            self.hiden_list = random.sample(self.objects_env,
                                            min(15, len(self.objects_env)))
            for x in self.hiden_list:
                self.show_list.remove(x)
                self.unrealcv.hide_obj(x)

    def render(self, mode='rgb_array', close=False):
        if close == True:
            self.unreal.close()
        return self.unrealcv.img_color

    def step(self, action):
        info = dict(
            Collision=False,
            Done=False,
            Reward=0.0,
            Action=action,
            Pose=[],
            Trajectory=self.trajectory,
            Steps=self.count_steps,
            Direction=None,
            Distance=None,
            Color=None,
            Depth=None,
        )
        action = np.squeeze(action)
        if self.action_type == 'Discrete':
            # linear
            (velocity, angle) = self.discrete_actions[action]
        else:
            (velocity, angle) = action

        info['Collision'] = self.unrealcv.move_2d(self.cam_id, angle, velocity)

        self.count_steps += 1

        info['Pose'] = self.unrealcv.get_pose(self.cam_id)
        self.target_pos = self.unrealcv.get_obj_location(self.target_list[0])
        info['Direction'] = misc.get_direction(info['Pose'], self.target_pos)
        info['Distance'] = self.unrealcv.get_distance(self.target_pos,
                                                      info['Pose'], 2)
        info['Reward'] = self.reward_function.reward_distance(
            info['Distance'], info['Direction'])

        # update observation
        state = self.unrealcv.get_observation(self.cam_id,
                                              self.observation_type, 'direct')
        info['Color'] = self.unrealcv.img_color
        info['Depth'] = self.unrealcv.img_depth

        # done condition
        if info['Distance'] > self.max_distance or abs(
                info['Direction']) > self.max_direction:
            self.count_close += 1
        if self.count_close > 10:
            info['Done'] = True
            info['Reward'] = -1

        # save the trajectory
        self.trajectory.append(info['Pose'])
        info['Trajectory'] = self.trajectory

        self.C_reward += info['Reward']
        return state, info['Reward'], info['Done'], info

    def reset(self):
        self.C_reward = 0
        self.count_close = 0

        self.target_pos = self.unrealcv.get_obj_pose(self.target_list[0])
        # random hide and show objects
        if 'Random' in self.reset_type:
            num_update = 5
            objs_to_hide = random.sample(self.show_list, num_update)
            for x in objs_to_hide:
                self.show_list.remove(x)
                self.hiden_list.append(x)
                self.unrealcv.hide_obj(x)
            objs_to_show = random.sample(self.hiden_list[:-num_update],
                                         num_update)
            for x in objs_to_show:
                self.hiden_list.remove(x)
                self.show_list.append(x)
                self.unrealcv.show_obj(x)
            time.sleep(0.5 * random.random())

        time.sleep(1)
        cam_pos = self.target_pos
        self.target_pos = self.unrealcv.get_obj_location(self.target_list[0])

        # set pose
        self.unrealcv.set_location(self.cam_id, cam_pos[:3])
        self.unrealcv.set_rotation(self.cam_id, cam_pos[-3:])
        current_pose = self.unrealcv.get_pose(self.cam_id, 'soft')

        # get state
        time.sleep(0.1)
        state = self.unrealcv.get_observation(self.cam_id,
                                              self.observation_type)

        self.trajectory = []
        self.trajectory.append(current_pose)
        self.count_steps = 0

        return state

    def close(self):
        self.unreal.close()

    def seed(self, seed=None):
        pass

    def render(self, mode='rgb_array', close=False):
        if close == True:
            self.unreal.close()
        return self.unrealcv.img_color
Example #6
    def __init__(
        self,
        setting_file='sfmHouseTarget.json',
        reset_type='waypoint',  # testpoint, waypoint,
        augment_env=None,  #texture, target, light
        test=True,  # if True will use the test_xy as start point
        action_type='discrete',  # 'discrete', 'continuous'
        observation_type='rgbd',  # 'color', 'depth', 'rgbd'
        reward_type='bbox',  # distance, bbox, bbox_distance,
        docker=False,
        # resolution = (320,240)
        resolution=(640, 480)):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0

        # run virtual environment in docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.startpose = self.unrealcv.get_pose(self.cam_id)
        print('start_pose: ', self.startpose)
        #try hardcode start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        self.startpose = [0.0, 707.1068, 707.1067, 0.0, 270.0,
                          -45.0]  # [0,45,1000]
        # ACTION: (Azimuth, Elevation, Distance)
        print("start")
        object_mask = self.unrealcv.read_image(self.cam_id,
                                               'object_mask',
                                               mode='file')
        cv2.imshow('object_mask', object_mask)
        cv2.waitKey(0)
        boxes = self.unrealcv.get_bboxes(object_mask, self.target_list)

        # ACTION: (linear velocity ,angle velocity)
        # self.ACTION_LIST = [
        #         (20,  0),
        #         (20, 15),
        #         (20,-15),
        #         (20, 30),
        #         (20,-30),
        # ]
        self.count_steps = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        state = self.unrealcv.read_image(self.cam_id, 'lit')
        self.observation_space = gym.spaces.Box(low=0,
                                                high=255,
                                                shape=state.shape)

        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)
Example #7
class sfmHouseTarget(gym.Env):
    # init the Unreal Gym Environment
    def __init__(
        self,
        setting_file='sfmHouseTarget.json',
        reset_type='waypoint',  # testpoint, waypoint,
        augment_env=None,  #texture, target, light
        test=True,  # if True will use the test_xy as start point
        action_type='discrete',  # 'discrete', 'continuous'
        observation_type='rgbd',  # 'color', 'depth', 'rgbd'
        reward_type='bbox',  # distance, bbox, bbox_distance,
        docker=False,
        # resolution = (320,240)
        resolution=(640, 480)):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0

        # run virtual environment in docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.startpose = self.unrealcv.get_pose(self.cam_id)
        print('start_pose: ', self.startpose)
        #try hardcode start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        self.startpose = [0.0, 707.1068, 707.1067, 0.0, 270.0,
                          -45.0]  # [0,45,1000]
        # ACTION: (Azimuth, Elevation, Distance)
        print("start")
        object_mask = self.unrealcv.read_image(self.cam_id,
                                               'object_mask',
                                               mode='file')
        cv2.imshow('object_mask', object_mask)
        cv2.waitKey(0)
        boxes = self.unrealcv.get_bboxes(object_mask, self.target_list)

        # ACTION: (linear velocity ,angle velocity)
        # self.ACTION_LIST = [
        #         (20,  0),
        #         (20, 15),
        #         (20,-15),
        #         (20, 30),
        #         (20,-30),
        # ]
        self.count_steps = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        state = self.unrealcv.read_image(self.cam_id, 'lit')
        self.observation_space = gym.spaces.Box(low=0,
                                                high=255,
                                                shape=state.shape)

        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

    # update the environment step by step
    def _step(self, action=0):
        # (velocity, angle) = self.ACTION_LIST[action]
        self.count_steps += 1
        # collision =  self.unrealcv.move(self.cam_id, angle, velocity)
        # collision = self.unrealcv.move_2d(self.cam_id, angle, velocity)
        # collision = self.unrealcv.move_2d(self.cam_id, angle, velocity)
        azimuth, elevation, distance = action
        collision, move_dist = self.unrealcv.move_rel(self.cam_id, azimuth,
                                                      elevation, distance)

        # print('distance:   ', move_dist)
        print('reward type: ', self.reward_type)
        print('target list: ', self.target_list)
        if 'bbox' in self.reward_type:
            object_mask = self.unrealcv.read_image(self.cam_id, 'object_mask')
            cv2.imshow('object_mask', object_mask)
            cv2.waitKey(0)
            boxes = self.unrealcv.get_bboxes(object_mask, self.target_list)
            reward_bbox, bbox = self.reward_function.reward_bbox(boxes)
            print('bbox reward: ', reward_bbox, bbox)
        reward, done = self.reward(collision, move_dist)
        state = self.unrealcv.read_image(self.cam_id, 'lit')

        # limit max step per episode
        if self.count_steps > self.max_steps:
            done = True
            print('Reach Max Steps')

        return state, reward, done, {}

    # reset the environment
    def _reset(self):
        x, y, z, _, yaw, _ = self.startpose
        pose = self.startpose

        # self.unrealcv.set_position(self.cam_id, x, y, z)
        # self.unrealcv.set_rotation(self.cam_id, 0, yaw, 0)

        self.unrealcv.set_pose(self.cam_id, self.startpose)

        state = self.unrealcv.read_image(self.cam_id, 'lit')
        self.count_steps = 0
        return state

    # close docker while closing openai gym
    # def _close(self):
    # self.docker.close()

    # calculate reward according to your task
    def reward(self, collision, move_dist):
        done = False
        reward = -0.01

        if collision:
            done = True
            reward = -1
            print('Collision Detected!!')
        else:
            #hard code to 1
            reward = -1 * move_dist * (1 / 2000)
            # print('dist reward: ', reward)
            # distance = self.cauculate_distance(self.target_pos, self.unrealcv.get_pose())
            # if distance < 50:
            #     reward = 10
            #     done = True
            #     print ('Get Target Place!')
        return reward, done

    # calculate the 2D distance between the target and camera
    def cauculate_distance(self, target, current):
        error = abs(np.array(target) - np.array(current))[:2]  # only x and y
        distance = math.sqrt(sum(error * error))
        return distance

    def load_env_setting(self, filename):
        filetype = os.path.splitext(filename)[1]
        with open(self.get_settingpath(filename)) as f:
            if filetype == '.json':
                import json
                setting = json.load(f)
            elif filetype == '.yaml':
                import yaml
                setting = yaml.safe_load(f)
            else:
                raise ValueError('unknown setting file type: ' + filetype)

        self.cam_id = setting['cam_id']
        self.target_list = setting['targets']
        self.max_steps = setting['maxsteps']
        self.trigger_th = setting['trigger_th']
        self.height = setting['height']
        self.pitch = setting['pitch']

        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']
        self.env_bin = setting['env_bin']
        self.env_name = setting['env_name']
        print('env name: ', self.env_name)
        print('env id: ', setting['env_bin'])
        return setting

    def get_settingpath(self, filename):
        import gym_unrealcv
        gympath = os.path.dirname(gym_unrealcv.__file__)
        return os.path.join(gympath, 'envs/setting', filename)
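load_env_setting expects the JSON file under gym_unrealcv/envs/setting/ to provide the keys it reads above. A minimal illustrative settings structure is sketched below as a Python dict mirroring the JSON layout; every value is a placeholder, not taken from the project (the key spelling 'continous_actions' follows the code above):

EXAMPLE_SETTING = {
    'env_name': 'ExampleHouse',                  # printed on load
    'env_bin': 'ExampleHouse/Binaries/Linux/ExampleHouse',
    'cam_id': 0,
    'targets': ['SM_Plant_7', 'SM_Plant_8'],     # object names of interest
    'maxsteps': 100,
    'trigger_th': 0.9,
    'height': 50,
    'pitch': 0,
    # per-entry meaning depends on the env, e.g. (velocity, angle, trigger)
    # in the search envs shown below
    'discrete_actions': [[20, 0, 0], [20, 15, 0], [20, -15, 0], [0, 0, 1]],
    'continous_actions': {'low': [0, -15, 0], 'high': [30, 15, 1]},
}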
Example #8
    def __init__(self,
                 setting_file,
                 category,
                 reset_type='waypoint',  # testpoint, waypoint,
                 augment_env=None,  # texture, target, light
                 action_type='Discrete',  # 'Discrete', 'Continuous'
                 observation_type='Rgbd',  # 'Color', 'Depth', 'Rgbd'
                 reward_type='bbox',  # distance, bbox, bbox_distance,
                 docker=False,
                 resolution=(160, 120)
                 ):
        #print(setting_file)
        setting = self.load_env_setting(setting_file)
        self.cam_id = setting['cam_id']
        self.target_list = setting['targets'][category]
        self.trigger_th = setting['trigger_th']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']

        self.docker = docker
        self.reset_type = reset_type
        self.augment_env = augment_env
        
        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        #  define action
        self.action_type = action_type
        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'Continuous':
            self.action_space = spaces.Box(low=np.array(self.continous_actions['low']),
                                           high=np.array(self.continous_actions['high']))

        # define observation space,
        # color, depth, rgbd,...
        self.observation_type = observation_type
        assert self.observation_type == 'Color' or self.observation_type == 'Depth' or self.observation_type == 'Rgbd'
        self.observation_space = self.unrealcv.define_observation(self.cam_id, self.observation_type, 'direct')

        # define reward type
        # distance, bbox, bbox_distance,
        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

        # set start position
        self.trigger_count = 0
        current_pose = self.unrealcv.get_pose(self.cam_id)
        current_pose[2] = self.height
        self.unrealcv.set_location(self.cam_id, current_pose[:3])

        self.count_steps = 0

        self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

        # for reset point generation and selection
        self.reset_module = reset_point.ResetPoint(setting, reset_type, current_pose)
Example #9
class UnrealCvSearch_base(gym.Env):
    def __init__(self,
                 setting_file,
                 category,
                 reset_type='waypoint',  # testpoint, waypoint,
                 augment_env=None,  # texture, target, light
                 action_type='Discrete',  # 'Discrete', 'Continuous'
                 observation_type='Rgbd',  # 'Color', 'Depth', 'Rgbd'
                 reward_type='bbox',  # distance, bbox, bbox_distance,
                 docker=False,
                 resolution=(160, 120)
                 ):
        #print(setting_file)
        setting = self.load_env_setting(setting_file)
        self.cam_id = setting['cam_id']
        self.target_list = setting['targets'][category]
        self.trigger_th = setting['trigger_th']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']

        self.docker = docker
        self.reset_type = reset_type
        self.augment_env = augment_env
        
        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        #  define action
        self.action_type = action_type
        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'Continuous':
            self.action_space = spaces.Box(low=np.array(self.continous_actions['low']),
                                           high=np.array(self.continous_actions['high']))

        # define observation space,
        # color, depth, rgbd,...
        self.observation_type = observation_type
        assert self.observation_type == 'Color' or self.observation_type == 'Depth' or self.observation_type == 'Rgbd'
        self.observation_space = self.unrealcv.define_observation(self.cam_id, self.observation_type, 'direct')

        # define reward type
        # distance, bbox, bbox_distance,
        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

        # set start position
        self.trigger_count = 0
        current_pose = self.unrealcv.get_pose(self.cam_id)
        current_pose[2] = self.height
        self.unrealcv.set_location(self.cam_id, current_pose[:3])

        self.count_steps = 0

        self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

        # for reset point generation and selection
        self.reset_module = reset_point.ResetPoint(setting, reset_type, current_pose)

    def _step(self, action):
        info = dict(
            Collision=False,
            Done=False,
            Trigger=0.0,
            Reward=0.0,
            Action=action,
            Bbox=[],
            Pose=[],
            Trajectory=self.trajectory,
            Steps=self.count_steps,
            Target=[],
            Direction=None,
            Waypoints=self.reset_module.waypoints,
            Color=None,
            Depth=None,
        )

        action = np.squeeze(action)
        if self.action_type == 'Discrete':
            (velocity, angle, info['Trigger']) = self.discrete_actions[action]
        else:
            (velocity, angle, info['Trigger']) = action
        self.count_steps += 1
        info['Done'] = False

        # take action
        info['Collision'] = self.unrealcv.move_2d(self.cam_id, angle, velocity)
        info['Pose'] = self.unrealcv.get_pose(self.cam_id, 'soft')
        # if the robot thinks it has found the target object, the episode is done
        # and it gets a reward based on the bounding box size;
        # only three false triggers are allowed per episode
        if info['Trigger'] > self.trigger_th:
            self.trigger_count += 1
            # get reward
            if 'bbox' in self.reward_type:
                object_mask = self.unrealcv.read_image(self.cam_id, 'object_mask')
                boxes = self.unrealcv.get_bboxes(object_mask, self.target_list)
                info['Reward'], info['Bbox'] = self.reward_function.reward_bbox(boxes)
            else:
                info['Reward'] = 0

            if info['Reward'] > 0 or self.trigger_count > 3:
                info['Done'] = True
                if info['Reward'] > 0 and self.reset_type == 'waypoint':
                    self.reset_module.success_waypoint(self.count_steps)
        else:
            # get reward
            distance, self.target_id = self.select_target_by_distance(info['Pose'][:3], self.targets_pos)
            info['Target'] = self.targets_pos[self.target_id]
            info['Direction'] = self.get_direction(info['Pose'], self.targets_pos[self.target_id])

            # calculate reward according to the distance to target object
            if 'distance' in self.reward_type:
                info['Reward'] = self.reward_function.reward_distance(distance)
            else:
                info['Reward'] = 0

            # if collision detected, the episode is done and reward is -1
            if info['Collision']:
                info['Reward'] = -1
                info['Done'] = True
                if self.reset_type == 'waypoint':
                    self.reset_module.update_dis2collision(info['Pose'])

        # update observation
        state = self.unrealcv.get_observation(self.cam_id, self.observation_type)
        info['Color'] = self.unrealcv.img_color
        info['Depth'] = self.unrealcv.img_depth

        # save the trajectory
        self.trajectory.append(info['Pose'][:6])
        info['Trajectory'] = self.trajectory
        if info['Done'] and len(self.trajectory) > 5 and self.reset_type == 'waypoint':
            self.reset_module.update_waypoint(info['Trajectory'])

        return state, info['Reward'], info['Done'], info

    def _reset(self):

        # double check the resetpoint, it is necessary for random reset type
        collision = True
        while collision:
            current_pose = self.reset_module.select_resetpoint()
            self.unrealcv.set_pose(self.cam_id, current_pose)
            collision = self.unrealcv.move_2d(self.cam_id, 0, 100)
        self.unrealcv.set_pose(self.cam_id, current_pose)

        state = self.unrealcv.get_observation(self.cam_id, self.observation_type)

        self.trajectory = []
        self.trajectory.append(current_pose)
        self.trigger_count = 0
        self.count_steps = 0
        self.reward_function.dis2target_last, self.targetID_last = \
            self.select_target_by_distance(current_pose, self.targets_pos)
        return state

    def _seed(self, seed=None):
        return seed

    def _render(self, mode='rgb_array', close=False):
        if close == True:
            self.unreal.close()
        return self.unrealcv.img_color

    def _close(self):
        self.unreal.close()

    def _get_action_size(self):
        return len(self.action)

    def select_target_by_distance(self, current_pos, targets_pos):
        # find the nearest target, return distance and targetid
        target_id = list(self.targets_pos.keys())[0]
        distance_min = self.unrealcv.get_distance(targets_pos[target_id], current_pos, 2)
        for key, target_pos in targets_pos.items():
            distance = self.unrealcv.get_distance(target_pos, current_pos, 2)
            if distance < distance_min:
                target_id = key
                distance_min = distance
        return distance_min, target_id

    def get_direction(self, current_pose, target_pose):
        y_delt = target_pose[1] - current_pose[1]
        x_delt = target_pose[0] - current_pose[0]
        angle_now = np.arctan2(y_delt, x_delt)/np.pi*180-current_pose[4]
        if angle_now > 180:
            angle_now -= 360
        if angle_now < -180:
            angle_now += 360
        return angle_now

    def load_env_setting(self, filename):
        import gym_unrealcv
        gympath = os.path.dirname(gym_unrealcv.__file__)
        gympath = os.path.join(gympath, 'envs/setting', filename)
        #print(gympath)
        f = open(gympath)
        filetype = os.path.splitext(filename)[1]
        if filetype == '.json':
            import json
            setting = json.load(f)
        else:
            print ('unknown type')

        return setting
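
Below is a minimal, self-contained sketch of the relative-direction computation used by get_direction above. It assumes the pose convention [x, y, z, roll, yaw, pitch] with angles in degrees (yaw at index 4, as in the code) and wraps the angle into [-180, 180] with a modulo, which is effectively equivalent to the explicit checks above; the helper name and the sample values are illustrative only.

import numpy as np

def relative_direction(current_pose, target_pose):
    # bearing from the camera to the target, expressed relative to the camera yaw
    dy = target_pose[1] - current_pose[1]
    dx = target_pose[0] - current_pose[0]
    angle = np.degrees(np.arctan2(dy, dx)) - current_pose[4]
    return (angle + 180.0) % 360.0 - 180.0  # wrap into [-180, 180]

# a camera at the origin with yaw = 0 sees a target at (100, 100, 0) at +45 degrees
print(relative_direction([0, 0, 0, 0, 0, 0], [100, 100, 0]))  # -> 45.0
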
Ejemplo n.º 10
0
class depthFusion(gym.Env):
    # init the Unreal Gym Environment
    def __init__(
            self,
            setting_file='depth_fusion.json',
            reset_type='waypoint',  # testpoint, waypoint,
            augment_env=None,  #texture, target, light
            test=True,  # if True will use the test_xy as start point
            action_type='discrete',  # 'discrete', 'continuous'
            observation_type='rgbd',  # 'color', 'depth', 'rgbd'
            reward_type='bbox',  # distance, bbox, bbox_distance,
            docker=False,
            # resolution = (84,84)
            # resolution = (640,480),
            resolution=(640, 480),
            log_dir='log/'):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0
        # self.reset_type = 'random'
        self.reset_type = 'test'
        self.log_dir = log_dir
        # gt_pcl = pcl.load('house-000024-gt.ply')
        # gt_pcl = pcl.load('/home/daryl/gym-unrealcv/BAT6_SETA_HOUSE44_OBJ_No_InnerMesh_sampled_10k.ply')
        # self.gt_pcl = np.asarray(gt_pcl)
        gt_pcl = o3d.read_point_cloud(
            '/home/daryl/gym-unrealcv/BAT6_SETA_HOUSE44_OBJ_No_InnerMesh_sampled_10k.ply'
        )
        self.gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
        self.gt_pcl = np.expand_dims(self.gt_pcl, axis=0)
        # run virtual environment in docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.observation_type = observation_type
        assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd' or self.observation_type == 'gray'
        self.observation_shape = self.unrealcv.define_observation(
            self.cam_id, self.observation_type)

        self.startpose = self.unrealcv.get_pose(self.cam_id)

        #try hardcode start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        # self.startpose = [0.0, 707.1068, 707.1067,0.0,270.0, -45.0] # [0,45,1000]
        # self.startpose = [0.0,99.6,8.72,0.0,270.0,-5.0] #[for depth fusion] [0,5,100]
        # self.startpose = [0.0,70.7,70.7,0.0,270.0,-45.0]
        azimuth, elevation, distance = self.start_pose_rel
        print('start pose rel', azimuth, elevation, distance)
        self.startpose = poseRelToAbs(azimuth, elevation, distance)
        # print('start_pose: ', self.startpose)
        ''' create base frame '''
        poseOrigin(self.log_dir + 'frame-{:06}.pose.txt'.format(1000))
        # ACTION: (Azimuth, Elevation, Distance)

        # ACTION: (linear velocity ,angle velocity)
        # self.ACTION_LIST = [
        #         (20,  0),
        #         (20, 15),
        #         (20,-15),
        #         (20, 30),
        #         (20,-30),
        # ]
        self.count_steps = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        # self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)

        self.nn_distance_module = tf.load_op_library(
            '/home/daryl/gym-unrealcv/gym_unrealcv/envs/utils/tf_nndistance_so.so'
        )
        # if K.backend() == 'tensorflow':
        #     config = tf.ConfigProto()
        #     config.gpu_options.allow_growth = True
        #     self.sess = tf.Session(config=config)
        #     K.set_session(self.sess)
        # self.sess = K.get_session()

    # update the environment step by step
    def _step(self, action=0):
        # (velocity, angle) = self.ACTION_LIST[action]
        self.count_steps += 1
        # collision =  self.unrealcv.move(self.cam_id, angle, velocity)
        # collision = self.unrealcv.move_2d(self.cam_id, angle, velocity)
        # collision = self.unrealcv.move_2d(self.cam_id, angle, velocity)
        azimuth, elevation, distance = action
        # azimuth, elevation, distance = self.ACTION_LIST[action]
        # azimuth, elevation, distance  = self.discrete_actions[action]
        # print('action ', action)
        # print('pose')
        # print(azimuth, elevation, distance )
        collision, move_dist = self.unrealcv.move_rel(self.cam_id, azimuth,
                                                      elevation, distance)

        # print('distance:   ', move_dist)

        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        state = self.unrealcv.get_observation(self.cam_id,
                                              self.observation_type)
        # print('state shape', state.shape)
        depth_pt = self.unrealcv.read_depth(self.cam_id, mode='depthFusion')
        pose = self.unrealcv.get_pose(self.cam_id, 'soft')
        depth = depth_conversion(depth_pt, 320)
        pose_filename = self.log_dir + 'frame-{:06}.pose.txt'.format(
            self.count_steps)
        depth_filename = self.log_dir + 'frame-{:06}.depth.npy'.format(
            self.count_steps)
        write_pose(pose, pose_filename)
        np.save(depth_filename, depth)
        reward, done = self.reward(collision, move_dist)

        # limit max step per episode
        if self.count_steps > self.max_steps:
            done = True
            print('Reach Max Steps')

        return state, reward, done, {}
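
    # Each call to _step() logs a pose/depth pair ('frame-{:06}.pose.txt' /
    # 'frame-{:06}.depth.npy') under log_dir; depth_fusion() in reward() and
    # _reset() re-reads these frames (together with the base frame written by
    # poseOrigin in __init__) to integrate all views captured so far into one
    # point cloud.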

    # reset the environment
    def _reset(self, start_pose_rel=None):

        x, y, z, _, yaw, _ = self.startpose

        if self.reset_type == 'random':
            distance = 1000
            azimuth = 0
            elevation = 45

            p = 90
            distance = distance + random.randint(-250, 250)
            azimuth = random.randint(0, 359)
            elevation = random.randint(35, 55)

            yaw_exp = 270 - azimuth
            pitch = -1 * elevation

            y = distance * sin(radians(p - elevation)) * cos(radians(azimuth))
            x = distance * sin(radians(p - elevation)) * sin(radians(azimuth))

            z = distance * cos(radians(p - elevation))

            self.unrealcv.set_pose(self.cam_id,
                                   [x, y, z, 0, yaw_exp, pitch
                                    ])  # pose = [x, y, z, roll, yaw, pitch]

        else:
            self.unrealcv.set_pose(
                self.cam_id,
                self.startpose)  # pose = [x, y, z, roll, yaw, pitch]
        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        state = self.unrealcv.get_observation(self.cam_id,
                                              self.observation_type)
        self.count_steps = 0
        depth_pt = self.unrealcv.read_depth(self.cam_id, mode='depthFusion')
        pose = self.unrealcv.get_pose(self.cam_id, 'soft')
        depth = depth_conversion(depth_pt, 320)
        pose_filename = self.log_dir + 'frame-{:06}.pose.txt'.format(
            self.count_steps)
        depth_filename = self.log_dir + 'frame-{:06}.depth.npy'.format(
            self.count_steps)
        write_pose(pose, pose_filename)
        np.save(depth_filename, depth)

        out_pcl_np = depth_fusion(self.log_dir,
                                  first_frame_idx=0,
                                  base_frame_idx=1000,
                                  num_frames=self.count_steps + 1,
                                  save_pcd=True,
                                  max_depth=1.0)
        # out_fn = 'log/house-' + '{:06}'.format(self.count_steps+1) + '.ply'
        # out_pcl = pcl.load(out_fn)
        # out_pcl_np = np.asarray(out_pcl)
        out_pcl_np = np.expand_dims(out_pcl_np, axis=0)
        self.cd_old = self.compute_chamfer(out_pcl_np)
        # print('cd old ', self.cd_old)

        return state

    # close docker while closing openai gym
    # def _close(self):
    # self.docker.close()

    # calculate reward according to your task
    def reward(self, collision, move_dist):

        done = False
        # reward = - 1.0

        # if collision:
        #      done = True
        #      reward = -1
        #      print('Collision Detected!!')
        # else:
        #      #hard code to 1
        #      reward = -1*move_dist*(1/2000)
        # print('dist reward: ', reward)
        # distance = self.cauculate_distance(self.target_pos, self.unrealcv.get_pose())
        # if distance < 50:
        #     reward = 10
        #     done = True
        #     print ('Get Target Place!')
        # print("start depth                                ")
        # print("!!!!!!!!!!!!")
        # print('numframes:', self.count_steps+1)
        depth_start = time.time()

        out_pcl_np = depth_fusion(self.log_dir,
                                  first_frame_idx=0,
                                  base_frame_idx=1000,
                                  num_frames=self.count_steps + 1,
                                  save_pcd=True,
                                  max_depth=1.0)
        # out_fn = 'log/house-' + '{:06}'.format(self.count_steps+1) + '.ply'
        # out_pcl = pcl.load(out_fn)
        # out_pcl_np = np.asarray(out_pcl)
        out_pcl_np = np.expand_dims(out_pcl_np, axis=0)
        cd = self.compute_chamfer(out_pcl_np)
        cd_delta = cd - self.cd_old
        # cd_delta = cd_delta
        # print('coverage: ', cd)
        # print('cov del: ', cd_delta)
        depth_end = time.time()

        # print("Depth Fusion time: ", depth_end - depth_start)

        if cd > 99.0:
            done = True
            reward = 50
            print('covered', self.count_steps)
        else:
            reward = cd_delta * 0.2

        self.cd_old = cd

        return reward, done

    # calculate the 2D distance between the target and camera
    def cauculate_distance(self, target, current):
        error = abs(np.array(target) - np.array(current))[:2]  # only x and y
        distance = math.sqrt(sum(error * error))
        return distance

    def load_env_setting(self, filename):
        f = open(self.get_settingpath(filename))
        filetype = os.path.splitext(filename)[1]
        if filetype == '.json':
            import json
            setting = json.load(f)
        elif filetype == '.yaml':
            import yaml
            setting = yaml.safe_load(f)
        else:
            print('unknown type')

        self.cam_id = setting['cam_id']
        self.target_list = setting['targets']
        self.max_steps = setting['maxsteps']
        self.trigger_th = setting['trigger_th']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.start_pose_rel = setting['start_pose_rel']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']
        self.env_bin = setting['env_bin']
        self.env_name = setting['env_name']
        print('env name: ', self.env_name)
        print('env id: ', setting['env_bin'])
        return setting

    def get_settingpath(self, filename):
        import gym_unrealcv
        gympath = os.path.dirname(gym_unrealcv.__file__)
        return os.path.join(gympath, 'envs/setting', filename)

    def compute_chamfer(self, output):
        # with tf.Session('') as sess:
        # sess = K.get_session()
        # self.sess.run(tf.global_variables_initializer())
        # loss_out = self.sess.run(loss,feed_dict={inp_placeholder: output})
        with tf.device('/cpu:0'):
            sess = K.get_session()
            with sess.as_default():
                # with tf.Session('') as sess:

                # inp_placeholder = tf.placeholder(tf.float32)
                # reta,retb,retc,retd=self.nn_distance(inp_placeholder,self.gt_pcl)
                # with tf.name_scope('chamfer'):
                # reta,retb,retc,retd=self.nn_distance(output,self.gt_pcl)
                _, _, retc, _ = self.nn_distance(output, self.gt_pcl)
                # loss=tf.reduce_sum(reta)+tf.reduce_sum(retc)
                loss = tf.reduce_sum(retc)
                dist_thresh = tf.greater(0.0008, retc)
                # dist_thresh = tf.greater(0.0002, retc)
                dist_mean = tf.reduce_mean(tf.cast(dist_thresh, tf.float32))

                # loss_out = tf.Tensor.eval(loss)
                coverage = tf.Tensor.eval(dist_mean)
                print('coverage ', coverage)
                # loss_out = self.sess.run(loss,feed_dict={inp_placeholder: output})
                return coverage * 100

    # def nn_distance_init(self, output):

    #@tf.RegisterShape('NnDistance')
    #def _nn_distance_shape(op):
    #shape1=op.inputs[0].get_shape().with_rank(3)
    #shape2=op.inputs[1].get_shape().with_rank(3)
    #return [tf.TensorShape([shape1.dims[0],shape1.dims[1]]),tf.TensorShape([shape1.dims[0],shape1.dims[1]]),
    #tf.TensorShape([shape2.dims[0],shape2.dims[1]]),tf.TensorShape([shape2.dims[0],shape2.dims[1]])]
    # @ops.RegisterGradient('NnDistance')
    # def _nn_distance_grad(op,grad_dist1,grad_idx1,grad_dist2,grad_idx2):
    #     xyz1=op.inputs[0]
    #     xyz2=op.inputs[1]
    #     idx1=op.outputs[1]
    #     idx2=op.outputs[3]
    #     return nn_distance_module.nn_distance_grad(xyz1,xyz2,grad_dist1,idx1,grad_dist2,idx2)

    def nn_distance(self, xyz1, xyz2):
        '''
     Computes the distance of nearest neighbors for a pair of point clouds
     input: xyz1: (batch_size,#points_1,3)  the first point cloud
     input: xyz2: (batch_size,#points_2,3)  the second point cloud
     output: dist1: (batch_size,#point_1)   distance from first to second
     output: idx1:  (batch_size,#point_1)   nearest neighbor from first to second
     output: dist2: (batch_size,#point_2)   distance from second to first
     output: idx2:  (batch_size,#point_2)   nearest neighbor from second to first
       '''
        return self.nn_distance_module.nn_distance(xyz1, xyz2)
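
For reference, here is a small NumPy-only sketch of the coverage score that compute_chamfer evaluates above: the percentage of ground-truth points whose nearest reconstructed point lies within a distance threshold. It is an illustrative re-implementation rather than the custom tf_nndistance op; the op is assumed to return squared distances (as most nn_distance implementations do), so the 0.0008 threshold is applied to squared distances here as well.

import numpy as np

def coverage(reconstructed, ground_truth, thresh=0.0008):
    # reconstructed: (N, 3) point cloud, ground_truth: (M, 3) point cloud
    diff = ground_truth[:, None, :] - reconstructed[None, :, :]   # (M, N, 3)
    sq_dist = np.sum(diff * diff, axis=-1)                        # (M, N) squared distances
    nearest = sq_dist.min(axis=1)                                 # nearest reconstructed point per GT point
    return 100.0 * np.mean(nearest < thresh)

# toy check: a reconstruction identical to the ground truth covers 100% of it
pts = np.random.rand(1000, 3).astype(np.float32)
print(coverage(pts, pts))  # -> 100.0

In the environment this percentage drives the reward: the agent receives 0.2 times the per-step change in coverage and a terminal bonus of 50 once coverage exceeds 99%.
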
Ejemplo n.º 11
0
class UnrealCvTracking_base(gym.Env):
   def __init__(self,
                setting_file = 'search_quadcopter.json',
                reset_type = 'random',       # random
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'color', # 'color', 'depth', 'rgbd'
                reward_type = 'distance', # distance
                docker = False,
                resolution=(84, 84)
                ):

     print(setting_file)
     setting = self.load_env_setting(setting_file)
     self.docker = docker
     self.reset_type = reset_type
     self.roll = 0
     self.pitch = 0

     # start unreal env
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
     env_ip, env_port = self.unreal.start(docker,resolution)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                                port= env_port,
                                ip=env_ip,
                                env=self.unreal.path2env,
                                resolution=resolution)

    # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.count_steps = 0


    # define observation space,
    # color, depth, rgbd,...
     self.observation_type = observation_type
     assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd'
     self.observation_shape = self.unrealcv.define_observation(self.cam_id,self.observation_type)

     # define reward type
     # distance, bbox, bbox_distance,
     self.reward_type = reward_type
     self.reward_function = reward.Reward(setting)


     self.rendering = False

     # init augment env
     if 'random' in self.reset_type or 'hide' in self.reset_type:
         self.show_list = self.objects_env
         self.hiden_list = random.sample(self.objects_env, min(15,len(self.objects_env)))
         for x in self.hiden_list:
            self.show_list.remove(x)
            self.unrealcv.hide_obj(x)

   def _render(self, mode='human', close=False):
       self.rendering = True

   def _step(self, action ):
        info = dict(
            Collision=False,
            Done = False,
            Trigger=0.0,
            Maxstep=False,
            Reward=0.0,
            Action = action,
            Pose = [],
            Trajectory = self.trajectory,
            Steps = self.count_steps,
            Direction = None,
            Distance = None,
            Color = None,
            Depth = None,
        )

        if self.action_type == 'discrete':
            # linear
            (velocity, angle) = self.discrete_actions[action]
        else:
            (velocity, angle) = action

        info['Collision'] = self.unrealcv.move_2d(self.cam_id, angle, velocity)

        self.count_steps += 1

        info['Pose'] = self.unrealcv.get_pose(self.cam_id)
        self.target_pos = self.unrealcv.get_obj_location(self.target_list[0])
        info['Direction'] = self.get_direction(self.target_pos, info['Pose'][:3]) - info['Pose'][-2]
        if info['Direction'] < -180:
            info['Direction'] += 360
        elif info['Direction'] > 180:
            info['Direction'] -= 360

        info['Distance'] = self.get_distance(self.target_pos,info['Pose'][:3])

        # update observation
        state = self.unrealcv.get_observation(self.cam_id, self.observation_type)
        info['Color'] = self.unrealcv.img_color
        info['Depth'] = self.unrealcv.img_depth

        if info['Distance'] > self.max_distance or abs(info['Direction'])> self.max_direction:
        #if self.C_reward<-450: # for evaluation
            info['Done'] = True
            info['Reward'] = -1
        elif 'distance' in self.reward_type:
            info['Reward'] = self.reward_function.reward_distance(info['Distance'],info['Direction'])

        # limit the max steps of every episode
        if self.count_steps > self.max_steps:
           info['Done'] = True
           info['Maxstep'] = True
           print('Reach Max Steps')

        # save the trajectory
        self.trajectory.append(info['Pose'])
        info['Trajectory'] = self.trajectory

        if self.rendering:
            show_info(info,self.action_type)

        self.C_reward += info['Reward']
        return state, info['Reward'], info['Done'], info
   def _reset(self, ):
       self.C_reward = 0

       self.target_pos = self.unrealcv.get_obj_location(self.target_list[0])
       # random hide and show objects
       if 'random' in self.reset_type:
           num_update = 3
           objs_to_hide = random.sample(self.show_list,num_update)
           for x in objs_to_hide:
               self.show_list.remove(x)
               self.hiden_list.append(x)
               self.unrealcv.hide_obj(x)
           #self.unrealcv.hide_objects(to_hiden)
           objs_to_show = random.sample(self.hiden_list[:-num_update],num_update)
           for x in objs_to_show:
               self.hiden_list.remove(x)
               self.show_list.append(x)
               self.unrealcv.show_obj(x)
           #self.unrealcv.show_objects(to_show)
           time.sleep(0.5 * random.random())

       time.sleep(0.5)

       cam_pos = self.target_pos
       self.target_pos = self.unrealcv.get_obj_location(self.target_list[0])
       yaw = self.get_direction(self.target_pos, cam_pos)
       # set pose
       self.unrealcv.set_location(self.cam_id, cam_pos)
       self.unrealcv.set_rotation(self.cam_id, [self.roll, yaw, self.pitch])
       current_pose = self.unrealcv.get_pose(self.cam_id,'soft')

       # get state
       state = self.unrealcv.get_observation(self.cam_id, self.observation_type)

       self.trajectory = []
       self.trajectory.append(current_pose)
       self.count_steps = 0

       return state

   def _close(self):
       if self.docker:
           self.unreal.docker.close()


   def _get_action_size(self):
       return len(self.action)


   def get_distance(self,target,current):

       error = abs(np.array(target)[:2] - np.array(current)[:2])# only x and y
       distance = math.sqrt(sum(error * error))
       return distance


   def get_direction(self,target_pos,camera_pos):
       relative_pos = np.array(target_pos) - np.array(camera_pos)
       if relative_pos[0] > 0:
           direction = 180 * np.arctan(relative_pos[1] / relative_pos[0]) / np.pi
       else:
           direction = 180 + 180 * np.arctan(relative_pos[1] / relative_pos[0]) / np.pi

       return direction


   def load_env_setting(self,filename):
       f = open(self.get_settingpath(filename))
       filetype = os.path.splitext(filename)[1]
       if filetype == '.json':
           import json
           setting = json.load(f)
       elif filetype == '.yaml':
           import yaml
           setting = yaml.safe_load(f)
       else:
           print('unknown type')

       #print setting
       self.cam_id = setting['cam_id']
       self.target_list = setting['targets']
       self.max_steps = setting['max_steps']
       self.discrete_actions = setting['discrete_actions']
       self.continous_actions = setting['continous_actions']
       self.max_distance = setting['max_distance']
       self.max_direction = setting['max_direction']
       self.objects_env = setting['objects_list']

       return setting


   def get_settingpath(self, filename):
       import gym_unrealcv
       gympath = os.path.dirname(gym_unrealcv.__file__)
       return os.path.join(gympath, 'envs/setting', filename)
    def __init__(self,
                setting_file = 'depth_fusion.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                augment_env = None,   #texture, target, light
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                resolution = (640,480),
                log_dir='log/'
    ):
     setting = self.load_env_setting(setting_file)
     self.cam_id = 0
     # self.reset_type = 'random'
     self.reset_type = 'test'
     self.log_dir = log_dir
     gt_pcl = o3d.read_point_cloud('/home/daryl/gym-unrealcv/BAT6_SETA_HOUSE44_OBJ_No_InnerMesh_sampled_10k.ply')
     self.gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
     # self.gt_pcl = np.asarray(gt_pcl)
     self.gt_pcl = np.expand_dims(self.gt_pcl,axis=0)

     # start unreal env
     docker = False
     self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
     env_ip,env_port = self.unreal.start(docker,resolution)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env,
                              resolution=resolution)
     self.unrealcv.pitch = self.pitch

      # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.observation_type = observation_type
     assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd' or self.observation_type == 'gray'
     self.observation_shape = self.unrealcv.define_observation(self.cam_id,self.observation_type)

     self.startpose = self.unrealcv.get_pose(self.cam_id)

     azimuth, elevation, distance = self.start_pose_rel
     # print('start pose rel', azimuth,elevation,distance)
     self.startpose = poseRelToAbs(azimuth, elevation, distance)
     # print('start_pose: ', self.startpose)
     ''' create base frame '''
     poseOrigin(self.log_dir+'frame-{:06}.pose.txt'.format(1000))
     # ACTION: (Azimuth, Elevation, Distance)

     self.count_steps = 0
     self.depth_count = 0
     self.depth_thresh = 0.5
     self.total_distance = 0
     # self.max_steps = 35
     self.target_pos = ( -60,   0,   50)
     self.pose_prev = np.array(self.start_pose_rel)
     self.nn_distance_module =tf.load_op_library(self.nn_distance_path)
class depthFusion_keras_cont(gym.Env):
    # init the Unreal Gym Environment
    def __init__(self,
                setting_file = 'depth_fusion.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                augment_env = None,   #texture, target, light
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                resolution = (640,480),
                log_dir='log/'
    ):
     setting = self.load_env_setting(setting_file)
     self.cam_id = 0
     # self.reset_type = 'random'
     self.reset_type = 'test'
     self.log_dir = log_dir
     gt_pcl = o3d.read_point_cloud('/home/daryl/gym-unrealcv/BAT6_SETA_HOUSE44_OBJ_No_InnerMesh_sampled_10k.ply')
     self.gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
     # self.gt_pcl = np.asarray(gt_pcl)
     self.gt_pcl = np.expand_dims(self.gt_pcl,axis=0)

     # start unreal env
     docker = False
     self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
     env_ip,env_port = self.unreal.start(docker,resolution)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env,
                              resolution=resolution)
     self.unrealcv.pitch = self.pitch

      # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.observation_type = observation_type
     assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd' or self.observation_type == 'gray'
     self.observation_shape = self.unrealcv.define_observation(self.cam_id,self.observation_type)

     self.startpose = self.unrealcv.get_pose(self.cam_id)

     azimuth, elevation, distance = self.start_pose_rel
     # print('start pose rel', azimuth,elevation,distance)
     self.startpose = poseRelToAbs(azimuth, elevation, distance)
     # print('start_pose: ', self.startpose)
     ''' create base frame '''
     poseOrigin(self.log_dir+'frame-{:06}.pose.txt'.format(1000))
     # ACTION: (Azimuth, Elevation, Distance)

     self.count_steps = 0
     self.depth_count = 0
     self.depth_thresh = 0.5
     self.total_distance = 0
     # self.max_steps = 35
     self.target_pos = ( -60,   0,   50)
     self.pose_prev = np.array(self.start_pose_rel)
     self.nn_distance_module =tf.load_op_library(self.nn_distance_path)

    def _step(self, action = 0):
        self.count_steps += 1
        azimuth, elevation, distance  = action
        change_pose = np.array((azimuth, elevation, distance))

        pose_prev = np.array(self.pose_prev)
        pose_new = pose_prev + change_pose

        if pose_new[2] > self.max_distance:
            pose_new[2] = self.max_distance
        elif pose_new[2] < self.min_distance:
            pose_new[2] = self.min_distance
        if (pose_new[1] >= self.max_elevation):
            pose_new[1] = self.max_elevation
        elif (pose_new[1] <= self.min_elevation):
            pose_new[1] = self.min_elevation

        if (pose_new[0] < 0):
            pose_new[0] = 360 + pose_new[0]
        elif (pose_new[0]>=359.99):
            pose_new[0] = pose_new[0] - 360

        collision, move_dist = self.unrealcv.move_rel2(self.cam_id, pose_new[0], pose_new[1], pose_new[2])
        self.pose_prev = pose_new
        state = self.unrealcv.get_observation(self.cam_id, self.observation_type)
        depth_pt = self.unrealcv.read_depth(self.cam_id,mode='depthFusion')
        pose = self.unrealcv.get_pose(self.cam_id,'soft')
        depth = depth_conversion(depth_pt, 320)
        pose_filename = self.log_dir+'frame-{:06}.pose.txt'.format(self.count_steps)
        depth_filename = self.log_dir+'frame-{:06}.depth.npy'.format(self.count_steps)
        write_pose(pose, pose_filename)
        np.save(depth_filename, depth)
        reward, done = self.reward(collision,move_dist)

        # limit max step per episode
        if self.count_steps > self.max_steps:
            done = True

        return state, reward, done, {}
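
    # In this continuous variant the reward (see reward() below) is the change
    # in surface coverage minus a movement penalty of 0.04 * move_dist, plus a
    # terminal bonus of 100 once coverage exceeds 96%.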

    # reset the environment
    def _reset(self, start_pose_rel = None):

       x,y,z,_, yaw, _ = self.startpose

       if self.reset_type == 'random':
           distance = 1000
           azimuth = 0
           elevation = 45
           p=90
           distance = distance + random.randint(-250,250)
           azimuth = random.randint(0,359)
           elevation = random.randint(35,55)

           yaw_exp = 270 - azimuth
           pitch = -1*elevation

           y = distance*sin(radians(p-elevation))*cos(radians(azimuth))
           x = distance*sin(radians(p-elevation))*sin(radians(azimuth))
           z = distance*cos(radians(p-elevation))
           self.unrealcv.set_pose(self.cam_id,[x,y,z,0,yaw_exp,pitch]) # pose = [x, y, z, roll, yaw, pitch]

       else:
           self.unrealcv.set_pose(self.cam_id,self.startpose) # pose = [x, y, z, roll, yaw, pitch]
       # state = self.unrealcv.read_image(self.cam_id, 'lit')
       state = self.unrealcv.get_observation(self.cam_id, self.observation_type)
       self.count_steps = 0
       depth_pt = self.unrealcv.read_depth(self.cam_id,mode='depthFusion')
       pose = self.unrealcv.get_pose(self.cam_id,'soft')
       depth = depth_conversion(depth_pt, 320)
       pose_filename = self.log_dir+'frame-{:06}.pose.txt'.format(self.count_steps)
       depth_filename = self.log_dir+'frame-{:06}.depth.npy'.format(self.count_steps)
       write_pose(pose, pose_filename)
       np.save(depth_filename, depth)

       out_pcl_np = depth_fusion(self.log_dir, first_frame_idx =0, base_frame_idx=1000, num_frames = self.count_steps + 1, save_pcd = False, max_depth = 1.0)
       #
       out_pcl_np = np.expand_dims(out_pcl_np,axis=0)
       self.cd_old = self.compute_chamfer(out_pcl_np)
       # print('cd old ', self.cd_old)
       self.pose_prev = np.array(self.start_pose_rel)

       return  state

    # calculate reward according to your task
    def reward(self,collision,move_dist):

       done = False
       depth_start = time.time()
       out_pcl_np = depth_fusion(self.log_dir, first_frame_idx =0, base_frame_idx=1000, num_frames = self.count_steps + 1, save_pcd = False, max_depth = 1.0)
       # print('out_pcl_np', out_pcl_np.shape)
       if out_pcl_np.shape[0] != 0:
           out_pcl_np = np.expand_dims(out_pcl_np,axis=0)
           cd = self.compute_chamfer(out_pcl_np)
       else:
           cd = 0.0
       cd_delta = cd - self.cd_old
       depth_end = time.time()

       if cd > 96.0:
           done = True
           reward = 100
       else:
           reward = cd_delta
           reward += -0.04 * move_dist

       self.cd_old = cd
       self.total_distance += move_dist

       return reward, done

    # calculate the 2D distance between the target and camera
    def cauculate_distance(self,target,current):
       error = abs(np.array(target) - np.array(current))[:2]# only x and y
       distance = math.sqrt(sum(error * error))
       return distance

    def load_env_setting(self,filename):
       f = open(self.get_settingpath(filename))
       filetype = os.path.splitext(filename)[1]
       if filetype == '.json':
           import json
           setting = json.load(f)
       elif filetype == '.yaml':
           import yaml
           setting = yaml.safe_load(f)
       else:
           print('unknown type')

       self.cam_id = setting['cam_id']
       self.target_list = setting['targets']
       self.max_steps = setting['maxsteps']
       self.trigger_th = setting['trigger_th']
       self.height = setting['height']
       self.pitch = setting['pitch']
       self.start_pose_rel = setting['start_pose_rel']
       self.discrete_actions = setting['discrete_actions']
       self.continous_actions = setting['continous_actions']
       self.env_bin = setting['env_bin']
       self.env_name = setting['env_name']
       self.gt_fn = setting['pointcloud_path']
       self.nn_distance_path = self.get_nn_distance(setting['nn_distance_path'])
       self.min_elevation = setting['min_elevation']
       self.max_elevation = setting['max_elevation']
       self.min_distance = setting['min_distance']
       self.max_distance = setting['max_distance']
       print('env name: ', self.env_name)
       print('env id: ', setting['env_bin'])
       return setting

    def get_settingpath(self, filename):
       import gym_unrealcv
       gympath = os.path.dirname(gym_unrealcv.__file__)
       return os.path.join(gympath, 'envs/setting', filename)

    def get_nn_distance(self, filename):
       import gym_unrealcv
       gympath = os.path.dirname(gym_unrealcv.__file__)
       return os.path.join(gympath, filename)

    def compute_chamfer(self, output):
       with tf.device('/gpu:0'):
           sess = K.get_session()
           with sess.as_default():
               _,_,retc,_ = self.nn_distance(output,self.gt_pcl)
               dist_thresh = tf.greater(0.0008, retc)
               dist_mean = tf.reduce_mean(tf.cast(dist_thresh, tf.float32))
               coverage = tf.Tensor.eval(dist_mean)

               return coverage*100

    def nn_distance(self,xyz1,xyz2):
       '''
     Computes the distance of nearest neighbors for a pair of point clouds
     input: xyz1: (batch_size,#points_1,3)  the first point cloud
     input: xyz2: (batch_size,#points_2,3)  the second point cloud
     output: dist1: (batch_size,#point_1)   distance from first to second
     output: idx1:  (batch_size,#point_1)   nearest neighbor from first to second
     output: dist2: (batch_size,#point_2)   distance from second to first
     output: idx2:  (batch_size,#point_2)   nearest neighbor from second to first
       '''
       return self.nn_distance_module.nn_distance(xyz1,xyz2)
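
A compact sketch of how the continuous action is applied in the _step above: the (azimuth, elevation, distance) delta is added to the previous relative pose, distance and elevation are clamped to the limits loaded from the setting file, and azimuth is wrapped into [0, 360) (here with a modulo, equivalent to the explicit checks in the code). The limit values below are placeholders standing in for the min/max entries of the JSON setting.

import numpy as np

def apply_action(pose_prev, action,
                 min_distance=100.0, max_distance=1500.0,    # placeholder limits
                 min_elevation=5.0, max_elevation=85.0):
    # pose_prev and action are (azimuth, elevation, distance) triples
    pose_new = np.asarray(pose_prev, dtype=np.float64) + np.asarray(action, dtype=np.float64)
    pose_new[2] = np.clip(pose_new[2], min_distance, max_distance)    # distance
    pose_new[1] = np.clip(pose_new[1], min_elevation, max_elevation)  # elevation
    pose_new[0] = pose_new[0] % 360.0                                 # azimuth wrap-around
    return pose_new

print(apply_action((350.0, 80.0, 1450.0), (20.0, 10.0, 100.0)))
# -> azimuth wrapped to 10, elevation clamped to 85, distance clamped to 1500
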
Ejemplo n.º 14
0
   def __init__(self,
                setting_file = 'search_rr_plant78.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                ):

     setting = self.load_env_setting(setting_file)
     self.test = test
     self.docker = docker
     self.reset_type = reset_type

     # start unreal env
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
     env_ip,env_port = self.unreal.start(docker)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env)
     self.unrealcv.pitch = self.pitch
    # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.count_steps = 0
     self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

    # define observation space,
    # color, depth, rgbd,...
     self.observation_type = observation_type
     assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd'
     if self.observation_type == 'color':
         state = self.unrealcv.read_image(self.cam_id,'lit')
         self.observation_space = spaces.Box(low=0, high=255, shape=state.shape)
     elif self.observation_type == 'depth':
         state = self.unrealcv.read_depth(self.cam_id)
         self.observation_space = spaces.Box(low=0, high=10, shape=state.shape)
     elif self.observation_type == 'rgbd':
         state = self.unrealcv.get_rgbd(self.cam_id)
          s_high = state.copy()  # copy so the bounds array does not alias the sample observation
         s_high[:,:,-1] = 10.0 #max_depth
         s_high[:,:,:-1] = 255 #max_rgb
         s_low = np.zeros(state.shape)
         self.observation_space = spaces.Box(low=s_low, high=s_high)


     # define reward type
     # distance, bbox, bbox_distance,
     self.reward_type = reward_type


     # set start position
     self.trigger_count  = 0
     current_pose = self.unrealcv.get_pose(self.cam_id)
     current_pose[2] = self.height
     self.unrealcv.set_location(self.cam_id,current_pose[:3])


     self.trajectory = []

     # for reset point generation and selection
     self.reset_module = reset_point.ResetPoint(setting, reset_type, test, current_pose)

     self.reward_function = reward.Reward(setting)
     self.reward_function.dis2target_last, self.targetID_last = self.select_target_by_distance(current_pose, self.targets_pos)

     self.rendering = False
Ejemplo n.º 15
0
class UnrealCvSearch_topview(gym.Env):
   def __init__(self,
                setting_file = 'search_rr_plant78.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                ):

     setting = self.load_env_setting(setting_file)
     self.test = test
     self.docker = docker
     self.reset_type = reset_type

     # start unreal env
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
     env_ip,env_port = self.unreal.start(docker)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env)
     self.unrealcv.pitch = self.pitch
    # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.count_steps = 0
     self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

    # define observation space,
    # color, depth, rgbd,...
     self.observation_type = observation_type
     assert self.observation_type == 'color' or self.observation_type == 'depth' or self.observation_type == 'rgbd'
     if self.observation_type == 'color':
         state = self.unrealcv.read_image(self.cam_id,'lit')
         self.observation_space = spaces.Box(low=0, high=255, shape=state.shape)
     elif self.observation_type == 'depth':
         state = self.unrealcv.read_depth(self.cam_id)
         self.observation_space = spaces.Box(low=0, high=10, shape=state.shape)
     elif self.observation_type == 'rgbd':
         state = self.unrealcv.get_rgbd(self.cam_id)
          s_high = state.copy()  # copy so the bounds array does not alias the sample observation
         s_high[:,:,-1] = 10.0 #max_depth
         s_high[:,:,:-1] = 255 #max_rgb
         s_low = np.zeros(state.shape)
         self.observation_space = spaces.Box(low=s_low, high=s_high)


     # define reward type
     # distance, bbox, bbox_distance,
     self.reward_type = reward_type


     # set start position
     self.trigger_count  = 0
     current_pose = self.unrealcv.get_pose(self.cam_id)
     current_pose[2] = self.height
     self.unrealcv.set_location(self.cam_id,current_pose[:3])


     self.trajectory = []

     # for reset point generation and selection
     self.reset_module = reset_point.ResetPoint(setting, reset_type, test, current_pose)

     self.reward_function = reward.Reward(setting)
     self.reward_function.dis2target_last, self.targetID_last = self.select_target_by_distance(current_pose, self.targets_pos)

     self.rendering = False

   def _step(self, action ):
        info = dict(
            Collision=False,
            Done = False,
            Trigger=0.0,
            Maxstep=False,
            Reward=0.0,
            Action = action,
            Bbox =[],
            Pose = [],
            Trajectory = self.trajectory,
            Steps = self.count_steps,
            Target = [],
            Direction = None,
            Waypoints = self.reset_module.waypoints,
            Color = None,
            Depth = None,
        )


        if self.action_type == 'discrete':
            (velocity, angle, info['Trigger']) = self.discrete_actions[action]
        else:
            (velocity, angle, info['Trigger']) = action
        self.count_steps += 1
        info['Done'] = False



        info['Pose'] = self.unrealcv.get_pose(self.cam_id,'soft')


        # if the robot thinks it has found the target object, the episode is done
        # and it gets a reward based on the bounding box size;
        # only three false triggers are allowed per episode
        if info['Trigger'] > self.trigger_th :
            self.trigger_count += 1
            # get reward
            if self.reward_type == 'bbox_distance' or self.reward_type == 'bbox':
                object_mask = self.unrealcv.read_image(self.cam_id, 'object_mask')
                boxes = self.unrealcv.get_bboxes(object_mask, self.target_list)
                info['Reward'], info['Bbox'] = self.reward_function.reward_bbox(boxes)
            else:
                info['Reward'] = 0

            if info['Reward'] > 0 or self.trigger_count > 3:
                info['Done'] = True
                if info['Reward'] > 0 and self.test == False:
                    self.reset_module.success_waypoint(self.count_steps)

                print('Trigger Terminal!')
        # if collision occurs, the episode is done and reward is -1
        else :
            # take action
            info['Collision'] = self.unrealcv.move_2d(self.cam_id, angle, velocity)
            # get reward
            info['Pose'] = self.unrealcv.get_pose(self.cam_id)
            distance, self.target_id = self.select_target_by_distance(info['Pose'][:3],self.targets_pos)
            info['Target'] = self.targets_pos[self.target_id]

            if self.reward_type=='distance' or self.reward_type == 'bbox_distance':
                info['Reward'] = self.reward_function.reward_distance(distance)
            else:
                info['Reward'] = -0.1

            info['Direction'] = self.get_direction (info['Pose'],self.targets_pos[self.target_id])
            if distance < 50:
                info['Done'] = True
                info['Reward'] = 10
                print ('get location')

            if info['Collision']:
                info['Reward'] = -1
                info['Done'] = True
                self.reset_module.update_dis2collision(info['Pose'])
                print ('Collision!!')

        # update observation
        if self.observation_type == 'color':
            state = info['Color'] = self.unrealcv.read_image(self.cam_id, 'lit')
        elif self.observation_type == 'depth':
            state = info['Depth'] = self.unrealcv.read_depth(self.cam_id)
        elif self.observation_type == 'rgbd':
            info['Color'] = self.unrealcv.read_image(self.cam_id, 'lit')
            info['Depth'] = self.unrealcv.read_depth(self.cam_id)
            state = np.append(info['Color'], info['Depth'], axis=2)

        # limit the max steps of every episode
        if self.count_steps > self.max_steps:
           info['Done'] = True
           info['Maxstep'] = True
           print('Reach Max Steps')

        # save the trajectory
        self.trajectory.append(info['Pose'][:6])
        info['Trajectory'] = self.trajectory

        if info['Done'] and len(self.trajectory) > 5 and not self.test:
            self.reset_module.update_waypoint(info['Trajectory'])

        #print info['Reward']
        if self.rendering:
            show_info(info)
        return state, info['Reward'], info['Done'], info
   def _reset(self, ):
       #self.unrealcv.keyboard('G')
       collision = True
       while collision:
           current_pose = self.reset_module.select_resetpoint()
           self.unrealcv.set_location(self.cam_id, current_pose[:3])
           self.unrealcv.set_rotation(self.cam_id, current_pose[-3:])
           collision = self.unrealcv.move_2d(self.cam_id,0,50)

       self.unrealcv.move_2d(self.cam_id, 0, -50)

       self.random_scene()


       if self.observation_type == 'color':
           state = self.unrealcv.read_image(self.cam_id, 'lit')
       elif self.observation_type == 'depth':
           state = self.unrealcv.read_depth(self.cam_id)
       elif self.observation_type == 'rgbd':
           state = self.unrealcv.get_rgbd(self.cam_id)


       self.trajectory = []
       self.trajectory.append(current_pose)
       self.trigger_count = 0
       self.count_steps = 0

       self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

       #print current_pose,self.targets_pos
       self.reward_function.dis2target_last, self.targetID_last = self.select_target_by_distance(current_pose,
                                                                                                 self.targets_pos)
       return state

   def _close(self):
       if self.docker:
           self.unreal.docker.close()

       #sys.exit()


   def _get_action_size(self):
       return len(self.action)


   def get_distance(self,target,current):

       error = abs(np.array(target)[:2] - np.array(current)[:2])# only x and y
       distance = math.sqrt(sum(error * error))
       return distance


   def select_target_by_distance(self,current_pos, targets_pos):
       # find the nearest target, return distance and targetid
       target_id = list(targets_pos.keys())[0]  # dict.keys() is not indexable in Python 3
       distance_min = self.get_distance(targets_pos[target_id], current_pos)
       for key, target_pos in targets_pos.items():
           distance = self.get_distance(target_pos, current_pos)
           if distance < distance_min:
               target_id = key
               distance_min = distance

       return distance_min,target_id

   def get_direction(self,current_pose,target_pose):
       y_delt = target_pose[1] - current_pose[1]
       x_delt = target_pose[0] - current_pose[0]
       if x_delt == 0:
           x_delt = 0.00001

       angle_now = np.arctan(y_delt / x_delt) / 3.1415926 * 180 - current_pose[-1]

       if x_delt < 0:
           angle_now += 180
       if angle_now < 0:
           angle_now += 360
       if angle_now > 360:
           angle_now -= 360

       return angle_now

   def random_scene(self):
       # change material
       num = ['One', 'Two', 'Three', 'Four', 'Five','Six']
       for i in num:
           self.unrealcv.keyboard(i)

       # randomly place the target objects
       for i in self.target_list:
           z1 = 60
           while z1 > 50:
               x = random.uniform(self.reset_area[0],self.reset_area[1])
               y = random.uniform(self.reset_area[2],self.reset_area[3])
               self.unrealcv.set_object_location(i,[x,y,50])
               time.sleep(0.5)
               [x1,y1,z1] = self.unrealcv.get_obj_location(i)



   def load_env_setting(self,filename):
       f = open(self.get_settingpath(filename))
       filetype = os.path.splitext(filename)[1]
       if filetype == '.json':
           import json
           setting = json.load(f)
       elif filetype == '.yaml':
           import yaml
           setting = yaml.safe_load(f)
       else:
           print('unknown type')

       #print setting
       self.cam_id = setting['cam_id']
       self.pitch = setting['pitch']
       self.target_list = setting['targets']
       self.max_steps = setting['maxsteps']
       self.trigger_th = setting['trigger_th']
       self.height = setting['height']
       self.reset_area = setting['reset_area']

       self.discrete_actions = setting['discrete_actions']
       self.continous_actions = setting['continous_actions']

       return setting


   def get_settingpath(self, filename):
       import gym_unrealcv
       gympath = os.path.dirname(gym_unrealcv.__file__)
       return os.path.join(gympath, 'envs/setting', filename)


   def open_door(self):
       self.unrealcv.keyboard('RightMouseButton')
       time.sleep(2)
       self.unrealcv.keyboard('RightMouseButton') # close the door
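
The RGB-D observation space constructed above encodes per-pixel bounds as arrays: 255 for the three colour channels and 10.0 for the depth channel. Here is a stand-alone sketch of the same construction; the resolution and depth cap are illustrative only.

import numpy as np
from gym import spaces

height, width = 120, 160        # illustrative resolution
max_depth = 10.0                # upper bound used for the depth channel

s_low = np.zeros((height, width, 4))
s_high = np.zeros((height, width, 4))
s_high[:, :, :3] = 255          # RGB channels
s_high[:, :, 3] = max_depth     # depth channel
observation_space = spaces.Box(low=s_low, high=s_high)
print(observation_space.shape)  # (120, 160, 4)
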
Ejemplo n.º 16
0
class UnrealCvSimple(gym.Env):
    # init the Unreal Gym Environment
    def __init__(self,
                setting_file = 'search_rr_plant78.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                augment_env = None,   #texture, target, light
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                resolution = (160,120)
    ):
     setting = self.load_env_setting(setting_file)
     self.cam_id = 0
     print('env_bin: ', setting.env_bin)
     # run virtual environment in docker container
     # self.docker = run_docker.RunDocker()
     # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

     # start unreal env
     docker = False
     # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting.env_bin)
     env_ip,env_port = self.unreal.start(docker,resolution)

     # connect unrealcv client to server
     # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)


     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env,
                              resolution=resolution)
     self.unrealcv.pitch = self.pitch
     self.startpose = self.unrealcv.get_pose(self.cam_id)
     print('start_pose: ', self.startpose)
     #try hardcode start pose
     self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
     # ACTION: (linear velocity ,angle velocity)
     self.ACTION_LIST = [
             (20,  0),
             (20, 15),
             (20,-15),
             (20, 30),
             (20,-30),
     ]
     self.count_steps = 0
     self.max_steps = 100
     self.target_pos = ( -60,   0,   50)
     self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
     state = self.unrealcv.read_image(self.cam_id, 'lit')
     self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)

    # update the environment step by step
    def _step(self, action = 0):
        (velocity, angle) = self.ACTION_LIST[action]
        self.count_steps += 1
        # collision =  self.unrealcv.move(self.cam_id, angle, velocity)
        collision = self.unrealcv.move_2d(self.cam_id, angle, velocity)
        reward, done = self.reward(collision)
        state = self.unrealcv.read_image(self.cam_id, 'lit')

        # limit max step per episode
        if self.count_steps > self.max_steps:
            done = True
            print('Reach Max Steps')

        return state, reward, done, {}

    # reset the environment
    def _reset(self, ):

       x,y,z,_, yaw, _ = self.startpose
       pose = self.startpose



       # self.unrealcv.set_position(self.cam_id, x, y, z)
       # self.unrealcv.set_rotation(self.cam_id, 0, yaw, 0)

       self.unrealcv.set_pose(self.cam_id,self.startpose)

       state = self.unrealcv.read_image(self.cam_id, 'lit')
       self.count_steps = 0
       return  state

    # close docker while closing openai gym
    # def _close(self):
       # self.docker.close()

    # calculate reward according to your task
    def reward(self,collision):
       done = False
       reward = - 0.01

       if collision:
            done = True
            reward = -1
            print('Collision Detected!!')
       else:
            #hard code to 1
            reward = 1
            # distance = self.cauculate_distance(self.target_pos, self.unrealcv.get_pose())
            # if distance < 50:
            #     reward = 10
            #     done = True
            #     print ('Get Target Place!')
       return reward, done

    # calculate the 2D distance between the target and camera
    def cauculate_distance(self,target,current):
       error = abs(np.array(target) - np.array(current))[:2]# only x and y
       distance = math.sqrt(sum(error * error))
       return distance

    def load_env_setting(self,filename):
       f = open(self.get_settingpath(filename))
       filetype = os.path.splitext(filename)[1]
       if filetype == '.json':
           import json
           setting = json.load(f)
       elif filetype == '.yaml':
           import yaml
           setting = yaml.safe_load(f)
       else:
           print('unknown type')

       self.cam_id = setting['cam_id']
       self.target_list = setting['targets']
       self.max_steps = setting['maxsteps']
       self.trigger_th = setting['trigger_th']
       self.height = setting['height']
       self.pitch = setting['pitch']

       self.discrete_actions = setting['discrete_actions']
       self.continous_actions = setting['continous_actions']
       self.env_name = setting['env_name']

       return setting

    def get_settingpath(self, filename):
       import gym_unrealcv
       gympath = os.path.dirname(gym_unrealcv.__file__)
       return os.path.join(gympath, 'envs/setting', filename)
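
Finally, a hedged usage sketch of how an environment such as UnrealCvSimple would typically be driven once it is registered with gym; the environment id below is only a placeholder, and the loop follows the old gym API (reset()/step() returning four values) that all of these examples implement.

import gym
import gym_unrealcv  # importing the package registers the UnrealCV environments

env = gym.make('UnrealSearch-RrDoorDiscrete-v0')  # placeholder id
state = env.reset()
done = False
episode_reward = 0.0
while not done:
    action = env.action_space.sample()            # random policy, for illustration only
    state, reward, done, info = env.step(action)
    episode_reward += reward
print('episode reward:', episode_reward)
env.close()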