Example no. 1
   def __init__(self,
                setting_file = 'search_quadcopter.json',
                reset_type = 'random',       # random
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'color', # 'color', 'depth', 'rgbd'
                reward_type = 'distance', # distance
                docker = False,
                resolution=(84, 84)
                ):

     print(setting_file)
     setting = self.load_env_setting(setting_file)
     self.docker = docker
     self.reset_type = reset_type
     self.roll = 0
     self.pitch = 0

     # start unreal env
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
     env_ip, env_port = self.unreal.start(docker,resolution)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                                port= env_port,
                                ip=env_ip,
                                env=self.unreal.path2env,
                                resolution=resolution)

    # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.count_steps = 0


    # define observation space,
    # color, depth, rgbd,...
     self.observation_type = observation_type
     assert self.observation_type in ('color', 'depth', 'rgbd')
     self.observation_shape = self.unrealcv.define_observation(self.cam_id,self.observation_type)

     # define reward type
     # distance, bbox, bbox_distance,
     self.reward_type = reward_type
     self.reward_function = reward.Reward(setting)


     self.rendering = False

     # init augment env
     if 'random' in self.reset_type or 'hide' in self.reset_type:
         self.show_list = list(self.objects_env)  # copy so hiding objects does not also mutate objects_env
         self.hiden_list = random.sample(self.objects_env, min(15, len(self.objects_env)))
         for x in self.hiden_list:
             self.show_list.remove(x)
             self.unrealcv.hide_obj(x)
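
Every constructor in these examples repeats the same branch on action_type. A minimal, self-contained sketch of that pattern follows; the sample action definitions are placeholders rather than values read from the JSON settings files.

import numpy as np
from gym import spaces

# Placeholder action definitions; in the environments above these come from the settings JSON.
discrete_actions = [(20, 0), (20, 15), (20, -15), (20, 30), (20, -30)]  # (linear, angular velocity)
continous_actions = {'low': [0, -30], 'high': [30, 30]}

def build_action_space(action_type):
    assert action_type in ('discrete', 'continuous')
    if action_type == 'discrete':
        return spaces.Discrete(len(discrete_actions))
    return spaces.Box(low=np.array(continous_actions['low'], dtype=np.float32),
                      high=np.array(continous_actions['high'], dtype=np.float32))

print(build_action_space('discrete'))    # Discrete(5)
print(build_action_space('continuous'))  # a 2-dimensional Box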
Example no. 2
    def __init__(self,
                setting_file = 'search_rr_plant78.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                augment_env = None,   #texture, target, light
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                resolution = (160,120)
    ):
     setting = self.load_env_setting(setting_file)
     self.cam_id = 0
     print('env_bin: ', setting['env_bin'])
     # run the virtual environment in a docker container
     # self.docker = run_docker.RunDocker()
     # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

     # start unreal env
     docker = False
     # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
     env_ip,env_port = self.unreal.start(docker,resolution)

     # connect unrealcv client to server
     # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)


     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env,
                              resolution=resolution)
     self.unrealcv.pitch = self.pitch
     self.startpose = self.unrealcv.get_pose(self.cam_id)
     print('start_pose: ', self.startpose)
     # try a hard-coded start pose
     self.startpose = [750.0, 295.0, 212.3, 356.5, 190.273, 0.0]
     # ACTION: (linear velocity ,angle velocity)
     self.ACTION_LIST = [
             (20,  0),
             (20, 15),
             (20,-15),
             (20, 30),
             (20,-30),
     ]
     self.count_steps = 0
     self.max_steps = 100
     self.target_pos = ( -60,   0,   50)
     self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
     state = self.unrealcv.read_image(self.cam_id, 'lit')
     self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)
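
The load_env_setting helper called in every constructor is not shown in these snippets. Below is a hypothetical minimal version, assuming the settings are plain JSON files; the actual helper in gym-unrealcv also copies some keys onto the instance and may resolve paths differently.

import json
import os

def load_env_setting(setting_file):
    # Assumption: setting files are JSON documents stored next to this module.
    setting_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'setting', setting_file)
    with open(setting_path) as f:
        setting = json.load(f)
    return setting  # a plain dict, so callers read setting['env_bin'], setting['discrete_actions'], etc.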
Example no. 3
    def launch_env(self):
        if self.launched:
            return True
        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin, ENV_MAP=self.env_map)
        env_ip, env_port = self.unreal.start(self.docker, self.resolution)

        # connect UnrealCV
        self.unrealcv = Robotarm(cam_id=self.cam_id,
                                 pose_range=self.pose_range,
                                 port=env_port,
                                 ip=env_ip,
                                 targets=[],
                                 env=self.unreal.path2env,
                                 resolution=self.resolution)
        self.launched = True
        return self.launched
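
Unlike the other examples, this one starts the simulator lazily instead of in __init__. The sketch below isolates that pattern with illustrative names; the real launch_env additionally creates the Robotarm client shown above.

class LazyLaunchEnv:
    """Illustrative only: start the expensive backend on first use, then no-op."""

    def __init__(self):
        self.launched = False

    def launch_env(self):
        if self.launched:
            return True
        # ... start the Unreal binary and connect the UnrealCV client here ...
        self.launched = True
        return self.launched

    def reset(self):
        self.launch_env()  # guarantees the simulator is running before the episode starts
        # ... episode reset logic ...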
Example no. 4
    def __init__(
        self,
        ENV_BIN="RealisticRendering_Win64/WindowsNoEditor/RealisticRendering/Binaries/Win64/RealisticRendering.exe",
        docker=False,
        resolution=(640, 480, 4),
        frameskip=(2, 5)):
        self.PERC = (25 / 160, 5 / 120)
        self.DARK_LIMIT = 70
        self.STD_SHAPE = (110, 110, 4)

        self.cam_id = 0
        self.docker = docker
        self.frameskip = frameskip
        self.resolution = resolution
        self._action_set = [0, 1]
        self.ale = self
        self.np_random = seeding.np_random(0)[0]

        self.unreal = env_unreal.RunUnreal(ENV_BIN=ENV_BIN)
        env_ip, env_port = self.unreal.start(docker, resolution)
        print(f"Port: {env_port}")  # default port: 9000, configured in unrealcv.ini next to ENV_BIN

        self.ACTION_LIST = [0, 1]
        self.count_steps = 0
        self.MAX_STEPS = 10000
        self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        self.prev_ob = np.zeros((110, 110))
        self.cur_ob = np.zeros((110, 110))
        self.return_val = 0.0

        self.unrealcv = unrealcv_basic.UnrealCv(port=env_port,
                                                ip=env_ip,
                                                env=self.unreal.path2env,
                                                cam_id=self.cam_id,
                                                resolution=resolution)
        self.unreal_client = self.unrealcv.client
        self.observation_space = gym.spaces.Box(low=0,
                                                high=255,
                                                shape=self.STD_SHAPE,
                                                dtype=np.uint8)
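
The constructor above stores frameskip=(2, 5) and a legacy gym RandomState, but the skip logic itself is not shown. A common ALE-style interpretation (an assumption, not confirmed by the snippet) repeats each action a random number of frames drawn from that range:

from gym.utils import seeding

np_random = seeding.np_random(0)[0]  # legacy gym seeding: returns a NumPy RandomState
frameskip = (2, 5)

def sample_num_repeats():
    low, high = frameskip
    return np_random.randint(low, high)  # 2, 3 or 4 repeats per action

print([sample_num_repeats() for _ in range(5)])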
    def __init__(
            self,
            setting_file='depth_fusion.json',
            reset_type='waypoint',  # testpoint, waypoint,
            augment_env=None,  #texture, target, light
            test=True,  # if True will use the test_xy as start point
            action_type='discrete',  # 'discrete', 'continuous'
            observation_type='rgbd',  # 'color', 'depth', 'rgbd'
            reward_type='bbox',  # distance, bbox, bbox_distance,
            docker=False,
            # resolution = (84,84)
            # resolution = (640,480),
            resolution=(640, 480),
            log_dir='log/'):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0
        # self.reset_type = 'random'
        self.reset_type = 'test'
        self.log_dir = log_dir
        # gt_pcl = pcl.load('house-000024-gt.ply')

        # run the virtual environment in a docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.observation_type = observation_type
        assert self.observation_type in ('color', 'depth', 'rgbd', 'gray')
        self.observation_shape = self.unrealcv.define_observation(
            self.cam_id, self.observation_type)

        self.startpose = self.unrealcv.get_pose(self.cam_id)

        #try hardcode start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        # self.startpose = [0.0, 707.1068, 707.1067,0.0,270.0, -45.0] # [0,45,1000]
        # self.startpose = [0.0,99.6,8.72,0.0,270.0,-5.0] #[for depth fusion] [0,5,100]
        # self.startpose = [0.0,70.7,70.7,0.0,270.0,-45.0]
        azimuth, elevation, distance = self.start_pose_rel
        # print('start pose rel', azimuth,elevation,distance)
        self.startpose = poseRelToAbs(azimuth, elevation, distance)
        # print('start_pose: ', self.startpose)
        ''' create base frame '''
        poseOrigin(self.log_dir + 'frame-{:06}.pose.txt'.format(1000))
        # ACTION: (Azimuth, Elevation, Distance)

        self.count_steps = 0
        self.count_house_frames = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        self.gt_pclpose_prev = np.array(self.start_pose_rel)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        # state = self.unrealcv.read_image(self.cam_id, 'lit')
        # self.observation_space = gym.spaces.Box(low=0, high=255, shape=state.shape)

        self.nn_distance_module = tf.load_op_library(
            '/home/daryl/gym-unrealcv/gym_unrealcv/envs/utils/tf_nndistance_so.so'
        )
        self.total_distance = 0

        objects = self.unrealcv.get_objects()
        # print('objects', objects)
        self.houses = [obj for obj in objects if obj.startswith('BAT6_')]
        # print('houses', self.houses)
        for house in self.houses:
            self.unrealcv.hide_obj(house)

        self.house_id = 0
        self.unrealcv.show_obj(self.houses[self.house_id])

        gt_dir = '/hdd/AIRSCAN/datasets/house38_44/groundtruth/'

        self.gt_pcl = []
        for i in range(len(self.houses)):
            gt_fn = gt_dir + self.houses[i] + '_sampled_10k.ply'
            # print('gt', gt_fn)
            # gt_pcl = pcl.load(gt_fn)
            gt_pcl = o3d.read_point_cloud(gt_fn)
            gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
            # gt_pcl = pcl.load('/home/daryl/datasets/BAT6_SETA_HOUSE8_WTR_sampled_10k.ply')
            # gt_pcl = np.asarray(gt_pcl)
            self.gt_pcl.append(np.expand_dims(gt_pcl, axis=0))
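
The ground-truth point clouds above are loaded with the legacy o3d.read_point_cloud call. A self-contained sketch of the same conversion using the current Open3D module path is below; the file path is a placeholder.

import numpy as np
import open3d as o3d

# Placeholder path; the constructor above builds these names from gt_dir and the house list.
gt_pcl = o3d.io.read_point_cloud('groundtruth/house_sampled_10k.ply')
gt_points = np.asarray(gt_pcl.points, dtype=np.float32)  # (N, 3) array of XYZ points
gt_batch = np.expand_dims(gt_points, axis=0)             # (1, N, 3), batched for distance ops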
Example no. 6
    def __init__(
            self,
            setting_file,
            reset_type,
            action_type='Discrete',  # 'Discrete', 'Continuous'
            observation_type='Color',  # 'Color', 'Depth', 'Rgbd', 'Gray'
            reward_type='distance',  # distance
            docker=False,
            resolution=(320, 240),
            nav='Random',  # Random, Goal, Internal
    ):

        self.docker = docker
        self.reset_type = reset_type
        self.roll = 0
        self.nav = nav
        setting = load_env_setting(setting_file)
        self.env_name = setting['env_name']
        self.cam_id = setting['cam_id']
        self.target_list = setting['targets']
        self.discrete_actions = setting['discrete_actions']
        self.discrete_actions_player = setting['discrete_actions_player']
        self.continous_actions = setting['continous_actions']
        self.continous_actions_player = setting['continous_actions_player']
        self.max_steps = setting['max_steps']
        self.max_obstacles = setting['max_obstacles']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.objects_env = setting['objects_list']
        if setting.get('reset_area'):
            self.reset_area = setting['reset_area']
        if setting.get('cam_area'):
            self.cam_area = setting['cam_area']

        self.test = False if 'MCRoom' in self.env_name else True

        if setting.get('goal_list'):
            self.goal_list = setting['goal_list']
        if setting.get('camera_loc'):
            self.camera_loc = setting['camera_loc']

        # parameters for rendering map
        self.target_move = setting['target_move']
        self.camera_move = setting['camera_move']
        self.scale_rate = setting['scale_rate']
        self.pose_rate = setting['pose_rate']

        self.background_list = setting['backgrounds']
        self.light_list = setting['lights']
        self.target_num = setting['target_num']
        texture_dir = setting['imgs_dir']
        gym_path = os.path.dirname(gym_unrealcv.__file__)
        texture_dir = os.path.join(gym_path, 'envs', 'UnrealEnv', texture_dir)
        self.textures_list = os.listdir(texture_dir)
        self.safe_start = setting['safe_start']
        self.start_area = self.get_start_area(self.safe_start[0], 100)

        self.num_target = len(self.target_list)
        self.resolution = resolution
        self.num_cam = len(self.cam_id)

        self.cam_height = [setting['height'] for i in range(self.num_cam)]

        for i in range(len(self.textures_list)):
            if self.docker:
                self.textures_list[i] = os.path.join('/unreal',
                                                     setting['imgs_dir'],
                                                     self.textures_list[i])
            else:
                self.textures_list[i] = os.path.join(texture_dir,
                                                     self.textures_list[i])

        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Tracking(cam_id=self.cam_id[0],
                                 port=env_port,
                                 ip=env_ip,
                                 env=self.unreal.path2env,
                                 resolution=resolution)
        self.unrealcv.color_dict = self.unrealcv.build_color_dic(
            self.target_list)
        # define action
        self.action_type = action_type
        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = [
                spaces.Discrete(len(self.discrete_actions))
                for i in range(self.num_cam)
            ]
            player_action_space = [
                spaces.Discrete(len(self.discrete_actions_player))
                for i in range(1)
            ]
        elif self.action_type == 'Continuous':
            self.action_space = [
                spaces.Box(low=np.array(self.continous_actions['low']),
                           high=np.array(self.continous_actions['high']))
                for i in range(self.num_cam)
            ]
            player_action_space = spaces.Discrete(
                len(self.continous_actions_player))

        self.count_steps = 0

        # define observation space,
        # color, depth, rgbd, ...
        self.observation_type = observation_type
        assert self.observation_type in ['Color', 'Depth', 'Rgbd', 'Gray']
        self.observation_space = [
            self.unrealcv.define_observation(self.cam_id[i],
                                             self.observation_type, 'fast')
            for i in range(self.num_cam)
        ]

        self.unrealcv.pitch = self.pitch
        # define reward type
        # distance, bbox, bbox_distance
        self.reward_type = reward_type
        # self.reward_function = reward.Reward(setting)

        if not self.test:
            if self.reset_type >= 0:
                self.unrealcv.init_objects(self.objects_env)

        self.count_close = 0

        self.person_id = 0
        self.unrealcv.set_location(0, [
            self.safe_start[0][0], self.safe_start[0][1],
            self.safe_start[0][2] + 600
        ])
        self.unrealcv.set_rotation(0, [0, -180, -90])
        self.unrealcv.set_obj_location(
            "TargetBP", [-3000, -3000, 220])  # remove the additional target
        if 'Random' in self.nav:
            self.random_agents = [
                RandomAgent(self.continous_actions_player)
                for i in range(self.num_target)
            ]
        if 'Goal' in self.nav:
            if not self.test:
                self.random_agents = [
                    GoalNavAgent(self.continous_actions_player,
                                 self.reset_area, 'Mid')
                    for i in range(self.num_target)
                ]
            else:
                self.random_agents = [
                    GoalNavAgentTest(self.continous_actions_player,
                                     goal_list=self.goal_list)
                    for i in range(self.num_target)
                ]

        self.unrealcv.set_interval(30)

        self.record_eps = 0
        self.max_mask_area = np.ones(
            self.num_cam) * 0.001 * self.resolution[0] * self.resolution[1]
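
In this multi-camera environment action_space is a list with one space per camera, so a joint action is simply one sample per space. A small sketch (camera count and space size are placeholders):

from gym import spaces

num_cam = 4
action_space = [spaces.Discrete(7) for _ in range(num_cam)]  # placeholder sizes
joint_action = [space.sample() for space in action_space]
print(joint_action)  # e.g. [3, 0, 6, 2]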
Example no. 7
    def __init__(
        self,
        setting_file,
        reset_type,
        action_type='Discrete',  # 'Discrete', 'Continuous'
        observation_type='Color',  # 'Color', 'Depth', 'Rgbd'
        reward_type='distance',  # distance
        docker=False,
        resolution=(160, 120)):
        self.docker = docker
        self.reset_type = reset_type
        self.roll = 0

        setting = misc.load_env_setting(setting_file)
        self.env_name = setting['env_name']
        self.cam_id = setting['cam_id']
        self.target_list = setting['targets']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']
        self.max_distance = setting['max_distance']
        self.min_distance = setting['min_distance']
        self.max_direction = setting['max_direction']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.reset_area = setting['reset_area']
        self.background_list = setting['backgrounds']
        self.light_list = setting['lights']
        self.target_num = setting['target_num']
        self.exp_distance = setting['exp_distance']
        self.safe_start = setting['safe_start']

        self.textures_list = misc.get_textures(setting['imgs_dir'],
                                               self.docker)

        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Tracking(cam_id=self.cam_id,
                                 port=env_port,
                                 ip=env_ip,
                                 env=self.unreal.path2env,
                                 resolution=resolution)

        # define action
        self.action_type = action_type
        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'Continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        # define observation space,
        # color, depth, rgbd,...
        self.observation_type = observation_type
        assert self.observation_type in ('Color', 'Depth', 'Rgbd')
        self.observation_space = self.unrealcv.define_observation(
            self.cam_id, self.observation_type, 'fast')

        # define reward type
        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

        self.rendering = False
        self.unrealcv.start_walking(self.target_list[0])
        self.count_steps = 0
        self.count_close = 0
        self.unrealcv.pitch = self.pitch
    def __init__(
        self,
        setting_file='sfmHouseTarget.json',
        reset_type='waypoint',  # testpoint, waypoint,
        augment_env=None,  #texture, target, light
        test=True,  # if True will use the test_xy as start point
        action_type='discrete',  # 'discrete', 'continuous'
        observation_type='rgbd',  # 'color', 'depth', 'rgbd'
        reward_type='bbox',  # distance, bbox, bbox_distance,
        docker=False,
        # resolution = (320,240)
        resolution=(640, 480)):
        setting = self.load_env_setting(setting_file)
        self.cam_id = 0

        # run the virtual environment in a docker container
        # self.docker = run_docker.RunDocker()
        # env_ip, env_dir = self.docker.start(ENV_NAME=ENV_NAME)

        # start unreal env
        docker = False
        # self.unreal = env_unreal.RunUnreal(ENV_BIN="house_withfloor/MyProject2/Binaries/Linux/MyProject2")
        self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect unrealcv client to server
        # self.unrealcv = UnrealCv(self.cam_id, ip=env_ip, env=env_dir)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        # define action
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        self.startpose = self.unrealcv.get_pose(self.cam_id)
        print('start_pose: ', self.startpose)
        # try a hard-coded start pose
        # self.startpose = [750.0, 295.0, 212.3,356.5,190.273, 0.0]
        self.startpose = [0.0, 707.1068, 707.1067, 0.0, 270.0,
                          -45.0]  # [0,45,1000]
        # ACTION: (Azimuth, Elevation, Distance)
        print("start")
        object_mask = self.unrealcv.read_image(self.cam_id,
                                               'object_mask',
                                               mode='file')
        cv2.imshow('object_mask', object_mask)
        cv2.waitKey(0)
        boxes = self.unrealcv.get_bboxes(object_mask, self.target_list)

        # ACTION: (linear velocity ,angle velocity)
        # self.ACTION_LIST = [
        #         (20,  0),
        #         (20, 15),
        #         (20,-15),
        #         (20, 30),
        #         (20,-30),
        # ]
        self.count_steps = 0
        # self.max_steps = 35
        self.target_pos = (-60, 0, 50)
        # self.action_space = gym.spaces.Discrete(len(self.ACTION_LIST))
        state = self.unrealcv.read_image(self.cam_id, 'lit')
        self.observation_space = gym.spaces.Box(low=0,
                                                high=255,
                                                shape=state.shape)

        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)
Example no. 9
    def __init__(self,
                 setting_file,
                 category,
                 reset_type='waypoint',  # testpoint, waypoint,
                 augment_env=None,  # texture, target, light
                 action_type='Discrete',  # 'Discrete', 'Continuous'
                 observation_type='Rgbd',  # 'color', 'depth', 'rgbd'
                 reward_type='bbox',  # distance, bbox, bbox_distance,
                 docker=False,
                 resolution=(160, 120)
                 ):
        #print(setting_file)
        setting = self.load_env_setting(setting_file)
        self.cam_id = setting['cam_id']
        self.target_list = setting['targets'][category]
        self.trigger_th = setting['trigger_th']
        self.height = setting['height']
        self.pitch = setting['pitch']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']

        self.docker = docker
        self.reset_type = reset_type
        self.augment_env = augment_env
        
        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        self.unrealcv = Navigation(cam_id=self.cam_id,
                                   port=env_port,
                                   ip=env_ip,
                                   targets=self.target_list,
                                   env=self.unreal.path2env,
                                   resolution=resolution)
        self.unrealcv.pitch = self.pitch

        #  define action
        self.action_type = action_type
        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'Continuous':
            self.action_space = spaces.Box(low=np.array(self.continous_actions['low']),
                                           high=np.array(self.continous_actions['high']))

        # define observation space,
        # color, depth, rgbd,...
        self.observation_type = observation_type
        assert self.observation_type in ('Color', 'Depth', 'Rgbd')
        self.observation_space = self.unrealcv.define_observation(self.cam_id, self.observation_type, 'direct')

        # define reward type
        # distance, bbox, bbox_distance,
        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

        # set start position
        self.trigger_count = 0
        current_pose = self.unrealcv.get_pose(self.cam_id)
        current_pose[2] = self.height
        self.unrealcv.set_location(self.cam_id, current_pose[:3])

        self.count_steps = 0

        self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

        # for reset point generation and selection
        self.reset_module = reset_point.ResetPoint(setting, reset_type, current_pose)
Example no. 10
import gym
import unrealcv
import numpy as np
import math, random
import matplotlib.pyplot as plt
from gym_unrealcv.envs.utils import env_unreal, unrealcv_basic
import time

ENV_BIN = "RealisticRendering_Win64/WindowsNoEditor/RealisticRendering/Binaries/Win64/RealisticRendering.exe"
docker = False
resolution = (640, 480, 4)
cam_id = 0

unreal = env_unreal.RunUnreal(ENV_BIN=ENV_BIN)
env_ip, env_port = unreal.start(docker, resolution)

ACTION_LIST = [0, 1]
count_steps = 0
max_steps = 1000
action_space = gym.spaces.Discrete(len(ACTION_LIST))

unrealcv = unrealcv_basic.UnrealCv(port=env_port,
                                   ip=env_ip,
                                   env=unreal.path2env,
                                   cam_id=cam_id,
                                   resolution=resolution)
state = unrealcv.read_image(cam_id, 'lit')
observation_space = gym.spaces.Box(low=0,
                                   high=255,
                                   shape=(80, 80, 3),
                                   dtype=np.uint8)
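
The script declares an (80, 80, 3) observation space while rendering at (640, 480, 4), which implies a resize and an alpha-channel drop somewhere downstream. A hedged sketch of that preprocessing (not shown in the source) follows; the frame here is a placeholder array.

import cv2
import numpy as np

frame = np.zeros((480, 640, 4), dtype=np.uint8)  # placeholder for the image returned by read_image
obs = cv2.resize(frame[:, :, :3], (80, 80))      # drop alpha, downscale to the declared shape
print(obs.shape)                                 # (80, 80, 3)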
Example no. 11
    def __init__(
            self,
            setting_file,
            category,
            reset_type='random',  # testpoint, waypoint, random
            augment_env=None,  # texture, target, light
            action_type='Discrete',  # 'Discrete', 'Continuous'
            observation_type='HeightFeatures',  # 'color', 'depth', 'rgbd', 'PoseColor'
            reward_type='mask',  # distance, bbox, bbox_distance, 'mask'
            docker=False,  # True/False
            resolution=(320, 240)  # Res of window
    ):

        # load in settings from json
        setting = self.load_env_setting(setting_file)
        self.cam_id = setting['cam_id']
        self.maxsteps = setting['maxsteps']
        self.stepscale = setting['stepscale']
        self.target_list = setting['targets'][category]
        self.trigger_th = setting['trigger_th']  # Not Sure about trigger
        self.done_th = setting['done_th']
        self.success_th = setting['success_th']
        self.target_object = setting['target_object']
        self.discrete_actions = setting['discrete_actions']
        self.continous_actions = setting['continous_actions']

        self.docker = docker
        self.reset_type = reset_type  # Not Sure about reset_type
        self.augment_env = augment_env  # Not Sure about augment_env

        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker, resolution)

        # connect UnrealCV
        time.sleep(1)
        self.unrealcv = Landing(cam_id=self.cam_id,
                                port=env_port,
                                ip=env_ip,
                                targets=self.target_list,
                                env=self.unreal.path2env,
                                resolution=resolution)
        # self.unrealcv.pitch = self.pitch

        #  define action
        self.action_type = action_type

        assert self.action_type == 'Discrete' or self.action_type == 'Continuous'
        if self.action_type == 'Discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'Continuous':
            self.action_space = spaces.Box(
                low=np.array(self.continous_actions['low']),
                high=np.array(self.continous_actions['high']))

        # define observation space
        observation_types = [
            'Color', 'Depth', 'Rgbd', 'PoseColor', 'HeightFeatures',
            'StepHeightFeatures', 'StepHeightVelocityFeatures'
        ]
        self.observation_type = observation_type
        assert (self.observation_type in observation_types)
        self.observation_space = self.unrealcv.define_observation(
            self.cam_id, self.observation_type, 'direct')

        # define reward type
        self.reward_type = reward_type
        self.reward_function = reward.Reward(setting)

        # set start position
        self.trigger_count = 0
        current_pose = self.unrealcv.get_pose(self.cam_id)

        self.unrealcv.set_location(self.cam_id, current_pose[:3])

        self.count_steps = 0
        self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

        # for reset point generation and selection
        log.info("Initialized with RESET TYPE: {}".format(reset_type))
        self.reset_module = reset_point.ResetPoint(setting, reset_type,
                                                   current_pose)
    def __init__(self,
                setting_file = 'depth_fusion.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                augment_env = None,   #texture, target, light
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                resolution = (640,480),
                log_dir='log/'
    ):
     setting = self.load_env_setting(setting_file)
     self.cam_id = 0
     # self.reset_type = 'random'
     self.reset_type = 'test'
     self.log_dir = log_dir
     gt_pcl = o3d.read_point_cloud('/home/daryl/gym-unrealcv/BAT6_SETA_HOUSE44_OBJ_No_InnerMesh_sampled_10k.ply')
     self.gt_pcl = np.asarray(gt_pcl.points, dtype=np.float32)
     # self.gt_pcl = np.asarray(gt_pcl)
     self.gt_pcl = np.expand_dims(self.gt_pcl,axis=0)

     # start unreal env
     docker = False
     self.unreal = env_unreal.RunUnreal(ENV_BIN=self.env_bin)
     env_ip,env_port = self.unreal.start(docker,resolution)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env,
                              resolution=resolution)
     self.unrealcv.pitch = self.pitch

      # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.observation_type = observation_type
     assert self.observation_type in ('color', 'depth', 'rgbd', 'gray')
     self.observation_shape = self.unrealcv.define_observation(self.cam_id,self.observation_type)

     self.startpose = self.unrealcv.get_pose(self.cam_id)

     azimuth, elevation, distance = self.start_pose_rel
     # print('start pose rel', azimuth,elevation,distance)
     self.startpose = poseRelToAbs(azimuth, elevation, distance)
     # print('start_pose: ', self.startpose)
     ''' create base frame '''
     poseOrigin(self.log_dir+'frame-{:06}.pose.txt'.format(1000))
     # ACTION: (Azimuth, Elevation, Distance)

     self.count_steps = 0
     self.depth_count = 0
     self.depth_thresh = 0.5
     self.total_distance = 0
     # self.max_steps = 35
     self.target_pos = ( -60,   0,   50)
     self.pose_prev = np.array(self.start_pose_rel)
     self.nn_distance_module =tf.load_op_library(self.nn_distance_path)
   def __init__(self,
                setting_file = 'search_rr_plant78.json',
                reset_type = 'waypoint',       # testpoint, waypoint,
                test = True,               # if True will use the test_xy as start point
                action_type = 'discrete',  # 'discrete', 'continuous'
                observation_type = 'rgbd', # 'color', 'depth', 'rgbd'
                reward_type = 'bbox', # distance, bbox, bbox_distance,
                docker = False,
                ):

     setting = self.load_env_setting(setting_file)
     self.test = test
     self.docker = docker
     self.reset_type = reset_type

     # start unreal env
     self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
     env_ip,env_port = self.unreal.start(docker)

     # connect UnrealCV
     self.unrealcv = Navigation(cam_id=self.cam_id,
                              port= env_port,
                              ip=env_ip,
                              targets=self.target_list,
                              env=self.unreal.path2env)
     self.unrealcv.pitch = self.pitch
    # define action
     self.action_type = action_type
     assert self.action_type == 'discrete' or self.action_type == 'continuous'
     if self.action_type == 'discrete':
         self.action_space = spaces.Discrete(len(self.discrete_actions))
     elif self.action_type == 'continuous':
         self.action_space = spaces.Box(low = np.array(self.continous_actions['low']),high = np.array(self.continous_actions['high']))

     self.count_steps = 0
     self.targets_pos = self.unrealcv.build_pose_dic(self.target_list)

    # define observation space,
    # color, depth, rgbd,...
     self.observation_type = observation_type
     assert self.observation_type in ('color', 'depth', 'rgbd')
     if self.observation_type == 'color':
         state = self.unrealcv.read_image(self.cam_id,'lit')
         self.observation_space = spaces.Box(low=0, high=255, shape=state.shape)
     elif self.observation_type == 'depth':
         state = self.unrealcv.read_depth(self.cam_id)
         self.observation_space = spaces.Box(low=0, high=10, shape=state.shape)
     elif self.observation_type == 'rgbd':
         state = self.unrealcv.get_rgbd(self.cam_id)
         s_high = state
         s_high[:,:,-1] = 10.0 #max_depth
         s_high[:,:,:-1] = 255 #max_rgb
         s_low = np.zeros(state.shape)
         self.observation_space = spaces.Box(low=s_low, high=s_high)


     # define reward type
     # distance, bbox, bbox_distance,
     self.reward_type = reward_type


     # set start position
     self.trigger_count  = 0
     current_pose = self.unrealcv.get_pose(self.cam_id)
     current_pose[2] = self.height
     self.unrealcv.set_location(self.cam_id,current_pose[:3])


     self.trajectory = []

     # for reset point generation and selection
     self.reset_module = reset_point.ResetPoint(setting, reset_type, test, current_pose)

     self.reward_function = reward.Reward(setting)
     self.reward_function.dis2target_last, self.targetID_last = self.select_target_by_distance(current_pose, self.targets_pos)

     self.rendering = False
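
The rgbd branch above builds per-pixel bounds for the Box space by editing the sampled frame in place. Below is a standalone version of the same idea, using an explicit copy so the sample frame is left untouched (the original only works because state is not reused afterwards):

import numpy as np
from gym import spaces

state = np.zeros((120, 160, 4), dtype=np.float32)  # placeholder RGB-D frame
s_high = state.copy()
s_high[:, :, -1] = 10.0   # maximum depth value
s_high[:, :, :-1] = 255   # maximum RGB value
s_low = np.zeros(state.shape, dtype=np.float32)
observation_space = spaces.Box(low=s_low, high=s_high)
print(observation_space.shape)  # (120, 160, 4)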
Example no. 14
    def __init__(
        self,
        setting_file='search_rr_plant78.json',
        reset_type='keyboard',  # testpoint, waypoint,
        test=True,  # if True will use the test_xy as start point
        action_type='discrete',  # 'discrete', 'continuous'
        observation_type='color',  # 'color', 'depth', 'rgbd'
        reward_type='move',  # distance, move, move_distance
        docker=False,
    ):

        setting = self.load_env_setting(setting_file)
        self.test = test
        self.docker = docker
        self.reset_type = reset_type

        # start unreal env
        self.unreal = env_unreal.RunUnreal(ENV_BIN=setting['env_bin'])
        env_ip, env_port = self.unreal.start(docker)

        # connect UnrealCV
        self.unrealcv = UnrealCv(cam_id=self.cam_id,
                                 port=env_port,
                                 ip=env_ip,
                                 targets=self.target_list,
                                 env=self.unreal.path2env)

        # define action type
        self.action_type = action_type
        assert self.action_type == 'discrete' or self.action_type == 'continuous'
        if self.action_type == 'discrete':
            self.action_space = spaces.Discrete(len(self.discrete_actions))
        elif self.action_type == 'continuous':
            self.action_low = np.array(self.continous_actions['low'])
            self.action_high = np.array(self.continous_actions['high'])
            self.action_space = spaces.Box(low=self.action_low,
                                           high=self.action_high)

        self.pose_low = np.array(self.pose_range['low'])
        self.pose_high = np.array(self.pose_range['high'])

        self.count_steps = 0

        # define observation space,
        # color, depth, rgbd...
        self.observation_type = observation_type
        assert self.observation_type in ('color', 'depth', 'rgbd', 'measured')
        if self.observation_type == 'color':
            state = self.unrealcv.read_image(self.cam_id, 'lit')
            self.observation_space = spaces.Box(low=0,
                                                high=255,
                                                shape=state.shape)
        elif self.observation_type == 'depth':
            state = self.unrealcv.read_depth(self.cam_id)
            self.observation_space = spaces.Box(low=0,
                                                high=10,
                                                shape=state.shape)
        elif self.observation_type == 'rgbd':
            state = self.unrealcv.get_rgbd(self.cam_id)
            s_high = state
            s_high[:, :, -1] = 10.0
            s_high[:, :, :-1] = 255
            s_low = np.zeros(state.shape)
            self.observation_space = spaces.Box(low=s_low, high=s_high)
        elif self.observation_type == 'measured':
            s_high = [85, 80, 90, 95, 120, 200, 300, 360, 250, 400,
                      360]  # arm_pose, grip_position, target_position
            s_low = [0, -90, -60, -55, -120, -400, -150, 0, -350, -150, 40]
            self.observation_space = spaces.Box(low=np.array(s_low),
                                                high=np.array(s_high))

        # define reward type
        # distance, bbox, bbox_distance,
        self.reward_type = reward_type
        self.rendering = False
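
All of these constructors produce standard Gym environments, so interaction follows the usual reset/step loop (legacy gym API, matching the code above). The environment id below is a placeholder; the real ids are registered by gym_unrealcv on import.

import gym
import gym_unrealcv  # registers the UnrealCV environments

env = gym.make('UnrealSearch-SomeRoomDiscreteColor-v0')  # placeholder id
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
env.close()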