Example #1
    def enable_display(self):
        # Lazily create the fastsim display window on first request and draw the current state.
        if not self.display:
            self.display = fs.Display(self.map, self.robot)
            self.display.update()
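
For context, a minimal usage sketch of the method above. The class name SimpleNavEnv and the XML file name are placeholders that do not appear in this snippet; the point is that the fastsim window is created lazily, so calling the method again is a no-op.

    # Sketch under assumptions: `SimpleNavEnv` and the XML path are hypothetical;
    # only enable_display() itself is taken from the example above.
    env = SimpleNavEnv("LS_maze_hard.xml", display=False)  # start headless
    env.enable_display()  # creates the fastsim window and draws the current state
    env.enable_display()  # no-op: self.display is already set
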
Example #2
    def __init__(self,
                 xml_env,
                 reward_func="distance_multigoal",
                 display=False,
                 light_sensor_range=200.,
                 light_sensor_mode="realistic"):
        # Fastsim setup
        # XML files typically reference the map by a path relative to their own location,
        # so temporarily chdir into the XML directory while parsing the settings.
        xml_dir, xml_file = os.path.split(xml_env)
        if (xml_dir == ""):
            xml_dir = "./"
        oldcwd = os.getcwd()
        os.chdir(xml_dir)
        settings = fs.Settings(xml_file)
        os.chdir(oldcwd)

        self.map = settings.map()
        self.background = np.expand_dims(
            netpbmfile.imread(os.path.join(xml_dir, 'maze_hard.pbm')), axis=-1)
        self.background_size = [200, 200]
        self.map_size = [600, 600]
        self.robot = settings.robot()

        if (display):
            self.display = fs.Display(self.map, self.robot)
        else:
            self.display = None

        self.maxVel = 4  # Same as in the C++ sferes2 experiment

        # Lasers
        lasers = self.robot.get_lasers()
        n_lasers = len(lasers)
        if (n_lasers > 0):
            self.maxSensorRange = lasers[0].get_range()  # Assume at least 1 laser ranger
        else:
            self.maxSensorRange = 0.

        # Light sensors
        self.ls_mode = light_sensor_mode
        lightsensors = self.robot.get_light_sensors()
        n_lightsensors = len(lightsensors)
        if (n_lightsensors > 0):
            self.maxLightSensorRange = light_sensor_range  # Assume at least 1 light sensor
        else:
            self.maxLightSensorRange = 0.

        # State
        self.initPos = self.get_robot_pos()
        self.current_pos = self.get_robot_pos()
        self.old_pos = self.get_robot_pos()

        self.v1_motor_order = 0.
        self.v2_motor_order = 0.

        self.goal = self.map.get_goals()
        self.goalPos = [[goal.get_x(), goal.get_y()] for goal in self.goal]
        self.goalRadius = [goal.get_diam() / 2. for goal in self.goal]

        self.observation_space = spaces.Box(
            low=np.array(
                [0.] * n_lasers + [0.] * 2 +
                [0. if self.ls_mode == "realistic" else -1.] * n_lightsensors,
                dtype=np.float32),
            high=np.array(
                [self.maxSensorRange] * n_lasers +
                [1. if self.ls_mode == "realistic" else self.maxLightSensorRange] * 2 +
                [1.] * n_lightsensors,
                dtype=np.float32))
        self.action_space = spaces.Box(low=-self.maxVel,
                                       high=self.maxVel,
                                       shape=(2, ),
                                       dtype=np.float32)
        self.max_steps = 2000
        self.step_count = 0
        # Reward
        self.reward_area = None
        if (reward_func not in reward_functions):
            raise RuntimeError("Unknown reward '%s'" % str(reward_func))
        else:
            self.reward_func = reward_functions[reward_func]
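
For context, a minimal usage sketch of the constructor above. The class name SimpleNavEnv, the XML file name, and the reset()/step() calls are assumptions about the rest of the class (only the constructor, observation_space, action_space, and max_steps appear in the snippet); the sketch just shows how the spaces defined here would be consumed.

    # Sketch under assumptions: `SimpleNavEnv` is a placeholder for the class whose
    # __init__ is shown above, and reset()/step() are assumed to follow the usual gym API.
    env = SimpleNavEnv("LS_maze_hard.xml",              # hypothetical settings file
                       reward_func="distance_multigoal",
                       display=False,
                       light_sensor_mode="realistic")

    print(env.observation_space.shape)  # (n_lasers + 2 + n_lightsensors,)
    print(env.action_space)             # two motor commands bounded by +/- maxVel

    obs = env.reset()
    for _ in range(env.max_steps):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            break
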