Example 1
 def load_team(self, location, team=constants.OBJECT_TYPE_TEAM_0):
     NERO.module.NeroModule.load_team(self, location, team)
     rtneat = OpenNero.get_ai('rtneat-%s' % team)
     if rtneat:
         rtneat.set_lifetime(sys.maxint)
         rtneat.disable_evolution()
     OpenNero.disable_ai()  # don't run until button
Example 2
 def set_display_hint(self):
     """
     set the display hint above the agent's head (toggled with F2)
     """
     display_hint = constants.getDisplayHint()
     if display_hint:
         if display_hint == 'fitness':
             self.state.label = '%.2f' % self.org.fitness
         elif display_hint == 'time alive':
             self.state.label = str(self.org.time_alive)
         elif display_hint == 'hit points':
             self.state.label = ''.join('.' for i in range(
                 int(5 * OpenNero.get_environment().get_hitpoints(self))))
         elif display_hint == 'id':
             self.state.label = str(self.org.genome.id)
         elif display_hint == 'champion':
             if self.org.champion:
                 self.state.label = 'champ!'
             else:
                 self.state.label = ''
         elif display_hint == 'rank':
             self.state.label = str(self.org.rank)
         elif display_hint == 'debug':
             self.state.label = str(OpenNero.get_environment().get_state(self))
         else:
             self.state.label = '?'
     else:
         # clear the label the first time we switch away from displaying stuff
         if self.state.label:
             self.state.label = ""
Example 3
 def distribute_bots(self, num_bots, bot_type):
     """distribute bots so that they don't overlap"""
     # make a number of tiles to stick bots in
     N_TILES = 10
     tiles = [(r, c) for r in range(N_TILES) for c in range(N_TILES)]
     random.shuffle(tiles)
     bots_to_add = num_bots
     while bots_to_add > 0:
         (r, c) = tiles.pop()  # random tile
         # position within the tile, plus a random offset
         x = r * constants.XDIM / float(N_TILES)
         y = c * constants.YDIM / float(N_TILES)
         x += random.random() * constants.XDIM * 0.5 / N_TILES
         y += random.random() * constants.YDIM * 0.5 / N_TILES
         if in_bounds(x, y):
             agent_id = common.addObject(
                 bot_type,
                 OpenNero.Vector3f(x, y, 0),
                 scale=OpenNero.Vector3f(1, 1, 1),
                 type=constants.OBJECT_TYPE_ROOMBA,
                 collision=constants.OBJECT_TYPE_ROOMBA)
             self.agent_ids.append(agent_id)
             bots_to_add -= 1
         else:
             pass  # if a tile caused a bad starting point, we won't see it again
Example 4
 def load_rtneat(self, location, pop, team=constants.OBJECT_TYPE_TEAM_0):
     location = os.path.relpath("/") + location
     if os.path.exists(location):
         OpenNero.set_ai("rtneat-%s" % team, OpenNero.RTNEAT(
                 str(location), "data/ai/neat-params.dat",
                 constants.pop_size,
                 OpenNero.get_environment().agent_info.reward))
Example 5
    def get_agent_info(self, agent):
        """
        return a blueprint for a new agent
        """
        for a in constants.WALL_RAY_SENSORS:
            agent.add_sensor(OpenNero.RaySensor(
                    math.cos(math.radians(a)), math.sin(math.radians(a)), 0,
                    constants.WALL_SENSOR_RADIUS,
                    constants.OBJECT_TYPE_OBSTACLE,
                    False))
        for a0, a1 in constants.FLAG_RADAR_SENSORS:
            agent.add_sensor(OpenNero.RadarSensor(
                    a0, a1, -90, 90, constants.MAX_VISION_RADIUS,
                    constants.OBJECT_TYPE_FLAG,
                    False))
        sense = constants.OBJECT_TYPE_TEAM_0
        if agent.get_team() == sense:
            sense = constants.OBJECT_TYPE_TEAM_1
        for a0, a1 in constants.ENEMY_RADAR_SENSORS:
            agent.add_sensor(OpenNero.RadarSensor(
                    a0, a1, -90, 90, constants.MAX_VISION_RADIUS,
                    sense,
                    False))
        for a in constants.TARGETING_SENSORS:
            agent.add_sensor(OpenNero.RaySensor(
                    math.cos(math.radians(a)), math.sin(math.radians(a)), 0,
                    constants.TARGET_SENSOR_RADIUS,
                    sense,
                    False))

        return agent.info
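A quick sanity check of the ray directions used above (a small sketch; only the standard math module is involved, and the angle is chosen purely for illustration):

import math
a = 90  # e.g., one of the WALL_RAY_SENSORS angles
# cos(90 deg) is ~0.0 and sin(90 deg) is 1.0, so this ray points along +y
print math.cos(math.radians(a)), math.sin(math.radians(a))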
Example 6
    def set_environment(self, env):
        self.environment = env
        for id in self.wall_ids:  # delete the walls
            common.removeObject(id)
        del self.wall_ids[:]  # clear the ids
        OpenNero.set_environment(env)

        # add three tall white-cube markers along the y = 2 grid row
        for gx in (1, 2, 3):
            common.addObject(
                "data/shapes/cube/WhiteCube.xml",
                OpenNero.Vector3f(gx * constants.GRID_DX, 2 * constants.GRID_DY, 0 * constants.GRID_DZ),
                OpenNero.Vector3f(0, 0, 0),
                scale=OpenNero.Vector3f(0.25, 0.25, 4),
            )
Example 7
 def __init__(self, team_type):
     NeroTeam.__init__(self, team_type)
     self.pop = OpenNero.Population()
     self.rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", self.pop,
                                   constants.DEFAULT_LIFETIME_MIN,
                                   constants.DEFAULT_EVOLVE_RATE)
     self.generation = 1
Example 8
 def get_agent_info(self, agent):
     """
     return a blueprint for a new agent
     """
     if isinstance(agent, FirstPersonAgent):
         # don't add built in sensors for now
         return self.agent_info
     for a in constants.WALL_RAY_SENSORS:
         agent.add_sensor(
             OpenNero.RaySensor(math.cos(math.radians(a)),
                                math.sin(math.radians(a)), 0, 50,
                                constants.OBJECT_TYPE_OBSTACLE, False))
     for a0, a1 in constants.FLAG_RADAR_SENSORS:
         agent.add_sensor(
             OpenNero.RadarSensor(a0, a1, -90, 90,
                                  constants.MAX_VISION_RADIUS,
                                  constants.OBJECT_TYPE_FLAG, False))
      # sense the opposing team (mirrors the team check in the other examples)
      sense = constants.OBJECT_TYPE_TEAM_0
      if agent.get_team() == sense:
          sense = constants.OBJECT_TYPE_TEAM_1
      for a0, a1 in constants.ENEMY_RADAR_SENSORS:
         agent.add_sensor(
             OpenNero.RadarSensor(a0, a1, -90, 90,
                                  constants.MAX_VISION_RADIUS, sense,
                                  False))
     return self.agent_info
Example 9
 def set_display_hint(self):
     """
     set the display hint above the agent's head (toggled with F2)
     """
     display_hint = constants.getDisplayHint()
     if display_hint:
         org = self.get_org()
         if display_hint == 'fitness':
             self.state.label = '%.2f' % org.fitness
         elif display_hint == 'time alive':
             self.state.label = str(org.time_alive)
         elif display_hint == 'hit points':
             self.state.label = ''.join('.' for i in range(
                 int(5 * OpenNero.get_environment().get_hitpoints(self))))
         elif display_hint == 'id':
             self.state.label = str(org.id)
         elif display_hint == 'species id':
             self.state.label = str(org.species_id)
         elif display_hint == 'champion':
             if org.champion:
                 self.state.label = 'champ!'
             else:
                 self.state.label = ''
         elif display_hint == 'rank':
             self.state.label = str(org.rank)
         elif display_hint == 'debug':
             self.state.label = str(
                 OpenNero.get_environment().get_state(self))
         else:
             self.state.label = '?'
     else:
         # clear the label the first time we switch away from displaying stuff
         if self.state.label:
             self.state.label = ""
Example 11
    def spawnAgent(self, team=constants.OBJECT_TYPE_TEAM_0, ai=None):
        """
        This is the function run when an agent already in the field
        causes the generation of a new agent.

        Returns the id of the spawned agent.
        """
        if not self.environment:
            return

        if ai == 'rtneat' and not OpenNero.get_ai('rtneat-%s' % team):
            self.start_rtneat(team)
        if ai == 'rtneatq' and not OpenNero.get_ai('rtneatq-%s' % team):
            self.start_rtneatq(team)

        self.curr_team = team
        color = constants.TEAM_LABELS[team]

        dx = random.randrange(constants.XDIM / 20) - constants.XDIM / 40
        dy = random.randrange(constants.XDIM / 20) - constants.XDIM / 40
        return common.addObject(
            "data/shapes/character/steve_%s_%s.xml" % (color, ai),
            OpenNero.Vector3f(self.spawn_x[team] + dx, self.spawn_y[team] + dy,
                              2),
            type=team)
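For a concrete sense of the spawn jitter above: if XDIM were 200 (a value assumed here purely for illustration), random.randrange(200 / 20) returns an integer in 0..9, so dx and dy each land in [-5, 4] units around the team's spawn point.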
Example 12
def Match(team0, team1):
    '''Run a single battle between two population files.'''
    mod = module.getMod()
    mod.load_team(team0, constants.OBJECT_TYPE_TEAM_0)
    mod.load_team(team1, constants.OBJECT_TYPE_TEAM_1)
    mod.set_speedup(100)
    OpenNero.enable_ai()
Example 13
    def is_episode_over(self, agent):
        """
        is the current episode over for the agent?
        """
        if agent.group == 'Turret':
            return False

        team = agent.get_team()
        state = self.get_state(agent)
        dead = self.hitpoints > 0 and state.total_damage >= self.hitpoints
        old = self.lifetime > 0 and agent.step > 0 and 0 == agent.step % self.lifetime

        if agent.ai == 'qlearning':
            if dead or old:
                # simulate a respawn by moving this agent towards the spawn location.
                state.total_damage = 0
                state.randomize()
                agent.state.position = copy.copy(state.initial_position)
                agent.state.rotation = copy.copy(state.initial_rotation)
                agent.teleport()
            return False

        rtneat = OpenNero.get_ai("rtneat-%s" % team)
        if not rtneat:
            rtneat = OpenNero.get_ai("rtneatq-%s" % team)
        orphaned = rtneat and not rtneat.has_organism(agent)

        return orphaned or dead or old
Example 14
 def set_display_hint(self):
     """
     set the display hint above the agent's head (toggled with F2)
     """
     display_hint = constants.getDisplayHint()
     if display_hint:
         if display_hint == 'fitness':
             self.state.label = '%.2g' % self.fitness[0]
         elif display_hint == 'time alive':
             self.state.label = str(self.step)
         elif display_hint == 'hit points':
             self.state.label = ''.join('.' for i in range(
                 int(5 * OpenNero.get_environment().get_hitpoints(self))))
         elif display_hint == 'id':
             self.state.label = str(self.state.id)
         elif display_hint == 'species id':
             self.state.label = 'q'
         elif display_hint == 'debug':
             self.state.label = str(
                 OpenNero.get_environment().get_state(self))
         else:
             self.state.label = '?'
     else:
         # clear the label the first time we switch away from displaying stuff
         if self.state.label:
             self.state.label = ""
Example 15
    def closest_enemy(self, agent):
        """
        Return the nearest enemy within firing range and line of sight, or None.
        """
        friends, foes = self.getFriendFoe(agent)
        if not foes:
            return None

        min_enemy = None
        min_dist = constants.MAX_FIRE_ACTION_RADIUS
        pose = self.get_state(agent).pose
        color = OpenNero.Color(128, 0, 0, 0)
        for f in foes:
            f_pose = self.get_state(f).pose
            dist = self.distance(pose, f_pose)
            if dist < min_dist:
                source_pos = agent.state.position
                enemy_pos = f.state.position
                source_pos.z = source_pos.z + 5
                enemy_pos.z = enemy_pos.z + 5
                obstacles = OpenNero.getSimContext().findInRay(
                    source_pos,
                    enemy_pos,
                    constants.OBJECT_TYPE_OBSTACLE,
                    False,
                    color,
                    color)
                if len(obstacles) == 0:
                    min_enemy = f
                    min_dist = dist
        return min_enemy
Example 17
    def __init__(self):
        self.environment = None
        self.agent_id = None

        self.flag_loc = None
        self.flag_id = None

        self.set_speedup(constants.DEFAULT_SPEEDUP)

        x = constants.XDIM / 2.0
        y = constants.YDIM / 3.0
        self.spawn_x = {}
        self.spawn_y = {}
        self.set_spawn(x, y, constants.OBJECT_TYPE_TEAM_0)
        self.set_spawn(x, 2 * y, constants.OBJECT_TYPE_TEAM_1)

        # Bounds for sensors in neural network and advice language. These bounds are
        # used to convert sensor values between network and advice.
        self.sbounds_network = OpenNero.FeatureVectorInfo()
        self.sbounds_advice = OpenNero.FeatureVectorInfo()

        # Networks have better learning bias when cube sensors produce values in the
        # range [-1, 1], but the range [0, 1] is more intuitive in the advice
        # language.  Wall sensors use the same range [0, 1] for both network and advice.
        # The sense() method in the ForageEnvironment class uses these network
        # bounds to scale the sensor values.
        for i in range(constants.N_SENSORS):
            self.sbounds_network.add_continuous(0, 1)
            self.sbounds_advice.add_continuous(0, 1)

        # The last sensor is the bias, which always takes the value 1 (upper bound).
        self.sbounds_network.add_continuous(0, 1)
        self.sbounds_advice.add_continuous(0, 1)

        print 'sbounds_network', self.sbounds_network
Example 18
 def snapshot(self):
     print 'snapshot was called'
     if os.access('Hw5/snapshots/color/', os.W_OK):
         filename = 'Hw5/snapshots/color/' + str(time.time()*100)[:-2] + '.png'
         OpenNero.getSimContext().getActiveCamera().snapshot(filename)
         # Launch python script to show this image
         os.system('python Hw5/show_image.py "' + filename + '"')
Example 19
 def setup_sandbox(self):
     """
     setup the sandbox environment
     """
     OpenNero.getSimContext().delay = 0.0
     self.environment = RoombaEnvironment(constants.XDIM, constants.YDIM)
     OpenNero.set_environment(self.environment)
Example 20
 def spawnAgent(self,
                team=constants.OBJECT_TYPE_TEAM_0,
                agent_xml=None,
                first_person=True,
                z_pos=2):
     """
     This is the function run when an agent already in the field
     causes the generation of a new agent.
     """
     if agent_xml is None:
         color = 'blue'
          if team == constants.OBJECT_TYPE_TEAM_1:
             color = 'red'
         agent_xml = "data/shapes/character/steve_still_%s.xml" % color
     if first_person:
         agent_pos = OpenNero.Vector3f(self.spawn_x[team],
                                       self.spawn_y[team], z_pos)
         self.curr_team = team
         self.first_person_agent = common.addObject(agent_xml,
                                                    agent_pos,
                                                    type=team)
     else:
         pos = self.random_spawn_position(team)
         agent_pos = OpenNero.Vector3f(pos[0], pos[1], z_pos)
         common.addObject(agent_xml, agent_pos, type=team)
Example 21
 def ltChange(self, value):
     self.environment.lifetime = value
     for team in constants.TEAMS:
         rtneat = OpenNero.get_ai("rtneat-%s" % team)
         if not rtneat:
             rtneat = OpenNero.get_ai("rtneatq-%s" % team)
         if rtneat:
             rtneat.set_lifetime(value)
Example 22
 def snapshot(self):
     print 'snapshot was called'
     if os.access('Hw5/snapshots/color/', os.W_OK):
         filename = 'Hw5/snapshots/color/' + str(
             time.time() * 100)[:-2] + '.png'
         OpenNero.getSimContext().getActiveCamera().snapshot(filename)
         # Launch python script to show this image
         os.system('python Hw5/show_image.py "' + filename + '"')
Example 23
 def mark(self, x, y, marker):
     """ Mark a position (x, y) with the specified color """
     # remove the previous marker at this position, if any
     self.unmark(x, y)
     # add a new marker object and remember its ID
     id = common.addObject(marker,
                           OpenNero.Vector3f(x, y, -1),
                           OpenNero.Vector3f(0, 0, 0),
                           OpenNero.Vector3f(0.5, 0.5, 0.5),
                           type=constants.OBJECT_TYPE_MARKER)
     self.marker_map[(x, y)] = id
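The unmark method called above is not shown on this page; a minimal counterpart, assuming the same marker_map bookkeeping and the common.removeObject helper used in the other examples, might look like:

 def unmark(self, x, y):
     """ Remove the marker at (x, y), if there is one (hypothetical sketch) """
     id = self.marker_map.pop((x, y), None)
     if id is not None:
         common.removeObject(id)  # remove the marker object from the simulation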
Example 24
 def ltChange(self, value):
     self.environment.lifetime = value
     for team in constants.TEAMS:
         rtneat = OpenNero.get_ai("rtneat-%s" % team)
         if not rtneat:
             rtneat = OpenNero.get_ai("rtneatq-%s" % team)
         if rtneat:
             rtneat.set_lifetime(value)
Example 25
 def set_weight(self, key, value):
     self.reward_weights[key] = value
     for team in self.teams:
         rtneat = OpenNero.get_ai("rtneat-%s" % team)
         if not rtneat:
             rtneat = OpenNero.get_ai("rtneatq-%s" % team)
         if rtneat:
             rtneat.set_weight(constants.FITNESS_INDEX[key], value)
Example 26
 def load_rtneat(self, location, pop, team=constants.OBJECT_TYPE_TEAM_0):
     location = os.path.relpath("/") + location
     if os.path.exists(location):
         OpenNero.set_ai(
             "rtneat-%s" % team,
             OpenNero.RTNEAT(str(location), "data/ai/neat-params.dat",
                             constants.pop_size,
                             OpenNero.get_environment().agent_info.reward))
Example 27
 def add_wall():
     object_ids.append(
         common.addObject("data/shapes/cube/Cube.xml",
                          OpenNero.Vector3f(
                              location.x, location.y,
                              constants.HEIGHT + constants.OFFSET),
                          OpenNero.Vector3f(0, 0, 90),
                          scale=OpenNero.Vector3f(1, 30, constants.HEIGHT),
                          type=1))
Example 28
 def add_wall():
     obj_id = common.addObject(
         "data/shapes/cube/Cube.xml",
         OpenNero.Vector3f(location.x, location.y,
                           constants.HEIGHT + constants.OFFSET),
         OpenNero.Vector3f(0, 0, 90),
         scale=OpenNero.Vector3f(5, 30, constants.HEIGHT * 2),
         type=1)
     object_ids[obj_id] = set(['rotate', 'move', 'scale', 'remove'])
Example 29
def addObject(templateFile,
              position,
              rotation=OpenNero.Vector3f(0, 0, 0),
              scale=OpenNero.Vector3f(1, 1, 1),
              label="",
              type=0,
              collision=0):
    return OpenNero.getSimContext().addObject(templateFile, position, rotation,
                                              scale, label, collision, type)
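For reference, a call to this wrapper might look like the following sketch; the template path and keyword arguments are borrowed from the change_flag example below rather than prescribed by the wrapper itself:

flag_id = addObject("data/shapes/cube/BlueCube.xml",
                    OpenNero.Vector3f(10, 20, 0),
                    scale=OpenNero.Vector3f(1, 1, 10),
                    label="Flag",
                    type=constants.OBJECT_TYPE_FLAG)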
Example 30
 def change_flag(self, loc):
     if self.flag_id:
         common.removeObject(self.flag_id)
     self.flag_loc = OpenNero.Vector3f(*loc)
     self.flag_id = common.addObject("data/shapes/cube/BlueCube.xml",
                                     self.flag_loc,
                                     label="Flag",
                                     scale=OpenNero.Vector3f(1, 1, 10),
                                     type=constants.OBJECT_TYPE_FLAG)
Example 31
 def __init__(self, agent):
     self.id = agent.state.id
     self.agent = agent
     self.pose = (0, 0, 0)  # current x, y, heading
     self.prev_pose = (0, 0, 0)
     self.initial_position = OpenNero.Vector3f(0, 0, 0)
     self.initial_rotation = OpenNero.Vector3f(0, 0, 0)
     self.total_damage = 0
     self.curr_damage = 0
Example 32
def toggle_ai_callback():
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        module.getMod().start_rtneat()
        ai_state = "Started"
    elif ai_state == "Started":
        ai_state = "Paused"
    elif ai_state == "Paused":
        ai_state = "Started"
Example 33
def toggle_ai_callback():
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        module.getMod().start_rtneat()
        ai_state = 'Started'
    elif ai_state == 'Started':
        ai_state = 'Paused'
    elif ai_state == 'Paused':
        ai_state = 'Started'
Example 34
def start_my_planner_2_disk():
    """ start the tower demo """
    getMod().num_disks = 2
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        "data/shapes/character/MyPlanningRobot2.xml",
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX,
                          TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
Example 35
 def start_tower1(self):  # Problem reduction
     """ start the tower demo """
     self.num_disks = 3
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3))
     OpenNero.enable_ai()
Example 36
 def start_fps(self):
     print 'start_fps was called'
     if self.first_person_agent is None:
         print 'adding first person agent!'
          self.spawnAgent(agent_xml='data/shapes/character/FirstPersonAgent.xml')
         OpenNero.enable_ai()
     else:
         print 'removing first person agent!'
         common.removeObject(self.first_person_agent)
         self.first_person_agent = None
Example 37
File: main.py Project: bradyz/cs343
def start_nlp_extended():  # Natural Language Processing
    """ start the tower demo """
    getMod().num_disks = 3
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        "data/shapes/character/MyNLPRobot.xml",
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX,
                          TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
Example 38
    def __init__(self):
        """
        Create the environment
        """
        OpenNero.Environment.__init__(self)

        self.curr_id = 0
        self.max_steps = 20
        self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM)
        self.states = {}
        self.teams = {}
        self.script = 'Hw5/menu.py'

        abound = OpenNero.FeatureVectorInfo() # actions
        sbound = OpenNero.FeatureVectorInfo() # sensors
        rbound = OpenNero.FeatureVectorInfo() # rewards

        # actions
        abound.add_continuous(-1, 1) # forward/backward speed (gets multiplied by constants.MAX_MOVEMENT_SPEED)
        abound.add_continuous(-constants.MAX_TURN_RADIANS, constants.MAX_TURN_RADIANS) # left/right turn (in radians)

        # sensor dimensions
        for a in range(constants.N_SENSORS):
            sbound.add_continuous(0, 1)

        # Rewards
        # The environment returns the raw fitness dimensions each step; the
        # ScoreHelper later combines them (e.g., via Z-scores) into the final
        # rtNEAT fitness.
        for f in constants.FITNESS_DIMENSIONS:
            # we don't care about the bounds of the individual dimensions
            rbound.add_continuous(-sys.float_info.max, sys.float_info.max) # range for reward

        # initialize the rtNEAT algorithm parameters
        # input layer has enough nodes for all the observations plus a bias
        # output layer has enough values for all the actions
        # population size matches ours
        # 1.0 is the weight initialization noise
        rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
                                 constants.N_SENSORS,
                                 constants.N_ACTIONS,
                                 constants.pop_size,
                                 1.0,
                                 rbound, False)

        key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0
        OpenNero.set_ai(key, rtneat)
        print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))

        # set the initial lifetime
        lifetime = module.getMod().lt
        rtneat.set_lifetime(lifetime)
        print 'rtNEAT lifetime:', lifetime

        self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
Example 39
 def reset_sandbox(self=None):
     """
     reset the sandbox and refill with stuff to vacuum
     """
     for id in self.marker_map.values():
         common.removeObject(id)  # remove from the simulation; the dict is cleared below
     self.marker_map = {}
     for id in self.agent_ids:
         common.removeObject(id)  # remove from the simulation; the list is cleared below
     self.agent_ids = []
     OpenNero.reset_ai()
Example 40
 def start_fps(self):
     print 'start_fps was called'
     if self.first_person_agent is None:
         print 'adding first person agent!'
         self.spawnAgent(
             agent_xml='data/shapes/character/FirstPersonAgent.xml')
         OpenNero.enable_ai()
     else:
         print 'removing first person agent!'
         common.removeObject(self.first_person_agent)
         self.first_person_agent = None
Example 41
 def start_tower3(self):  # 2-disk goal stack planner
     """ start the tower demo """
     self.num_disks = 2
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot3.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3))
     OpenNero.enable_ai()
Example 42
 def add_bots(self, bot_type, num_bots):
     OpenNero.disable_ai()
     num_bots = int(num_bots)
     if bot_type.lower().find("script") >= 0:
         self.distribute_bots(num_bots, "data/shapes/roomba/Roomba.xml")
         OpenNero.enable_ai()
         return True
     elif bot_type.lower().find("rtneat") >= 0:
         self.start_rtneat(num_bots)
         return True
     else:
         return False
Example 43
    def __init__(self):
        """
        Create the environment
        """
        OpenNero.Environment.__init__(self)

        self.curr_id = 0
        self.max_steps = 20
        self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM)
        self.states = {}
        self.teams = {}
        self.script = 'Hw5/menu.py'

        abound = OpenNero.FeatureVectorInfo()  # actions
        sbound = OpenNero.FeatureVectorInfo()  # sensors
        rbound = OpenNero.FeatureVectorInfo()  # rewards

        # actions
        abound.add_continuous(
            -1, 1
        )  # forward/backward speed (gets multiplied by constants.MAX_MOVEMENT_SPEED)
        abound.add_continuous(
            -constants.MAX_TURN_RADIANS,
            constants.MAX_TURN_RADIANS)  # left/right turn (in radians)

        # sensor dimensions
        for a in range(constants.N_SENSORS):
            sbound.add_continuous(0, 1)

        # Rewards
        # The environment returns the raw fitness dimensions each step; the
        # ScoreHelper later combines them (e.g., via Z-scores) into the final
        # rtNEAT fitness.
        for f in constants.FITNESS_DIMENSIONS:
            # we don't care about the bounds of the individual dimensions
            rbound.add_continuous(-sys.float_info.max,
                                  sys.float_info.max)  # range for reward

        # initialize the rtNEAT algorithm parameters
        # input layer has enough nodes for all the observations plus a bias
        # output layer has enough values for all the actions
        # population size matches ours
        # 1.0 is the weight initialization noise
        rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
                                 OpenNero.Population(), constants.pop_size, 1)

        key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0
        OpenNero.set_ai(key, rtneat)
        print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))

        # set the initial lifetime
        lifetime = module.getMod().lt
        rtneat.set_lifetime(lifetime)
        print 'rtNEAT lifetime:', lifetime

        self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
Example 44
def parseInputCommand(content):
    """
    Parse commands from training window
    """
    mod = getMod()
    command, arg = content.attrib['command'], content.attrib['arg']
    # uppercase commands carry an integer argument
    if command.isupper():
        vali = int(arg)
    if command == "LT": mod.ltChange(vali)
    if command == "EE": mod.eeChange(vali)
    if command == "HP": mod.hpChange(vali)
    if command == "SP": mod.set_speedup(vali)
    if command == "save1": mod.save_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "load1": mod.load_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "rtneat": mod.deploy('rtneat')
    if command == "qlearning": mod.deploy('qlearning')
    if command == "pause": OpenNero.disable_ai()
    if command == "resume": OpenNero.enable_ai()
    if command == "example":
        print 'command: example'
        if arg == "start":
            print 'command: example start'
            mod.start_demonstration()
        elif arg == "cancel":
            print 'command: example cancel'
            OpenNero.get_environment().cancel_demonstration()
        elif arg == "confirm":
            print 'command: example confirm'
            OpenNero.get_environment().use_demonstration()
Example 45
    def setup_map(self):
        """
        setup the test environment
        """
        OpenNero.disable_ai()

        if self.environment:
            error("Environment already created")
            return
        self.environment = self.create_environment()
        OpenNero.set_environment(self.environment)
        self.environment.setup()

        return True
Example 47
def parseInputCommand(content):
    """
    Parse commands from training window
    """
    mod = getMod()
    command, arg = content.attrib['command'], content.attrib['arg']
    # uppercase commands carry an integer argument
    if command.isupper():
        vali = int(arg)
    if command == "save1": mod.save_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "load1": mod.load_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "rtneat": mod.deploy('rtneat', 'neat')
    if command == "qlearning": mod.deploy('none', 'qlearning')
    if command == "pause": OpenNero.disable_ai()
    if command == "resume": OpenNero.enable_ai()
Example 48
 def start_agent_state_space_search(self):  # State-space search
     """ start the tower demo """
     self.num_disks = 3
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot2.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3),
     )
     OpenNero.enable_ai()
Example 49
    def closest_enemy(self, agent):
        """
        Return the nearest enemy within firing range and line of sight, or None.
        """
        friends, foes = self.get_friend_foe(agent)
        if not foes:
            return None

        min_enemy = None
        min_dist = constants.MAX_FIRE_ACTION_RADIUS
        pose = self.get_state(agent).pose
        color = OpenNero.Color(128, 0, 0, 0)
        for f in foes:
            f_pose = self.get_state(f).pose
            dist = self.distance(pose, f_pose)
            if dist < min_dist:
                source_pos = agent.state.position
                enemy_pos = f.state.position
                source_pos.z = source_pos.z + 5
                enemy_pos.z = enemy_pos.z + 5
                obstacles = OpenNero.getSimContext().findInRay(
                    source_pos, enemy_pos, constants.OBJECT_TYPE_OBSTACLE, False, color, color
                )
                if len(obstacles) == 0:
                    min_enemy = f
                    min_dist = dist
        return min_enemy
Example 50
 def start_demonstration(self):
     '''
     start the keyboard agent to collect demonstration example
     '''
     OpenNero.disable_ai()
     team = constants.OBJECT_TYPE_TEAM_0
     self.curr_team = team
     #self.environment.remove_all_agents(team)
     location = (self.spawn_x[team], self.spawn_y[team], 2)
     agnt = common.addObject(
         "data/shapes/character/steve_keyboard.xml",
          position=OpenNero.Vector3f(*location),
         type=team)
     OpenNero.enable_ai()
     self.environment.start_tracing()
     return agnt
Example 51
 def setAdvice(self, advice, team=constants.OBJECT_TYPE_TEAM_0):
     """ advice for rtneat agents """
     # if there are rtneat agents in the environment, give them some advice
     rtneat = OpenNero.get_ai("rtneat-%s" % team)
     if not rtneat:
         rtneat = OpenNero.get_ai("rtneatq-%s" % team)
     if rtneat:
         try:
             rtneat.advice = OpenNero.Advice(
                 advice, rtneat, constants.N_SENSORS + 1,
                 constants.N_ACTIONS, True,
                 self.sbounds_network, self.sbounds_advice)
         except RuntimeError as err:
             err_msg = \
                 '<message><content class="edu.utexas.cs.nn.opennero.ErrorMessage"' + \
                 ' name="%s" text="%s" /></message>' % ('Advice Error', err.message)
             getServer().write_data(err_msg)
             return
     for agent in self.environment.teams[team]:
         agent.has_new_advice = True
Example 52
def ModTick(dt):
    if OpenNero.getAppConfig().rendertype == 'null':
        return
    script_server = module.getServer()
    data = script_server.read_data()
    while data:
        module.parseInput(data.strip())
        data = script_server.read_data()
Example 53
 def set_weight(self, key, value):
     i = constants.FITNESS_INDEX[key]
     value = (value - 100) / 100.0 # value in [-1, 1]
     for team in (constants.OBJECT_TYPE_TEAM_0, constants.OBJECT_TYPE_TEAM_1):
         rtneat = OpenNero.get_ai("rtneat-%s" % team)
         if rtneat:
             rtneat.set_weight(i, value)
     print key, value
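For example, a slider value of 150 maps to (150 - 100) / 100.0 = 0.5; the endpoints 0 and 200 map to -1.0 and 1.0, so the full slider range covers the [-1, 1] weight interval.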
Example 54
 def deploy(self, ai='rtneat', team=constants.OBJECT_TYPE_TEAM_0):
     OpenNero.disable_ai()
     if ai == 'rtneat':
         OpenNero.set_ai('rtneat-%s' % team, None)
     if ai == 'rtneatq':
         OpenNero.set_ai('rtneatq-%s' % team, None)
     self.environment.remove_all_agents(team)
     for _ in range(constants.pop_size):
         self.spawnAgent(ai=ai, team=team)
     OpenNero.enable_ai()
Example 55
def ClientMain():
    # physics off, ai off by default
    # disable_physics()
    OpenNero.disable_ai()

    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return

    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)

    # common.addSkyBox("data/sky/irrlicht2")

    # setup the gui
    CreateGui(common.getGuiManager())

    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 15000
    camZoomSpeed = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    recenter_cam = recenter(cam)
    recenter_cam()

    # create the io map
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
Example 56
    def maybe_spawn(self, agent):
        '''Spawn more agents if there are more to spawn.'''
        if agent.ai not in ('rtneat', 'rtneatq') or agent.group != 'Agent':
            return

        team = agent.get_team()
        rtneat = OpenNero.get_ai('rtneat-%s' % team)
        if not rtneat:
            rtneat = OpenNero.get_ai('rtneatq-%s' % team)
        if not rtneat or not rtneat.ready():
            return

        friends, foes = self.getFriendFoe(agent)
        if len(friends) >= constants.pop_size:
            return

        if agent is tuple(f for f in friends if f.ai == agent.ai)[0]:
            module.getMod().spawnAgent(team=team, ai=agent.ai)