Example #1
def Match(team0, team1):
    '''Run a single battle between two population files.'''
    mod = module.getMod()
    mod.load_team(team0, constants.OBJECT_TYPE_TEAM_0)
    mod.load_team(team1, constants.OBJECT_TYPE_TEAM_1)
    mod.set_speedup(100)
    OpenNero.enable_ai()
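A minimal usage sketch for the Match helper above, assuming the battle mod is already loaded; the two population file names are hypothetical and would have been written earlier by save_team:

# hypothetical population files produced earlier by mod.save_team(...)
Match('team_red.pop', 'team_blue.pop')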
Example #2
def parseInputCommand(content):
    """
    Parse commands from training window
    """
    mod = getMod()
    command, arg = content.attrib['command'], content.attrib['arg']
    # the command and its argument arrive as XML attributes; uppercase commands take an integer argument
    if command.isupper():
        vali = int(arg)
    if command == "LT": mod.ltChange(vali)
    if command == "EE": mod.eeChange(vali)
    if command == "HP": mod.hpChange(vali)
    if command == "SP": mod.set_speedup(vali)
    if command == "save1": mod.save_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "load1": mod.load_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "rtneat": mod.deploy('rtneat')
    if command == "qlearning": mod.deploy('qlearning')
    if command == "pause": OpenNero.disable_ai()
    if command == "resume": OpenNero.enable_ai()
    if command == "example":
        print 'command: example'
        if arg == "start":
            print 'command: example start'
            mod.start_demonstration()
        elif arg == "cancel":
            print 'command: example cancel'
            OpenNero.get_environment().cancel_demonstration()
        elif arg == "confirm":
            print 'command: example confirm'
            OpenNero.get_environment().use_demonstration()
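parseInputCommand only reads the 'command' and 'arg' attributes of the element it receives, so it can be exercised with a hand-built ElementTree element; the tag and attribute values below are illustrative, not taken from the original training window, and a running NERO mod is assumed:

import xml.etree.ElementTree as ET

# stands in for a <message command="SP" arg="50"/> element sent by the training window
content = ET.Element('message', {'command': 'SP', 'arg': '50'})
parseInputCommand(content)  # uppercase command, so arg becomes int(50) and mod.set_speedup(50) runs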
Example #3
 def deploy(self, ai='rtneat', team=constants.OBJECT_TYPE_TEAM_0):
     OpenNero.disable_ai()
     if ai == 'rtneat':
         OpenNero.set_ai('rtneat-%s' % team, None)
     self.environment.remove_all_agents(team)
     for _ in range(constants.pop_size):
         self.spawnAgent(ai=ai, team=team)
     OpenNero.enable_ai()
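deploy() above rebuilds one team from scratch: it clears the team's rtNEAT AI when rtNEAT is requested, removes the team's agents, spawns constants.pop_size fresh agents, and re-enables AI. A hedged call sketch, assuming a NeroModule instance obtained via getMod():

mod = getMod()
mod.deploy(ai='qlearning', team=constants.OBJECT_TYPE_TEAM_1)  # repopulate team 1 with Q-learning agents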
Example #4
 def start_tower1(self): #Problem reduction
     """ start the tower demo """
     self.num_disks = 3
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     self.agent_id = common.addObject("data/shapes/character/BlocksRobot.xml", OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2), type=constants.AGENT_MASK, scale=OpenNero.Vector3f(3,3,3))
     OpenNero.enable_ai()
Example #5
def start_my_planner_2_disk():
    """ start the tower demo """
    getMod().num_disks = 2
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject("data/shapes/character/MyPlanningRobot2.xml", OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX, TowerofHanoi.constants.GRID_DY, 2), type=TowerofHanoi.constants.AGENT_MASK, scale=OpenNero.Vector3f(3,3,3))
    OpenNero.enable_ai()
Example #6
def start_nlp_extended(): #Natural Language Processing
    """ start the tower demo """
    getMod().num_disks = 3
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject("data/shapes/character/MyNLPRobot.xml", OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX, TowerofHanoi.constants.GRID_DY, 2), type=TowerofHanoi.constants.AGENT_MASK, scale=OpenNero.Vector3f(3,3,3))
    OpenNero.enable_ai()
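The tower demo starters above all follow the same template: set the disk count, disable AI, stop the current agent, install a fresh TowerEnvironment, add the solver's robot model, and re-enable AI. A hedged sketch of invoking two of them from the OpenNERO script console, assuming the Tower of Hanoi mod is active:

start_my_planner_2_disk()   # 2-disk tower solved by MyPlanningRobot2
start_nlp_extended()        # 3-disk tower solved by MyNLPRobot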
Example #7
 def start_fps(self):
     print 'start_fps was called'
     if self.first_person_agent is None:
         print 'adding first person agent!'
         self.spawnAgent(agent_xml = 'data/shapes/character/FirstPersonAgent.xml')
         OpenNero.enable_ai()
     else:
         print 'removing first person agent!'
         common.removeObject(self.first_person_agent)
         self.first_person_agent = None
Example #8
 def start_tower3(self): #2 Disk Goal Stack Planner
     """ start the tower demo """
     self.num_disks = 2
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     #self.set_environment(TowerEnvironment())
     self.agent_id = common.addObject("data/shapes/character/BlocksRobot3.xml", OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2), type=constants.AGENT_MASK, scale=OpenNero.Vector3f(3,3,3))
     OpenNero.enable_ai()
Example #9
 def start_fps(self):
     print 'start_fps was called'
     if self.first_person_agent is None:
         print 'adding first person agent!'
         self.spawnAgent(
             agent_xml='data/shapes/character/FirstPersonAgent.xml')
         OpenNero.enable_ai()
     else:
         print 'removing first person agent!'
         common.removeObject(self.first_person_agent)
         self.first_person_agent = None
Example #10
 def add_bots(self, bot_type, num_bots):
     OpenNero.disable_ai()
     num_bots = int(num_bots)
     if bot_type.lower().find("script") >= 0:
         self.distribute_bots(num_bots, "data/shapes/roomba/Roomba.xml")
         OpenNero.enable_ai()
         return True
     elif bot_type.lower().find("rtneat") >= 0:
         self.start_rtneat(num_bots)
         return True
     else:
         return False
Example #11
 def start_rtneat(self, pop_size):
     " start the rtneat learning demo "
     OpenNero.disable_ai()
     #self.environment = RoombaEnvironment(constants.XDIM, constants.YDIM, self)
     #set_environment(self.environment)
     #self.reset_sandbox()
     # Create RTNEAT object
     rbound = OpenNero.FeatureVectorInfo()
     rbound.add_continuous(-sys.float_info.max, sys.float_info.max)
     rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", 2, 1, pop_size, 1.0, rbound, False)
     rtneat.set_weight(0,1)
     OpenNero.set_ai("rtneat",rtneat)
     OpenNero.enable_ai()
     self.distribute_bots(pop_size, "data/shapes/roomba/RoombaRTNEAT.xml")
Example #12
 def start_tower1(self):  #Problem reduction
     """ start the tower demo """
     self.num_disks = 3
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3))
     OpenNero.enable_ai()
Example #13
def parseInputCommand(content):
    """
    Parse commands from training window
    """
    mod = getMod()
    command, arg = content.attrib['command'], content.attrib['arg']
    # the command and its argument arrive as XML attributes; uppercase commands take an integer argument
    if command.isupper():
        vali = int(arg)
    if command == "save1": mod.save_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "load1": mod.load_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "rtneat": mod.deploy('rtneat', 'neat')
    if command == "qlearning": mod.deploy('none', 'qlearning')
    if command == "pause": OpenNero.disable_ai()
    if command == "resume": OpenNero.enable_ai()
Example #14
 def start_tower3(self):  #2 Disk Goal Stack Planner
     """ start the tower demo """
     self.num_disks = 2
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     #self.set_environment(TowerEnvironment())
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot3.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3))
     OpenNero.enable_ai()
Example #15
 def start_demonstration(self):
     '''
     start the keyboard agent to collect demonstration example
     '''
     OpenNero.disable_ai()
     team = constants.OBJECT_TYPE_TEAM_0
     self.curr_team = team
     #self.environment.remove_all_agents(team)
     location = (self.spawn_x[team], self.spawn_y[team], 2)
     agnt = common.addObject("data/shapes/character/steve_keyboard.xml",
                             position=OpenNero.Vector3f(*location),
                             type=team)
     OpenNero.enable_ai()
     self.environment.start_tracing()
     return agnt
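start_demonstration() above spawns a keyboard-controlled agent and begins tracing its actions; the recorded trace is then either kept or discarded through the environment, mirroring the 'example' branch of the command parser in Example #2. A hedged sketch of that flow, assuming a running NERO training mod:

mod = getMod()
agent_id = mod.start_demonstration()             # spawn the keyboard agent and start tracing
# ... drive the agent to record the example, then keep it:
OpenNero.get_environment().use_demonstration()
# or discard it instead:
# OpenNero.get_environment().cancel_demonstration()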
Example #16
 def start_agent_state_space_search(self):  #State-space search
     """ start the tower demo """
     self.num_disks = 3
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     #self.set_environment(TowerEnvironment())
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot2.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3))
     OpenNero.enable_ai()
Example #17
def start_nlp_extended():  #Natural Language Processing
    """ start the tower demo """
    getMod().num_disks = 3
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        "data/shapes/character/MyNLPRobot.xml",
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX,
                          TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
Example #18
def start_my_planner_3_disk():
    """ start the tower demo """
    getMod().num_disks = 3
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        "data/shapes/character/MyPlanningRobot3.xml",
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX,
                          TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
Example #19
 def start_agent_state_space_search(self):  # State-space search
     """ start the tower demo """
     self.num_disks = 3
     OpenNero.disable_ai()
     self.stop_agent()
     env = TowerEnvironment()
     env.initialize_blocks()
     self.set_environment(env)
     # self.set_environment(TowerEnvironment())
     self.agent_id = common.addObject(
         "data/shapes/character/BlocksRobot2.xml",
         OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
         type=constants.AGENT_MASK,
         scale=OpenNero.Vector3f(3, 3, 3),
     )
     OpenNero.enable_ai()
Example #20
 def start_demonstration(self):
     '''
     start the keyboard agent to collect demonstration example
     '''
     OpenNero.disable_ai()
     team = constants.OBJECT_TYPE_TEAM_0
     self.curr_team = team
     #self.environment.remove_all_agents(team)
     location = (self.spawn_x[team], self.spawn_y[team], 2)
     agnt = common.addObject(
         "data/shapes/character/steve_keyboard.xml",
         position = OpenNero.Vector3f(*location),
         type=team)
     OpenNero.enable_ai()
     self.environment.start_tracing()
     return agnt
Example #21
 def add_bots(self, bot_type, num_bots):
     OpenNero.disable_ai()
     num_bots = int(num_bots)
     if bot_type.lower().find("script") >= 0:
         self.distribute_bots(num_bots, "data/shapes/roomba/Roomba.xml")
         OpenNero.enable_ai()
         return True
     elif bot_type.lower().find("rtneat") >= 0:
         self.start_rtneat(num_bots)
         return True
     elif bot_type.lower().find("rlagent") >= 0:
         self.distribute_bots(num_bots, "data/shapes/roomba/RLAgent.xml")
         OpenNero.enable_ai()
         return True
     else:
         return False
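add_bots() dispatches on a substring of the bot type, so any string containing 'script', 'rtneat', or 'rlagent' selects the corresponding agent model. A hedged call sketch; the getMod() accessor for the Roomba mod is an assumption here:

mod = getMod()                # assumed accessor for the running Roomba mod instance
mod.add_bots('rtneat', 10)    # evolve 10 RoombaRTNEAT agents
mod.add_bots('script', 3)     # add 3 scripted Roomba agents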
Example #22
def parseInput(strn):
    if strn == "deploy" or len(strn) < 2:
        return
    mod = getMod()
    # first word is command rest is filename
    loc, val = strn.split(' ', 1)
    vali = 1
    if strn.isupper():
        vali = int(val)
    if loc == "HP": mod.hpChange(vali)
    if loc == "SP": mod.set_speedup(vali)
    if loc == "load1": mod.load_team(val, constants.OBJECT_TYPE_TEAM_0)
    if loc == "load2": mod.load_team(val, constants.OBJECT_TYPE_TEAM_1)
    if loc == "rtneat":
        mod.deploy('rtneat', constants.OBJECT_TYPE_TEAM_0)
        mod.deploy('rtneat', constants.OBJECT_TYPE_TEAM_1)
    if loc == "qlearning":
        mod.deploy('qlearning', constants.OBJECT_TYPE_TEAM_0)
        mod.deploy('qlearning', constants.OBJECT_TYPE_TEAM_1)
    if loc == "pause": OpenNero.disable_ai()
    if loc == "resume": OpenNero.enable_ai()
Example #23
def parseInput(strn):
    if strn == "deploy" or len(strn) < 2:
        return
    mod = getMod()
    # first word is command rest is filename
    loc, val = strn.split(' ',1)
    vali = 1
    if strn.isupper():
        vali = int(val)
    if loc == "HP": mod.hpChange(vali)
    if loc == "SP": mod.set_speedup(vali)
    if loc == "load1": mod.load_team(val, constants.OBJECT_TYPE_TEAM_0)
    if loc == "load2": mod.load_team(val, constants.OBJECT_TYPE_TEAM_1)
    if loc == "rtneat":
        mod.deploy('rtneat', constants.OBJECT_TYPE_TEAM_0)
        mod.deploy('rtneat', constants.OBJECT_TYPE_TEAM_1)
    if loc == "qlearning":
        mod.deploy('qlearning', constants.OBJECT_TYPE_TEAM_0)
        mod.deploy('qlearning', constants.OBJECT_TYPE_TEAM_1)
    if loc == "pause": OpenNero.disable_ai()
    if loc == "resume": OpenNero.enable_ai()
Example #24
 def start_rtneat(self):
     """ start the rtneat learning stuff"""
     self.spawnAgent()
     OpenNero.enable_ai()
Example #25
class NeroModule:
    def __init__(self):
        self.environment = None
        self.agent_id = None

        self.flag_loc = None
        self.flag_id = None

        self.set_speedup(constants.DEFAULT_SPEEDUP)

        x = constants.XDIM / 2.0
        y = constants.YDIM / 3.0
        self.spawn_x = {}
        self.spawn_y = {}
        self.set_spawn(x, y, constants.OBJECT_TYPE_TEAM_0)
        self.set_spawn(x, 2 * y, constants.OBJECT_TYPE_TEAM_1)

        # Bounds for sensors in neural network and advice language. These bounds are
        # used to convert sensor values between network and advice.
        self.sbounds_network = OpenNero.FeatureVectorInfo()
        self.sbounds_advice = OpenNero.FeatureVectorInfo()

        # Networks have better learning bias when cube sensors produce values in the
        # range [-1, 1], but the range [0, 1] is more intuitive in the advice
        # language.  Wall sensors use the same range [0, 1] for both network and advice.
        # The sense() method in the ForageEnvironment class uses these network bounds
        # to scale the sensor values.
        for i in range(constants.N_SENSORS):
            self.sbounds_network.add_continuous(0, 1)
            self.sbounds_advice.add_continuous(0, 1)

        # The last sensor is the bias, which always takes the value 1 (upper bound).
        self.sbounds_network.add_continuous(0, 1)
        self.sbounds_advice.add_continuous(0, 1)

        print 'sbounds_network', self.sbounds_network

    def setup_map(self):
        """
        setup the test environment
        """
        OpenNero.disable_ai()

        if self.environment:
            error("Environment already created")
            return

        # create the environment - this also creates the rtNEAT object
        self.environment = self.create_environment()
        OpenNero.set_environment(self.environment)

        # world walls
        height = constants.HEIGHT + constants.OFFSET
        common.addObject("data/shapes/cube/Cube.xml",
                         OpenNero.Vector3f(constants.XDIM / 2, 0, height),
                         OpenNero.Vector3f(0, 0, 90),
                         scale=OpenNero.Vector3f(constants.WIDTH,
                                                 constants.XDIM,
                                                 constants.HEIGHT * 2),
                         label="World Wall0",
                         type=constants.OBJECT_TYPE_OBSTACLE)
        common.addObject("data/shapes/cube/Cube.xml",
                         OpenNero.Vector3f(0, constants.YDIM / 2, height),
                         OpenNero.Vector3f(0, 0, 0),
                         scale=OpenNero.Vector3f(constants.WIDTH,
                                                 constants.YDIM,
                                                 constants.HEIGHT * 2),
                         label="World Wall1",
                         type=constants.OBJECT_TYPE_OBSTACLE)
        common.addObject("data/shapes/cube/Cube.xml",
                         OpenNero.Vector3f(constants.XDIM, constants.YDIM / 2,
                                           height),
                         OpenNero.Vector3f(0, 0, 0),
                         scale=OpenNero.Vector3f(constants.WIDTH,
                                                 constants.YDIM,
                                                 constants.HEIGHT * 2),
                         label="World Wall2",
                         type=constants.OBJECT_TYPE_OBSTACLE)
        common.addObject("data/shapes/cube/Cube.xml",
                         OpenNero.Vector3f(constants.XDIM / 2, constants.YDIM,
                                           height),
                         OpenNero.Vector3f(0, 0, 90),
                         scale=OpenNero.Vector3f(constants.WIDTH,
                                                 constants.XDIM,
                                                 constants.HEIGHT * 2),
                         label="World Wall3",
                         type=constants.OBJECT_TYPE_OBSTACLE)

        # Add an obstacle wall in the middle
        common.addObject("data/shapes/cube/Cube.xml",
                         OpenNero.Vector3f(constants.XDIM / 2,
                                           constants.YDIM / 2, height),
                         OpenNero.Vector3f(0, 0, 90),
                         scale=OpenNero.Vector3f(constants.WIDTH,
                                                 constants.YDIM / 4,
                                                 constants.HEIGHT * 2),
                         label="World Wall4",
                         type=constants.OBJECT_TYPE_OBSTACLE)

        # Add some trees
        for i in (0.25, 0.75):
            for j in (0.25, 0.75):
                # don't collide with trees - they are over 500 triangles each
                common.addObject("data/shapes/tree/Tree.xml",
                                 OpenNero.Vector3f(i * constants.XDIM,
                                                   j * constants.YDIM,
                                                   constants.OFFSET),
                                 OpenNero.Vector3f(0, 0, 0),
                                 scale=OpenNero.Vector3f(1, 1, 1),
                                 label="Tree %d %d" % (10 * i, 10 * j),
                                 type=constants.OBJECT_TYPE_LEVEL_GEOM)
                # collide with their trunks represented with cubes instead
                common.addObject(
                    "data/shapes/cube/Cube.xml",
                    OpenNero.Vector3f(i * constants.XDIM, j * constants.YDIM,
                                      constants.OFFSET),
                    OpenNero.Vector3f(0, 0, 0),
                    scale=OpenNero.Vector3f(1, 1, constants.HEIGHT),
                    type=constants.OBJECT_TYPE_OBSTACLE)

        # Add the surrounding Environment
        common.addObject("data/terrain/NeroWorld.xml",
                         OpenNero.Vector3f(constants.XDIM / 2,
                                           constants.YDIM / 2, 0),
                         scale=OpenNero.Vector3f(1.2, 1.2, 1.2),
                         label="NeroWorld",
                         type=constants.OBJECT_TYPE_LEVEL_GEOM)

        return True

    def create_environment(self):
        return NeroEnvironment.NeroEnvironment()

    def remove_flag(self):
        if self.flag_id:
            common.removeObject(self.flag_id)

    def change_flag(self, loc):
        if self.flag_id:
            common.removeObject(self.flag_id)
        self.flag_loc = OpenNero.Vector3f(*loc)
        self.flag_id = common.addObject("data/shapes/cube/BlueCube.xml",
                                        self.flag_loc,
                                        label="Flag",
                                        scale=OpenNero.Vector3f(1, 1, 10),
                                        type=constants.OBJECT_TYPE_FLAG)

    def place_basic_turret(self, loc):
        return common.addObject("data/shapes/character/steve_basic_turret.xml",
                                OpenNero.Vector3f(*loc),
                                type=constants.OBJECT_TYPE_TEAM_1)

    #The following is run when one of the Deploy buttons is pressed
    def deploy(self, ai='rtneat', team=constants.OBJECT_TYPE_TEAM_0):
        OpenNero.disable_ai()
        if ai == 'rtneat':
            OpenNero.set_ai('rtneat-%s' % team, None)
        self.environment.remove_all_agents(team)
        for _ in range(constants.pop_size):
            self.spawnAgent(ai=ai, team=team)
        OpenNero.enable_ai()

    #The following is run when the Save button is pressed
    def save_team(self, location, team=constants.OBJECT_TYPE_TEAM_0):
        # if there are rtneat agents in the environment, save them as a group.
        rtneat = OpenNero.get_ai("rtneat-%s" % team)
        if rtneat:
            location = rtneat.save_population(str(location))
        # then, check whether there are any qlearning agents, and save them.
        with open(location, 'a') as handle:
            for agent in self.environment.teams[team]:
                if agent.group == 'Agent' and agent.ai == 'qlearning':
                    handle.write('\n\n%s' % agent.to_string())
                if hasattr(agent, 'stats'):
                    handle.write('\n\n%s' % agent.stats())

    #The following is run when the Load button is pressed
    def load_team(self, location, team=constants.OBJECT_TYPE_TEAM_0):
        OpenNero.disable_ai()

        self.environment.remove_all_agents(team)

        if not os.path.exists(location):
            print location, 'does not exist, cannot load population'
            return

        # parse out different agents from the population file.
        contents = ''
        try:
            try:
                handle = gzip.open(location)
                contents = handle.read()
            finally:
                handle.close()
        except Exception, e:
            with open(location) as handle:
                contents = handle.read()

        if not contents:
            print 'cannot read', location, 'skipping'
            return

        rtneat, qlearning, stats = self._split_population(
            contents.splitlines(True))

        print 'qlearning agents:', qlearning.count('Approximator')

        # load any qlearning agents first, subtracting them from the population
        # size that rtneat will need to manage. since we cannot deserialize an
        # agent's state until after it's been added to the world, we put the
        # serialized chunk for the agent into a map, then NeroEnvironment#step
        # takes care of the deserialization.
        pop_size = constants.pop_size
        if qlearning.strip():
            for chunk in re.split(r'\n\n+', qlearning):
                if not chunk.strip():
                    continue
                id = self.spawnAgent(ai='qlearning', team=team)
                self.environment.agents_to_load[id] = chunk
                pop_size -= 1
                if pop_size == 0:
                    break

        print 'rtneat agents:', rtneat.count('genomeend')

        # load any rtneat agents from the file, as a group.
        if pop_size > 0 and rtneat.strip():
            tf = tempfile.NamedTemporaryFile(delete=False)
            tf.write(rtneat)
            tf.close()
            print tf.name
            OpenNero.set_ai(
                "rtneat-%s" % team,
                OpenNero.RTNEAT(tf.name, "data/ai/neat-params.dat", pop_size,
                                rtneat_rewards(), False))
            os.unlink(tf.name)
            while pop_size > 0:
                self.spawnAgent(ai='rtneat', team=team)
                pop_size -= 1

        OpenNero.enable_ai()
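A hedged round-trip sketch for the save_team / load_team methods above, assuming a running NERO mod and a hypothetical file name. save_team appends any Q-learning agents and per-agent stats to the file that rtNEAT writes, and load_team splits that file back apart when it repopulates the team:

mod = getMod()
mod.save_team('my_team.pop', constants.OBJECT_TYPE_TEAM_0)   # write team 0 to disk
mod.load_team('my_team.pop', constants.OBJECT_TYPE_TEAM_0)   # later: clear team 0 and respawn it from the file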