def ClientMain():
    """Client entry point: configure camera, light, input map and GUI.

    Fix: the light-source position used inconsistent argument spacing
    (`Vector3f(500,-500,1000)`) compared to the sibling ClientMain; the
    call itself is unchanged.
    """
    # disable physics and AI updates at first
    # disable_physics()
    disable_ai()
    # initialize random number generator with current time
    seed()
    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 1500
    camZoomSpeed = 100
    cam = getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setPosition(Vector3f(100, 100, 50))
    cam.setTarget(Vector3f(1, 1, 1))
    cam.setFarPlane(1000)
    cam.setEdgeScroll(False)
    # let the mod build its sandbox world before lights/IO are attached
    getMod().setup_sandbox()
    # add a light source
    getSimContext().addLightSource(Vector3f(500, -500, 1000), 1500)
    # create the io map
    getSimContext().setInputMapping(createInputMapping())
    # setup the gui
    CreateGui(getSimContext().getGuiManager())
def ClientMain():
    """Client entry point: set up AI state, RNG, camera, light, IO and GUI."""
    # physics stays untouched; AI updates are off until the user toggles them
    # disable_physics()
    disable_ai()
    # seed the random number generator from the current time
    seed()
    # camera parameters: rotation, movement and zoom speeds
    rotate_speed = 100
    move_speed = 1500
    zoom_speed = 100
    cam = getSimContext().addCamera(rotate_speed, move_speed, zoom_speed)
    cam.setPosition(Vector3f(100, 100, 50))
    cam.setTarget(Vector3f(1, 1, 1))
    cam.setFarPlane(1000)
    cam.setEdgeScroll(False)
    # let the mod populate its sandbox world
    getMod().setup_sandbox()
    # single light source above the scene
    getSimContext().addLightSource(Vector3f(500, -500, 1000), 1500)
    # install the input (keyboard/mouse) mapping
    getSimContext().setInputMapping(createInputMapping())
    # finally build the GUI
    CreateGui(getSimContext().getGuiManager())
def randomize(self):
    """Move the initial spawn pose to a random offset around the team spawn point."""
    # uniform offset in [-SPAWN_RANGE, SPAWN_RANGE) on each axis
    spread = constants.SPAWN_RANGE
    dx = random.randrange(spread * 2) - spread
    dy = random.randrange(spread * 2) - spread
    mod = module.getMod()
    team = self.agent.get_team()
    self.initial_position.x = mod.spawn_x[team] + dx
    self.initial_position.y = mod.spawn_y[team] + dy
    # reset both current and previous pose to the new spawn pose
    new_pose = (self.initial_position.x,
                self.initial_position.y,
                self.initial_rotation.z)
    self.prev_pose = self.pose = new_pose
def toggle_ai_callback():
    """Toggle AI updates; the first toggle also launches rtNEAT."""
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        # first activation ever: start the rtNEAT algorithm
        module.getMod().start_rtneat()
        ai_state = "Started"
    elif ai_state in ("Started", "Paused"):
        # flip between the two running states on subsequent toggles
        ai_state = "Paused" if ai_state == "Started" else "Started"
def toggle_ai_callback():
    """Toggle AI updates; the very first toggle also starts rtNEAT."""
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        # first activation: launch the rtNEAT algorithm and mark it running
        module.getMod().start_rtneat()
        ai_state = 'Started'
    elif ai_state == 'Started':
        ai_state = 'Paused'
    elif ai_state == 'Paused':
        ai_state = 'Started'
def maybe_spawn(self, agent):
    '''Spawn more agents if there are more to spawn.

    Fix: guard against OpenNero.get_ai(...) returning None (the rtNEAT
    instance may not exist yet for this team), which previously raised
    AttributeError on `.ready()`; the sibling implementation already
    guards this.
    '''
    # do not spawn just because a first person agent is on the field
    if isinstance(agent, FirstPersonAgent):
        return
    team = agent.get_team()
    rtneat = OpenNero.get_ai("rtneat-%s" % team)
    # no rtNEAT for this team yet, or it is not ready: nothing to spawn
    if rtneat is None or not rtneat.ready():
        return
    friends, foes = self.getFriendFoe(agent)
    # [None] placeholder keeps friends[0] valid when the team is empty
    friends = tuple(friends or [None])
    # only the team's first agent triggers a spawn, so at most one new
    # agent is requested per step
    if (agent.group == 'Agent' and
        agent is friends[0] and
        len(friends) < constants.pop_size):
        module.getMod().spawnAgent(team)
def ClientMain():
    """NERO client entry point: map setup, light, GUI, camera and input map."""
    # physics off, ai off by default
    # disable_physics()
    OpenNero.disable_ai()
    # bail back to the hub if the map cannot be set up
    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return
    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)
    # common.addSkyBox("data/sky/irrlicht2")
    # setup the gui
    CreateGui(common.getGuiManager())
    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 15000
    camZoomSpeed = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    # recenter() returns a closure that resets the camera; run it once now
    recenter_cam = recenter(cam)
    recenter_cam()
    # create the io map; SPACE re-centers the camera
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
def initialize_blocks(self): from module import getMod num_disks = getMod().num_disks if num_disks >= 1: if not self.get_block_state('blue'): self.add_block("data/shapes/cube/BlueCube.xml", 0, 1, 0, 5, 'blue') else: self.set_block('blue',0,1,0,5) bstate = self.get_block_state('blue') if num_disks >= 2: if not self.get_block_state('green'): self.add_block('data/shapes/cube/GreenCube.xml', 0, 1, 1, 4, 'green') else: self.set_block('green',0,1,1,4) gstate = self.get_block_state('green') if num_disks >= 3: if not self.get_block_state('yellow'): self.add_block('data/shapes/cube/YellowCube.xml', 0, 1, 2, 3, 'yellow') else: self.set_block('yellow',0,1,2,3) ystate = self.get_block_state('yellow') if num_disks >= 4: if not self.get_block_state('red'): self.add_block('data/shapes/cube/RedCube.xml', 0, 1, 3, 2, 'red', scaler = (1.0/2.5)) else: self.set_block('red',0,1,3,2) elif self.get_block_state('red'): self.remove_block('red') rstate = self.get_block_state('red') if num_disks >= 5: if not self.get_block_state('white'): self.add_block('data/shapes/cube/BlueCube.xml', 0, 1, 4, 1, 'white') else: self.set_block('white',0,1,4,1) elif self.get_block_state('white'): self.remove_block('white') wstate = self.get_block_state('white') bstate.above = gstate gstate.below = bstate if num_disks > 2: gstate.above = ystate if num_disks > 2: ystate.below = gstate if num_disks > 3: ystate.above = rstate if num_disks > 3: rstate.below = ystate if num_disks > 4: rstate.above = wstate if num_disks > 4: wstate.below = rstate print 'Initialized TowerEnvironment'
def ClientMain():
    """NERO client entry point: map setup, light, GUI, camera and input map."""
    # physics off, AI off by default
    # disable_physics()
    OpenNero.disable_ai()
    # go back to the hub screen if the map cannot be built
    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return
    # single light source above the arena
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)
    # common.addSkyBox("data/sky/irrlicht2")
    # GUI first, then the camera
    CreateGui(common.getGuiManager())
    rotate_speed = 100
    move_speed = 15000
    zoom_speed = 200
    cam = OpenNero.getSimContext().addCamera(rotate_speed, move_speed, zoom_speed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    # build the recenter closure and apply it once for the initial view
    recenter_cam = recenter(cam)
    recenter_cam()
    # input map: SPACE re-centers the camera
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
def maybe_spawn(self, agent):
    '''Spawn more agents if there are more to spawn.'''
    # only rtNEAT-controlled regular agents can trigger a spawn
    if agent.ai != 'rtneat' or agent.group != 'Agent':
        return
    team = agent.get_team()
    rtneat = OpenNero.get_ai('rtneat-%s' % team)
    # the team's rtNEAT instance may not exist or be ready yet
    if not rtneat or not rtneat.ready():
        return
    friends, foes = self.getFriendFoe(agent)
    # population already full
    if len(friends) >= constants.pop_size:
        return
    # only the first same-AI friend triggers a spawn, so at most one new
    # agent is requested per step
    if agent is tuple(f for f in friends if f.ai == agent.ai)[0]:
        module.getMod().spawnAgent(team=team, ai=agent.ai)
def Match(team0, team1):
    '''Run a single battle between two population files.'''
    mod = module.getMod()
    # load both population files onto their respective teams
    for team_file, team_type in ((team0, constants.OBJECT_TYPE_TEAM_0),
                                 (team1, constants.OBJECT_TYPE_TEAM_1)):
        mod.load_team(team_file, team_type)
    # run the battle at maximum speedup, then enable AI updates
    mod.set_speedup(100)
    OpenNero.enable_ai()
def __init__(self):
    """ Create the environment: state containers, sensor/action/reward
    bounds, and the team's rtNEAT instance. """
    OpenNero.Environment.__init__(self)
    self.curr_id = 0          # next agent id to hand out
    self.max_steps = 20       # default episode length (steps)
    self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM)  # arena diagonal
    self.states = {}          # per-agent state objects
    self.teams = {}           # per-team agent collections
    self.script = 'Hw5/menu.py'
    abound = OpenNero.FeatureVectorInfo() # actions
    sbound = OpenNero.FeatureVectorInfo() # sensors
    rbound = OpenNero.FeatureVectorInfo() # rewards
    # actions
    abound.add_continuous( -1, 1 ) # forward/backward speed (gets multiplied by constants.MAX_MOVEMENT_SPEED)
    abound.add_continuous( -constants.MAX_TURN_RADIANS, constants.MAX_TURN_RADIANS) # left/right turn (in radians)
    # sensor dimensions (all normalized to [0, 1])
    for a in range(constants.N_SENSORS):
        sbound.add_continuous(0, 1)
    # Rewards
    # the environment returns the raw multiple dimensions of the fitness as
    # they get each step. This then gets combined into, e.g. Z-score, by
    # the ScoreHelper in order to calculate the final rtNEAT-fitness
    for f in constants.FITNESS_DIMENSIONS:
        # we don't care about the bounds of the individual dimensions
        rbound.add_continuous(-sys.float_info.max, sys.float_info.max) # range for reward
    # initialize the rtNEAT algorithm parameters
    # input layer has enough nodes for all the observations plus a bias
    # output layer has enough values for all the actions
    # population size matches ours
    # 1.0 is the weight initialization noise
    rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", OpenNero.Population(), constants.pop_size, 1)
    key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0
    OpenNero.set_ai(key, rtneat)
    print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))
    # set the initial lifetime
    lifetime = module.getMod().lt
    rtneat.set_lifetime(lifetime)
    print 'rtNEAT lifetime:', lifetime
    self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
def __init__(self): """ Create the environment """ OpenNero.Environment.__init__(self) self.curr_id = 0 self.max_steps = 20 self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM) self.states = {} self.teams = {} self.script = 'Hw5/menu.py' abound = OpenNero.FeatureVectorInfo() # actions sbound = OpenNero.FeatureVectorInfo() # sensors rbound = OpenNero.FeatureVectorInfo() # rewards # actions abound.add_continuous(-1, 1) # forward/backward speed (gets multiplied by constants.MAX_MOVEMENT_SPEED) abound.add_continuous(-constants.MAX_TURN_RADIANS, constants.MAX_TURN_RADIANS) # left/right turn (in radians) # sensor dimensions for a in range(constants.N_SENSORS): sbound.add_continuous(0, 1); # Rewards # the enviroment returns the raw multiple dimensions of the fitness as # they get each step. This then gets combined into, e.g. Z-score, by # the ScoreHelper in order to calculate the final rtNEAT-fitness for f in constants.FITNESS_DIMENSIONS: # we don't care about the bounds of the individual dimensions rbound.add_continuous(-sys.float_info.max, sys.float_info.max) # range for reward # initialize the rtNEAT algorithm parameters # input layer has enough nodes for all the observations plus a bias # output layer has enough values for all the actions # population size matches ours # 1.0 is the weight initialization noise rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", constants.N_SENSORS, constants.N_ACTIONS, constants.pop_size, 1.0, rbound, False) key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0 OpenNero.set_ai(key, rtneat) print "get_ai(%s): %s" % (key, OpenNero.get_ai(key)) # set the initial lifetime lifetime = module.getMod().lt rtneat.set_lifetime(lifetime) print 'rtNEAT lifetime:', lifetime self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
def ModTick(dt):
    """Per-frame update: tick the environment and drain script-server input."""
    mod = module.getMod()
    if mod.environment:
        mod.environment.tick(dt)
    # headless (null renderer) runs do not process script-server commands
    if OpenNero.getAppConfig().rendertype == 'null':
        return
    script_server = module.getServer()
    # drain all pending commands this frame
    while True:
        data = script_server.read_data()
        if not data:
            break
        module.parseInput(data.strip())
def action_list_generator(self):
    """Yield the scripted action sequence that solves the puzzle:
    an initial turn, the recursive Hanoi solution, then a celebration."""
    from module import getMod
    self.num_disks = getMod().num_disks
    #self.state.label = 'Starting to Solve!'
    for a in ACTIONS_TURN_LEFT_TO_BEGIN1:
        yield a
    # recursively generated moves: pole 'c' -> 'a' using 'b' as spare
    for a in self.dohanoi(self.num_disks, 'c', 'a', 'b'):
        yield a
    #self.state.label = 'Problem Solved!'
    for a in ACTIONS_CELEBERATE:
        yield a
def action_queue_generator(self):
    """Yield the scripted action sequence that solves the puzzle:
    an opening sequence, the recursive Hanoi solution, then a celebration."""
    from module import getMod
    self.num_disks = getMod().num_disks
    #self.state.label = 'Starting to Solve!'
    for a in ACTIONS_BEGIN1:
        yield a
    # recursively generated moves: pole 'c' -> 'a' using 'b' as spare
    for a in self.dohanoi(self.num_disks, 'c', 'a', 'b'):
        yield a
    #self.state.label = 'Problem Solved!'
    for a in ACTIONS_CELEBERATE:
        yield a
def action_list_generator(self):
    """Yield the scripted action sequence that solves the puzzle:
    an initial turn, the recursive Hanoi solution, then a celebration."""
    from module import getMod
    self.num_disks = getMod().num_disks
    # self.state.label = 'Starting to Solve!'
    # three phases, yielded back-to-back
    phases = (ACTIONS_TURN_LEFT_TO_BEGIN1,
              self.dohanoi(self.num_disks, "c", "a", "b"),
              ACTIONS_CELEBERATE)
    for phase in phases:
        for step in phase:
            yield step
    # self.state.label = 'Problem Solved!'
def is_episode_over(self, agent):
    """ is the current episode over for the agent? """
    # turrets and first-person agents run forever
    if agent.group == "Turret":
        return False
    if isinstance(agent, FirstPersonAgent):
        return False # first person agents never stop
    # lifetime limit (0 means unlimited)
    self.max_steps = module.getMod().lt
    if self.max_steps != 0 and agent.step >= self.max_steps:
        return True
    team = agent.get_team()
    # episode ends when rtNEAT no longer tracks this agent's organism
    # NOTE(review): get_ai() is assumed non-None here — confirm the team's
    # rtNEAT instance always exists by the time this runs
    if not OpenNero.get_ai("rtneat-%s" % team).has_organism(agent):
        return True
    state = self.get_state(agent)
    # hitpoint limit (0 means invulnerable)
    if module.getMod().hp != 0 and state.total_damage >= module.getMod().hp:
        return True
    return False
def ClientMain():
    """World-builder client entry point: map, sky box, GUI state, camera
    with a SPACE-key recenter binding, and input map."""
    global modify_object_id
    global object_ids
    global guiMan
    OpenNero.disable_ai()
    # bail back to the hub if the map cannot be set up
    if not module.getMod().setup_map():
        switchToHub()
        return
    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)
    common.addSkyBox("data/sky/irrlicht2")
    # setup the gui; reset the per-session object bookkeeping
    guiMan = common.getGuiManager()
    object_ids = {}
    modify_object_id = {}
    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 15000
    camZoomSpeed = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    # closure factory: returns a callback that resets the camera view
    def recenter(cam):
        def closure():
            cam.setPosition(OpenNero.Vector3f(0, 0, 100))
            cam.setTarget(OpenNero.Vector3f(100, 100, 0))
        return closure
    recenter_cam = recenter(cam)
    recenter_cam()
    # create the io map; SPACE re-centers the camera
    ioMap = createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
def calculate_reward(self, agent, action):
    """Compute the per-dimension fitness reward vector for one agent step."""
    reward = self.agent_info.reward.get_instance()
    state = self.get_state(agent)
    friends, foes = self.getFriendFoe(agent)
    # start every fitness dimension at zero
    R = {dim: 0 for dim in constants.FITNESS_DIMENSIONS}
    # standing ground penalizes movement speed
    R[constants.FITNESS_STAND_GROUND] = -abs(action[0])
    # sticking together: negative squared distance to nearest friend
    friend = self.nearest(state.pose, friends)
    if friend:
        dist = self.distance(self.get_state(friend).pose, state.pose)
        R[constants.FITNESS_STICK_TOGETHER] = -dist * dist
    # approaching the enemy: negative squared distance to nearest foe
    foe = self.nearest(state.pose, foes)
    if foe:
        dist = self.distance(self.get_state(foe).pose, state.pose)
        R[constants.FITNESS_APPROACH_ENEMY] = -dist * dist
    # approaching the flag, if one is placed
    f = module.getMod().flag_loc
    if f:
        dist = self.distance(state.pose, (f.x, f.y))
        R[constants.FITNESS_APPROACH_FLAG] = -dist * dist
    # line-of-sight shot at the current target scores a hit
    target = self.target(agent)
    if target is not None:
        obstacles = OpenNero.getSimContext().findInRay(
            agent.state.position,
            target.state.position,
            constants.OBJECT_TYPE_OBSTACLE | agent.get_team(),
            True)
        if not obstacles:
            self.get_state(target).curr_damage += 1
            R[constants.FITNESS_HIT_TARGET] = 1
    # taking fire is penalized by the damage accrued this step
    damage = state.update_damage()
    R[constants.FITNESS_AVOID_FIRE] = -damage
    # copy the dimensions into the reward vector in canonical order
    for i, dim in enumerate(constants.FITNESS_DIMENSIONS):
        reward[i] = R[dim]
    return reward
def queue_init(self):
    """Generator yielding the scripted action queue that solves the towers:
    init moves, the recursive Hanoi solution, then the ending sequence.
    Also records the canned pole-to-pole move sequences on self."""
    self.init_queue = [1, 5]
    # canned action sequences for moving between each pair of poles
    self.atob = [5, 1, 4, 3, 4, 1, 5, 2]
    self.btoa = [3, 5, 1, 4, 2, 4, 1, 5]
    self.atoc = [5, 1, 4, 3, 4, 1, 1, 5, 2, 5, 1, 4]
    self.ctoa = [4, 1, 5, 3, 5, 1, 1, 4, 2, 4, 1, 5]
    self.btoc = [3, 4, 1, 5, 2, 5, 1, 4]
    self.ctob = [4, 1, 5, 3, 5, 1, 4, 2]
    self.end_queue = [0, 0, 0, 5, 5, 1]
    from module import getMod
    self.num_towers = getMod().num_towers
    #self.state.label = 'Starting to Solve!'
    for action in self.init_queue:
        yield action
    # recursively generated moves: pole 'b' -> 'a' using 'c' as spare
    for action in self.dohanoi(self.num_towers, 'b', 'a', 'c'):
        yield action
    #self.state.label = 'Problem Solved!'
    for action in self.end_queue:
        yield action
def ModMain(mode=""): module.getMod() # initialize the NERO_Battle module. client.ClientMain()
def set_spawn():
    # Set the team spawn point to the currently picked world location.
    module.getMod().set_spawn(location.x, location.y)
def __init__(self, team=None, group='Agent'):
    """Initialize the agent's team, group and init-info record."""
    # fall back to the mod's currently selected team when none is given
    self.team = team if team else module.getMod().curr_team
    self.group = group
    # build the AgentInitInfo from the (sensor, action, reward) bound tuple
    self.info = OpenNero.AgentInitInfo(*self.agent_info_tuple())
def place_flag():
    # Move the flag to the currently picked world location (z = 0).
    module.getMod().change_flag([location.x, location.y, 0])
def place_basic_turret():
    """Place a basic turret at the picked location and register it as
    movable/removable in the object bookkeeping table."""
    position = [location.x, location.y, 0]
    new_id = module.getMod().place_basic_turret(position)
    object_ids[new_id] = {'move', 'remove'}
def ModMain(mode = ""): module.getMod() # initialize the NERO_Battle module. client.ClientMain()
def remove_flag():
    # Remove the flag from the field.
    module.getMod().remove_flag()
def calculate_reward(self, agent, action, scored_hit = False):
    """Compute the reward for one agent step.

    Returns either a single weighted scalar (when the reward vector has one
    slot) or the raw per-dimension fitness vector. `scored_hit` marks that
    the agent landed a shot this step (hit detection happens elsewhere).
    """
    reward = agent.info.reward.get_instance()
    state = self.get_state(agent)
    friends, foes = self.getFriendFoe(agent)
    # dead non-turret agents earn nothing further
    if agent.group != 'Turret' and self.hitpoints > 0 and state.total_damage >= self.hitpoints:
        return reward
    # start every fitness dimension at zero
    R = dict((f, 0) for f in constants.FITNESS_DIMENSIONS)
    # standing ground penalizes movement speed
    R[constants.FITNESS_STAND_GROUND] = -abs(action[0])
    # sticking together: negative squared distance to nearest friend
    friend = self.nearest(state.pose, friends)
    if friend:
        d = self.distance(self.get_state(friend).pose, state.pose)
        R[constants.FITNESS_STICK_TOGETHER] = -d * d
    # approaching the enemy: negative squared distance to nearest foe
    foe = self.nearest(state.pose, foes)
    if foe:
        d = self.distance(self.get_state(foe).pose, state.pose)
        R[constants.FITNESS_APPROACH_ENEMY] = -d * d
    # approaching the flag, if one is placed
    f = module.getMod().flag_loc
    if f:
        d = self.distance(state.pose, (f.x, f.y))
        R[constants.FITNESS_APPROACH_FLAG] = -d * d
    # older in-reward hit detection, superseded by the scored_hit flag:
    # target = self.target(agent)
    # if target is not None:
    #     source_pos = agent.state.position
    #     target_pos = target.state.position
    #     source_pos.z = source_pos.z + 5
    #     target_pos.z = target_pos.z + 5
    #     dist = target_pos.getDistanceFrom(source_pos)
    #     d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
    #     if random.random() < d/2: # attempt a shot depending on distance
    #         team_color = constants.TEAM_LABELS[agent.get_team()]
    #         if team_color == 'red':
    #             color = OpenNero.Color(255, 255, 0, 0)
    #         elif team_color == 'blue':
    #             color = OpenNero.Color(255, 0, 0, 255)
    #         else:
    #             color = OpenNero.Color(255, 255, 255, 0)
    #         wall_color = OpenNero.Color(128, 0, 255, 0)
    #         obstacles = OpenNero.getSimContext().findInRay(
    #             source_pos,
    #             target_pos,
    #             constants.OBJECT_TYPE_OBSTACLE,
    #             True,
    #             wall_color,
    #             color)
    #         if len(obstacles) == 0 and random.random() < d/2:
    #             # count as hit depending on distance
    #             self.get_state(target).curr_damage += 1
    #             R[constants.FITNESS_HIT_TARGET] = 1
    if scored_hit:
        R[constants.FITNESS_HIT_TARGET] = 1
    # taking fire is penalized by the damage accrued this step
    damage = state.update_damage()
    R[constants.FITNESS_AVOID_FIRE] = -damage
    if len(reward) == 1:
        # single-slot reward: collapse all dimensions into one weighted,
        # scale-normalized scalar
        for i, f in enumerate(constants.FITNESS_DIMENSIONS):
            reward[0] += self.reward_weights[f] * R[f] / constants.FITNESS_SCALE.get(f, 1.0)
            #print f, self.reward_weights[f], R[f] / constants.FITNESS_SCALE.get(f, 1.0)
    else:
        # multi-slot reward: copy each raw dimension through in order
        for i, f in enumerate(constants.FITNESS_DIMENSIONS):
            reward[i] = R[f]
    return reward
def place_basic_turret():
    # Place a basic turret at the picked location and register it as
    # movable/removable in the object bookkeeping table.
    obj_id = module.getMod().place_basic_turret(
        [location.x, location.y, 0])
    object_ids[obj_id] = set(['move', 'remove'])
def set_spawn_2():
    # Set team 1's spawn point to the currently picked world location.
    module.getMod().set_spawn(location.x, location.y,
                              constants.OBJECT_TYPE_TEAM_1)
def queue_init(self):
    """Generator yielding the scripted action queue that solves the towers:
    init moves, the recursive Hanoi solution, then the ending sequence.
    Also records the canned pole-to-pole move sequences on self."""
    self.init_queue = [1, 5]
    # canned action sequences for moving between each pair of poles
    self.atob = [ 5, 1, 4, 3, 4, 1, 5, 2, ]
    self.btoa = [ 3, 5, 1, 4, 2, 4, 1, 5, ]
    self.atoc = [ 5, 1, 4, 3, 4, 1, 1, 5, 2, 5, 1, 4, ]
    self.ctoa = [ 4, 1, 5, 3, 5, 1, 1, 4, 2, 4, 1, 5, ]
    self.btoc = [ 3, 4, 1, 5, 2, 5, 1, 4, ]
    self.ctob = [ 4, 1, 5, 3, 5, 1, 4, 2, ]
    self.end_queue = [0, 0, 0, 5, 5, 1]
    from module import getMod
    self.num_towers = getMod().num_towers
    #self.state.label = 'Starting to Solve!'
    for a in self.init_queue:
        yield a
    # recursively generated moves: pole 'b' -> 'a' using 'c' as spare
    for a in self.dohanoi(self.num_towers, 'b', 'a', 'c'):
        yield a
    #self.state.label = 'Problem Solved!'
    for a in self.end_queue:
        yield a
def initialize_blocks(self):
    """(Re)create the Tower-of-Hanoi disks for the configured disk count.

    Each disk is added if missing or repositioned if present; red and
    white are also removed when the count shrinks below their threshold.
    Finally the disks are linked into an above/below chain.
    """
    from module import getMod
    num_disks = getMod().num_disks
    # blue: largest disk
    if num_disks >= 1:
        if not self.get_block_state('blue'):
            self.add_block("data/shapes/cube/BlueCube.xml", 0, 1, 0, 5, 'blue')
        else:
            self.set_block('blue', 0, 1, 0, 5)
    bstate = self.get_block_state('blue')
    if num_disks >= 2:
        if not self.get_block_state('green'):
            self.add_block('data/shapes/cube/GreenCube.xml', 0, 1, 1, 4, 'green')
        else:
            self.set_block('green', 0, 1, 1, 4)
    gstate = self.get_block_state('green')
    if num_disks >= 3:
        if not self.get_block_state('yellow'):
            self.add_block('data/shapes/cube/YellowCube.xml', 0, 1, 2, 3, 'yellow')
        else:
            self.set_block('yellow', 0, 1, 2, 3)
    ystate = self.get_block_state('yellow')
    # red and white are removed if the count drops below them
    if num_disks >= 4:
        if not self.get_block_state('red'):
            self.add_block('data/shapes/cube/RedCube.xml', 0, 1, 3, 2, 'red', scaler=(1.0 / 2.5))
        else:
            self.set_block('red', 0, 1, 3, 2)
    elif self.get_block_state('red'):
        self.remove_block('red')
    rstate = self.get_block_state('red')
    if num_disks >= 5:
        if not self.get_block_state('white'):
            self.add_block('data/shapes/cube/BlueCube.xml', 0, 1, 4, 1, 'white')
        else:
            self.set_block('white', 0, 1, 4, 1)
    elif self.get_block_state('white'):
        self.remove_block('white')
    wstate = self.get_block_state('white')
    # link each disk to its vertical neighbors
    # NOTE(review): the blue/green links below are unguarded — if
    # num_disks < 2 these look like they would dereference None; confirm
    # callers guarantee num_disks >= 2
    bstate.above = gstate
    gstate.below = bstate
    if num_disks > 2:
        gstate.above = ystate
    if num_disks > 2:
        ystate.below = gstate
    if num_disks > 3:
        ystate.above = rstate
    if num_disks > 3:
        rstate.below = ystate
    if num_disks > 4:
        rstate.above = wstate
    if num_disks > 4:
        wstate.below = rstate
    print 'Initialized TowerEnvironment'
def place_basic_turret():
    # Place a basic turret at the currently picked world location (z = 0).
    module.getMod().place_basic_turret([location.x, location.y, 0])
def closure():
    """Button callback: swap the add/remove button states and clear the bots."""
    addBotsButton.enabled = True
    removeBotsButton.enabled = False
    getMod().remove_bots()
def closure():
    # Button callback: swap the add/remove button states and spawn the
    # requested number of bots of the selected type (values read from the
    # GUI text boxes).
    removeBotsButton.enabled = True
    addBotsButton.enabled = False
    getMod().add_bots(botTypeBox.text, numBotBox.text)