コード例 #1
0
def ClientMain():
    """Client entry point: configure camera, light, input mapping, and GUI."""
    # start with AI (and, historically, physics) switched off
    # disable_physics()
    disable_ai()

    # seed the random number generator (time-based by default)
    seed()

    # camera with fixed rotate/move/zoom speeds
    rotate_speed, move_speed, zoom_speed = 100, 1500, 100
    camera = getSimContext().addCamera(rotate_speed, move_speed, zoom_speed)
    camera.setPosition(Vector3f(100, 100, 50))
    camera.setTarget(Vector3f(1, 1, 1))
    camera.setFarPlane(1000)
    camera.setEdgeScroll(False)

    getMod().setup_sandbox()

    # single light source above the scene
    getSimContext().addLightSource(Vector3f(500, -500, 1000), 1500)

    # install the io (input) mapping
    getSimContext().setInputMapping(createInputMapping())

    # build the GUI last, once the sim context is fully configured
    CreateGui(getSimContext().getGuiManager())
コード例 #2
0
def ClientMain():
    """Client entry point: set up camera, light, input mapping, and GUI."""
    # disable physics and AI updates at first
    # disable_physics()
    disable_ai()

    # initialize random number generator with current time
    seed()

    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 1500
    camZoomSpeed = 100
    cam = getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setPosition(Vector3f(100, 100, 50))
    cam.setTarget(Vector3f(1, 1, 1))
    cam.setFarPlane(1000)
    cam.setEdgeScroll(False)

    getMod().setup_sandbox()

    # add a light source
    getSimContext().addLightSource(Vector3f(500, -500, 1000), 1500)

    # create the io map
    getSimContext().setInputMapping(createInputMapping())

    # setup the gui
    CreateGui(getSimContext().getGuiManager())
コード例 #3
0
 def randomize(self):
     """Randomize this agent's spawn position around its team's spawn point."""
     # uniform offset in [-SPAWN_RANGE, SPAWN_RANGE) on each axis
     dx = random.randrange(constants.SPAWN_RANGE * 2) - constants.SPAWN_RANGE
     dy = random.randrange(constants.SPAWN_RANGE * 2) - constants.SPAWN_RANGE
     self.initial_position.x = module.getMod().spawn_x[self.agent.get_team()] + dx
     self.initial_position.y = module.getMod().spawn_y[self.agent.get_team()] + dy
     # pose is (x, y, heading); previous pose starts equal to the current one
     self.prev_pose = self.pose = (self.initial_position.x,
                                   self.initial_position.y,
                                   self.initial_rotation.z)
コード例 #4
0
ファイル: client.py プロジェクト: bradyz/cs343
def toggle_ai_callback():
    """Toggle AI updates; start rtNEAT on first use, then flip Started/Paused."""
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        # very first toggle: launch the rtNEAT learning loop
        module.getMod().start_rtneat()
        ai_state = "Started"
    else:
        # flip between the two running states; any other value is left alone
        ai_state = {"Started": "Paused", "Paused": "Started"}.get(ai_state, ai_state)
コード例 #5
0
def toggle_ai_callback():
    """Toggle AI updates; start rtNEAT on first use, then alternate Started/Paused."""
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        # first toggle: launch the rtNEAT learning algorithm
        module.getMod().start_rtneat()
        ai_state = 'Started'
    elif ai_state == 'Started':
        ai_state = 'Paused'
    elif ai_state == 'Paused':
        ai_state = 'Started'
コード例 #6
0
 def maybe_spawn(self, agent):
     '''Spawn more agents if there are more to spawn.'''
     # do not spawn just because a first person agent is on the field
     if isinstance(agent, FirstPersonAgent):
         return
     team = agent.get_team()
     friends, foes = self.getFriendFoe(agent)
     # guard against an empty friends list so friends[0] is always valid
     friends = tuple(friends or [None])
     # only the first friendly agent triggers a spawn, and only while the
     # team's rtNEAT population is ready and still below pop_size
     if (agent.group == 'Agent' and agent is friends[0]
             and OpenNero.get_ai("rtneat-%s" % team).ready()
             and len(friends) < constants.pop_size):
         module.getMod().spawnAgent(team)
コード例 #7
0
ファイル: NeroEnvironment.py プロジェクト: bradyz/cs343
 def maybe_spawn(self, agent):
     '''Spawn more agents if there are more to spawn.'''
     # do not spawn just because a first person agent is on the field
     if isinstance(agent, FirstPersonAgent):
         return
     team = agent.get_team()
     friends, foes = self.getFriendFoe(agent)
     # guard against an empty friends list so friends[0] is always valid
     friends = tuple(friends or [None])
     # spawn only once per tick (first friendly agent), while the team's
     # rtNEAT population is ready and still below pop_size
     if (agent.group == 'Agent' and
         agent is friends[0] and
         OpenNero.get_ai("rtneat-%s" % team).ready() and
         len(friends) < constants.pop_size):
         module.getMod().spawnAgent(team)
コード例 #8
0
ファイル: client.py プロジェクト: bradyz/cs343
def ClientMain():
    """Client entry point: set up the map, light, GUI, camera, and input map."""
    # physics off, ai off by default
    # disable_physics()
    OpenNero.disable_ai()

    # if the map cannot be set up, bail back to the hub screen
    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return

    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)

    # common.addSkyBox("data/sky/irrlicht2")

    # setup the gui
    CreateGui(common.getGuiManager())

    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 15000
    camZoomSpeed = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    # recenter() returns a closure that resets the camera; call it once now
    recenter_cam = recenter(cam)
    recenter_cam()

    # create the io map; SPACE recenters the camera
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
コード例 #9
0
ファイル: environment.py プロジェクト: DavidDeAngelo/opennero
    def initialize_blocks(self):
        """Create/update the Towers-of-Hanoi disk blocks for num_disks disks
        and link their above/below pointers to reflect the initial stack."""
        from module import getMod
        num_disks = getMod().num_disks

        # for each color: create the block if missing, otherwise reposition it
        if num_disks >= 1:
            if not self.get_block_state('blue'):
                self.add_block("data/shapes/cube/BlueCube.xml", 0, 1, 0, 5, 'blue')
            else:
                self.set_block('blue',0,1,0,5)

        bstate = self.get_block_state('blue')

        if num_disks >= 2:
            if not self.get_block_state('green'):
                self.add_block('data/shapes/cube/GreenCube.xml', 0, 1, 1, 4, 'green')
            else:
                self.set_block('green',0,1,1,4)

        gstate = self.get_block_state('green')

        if num_disks >= 3:
            if not self.get_block_state('yellow'):
                self.add_block('data/shapes/cube/YellowCube.xml', 0, 1, 2, 3, 'yellow')
            else:
                self.set_block('yellow',0,1,2,3)

        ystate = self.get_block_state('yellow')

        # red and white are removed again when num_disks shrinks below them
        if num_disks >=  4:
            if not self.get_block_state('red'):
                self.add_block('data/shapes/cube/RedCube.xml', 0, 1, 3, 2, 'red', scaler = (1.0/2.5))
            else:
                self.set_block('red',0,1,3,2)

        elif self.get_block_state('red'):
            self.remove_block('red')

        rstate = self.get_block_state('red')

        if num_disks >=  5:
            if not self.get_block_state('white'):
                self.add_block('data/shapes/cube/BlueCube.xml', 0, 1, 4, 1, 'white')
            else:
                self.set_block('white',0,1,4,1)

        elif self.get_block_state('white'):
            self.remove_block('white')

        wstate = self.get_block_state('white')

        # wire up the stacking order (bottom: blue ... top: white)
        # NOTE(review): bstate.above/gstate.below are set unconditionally, so
        # this looks like it assumes num_disks >= 2 — confirm with callers
        bstate.above = gstate
        gstate.below = bstate
        if num_disks > 2: gstate.above = ystate
        if num_disks > 2: ystate.below = gstate
        if num_disks > 3: ystate.above = rstate
        if num_disks > 3: rstate.below = ystate
        if num_disks > 4: rstate.above = wstate
        if num_disks > 4: wstate.below = rstate

        print 'Initialized TowerEnvironment'
コード例 #10
0
def ClientMain():
    """Client entry point: set up the map, light, GUI, camera, and input map."""
    # physics off, ai off by default
    #disable_physics()
    OpenNero.disable_ai()

    # if the map cannot be set up, bail back to the hub screen
    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return

    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000),
                                            1500)

    #common.addSkyBox("data/sky/irrlicht2")

    # setup the gui
    CreateGui(common.getGuiManager())

    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 15000
    camZoomSpeed = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed,
                                             camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    # recenter() returns a closure that resets the camera; call it once now
    recenter_cam = recenter(cam)
    recenter_cam()

    # create the io map; SPACE recenters the camera
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
コード例 #11
0
    def maybe_spawn(self, agent):
        '''Spawn more agents if there are more to spawn.'''
        # only rtneat-controlled regular agents can trigger a spawn
        if agent.ai != 'rtneat' or agent.group != 'Agent':
            return

        team = agent.get_team()
        rtneat = OpenNero.get_ai('rtneat-%s' % team)
        if not rtneat or not rtneat.ready():
            return

        # stop spawning once the team has reached the target population size
        friends, foes = self.getFriendFoe(agent)
        if len(friends) >= constants.pop_size:
            return

        # let only the first same-AI friend spawn, so one spawn per tick
        if agent is tuple(f for f in friends if f.ai == agent.ai)[0]:
            module.getMod().spawnAgent(team=team, ai=agent.ai)
コード例 #12
0
ファイル: main.py プロジェクト: baviera08/opennero
def Match(team0, team1):
    '''Run a single battle between two population files.'''
    # load both teams, crank the simulation speed, then switch the AI on
    nero_mod = module.getMod()
    for team_file, team_type in ((team0, constants.OBJECT_TYPE_TEAM_0),
                                 (team1, constants.OBJECT_TYPE_TEAM_1)):
        nero_mod.load_team(team_file, team_type)
    nero_mod.set_speedup(100)
    OpenNero.enable_ai()
コード例 #13
0
ファイル: main.py プロジェクト: tonyle9/opennero
def Match(team0, team1):
    '''Run a single battle between two population files.'''
    mod = module.getMod()
    # load each population file onto its team slot
    mod.load_team(team0, constants.OBJECT_TYPE_TEAM_0)
    mod.load_team(team1, constants.OBJECT_TYPE_TEAM_1)
    # run at maximum speedup, then start the AI
    mod.set_speedup(100)
    OpenNero.enable_ai()
コード例 #14
0
    def __init__(self):
        """
        Create the environment: declare action/sensor/reward bounds and
        register the rtNEAT algorithm for team 0.
        """
        OpenNero.Environment.__init__(self)

        self.curr_id = 0          # next agent id to hand out
        self.max_steps = 20       # episode length cap (overridden by lifetime)
        self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM)
        self.states = {}          # per-agent state objects
        self.teams = {}           # per-team membership
        self.script = 'Hw5/menu.py'

        abound = OpenNero.FeatureVectorInfo()  # actions
        sbound = OpenNero.FeatureVectorInfo()  # sensors
        rbound = OpenNero.FeatureVectorInfo()  # rewards

        # actions
        abound.add_continuous(
            -1, 1
        )  # forward/backward speed (gets multiplied by constants.MAX_MOVEMENT_SPEED)
        abound.add_continuous(
            -constants.MAX_TURN_RADIANS,
            constants.MAX_TURN_RADIANS)  # left/right turn (in radians)

        # sensor dimensions, all normalized to [0, 1]
        for a in range(constants.N_SENSORS):
            sbound.add_continuous(0, 1)

        # Rewards
        # the enviroment returns the raw multiple dimensions of the fitness as
        # they get each step. This then gets combined into, e.g. Z-score, by
        # the ScoreHelper in order to calculate the final rtNEAT-fitness
        for f in constants.FITNESS_DIMENSIONS:
            # we don't care about the bounds of the individual dimensions
            rbound.add_continuous(-sys.float_info.max,
                                  sys.float_info.max)  # range for reward

        # initialize the rtNEAT algorithm parameters
        # input layer has enough nodes for all the observations plus a bias
        # output layer has enough values for all the actions
        # population size matches ours
        # 1.0 is the weight initialization noise
        rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
                                 OpenNero.Population(), constants.pop_size, 1)

        # register the algorithm so it can be looked up by key later
        key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0
        OpenNero.set_ai(key, rtneat)
        print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))

        # set the initial lifetime from the module's configured value
        lifetime = module.getMod().lt
        rtneat.set_lifetime(lifetime)
        print 'rtNEAT lifetime:', lifetime

        self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
コード例 #15
0
ファイル: NeroEnvironment.py プロジェクト: bradyz/cs343
    def __init__(self):
        """
        Create the environment: declare action/sensor/reward bounds and
        register the rtNEAT algorithm for team 0.
        """
        OpenNero.Environment.__init__(self)

        self.curr_id = 0          # next agent id to hand out
        self.max_steps = 20       # episode length cap (overridden by lifetime)
        self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM)
        self.states = {}          # per-agent state objects
        self.teams = {}           # per-team membership
        self.script = 'Hw5/menu.py'

        abound = OpenNero.FeatureVectorInfo() # actions
        sbound = OpenNero.FeatureVectorInfo() # sensors
        rbound = OpenNero.FeatureVectorInfo() # rewards

        # actions
        abound.add_continuous(-1, 1) # forward/backward speed (gets multiplied by constants.MAX_MOVEMENT_SPEED)
        abound.add_continuous(-constants.MAX_TURN_RADIANS, constants.MAX_TURN_RADIANS) # left/right turn (in radians)

        # sensor dimensions, all normalized to [0, 1]
        for a in range(constants.N_SENSORS):
            sbound.add_continuous(0, 1);

        # Rewards
        # the enviroment returns the raw multiple dimensions of the fitness as
        # they get each step. This then gets combined into, e.g. Z-score, by
        # the ScoreHelper in order to calculate the final rtNEAT-fitness
        for f in constants.FITNESS_DIMENSIONS:
            # we don't care about the bounds of the individual dimensions
            rbound.add_continuous(-sys.float_info.max, sys.float_info.max) # range for reward

        # initialize the rtNEAT algorithm parameters
        # input layer has enough nodes for all the observations plus a bias
        # output layer has enough values for all the actions
        # population size matches ours
        # 1.0 is the weight initialization noise
        rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
                                 constants.N_SENSORS,
                                 constants.N_ACTIONS,
                                 constants.pop_size,
                                 1.0,
                                 rbound, False)

        # register the algorithm so it can be looked up by key later
        key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0
        OpenNero.set_ai(key, rtneat)
        print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))

        # set the initial lifetime from the module's configured value
        lifetime = module.getMod().lt
        rtneat.set_lifetime(lifetime)
        print 'rtNEAT lifetime:', lifetime

        self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
コード例 #16
0
ファイル: main.py プロジェクト: tonyle9/opennero
def ModTick(dt):
    """Per-frame update: tick the environment, then drain script-server input."""
    mod = module.getMod()
    if mod.environment:
        mod.environment.tick(dt)
    # headless ('null' renderer) runs have no script server input to process
    if OpenNero.getAppConfig().rendertype == 'null':
        return
    server = module.getServer()
    while True:
        payload = server.read_data()
        if not payload:
            break
        module.parseInput(payload.strip())
コード例 #17
0
ファイル: main.py プロジェクト: baviera08/opennero
def ModTick(dt):
    """Per-frame update: tick the environment, then drain script-server input."""
    mod = module.getMod()
    if mod.environment:
        mod.environment.tick(dt)
    # headless ('null' renderer) runs have no script server input to process
    if OpenNero.getAppConfig().rendertype == 'null':
        return
    script_server = module.getServer()
    data = script_server.read_data()
    while data:
        module.parseInput(data.strip())
        data = script_server.read_data()
コード例 #18
0
    def action_list_generator(self):
        """Yield the scripted action sequence that solves the Hanoi puzzle:
        turn to the start, solve recursively, then celebrate."""
        from module import getMod
        self.num_disks = getMod().num_disks

        #self.state.label = 'Starting to Solve!'
        for a in ACTIONS_TURN_LEFT_TO_BEGIN1:
            yield a
        # recursive Hanoi solution moving the stack from 'c' to 'b' via 'a'
        for a in self.dohanoi(self.num_disks, 'c', 'a', 'b'):
            yield a
        #self.state.label = 'Problem Solved!'
        for a in ACTIONS_CELEBERATE:
            yield a
コード例 #19
0
ファイル: agent.py プロジェクト: PCoelho07/opennero
    def action_queue_generator(self):
        """Yield the scripted action sequence that solves the Hanoi puzzle:
        opening moves, recursive solution, then a celebration."""
        from module import getMod
        self.num_disks = getMod().num_disks

        #self.state.label = 'Starting to Solve!'
        for a in ACTIONS_BEGIN1:
            yield a
        # recursive Hanoi solution moving the stack from 'c' to 'b' via 'a'
        for a in self.dohanoi(self.num_disks, 'c', 'a', 'b'):
            yield a
        #self.state.label = 'Problem Solved!'
        for a in ACTIONS_CELEBERATE:
            yield a
コード例 #20
0
ファイル: agent.py プロジェクト: vurte/opennero
    def action_list_generator(self):
        """Yield the scripted action sequence that solves the Hanoi puzzle:
        turn to the start, solve recursively, then celebrate."""
        from module import getMod

        self.num_disks = getMod().num_disks

        # self.state.label = 'Starting to Solve!'
        for a in ACTIONS_TURN_LEFT_TO_BEGIN1:
            yield a
        # recursive Hanoi solution moving the stack from "c" to "b" via "a"
        for a in self.dohanoi(self.num_disks, "c", "a", "b"):
            yield a
        # self.state.label = 'Problem Solved!'
        for a in ACTIONS_CELEBERATE:
            yield a
コード例 #21
0
    def is_episode_over(self, agent):
        """
        is the current episode over for the agent?

        Turrets and first-person agents never finish; otherwise the episode
        ends on step limit, removal from the rtNEAT population, or death.
        """
        if agent.group == "Turret":
            return False

        if isinstance(agent, FirstPersonAgent):
            return False # first person agents never stop
        # step limit comes from the module's lifetime setting (0 = unlimited)
        self.max_steps = module.getMod().lt
        if self.max_steps != 0 and agent.step >= self.max_steps:
            return True

        # episode ends if rtNEAT no longer tracks this agent's organism
        team = agent.get_team()
        if not OpenNero.get_ai("rtneat-%s" % team).has_organism(agent):
            return True

        # episode ends when accumulated damage reaches the hp limit (0 = off)
        state = self.get_state(agent)
        if module.getMod().hp != 0 and state.total_damage >= module.getMod().hp:
            return True

        return False
コード例 #22
0
ファイル: client.py プロジェクト: tonyle9/opennero
def ClientMain():
    """Client entry point: set up the map, sky box, GUI state, camera, and io map."""
    global modify_object_id
    global object_ids
    global guiMan

    OpenNero.disable_ai()

    # if the map cannot be set up, bail back to the hub screen
    if not module.getMod().setup_map():
        switchToHub()
        return

    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000),
                                            1500)

    common.addSkyBox("data/sky/irrlicht2")

    # setup the gui and reset the per-object bookkeeping maps
    guiMan = common.getGuiManager()
    object_ids = {}
    modify_object_id = {}

    # add a camera
    camRotateSpeed = 100
    camMoveSpeed = 15000
    camZoomSpeed = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed,
                                             camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)

    def recenter(cam):
        # returns a closure that snaps the camera back to the default view
        def closure():
            cam.setPosition(OpenNero.Vector3f(0, 0, 100))
            cam.setTarget(OpenNero.Vector3f(100, 100, 0))

        return closure

    recenter_cam = recenter(cam)
    recenter_cam()

    # create the io map; SPACE recenters the camera
    ioMap = createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
コード例 #23
0
    def calculate_reward(self, agent, action):
        """Compute the per-step multi-dimensional fitness reward for an agent.

        Each FITNESS_DIMENSION gets its own component; distances are squared
        and negated so closer is better.
        """
        reward = self.agent_info.reward.get_instance()
        state = self.get_state(agent)
        friends, foes = self.getFriendFoe(agent)

        # start every fitness dimension at zero
        R = dict([(f, 0) for f in constants.FITNESS_DIMENSIONS])

        # standing ground: penalize forward/backward movement magnitude
        R[constants.FITNESS_STAND_GROUND] = -abs(action[0])

        # stick together: penalize squared distance to the nearest friend
        friend = self.nearest(state.pose, friends)
        if friend:
            d = self.distance(self.get_state(friend).pose, state.pose)
            R[constants.FITNESS_STICK_TOGETHER] = -d * d

        # approach enemy: penalize squared distance to the nearest foe
        foe = self.nearest(state.pose, foes)
        if foe:
            d = self.distance(self.get_state(foe).pose, state.pose)
            R[constants.FITNESS_APPROACH_ENEMY] = -d * d

        # approach flag: penalize squared distance to the flag, if placed
        f = module.getMod().flag_loc
        if f:
            d = self.distance(state.pose, (f.x, f.y))
            R[constants.FITNESS_APPROACH_FLAG] = -d * d

        # hit target: reward 1 when the line of fire to the target is clear
        target = self.target(agent)
        if target is not None:
            obstacles = OpenNero.getSimContext().findInRay(
                agent.state.position,
                target.state.position,
                constants.OBJECT_TYPE_OBSTACLE | agent.get_team(),
                True)
            if len(obstacles) == 0:
                self.get_state(target).curr_damage += 1
                R[constants.FITNESS_HIT_TARGET] = 1

        # avoid fire: penalize damage taken this step
        damage = state.update_damage()
        R[constants.FITNESS_AVOID_FIRE] = -damage

        # pack the dimensions into the reward vector in canonical order
        for i, f in enumerate(constants.FITNESS_DIMENSIONS):
            reward[i] = R[f]

        return reward
コード例 #24
0
ファイル: client.py プロジェクト: DavidDeAngelo/opennero
def ClientMain():
    """Client entry point: set up the map, sky box, GUI state, camera, and io map."""
    global modify_object_id
    global object_ids
    global guiMan

    OpenNero.disable_ai()

    # if the map cannot be set up, bail back to the hub screen
    if not module.getMod().setup_map():
        switchToHub()
        return

    # add a light source
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)

    common.addSkyBox("data/sky/irrlicht2")

    # setup the gui and reset the per-object bookkeeping maps
    guiMan = common.getGuiManager()
    object_ids = {}
    modify_object_id = {}

    # add a camera
    camRotateSpeed = 100
    camMoveSpeed   = 15000
    camZoomSpeed   = 200
    cam = OpenNero.getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)

    def recenter(cam):
        # returns a closure that snaps the camera back to the default view
        def closure():
            cam.setPosition(OpenNero.Vector3f(0, 0, 100))
            cam.setTarget(OpenNero.Vector3f(100, 100, 0))
        return closure

    recenter_cam = recenter(cam)
    recenter_cam()

    # create the io map; SPACE recenters the camera
    ioMap = createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
コード例 #25
0
    def queue_init(self):
        """Yield the scripted action sequence that solves the towers puzzle.

        The numeric lists are pre-recorded action codes for moving a disk
        between each pair of pegs (a/b/c); dohanoi() composes them recursively.
        """
        self.init_queue = [1,5]
        self.atob = [5,1,4,3,4,1,5,2,]
        self.btoa = [3,5,1,4,2,4,1,5,]
        self.atoc = [5,1,4,3,4,1,1,5,2,5,1,4,]
        self.ctoa = [4,1,5,3,5,1,1,4,2,4,1,5,]
        self.btoc = [3,4,1,5,2,5,1,4,]
        self.ctob = [4,1,5,3,5,1,4,2,]
        self.end_queue = [0,0,0,5,5,1]

        from module import getMod
        self.num_towers = getMod().num_towers

        #self.state.label = 'Starting to Solve!'
        for a in self.init_queue:
            yield a
        # recursive solution moving the stack from 'b' to 'a' via 'c'
        for a in self.dohanoi(self.num_towers, 'b', 'a', 'c'):
            yield a
        #self.state.label = 'Problem Solved!'
        for a in self.end_queue:
            yield a
コード例 #26
0
ファイル: main.py プロジェクト: tonyle9/opennero
def ModMain(mode=""):
    """Mod entry point; constructs the NERO_Battle module, then runs the client."""
    # getMod() lazily instantiates the module singleton as a side effect
    module.getMod()
    client.ClientMain()
コード例 #27
0
 def set_spawn():
     """Set the current team's spawn point to the clicked location."""
     module.getMod().set_spawn(location.x, location.y)
コード例 #28
0
ファイル: agent.py プロジェクト: DavidDeAngelo/opennero
 def __init__(self, team=None, group='Agent'):
     """Initialize the agent; defaults to the module's current team."""
     self.team = team or module.getMod().curr_team
     self.group = group
     # build the AgentInitInfo from the subclass-provided (sensor, action,
     # reward) bound tuple
     self.info = OpenNero.AgentInitInfo(*self.agent_info_tuple())
コード例 #29
0
ファイル: client.py プロジェクト: bradyz/cs343
 def place_flag():
     """Move the flag to the clicked location (z is ground level)."""
     module.getMod().change_flag([location.x, location.y, 0])
コード例 #30
0
ファイル: client.py プロジェクト: DavidDeAngelo/opennero
 def place_basic_turret():
     """Place a turret at the clicked location and remember its allowed edits."""
     obj_id = module.getMod().place_basic_turret([location.x, location.y, 0])
     # turrets can later be moved or removed via the context menu
     object_ids[obj_id] = set(['move', 'remove'])
コード例 #31
0
ファイル: main.py プロジェクト: baviera08/opennero
def ModMain(mode = ""):
    """Mod entry point; 'mode' is accepted but unused here."""
    module.getMod()  # initialize the NERO_Battle module.
    client.ClientMain()
コード例 #32
0
ファイル: client.py プロジェクト: tonyle9/opennero
 def remove_flag():
     """Remove the flag from the field."""
     module.getMod().remove_flag()
コード例 #33
0
    def calculate_reward(self, agent, action, scored_hit = False):
        """Compute the per-step fitness reward for an agent.

        Returns either a single scalar (weighted sum of all fitness
        dimensions) or one component per dimension, depending on the size of
        the reward vector. Dead agents get a zero reward.
        """
        reward = agent.info.reward.get_instance()

        state = self.get_state(agent)
        friends, foes = self.getFriendFoe(agent)

        # dead agents (damage past the hitpoint limit) earn nothing further
        if agent.group != 'Turret' and self.hitpoints > 0 and state.total_damage >= self.hitpoints:
            return reward

        # start every fitness dimension at zero
        R = dict((f, 0) for f in constants.FITNESS_DIMENSIONS)

        # standing ground: penalize forward/backward movement magnitude
        R[constants.FITNESS_STAND_GROUND] = -abs(action[0])

        # stick together: penalize squared distance to the nearest friend
        friend = self.nearest(state.pose, friends)
        if friend:
            d = self.distance(self.get_state(friend).pose, state.pose)
            R[constants.FITNESS_STICK_TOGETHER] = -d * d

        # approach enemy: penalize squared distance to the nearest foe
        foe = self.nearest(state.pose, foes)
        if foe:
            d = self.distance(self.get_state(foe).pose, state.pose)
            R[constants.FITNESS_APPROACH_ENEMY] = -d * d

        # approach flag: penalize squared distance to the flag, if placed
        f = module.getMod().flag_loc
        if f:
            d = self.distance(state.pose, (f.x, f.y))
            R[constants.FITNESS_APPROACH_FLAG] = -d * d

#        target = self.target(agent)
#        if target is not None:
#            source_pos = agent.state.position
#            target_pos = target.state.position
#            source_pos.z = source_pos.z + 5
#            target_pos.z = target_pos.z + 5
#            dist = target_pos.getDistanceFrom(source_pos)
#            d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
#            if random.random() < d/2: # attempt a shot depending on distance
#                team_color = constants.TEAM_LABELS[agent.get_team()]
#                if team_color == 'red':
#                    color = OpenNero.Color(255, 255, 0, 0)
#                elif team_color == 'blue':
#                    color = OpenNero.Color(255, 0, 0, 255)
#                else:
#                    color = OpenNero.Color(255, 255, 255, 0)
#                wall_color = OpenNero.Color(128, 0, 255, 0)
#                obstacles = OpenNero.getSimContext().findInRay(
#                    source_pos,
#                    target_pos,
#                    constants.OBJECT_TYPE_OBSTACLE,
#                    True,
#                    wall_color,
#                    color)
#                if len(obstacles) == 0 and random.random() < d/2:
#                    # count as hit depending on distance
#                    self.get_state(target).curr_damage += 1
#                    R[constants.FITNESS_HIT_TARGET] = 1

        # hit target: the caller tells us whether this step scored a hit
        if scored_hit:
            R[constants.FITNESS_HIT_TARGET] = 1

        # avoid fire: penalize damage taken this step
        damage = state.update_damage()
        R[constants.FITNESS_AVOID_FIRE] = -damage

        if len(reward) == 1:
            # scalar reward: weighted, scale-normalized sum of all dimensions
            for i, f in enumerate(constants.FITNESS_DIMENSIONS):
                reward[0] += self.reward_weights[f] * R[f] / constants.FITNESS_SCALE.get(f, 1.0)
                #print f, self.reward_weights[f], R[f] / constants.FITNESS_SCALE.get(f, 1.0)
        else:
            # vector reward: one component per fitness dimension
            for i, f in enumerate(constants.FITNESS_DIMENSIONS):
                reward[i] = R[f]

        return reward
コード例 #34
0
ファイル: client.py プロジェクト: tonyle9/opennero
 def place_basic_turret():
     """Place a turret at the clicked location and remember its allowed edits."""
     obj_id = module.getMod().place_basic_turret(
         [location.x, location.y, 0])
     # turrets can later be moved or removed via the context menu
     object_ids[obj_id] = set(['move', 'remove'])
コード例 #35
0
ファイル: client.py プロジェクト: tonyle9/opennero
 def place_flag():
     """Move the flag to the clicked location (z is ground level)."""
     module.getMod().change_flag([location.x, location.y, 0])
コード例 #36
0
ファイル: client.py プロジェクト: tonyle9/opennero
 def set_spawn_2():
     """Set team 1's spawn point to the clicked location."""
     module.getMod().set_spawn(location.x, location.y,
                               constants.OBJECT_TYPE_TEAM_1)
コード例 #37
0
ファイル: agent.py プロジェクト: thermalpilot/opennero
 def __init__(self, team=None, group='Agent'):
     """Initialize the agent; defaults to the module's current team."""
     self.team = team or module.getMod().curr_team
     self.group = group
     # build the AgentInitInfo from the subclass-provided (sensor, action,
     # reward) bound tuple
     self.info = OpenNero.AgentInitInfo(*self.agent_info_tuple())
コード例 #38
0
    def queue_init(self):
        """Yield the scripted action sequence that solves the towers puzzle.

        The numeric lists are pre-recorded action codes for moving a disk
        between each pair of pegs (a/b/c); dohanoi() composes them recursively.
        """
        self.init_queue = [1, 5]
        self.atob = [
            5,
            1,
            4,
            3,
            4,
            1,
            5,
            2,
        ]
        self.btoa = [
            3,
            5,
            1,
            4,
            2,
            4,
            1,
            5,
        ]
        self.atoc = [
            5,
            1,
            4,
            3,
            4,
            1,
            1,
            5,
            2,
            5,
            1,
            4,
        ]
        self.ctoa = [
            4,
            1,
            5,
            3,
            5,
            1,
            1,
            4,
            2,
            4,
            1,
            5,
        ]
        self.btoc = [
            3,
            4,
            1,
            5,
            2,
            5,
            1,
            4,
        ]
        self.ctob = [
            4,
            1,
            5,
            3,
            5,
            1,
            4,
            2,
        ]
        self.end_queue = [0, 0, 0, 5, 5, 1]

        from module import getMod
        self.num_towers = getMod().num_towers

        #self.state.label = 'Starting to Solve!'
        for a in self.init_queue:
            yield a
        # recursive solution moving the stack from 'b' to 'a' via 'c'
        for a in self.dohanoi(self.num_towers, 'b', 'a', 'c'):
            yield a
        #self.state.label = 'Problem Solved!'
        for a in self.end_queue:
            yield a
コード例 #39
0
ファイル: environment.py プロジェクト: tonyle9/opennero
    def initialize_blocks(self):
        """Create/update the Towers-of-Hanoi disk blocks for num_disks disks
        and link their above/below pointers to reflect the initial stack."""
        from module import getMod
        num_disks = getMod().num_disks

        # for each color: create the block if missing, otherwise reposition it
        if num_disks >= 1:
            if not self.get_block_state('blue'):
                self.add_block("data/shapes/cube/BlueCube.xml", 0, 1, 0, 5,
                               'blue')
            else:
                self.set_block('blue', 0, 1, 0, 5)

        bstate = self.get_block_state('blue')

        if num_disks >= 2:
            if not self.get_block_state('green'):
                self.add_block('data/shapes/cube/GreenCube.xml', 0, 1, 1, 4,
                               'green')
            else:
                self.set_block('green', 0, 1, 1, 4)

        gstate = self.get_block_state('green')

        if num_disks >= 3:
            if not self.get_block_state('yellow'):
                self.add_block('data/shapes/cube/YellowCube.xml', 0, 1, 2, 3,
                               'yellow')
            else:
                self.set_block('yellow', 0, 1, 2, 3)

        ystate = self.get_block_state('yellow')

        # red and white are removed again when num_disks shrinks below them
        if num_disks >= 4:
            if not self.get_block_state('red'):
                self.add_block('data/shapes/cube/RedCube.xml',
                               0,
                               1,
                               3,
                               2,
                               'red',
                               scaler=(1.0 / 2.5))
            else:
                self.set_block('red', 0, 1, 3, 2)

        elif self.get_block_state('red'):
            self.remove_block('red')

        rstate = self.get_block_state('red')

        if num_disks >= 5:
            if not self.get_block_state('white'):
                self.add_block('data/shapes/cube/BlueCube.xml', 0, 1, 4, 1,
                               'white')
            else:
                self.set_block('white', 0, 1, 4, 1)

        elif self.get_block_state('white'):
            self.remove_block('white')

        wstate = self.get_block_state('white')

        # wire up the stacking order (bottom: blue ... top: white)
        # NOTE(review): bstate.above/gstate.below are set unconditionally, so
        # this looks like it assumes num_disks >= 2 — confirm with callers
        bstate.above = gstate
        gstate.below = bstate
        if num_disks > 2: gstate.above = ystate
        if num_disks > 2: ystate.below = gstate
        if num_disks > 3: ystate.above = rstate
        if num_disks > 3: rstate.below = ystate
        if num_disks > 4: rstate.above = wstate
        if num_disks > 4: wstate.below = rstate

        print 'Initialized TowerEnvironment'
コード例 #40
0
ファイル: client.py プロジェクト: bradyz/cs343
 def set_spawn():
     """Set the current team's spawn point to the clicked location."""
     module.getMod().set_spawn(location.x, location.y)
コード例 #41
0
 def place_basic_turret():
     """Place a turret at the clicked location (z is ground level)."""
     module.getMod().place_basic_turret([location.x, location.y, 0])
コード例 #42
0
 def closure():
     """Remove all bots and flip the add/remove buttons accordingly."""
     removeBotsButton.enabled = False
     addBotsButton.enabled = True
     getMod().remove_bots()
コード例 #43
0
ファイル: client.py プロジェクト: DavidDeAngelo/opennero
 def set_spawn_2():
     """Set team 1's spawn point to the clicked location."""
     module.getMod().set_spawn(location.x, location.y, constants.OBJECT_TYPE_TEAM_1)
コード例 #44
0
 def closure():
     """Add bots of the selected type/count and flip the add/remove buttons."""
     removeBotsButton.enabled = True
     addBotsButton.enabled = False
     # bot type and count are read from the GUI text boxes
     getMod().add_bots(botTypeBox.text, numBotBox.text)
コード例 #45
0
ファイル: client.py プロジェクト: DavidDeAngelo/opennero
 def remove_flag():
     """Remove the flag from the field."""
     module.getMod().remove_flag()
コード例 #46
0
ファイル: client.py プロジェクト: bradyz/cs343
 def place_basic_turret():
     """Place a turret at the clicked location (z is ground level)."""
     module.getMod().place_basic_turret([location.x, location.y, 0])
コード例 #47
0
 def closure():
     """Remove all bots and flip the add/remove buttons accordingly."""
     removeBotsButton.enabled = False
     addBotsButton.enabled = True
     getMod().remove_bots()
コード例 #48
0
 def closure():
     """Add bots of the selected type/count and flip the add/remove buttons."""
     removeBotsButton.enabled = True
     addBotsButton.enabled = False
     # bot type and count are read from the GUI text boxes
     getMod().add_bots(botTypeBox.text, numBotBox.text)