Example #1
    def closest_enemy(self, agent):
        """
        Returns the nearest enemy to the given agent.
        """
        friends, foes = self.getFriendFoe(agent)
        if not foes:
            return None

        min_enemy = None
        min_dist = constants.MAX_FIRE_ACTION_RADIUS
        pose = self.get_state(agent).pose
        color = OpenNero.Color(128, 0, 0, 0)
        for f in foes:
            f_pose = self.get_state(f).pose
            dist = self.distance(pose, f_pose)
            if dist < min_dist:
                source_pos = agent.state.position
                enemy_pos = f.state.position
                source_pos.z = source_pos.z + 5
                enemy_pos.z = enemy_pos.z + 5
                obstacles = OpenNero.getSimContext().findInRay(
                    source_pos,
                    enemy_pos,
                    constants.OBJECT_TYPE_OBSTACLE,
                    False,
                    color,
                    color)
                if len(obstacles) == 0:
                    min_enemy = f
                    min_dist = dist
        return min_enemy
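
The obstacle test inside the loop above can be read on its own as a line-of-sight predicate. Below is a minimal sketch of that pattern, assuming the same OpenNero and constants imports as the listing; has_line_of_sight is a hypothetical helper name, not part of the module.

def has_line_of_sight(source_pos, target_pos):
    # Sketch restating the ray test used in closest_enemy: raise both
    # endpoints slightly so the ray clears the ground, then ask the sim
    # context whether any obstacle lies between them. Note that, like the
    # listing, this mutates the position objects it is given.
    source_pos.z = source_pos.z + 5
    target_pos.z = target_pos.z + 5
    color = OpenNero.Color(128, 0, 0, 0)
    obstacles = OpenNero.getSimContext().findInRay(
        source_pos,
        target_pos,
        constants.OBJECT_TYPE_OBSTACLE,
        False,
        color,
        color)
    return len(obstacles) == 0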
Example #2
    def step(self, agent, action):
        """
        A step for an agent
        """
        # if this agent has a serialized representation waiting, load it.
        chunk = self.agents_to_load.get(agent.state.id)
        if chunk is not None:
            print 'loading agent', agent.state.id, 'from', len(chunk), 'bytes'
            del self.agents_to_load[agent.state.id]
            try:
                agent.from_string(chunk)
            except:
                # if loading fails, remove this agent.
                print 'error loading agent', agent.state.id
                self.remove_agent(agent)

                # if a user has a badly formatted q-learning agent in a mixed
                # population file, the agent won't load and will be properly
                # removed here. however, RTNEAT has only allocated enough brainz
                # to cover (pop_size - num_qlearning_agents) agents, so whenever
                # it comes time to spawn new agents, RTNEAT will think that it
                # needs to spawn an extra agent to cover for this "missing" one.
                # to prevent this exception, we decrement pop_size here.
                #
                # this probably prevents teams from having the proper number of
                # agents if the user clicks on the deploy button after loading a
                # broken pop file ... but that's tricky to fix.
                constants.pop_size -= 1

                return agent.info.reward.get_instance()

        # set the epsilon for this agent, in case it's changed recently.
        agent.epsilon = self.epsilon

        state = self.get_state(agent)

        # Initialize agent state
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.info.reward.get_instance()

        # display agent info if necessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # get the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        closest_enemy = self.closest_enemy(agent)
        if firing_status:
            if closest_enemy is not None:
                pose = state.pose
                closest_enemy_pose = self.get_state(closest_enemy).pose
                relative_angle = self.angle(pose, closest_enemy_pose)
                if abs(relative_angle) <= 2:
                    source_pos = agent.state.position
                    closest_enemy_pos = closest_enemy.state.position
                    source_pos.z = source_pos.z + 5
                    closest_enemy_pos.z = closest_enemy_pos.z + 5
                    dist = closest_enemy_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.get_team()]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            closest_enemy_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(closest_enemy).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # tell the system to make the calculated motion
        # if the motion doesn't result in a collision
        dist = constants.MAX_MOVEMENT_SPEED * move_by
        heading = common.wrap_degrees(agent.state.rotation.z, turn_by)
        x = agent.state.position.x + dist * math.cos(math.radians(heading))
        y = agent.state.position.y + dist * math.sin(math.radians(heading))

        # manual collision detection
        desired_pose = (x, y, heading)

        collision_detected = False

        friends, foes = self.getFriendFoe(agent)
        for f in friends:
            if f != agent:
                f_state = self.get_state(f)
                # we impose an order on agents to avoid deadlocks. Without this
                # two agents which spawn very close to each other can never escape
                # each other's collision radius
                if state.id > f_state.id:
                    f_pose = f_state.pose
                    dist = self.distance(desired_pose, f_pose)
                    if dist < constants.MANUAL_COLLISION_DISTANCE:
                        collision_detected = True
                        continue

        # no need to check for collisions with all enemies
        #if foes:
        #    if not collision_detected:
        #        for f in foes:
        #            f_pose = self.get_state(f).pose
        #            dist = self.distance(desired_pose, f_pose)
        #            if dist < constants.MANUAL_COLLISION_DISTANCE:
        #                collision_detected = True
        #                continue

        # just check for collisions with the closest enemy
        if closest_enemy:
            if not collision_detected:
                f_pose = self.get_state(closest_enemy).pose
                dist = self.distance(desired_pose, f_pose)
                if dist < constants.MANUAL_COLLISION_DISTANCE:
                    collision_detected = True

        if not collision_detected:
            state.update_pose(move_by, turn_by)

        return reward
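
The shot-attempt probability in the listing is a single linear falloff in distance: a target at point-blank range is attempted with probability roughly 1/2, and a target at MAX_SHOT_RADIUS with probability 0. Below is a minimal standalone sketch of that calculation; hit_chance is a hypothetical helper name and the clamp at zero is an addition not present in the listing.

def hit_chance(dist, max_radius):
    # Restates the falloff used above:
    # d = (MAX_SHOT_RADIUS - dist) / MAX_SHOT_RADIUS, and a shot is
    # attempted when random.random() < d / 2. The listing does not clamp
    # at zero; the clamp only makes this helper safe for dist > max_radius.
    d = (max_radius - dist) / float(max_radius)
    return max(0.0, d / 2.0)

# usage mirroring the listing:
# if random.random() < hit_chance(dist, constants.MAX_SHOT_RADIUS):
#     attempt the shot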
Example #3
    def step(self, agent, action):
        """
        A step for an agent
        """
        state = self.get_state(agent)

        # Initialize agent state
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.rewards.get_instance()

        # display agent info if necessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # get the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        closest_enemy = self.closest_enemy(agent)
        if firing_status:
            if closest_enemy is not None:
                pose = state.pose
                closest_enemy_pose = self.get_state(closest_enemy).pose
                relative_angle = self.angle(pose, closest_enemy_pose)
                if abs(relative_angle) <= 2:
                    source_pos = agent.state.position
                    closest_enemy_pos = closest_enemy.state.position
                    source_pos.z = source_pos.z + 5
                    closest_enemy_pos.z = closest_enemy_pos.z + 5
                    dist = closest_enemy_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.team_type]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            closest_enemy_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(closest_enemy).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        team = self.get_team(agent)

        # tell the system to make the calculated motion
        # if the motion doesn't result in a collision
        dist = constants.MAX_MOVEMENT_SPEED * move_by
        heading = common.wrap_degrees(agent.state.rotation.z, turn_by)
        x = agent.state.position.x + dist * math.cos(math.radians(heading))
        y = agent.state.position.y + dist * math.sin(math.radians(heading))

        # manual collision detection
        desired_pose = (x, y, heading)

        collision_detected = False

        friends, foes = self.get_friend_foe(agent)
        for f in friends:
            if f != agent:
                f_state = self.get_state(f)
                # we impose an order on agents to avoid deadlocks. Without this
                # two agents which spawn very close to each other can never escape
                # each other's collision radius
                if state.id > f_state.id:
                    f_pose = f_state.pose
                    dist = self.distance(desired_pose, f_pose)
                    if dist < constants.MANUAL_COLLISION_DISTANCE:
                        collision_detected = True
                        continue

        # just check for collisions with the closest enemy
        if closest_enemy:
            if not collision_detected:
                f_pose = self.get_state(closest_enemy).pose
                dist = self.distance(desired_pose, f_pose)
                if dist < constants.MANUAL_COLLISION_DISTANCE:
                    collision_detected = True

        if not collision_detected:
            state.update_pose(move_by, turn_by)

        return reward

    def step(self, agent, action):
        """
        A step for an agent
        """
        # if this agent has a serialized representation waiting, load it.
        chunk = self.agents_to_load.get(agent.state.id)
        if chunk is not None:
            print 'loading agent', agent.state.id, 'from', len(chunk), 'bytes'
            del self.agents_to_load[agent.state.id]
            try:
                agent.from_string(chunk)
            except:
                # if loading fails, remove this agent.
                print 'error loading agent', agent.state.id
                self.remove_agent(agent)

                # if a user has a badly formatted q-learning agent in a mixed
                # population file, the agent won't load and will be properly
                # removed here. however, RTNEAT has only allocated enough brainz
                # to cover (pop_size - num_qlearning_agents) agents, so whenever
                # it comes time to spawn new agents, RTNEAT will think that it
                # needs to spawn an extra agent to cover for this "missing" one.
                # to prevent this exception, we decrement pop_size here.
                #
                # this probably prevents teams from having the proper number of
                # agents if the user clicks on the deploy button after loading a
                # broken pop file ... but that's tricky to fix.
                constants.pop_size -= 1

                return agent.info.reward.get_instance()

        # set the epsilon for this agent, in case it's changed recently.
        agent.epsilon = self.epsilon

        state = self.get_state(agent)

        # Initialize agent state
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.info.reward.get_instance()

        # display agent info if necessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # get the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        if firing_status:
            target = self.closest_enemy(agent)
            if target is not None:
                pose = state.pose
                target_pose = self.get_state(target).pose
                relative_angle = self.angle(pose, target_pose)
                if abs(relative_angle) <= 2:
                    source_pos = agent.state.position
                    target_pos = target.state.position
                    source_pos.z = source_pos.z + 5
                    target_pos.z = target_pos.z + 5
                    dist = target_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.get_team()]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            target_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(target).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # tell the system to make the calculated motion
        state.update_pose(move_by, turn_by)

        return reward
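
Examples #2 and #3 compute the proposed next pose by projecting the movement command along the agent's heading before (in the longer listings) running the manual collision pass; this final listing commits the move directly. Below is a minimal sketch of that projection, using only the math module; project_pose is a hypothetical name, and a plain modulo stands in for common.wrap_degrees, which is assumed to add the turn to the current heading and wrap it back into one revolution.

import math

def project_pose(x, y, rotation_z, move_by, turn_by, max_speed):
    # Sketch of the motion step in the listings above: scale the speed
    # command, turn the heading, then step forward along the new heading.
    heading = (rotation_z + turn_by) % 360.0
    dist = max_speed * move_by
    new_x = x + dist * math.cos(math.radians(heading))
    new_y = y + dist * math.sin(math.radians(heading))
    return new_x, new_y, heading

# usage mirroring the listings:
# desired_pose = project_pose(agent.state.position.x, agent.state.position.y,
#                             agent.state.rotation.z, move_by, turn_by,
#                             constants.MAX_MOVEMENT_SPEED)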