Beispiel #1
0
def ClientMain():
    """Client entry point: configure the sim, GUI, camera, and key bindings."""
    # AI stays off until the user starts it; physics is disabled as well
    OpenNero.disable_ai()

    # a failed map setup sends the user back to the hub
    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return

    # light the scene from above
    OpenNero.getSimContext().addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)

    # build the user interface
    CreateGui(common.getGuiManager())

    # camera: rotate/move/zoom speeds, wide far plane, no edge scrolling
    rotate_speed = 100
    move_speed = 15000
    zoom_speed = 200
    cam = OpenNero.getSimContext().addCamera(rotate_speed, move_speed, zoom_speed)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    recenter_cam = recenter(cam)
    recenter_cam()

    # wire up the keyboard: SPACE recenters the camera
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
Beispiel #2
0
def ClientMain():
    """Prepare the client: AI off, map, light, GUI, camera, and input map."""
    # physics remains off; AI is disabled until explicitly enabled
    OpenNero.disable_ai()

    # bail out to the hub if the map cannot be constructed
    if not module.getMod().setup_map():
        inputConfig.switchToHub()
        return

    # place a single bright light above the scene
    light_pos = OpenNero.Vector3f(500, -500, 1000)
    OpenNero.getSimContext().addLightSource(light_pos, 1500)

    # build the GUI widgets
    CreateGui(common.getGuiManager())

    # configure the camera (rotate speed, move speed, zoom speed)
    cam = OpenNero.getSimContext().addCamera(100, 15000, 200)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)
    recenter_cam = recenter(cam)
    recenter_cam()

    # SPACE snaps the camera back to its start pose
    ioMap = inputConfig.createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
Beispiel #3
0
 def snapshot(self):
     """Save a screenshot from the active camera and open it in a viewer."""
     print 'snapshot was called'
     # proceed only if the snapshot directory is writable
     if os.access('Hw5/snapshots/color/', os.W_OK):
         # file name from the current time in centiseconds; [:-2] trims the
         # last two characters of the float's string form -- presumably the
         # decimal remainder (TODO confirm intended precision)
         filename = 'Hw5/snapshots/color/' + str(time.time()*100)[:-2] + '.png'
         OpenNero.getSimContext().getActiveCamera().snapshot(filename)
         # Launch python script to show this image
         os.system('python Hw5/show_image.py "' + filename + '"')
Beispiel #4
0
 def snapshot(self):
     """Capture the active camera view to a timestamped PNG and display it."""
     print 'snapshot was called'
     # only write when the target directory is writable
     if os.access('Hw5/snapshots/color/', os.W_OK):
         # timestamp-based name; [:-2] drops the last two characters of the
         # stringified float (presumably the decimal remainder -- verify)
         filename = 'Hw5/snapshots/color/' + str(
             time.time() * 100)[:-2] + '.png'
         OpenNero.getSimContext().getActiveCamera().snapshot(filename)
         # Launch python script to show this image
         os.system('python Hw5/show_image.py "' + filename + '"')
Beispiel #5
0
    def closest_enemy(self, agent):
        """
        Return the foe nearest to agent that is in clear line of sight,
        or None when no such enemy exists.
        """
        friends, foes = self.getFriendFoe(agent)
        if not foes:
            return None

        nearest = None
        best_dist = constants.MAX_FIRE_ACTION_RADIUS
        my_pose = self.get_state(agent).pose
        ray_color = OpenNero.Color(128, 0, 0, 0)
        for foe in foes:
            foe_dist = self.distance(my_pose, self.get_state(foe).pose)
            if foe_dist >= best_dist:
                continue
            # raise the ray endpoints off the ground before the trace
            start = agent.state.position
            end = foe.state.position
            start.z = start.z + 5
            end.z = end.z + 5
            blockers = OpenNero.getSimContext().findInRay(
                start, end, constants.OBJECT_TYPE_OBSTACLE, False,
                ray_color, ray_color)
            if len(blockers) == 0:
                nearest = foe
                best_dist = foe_dist
        return nearest
Beispiel #6
0
 def setup_sandbox(self):
     """
     Build and register the sandbox environment.
     """
     # run at full speed: no per-step rendering delay
     OpenNero.getSimContext().delay = 0.0
     env = RoombaEnvironment(constants.XDIM, constants.YDIM)
     self.environment = env
     OpenNero.set_environment(env)
    def closest_enemy(self, agent):
        """
        Return the unobstructed enemy closest to agent, or None.
        """
        friends, foes = self.get_friend_foe(agent)
        if not foes:
            return None

        winner = None
        cutoff = constants.MAX_FIRE_ACTION_RADIUS
        own_pose = self.get_state(agent).pose
        ray_tint = OpenNero.Color(128, 0, 0, 0)
        for enemy in foes:
            enemy_pose = self.get_state(enemy).pose
            separation = self.distance(own_pose, enemy_pose)
            if separation < cutoff:
                src = agent.state.position
                dst = enemy.state.position
                # lift the ray endpoints off the ground before tracing
                src.z = src.z + 5
                dst.z = dst.z + 5
                hits = OpenNero.getSimContext().findInRay(
                    src, dst, constants.OBJECT_TYPE_OBSTACLE, False,
                    ray_tint, ray_tint)
                if not hits:
                    winner = enemy
                    cutoff = separation
        return winner
Beispiel #8
0
def ClientMain():
    """Entry point: map, lighting, skybox, GUI state, camera, input bindings."""
    global modify_object_id
    global object_ids
    global guiMan

    # agents stay idle until AI is explicitly enabled
    OpenNero.disable_ai()

    # return to the hub when the map cannot be built
    if not module.getMod().setup_map():
        switchToHub()
        return

    # illuminate the scene
    OpenNero.getSimContext().addLightSource(
        OpenNero.Vector3f(500, -500, 1000), 1500)

    common.addSkyBox("data/sky/irrlicht2")

    # GUI manager plus the object bookkeeping shared with the mouse handlers
    guiMan = common.getGuiManager()
    object_ids = {}
    modify_object_id = {}

    # camera: rotate/move/zoom speeds, wide far plane, no edge scrolling
    cam = OpenNero.getSimContext().addCamera(100, 15000, 200)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)

    def recenter(cam):
        # closure that snaps the camera back to its home pose
        def closure():
            cam.setPosition(OpenNero.Vector3f(0, 0, 100))
            cam.setTarget(OpenNero.Vector3f(100, 100, 0))

        return closure

    recenter_cam = recenter(cam)
    recenter_cam()

    # SPACE recenters the camera
    ioMap = createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    OpenNero.getSimContext().setInputMapping(ioMap)
Beispiel #9
0
def addObject(templateFile,
              position,
              rotation=None,
              scale=None,
              label="",
              type=0,
              collision=0):
    """
    Add an object to the simulation and return its id.

    templateFile -- XML template describing the object
    position     -- world position (Vector3f)
    rotation     -- orientation; defaults to (0, 0, 0)
    scale        -- size multiplier; defaults to (1, 1, 1)
    label        -- optional display label
    type         -- object type bitmask
    collision    -- collision mask
    """
    # Build default vectors per call: the original used OpenNero.Vector3f
    # instances as default arguments, which are created once at definition
    # time and shared (and mutable) across every call.
    if rotation is None:
        rotation = OpenNero.Vector3f(0, 0, 0)
    if scale is None:
        scale = OpenNero.Vector3f(1, 1, 1)
    # NOTE: the sim API takes collision before type, opposite to this wrapper
    return OpenNero.getSimContext().addObject(templateFile, position, rotation,
                                              scale, label, collision, type)
Beispiel #10
0
def ClientMain():
    """Set up the client: map, light, skybox, GUI globals, camera, keys."""
    global modify_object_id
    global object_ids
    global guiMan

    OpenNero.disable_ai()  # start with AI paused

    if not module.getMod().setup_map():
        # map construction failed; hand control back to the hub
        switchToHub()
        return

    sim_context = OpenNero.getSimContext()

    # light source above the field
    sim_context.addLightSource(OpenNero.Vector3f(500, -500, 1000), 1500)

    common.addSkyBox("data/sky/irrlicht2")

    # shared GUI state consumed by the context-menu / mouse handlers
    guiMan = common.getGuiManager()
    object_ids = {}
    modify_object_id = {}

    # create the camera with rotate/move/zoom speeds
    rotate = 100
    move = 15000
    zoom = 200
    cam = sim_context.addCamera(rotate, move, zoom)
    cam.setFarPlane(40000)
    cam.setEdgeScroll(False)

    def recenter(cam):
        """Return a callable that restores the camera's default pose."""
        def closure():
            cam.setPosition(OpenNero.Vector3f(0, 0, 100))
            cam.setTarget(OpenNero.Vector3f(100, 100, 0))
        return closure

    recenter_cam = recenter(cam)
    recenter_cam()

    # bind SPACE to the recenter action
    ioMap = createInputMapping()
    ioMap.BindKey("KEY_SPACE", "onPress", recenter_cam)
    sim_context.setInputMapping(ioMap)
Beispiel #11
0
def mouse_action():
    """Apply the pending move/rotate/scale edit to the tracked object."""
    import math
    global modify_object_id
    global object_ids

    # nothing selected for modification
    if len(modify_object_id) == 0:
        return

    sim_context = OpenNero.getSimContext()

    cursor = sim_context.getMousePosition()
    location = sim_context.getClickedPosition(cursor.x, cursor.y)

    if 'move' in modify_object_id:
        # drag: follow the mouse at the standard wall height
        new_pos = OpenNero.Vector3f(location.x, location.y,
                                    constants.HEIGHT + constants.OFFSET)
        sim_context.setObjectPosition(modify_object_id['move'], new_pos)

    if 'rot' in modify_object_id:
        # rotate: face the object toward the mouse position
        position = sim_context.getObjectPosition(modify_object_id['rot'])
        angle = math.atan2(location.x - position.x, location.y - position.y)
        sim_context.setObjectRotation(
            modify_object_id['rot'],
            OpenNero.Vector3f(0, 0, -math.degrees(angle)))

    if 'scale' in modify_object_id:
        position = sim_context.getObjectPosition(modify_object_id['scale'])
        rotation = sim_context.getObjectRotation(modify_object_id['scale'])
        theta = math.radians(rotation.z)

        dx = location.x - position.x
        dy = location.y - position.y
        # mouse position expressed in the object's local frame
        localx = dx * math.cos(theta) + dy * math.sin(theta)
        localy = -dx * math.sin(theta) + dy * math.cos(theta)

        # grow on the positive side of each axis, shrink on the negative side
        scalex = 1 + math.fabs(localx)
        scaley = 1 + math.fabs(localy)
        if localx < 0:
            scalex = 1 / scalex
        if localy < 0:
            scaley = 1 / scaley

        prev_scale = sim_context.getObjectScale(modify_object_id['scale'])
        sim_context.setObjectScale(
            modify_object_id['scale'],
            OpenNero.Vector3f(scalex, scaley, prev_scale.z))
Beispiel #12
0
def mouse_action():
    """Perform the in-progress object edit (move / rotate / scale) at the cursor."""
    import math

    global modify_object_id
    global object_ids

    if len(modify_object_id) == 0:
        return  # no edit in progress

    ctx = OpenNero.getSimContext()

    cursor = ctx.getMousePosition()
    location = ctx.getClickedPosition(cursor.x, cursor.y)

    if "move" in modify_object_id:
        target = modify_object_id["move"]
        ctx.setObjectPosition(
            target,
            OpenNero.Vector3f(location.x, location.y,
                              constants.HEIGHT + constants.OFFSET))

    if "rot" in modify_object_id:
        target = modify_object_id["rot"]
        position = ctx.getObjectPosition(target)
        # heading from the object to the cursor
        angle = math.atan2(location.x - position.x, location.y - position.y)
        ctx.setObjectRotation(target,
                              OpenNero.Vector3f(0, 0, -math.degrees(angle)))

    if "scale" in modify_object_id:
        target = modify_object_id["scale"]
        position = ctx.getObjectPosition(target)
        rotation = ctx.getObjectRotation(target)
        theta = math.radians(rotation.z)

        # rotate the cursor offset into the object's frame of reference
        off_x = location.x - position.x
        off_y = location.y - position.y
        localx = off_x * math.cos(theta) + off_y * math.sin(theta)
        localy = -off_x * math.sin(theta) + off_y * math.cos(theta)

        # scale factor > 1 on the positive side of each axis, < 1 otherwise
        scalex = 1 + math.fabs(localx)
        scaley = 1 + math.fabs(localy)
        if localx < 0:
            scalex = 1 / scalex
        if localy < 0:
            scaley = 1 / scaley

        prev_scale = ctx.getObjectScale(target)
        ctx.setObjectScale(target,
                           OpenNero.Vector3f(scalex, scaley, prev_scale.z))
Beispiel #13
0
    def calculate_reward(self, agent, action):
        """
        Compute the multi-dimensional fitness reward for agent's action.
        """
        reward = self.agent_info.reward.get_instance()
        state = self.get_state(agent)
        friends, foes = self.getFriendFoe(agent)

        # start every fitness dimension at zero
        R = dict.fromkeys(constants.FITNESS_DIMENSIONS, 0)

        # standing ground penalizes any forward/backward motion
        R[constants.FITNESS_STAND_GROUND] = -abs(action[0])

        # cohesion: negative squared distance to the nearest teammate
        friend = self.nearest(state.pose, friends)
        if friend:
            d = self.distance(self.get_state(friend).pose, state.pose)
            R[constants.FITNESS_STICK_TOGETHER] = -d * d

        # aggression: negative squared distance to the nearest opponent
        foe = self.nearest(state.pose, foes)
        if foe:
            d = self.distance(self.get_state(foe).pose, state.pose)
            R[constants.FITNESS_APPROACH_ENEMY] = -d * d

        # flag seeking: negative squared distance to the flag, if placed
        f = module.getMod().flag_loc
        if f:
            d = self.distance(state.pose, (f.x, f.y))
            R[constants.FITNESS_APPROACH_FLAG] = -d * d

        # marksmanship: credit a hit when the ray to the target is clear
        target = self.target(agent)
        if target is not None:
            obstacles = OpenNero.getSimContext().findInRay(
                agent.state.position,
                target.state.position,
                constants.OBJECT_TYPE_OBSTACLE | agent.get_team(),
                True)
            if len(obstacles) == 0:
                self.get_state(target).curr_damage += 1
                R[constants.FITNESS_HIT_TARGET] = 1

        # survival: penalize damage taken this step
        damage = state.update_damage()
        R[constants.FITNESS_AVOID_FIRE] = -damage

        # pack the dimension values into the reward vector, canonical order
        for i, dim in enumerate(constants.FITNESS_DIMENSIONS):
            reward[i] = R[dim]

        return reward
    def step(self, agent, action):
        """
        2A step for an agent
        """
        # the action must lie within the declared action bounds
        assert self.agent_info.actions.validate(action)

        state = self.get_state(agent)

        # first step: place the agent at its spawn pose, zero reward
        if agent.step == 0 and agent.group != "Turret":
            spawn_pos = copy.copy(agent.state.position)
            spawn_rot = copy.copy(agent.state.rotation)
            if agent.group == "Agent":
                # regular agents start with a random heading
                spawn_rot.z = random.randrange(360)
                agent.state.rotation = spawn_rot
            state.reset_pose(spawn_pos, spawn_rot)
            return self.agent_info.reward.get_instance()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # decode the requested motion: distance and turn angle (degrees)
        move_by = action[0]
        turn_by = math.degrees(action[1])

        # drive the run animation proportionally to the motion
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        if delay > 0.0:  # animation only matters when the sim is rendering
            agent.state.animation_speed = move_by * 28.0 / delay

        reward = self.calculate_reward(agent, action)

        # commit the motion to the simulation
        state.update_pose(move_by, turn_by)

        return reward
Beispiel #15
0
    def step(self, agent, action):
        """
        2A step for an agent
        """
        # reject actions outside the declared bounds
        assert self.agent_info.actions.validate(action)

        state = self.get_state(agent)

        # very first step: reset to the spawn pose and hand back zero reward
        if agent.step == 0 and agent.group != "Turret":
            start_pos = copy.copy(agent.state.position)
            start_rot = copy.copy(agent.state.rotation)
            if agent.group == "Agent":
                start_rot.z = random.randrange(360)  # random initial heading
                agent.state.rotation = start_rot
            state.reset_pose(start_pos, start_rot)
            return self.agent_info.reward.get_instance()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # requested motion: forward distance and turn angle in degrees
        move_by = action[0]
        turn_by = math.degrees(action[1])

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        if delay > 0.0:
            # scale the run animation with movement speed when rendering
            agent.state.animation_speed = move_by * 28.0 / delay

        reward = self.calculate_reward(agent, action)

        # tell the system to make the calculated motion
        state.update_pose(move_by, turn_by)

        return reward
Beispiel #16
0
 def set_speedup(self, speedup):
     """Set the per-step simulation delay from a speedup value.

     Assumes speedup is in [0, 100]: 0 gives a 1.0 s delay (slowest),
     100 gives no delay (fastest). (TODO confirm the expected range.)
     """
     OpenNero.getSimContext().delay = 1.0 - (speedup / 100.0)
     print 'speedup delay', OpenNero.getSimContext().delay
Beispiel #17
0
    def step(self, agent, action):
        """
        2A step for an agent: restore any pending serialized brain, decode
        the speed/turn/fire action, resolve firing, and return the reward.
        """
        # if this agent has a serialized representation waiting, load it.
        chunk = self.agents_to_load.get(agent.state.id)
        if chunk is not None:
            print 'loading agent', agent.state.id, 'from', len(chunk), 'bytes'
            del self.agents_to_load[agent.state.id]
            try:
                agent.from_string(chunk)
            except:
                # NOTE(review): bare except deliberately swallows any
                # deserialization error so one bad entry cannot crash the sim.
                # if loading fails, remove this agent.
                print 'error loading agent', agent.state.id
                self.remove_agent(agent)

                # if a user has a badly formatted q-learning agent in a mixed
                # population file, the agent won't load and will be properly
                # removed here. however, RTNEAT has only allocated enough brainz
                # to cover (pop_size - num_qlearning_agents) agents, so whenever
                # it comes time to spawn new agents, RTNEAT will think that it
                # needs to spawn an extra agent to cover for this "missing" one.
                # to prevent this exception, we decrement pop_size here.
                #
                # this probably prevents teams from having the proper number of
                # agents if the user clicks on the deploy button after loading a
                # broken pop file ... but that's tricky to fix.
                constants.pop_size -= 1

                return agent.info.reward.get_instance()

        # set the epsilon for this agent, in case it's changed recently.
        agent.epsilon = self.epsilon

        state = self.get_state(agent)

        # Initialize agent state on its very first step: non-turret agents
        # are reset to their spawn pose; regular agents get a random heading.
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.info.reward.get_instance()

        # display agent info if necessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # get the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        # the fire output is thresholded into a boolean decision
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        if firing_status:
            target = self.closest_enemy(agent)
            if target is not None:
                pose = state.pose
                target_pose = self.get_state(target).pose
                relative_angle = self.angle(pose, target_pose)
                if abs(relative_angle) <= 2:
                    # target is inside the 2-degree firing cone: try a shot
                    source_pos = agent.state.position
                    target_pos = target.state.position
                    # lift both ray endpoints off the ground before tracing
                    source_pos.z = source_pos.z + 5
                    target_pos.z = target_pos.z + 5
                    dist = target_pos.getDistanceFrom(source_pos)
                    # shot probability decreases linearly with distance
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        # tracer colors depend on the shooter's team
                        team_color = constants.TEAM_LABELS[agent.get_team()]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            target_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(target).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        # NOTE(review): delay is fetched but unused below -- confirm whether
        # animation_speed was meant to depend on it as in the other step().
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # tell the system to make the calculated motion
        state.update_pose(move_by, turn_by)

        return reward
Beispiel #18
0
def show_context_menu():
    """Open a context menu at the mouse cursor offering world-editing actions.

    For an object already registered in object_ids the menu lists only the
    operations recorded for it (rotate / scale / move / remove); otherwise
    it lists creation actions (wall, flag, turret, team spawn locations).
    """
    global modify_object_id
    global object_ids

    # an edit (move/rotate/scale) is already in progress; don't open a menu
    if len(modify_object_id) > 0:
        return

    guiMan.setTransparency(1.0)

    sim_context = OpenNero.getSimContext()

    # find the screen position at which to open the context menu
    cursor = sim_context.getMousePosition()
    location = sim_context.getClickedPosition(cursor.x, cursor.y)
    selected_object_id = sim_context.getClickedEntityId(cursor.x, cursor.y)

    # ignore clicks far off the ground plane unless they hit a managed object
    if selected_object_id not in object_ids and abs(location.z) > 10: return

    print "location:", location

    contextMenu = gui.create_context_menu(guiMan, 'context', cursor)

    def add_wall():
        # place a wall segment centered at the click location
        obj_id = common.addObject(
            "data/shapes/cube/Cube.xml",
            OpenNero.Vector3f(location.x, location.y, constants.HEIGHT + constants.OFFSET),
            OpenNero.Vector3f(0, 0, 90),
            scale=OpenNero.Vector3f(5, 30, constants.HEIGHT*2),
            type=1)
        object_ids[obj_id] = set(['rotate', 'move', 'scale', 'remove'])

    def rotate_object():
        # arm the mouse handler to rotate the selected object
        modify_object_id['rot'] = selected_object_id

    def scale_object():
        # arm the mouse handler to scale the selected object
        modify_object_id['scale'] = selected_object_id

    def move_object():
        # arm the mouse handler to drag the selected object
        modify_object_id['move'] = selected_object_id

    def remove_flag():
        module.getMod().remove_flag()

    def place_flag():
        # drop the flag at ground level under the cursor
        module.getMod().change_flag([location.x, location.y, 0])

    def place_basic_turret():
        obj_id = module.getMod().place_basic_turret([location.x, location.y, 0])
        object_ids[obj_id] = set(['move', 'remove'])

    def set_spawn_1():
        module.getMod().set_spawn(location.x, location.y, constants.OBJECT_TYPE_TEAM_0)

    def set_spawn_2():
        module.getMod().set_spawn(location.x, location.y, constants.OBJECT_TYPE_TEAM_1)

    def remove_wall():
        common.removeObject(selected_object_id)

    if selected_object_id in object_ids:
        # clicked a managed object: offer only its registered operations
        operations = object_ids[selected_object_id]

        if 'rotate' in operations:
            rotateButton = gui.create_button(guiMan, 'rotate', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
            rotateButton.OnMouseLeftClick = lambda: rotate_object()
            contextMenu.addItem('Rotate Object', rotateButton)

        if 'scale' in operations:
            scaleButton = gui.create_button(guiMan, 'scale', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
            scaleButton.OnMouseLeftClick = lambda: scale_object()
            contextMenu.addItem('Scale Object', scaleButton)

        if 'move' in operations:
            moveButton = gui.create_button(guiMan, 'move', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
            moveButton.OnMouseLeftClick = lambda: move_object()
            contextMenu.addItem('Move Object', moveButton)

        if 'remove' in operations:
            removeButton = gui.create_button(guiMan, 'remove', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
            removeButton.OnMouseLeftClick = lambda: remove_wall()
            contextMenu.addItem('Remove Object', removeButton)

    else:
        # clicked empty ground: offer the world-building actions
        wallButton = gui.create_button(guiMan, 'wall', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
        wallButton.OnMouseLeftClick = lambda: add_wall()
        contextMenu.addItem('Add wall', wallButton)

        rmFlagButton = gui.create_button(guiMan, 'flag', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
        rmFlagButton.OnMouseLeftClick = lambda: remove_flag()
        contextMenu.addItem('Remove Flag', rmFlagButton)

        flagButton = gui.create_button(guiMan, 'flag', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
        flagButton.OnMouseLeftClick = lambda: place_flag()
        contextMenu.addItem('Place Flag', flagButton)

        turretButton = gui.create_button(guiMan, 'b_turret', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
        turretButton.OnMouseLeftClick = lambda: place_basic_turret()
        contextMenu.addItem('Place Basic Turret', turretButton)

        spawn1Button = gui.create_button(guiMan, 'blue spawn', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
        spawn1Button.OnMouseLeftClick = lambda: set_spawn_1()
        contextMenu.addItem('Set Blue Spawn Location', spawn1Button)

        spawn2Button = gui.create_button(guiMan, 'red spawn', OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), '')
        spawn2Button.OnMouseLeftClick = lambda: set_spawn_2()
        contextMenu.addItem('Set Red Spawn Location', spawn2Button)
Beispiel #19
0
    def __init__(self, xdim, ydim, world_file = WORLD_FILE):
        """
        Create the environment
        """

        #self.Qlog = open("Qlog", "wb+")
        OpenNero.Environment.__init__(self) 
        OpenNero.getSimContext().delay = 0.0;
        
        self.XDIM = xdim
        self.YDIM = ydim
        self.max_steps = 500       
        self.states = {} # dictionary of agent states

        if WORLD_FILE is None:
            self.crumbs = world_handler.pattern_cluster(500, "Roomba/world_config.txt")
        else: # read external file
            data = world_handler.read_world(WORLD_FILE)
            self.XDIM = data['dims'][0]
            self.YDIM = data['dims'][1]
            self.crumbs = data['pellets']
            print len(self.crumbs)
        # only keep crumbs that are inside the walls
        self.crumbs = [c for c in self.crumbs if in_bounds(c.x,c.y)]

        self.init_list = AgentInit()
        self.init_list.add_type("<class 'Roomba.roomba.RoombaBrain'>")
        self.init_list.add_type("<class 'Roomba.RTNEATAgent.RTNEATAgent'>")
        self.init_list.add_type("<class 'Roomba.RLAgent.TabularRLAgent'>")
        #print self.init_list.types

        roomba_abound = self.init_list.get_action("<class 'Roomba.roomba.RoombaBrain'>")
        roomba_sbound = self.init_list.get_sensor("<class 'Roomba.roomba.RoombaBrain'>")
        roomba_rbound = self.init_list.get_reward("<class 'Roomba.roomba.RoombaBrain'>")
        rtneat_abound = self.init_list.get_action("<class 'Roomba.RTNEATAgent.RTNEATAgent'>")
        rtneat_sbound = self.init_list.get_sensor("<class 'Roomba.RTNEATAgent.RTNEATAgent'>")
        rtneat_rbound = self.init_list.get_reward("<class 'Roomba.RTNEATAgent.RTNEATAgent'>")

       
        ### Bounds for Roomba ###
        # actions
        roomba_abound.add_continuous(-math.pi, math.pi) # amount to turn by
        
        # sensors
        roomba_sbound.add_discrete(0,1)    # wall bump
        roomba_sbound.add_continuous(0,xdim)   # self.x
        roomba_sbound.add_continuous(0,ydim)   # self.y
        roomba_sbound.add_continuous(0,xdim)   # closest.x
        roomba_sbound.add_continuous(0,ydim)   # closest.y
        
        # rewards
        roomba_rbound.add_continuous(-100,100) # range for reward

        ### End Bounds for Roomba ####

        ### Bounds for RTNEAT ###
        # actions
        rtneat_abound.add_continuous(-math.pi, math.pi) # amount to turn by
        #rtneat_abound.add_continuous(0, 1) # signal
       
        # sensors
        rtneat_sbound.add_continuous(-math.pi, math.pi) # nearest crumb angle
        rtneat_sbound.add_continuous(0, 1) # proportion of crumb nearby

	# angle and distance sensors for (up to) 8  nearest neighbors
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)

        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
        rtneat_sbound.add_continuous(-math.pi, math.pi)
        rtneat_sbound.add_continuous(0, 1)
    
        # rewards
        rtneat_rbound.add_continuous(-math.pi, math.pi)
        ### End Bounds for RTNEAT ###

        #------------------------------------------------------------------------
        rltabular_abound = self.init_list.get_action("<class 'Roomba.RLAgent.TabularRLAgent'>")
        rltabular_sbound = self.init_list.get_sensor("<class 'Roomba.RLAgent.TabularRLAgent'>")
        rltabular_rbound = self.init_list.get_reward("<class 'Roomba.RLAgent.TabularRLAgent'>")

        ### Bounds for RLTabular ###
        # actions
        rltabular_abound.add_continuous(-math.pi, math.pi) # amount to turn by
        
        # sensors
        rltabular_sbound.add_continuous(-1, 1)
        rltabular_sbound.add_continuous(-1, 1)
        rltabular_sbound.add_continuous(-1, 1)
        rltabular_sbound.add_continuous(-1, 1)
        rltabular_sbound.add_continuous(-1, 1)
        rltabular_sbound.add_continuous(-1, 1)
    
        # rewards
        rltabular_rbound.add_continuous(-1, 1)
        ### End Bounds for RLTabular ###
        #------------------------------------------------------------------------


        # set up shop
        # Add Wayne's Roomba room with experimentally-derived vertical offset to match crumbs.
        common.addObject("data/terrain/RoombaRoom.xml", OpenNero.Vector3f(xdim/2,ydim/2, -1), OpenNero.Vector3f(0,0,0), OpenNero.Vector3f(xdim/245.0, ydim/245.0, constants.HEIGHT/24.5), type = constants.OBJECT_TYPE_WALLS)

        # OpenNero.getSimContext().addAxes()
        self.add_crumbs()
        for crumb in self.crumbs:
            self.add_crumb_sensors(roomba_sbound)        
Beispiel #20
0
def initObjectBrain(simId, brain):
    """Attach *brain* to the simulated object *simId* via the sim context."""
    context = OpenNero.getSimContext()
    return context.initObjectBrain(simId, brain)
Beispiel #21
0
 def set_speedup(self, speedup):
     """Map a 0-100 speedup percentage onto the simulation delay.

     0 keeps the full 1.0-second delay; 100 removes the delay entirely.
     The environment (when present) is told the same fraction.
     """
     fraction = speedup / 100.0
     OpenNero.getSimContext().delay = 1.0 - fraction
     if self.environment:
         self.environment.speedup = fraction
    def step(self, agent, action):
        """
        2A step for an agent
        """
        # if this agent has a serialized representation waiting, load it.
        chunk = self.agents_to_load.get(agent.state.id)
        if chunk is not None:
            print 'loading agent', agent.state.id, 'from', len(chunk), 'bytes'
            del self.agents_to_load[agent.state.id]
            try:
                agent.from_string(chunk)
            except:
                # if loading fails, remove this agent.
                print 'error loading agent', agent.state.id
                self.remove_agent(agent)

                # if a user has a badly formatted q-learning agent in a mixed
                # population file, the agent won't load and will be properly
                # removed here. however, RTNEAT has only allocated enough brainz
                # to cover (pop_size - num_qlearning_agents) agents, so whenever
                # it comes time to spawn new agents, RTNEAT will think that it
                # needs to spawn an extra agent to cover for this "missing" one.
                # to prevent this exception, we decrement pop_size here.
                #
                # this probably prevents teams from having the proper number of
                # agents if the user clicks on the deploy button after loading a
                # broken pop file ... but that's tricky to fix.
                constants.pop_size -= 1

                return agent.info.reward.get_instance()

        # set the epsilon for this agent, in case it's changed recently.
        agent.epsilon = self.epsilon

        state = self.get_state(agent)

        #Initilize Agent state
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.info.reward.get_instance()

        # display agent info if neccessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # get the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        if firing_status:
            target = self.closest_enemy(agent)
            if target is not None:
                pose = state.pose
                target_pose = self.get_state(target).pose
                relative_angle = self.angle(pose, target_pose)
                if abs(relative_angle) <= 2:
                    source_pos = agent.state.position
                    target_pos = target.state.position
                    source_pos.z = source_pos.z + 5
                    target_pos.z = target_pos.z + 5
                    dist = target_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.get_team()]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            target_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(target).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # tell the system to make the calculated motion
        state.update_pose(move_by, turn_by)

        return reward
Beispiel #23
0
def show_context_menu():
    global modify_object_id
    global object_ids

    if len(modify_object_id) > 0:
        return

    guiMan.setTransparency(1.0)

    sim_context = OpenNero.getSimContext()

    # find the screen position at which to open the context menu
    cursor = sim_context.getMousePosition()
    location = sim_context.getClickedPosition(cursor.x, cursor.y)
    selected_object_id = sim_context.getClickedEntityId(cursor.x, cursor.y)

    if selected_object_id not in object_ids and abs(location.z) > 1:
        return

    print "location:", location

    contextMenu = gui.create_context_menu(guiMan, "context", cursor)

    def add_wall():
        object_ids.append(
            common.addObject(
                "data/shapes/cube/Cube.xml",
                OpenNero.Vector3f(location.x, location.y, constants.HEIGHT + constants.OFFSET),
                OpenNero.Vector3f(0, 0, 90),
                scale=OpenNero.Vector3f(1, 30, constants.HEIGHT),
                type=1,
            )
        )

    def rotate_object():
        modify_object_id["rot"] = selected_object_id

    def scale_object():
        modify_object_id["scale"] = selected_object_id

    def move_object():
        modify_object_id["move"] = selected_object_id

    def place_flag():
        module.getMod().change_flag([location.x, location.y, 0])

    def place_basic_turret():
        module.getMod().place_basic_turret([location.x, location.y, 0])

    def set_spawn():
        module.getMod().set_spawn(location.x, location.y)

    def remove_wall():
        common.removeObject(selected_object_id)

    if selected_object_id in object_ids:
        rotateButton = gui.create_button(guiMan, "rotate", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        rotateButton.OnMouseLeftClick = lambda: rotate_object()
        contextMenu.addItem("Rotate Object", rotateButton)

        scaleButton = gui.create_button(guiMan, "scale", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        scaleButton.OnMouseLeftClick = lambda: scale_object()
        contextMenu.addItem("Scale Object", scaleButton)

        moveButton = gui.create_button(guiMan, "move", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        moveButton.OnMouseLeftClick = lambda: move_object()
        contextMenu.addItem("Move Object", moveButton)

        removeButton = gui.create_button(guiMan, "remove", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        removeButton.OnMouseLeftClick = lambda: remove_wall()
        contextMenu.addItem("Remove Object", removeButton)

    else:
        wallButton = gui.create_button(guiMan, "wall", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        wallButton.OnMouseLeftClick = lambda: add_wall()
        contextMenu.addItem("Add wall", wallButton)

        flagButton = gui.create_button(guiMan, "flag", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        flagButton.OnMouseLeftClick = lambda: place_flag()
        contextMenu.addItem("Place Flag", flagButton)

        turretButton = gui.create_button(guiMan, "b_turret", OpenNero.Pos2i(0, 0), OpenNero.Pos2i(0, 0), "")
        turretButton.OnMouseLeftClick = lambda: place_basic_turret()
        contextMenu.addItem("Place Basic Turret", turretButton)
Beispiel #24
0
def addSkyBox(prefix, extension=None):
    """Install a skybox whose textures start with *prefix*.

    A truthy *extension* is forwarded as the file extension; otherwise
    the sim context's default is used.
    """
    args = (prefix, extension) if extension else (prefix,)
    OpenNero.getSimContext().addSkyBox(*args)
Beispiel #25
0
def show_context_menu():
    """Open a context menu at the mouse cursor.

    If the click landed on a tracked object, offer only the operations
    registered for that object in object_ids (rotate/scale/move/remove);
    otherwise offer creation actions (wall, flag, turret, spawn points)
    at the clicked world location.
    """
    global modify_object_id
    global object_ids

    # an object-modification mode is already in progress; don't stack menus
    if len(modify_object_id) > 0:
        return

    guiMan.setTransparency(1.0)

    sim_context = OpenNero.getSimContext()

    # find the screen position at which to open the context menu
    cursor = sim_context.getMousePosition()
    location = sim_context.getClickedPosition(cursor.x, cursor.y)
    selected_object_id = sim_context.getClickedEntityId(cursor.x, cursor.y)

    # ignore clicks that hit neither a tracked object nor near-ground level
    if selected_object_id not in object_ids and abs(location.z) > 10: return

    print "location:", location

    contextMenu = gui.create_context_menu(guiMan, 'context', cursor)

    def add_wall():
        # spawn a wall and register which operations it supports
        obj_id = common.addObject(
            "data/shapes/cube/Cube.xml",
            OpenNero.Vector3f(location.x, location.y,
                              constants.HEIGHT + constants.OFFSET),
            OpenNero.Vector3f(0, 0, 90),
            scale=OpenNero.Vector3f(5, 30, constants.HEIGHT * 2),
            type=1)
        object_ids[obj_id] = set(['rotate', 'move', 'scale', 'remove'])

    def rotate_object():
        modify_object_id['rot'] = selected_object_id

    def scale_object():
        modify_object_id['scale'] = selected_object_id

    def move_object():
        modify_object_id['move'] = selected_object_id

    def remove_flag():
        module.getMod().remove_flag()

    def place_flag():
        module.getMod().change_flag([location.x, location.y, 0])

    def place_basic_turret():
        # turrets may be moved or removed, but not rotated/scaled
        obj_id = module.getMod().place_basic_turret(
            [location.x, location.y, 0])
        object_ids[obj_id] = set(['move', 'remove'])

    def set_spawn_1():
        module.getMod().set_spawn(location.x, location.y,
                                  constants.OBJECT_TYPE_TEAM_0)

    def set_spawn_2():
        module.getMod().set_spawn(location.x, location.y,
                                  constants.OBJECT_TYPE_TEAM_1)

    def remove_wall():
        common.removeObject(selected_object_id)

    if selected_object_id in object_ids:
        # only offer the operations registered for this object
        operations = object_ids[selected_object_id]

        if 'rotate' in operations:
            rotateButton = gui.create_button(guiMan, 'rotate',
                                             OpenNero.Pos2i(0, 0),
                                             OpenNero.Pos2i(0, 0), '')
            rotateButton.OnMouseLeftClick = lambda: rotate_object()
            contextMenu.addItem('Rotate Object', rotateButton)

        if 'scale' in operations:
            scaleButton = gui.create_button(guiMan, 'scale',
                                            OpenNero.Pos2i(0, 0),
                                            OpenNero.Pos2i(0, 0), '')
            scaleButton.OnMouseLeftClick = lambda: scale_object()
            contextMenu.addItem('Scale Object', scaleButton)

        if 'move' in operations:
            moveButton = gui.create_button(guiMan, 'move',
                                           OpenNero.Pos2i(0, 0),
                                           OpenNero.Pos2i(0, 0), '')
            moveButton.OnMouseLeftClick = lambda: move_object()
            contextMenu.addItem('Move Object', moveButton)

        if 'remove' in operations:
            removeButton = gui.create_button(guiMan, 'remove',
                                             OpenNero.Pos2i(0, 0),
                                             OpenNero.Pos2i(0, 0), '')
            removeButton.OnMouseLeftClick = lambda: remove_wall()
            contextMenu.addItem('Remove Object', removeButton)

    else:
        # clicked on empty space: offer creation actions
        wallButton = gui.create_button(guiMan, 'wall', OpenNero.Pos2i(0, 0),
                                       OpenNero.Pos2i(0, 0), '')
        wallButton.OnMouseLeftClick = lambda: add_wall()
        contextMenu.addItem('Add wall', wallButton)

        rmFlagButton = gui.create_button(guiMan, 'flag', OpenNero.Pos2i(0, 0),
                                         OpenNero.Pos2i(0, 0), '')
        rmFlagButton.OnMouseLeftClick = lambda: remove_flag()
        contextMenu.addItem('Remove Flag', rmFlagButton)

        flagButton = gui.create_button(guiMan, 'flag', OpenNero.Pos2i(0, 0),
                                       OpenNero.Pos2i(0, 0), '')
        flagButton.OnMouseLeftClick = lambda: place_flag()
        contextMenu.addItem('Place Flag', flagButton)

        turretButton = gui.create_button(guiMan, 'b_turret',
                                         OpenNero.Pos2i(0, 0),
                                         OpenNero.Pos2i(0, 0), '')
        turretButton.OnMouseLeftClick = lambda: place_basic_turret()
        contextMenu.addItem('Place Basic Turret', turretButton)

        spawn1Button = gui.create_button(guiMan, 'blue spawn',
                                         OpenNero.Pos2i(0, 0),
                                         OpenNero.Pos2i(0, 0), '')
        spawn1Button.OnMouseLeftClick = lambda: set_spawn_1()
        contextMenu.addItem('Set Blue Spawn Location', spawn1Button)

        spawn2Button = gui.create_button(guiMan, 'red spawn',
                                         OpenNero.Pos2i(0, 0),
                                         OpenNero.Pos2i(0, 0), '')
        spawn2Button.OnMouseLeftClick = lambda: set_spawn_2()
        contextMenu.addItem('Set Red Spawn Location', spawn2Button)
Beispiel #26
0
    def step(self, agent, action):
        """
        Advance the simulation by one step for *agent*.

        *action* is indexed by constants.ACTION_INDEX_* (speed, turn in
        radians, firing value; firing fires when >= 0.5).  Performs the
        firing ray-test, computes the reward, and applies the motion only
        if no manual collision is predicted.  Returns the reward.
        """
        state = self.get_state(agent)

        # Initialize agent pose on its very first step (turrets keep theirs)
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                # start facing a random direction so agents don't all align
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.rewards.get_instance()

        # display agent info if necessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # decode the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        closest_enemy = self.closest_enemy(agent)
        if firing_status:
            if closest_enemy is not None:
                pose = state.pose
                closest_enemy_pose = self.get_state(closest_enemy).pose
                relative_angle = self.angle(pose, closest_enemy_pose)
                if abs(relative_angle) <= 2:
                    # NOTE(review): adds 5 to z on the returned position
                    # objects -- assumes state.position returns a copy; confirm.
                    source_pos = agent.state.position
                    closest_enemy_pos = closest_enemy.state.position
                    source_pos.z = source_pos.z + 5
                    closest_enemy_pos.z = closest_enemy_pos.z + 5
                    dist = closest_enemy_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.team_type]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        # ray-cast toward the enemy; any obstacle blocks the shot
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            closest_enemy_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(closest_enemy).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        # NOTE(review): `delay` is assigned but never used below
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # NOTE(review): `team` is assigned but never used below
        team = self.get_team(agent)

        # tell the system to make the calculated motion
        # if the motion doesn't result in a collision
        dist = constants.MAX_MOVEMENT_SPEED * move_by
        heading = common.wrap_degrees(agent.state.rotation.z, turn_by)
        x = agent.state.position.x + dist * math.cos(math.radians(heading))
        y = agent.state.position.y + dist * math.sin(math.radians(heading))

        # manual collision detection
        desired_pose = (x, y, heading)

        collision_detected = False

        friends, foes = self.get_friend_foe(agent)
        for f in friends:
            if f != agent:
                f_state = self.get_state(f)
                # we impose an order on agents to avoid deadlocks. Without this
                # two agents which spawn very close to each other can never escape
                # each other's collision radius
                if state.id > f_state.id:
                    f_pose = f_state.pose
                    dist = self.distance(desired_pose, f_pose)
                    if dist < constants.MANUAL_COLLISION_DISTANCE:
                        collision_detected = True
                        continue

        # just check for collisions with the closest enemy
        if closest_enemy:
            if not collision_detected:
                f_pose = self.get_state(closest_enemy).pose
                dist = self.distance(desired_pose, f_pose)
                if dist < constants.MANUAL_COLLISION_DISTANCE:
                    collision_detected = True

        # only move when the predicted pose is collision-free
        if not collision_detected:
            state.update_pose(move_by, turn_by)

        return reward
Beispiel #27
0
 def set_speedup(self, speedup):
     print 'Speedup set to', speedup
     # speed up between 0 (delay set to 1 second) and 1 (delay set to 0)
     OpenNero.getSimContext().delay = 1.0 - speedup
Beispiel #28
0
    def step(self, agent, action):
        """
        2A step for an agent
        """
        # if this agent has a serialized representation waiting, load it.
        chunk = self.agents_to_load.get(agent.state.id)
        if chunk is not None:
            print 'loading agent', agent.state.id, 'from', len(chunk), 'bytes'
            del self.agents_to_load[agent.state.id]
            try:
                agent.from_string(chunk)
            except:
                # if loading fails, remove this agent.
                print 'error loading agent', agent.state.id
                self.remove_agent(agent)

                # if a user has a badly formatted q-learning agent in a mixed
                # population file, the agent won't load and will be properly
                # removed here. however, RTNEAT has only allocated enough brainz
                # to cover (pop_size - num_qlearning_agents) agents, so whenever
                # it comes time to spawn new agents, RTNEAT will think that it
                # needs to spawn an extra agent to cover for this "missing" one.
                # to prevent this exception, we decrement pop_size here.
                #
                # this probably prevents teams from having the proper number of
                # agents if the user clicks on the deploy button after loading a
                # broken pop file ... but that's tricky to fix.
                constants.pop_size -= 1

                return agent.info.reward.get_instance()

        # set the epsilon for this agent, in case it's changed recently.
        agent.epsilon = self.epsilon

        state = self.get_state(agent)

        #Initilize Agent state
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.info.reward.get_instance()

        # display agent info if neccessary
        if hasattr(agent, 'set_display_hint'):
            agent.set_display_hint()

        # spawn more agents if possible.
        self.maybe_spawn(agent)

        # get the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = (firing >= 0.5)

        scored_hit = False
        # firing decision
        closest_enemy = self.closest_enemy(agent)
        if firing_status:
            if closest_enemy is not None:
                pose = state.pose
                closest_enemy_pose = self.get_state(closest_enemy).pose
                relative_angle = self.angle(pose, closest_enemy_pose)
                if abs(relative_angle) <= 2:
                    source_pos = agent.state.position
                    closest_enemy_pos = closest_enemy.state.position
                    source_pos.z = source_pos.z + 5
                    closest_enemy_pos.z = closest_enemy_pos.z + 5
                    dist = closest_enemy_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist)/constants.MAX_SHOT_RADIUS
                    if random.random() < d/2: # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.get_team()]
                        if team_color == 'red':
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == 'blue':
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos,
                            closest_enemy_pos,
                            constants.OBJECT_TYPE_OBSTACLE,
                            True,
                            wall_color,
                            color)
                        #if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(closest_enemy).curr_damage += 1
                            scored_hit = True
                else: # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, 'run')
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # tell the system to make the calculated motion
        # if the motion doesn't result in a collision
        dist = constants.MAX_MOVEMENT_SPEED * move_by
        heading = common.wrap_degrees(agent.state.rotation.z, turn_by)
        x = agent.state.position.x + dist * math.cos(math.radians(heading))
        y = agent.state.position.y + dist * math.sin(math.radians(heading))

        # manual collision detection
        desired_pose = (x, y, heading)

        collision_detected = False

        friends, foes = self.getFriendFoe(agent)
        for f in friends:
            if f != agent:
                f_state = self.get_state(f)
                # we impose an order on agents to avoid deadlocks. Without this
                # two agents which spawn very close to each other can never escape
                # each other's collision radius
                if state.id > f_state.id:
                    f_pose = f_state.pose
                    dist = self.distance(desired_pose, f_pose)
                    if dist < constants.MANUAL_COLLISION_DISTANCE:
                        collision_detected = True
                        continue

        # no need to check for collisions with all enemies
        #if foes:
        #    if not collision_detected:
        #        for f in foes:
        #            f_pose = self.get_state(f).pose
        #            dist = self.distance(desired_pose, f_pose)
        #            if dist < constants.MANUAL_COLLISION_DISTANCE:
        #                collision_detected = True
        #                continue

        # just check for collisions with the closest enemy
        if closest_enemy:
            if not collision_detected:
                f_pose = self.get_state(closest_enemy).pose
                dist = self.distance(desired_pose, f_pose)
                if dist < constants.MANUAL_COLLISION_DISTANCE:
                    collision_detected = True

        if not collision_detected:
            state.update_pose(move_by, turn_by)

        return reward
Beispiel #29
0
 def set_speedup(self, speedup):
     OpenNero.getSimContext().delay = 1.0 - (speedup / 100.0)
     print 'speedup delay', OpenNero.getSimContext().delay
     if self.environment:
         self.environment.speedup = speedup / 100.0
Beispiel #30
0
 def set_speedup(self, speedup):
     print 'Speedup set to', speedup
     # speed up between 0 (delay set to 1 second) and 1 (delay set to 0)
     OpenNero.getSimContext().delay = 1.0 - speedup
Beispiel #31
0
 def set_speedup(self, speedup):
     OpenNero.getSimContext().delay = 1.0 - (speedup / 100.0)
     print 'speedup delay', OpenNero.getSimContext().delay
Beispiel #32
0
 def set_speedup(self, speedup):
     OpenNero.getSimContext().delay = 1.0 - (speedup / 100.0)
     print 'speedup delay', OpenNero.getSimContext().delay
     if self.environment:
         self.environment.speedup = speedup / 100.0
Beispiel #33
0
 def addAxes(self):
     """Render the world coordinate axes in the current simulation."""
     context = OpenNero.getSimContext()
     context.addAxes()
Beispiel #34
0
 def addAxes(self):
     """Draw the coordinate axes via the simulation context."""
     context = OpenNero.getSimContext()
     context.addAxes()
Beispiel #35
0
def initObjectBrain(simId, brain):
    """Bind *brain* to the simulated object *simId* and return the result."""
    context = OpenNero.getSimContext()
    return context.initObjectBrain(simId, brain)
Beispiel #36
0
    def step(self, agent, action):
        """
        Advance the simulation by one step for *agent*.

        *action* is indexed by constants.ACTION_INDEX_* (speed, turn in
        radians, firing value; firing fires when >= 0.5).  Performs the
        firing ray-test, computes the reward, and applies the motion only
        if no manual collision is predicted.  Returns the reward.
        """
        state = self.get_state(agent)

        # Initialize agent pose on its very first step (turrets keep theirs)
        if agent.step == 0 and agent.group != "Turret":
            p = agent.state.position
            r = agent.state.rotation
            if agent.group == "Agent":
                # start facing a random direction so agents don't all align
                r.z = random.randrange(360)
                agent.state.rotation = r
            state.reset_pose(p, r)
            return agent.rewards.get_instance()

        # display agent info if necessary
        if hasattr(agent, "set_display_hint"):
            agent.set_display_hint()

        # decode the desired action of the agent
        move_by = action[constants.ACTION_INDEX_SPEED]
        turn_by = math.degrees(action[constants.ACTION_INDEX_TURN])
        firing = action[constants.ACTION_INDEX_FIRE]
        firing_status = firing >= 0.5

        scored_hit = False
        # firing decision
        closest_enemy = self.closest_enemy(agent)
        if firing_status:
            if closest_enemy is not None:
                pose = state.pose
                closest_enemy_pose = self.get_state(closest_enemy).pose
                relative_angle = self.angle(pose, closest_enemy_pose)
                if abs(relative_angle) <= 2:
                    # NOTE(review): adds 5 to z on the returned position
                    # objects -- assumes state.position returns a copy; confirm.
                    source_pos = agent.state.position
                    closest_enemy_pos = closest_enemy.state.position
                    source_pos.z = source_pos.z + 5
                    closest_enemy_pos.z = closest_enemy_pos.z + 5
                    dist = closest_enemy_pos.getDistanceFrom(source_pos)
                    d = (constants.MAX_SHOT_RADIUS - dist) / constants.MAX_SHOT_RADIUS
                    if random.random() < d / 2:  # attempt a shot depending on distance
                        team_color = constants.TEAM_LABELS[agent.team_type]
                        if team_color == "red":
                            color = OpenNero.Color(255, 255, 0, 0)
                        elif team_color == "blue":
                            color = OpenNero.Color(255, 0, 0, 255)
                        else:
                            color = OpenNero.Color(255, 255, 255, 0)
                        wall_color = OpenNero.Color(128, 0, 255, 0)
                        # ray-cast toward the enemy; any obstacle blocks the shot
                        obstacles = OpenNero.getSimContext().findInRay(
                            source_pos, closest_enemy_pos, constants.OBJECT_TYPE_OBSTACLE, True, wall_color, color
                        )
                        # if len(obstacles) == 0 and random.random() < d/2:
                        if len(obstacles) == 0:
                            # count as hit depending on distance
                            self.get_state(closest_enemy).curr_damage += 1
                            scored_hit = True
                else:  # turn toward the enemy
                    turn_by = relative_angle

        # set animation speed
        # TODO: move constants into constants.py
        self.set_animation(agent, state, "run")
        # NOTE(review): `delay` is assigned but never used below
        delay = OpenNero.getSimContext().delay
        agent.state.animation_speed = move_by * constants.ANIMATION_RATE

        reward = self.calculate_reward(agent, action, scored_hit)

        # NOTE(review): `team` is assigned but never used below
        team = self.get_team(agent)

        # tell the system to make the calculated motion
        # if the motion doesn't result in a collision
        dist = constants.MAX_MOVEMENT_SPEED * move_by
        heading = common.wrap_degrees(agent.state.rotation.z, turn_by)
        x = agent.state.position.x + dist * math.cos(math.radians(heading))
        y = agent.state.position.y + dist * math.sin(math.radians(heading))

        # manual collision detection
        desired_pose = (x, y, heading)

        collision_detected = False

        friends, foes = self.get_friend_foe(agent)
        for f in friends:
            if f != agent:
                f_state = self.get_state(f)
                # we impose an order on agents to avoid deadlocks. Without this
                # two agents which spawn very close to each other can never escape
                # each other's collision radius
                if state.id > f_state.id:
                    f_pose = f_state.pose
                    dist = self.distance(desired_pose, f_pose)
                    if dist < constants.MANUAL_COLLISION_DISTANCE:
                        collision_detected = True
                        continue

        # just check for collisions with the closest enemy
        if closest_enemy:
            if not collision_detected:
                f_pose = self.get_state(closest_enemy).pose
                dist = self.distance(desired_pose, f_pose)
                if dist < constants.MANUAL_COLLISION_DISTANCE:
                    collision_detected = True

        # only move when the predicted pose is collision-free
        if not collision_detected:
            state.update_pose(move_by, turn_by)

        return reward
Beispiel #37
0
def removeObject(ID):
    """Delete the entity with the given id from the simulation."""
    context = OpenNero.getSimContext()
    context.removeObject(ID)
Beispiel #38
0
 def set_speedup(self, speedup):
     """Scale a 0-100 speedup percentage into the sim delay and environment."""
     scale = speedup/100.0
     OpenNero.getSimContext().delay = 1.0 - scale
     if self.environment:
         self.environment.speedup = scale
Beispiel #39
0
def addSkyBox(prefix, extension = None):
    """Install a skybox whose texture files start with *prefix*.

    A truthy *extension* is passed through as the file extension;
    otherwise the sim context default is used.
    """
    context = OpenNero.getSimContext()
    if not extension:
        context.addSkyBox(prefix)
    else:
        context.addSkyBox(prefix, extension)
Beispiel #40
0
def removeObject(ID):
    """Remove the object identified by *ID* from the running simulation."""
    context = OpenNero.getSimContext()
    context.removeObject(ID)
Beispiel #41
0
def getGuiManager():
    """Fetch the GUI manager from the active simulation context."""
    context = OpenNero.getSimContext()
    return context.getGuiManager()
Beispiel #42
0
def getGuiManager():
    """Return the GUI manager owned by the current simulation context."""
    context = OpenNero.getSimContext()
    return context.getGuiManager()
Beispiel #43
0
def addObject(templateFile, position, rotation=None, scale=None, label="", type=0, collision=0):
    """Add an object to the simulation and return its id.

    rotation defaults to Vector3f(0, 0, 0) and scale to Vector3f(1, 1, 1),
    built fresh on every call.  The original used Vector3f instances as
    default arguments, which Python evaluates once at definition time; a
    callee mutating that shared vector would corrupt all later calls.
    Note the sim context expects collision BEFORE type in its argument list.
    """
    if rotation is None:
        rotation = OpenNero.Vector3f(0, 0, 0)
    if scale is None:
        scale = OpenNero.Vector3f(1, 1, 1)
    return OpenNero.getSimContext().addObject(
        templateFile, position, rotation, scale, label, collision, type)